// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG			0x00
#define ASPEED_I2C_AC_TIMING_REG1		0x04
#define ASPEED_I2C_AC_TIMING_REG2		0x08
#define ASPEED_I2C_INTR_CTRL_REG		0x0c
#define ASPEED_I2C_INTR_STS_REG			0x10
#define ASPEED_I2C_CMD_REG			0x14
#define ASPEED_I2C_DEV_ADDR_REG			0x18
#define ASPEED_I2C_BYTE_BUF_REG			0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS		BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN		BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN		BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN		BIT(6)
#define ASPEED_I2CD_SLAVE_EN			BIT(1)
#define ASPEED_I2CD_MASTER_EN			BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK		GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK		GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK		GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT		16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK		GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT		12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK		GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK	GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX		GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL			0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT		BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE	BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH		BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT		BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL		BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP		BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS		BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE		BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK			BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK			BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_NORMAL_STOP | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS | \
		 ASPEED_I2CD_INTR_RX_DONE | \
		 ASPEED_I2CD_INTR_TX_NAK | \
		 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS		BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS		BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS		BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD		BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD			BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST		BIT(4)
#define ASPEED_I2CD_M_RX_CMD			BIT(3)
#define ASPEED_I2CD_S_TX_CMD			BIT(2)
#define ASPEED_I2CD_M_TX_CMD			BIT(1)
#define ASPEED_I2CD_M_START_CMD			BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK \
		(ASPEED_I2CD_M_STOP_CMD | \
		 ASPEED_I2CD_M_S_RX_CMD_LAST | \
		 ASPEED_I2CD_M_RX_CMD | \
		 ASPEED_I2CD_M_TX_CMD | \
		 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK		GENMASK(6, 0)

enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};

enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};

struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	bool				send_stop;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

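/*
 * Attempt to recover a hung bus: if SCL is stuck low, issue a STOP command;
 * if SDA is stuck low, issue the hardware bus-recovery command, which clocks
 * SCL until SDA is released. Falls back to a full controller reset if
 * recovery times out or fails.
 */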
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Writes 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
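/*
 * Handle slave-mode interrupts: advance the slave state machine and forward
 * the decoded events to the registered backend via i2c_slave_event().
 * Returns the interrupt bits that were consumed.
 */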
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;

	if (!slave)
		return 0;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	/* Slave was requested, restart state machine. */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/* Slave is not currently active, irq was for someone else. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
						ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	/* Slave was asked to stop. */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}
	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If a master command is requested in the middle of a slave session,
	 * set the master state to 'pending'; the interrupt handler will
	 * restart the command once the bus returns to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

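/* Map error interrupt status bits onto an errno; returns 0 if none are set. */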
static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

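/*
 * Handle master-mode interrupts: drive the master state machine through the
 * START, TX, RX and STOP phases of the current transfer and complete
 * cmd_complete once the transfer finishes or fails. Returns the interrupt
 * bits that were consumed.
 */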
static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue, effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a transfer immediately after we
		 * queue a master command, clear the queued master command and
		 * change our state to 'pending'. To keep the pending cases
		 * simple, they are handled in software rather than through
		 * the hardware command queue.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		/* fall through */
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		/* fall through */
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}

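/*
 * Top-level interrupt handler: read and ack the interrupt status, then
 * dispatch the remaining bits to the master and slave sub-handlers.
 */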
static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command here if a slave operation has
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}

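/*
 * i2c_algorithm .master_xfer hook: kick off the transfer under the bus lock
 * and let the interrupt handler drive it to completion, attempting bus
 * recovery if the bus is busy or the transfer times out.
 */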
static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * If we timed out and the bus is still busy in a multi master
		 * environment, attempt recovery here.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);

		/*
		 * If we timed out and the state is still pending, drop the
		 * pending master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/* Set slave addr. */
	addr_reg_val = readl(bus->base + ASPEED_I2C_DEV_ADDR_REG);
	addr_reg_val &= ~ASPEED_I2CD_DEV_ADDR_MASK;
	addr_reg_val |= slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

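/*
 * Compute the value of the clock and AC timing register #1: a base clock
 * divisor plus the SCL high and low counts that together produce the
 * requested divisor of the APB clock.
 */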
static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The register fields store one less than the actual SCL_high and
	 * SCL_low counts, since the minimum for each is 1; thus
	 *	SCL_high = clk_high + 1
	 *	SCL_low = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
			| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
			   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
			| (base_clk_divisor
			   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}

static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

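/*
 * Reinitialize the controller: disable and ack all interrupts, then bring the
 * hardware back up via aspeed_i2c_init(). Used as a last resort when bus
 * recovery fails.
 */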
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);

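/*
 * Probe: map the register space, look up the parent clock rate and bus
 * frequency, initialize the controller and register the I2C adapter.
 */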
static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	struct resource *res;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bus->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate; we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = 100000;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static int aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);

	return 0;
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove		= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");