// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* I2C Register */
#define ASPEED_I2C_FUN_CTRL_REG			0x00
#define ASPEED_I2C_AC_TIMING_REG1		0x04
#define ASPEED_I2C_AC_TIMING_REG2		0x08
#define ASPEED_I2C_INTR_CTRL_REG		0x0c
#define ASPEED_I2C_INTR_STS_REG			0x10
#define ASPEED_I2C_CMD_REG			0x14
#define ASPEED_I2C_DEV_ADDR_REG			0x18
#define ASPEED_I2C_BYTE_BUF_REG			0x20

/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */

/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS		BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN		BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN		BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN		BIT(6)
#define ASPEED_I2CD_SLAVE_EN			BIT(1)
#define ASPEED_I2CD_MASTER_EN			BIT(0)

/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK		GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK		GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK		GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT		16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK		GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT		12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK		GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK	GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX		GENMASK(3, 0)
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL			0

/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK		0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT		BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE	BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH		BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT		BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL		BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP		BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS		BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE		BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK			BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK			BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_NORMAL_STOP | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS | \
		 ASPEED_I2CD_INTR_RX_DONE | \
		 ASPEED_I2CD_INTR_TX_NAK | \
		 ASPEED_I2CD_INTR_TX_ACK)

/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS		BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS		BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS		BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD		BIT(11)

/* Command Bit */
#define ASPEED_I2CD_M_STOP_CMD			BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST		BIT(4)
#define ASPEED_I2CD_M_RX_CMD			BIT(3)
#define ASPEED_I2CD_S_TX_CMD			BIT(2)
#define ASPEED_I2CD_M_TX_CMD			BIT(1)
#define ASPEED_I2CD_M_START_CMD			BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK \
		(ASPEED_I2CD_M_STOP_CMD | \
		 ASPEED_I2CD_M_S_RX_CMD_LAST | \
		 ASPEED_I2CD_M_RX_CMD | \
		 ASPEED_I2CD_M_TX_CMD | \
		 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK		GENMASK(6, 0)
enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};
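
/*
 * Editor's sketch of the usual master flow, as read from
 * aspeed_i2c_master_irq() below (illustrative, not authoritative
 * documentation):
 *
 *	INACTIVE -> START -> TX_FIRST/RX_FIRST -> TX/RX ... -> STOP -> INACTIVE
 *
 * PENDING is entered instead of START when a transfer is requested while the
 * slave state machine is active (or when a peer master wins the bus); the
 * queued transfer is restarted once the bus goes idle again.
 */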

enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};
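
/*
 * Editor's sketch of the slave flow as handled in aspeed_i2c_slave_irq()
 * below (illustrative only): START is entered on an address match, the
 * address byte then steers the machine to READ_REQUESTED (master reads from
 * us) or WRITE_REQUESTED (master writes to us), data bytes cycle through
 * READ_PROCESSED or WRITE_RECEIVED, and a STOP or NAK returns it to INACTIVE.
 */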

struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	bool				send_stop;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);

static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Generates 1 to 8 SCL clock pulses until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;
	int ret;

	if (!slave)
		return 0;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	/* Slave was requested, restart state machine. */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/* Slave is not currently active, irq was for someone else. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
					ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
					ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	/* Slave was asked to stop. */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}
	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		/*
		 * The hardware has already ACKed this address phase, but since
		 * the backend driver returned an error, NACK the next incoming
		 * byte.
		 */
		if (ret < 0)
			writel(ASPEED_I2CD_M_S_RX_CMD_LAST, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If a transfer is requested in the middle of a slave session, set
	 * the master state to 'pending'; the queued master command is issued
	 * from the interrupt handler once the bus returns to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

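	/*
	 * Illustrative example (editor's note, not from the datasheet): for a
	 * single-byte read without I2C_M_RECV_LEN, the command written below
	 * ends up as START | TX | RX | RX_LAST, i.e. the controller sends the
	 * address byte and then clocks in one byte which it NACKs.
	 */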
	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}

/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}

static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}

static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a transfer immediately after it
		 * queues a master command, clear the queued master command and
		 * change its state to 'pending'. To simplify the handling of
		 * pending cases, a software solution is used here instead of
		 * the hardware command queue.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		fallthrough;
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		fallthrough;
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}

		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}
error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}

static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
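	/*
	 * Editor's note: Rx done is acked only at the end of this handler (see
	 * below), presumably because acking it releases the byte buffer and
	 * would let the controller clock in more data before the current byte
	 * has been consumed. The read-back above is assumed to flush the
	 * posted write before handling continues.
	 */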
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command here if the slave operation has
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	}
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}

static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);
	bus->cmd_err = 0;

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * In a multi-master setup, if a timeout occurs, attempt
		 * recovery. But if the bus is idle, we still need to reset the
		 * i2c controller to clear the remaining interrupts.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);
		else
			aspeed_i2c_reset(bus);

		/*
		 * If timed out and the state is still pending, drop the
		 * pending master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}
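
/*
 * Editor's illustration of how a transfer typically reaches the function
 * above (the 0x50 address and register layout are made up): a client driver
 * doing a register read submits two messages, and the second one is started
 * with a repeated START by aspeed_i2c_next_msg_or_stop():
 *
 *	u8 reg = 0x10, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *	i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
 */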

static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/*
	 * Set slave addr. Reserved bits can all safely be written with zeros
	 * on all of ast2[456]00, so zero everything else to ensure we only
	 * enable a single slave address (ast2500 has two, ast2600 has three,
	 * the enable bits for which are also in this register) so that we
	 * don't end up with additional phantom devices responding on the bus.
	 */
	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}

static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}

static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */
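
/*
 * Editor's usage note (not part of the driver): ->reg_slave/->unreg_slave are
 * exercised by generic slave backends. For example, assuming the
 * i2c-slave-eeprom backend is available, a 256-byte EEPROM can be emulated at
 * slave address 0x64 on bus 0 from userspace with:
 *
 *	echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-0/new_device
 *
 * (the 0x1000 flag marks the address as a slave address).
 */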

static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};

static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The register stores SCL_high and SCL_low with an implicit minimum of
	 * 1; thus, the stored fields start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution:
	 */
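	/*
	 * Worked example (editor's illustration; the 48 MHz APB clock is an
	 * assumption, not a chip-specific fact): for bus_frequency = 100 kHz,
	 * divisor = DIV_ROUND_UP(48000000, 100000) = 480. With the ast2500
	 * mask (clk_high_low_max = 32) this picks base_clk_divisor = 4, then
	 * tmp = 495 >> 4 = 30, so clk_low = clk_high = 15, stored as 14 after
	 * the decrement below. Check: 48 MHz / (2^4 * (15 + 15)) = 100 kHz.
	 */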
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
		| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
		   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
		| (base_clk_divisor
		   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}

static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}

static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}

/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}

static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}

static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2600-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
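
/*
 * Editor's sketch of a matching device tree node; the unit address, clock and
 * reset specifiers, and interrupt number below are made-up examples. Only the
 * properties read by this driver ("bus-frequency", "multi-master") and the
 * compatible strings come from the code above:
 *
 *	i2c3: i2c-bus@c0 {
 *		compatible = "aspeed,ast2500-i2c-bus";
 *		reg = <0xc0 0x40>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		clocks = <&syscon ASPEED_CLK_APB>;
 *		resets = <&syscon ASPEED_RESET_I2C>;
 *		interrupts = <3>;
 *		bus-frequency = <100000>;
 *		multi-master;
 *	};
 */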

static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate; we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}

static void aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);
}

static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove_new	= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);

MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");