// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMx3 Wkup M3 IPC driver
 *
 * Copyright (C) 2015 Texas Instruments, Inc.
 *
 * Dave Gerlach <d-gerlach@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/suspend.h>
#include <linux/wkup_m3_ipc.h>

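/*
 * The control module exposes a small bank of IPC registers (register m at
 * offset 0x4 + 4 * m from the mapped base) used to pass commands and
 * parameters between the MPU and the wakeup CM3.
 */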
#define AM33XX_CTRL_IPC_REG_COUNT 0x8
#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))

/* AM33XX M3_TXEV_EOI register */
#define AM33XX_CONTROL_M3_TXEV_EOI 0x00

#define AM33XX_M3_TXEV_ACK (0x1 << 0)
#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)

#define IPC_CMD_DS0 0x4
#define IPC_CMD_STANDBY 0xc
#define IPC_CMD_IDLE 0x10
#define IPC_CMD_RESET 0xe
#define DS_IPC_DEFAULT 0xffffffff
#define M3_VERSION_UNKNOWN 0x0000ffff
#define M3_BASELINE_VERSION 0x191
#define M3_STATUS_RESP_MASK (0xffff << 16)
#define M3_FW_VERSION_MASK 0xffff
#define M3_WAKE_SRC_MASK 0xff

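/*
 * The fields below are packed into a single word and written to IPC
 * register 4 by wkup_m3_prepare_low_power(): memory type, VTT status and
 * GPIO pin, IO isolation status, and the debug halt flag.
 */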
#define IPC_MEM_TYPE_SHIFT (0x0)
#define IPC_MEM_TYPE_MASK (0x7 << 0)
#define IPC_VTT_STAT_SHIFT (0x3)
#define IPC_VTT_STAT_MASK (0x1 << 3)
#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
#define IPC_IO_ISOLATION_STAT_SHIFT (10)
#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)

#define IPC_DBG_HALT_SHIFT (11)
#define IPC_DBG_HALT_MASK (0x1 << 11)

#define M3_STATE_UNKNOWN 0
#define M3_STATE_RESET 1
#define M3_STATE_INITED 2
#define M3_STATE_MSG_FOR_LP 3
#define M3_STATE_MSG_FOR_RESET 4

#define WKUP_M3_SD_FW_MAGIC 0x570C

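/*
 * Auxiliary data (e.g. the voltage scaling blob) is copied into the CM3
 * data memory at WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET; the window
 * mapped for it is WKUP_M3_AUXDATA_SIZE bytes.
 */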
#define WKUP_M3_DMEM_START 0x80000
#define WKUP_M3_AUXDATA_OFFSET 0x1000
#define WKUP_M3_AUXDATA_SIZE 0xFF

static struct wkup_m3_ipc *m3_ipc_state;

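/*
 * Wakeup interrupt sources reported by the CM3 in IPC register 6; the
 * final "Unknown" entry is returned by wkup_m3_request_wake_src() when the
 * reported number does not match any known source.
 */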
static const struct wkup_m3_wakeup_src wakeups[] = {
        {.irq_nr = 16, .src = "PRCM"},
        {.irq_nr = 35, .src = "USB0_PHY"},
        {.irq_nr = 36, .src = "USB1_PHY"},
        {.irq_nr = 40, .src = "I2C0"},
        {.irq_nr = 41, .src = "RTC Timer"},
        {.irq_nr = 42, .src = "RTC Alarm"},
        {.irq_nr = 43, .src = "Timer0"},
        {.irq_nr = 44, .src = "Timer1"},
        {.irq_nr = 45, .src = "UART"},
        {.irq_nr = 46, .src = "GPIO0"},
        {.irq_nr = 48, .src = "MPU_WAKE"},
        {.irq_nr = 49, .src = "WDT0"},
        {.irq_nr = 50, .src = "WDT1"},
        {.irq_nr = 51, .src = "ADC_TSC"},
        {.irq_nr = 0, .src = "Unknown"},
};

/**
 * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @data: pointer to data
 * @sz: size of data to copy (limit 256 bytes)
 *
 * Copies any additional blob of data to the wkup_m3 dmem to be used by the
 * firmware.
 */
static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
                                           const void *data, int sz)
{
        unsigned long aux_data_dev_addr;
        void *aux_data_addr;

        aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
        aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
                                       aux_data_dev_addr,
                                       WKUP_M3_AUXDATA_SIZE,
                                       NULL);
        memcpy(aux_data_addr, data, sz);

        return WKUP_M3_AUXDATA_OFFSET;
}

static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
{
        unsigned long val, aux_base;
        struct wkup_m3_scale_data_header hdr;
        struct wkup_m3_ipc *m3_ipc = context;
        struct device *dev = m3_ipc->dev;

        if (!fw) {
                dev_err(dev, "Voltage scale fw name given but file missing.\n");
                return;
        }

        memcpy(&hdr, fw->data, sizeof(hdr));

        if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
                dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
                goto release_sd_fw;
        }

        aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
                                         fw->size - sizeof(hdr));

        val = (aux_base + hdr.sleep_offset);
        val |= ((aux_base + hdr.wake_offset) << 16);

        m3_ipc->volt_scale_offsets = val;

release_sd_fw:
        release_firmware(fw);
}

static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
                                   struct device *dev)
{
        int ret = 0;

        /*
         * If no firmware name was provided, the user has already been
         * warned; PM still works without voltage scaling data, so
         * return 0.
         */
        if (!m3_ipc->sd_fw_name)
                return ret;

        ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
                                      m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
                                      m3_ipc, wkup_m3_scale_data_fw_cb);

        return ret;
}

#ifdef CONFIG_DEBUG_FS
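/*
 * The "enable_late_halt" debugfs file controls the IPC_DBG_HALT bit that
 * wkup_m3_prepare_low_power() passes to the firmware in IPC register 4,
 * presumably so the CM3 can be halted for debugging during the low-power
 * sequence.
 */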
static void wkup_m3_set_halt_late(bool enabled)
{
        if (enabled)
                m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
        else
                m3_ipc_state->halt = 0;
}

static int option_get(void *data, u64 *val)
{
        u32 *option = data;

        *val = *option;

        return 0;
}

static int option_set(void *data, u64 val)
{
        u32 *option = data;

        *option = val;

        if (option == &m3_ipc_state->halt) {
                if (val)
                        wkup_m3_set_halt_late(true);
                else
                        wkup_m3_set_halt_late(false);
        }

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
                        "%llu\n");

static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
{
        m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);

        if (IS_ERR(m3_ipc->dbg_path))
                return -EINVAL;

        (void)debugfs_create_file("enable_late_halt", 0644,
                                  m3_ipc->dbg_path,
                                  &m3_ipc->halt,
                                  &wkup_m3_ipc_option_fops);

        return 0;
}

static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
{
        debugfs_remove_recursive(m3_ipc->dbg_path);
}
#else
static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
{
        return 0;
}

static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
{
}
#endif /* CONFIG_DEBUG_FS */

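/*
 * The CM3 signals the MPU through the TXEV event line: writing
 * AM33XX_M3_TXEV_ACK to the EOI register acknowledges the event, and
 * writing AM33XX_M3_TXEV_ENABLE re-arms it for the next message.
 */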
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
        writel(AM33XX_M3_TXEV_ACK,
               m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
{
        writel(AM33XX_M3_TXEV_ENABLE,
               m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}

static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
                                   u32 val, int ipc_reg_num)
{
        if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
                 "ipc register operation out of range"))
                return;

        writel(val, m3_ipc->ipc_mem_base +
               AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
                                          int ipc_reg_num)
{
        if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
                 "ipc register operation out of range"))
                return 0;

        return readl(m3_ipc->ipc_mem_base +
                     AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
}

static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
{
        int val;

        val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);

        return val & M3_FW_VERSION_MASK;
}

static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
{
        struct wkup_m3_ipc *m3_ipc = ipc_data;
        struct device *dev = m3_ipc->dev;
        int ver = 0;

        am33xx_txev_eoi(m3_ipc);

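        /*
         * The TXEV interrupt indicates the CM3 has processed the last IPC
         * exchange; its meaning depends on the current handshake state.
         */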
        switch (m3_ipc->state) {
        case M3_STATE_RESET:
                ver = wkup_m3_fw_version_read(m3_ipc);

                if (ver == M3_VERSION_UNKNOWN ||
                    ver < M3_BASELINE_VERSION) {
                        dev_warn(dev, "CM3 Firmware Version %x not supported\n",
                                 ver);
                } else {
                        dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
                }

                m3_ipc->state = M3_STATE_INITED;
                wkup_m3_init_scale_data(m3_ipc, dev);
                complete(&m3_ipc->sync_complete);
                break;
        case M3_STATE_MSG_FOR_RESET:
                m3_ipc->state = M3_STATE_INITED;
                complete(&m3_ipc->sync_complete);
                break;
        case M3_STATE_MSG_FOR_LP:
                complete(&m3_ipc->sync_complete);
                break;
        case M3_STATE_UNKNOWN:
                dev_warn(dev, "Unknown CM3 State\n");
        }

        am33xx_txev_enable(m3_ipc);

        return IRQ_HANDLED;
}

static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
{
        struct device *dev = m3_ipc->dev;
        mbox_msg_t dummy_msg = 0;
        int ret;

        if (!m3_ipc->mbox) {
                dev_err(dev,
                        "No IPC channel to communicate with wkup_m3!\n");
                return -EIO;
        }

        /*
         * Write a dummy message to the mailbox in order to trigger the RX
         * interrupt to alert the M3 that data is available in the IPC
         * registers. We must enable the IRQ here and disable it after in
         * the RX callback to avoid multiple interrupts being received
         * by the CM3.
         */
        ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
        if (ret < 0) {
                dev_err(dev, "%s: mbox_send_message() failed: %d\n",
                        __func__, ret);
                return ret;
        }

        ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
                                          msecs_to_jiffies(500));
        if (!ret) {
                dev_err(dev, "MPU<->CM3 sync failure\n");
                m3_ipc->state = M3_STATE_UNKNOWN;
                return -EIO;
        }

        mbox_client_txdone(m3_ipc->mbox, 0);
        return 0;
}

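/*
 * Same dummy-message kick as wkup_m3_ping(), but without blocking on the
 * sync completion; wkup_m3_prepare_low_power() uses this variant for the
 * WKUP_M3_IDLE command.
 */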
static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
{
        struct device *dev = m3_ipc->dev;
        mbox_msg_t dummy_msg = 0;
        int ret;

        if (!m3_ipc->mbox) {
                dev_err(dev,
                        "No IPC channel to communicate with wkup_m3!\n");
                return -EIO;
        }

        ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
        if (ret < 0) {
                dev_err(dev, "%s: mbox_send_message() failed: %d\n",
                        __func__, ret);
                return ret;
        }

        mbox_client_txdone(m3_ipc->mbox, 0);
        return 0;
}

static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
{
        return ((m3_ipc->state != M3_STATE_RESET) &&
                (m3_ipc->state != M3_STATE_UNKNOWN));
}

static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
{
        m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
                           (gpio << IPC_VTT_GPIO_PIN_SHIFT);
}

static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
{
        m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
}

/* Public functions */
/**
 * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @mem_type: memory type value read directly from emif
 *
 * wkup_m3 must know what memory type is in use to properly suspend
 * and resume.
 */
static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
{
        m3_ipc->mem_type = mem_type;
}

/**
 * wkup_m3_set_resume_address - Pass wkup_m3 resume address
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @addr: Physical address from which resume code should execute
 */
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
{
        m3_ipc->resume_addr = (unsigned long)addr;
}

/**
 * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns code representing the status of a low power mode transition.
 *      0 - Successful transition
 *      1 - Failure to transition to low power state
 */
static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
{
        unsigned int i;
        int val;

        val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);

        i = M3_STATUS_RESP_MASK & val;
        i >>= __ffs(M3_STATUS_RESP_MASK);

        return i;
}

/**
 * wkup_m3_prepare_low_power - Request preparation for transition to
 *                             low power state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 * @state: A kernel suspend state to enter: MEM, STANDBY or IDLE
 *
 * Returns 0 if preparation was successful, otherwise returns error code
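 *
 * The command and its parameters are handed to the firmware through the
 * control module IPC registers: register 0 carries the resume address,
 * register 1 the command, register 4 the packed memory type/VTT/IO
 * isolation/halt configuration, and register 5 the voltage scaling offsets
 * (DS_IPC_DEFAULT when unused); the remaining registers are written with
 * DS_IPC_DEFAULT.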
 */
static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
{
        struct device *dev = m3_ipc->dev;
        int m3_power_state;
        int ret = 0;

        if (!wkup_m3_is_available(m3_ipc))
                return -ENODEV;

        switch (state) {
        case WKUP_M3_DEEPSLEEP:
                m3_power_state = IPC_CMD_DS0;
                wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
                break;
        case WKUP_M3_STANDBY:
                m3_power_state = IPC_CMD_STANDBY;
                wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
                break;
        case WKUP_M3_IDLE:
                m3_power_state = IPC_CMD_IDLE;
                wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
                break;
        default:
                return 1;
        }

        /* Program each required IPC register then write defaults to others */
        wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
        wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
        wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
                               m3_ipc->vtt_conf |
                               m3_ipc->isolation_conf |
                               m3_ipc->halt, 4);

        wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
        wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
        wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
        wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);

        m3_ipc->state = M3_STATE_MSG_FOR_LP;

        if (state == WKUP_M3_IDLE)
                ret = wkup_m3_ping_noirq(m3_ipc);
        else
                ret = wkup_m3_ping(m3_ipc);

        if (ret) {
                dev_err(dev, "Unable to ping CM3\n");
                return ret;
        }

        return 0;
}

/**
 * wkup_m3_finish_low_power - Return m3 to reset state
 * @m3_ipc: Pointer to wkup_m3_ipc context
 *
 * Returns 0 if reset was successful, otherwise returns error code
 */
static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
{
        struct device *dev = m3_ipc->dev;
        int ret = 0;

        if (!wkup_m3_is_available(m3_ipc))
                return -ENODEV;

        wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
        wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);

        m3_ipc->state = M3_STATE_MSG_FOR_RESET;

        ret = wkup_m3_ping(m3_ipc);
        if (ret) {
                dev_err(dev, "Unable to ping CM3\n");
                return ret;
        }

        return 0;
}

/**
 * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
 * @m3_ipc: Pointer to wkup_m3_ipc context
 */
static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
{
        unsigned int wakeup_src_idx;
        int j, val;

        val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);

        wakeup_src_idx = val & M3_WAKE_SRC_MASK;

        for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
                if (wakeups[j].irq_nr == wakeup_src_idx)
                        return wakeups[j].src;
        }
        return wakeups[j].src;
}

/**
 * wkup_m3_set_rtc_only - Set the rtc_only flag
 * @m3_ipc: Pointer to wkup_m3_ipc context
 */
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
        if (m3_ipc_state)
                m3_ipc_state->is_rtc_only = true;
}

static struct wkup_m3_ipc_ops ipc_ops = {
        .set_mem_type = wkup_m3_set_mem_type,
        .set_resume_address = wkup_m3_set_resume_address,
        .prepare_low_power = wkup_m3_prepare_low_power,
        .finish_low_power = wkup_m3_finish_low_power,
        .request_pm_status = wkup_m3_request_pm_status,
        .request_wake_src = wkup_m3_request_wake_src,
        .set_rtc_only = wkup_m3_set_rtc_only,
};

/**
 * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
 *
 * Returns NULL if the wkup_m3 is not yet available, otherwise returns
 * pointer to wkup_m3_ipc struct.
 */
struct wkup_m3_ipc *wkup_m3_ipc_get(void)
{
        if (m3_ipc_state)
                get_device(m3_ipc_state->dev);
        else
                return NULL;

        return m3_ipc_state;
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);

/**
 * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
 * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
 */
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
{
        if (m3_ipc_state)
                put_device(m3_ipc_state->dev);
}
EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
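
/*
 * Illustrative use of the exported handle and ops from a PM client (a
 * minimal sketch; the expected consumer is the platform PM code, e.g. the
 * AM33xx/AM437x PM driver, and the names mem_type, resume_phys, ret and
 * status below are placeholders, not part of this driver):
 *
 *   struct wkup_m3_ipc *m3_ipc = wkup_m3_ipc_get();
 *
 *   if (!m3_ipc)
 *           return -EPROBE_DEFER;   (M3 not booted yet)
 *   m3_ipc->ops->set_mem_type(m3_ipc, mem_type);
 *   m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_phys);
 *   ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
 *   ... enter the low-power state, then after resume ...
 *   status = m3_ipc->ops->request_pm_status(m3_ipc);
 *   m3_ipc->ops->finish_low_power(m3_ipc);
 *   wkup_m3_ipc_put(m3_ipc);
 */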

static int wkup_m3_rproc_boot_thread(void *arg)
{
        struct wkup_m3_ipc *m3_ipc = arg;
        struct device *dev = m3_ipc->dev;
        int ret;

        init_completion(&m3_ipc->sync_complete);

        ret = rproc_boot(m3_ipc->rproc);
        if (ret)
                dev_err(dev, "rproc_boot failed\n");
        else
                m3_ipc_state = m3_ipc;

        return 0;
}

static int wkup_m3_ipc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int irq, ret, temp;
        phandle rproc_phandle;
        struct rproc *m3_rproc;
        struct task_struct *task;
        struct wkup_m3_ipc *m3_ipc;
        struct device_node *np = dev->of_node;

        m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
        if (!m3_ipc)
                return -ENOMEM;

        m3_ipc->ipc_mem_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(m3_ipc->ipc_mem_base))
                return PTR_ERR(m3_ipc->ipc_mem_base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
                               0, "wkup_m3_txev", m3_ipc);
        if (ret) {
                dev_err(dev, "request_irq failed\n");
                return ret;
        }

        m3_ipc->mbox_client.dev = dev;
        m3_ipc->mbox_client.tx_done = NULL;
        m3_ipc->mbox_client.tx_prepare = NULL;
        m3_ipc->mbox_client.rx_callback = NULL;
        m3_ipc->mbox_client.tx_block = false;
        m3_ipc->mbox_client.knows_txdone = false;

        m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);

        if (IS_ERR(m3_ipc->mbox)) {
                dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
                        PTR_ERR(m3_ipc->mbox));
                return PTR_ERR(m3_ipc->mbox);
        }

        if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
                dev_err(&pdev->dev, "could not get rproc phandle\n");
                ret = -ENODEV;
                goto err_free_mbox;
        }

        m3_rproc = rproc_get_by_phandle(rproc_phandle);
        if (!m3_rproc) {
                dev_err(&pdev->dev, "could not get rproc handle\n");
                ret = -EPROBE_DEFER;
                goto err_free_mbox;
        }

        m3_ipc->rproc = m3_rproc;
        m3_ipc->dev = dev;
        m3_ipc->state = M3_STATE_RESET;

        m3_ipc->ops = &ipc_ops;

        if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
                if (temp >= 0 && temp <= 31)
                        wkup_m3_set_vtt_gpio(m3_ipc, temp);
                else
                        dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
        }

        if (of_property_read_bool(np, "ti,set-io-isolation"))
                wkup_m3_set_io_isolation(m3_ipc);

        ret = of_property_read_string(np, "firmware-name",
                                      &m3_ipc->sd_fw_name);
        if (ret)
                dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");

        /*
         * Wait for firmware loading completion in a thread so we
         * can boot the wkup_m3 as soon as it's ready without holding
         * up kernel boot
         */
        task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
                           "wkup_m3_rproc_loader");

        if (IS_ERR(task)) {
                dev_err(dev, "can't create rproc_boot thread\n");
                ret = PTR_ERR(task);
                goto err_put_rproc;
        }

        wkup_m3_ipc_dbg_init(m3_ipc);

        return 0;

err_put_rproc:
        rproc_put(m3_rproc);
err_free_mbox:
        mbox_free_channel(m3_ipc->mbox);
        return ret;
}

static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
        wkup_m3_ipc_dbg_destroy(m3_ipc_state);

        mbox_free_channel(m3_ipc_state->mbox);

        rproc_shutdown(m3_ipc_state->rproc);
        rproc_put(m3_ipc_state->rproc);

        m3_ipc_state = NULL;

        return 0;
}

static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
{
        /*
         * Nothing needs to be done on suspend even with rtc_only flag set
         */
        return 0;
}

static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
{
        if (m3_ipc_state->is_rtc_only) {
                rproc_shutdown(m3_ipc_state->rproc);
                rproc_boot(m3_ipc_state->rproc);
        }

        m3_ipc_state->is_rtc_only = false;

        return 0;
}

static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
};

static const struct of_device_id wkup_m3_ipc_of_match[] = {
        { .compatible = "ti,am3352-wkup-m3-ipc", },
        { .compatible = "ti,am4372-wkup-m3-ipc", },
        {},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);

static struct platform_driver wkup_m3_ipc_driver = {
        .probe = wkup_m3_ipc_probe,
        .remove = wkup_m3_ipc_remove,
        .driver = {
                .name = "wkup_m3_ipc",
                .of_match_table = wkup_m3_ipc_of_match,
                .pm = &wkup_m3_ipc_pm_ops,
        },
};

module_platform_driver(wkup_m3_ipc_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");