Lines matching references to mhi_cntrl (MHI host core). Each match below is prefixed with its line number in the source file and suffixed with the enclosing function; "argument" and "local" note whether mhi_cntrl reaches that function as a parameter or as a local variable.

19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,  in mhi_read_reg()  argument
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, in mhi_read_reg_field() argument
32 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); in mhi_read_reg_field()
41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, in mhi_poll_reg_field() argument
46 u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; in mhi_poll_reg_field()
49 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); in mhi_poll_reg_field()
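These first three helpers layer on one another: mhi_read_reg() defers to the controller's read_reg() callback, mhi_read_reg_field() masks and shifts the raw value, and mhi_poll_reg_field() retries the field read on a budget of timeout_ms * 1000 / delayus attempts (line 46). A minimal sketch of the two upper layers, assuming __ffs()-based field extraction and fsleep() as the delay primitive (neither appears in the matches above):

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 *out)
{
	u32 tmp;
	int ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);

	if (ret)
		return ret;

	*out = (tmp & mask) >> __ffs(mask);	/* shift field down to bit 0 */
	return 0;
}

int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 val, u32 delayus)
{
	u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;

	while (retry--) {
		int ret = mhi_read_reg_field(mhi_cntrl, base, offset,
					     mask, &out);

		if (ret)
			return ret;
		if (out == val)
			return 0;

		fsleep(delayus);	/* assumed delay primitive */
	}

	return -ETIMEDOUT;
}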
62 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, in mhi_write_reg() argument
65 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
68 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, in mhi_write_reg_field() argument
75 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); in mhi_write_reg_field()
81 mhi_write_reg(mhi_cntrl, base, offset, tmp); in mhi_write_reg_field()
86 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, in mhi_write_db() argument
89 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); in mhi_write_db()
90 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); in mhi_write_db()
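mhi_write_reg_field() is the read-modify-write counterpart (read at line 75, write-back at 81), and mhi_write_db() splits a 64-bit doorbell value into two 32-bit writes, upper word at offset 4 before lower word at offset 0 (lines 89-90), so the low-word write completes the doorbell update. A sketch; the field-merge arithmetic is an assumption:

int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset,
				     u32 mask, u32 val)
{
	u32 tmp;
	int ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);

	if (ret)
		return ret;

	tmp &= ~mask;				/* clear the target field */
	tmp |= (val << __ffs(mask)) & mask;	/* assumed merge */
	mhi_write_reg(mhi_cntrl, base, offset, tmp);

	return 0;
}

void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}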
93 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, in mhi_db_brstmode() argument
100 mhi_write_db(mhi_cntrl, db_addr, db_val); in mhi_db_brstmode()
105 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, in mhi_db_brstmode_disable() argument
111 mhi_write_db(mhi_cntrl, db_addr, db_val); in mhi_db_brstmode_disable()
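The two brstmode variants implement doorbell burst mode: with bursting enabled the doorbell is written only while db_mode is set (it is re-armed elsewhere, e.g. on the M0 transition), whereas the disabled variant writes unconditionally. A sketch, assuming db_cfg carries db_val/db_mode bookkeeping:

void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg, void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;	/* hold off until re-armed */
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg, void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}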
118 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
122 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) in mhi_ring_cmd_db() argument
129 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
132 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, in mhi_ring_chan_db() argument
147 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
151 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) in mhi_get_exec_env() argument
154 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
160 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) in mhi_get_mhi_state() argument
163 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
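Both getters are thin wrappers over the register helpers: the execution environment comes from the BHI register file, the MHI state from a field of MHISTATUS, and each maps a read failure to a sentinel. A sketch, assuming the MHISTATUS_MHISTATE_MASK name and the MHI_EE_MAX/MHI_STATE_MAX sentinels:

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK, &state);

	return ret ? MHI_STATE_MAX : state;
}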
169 void mhi_soc_reset(struct mhi_controller *mhi_cntrl) in mhi_soc_reset() argument
171 if (mhi_cntrl->reset) { in mhi_soc_reset()
172 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
177 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
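mhi_soc_reset() prefers a controller-supplied reset op and falls back to the generic SoC reset register only when none is registered (lines 171-177). A sketch, assuming MHI_SOC_RESET_REQ as the write value:

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->reset) {
		mhi_cntrl->reset(mhi_cntrl);	/* controller-specific reset */
		return;
	}

	/* Generic path: request the reset through MHI register space */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
		      MHI_SOC_RESET_REQ);
}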
182 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, in mhi_map_single_no_bb() argument
185 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
188 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
194 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, in mhi_map_single_use_bb() argument
197 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
211 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, in mhi_unmap_single_no_bb() argument
214 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
218 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, in mhi_unmap_single_use_bb() argument
224 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
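The map/unmap pairs give a controller two buffer strategies: map the client buffer directly (dma_map_single(), no bounce buffer) or stage it through a coherent bounce buffer. A sketch of the bounce-buffer map; the bb_addr field and the copy-in for outbound data are assumptions consistent with the no-bb variant above:

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	/* Outbound data must be staged into the bounce buffer up front */
	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;	/* kept for copy-back/free on unmap */

	return 0;
}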
228 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, in get_nr_avail_ring_elements() argument
249 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, in mhi_add_ring_element() argument
259 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, in mhi_del_ring_element() argument
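These three ring helpers implement classic single-producer circular-buffer accounting, assuming mhi_ring's base/rp/wp/len/el_size layout used throughout this file. A sketch of the free-element count, which keeps one slot unused so a full ring stays distinguishable from an empty one:

static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		/* Writer trails reader: one contiguous free span */
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		/* Free space wraps around the end of the ring */
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}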
278 struct mhi_controller *mhi_cntrl; in mhi_destroy_device() local
285 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
323 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
336 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count() local
341 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); in mhi_get_free_desc_count()
360 void mhi_create_devices(struct mhi_controller *mhi_cntrl) in mhi_create_devices() argument
364 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
367 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
368 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
370 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
372 mhi_dev = mhi_alloc_device(mhi_cntrl); in mhi_create_devices()
397 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
416 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
432 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler() local
443 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
444 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
449 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
453 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
480 struct mhi_controller *mhi_cntrl = priv; in mhi_intvec_threaded_handler() local
481 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
486 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
487 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
488 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
492 state = mhi_get_mhi_state(mhi_cntrl); in mhi_intvec_threaded_handler()
493 ee = mhi_get_exec_env(mhi_cntrl); in mhi_intvec_threaded_handler()
495 TO_MHI_EXEC_STR(mhi_cntrl->ee), in mhi_intvec_threaded_handler()
496 mhi_state_str(mhi_cntrl->dev_state), in mhi_intvec_threaded_handler()
501 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_intvec_threaded_handler()
504 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
506 if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) in mhi_intvec_threaded_handler()
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
514 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
515 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
522 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
523 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
524 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_intvec_threaded_handler()
527 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
528 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_intvec_threaded_handler()
539 struct mhi_controller *mhi_cntrl = dev; in mhi_intvec_handler() local
542 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
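mhi_intvec_handler() is the hard-IRQ half of the control interrupt: it only wakes state waiters and defers the real work to mhi_intvec_threaded_handler() above, which samples the MHI state and execution environment under pm_lock and routes fatal conditions to RDDM or SYS_ERR handling. A sketch of the hard half; the IRQ_WAKE_THREAD return is an assumption implied by the threaded counterpart:

static irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake anyone blocked on a state change; defer everything else */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}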
547 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, in mhi_recycle_ev_ring_element() argument
567 static int parse_xfer_event(struct mhi_controller *mhi_cntrl, in parse_xfer_event() argument
572 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
610 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
641 mhi_del_ring_element(mhi_cntrl, buf_ring); in parse_xfer_event()
642 mhi_del_ring_element(mhi_cntrl, tre_ring); in parse_xfer_event()
649 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
651 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
679 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
681 MHI_DB_ACCESS_VALID(mhi_cntrl)) { in parse_xfer_event()
682 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in parse_xfer_event()
684 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
702 static int parse_rsc_event(struct mhi_controller *mhi_cntrl, in parse_rsc_event() argument
757 mhi_del_ring_element(mhi_cntrl, tre_ring); in parse_rsc_event()
766 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, in mhi_process_cmd_completion() argument
770 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
777 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
786 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
787 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
788 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
794 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
798 mhi_del_ring_element(mhi_cntrl, mhi_ring); in mhi_process_cmd_completion()
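mhi_process_cmd_completion() closes the loop opened by mhi_send_cmd(): it maps the event's ring pointer back to the command TRE, records the completion code on the owning channel, and wakes the waiter blocked in mhi_update_channel_state(). A condensed sketch; mhi_to_virtual() and the MHI_TRE_GET_*() accessors are assumptions about this file's internals, and pointer validation is elided:

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_ring_element *tre)
{
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_ring_element *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	cmd_pkt = mhi_to_virtual(mhi_ring, MHI_TRE_GET_EV_PTR(tre));
	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);

	if (chan < mhi_cntrl->max_chan &&
	    mhi_cntrl->mhi_chan[chan].configured) {
		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		write_lock_bh(&mhi_chan->lock);
		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);	/* completion code */
		complete(&mhi_chan->completion);		/* wake the sender */
		write_unlock_bh(&mhi_chan->lock);
	}

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}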
801 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, in mhi_process_ctrl_ev_ring() argument
808 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
810 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
820 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
824 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
840 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
841 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
846 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
848 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
862 mhi_pm_m0_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
865 mhi_pm_m1_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
868 mhi_pm_m3_transition(mhi_cntrl); in mhi_process_ctrl_ev_ring()
875 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
876 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_process_ctrl_ev_ring()
878 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
880 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_process_ctrl_ev_ring()
891 mhi_process_cmd_completion(mhi_cntrl, local_rp); in mhi_process_ctrl_ev_ring()
912 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
913 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
914 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
915 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
916 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
923 mhi_queue_state_transition(mhi_cntrl, st); in mhi_process_ctrl_ev_ring()
930 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
936 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
937 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
940 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_ctrl_ev_ring()
949 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); in mhi_process_ctrl_ev_ring()
954 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
963 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
964 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_process_ctrl_ev_ring()
966 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
971 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, in mhi_process_data_event_ring() argument
978 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
984 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
988 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1001 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1007 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1008 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1009 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1012 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_data_event_ring()
1015 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); in mhi_process_data_event_ring()
1020 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); in mhi_process_data_event_ring()
1025 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1033 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1034 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_process_data_event_ring()
1036 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
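Both event-ring processors share one loop shape: snapshot the device's write pointer from the event-ring context, consume events up to that point (bounded by the caller's quota), recycle each element, then re-ring the event-ring doorbell while doorbell access is valid. A structural sketch, not a real symbol in this file; mhi_to_virtual() and the er_ctxt->rp layout are assumptions:

static int process_event_ring_sketch(struct mhi_controller *mhi_cntrl,
				     struct mhi_event *mhi_event,
				     u32 event_quota)
{
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_ring_element *dev_rp, *local_rp;
	int count = 0;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp));
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		/* ... dispatch on the event type: xfer completion, command
		 * completion, state change, EE change ... */
		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, le64_to_cpu(er_ctxt->rp));
		count++;
		event_quota--;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}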
1044 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task() local
1048 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task() local
1056 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1066 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1072 mhi_trigger_resume(mhi_cntrl); in mhi_ctrl_ev_task()
1078 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1085 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1086 state = mhi_get_mhi_state(mhi_cntrl); in mhi_ctrl_ev_task()
1089 pm_state = mhi_tryset_pm_state(mhi_cntrl, in mhi_ctrl_ev_task()
1092 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1094 mhi_pm_sys_err_handler(mhi_cntrl); in mhi_ctrl_ev_task()
1098 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, in mhi_is_ring_full() argument
1112 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue() local
1119 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1122 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1124 ret = mhi_is_ring_full(mhi_cntrl, tre_ring); in mhi_queue()
1130 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); in mhi_queue()
1138 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1141 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1144 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1146 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_queue()
1147 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in mhi_queue()
1150 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1153 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
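mhi_queue() is the common enqueue path behind the public queue APIs: reject queuing in PM error states, verify ring space, build the TRE, vote the device awake, and ring the channel doorbell if register access is currently allowed (the DMA_FROM_DEVICE put balances the vote once the doorbell is rung; the outbound vote is dropped on completion, line 651). Reconstructed from the call order above; the exit label and ul/dl channel selection are assumptions:

static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	unsigned long flags;
	int ret;

	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
	if (unlikely(ret)) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
	if (unlikely(ret))
		goto exit_unlock;

	/* Vote the device awake while the transfer is in flight */
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);

	if (dir == DMA_FROM_DEVICE)
		mhi_cntrl->runtime_put(mhi_cntrl);

exit_unlock:
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return ret;
}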
1195 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, in mhi_gen_tre() argument
1220 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1236 mhi_add_ring_element(mhi_cntrl, tre_ring); in mhi_gen_tre()
1237 mhi_add_ring_element(mhi_cntrl, buf_ring); in mhi_gen_tre()
1257 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full() local
1262 return mhi_is_ring_full(mhi_cntrl, tre_ring); in mhi_queue_is_full()
1266 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, in mhi_send_cmd() argument
1271 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1273 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1280 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { in mhi_send_cmd()
1309 mhi_add_ring_element(mhi_cntrl, ring); in mhi_send_cmd()
1310 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1311 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) in mhi_send_cmd()
1312 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); in mhi_send_cmd()
1313 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
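mhi_send_cmd() places a single command TRE on the primary command ring, failing when the ring is full (line 1280), then rings the command doorbell under pm_lock. A condensed sketch; the command-ring spinlock and the elided TRE constructors are assumptions:

int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct mhi_ring_element *cmd_tre;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	cmd_tre = ring->wp;
	/* ... fill cmd_tre for RESET/STOP/START against mhi_chan->chan ... */

	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}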
1319 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, in mhi_update_channel_state() argument
1364 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1367 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1370 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); in mhi_update_channel_state()
1378 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1400 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1401 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1406 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, in mhi_unprepare_channel() argument
1414 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1416 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1421 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, in mhi_unprepare_channel()
1433 mhi_reset_chan(mhi_cntrl, mhi_chan); in mhi_unprepare_channel()
1434 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_unprepare_channel()
1441 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, in mhi_prepare_channel() argument
1447 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1449 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1457 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1462 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, in mhi_prepare_channel()
1472 int nr_el = get_nr_avail_ring_elements(mhi_cntrl, in mhi_prepare_channel()
1474 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1490 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); in mhi_prepare_channel()
1497 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1498 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { in mhi_prepare_channel()
1500 mhi_ring_chan_db(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1503 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1512 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1521 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in mhi_prepare_channel()
1526 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, in mhi_mark_stale_events() argument
1534 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1547 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1569 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, in mhi_reset_data_chan() argument
1584 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1586 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1590 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1592 mhi_del_ring_element(mhi_cntrl, buf_ring); in mhi_reset_data_chan()
1593 mhi_del_ring_element(mhi_cntrl, tre_ring); in mhi_reset_data_chan()
1604 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) in mhi_reset_chan() argument
1614 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1615 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1616 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1618 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); in mhi_reset_chan()
1620 mhi_reset_data_chan(mhi_cntrl, mhi_chan); in mhi_reset_chan()
1622 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1628 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer() local
1636 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); in __mhi_prepare_for_transfer()
1649 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in __mhi_prepare_for_transfer()
1669 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer() local
1678 mhi_unprepare_channel(mhi_cntrl, mhi_chan); in mhi_unprepare_from_transfer()
1685 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_poll() local
1687 struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_poll()
1691 ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); in mhi_poll()
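mhi_poll() lets a client drive its download channel's event ring directly (NAPI-style) instead of waiting for the tasklet, serialized against the IRQ path. A sketch matching lines 1685-1691; the event-ring spinlock name is an assumption:

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}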