Lines Matching +full:cmd +full:- +full:db
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); in mhi_read_reg()
46 u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; in mhi_poll_reg_field()
48 while (retry--) { in mhi_poll_reg_field()
59 return -ETIMEDOUT; in mhi_poll_reg_field()
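The fragments above show the register-polling pattern: the retry budget is derived from the controller timeout and the per-iteration delay, and -ETIMEDOUT is returned once it is exhausted. A standalone C sketch of that pattern (poll_until, read_fn and sleep_us are illustrative stand-ins, not the driver's mhi_read_reg() or its delay call):

#include <errno.h>
#include <stdint.h>

static int poll_until(int (*read_fn)(uint32_t *out), uint32_t mask,
                      uint32_t want, uint32_t timeout_ms, uint32_t delay_us,
                      void (*sleep_us)(uint32_t))
{
        uint32_t out, retry = (timeout_ms * 1000) / delay_us;

        while (retry--) {
                if (read_fn(&out))
                        return -EIO;            /* register read failed */
                if ((out & mask) == want)
                        return 0;               /* field reached the expected value */
                sleep_us(delay_us);
        }

        return -ETIMEDOUT;                      /* retry budget exhausted */
}

For example, a 1000 ms timeout polled every 25 us yields 40000 attempts before giving up.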
65 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); in mhi_write_reg()
98 if (db_cfg->db_mode) { in mhi_db_brstmode()
99 db_cfg->db_val = db_val; in mhi_db_brstmode()
101 db_cfg->db_mode = 0; in mhi_db_brstmode()
110 db_cfg->db_val = db_val; in mhi_db_brstmode_disable()
116 struct mhi_ring *ring = &mhi_event->ring; in mhi_ring_er_db()
118 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, in mhi_ring_er_db()
119 ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); in mhi_ring_er_db()
124 dma_addr_t db; in mhi_ring_cmd_db() local
125 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_ring_cmd_db()
127 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_cmd_db()
128 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_cmd_db()
129 mhi_write_db(mhi_cntrl, ring->db_addr, db); in mhi_ring_cmd_db()
135 struct mhi_ring *ring = &mhi_chan->tre_ring; in mhi_ring_chan_db()
136 dma_addr_t db; in mhi_ring_chan_db() local
138 db = ring->iommu_base + (ring->wp - ring->base); in mhi_ring_chan_db()
145 *ring->ctxt_wp = cpu_to_le64(db); in mhi_ring_chan_db()
147 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, in mhi_ring_chan_db()
148 ring->db_addr, db); in mhi_ring_chan_db()
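Both doorbell helpers above compute the device-visible write pointer as the host wp's byte offset into the ring, rebased onto the ring's IOMMU (bus) base, before storing it in the ring context and ringing the doorbell. A minimal model of that address math, using plain userspace types (ring_model is an illustrative stand-in, not the driver's struct mhi_ring):

#include <stdint.h>

struct ring_model {
        void *base, *wp;        /* host virtual addresses */
        uint64_t iommu_base;    /* bus address corresponding to base */
};

static uint64_t ring_wp_bus_addr(const struct ring_model *r)
{
        /* byte offset of wp inside the ring, rebased onto the bus address */
        return r->iommu_base + (uint64_t)((char *)r->wp - (char *)r->base);
}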
154 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); in mhi_get_exec_env()
163 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, in mhi_get_mhi_state()
171 if (mhi_cntrl->reset) { in mhi_soc_reset()
172 mhi_cntrl->reset(mhi_cntrl); in mhi_soc_reset()
177 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, in mhi_soc_reset()
185 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, in mhi_map_single_no_bb()
186 buf_info->v_addr, buf_info->len, in mhi_map_single_no_bb()
187 buf_info->dir); in mhi_map_single_no_bb()
188 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) in mhi_map_single_no_bb()
189 return -ENOMEM; in mhi_map_single_no_bb()
197 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_map_single_use_bb()
198 &buf_info->p_addr, GFP_ATOMIC); in mhi_map_single_use_bb()
201 return -ENOMEM; in mhi_map_single_use_bb()
203 if (buf_info->dir == DMA_TO_DEVICE) in mhi_map_single_use_bb()
204 memcpy(buf, buf_info->v_addr, buf_info->len); in mhi_map_single_use_bb()
206 buf_info->bb_addr = buf; in mhi_map_single_use_bb()
214 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, in mhi_unmap_single_no_bb()
215 buf_info->dir); in mhi_unmap_single_no_bb()
221 if (buf_info->dir == DMA_FROM_DEVICE) in mhi_unmap_single_use_bb()
222 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); in mhi_unmap_single_use_bb()
224 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, in mhi_unmap_single_use_bb()
225 buf_info->bb_addr, buf_info->p_addr); in mhi_unmap_single_use_bb()
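The two mapping paths above differ in whether a bounce buffer is used: the no-bb variant maps the client buffer directly, while the use-bb variant allocates a coherent buffer and copies data in before a host-to-device transfer or back out after a device-to-host transfer. A rough userspace model of that copy discipline, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent() and buf_model as an illustrative type:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

enum xfer_dir { TO_DEVICE, FROM_DEVICE };

struct buf_model {
        void *v_addr;           /* client buffer */
        void *bb_addr;          /* bounce buffer */
        size_t len;
        enum xfer_dir dir;
};

static int map_use_bb(struct buf_model *b)
{
        b->bb_addr = malloc(b->len);    /* stand-in for dma_alloc_coherent() */
        if (!b->bb_addr)
                return -ENOMEM;

        if (b->dir == TO_DEVICE)
                memcpy(b->bb_addr, b->v_addr, b->len);  /* stage outbound data */

        return 0;
}

static void unmap_use_bb(struct buf_model *b)
{
        if (b->dir == FROM_DEVICE)
                memcpy(b->v_addr, b->bb_addr, b->len);  /* copy inbound data back */

        free(b->bb_addr);               /* stand-in for dma_free_coherent() */
}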
233 if (ring->wp < ring->rp) { in get_nr_avail_ring_elements()
234 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; in get_nr_avail_ring_elements()
236 nr_el = (ring->rp - ring->base) / ring->el_size; in get_nr_avail_ring_elements()
237 nr_el += ((ring->base + ring->len - ring->wp) / in get_nr_avail_ring_elements()
238 ring->el_size) - 1; in get_nr_avail_ring_elements()
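The free-element count above is circular-ring arithmetic with one slot always kept unused, so that wp == rp unambiguously means "empty". The same calculation as a standalone function over plain pointers (nr_avail is an illustrative name):

#include <stddef.h>

static size_t nr_avail(char *base, size_t len, size_t el_size,
                       char *rp, char *wp)
{
        if (wp < rp)
                return (size_t)(rp - wp) / el_size - 1;

        /* wp >= rp: the free region wraps around the end of the ring */
        return (size_t)(rp - base) / el_size +
               (size_t)(base + len - wp) / el_size - 1;
}

With wp == rp this yields len / el_size - 1, i.e. even an otherwise empty ring reports one slot fewer, which is the reserved-slot convention.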
246 return (addr - ring->iommu_base) + ring->base; in mhi_to_virtual()
252 ring->wp += ring->el_size; in mhi_add_ring_element()
253 if (ring->wp >= (ring->base + ring->len)) in mhi_add_ring_element()
254 ring->wp = ring->base; in mhi_add_ring_element()
262 ring->rp += ring->el_size; in mhi_del_ring_element()
263 if (ring->rp >= (ring->base + ring->len)) in mhi_del_ring_element()
264 ring->rp = ring->base; in mhi_del_ring_element()
271 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; in is_valid_ring_ptr()
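The add/del helpers above advance wp or rp by one element and wrap back to base past the end of the ring, while is_valid_ring_ptr() only trusts a device-supplied bus address that falls inside [iommu_base, iommu_base + len). Both patterns as small standalone helpers (names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Step one element forward, wrapping back to the start of the ring. */
static void ring_advance(char **ptr, char *base, size_t len, size_t el_size)
{
        *ptr += el_size;
        if (*ptr >= base + len)
                *ptr = base;
}

/* Accept a device-supplied bus address only if it lies within the ring. */
static int ring_ptr_is_valid(uint64_t iommu_base, size_t len, uint64_t addr)
{
        return addr >= iommu_base && addr < iommu_base + len;
}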
281 if (dev->bus != &mhi_bus_type) in mhi_destroy_device()
285 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_destroy_device()
288 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_destroy_device()
291 ul_chan = mhi_dev->ul_chan; in mhi_destroy_device()
292 dl_chan = mhi_dev->dl_chan; in mhi_destroy_device()
310 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
313 put_device(&ul_chan->mhi_dev->dev); in mhi_destroy_device()
317 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) in mhi_destroy_device()
320 put_device(&dl_chan->mhi_dev->dev); in mhi_destroy_device()
323 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", in mhi_destroy_device()
324 mhi_dev->name); in mhi_destroy_device()
336 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_get_free_desc_count()
338 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_get_free_desc_count()
339 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_get_free_desc_count()
349 if (!mhi_dev->dev.driver) in mhi_notify()
352 mhi_drv = to_mhi_driver(mhi_dev->dev.driver); in mhi_notify()
354 if (mhi_drv->status_cb) in mhi_notify()
355 mhi_drv->status_cb(mhi_dev, cb_reason); in mhi_notify()
364 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_create_devices()
367 mhi_chan = mhi_cntrl->mhi_chan; in mhi_create_devices()
368 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_create_devices()
369 if (!mhi_chan->configured || mhi_chan->mhi_dev || in mhi_create_devices()
370 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) in mhi_create_devices()
376 mhi_dev->dev_type = MHI_DEVICE_XFER; in mhi_create_devices()
377 switch (mhi_chan->dir) { in mhi_create_devices()
379 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
380 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
384 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
385 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
389 put_device(&mhi_dev->dev); in mhi_create_devices()
393 get_device(&mhi_dev->dev); in mhi_create_devices()
394 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
397 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { in mhi_create_devices()
398 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { in mhi_create_devices()
401 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_create_devices()
402 mhi_dev->ul_chan = mhi_chan; in mhi_create_devices()
403 mhi_dev->ul_chan_id = mhi_chan->chan; in mhi_create_devices()
405 mhi_dev->dl_chan = mhi_chan; in mhi_create_devices()
406 mhi_dev->dl_chan_id = mhi_chan->chan; in mhi_create_devices()
408 get_device(&mhi_dev->dev); in mhi_create_devices()
409 mhi_chan->mhi_dev = mhi_dev; in mhi_create_devices()
414 mhi_dev->name = mhi_chan->name; in mhi_create_devices()
415 dev_set_name(&mhi_dev->dev, "%s_%s", in mhi_create_devices()
416 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_create_devices()
417 mhi_dev->name); in mhi_create_devices()
420 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) in mhi_create_devices()
421 device_init_wakeup(&mhi_dev->dev, true); in mhi_create_devices()
423 ret = device_add(&mhi_dev->dev); in mhi_create_devices()
425 put_device(&mhi_dev->dev); in mhi_create_devices()
432 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_irq_handler()
434 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_irq_handler()
443 if (!mhi_cntrl->mhi_ctxt) { in mhi_irq_handler()
444 dev_dbg(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
449 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_irq_handler()
450 ptr = le64_to_cpu(er_ctxt->rp); in mhi_irq_handler()
453 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_irq_handler()
461 if (ev_ring->rp == dev_rp) in mhi_irq_handler()
465 if (mhi_event->cl_manage) { in mhi_irq_handler()
466 struct mhi_chan *mhi_chan = mhi_event->mhi_chan; in mhi_irq_handler()
467 struct mhi_device *mhi_dev = mhi_chan->mhi_dev; in mhi_irq_handler()
472 tasklet_schedule(&mhi_event->task); in mhi_irq_handler()
481 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_intvec_threaded_handler()
486 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
487 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_intvec_threaded_handler()
488 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
495 TO_MHI_EXEC_STR(mhi_cntrl->ee), in mhi_intvec_threaded_handler()
496 mhi_state_str(mhi_cntrl->dev_state), in mhi_intvec_threaded_handler()
504 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_intvec_threaded_handler()
506 if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) in mhi_intvec_threaded_handler()
512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { in mhi_intvec_threaded_handler()
513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_intvec_threaded_handler()
514 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
515 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); in mhi_intvec_threaded_handler()
522 mhi_cntrl->ee = ee; in mhi_intvec_threaded_handler()
523 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
527 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_threaded_handler()
542 wake_up_all(&mhi_cntrl->state_event); in mhi_intvec_handler()
551 ring->wp += ring->el_size; in mhi_recycle_ev_ring_element()
553 if (ring->wp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
554 ring->wp = ring->base; in mhi_recycle_ev_ring_element()
556 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); in mhi_recycle_ev_ring_element()
559 ring->rp += ring->el_size; in mhi_recycle_ev_ring_element()
560 if (ring->rp >= (ring->base + ring->len)) in mhi_recycle_ev_ring_element()
561 ring->rp = ring->base; in mhi_recycle_ev_ring_element()
572 struct device *dev = &mhi_cntrl->mhi_dev->dev; in parse_xfer_event()
578 buf_ring = &mhi_chan->buf_ring; in parse_xfer_event()
579 tre_ring = &mhi_chan->tre_ring; in parse_xfer_event()
582 -EOVERFLOW : 0; in parse_xfer_event()
585 * If it's a DB Event then we need to grab the lock in parse_xfer_event()
587 * have to update db register and there are chances that in parse_xfer_event()
591 write_lock_irqsave(&mhi_chan->lock, flags); in parse_xfer_event()
593 read_lock_bh(&mhi_chan->lock); in parse_xfer_event()
595 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_xfer_event()
610 dev_err(&mhi_cntrl->mhi_dev->dev, in parse_xfer_event()
618 if (dev_rp >= (tre_ring->base + tre_ring->len)) in parse_xfer_event()
619 dev_rp = tre_ring->base; in parse_xfer_event()
621 result.dir = mhi_chan->dir; in parse_xfer_event()
623 local_rp = tre_ring->rp; in parse_xfer_event()
625 buf_info = buf_ring->rp; in parse_xfer_event()
630 xfer_len = buf_info->len; in parse_xfer_event()
632 /* Unmap if it's not pre-mapped by client */ in parse_xfer_event()
633 if (likely(!buf_info->pre_mapped)) in parse_xfer_event()
634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in parse_xfer_event()
636 result.buf_addr = buf_info->cb_buf; in parse_xfer_event()
640 min_t(u16, xfer_len, buf_info->len); in parse_xfer_event()
643 local_rp = tre_ring->rp; in parse_xfer_event()
646 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_xfer_event()
648 if (mhi_chan->dir == DMA_TO_DEVICE) { in parse_xfer_event()
649 atomic_dec(&mhi_cntrl->pending_pkts); in parse_xfer_event()
651 mhi_cntrl->runtime_put(mhi_cntrl); in parse_xfer_event()
655 * Recycle the buffer if buffer is pre-allocated, in parse_xfer_event()
659 if (mhi_chan->pre_alloc) { in parse_xfer_event()
660 if (mhi_queue_buf(mhi_chan->mhi_dev, in parse_xfer_event()
661 mhi_chan->dir, in parse_xfer_event()
662 buf_info->cb_buf, in parse_xfer_event()
663 buf_info->len, MHI_EOT)) { in parse_xfer_event()
666 mhi_chan->chan); in parse_xfer_event()
667 kfree(buf_info->cb_buf); in parse_xfer_event()
678 mhi_chan->db_cfg.db_mode = 1; in parse_xfer_event()
679 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
680 if (tre_ring->wp != tre_ring->rp && in parse_xfer_event()
684 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); in parse_xfer_event()
695 write_unlock_irqrestore(&mhi_chan->lock, flags); in parse_xfer_event()
697 read_unlock_bh(&mhi_chan->lock); in parse_xfer_event()
713 buf_ring = &mhi_chan->buf_ring; in parse_rsc_event()
714 tre_ring = &mhi_chan->tre_ring; in parse_rsc_event()
721 WARN_ON(cookie >= buf_ring->len); in parse_rsc_event()
723 buf_info = buf_ring->base + cookie; in parse_rsc_event()
726 -EOVERFLOW : 0; in parse_rsc_event()
729 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); in parse_rsc_event()
730 result.buf_addr = buf_info->cb_buf; in parse_rsc_event()
731 result.dir = mhi_chan->dir; in parse_rsc_event()
733 read_lock_bh(&mhi_chan->lock); in parse_rsc_event()
735 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in parse_rsc_event()
738 WARN_ON(!buf_info->used); in parse_rsc_event()
741 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in parse_rsc_event()
747 * receive, so even though completion event is different we can re-use in parse_rsc_event()
758 buf_info->used = false; in parse_rsc_event()
761 read_unlock_bh(&mhi_chan->lock); in parse_rsc_event()
770 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_process_cmd_completion()
771 struct mhi_ring *mhi_ring = &cmd_ring->ring; in mhi_process_cmd_completion()
777 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
778 "Event element points outside of the cmd ring\n"); in mhi_process_cmd_completion()
786 if (chan < mhi_cntrl->max_chan && in mhi_process_cmd_completion()
787 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_cmd_completion()
788 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_cmd_completion()
789 write_lock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
790 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); in mhi_process_cmd_completion()
791 complete(&mhi_chan->completion); in mhi_process_cmd_completion()
792 write_unlock_bh(&mhi_chan->lock); in mhi_process_cmd_completion()
794 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_cmd_completion()
806 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_ctrl_ev_ring()
808 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_ctrl_ev_ring()
810 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_process_ctrl_ev_ring()
813 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
820 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_ctrl_ev_ring()
821 return -EIO; in mhi_process_ctrl_ev_ring()
824 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
826 return -EIO; in mhi_process_ctrl_ev_ring()
830 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
840 link_info = &mhi_cntrl->mhi_link_info; in mhi_process_ctrl_ev_ring()
841 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
842 link_info->target_link_speed = in mhi_process_ctrl_ev_ring()
844 link_info->target_link_width = in mhi_process_ctrl_ev_ring()
846 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
848 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); in mhi_process_ctrl_ev_ring()
875 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
878 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
912 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); in mhi_process_ctrl_ev_ring()
913 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
914 mhi_cntrl->ee = event; in mhi_process_ctrl_ev_ring()
915 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
916 wake_up_all(&mhi_cntrl->state_event); in mhi_process_ctrl_ev_ring()
930 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_ctrl_ev_ring()
936 if (chan < mhi_cntrl->max_chan) { in mhi_process_ctrl_ev_ring()
937 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_ctrl_ev_ring()
938 if (!mhi_chan->configured) in mhi_process_ctrl_ev_ring()
941 event_quota--; in mhi_process_ctrl_ev_ring()
950 local_rp = ev_ring->rp; in mhi_process_ctrl_ev_ring()
952 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_ctrl_ev_ring()
954 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_ctrl_ev_ring()
956 return -EIO; in mhi_process_ctrl_ev_ring()
963 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
966 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_ctrl_ev_ring()
976 struct mhi_ring *ev_ring = &mhi_event->ring; in mhi_process_data_event_ring()
978 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; in mhi_process_data_event_ring()
982 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
984 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) in mhi_process_data_event_ring()
985 return -EIO; in mhi_process_data_event_ring()
988 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
990 return -EIO; in mhi_process_data_event_ring()
994 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1001 WARN_ON(chan >= mhi_cntrl->max_chan); in mhi_process_data_event_ring()
1007 if (chan < mhi_cntrl->max_chan && in mhi_process_data_event_ring()
1008 mhi_cntrl->mhi_chan[chan].configured) { in mhi_process_data_event_ring()
1009 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_process_data_event_ring()
1013 event_quota--; in mhi_process_data_event_ring()
1016 event_quota--; in mhi_process_data_event_ring()
1021 local_rp = ev_ring->rp; in mhi_process_data_event_ring()
1023 ptr = le64_to_cpu(er_ctxt->rp); in mhi_process_data_event_ring()
1025 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_process_data_event_ring()
1027 return -EIO; in mhi_process_data_event_ring()
1033 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1036 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_process_data_event_ring()
1044 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ev_task()
1047 spin_lock_bh(&mhi_event->lock); in mhi_ev_task()
1048 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ev_task()
1049 spin_unlock_bh(&mhi_event->lock); in mhi_ev_task()
1055 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; in mhi_ctrl_ev_task()
1056 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ctrl_ev_task()
1066 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { in mhi_ctrl_ev_task()
1078 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); in mhi_ctrl_ev_task()
1085 write_lock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1092 write_unlock_irq(&mhi_cntrl->pm_lock); in mhi_ctrl_ev_task()
1101 void *tmp = ring->wp + ring->el_size; in mhi_is_ring_full()
1103 if (tmp >= (ring->base + ring->len)) in mhi_is_ring_full()
1104 tmp = ring->base; in mhi_is_ring_full()
1106 return (tmp == ring->rp); in mhi_is_ring_full()
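The full-check above is the counterpart of the reserved-slot convention noted earlier: the ring is full when advancing wp by one element would land on rp. A standalone version of the same check (ring_is_full is an illustrative name):

#include <stddef.h>

static int ring_is_full(char *base, size_t len, size_t el_size,
                        char *rp, char *wp)
{
        char *next = wp + el_size;

        if (next >= base + len)
                next = base;            /* wrap, as in the fragment above */

        return next == rp;              /* next write would hit the read pointer */
}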
1112 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue()
1113 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue()
1114 mhi_dev->dl_chan; in mhi_queue()
1115 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue()
1119 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) in mhi_queue()
1120 return -EIO; in mhi_queue()
1122 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1126 ret = -EAGAIN; in mhi_queue()
1135 * for host->device buffer, balanced put is done on buffer completion in mhi_queue()
1136 * for device->host buffer, balanced put is after ringing the DB in mhi_queue()
1138 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_queue()
1141 mhi_cntrl->wake_toggle(mhi_cntrl); in mhi_queue()
1143 if (mhi_chan->dir == DMA_TO_DEVICE) in mhi_queue()
1144 atomic_inc(&mhi_cntrl->pending_pkts); in mhi_queue()
1150 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_queue()
1153 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); in mhi_queue()
1161 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_skb()
1162 mhi_dev->dl_chan; in mhi_queue_skb()
1165 buf_info.v_addr = skb->data; in mhi_queue_skb()
1169 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_skb()
1170 return -EINVAL; in mhi_queue_skb()
1179 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : in mhi_queue_dma()
1180 mhi_dev->dl_chan; in mhi_queue_dma()
1183 buf_info.p_addr = mhi_buf->dma_addr; in mhi_queue_dma()
1188 if (unlikely(mhi_chan->pre_alloc)) in mhi_queue_dma()
1189 return -EINVAL; in mhi_queue_dma()
1204 buf_ring = &mhi_chan->buf_ring; in mhi_gen_tre()
1205 tre_ring = &mhi_chan->tre_ring; in mhi_gen_tre()
1207 buf_info = buf_ring->wp; in mhi_gen_tre()
1208 WARN_ON(buf_info->used); in mhi_gen_tre()
1209 buf_info->pre_mapped = info->pre_mapped; in mhi_gen_tre()
1210 if (info->pre_mapped) in mhi_gen_tre()
1211 buf_info->p_addr = info->p_addr; in mhi_gen_tre()
1213 buf_info->v_addr = info->v_addr; in mhi_gen_tre()
1214 buf_info->cb_buf = info->cb_buf; in mhi_gen_tre()
1215 buf_info->wp = tre_ring->wp; in mhi_gen_tre()
1216 buf_info->dir = mhi_chan->dir; in mhi_gen_tre()
1217 buf_info->len = info->len; in mhi_gen_tre()
1219 if (!info->pre_mapped) { in mhi_gen_tre()
1220 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); in mhi_gen_tre()
1228 bei = !!(mhi_chan->intmod); in mhi_gen_tre()
1230 mhi_tre = tre_ring->wp; in mhi_gen_tre()
1231 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); in mhi_gen_tre()
1232 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); in mhi_gen_tre()
1233 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); in mhi_gen_tre()
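The transfer-ring element being filled above carries the buffer's bus address in ptr, the transfer length in dword[0], and the BEI/EOT/EOB/chain flags in dword[1], all little-endian. As a hedged placeholder only (the exact bit layout of the dwords is not shown in this listing), the element can be modelled as:

#include <stdint.h>

struct tre_model {
        uint64_t ptr;           /* bus address of the data buffer (little-endian) */
        uint32_t dword[2];      /* dword[0]: length; dword[1]: BEI/EOT/EOB/chain flags */
};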
1257 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_queue_is_full()
1259 mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_queue_is_full()
1260 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; in mhi_queue_is_full()
1268 enum mhi_cmd_type cmd) in mhi_send_cmd() argument
1271 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; in mhi_send_cmd()
1272 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_send_cmd()
1273 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_send_cmd()
1277 chan = mhi_chan->chan; in mhi_send_cmd()
1279 spin_lock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1281 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1282 return -ENOMEM; in mhi_send_cmd()
1285 /* prepare the cmd tre */ in mhi_send_cmd()
1286 cmd_tre = ring->wp; in mhi_send_cmd()
1287 switch (cmd) { in mhi_send_cmd()
1289 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; in mhi_send_cmd()
1290 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; in mhi_send_cmd()
1291 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); in mhi_send_cmd()
1294 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; in mhi_send_cmd()
1295 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; in mhi_send_cmd()
1296 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); in mhi_send_cmd()
1299 cmd_tre->ptr = MHI_TRE_CMD_START_PTR; in mhi_send_cmd()
1300 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; in mhi_send_cmd()
1301 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); in mhi_send_cmd()
1310 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1313 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_send_cmd()
1314 spin_unlock_bh(&mhi_cmd->lock); in mhi_send_cmd()
1323 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_update_channel_state()
1324 enum mhi_cmd_type cmd = MHI_CMD_NOP; in mhi_update_channel_state() local
1327 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, in mhi_update_channel_state()
1332 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1333 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1334 mhi_chan->ch_state != MHI_CH_STATE_ENABLED && in mhi_update_channel_state()
1335 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { in mhi_update_channel_state()
1336 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1337 return -EINVAL; in mhi_update_channel_state()
1339 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_update_channel_state()
1340 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1342 cmd = MHI_CMD_RESET_CHAN; in mhi_update_channel_state()
1345 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) in mhi_update_channel_state()
1346 return -EINVAL; in mhi_update_channel_state()
1348 cmd = MHI_CMD_STOP_CHAN; in mhi_update_channel_state()
1351 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && in mhi_update_channel_state()
1352 mhi_chan->ch_state != MHI_CH_STATE_DISABLED) in mhi_update_channel_state()
1353 return -EINVAL; in mhi_update_channel_state()
1355 cmd = MHI_CMD_START_CHAN; in mhi_update_channel_state()
1359 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1360 return -EINVAL; in mhi_update_channel_state()
1364 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1367 mhi_cntrl->runtime_get(mhi_cntrl); in mhi_update_channel_state()
1369 reinit_completion(&mhi_chan->completion); in mhi_update_channel_state()
1370 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); in mhi_update_channel_state()
1373 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1377 ret = wait_for_completion_timeout(&mhi_chan->completion, in mhi_update_channel_state()
1378 msecs_to_jiffies(mhi_cntrl->timeout_ms)); in mhi_update_channel_state()
1379 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { in mhi_update_channel_state()
1382 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1383 ret = -EIO; in mhi_update_channel_state()
1390 write_lock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1391 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? in mhi_update_channel_state()
1393 write_unlock_irq(&mhi_chan->lock); in mhi_update_channel_state()
1397 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); in mhi_update_channel_state()
1400 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_update_channel_state()
1401 mhi_device_put(mhi_cntrl->mhi_dev); in mhi_update_channel_state()
1410 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_unprepare_channel()
1412 mutex_lock(&mhi_chan->mutex); in mhi_unprepare_channel()
1414 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_unprepare_channel()
1416 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_unprepare_channel()
1425 mhi_chan->chan); in mhi_unprepare_channel()
1428 write_lock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1429 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_unprepare_channel()
1430 write_unlock_irq(&mhi_chan->lock); in mhi_unprepare_channel()
1432 if (!mhi_chan->offload_ch) { in mhi_unprepare_channel()
1436 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); in mhi_unprepare_channel()
1438 mutex_unlock(&mhi_chan->mutex); in mhi_unprepare_channel()
1445 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_prepare_channel()
1447 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { in mhi_prepare_channel()
1449 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); in mhi_prepare_channel()
1450 return -ENOTCONN; in mhi_prepare_channel()
1453 mutex_lock(&mhi_chan->mutex); in mhi_prepare_channel()
1456 if (!mhi_chan->offload_ch) { in mhi_prepare_channel()
1467 if (mhi_chan->dir == DMA_FROM_DEVICE) in mhi_prepare_channel()
1468 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); in mhi_prepare_channel()
1470 /* Pre-allocate buffer for xfer ring */ in mhi_prepare_channel()
1471 if (mhi_chan->pre_alloc) { in mhi_prepare_channel()
1473 &mhi_chan->tre_ring); in mhi_prepare_channel()
1474 size_t len = mhi_cntrl->buffer_len; in mhi_prepare_channel()
1476 while (nr_el--) { in mhi_prepare_channel()
1482 ret = -ENOMEM; in mhi_prepare_channel()
1497 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1499 read_lock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1501 read_unlock_irq(&mhi_chan->lock); in mhi_prepare_channel()
1503 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_prepare_channel()
1506 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1511 if (!mhi_chan->offload_ch) in mhi_prepare_channel()
1515 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1520 mutex_unlock(&mhi_chan->mutex); in mhi_prepare_channel()
1534 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_mark_stale_events()
1540 ev_ring = &mhi_event->ring; in mhi_mark_stale_events()
1543 spin_lock_irqsave(&mhi_event->lock, flags); in mhi_mark_stale_events()
1545 ptr = le64_to_cpu(er_ctxt->rp); in mhi_mark_stale_events()
1547 dev_err(&mhi_cntrl->mhi_dev->dev, in mhi_mark_stale_events()
1549 dev_rp = ev_ring->rp; in mhi_mark_stale_events()
1554 local_rp = ev_ring->rp; in mhi_mark_stale_events()
1558 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, in mhi_mark_stale_events()
1561 if (local_rp == (ev_ring->base + ev_ring->len)) in mhi_mark_stale_events()
1562 local_rp = ev_ring->base; in mhi_mark_stale_events()
1566 spin_unlock_irqrestore(&mhi_event->lock, flags); in mhi_mark_stale_events()
1576 buf_ring = &mhi_chan->buf_ring; in mhi_reset_data_chan()
1577 tre_ring = &mhi_chan->tre_ring; in mhi_reset_data_chan()
1578 result.transaction_status = -ENOTCONN; in mhi_reset_data_chan()
1580 while (tre_ring->rp != tre_ring->wp) { in mhi_reset_data_chan()
1581 struct mhi_buf_info *buf_info = buf_ring->rp; in mhi_reset_data_chan()
1583 if (mhi_chan->dir == DMA_TO_DEVICE) { in mhi_reset_data_chan()
1584 atomic_dec(&mhi_cntrl->pending_pkts); in mhi_reset_data_chan()
1586 mhi_cntrl->runtime_put(mhi_cntrl); in mhi_reset_data_chan()
1589 if (!buf_info->pre_mapped) in mhi_reset_data_chan()
1590 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); in mhi_reset_data_chan()
1595 if (mhi_chan->pre_alloc) { in mhi_reset_data_chan()
1596 kfree(buf_info->cb_buf); in mhi_reset_data_chan()
1598 result.buf_addr = buf_info->cb_buf; in mhi_reset_data_chan()
1599 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_reset_data_chan()
1608 int chan = mhi_chan->chan; in mhi_reset_chan()
1611 if (mhi_chan->offload_ch) in mhi_reset_chan()
1614 read_lock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1615 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_reset_chan()
1616 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; in mhi_reset_chan()
1622 read_unlock_bh(&mhi_cntrl->pm_lock); in mhi_reset_chan()
1628 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in __mhi_prepare_for_transfer()
1632 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1644 for (--dir; dir >= 0; dir--) { in __mhi_prepare_for_transfer()
1645 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; in __mhi_prepare_for_transfer()
1669 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_unprepare_from_transfer()
1674 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_unprepare_from_transfer()
1685 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_poll()
1686 struct mhi_chan *mhi_chan = mhi_dev->dl_chan; in mhi_poll()
1687 struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_poll()
1690 spin_lock_bh(&mhi_event->lock); in mhi_poll()
1691 ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); in mhi_poll()
1692 spin_unlock_bh(&mhi_event->lock); in mhi_poll()