Lines matching "odd-numbered" in drivers/bus/mhi/ep/main.c (MHI endpoint bus stack)
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/dma-direction.h>
31 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_send_event()
36 mutex_lock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
37 ring = &mhi_cntrl->mhi_event[ring_idx].ring; in mhi_ep_send_event()
38 ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; in mhi_ep_send_event()
39 if (!ring->started) { in mhi_ep_send_event()
54 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
61 mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); in mhi_ep_send_event()
66 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_send_event()
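
mhi_ep_send_event() serializes all writes to a shared event ring behind event_lock, adds the element, and rings the host's IRQ vector only when the caller did not pass the TRE's BEI (block event interrupt) flag. A userspace sketch of that post-then-maybe-interrupt shape, assuming a pthread mutex in place of the kernel mutex; the ring and callback names are stand-ins, not the driver's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct event_ring {
	pthread_mutex_t lock;
	int pending;		/* events posted but not yet signalled */
};

static void raise_irq(void) { puts("IRQ to host"); }

/* Post one event; skip the interrupt when BEI asks for batching. */
static void send_event(struct event_ring *ring, bool bei)
{
	pthread_mutex_lock(&ring->lock);
	ring->pending++;	/* stands in for adding the ring element */
	pthread_mutex_unlock(&ring->lock);

	if (!bei)
		raise_irq();
}

int main(void)
{
	struct event_ring ring = { PTHREAD_MUTEX_INITIALIZER, 0 };

	send_event(&ring, true);	/* batched: no interrupt */
	send_event(&ring, false);	/* flush: host gets the IRQ */
	printf("pending=%d\n", ring.pending);
	return 0;
}
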
76 event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); in mhi_ep_send_completion_event()
78 event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); in mhi_ep_send_completion_event()
80 return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); in mhi_ep_send_completion_event()
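
mhi_ep_send_completion_event() stamps the event with the host address of the TRE just consumed: ring base plus read offset scaled by the element size, converted with cpu_to_le64(). A minimal sketch of that address arithmetic, assuming the 16-byte layout (one u64 plus two u32 dwords) of the kernel's struct mhi_ring_element; the names below are stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's 16-byte struct mhi_ring_element. */
struct ring_element {
	uint64_t ptr;
	uint32_t dword[2];
};

/* Host address of the TRE at rd_offset, as reported in the completion event. */
static uint64_t completed_tre_addr(uint64_t rbase, uint64_t rd_offset)
{
	return rbase + rd_offset * sizeof(struct ring_element);
}

int main(void)
{
	/* Ring based at 0x80000000; completing the third element (offset 2). */
	printf("event ptr: 0x%llx\n",
	       (unsigned long long)completed_tre_addr(0x80000000ULL, 2));
	return 0;
}
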
105 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; in mhi_ep_send_cmd_comp_event()
108 event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); in mhi_ep_send_cmd_comp_event()
117 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; in mhi_ep_process_cmd_ring()
118 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_process_cmd_ring()
126 mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_process_cmd_ring()
127 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_process_cmd_ring()
133 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
135 if (!ch_ring->started) { in mhi_ep_process_cmd_ring()
137 (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); in mhi_ep_process_cmd_ring()
150 mhi_chan->state = MHI_CH_STATE_RUNNING; in mhi_ep_process_cmd_ring()
151 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
154 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
163 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
174 if (!(ch_id % 2) && !mhi_chan->mhi_dev) { in mhi_ep_process_cmd_ring()
189 if (!ch_ring->started) { in mhi_ep_process_cmd_ring()
191 return -ENODEV; in mhi_ep_process_cmd_ring()
194 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
199 result.transaction_status = -ENOTCONN; in mhi_ep_process_cmd_ring()
201 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_cmd_ring()
204 mhi_chan->state = MHI_CH_STATE_STOP; in mhi_ep_process_cmd_ring()
205 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
208 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
217 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
221 if (!ch_ring->started) { in mhi_ep_process_cmd_ring()
223 return -ENODEV; in mhi_ep_process_cmd_ring()
226 mutex_lock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
231 result.transaction_status = -ENOTCONN; in mhi_ep_process_cmd_ring()
233 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_cmd_ring()
236 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_process_cmd_ring()
237 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); in mhi_ep_process_cmd_ring()
240 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); in mhi_ep_process_cmd_ring()
249 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
254 return -EINVAL; in mhi_ep_process_cmd_ring()
260 mutex_unlock(&mhi_chan->lock); in mhi_ep_process_cmd_ring()
267 struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : in mhi_ep_queue_is_empty()
268 mhi_dev->ul_chan; in mhi_ep_queue_is_empty()
269 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_queue_is_empty()
270 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_is_empty()
272 return !!(ring->rd_offset == ring->wr_offset); in mhi_ep_queue_is_empty()
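
mhi_ep_queue_is_empty() picks the UL or DL transfer ring by direction and then reduces to comparing the cached read and write offsets. A self-contained sketch of that offset convention with hypothetical names; the full-ring check is the sketch's own convention (one slot kept free), not a line lifted from the driver:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ring {
	size_t rd_offset;	/* next element the consumer reads */
	size_t wr_offset;	/* next slot the producer fills */
	size_t el_count;	/* total slots in the ring */
};

static bool ring_empty(const struct ring *r)
{
	return r->rd_offset == r->wr_offset;
}

/* One slot stays unused so a full ring is never mistaken for an empty one. */
static bool ring_full(const struct ring *r)
{
	return (r->wr_offset + 1) % r->el_count == r->rd_offset;
}

int main(void)
{
	struct ring r = { .rd_offset = 0, .wr_offset = 0, .el_count = 8 };

	printf("empty=%d full=%d\n", ring_empty(&r), ring_full(&r));
	r.wr_offset = 7;	/* producer published seven elements */
	printf("empty=%d full=%d\n", ring_empty(&r), ring_full(&r));
	return 0;
}
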
281 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_read_channel()
282 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_read_channel()
295 if (mhi_chan->state != MHI_CH_STATE_RUNNING) { in mhi_ep_read_channel()
297 return -ENODEV; in mhi_ep_read_channel()
300 el = &ring->ring_cache[ring->rd_offset]; in mhi_ep_read_channel()
303 if (mhi_chan->tre_bytes_left) { in mhi_ep_read_channel()
304 dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); in mhi_ep_read_channel()
305 tr_len = min(buf_left, mhi_chan->tre_bytes_left); in mhi_ep_read_channel()
307 mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); in mhi_ep_read_channel()
308 mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); in mhi_ep_read_channel()
309 mhi_chan->tre_bytes_left = mhi_chan->tre_size; in mhi_ep_read_channel()
311 tr_len = min(buf_left, mhi_chan->tre_size); in mhi_ep_read_channel()
314 read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; in mhi_ep_read_channel()
315 write_offset = len - buf_left; in mhi_ep_read_channel()
316 read_addr = mhi_chan->tre_loc + read_offset; in mhi_ep_read_channel()
317 write_addr = result->buf_addr + write_offset; in mhi_ep_read_channel()
319 dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); in mhi_ep_read_channel()
320 ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); in mhi_ep_read_channel()
322 dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); in mhi_ep_read_channel()
326 buf_left -= tr_len; in mhi_ep_read_channel()
327 mhi_chan->tre_bytes_left -= tr_len; in mhi_ep_read_channel()
336 if (!mhi_chan->tre_bytes_left) { in mhi_ep_read_channel()
352 dev_err(&mhi_chan->mhi_dev->dev, in mhi_ep_read_channel()
368 dev_err(&mhi_chan->mhi_dev->dev, in mhi_ep_read_channel()
380 result->bytes_xferd += tr_len; in mhi_ep_read_channel()
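
The loop around these lines in mhi_ep_read_channel() splits a client buffer across one or more host TREs, resuming mid-TRE via tre_bytes_left when an earlier read stopped short. A hedged userspace model of just that bookkeeping: memcpy() stands in for the controller's read_from_host() callback, and ring wrap-around and completion events are omitted:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tre { const uint8_t *buf; size_t size; };

/* Fill dst[0..len) from a list of TREs, tracking partial-TRE progress. */
static size_t read_tres(const struct tre *tres, size_t ntres, uint8_t *dst, size_t len)
{
	size_t idx = 0, tre_bytes_left = 0, buf_left = len;

	while (buf_left && idx < ntres) {
		const struct tre *el = &tres[idx];
		size_t tr_len, read_offset, write_offset;

		if (!tre_bytes_left)
			tre_bytes_left = el->size;	/* starting a fresh TRE */

		tr_len = buf_left < tre_bytes_left ? buf_left : tre_bytes_left;
		read_offset = el->size - tre_bytes_left;	/* resume point in TRE */
		write_offset = len - buf_left;			/* fill point in dst */

		memcpy(dst + write_offset, el->buf + read_offset, tr_len);

		buf_left -= tr_len;
		tre_bytes_left -= tr_len;
		if (!tre_bytes_left)
			idx++;		/* TRE fully consumed, advance the ring */
	}
	return len - buf_left;
}

int main(void)
{
	const uint8_t a[] = "hello ", b[] = "world";
	struct tre tres[] = { { a, 6 }, { b, 5 } };
	uint8_t out[16] = { 0 };
	size_t n = read_tres(tres, 2, out, sizeof(out) - 1);

	printf("%zu: %s\n", n, out);
	return 0;
}
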
388 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; in mhi_ep_process_ch_ring()
394 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_process_ch_ring()
400 if (!mhi_chan->xfer_cb) { in mhi_ep_process_ch_ring()
401 dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); in mhi_ep_process_ch_ring()
402 return -ENODEV; in mhi_ep_process_ch_ring()
405 if (ring->ch_id % 2) { in mhi_ep_process_ch_ring()
407 result.dir = mhi_chan->dir; in mhi_ep_process_ch_ring()
408 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_ch_ring()
413 return -ENOMEM; in mhi_ep_process_ch_ring()
418 dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); in mhi_ep_process_ch_ring()
423 result.dir = mhi_chan->dir; in mhi_ep_process_ch_ring()
424 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_process_ch_ring()
429 } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); in mhi_ep_process_ch_ring()
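
The ring->ch_id % 2 test encodes the pairing convention documented later at mhi_ep_create_device(): even channel IDs are UL (host to device), odd IDs are DL (device to host). Made explicit in a trivial sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Even channel IDs are UL (host->device), odd IDs are DL (device->host). */
static bool chan_is_dl(unsigned int ch_id)
{
	return ch_id % 2;
}

int main(void)
{
	for (unsigned int ch = 0; ch < 4; ch++)
		printf("chan %u: %s\n", ch, chan_is_dl(ch) ? "DL" : "UL");
	return 0;
}
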
440 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_queue_skb()
441 struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; in mhi_ep_queue_skb()
442 struct device *dev = &mhi_chan->mhi_dev->dev; in mhi_ep_queue_skb()
453 buf_left = skb->len; in mhi_ep_queue_skb()
454 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; in mhi_ep_queue_skb()
456 mutex_lock(&mhi_chan->lock); in mhi_ep_queue_skb()
460 if (mhi_chan->state != MHI_CH_STATE_RUNNING) { in mhi_ep_queue_skb()
462 ret = -ENODEV; in mhi_ep_queue_skb()
468 ret = -ENOSPC; in mhi_ep_queue_skb()
472 el = &ring->ring_cache[ring->rd_offset]; in mhi_ep_queue_skb()
476 read_offset = skb->len - buf_left; in mhi_ep_queue_skb()
477 read_addr = skb->data + read_offset; in mhi_ep_queue_skb()
480 dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); in mhi_ep_queue_skb()
481 ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); in mhi_ep_queue_skb()
487 buf_left -= tr_len; in mhi_ep_queue_skb()
508 mutex_unlock(&mhi_chan->lock); in mhi_ep_queue_skb()
513 mutex_unlock(&mhi_chan->lock); in mhi_ep_queue_skb()
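
mhi_ep_queue_skb() runs the mirror-image loop on the DL side: the skb payload is carved into per-TRE chunks, and an empty transfer ring means the host has posted no receive buffer, hence -ENOSPC rather than "no data". A compact model of that write-side chunking, with memcpy() standing in for write_to_host() and an assumed fixed 8-byte buffer size:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy src[0..len) into fixed-size host buffers, one TRE-sized chunk at a
 * time; running out of buffers maps to -ENOSPC, as in mhi_ep_queue_skb(). */
static int queue_payload(const uint8_t *src, size_t len,
			 uint8_t bufs[][8], size_t nbufs)
{
	size_t buf_left = len, i = 0;

	while (buf_left) {
		size_t tr_len = buf_left < 8 ? buf_left : 8;

		if (i == nbufs)
			return -ENOSPC;	/* ring empty: no receive buffer posted */
		memcpy(bufs[i++], src + (len - buf_left), tr_len);
		buf_left -= tr_len;
	}
	return 0;
}

int main(void)
{
	uint8_t bufs[2][8];
	const uint8_t pkt[12] = "0123456789ab";

	printf("12 bytes, 2 buffers: %d\n", queue_payload(pkt, 12, bufs, 2));
	printf("12 bytes, 1 buffer:  %d\n", queue_payload(pkt, 12, bufs, 1));
	return 0;
}
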
522 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_cache_host_cfg()
529 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); in mhi_ep_cache_host_cfg()
531 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; in mhi_ep_cache_host_cfg()
532 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; in mhi_ep_cache_host_cfg()
539 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, in mhi_ep_cache_host_cfg()
540 &mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_cache_host_cfg()
541 (void __iomem **) &mhi_cntrl->ch_ctx_cache, in mhi_ep_cache_host_cfg()
552 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, in mhi_ep_cache_host_cfg()
553 &mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_cache_host_cfg()
554 (void __iomem **) &mhi_cntrl->ev_ctx_cache, in mhi_ep_cache_host_cfg()
565 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, in mhi_ep_cache_host_cfg()
566 &mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_cache_host_cfg()
567 (void __iomem **) &mhi_cntrl->cmd_ctx_cache, in mhi_ep_cache_host_cfg()
575 ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, in mhi_ep_cache_host_cfg()
576 (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); in mhi_ep_cache_host_cfg()
585 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_cache_host_cfg()
586 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); in mhi_ep_cache_host_cfg()
589 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_cache_host_cfg()
590 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); in mhi_ep_cache_host_cfg()
593 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_cache_host_cfg()
594 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); in mhi_ep_cache_host_cfg()
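
mhi_ep_cache_host_cfg() maps the channel, event, and command context regions in sequence and unwinds through the err_* labels in reverse order on failure, the standard kernel goto-cleanup idiom. The same shape in a runnable userspace sketch, with malloc()/free() standing in for the controller's alloc_map()/unmap_free() callbacks:

#include <stdio.h>
#include <stdlib.h>

struct cfg { void *ch_ctx, *ev_ctx, *cmd_ctx; };

static int cache_cfg(struct cfg *c)
{
	c->ch_ctx = malloc(64);
	if (!c->ch_ctx)
		return -1;

	c->ev_ctx = malloc(64);
	if (!c->ev_ctx)
		goto err_free_ch;

	c->cmd_ctx = malloc(64);
	if (!c->cmd_ctx)
		goto err_free_ev;

	return 0;

	/* Unwind strictly in reverse order of acquisition. */
err_free_ev:
	free(c->ev_ctx);
err_free_ch:
	free(c->ch_ctx);
	return -1;
}

int main(void)
{
	struct cfg c;

	printf("cache_cfg: %d\n", cache_cfg(&c));
	free(c.cmd_ctx); free(c.ev_ctx); free(c.ch_ctx);
	return 0;
}
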
603 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; in mhi_ep_free_host_cfg()
604 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; in mhi_ep_free_host_cfg()
607 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, in mhi_ep_free_host_cfg()
608 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); in mhi_ep_free_host_cfg()
610 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, in mhi_ep_free_host_cfg()
611 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); in mhi_ep_free_host_cfg()
613 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, in mhi_ep_free_host_cfg()
614 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); in mhi_ep_free_host_cfg()
630 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_enable()
650 return -ETIMEDOUT; in mhi_ep_enable()
670 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; in mhi_ep_cmd_ring_worker()
671 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_cmd_ring_worker()
683 if (ring->rd_offset == ring->wr_offset) in mhi_ep_cmd_ring_worker()
690 while (ring->rd_offset != ring->wr_offset) { in mhi_ep_cmd_ring_worker()
691 el = &ring->ring_cache[ring->rd_offset]; in mhi_ep_cmd_ring_worker()
695 dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); in mhi_ep_cmd_ring_worker()
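
mhi_ep_cmd_ring_worker() drains the command ring by walking the cached copy from rd_offset toward wr_offset, wrapping at the ring size. A stand-alone sketch of that drain loop under an assumed 8-element ring (the kernel advances via its ring helpers; the modulo here is the same effect):

#include <stdio.h>

#define EL_COUNT 8

struct ring { unsigned int rd, wr; int el[EL_COUNT]; };

/* Process every element published by the producer, advancing rd with wrap. */
static void drain(struct ring *r)
{
	while (r->rd != r->wr) {
		printf("processing el[%u] = %d\n", r->rd, r->el[r->rd]);
		r->rd = (r->rd + 1) % EL_COUNT;
	}
}

int main(void)
{
	struct ring r = { .rd = 6, .wr = 1, .el = { [6] = 60, [7] = 70 } };

	r.el[0] = 5;	/* element written after the ring wrapped */
	drain(&r);
	return 0;
}
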
704 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_ch_ring_worker()
713 spin_lock_irqsave(&mhi_cntrl->list_lock, flags); in mhi_ep_ch_ring_worker()
714 list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); in mhi_ep_ch_ring_worker()
715 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); in mhi_ep_ch_ring_worker()
719 list_del(&itr->node); in mhi_ep_ch_ring_worker()
720 ring = itr->ring; in mhi_ep_ch_ring_worker()
731 if (ring->rd_offset == ring->wr_offset) { in mhi_ep_ch_ring_worker()
736 el = &ring->ring_cache[ring->rd_offset]; in mhi_ep_ch_ring_worker()
737 chan = &mhi_cntrl->mhi_chan[ring->ch_id]; in mhi_ep_ch_ring_worker()
739 mutex_lock(&chan->lock); in mhi_ep_ch_ring_worker()
740 dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); in mhi_ep_ch_ring_worker()
744 ring->ch_id, ret); in mhi_ep_ch_ring_worker()
745 mutex_unlock(&chan->lock); in mhi_ep_ch_ring_worker()
750 mutex_unlock(&chan->lock); in mhi_ep_ch_ring_worker()
758 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_state_worker()
764 spin_lock_irqsave(&mhi_cntrl->list_lock, flags); in mhi_ep_state_worker()
765 list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); in mhi_ep_state_worker()
766 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); in mhi_ep_state_worker()
769 list_del(&itr->node); in mhi_ep_state_worker()
771 mhi_state_str(itr->state)); in mhi_ep_state_worker()
773 switch (itr->state) { in mhi_ep_state_worker()
785 dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); in mhi_ep_state_worker()
806 ring = &mhi_cntrl->mhi_chan[ch_id].ring; in mhi_ep_queue_channel_db()
811 item->ring = ring; in mhi_ep_queue_channel_db()
812 list_add_tail(&item->node, &head); in mhi_ep_queue_channel_db()
817 spin_lock(&mhi_cntrl->list_lock); in mhi_ep_queue_channel_db()
818 list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); in mhi_ep_queue_channel_db()
819 spin_unlock(&mhi_cntrl->list_lock); in mhi_ep_queue_channel_db()
821 queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); in mhi_ep_queue_channel_db()
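
mhi_ep_queue_channel_db() batches doorbell items on a private list, splices the whole batch into ch_db_list under a short spin_lock(), and queues the worker; mhi_ep_ch_ring_worker() above does the mirror splice-out. The same "build unlocked, splice under lock, drain unlocked" pattern in userspace C with a pthread mutex; the hand-rolled list is a simplified stand-in for the kernel's list_splice_tail_init():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int ch_id; struct item *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *shared_head;	/* protected by lock */

/* Producer: build the batch unlocked, hold the lock only for the splice. */
static void queue_doorbells(const int *ids, int n)
{
	struct item *head = NULL;

	for (int i = n - 1; i >= 0; i--) {
		struct item *it = malloc(sizeof(*it));
		it->ch_id = ids[i];
		it->next = head;
		head = it;
	}

	pthread_mutex_lock(&lock);
	struct item *tail = head;
	while (tail && tail->next)
		tail = tail->next;
	if (tail) {	/* splice the whole batch onto the shared list */
		tail->next = shared_head;
		shared_head = head;
	}
	pthread_mutex_unlock(&lock);
}

/* Consumer: detach the entire list under the lock, process it unlocked. */
static void drain_doorbells(void)
{
	pthread_mutex_lock(&lock);
	struct item *head = shared_head;
	shared_head = NULL;
	pthread_mutex_unlock(&lock);

	while (head) {
		struct item *next = head->next;
		printf("process doorbell for channel %d\n", head->ch_id);
		free(head);
		head = next;
	}
}

int main(void)
{
	queue_doorbells((const int[]){ 2, 3, 14 }, 3);
	drain_doorbells();
	return 0;
}
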
842 ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; in mhi_ep_check_channel_interrupt()
846 mhi_cntrl->chdb[i].status); in mhi_ep_check_channel_interrupt()
860 item->state = state; in mhi_ep_process_ctrl_interrupt()
861 spin_lock(&mhi_cntrl->list_lock); in mhi_ep_process_ctrl_interrupt()
862 list_add_tail(&item->node, &mhi_cntrl->st_transition_list); in mhi_ep_process_ctrl_interrupt()
863 spin_unlock(&mhi_cntrl->list_lock); in mhi_ep_process_ctrl_interrupt()
865 queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); in mhi_ep_process_ctrl_interrupt()
876 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_irq()
891 disable_irq_nosync(mhi_cntrl->irq); in mhi_ep_irq()
892 schedule_work(&mhi_cntrl->reset_work); in mhi_ep_irq()
902 queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); in mhi_ep_irq()
919 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_abort_transfer()
920 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
921 if (!mhi_chan->ring.started) in mhi_ep_abort_transfer()
924 mutex_lock(&mhi_chan->lock); in mhi_ep_abort_transfer()
926 if (mhi_chan->xfer_cb) { in mhi_ep_abort_transfer()
927 result.transaction_status = -ENOTCONN; in mhi_ep_abort_transfer()
929 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_abort_transfer()
932 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_abort_transfer()
933 mutex_unlock(&mhi_chan->lock); in mhi_ep_abort_transfer()
936 flush_workqueue(mhi_cntrl->wq); in mhi_ep_abort_transfer()
939 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); in mhi_ep_abort_transfer()
942 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_abort_transfer()
943 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_abort_transfer()
944 if (!mhi_chan->ring.started) in mhi_ep_abort_transfer()
947 ch_ring = &mhi_cntrl->mhi_chan[i].ring; in mhi_ep_abort_transfer()
948 mutex_lock(&mhi_chan->lock); in mhi_ep_abort_transfer()
950 mutex_unlock(&mhi_chan->lock); in mhi_ep_abort_transfer()
954 for (i = 0; i < mhi_cntrl->event_rings; i++) { in mhi_ep_abort_transfer()
955 ev_ring = &mhi_cntrl->mhi_event[i].ring; in mhi_ep_abort_transfer()
956 if (!ev_ring->started) in mhi_ep_abort_transfer()
959 mutex_lock(&mhi_cntrl->event_lock); in mhi_ep_abort_transfer()
961 mutex_unlock(&mhi_cntrl->event_lock); in mhi_ep_abort_transfer()
965 mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); in mhi_ep_abort_transfer()
970 mhi_cntrl->enabled = false; in mhi_ep_abort_transfer()
976 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_reset_worker()
982 spin_lock_bh(&mhi_cntrl->state_lock); in mhi_ep_reset_worker()
985 cur_state = mhi_cntrl->mhi_state; in mhi_ep_reset_worker()
986 spin_unlock_bh(&mhi_cntrl->state_lock); in mhi_ep_reset_worker()
990 * issue reset during shutdown also and we don't need to do re-init in in mhi_ep_reset_worker()
1012 enable_irq(mhi_cntrl->irq); in mhi_ep_reset_worker()
1023 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_handle_syserr()
1038 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_ep_power_up()
1048 mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), in mhi_ep_power_up()
1050 if (!mhi_cntrl->mhi_event) in mhi_ep_power_up()
1051 return -ENOMEM; in mhi_ep_power_up()
1054 mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); in mhi_ep_power_up()
1055 for (i = 0; i < mhi_cntrl->max_chan; i++) in mhi_ep_power_up()
1056 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); in mhi_ep_power_up()
1057 for (i = 0; i < mhi_cntrl->event_rings; i++) in mhi_ep_power_up()
1058 mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); in mhi_ep_power_up()
1060 mhi_cntrl->mhi_state = MHI_STATE_RESET; in mhi_ep_power_up()
1078 enable_irq(mhi_cntrl->irq); in mhi_ep_power_up()
1079 mhi_cntrl->enabled = true; in mhi_ep_power_up()
1084 kfree(mhi_cntrl->mhi_event); in mhi_ep_power_up()
1092 if (mhi_cntrl->enabled) in mhi_ep_power_down()
1095 kfree(mhi_cntrl->mhi_event); in mhi_ep_power_down()
1096 disable_irq(mhi_cntrl->irq); in mhi_ep_power_down()
1106 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_suspend_channels()
1107 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_suspend_channels()
1109 if (!mhi_chan->mhi_dev) in mhi_ep_suspend_channels()
1112 mutex_lock(&mhi_chan->lock); in mhi_ep_suspend_channels()
1114 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); in mhi_ep_suspend_channels()
1116 mutex_unlock(&mhi_chan->lock); in mhi_ep_suspend_channels()
1120 dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); in mhi_ep_suspend_channels()
1124 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); in mhi_ep_suspend_channels()
1125 mutex_unlock(&mhi_chan->lock); in mhi_ep_suspend_channels()
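
Both suspend and resume patch the channel-state field inside chcfg, a little-endian word shared with the host: le32_to_cpu(), rewrite one field, cpu_to_le32() back. A sketch of that round-trip on a little-endian host, where the swap helpers reduce to identity; the 8-bit field at bits 7:0 and the state values are illustrative assumptions, not the kernel's authoritative mask definition:

#include <stdint.h>
#include <stdio.h>

#define CHSTATE_MASK 0xffu	/* illustrative: state field in bits 7:0 */

/* On a little-endian host the byte-order helpers are identity functions. */
static uint32_t le32_to_cpu_(uint32_t v) { return v; }
static uint32_t cpu_to_le32_(uint32_t v) { return v; }

/* Read-modify-write one field of the shared little-endian context word. */
static uint32_t set_ch_state(uint32_t chcfg_le, uint8_t state)
{
	uint32_t tmp = le32_to_cpu_(chcfg_le);

	tmp &= ~CHSTATE_MASK;
	tmp |= state;
	return cpu_to_le32_(tmp);
}

int main(void)
{
	uint32_t chcfg = 0xabcd0002u;	/* config bits + assumed state 2 (running) */

	printf("suspended: 0x%08x\n", set_ch_state(chcfg, 3));
	return 0;
}
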
1135 for (i = 0; i < mhi_cntrl->max_chan; i++) { in mhi_ep_resume_channels()
1136 mhi_chan = &mhi_cntrl->mhi_chan[i]; in mhi_ep_resume_channels()
1138 if (!mhi_chan->mhi_dev) in mhi_ep_resume_channels()
1141 mutex_lock(&mhi_chan->lock); in mhi_ep_resume_channels()
1143 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); in mhi_ep_resume_channels()
1145 mutex_unlock(&mhi_chan->lock); in mhi_ep_resume_channels()
1149 dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); in mhi_ep_resume_channels()
1153 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); in mhi_ep_resume_channels()
1154 mutex_unlock(&mhi_chan->lock); in mhi_ep_resume_channels()
1162 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_ep_release_device()
1163 mhi_dev->mhi_cntrl->mhi_dev = NULL; in mhi_ep_release_device()
1166 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI in mhi_ep_release_device()
1170 if (mhi_dev->ul_chan) in mhi_ep_release_device()
1171 mhi_dev->ul_chan->mhi_dev = NULL; in mhi_ep_release_device()
1173 if (mhi_dev->dl_chan) in mhi_ep_release_device()
1174 mhi_dev->dl_chan->mhi_dev = NULL; in mhi_ep_release_device()
1187 return ERR_PTR(-ENOMEM); in mhi_ep_alloc_device()
1189 dev = &mhi_dev->dev; in mhi_ep_alloc_device()
1191 dev->bus = &mhi_ep_bus_type; in mhi_ep_alloc_device()
1192 dev->release = mhi_ep_release_device; in mhi_ep_alloc_device()
1197 dev->parent = mhi_cntrl->cntrl_dev; in mhi_ep_alloc_device()
1200 dev->parent = &mhi_cntrl->mhi_dev->dev; in mhi_ep_alloc_device()
1202 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_ep_alloc_device()
1203 mhi_dev->dev_type = dev_type; in mhi_ep_alloc_device()
1209	 * MHI channels are always defined in pairs with UL as the even-numbered
1210	 * channel and DL as the odd-numbered one. This function gets the UL channel (primary)
1216 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; in mhi_ep_create_device()
1217 struct device *dev = mhi_cntrl->cntrl_dev; in mhi_ep_create_device()
1222 if (strcmp(mhi_chan->name, mhi_chan[1].name)) { in mhi_ep_create_device()
1224 mhi_chan->name, mhi_chan[1].name); in mhi_ep_create_device()
1225 return -EINVAL; in mhi_ep_create_device()
1233 mhi_dev->ul_chan = mhi_chan; in mhi_ep_create_device()
1234 get_device(&mhi_dev->dev); in mhi_ep_create_device()
1235 mhi_chan->mhi_dev = mhi_dev; in mhi_ep_create_device()
1239 mhi_dev->dl_chan = mhi_chan; in mhi_ep_create_device()
1240 get_device(&mhi_dev->dev); in mhi_ep_create_device()
1241 mhi_chan->mhi_dev = mhi_dev; in mhi_ep_create_device()
1244 mhi_dev->name = mhi_chan->name; in mhi_ep_create_device()
1245 ret = dev_set_name(&mhi_dev->dev, "%s_%s", in mhi_ep_create_device()
1246 dev_name(&mhi_cntrl->mhi_dev->dev), in mhi_ep_create_device()
1247 mhi_dev->name); in mhi_ep_create_device()
1249 put_device(&mhi_dev->dev); in mhi_ep_create_device()
1253 ret = device_add(&mhi_dev->dev); in mhi_ep_create_device()
1255 put_device(&mhi_dev->dev); in mhi_ep_create_device()
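
mhi_ep_create_device() is called only for the even (UL) member of a pair and requires the adjacent odd (DL) channel, reached here as mhi_chan[1], to carry the same name before binding both to one device. The adjacency check condensed into a sketch (stand-in struct; the channel names are examples of real MHI channel strings):

#include <stdio.h>
#include <string.h>

struct chan { const char *name; };

/* ch_id must be the even (UL) member; its DL partner is ch_id + 1. */
static int check_pair(const struct chan *chans, unsigned int ch_id)
{
	const struct chan *ul = &chans[ch_id], *dl = &chans[ch_id + 1];

	if (strcmp(ul->name, dl->name)) {
		fprintf(stderr, "UL and DL channel names don't match: (%s) != (%s)\n",
			ul->name, dl->name);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct chan chans[] = { { "LOOPBACK" }, { "LOOPBACK" },
				{ "SAHARA" }, { "IP_SW0" } };

	printf("pair 0: %d\n", check_pair(chans, 0));	/* names match */
	printf("pair 2: %d\n", check_pair(chans, 2));	/* mismatched pair */
	return 0;
}
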
1266 if (dev->bus != &mhi_ep_bus_type) in mhi_ep_destroy_device()
1270 mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_ep_destroy_device()
1273 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_ep_destroy_device()
1276 ul_chan = mhi_dev->ul_chan; in mhi_ep_destroy_device()
1277 dl_chan = mhi_dev->dl_chan; in mhi_ep_destroy_device()
1280 put_device(&ul_chan->mhi_dev->dev); in mhi_ep_destroy_device()
1283 put_device(&dl_chan->mhi_dev->dev); in mhi_ep_destroy_device()
1285 dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", in mhi_ep_destroy_device()
1286 mhi_dev->name); in mhi_ep_destroy_device()
1299 struct device *dev = mhi_cntrl->cntrl_dev; in mhi_ep_chan_init()
1301 int ret = -EINVAL; in mhi_ep_chan_init()
1303 mhi_cntrl->max_chan = config->max_channels; in mhi_ep_chan_init()
1309 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), in mhi_ep_chan_init()
1311 if (!mhi_cntrl->mhi_chan) in mhi_ep_chan_init()
1312 return -ENOMEM; in mhi_ep_chan_init()
1314 for (i = 0; i < config->num_channels; i++) { in mhi_ep_chan_init()
1317 ch_cfg = &config->ch_cfg[i]; in mhi_ep_chan_init()
1319 chan = ch_cfg->num; in mhi_ep_chan_init()
1320 if (chan >= mhi_cntrl->max_chan) { in mhi_ep_chan_init()
1322 chan, mhi_cntrl->max_chan); in mhi_ep_chan_init()
1326 /* Bi-directional and directionless channels are not supported */ in mhi_ep_chan_init()
1327 if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { in mhi_ep_chan_init()
1329 ch_cfg->dir, chan); in mhi_ep_chan_init()
1333 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in mhi_ep_chan_init()
1334 mhi_chan->name = ch_cfg->name; in mhi_ep_chan_init()
1335 mhi_chan->chan = chan; in mhi_ep_chan_init()
1336 mhi_chan->dir = ch_cfg->dir; in mhi_ep_chan_init()
1337 mutex_init(&mhi_chan->lock); in mhi_ep_chan_init()
1343 kfree(mhi_cntrl->mhi_chan); in mhi_ep_chan_init()
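
mhi_ep_chan_init() validates each entry of the controller's static channel table: the ID must fall below max_channels and the direction must be a single direction. A condensed userspace version of that loop; the enum values mirror include/linux/dma-direction.h:

#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

struct ch_cfg { unsigned int num; enum dma_data_direction dir; };

static int validate_channels(const struct ch_cfg *cfg, int n, unsigned int max_chan)
{
	for (int i = 0; i < n; i++) {
		if (cfg[i].num >= max_chan) {
			fprintf(stderr, "Channel %u not available, max: %u\n",
				cfg[i].num, max_chan);
			return -1;
		}
		/* Bi-directional and directionless channels are not supported */
		if (cfg[i].dir == DMA_BIDIRECTIONAL || cfg[i].dir == DMA_NONE) {
			fprintf(stderr, "Invalid direction %d for channel %u\n",
				cfg[i].dir, cfg[i].num);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	const struct ch_cfg ok[] = { { 0, DMA_TO_DEVICE }, { 1, DMA_FROM_DEVICE } };
	const struct ch_cfg bad[] = { { 2, DMA_BIDIRECTIONAL } };

	printf("%d %d\n", validate_channels(ok, 2, 128),
	       validate_channels(bad, 1, 128));
	return 0;
}
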
1358 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) in mhi_ep_register_controller()
1359 return -EINVAL; in mhi_ep_register_controller()
1365 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); in mhi_ep_register_controller()
1366 if (!mhi_cntrl->mhi_cmd) { in mhi_ep_register_controller()
1367 ret = -ENOMEM; in mhi_ep_register_controller()
1371 INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); in mhi_ep_register_controller()
1372 INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); in mhi_ep_register_controller()
1373 INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); in mhi_ep_register_controller()
1374 INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); in mhi_ep_register_controller()
1376 mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); in mhi_ep_register_controller()
1377 if (!mhi_cntrl->wq) { in mhi_ep_register_controller()
1378 ret = -ENOMEM; in mhi_ep_register_controller()
1382 INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); in mhi_ep_register_controller()
1383 INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); in mhi_ep_register_controller()
1384 spin_lock_init(&mhi_cntrl->state_lock); in mhi_ep_register_controller()
1385 spin_lock_init(&mhi_cntrl->list_lock); in mhi_ep_register_controller()
1386 mutex_init(&mhi_cntrl->event_lock); in mhi_ep_register_controller()
1389 mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); in mhi_ep_register_controller()
1397 mhi_cntrl->index = ret; in mhi_ep_register_controller()
1399 irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); in mhi_ep_register_controller()
1400 ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, in mhi_ep_register_controller()
1403 dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); in mhi_ep_register_controller()
1410 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); in mhi_ep_register_controller()
1415 ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); in mhi_ep_register_controller()
1419 mhi_dev->name = dev_name(&mhi_dev->dev); in mhi_ep_register_controller()
1420 mhi_cntrl->mhi_dev = mhi_dev; in mhi_ep_register_controller()
1422 ret = device_add(&mhi_dev->dev); in mhi_ep_register_controller()
1426 dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); in mhi_ep_register_controller()
1431 put_device(&mhi_dev->dev); in mhi_ep_register_controller()
1433 free_irq(mhi_cntrl->irq, mhi_cntrl); in mhi_ep_register_controller()
1435 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); in mhi_ep_register_controller()
1437 destroy_workqueue(mhi_cntrl->wq); in mhi_ep_register_controller()
1439 kfree(mhi_cntrl->mhi_cmd); in mhi_ep_register_controller()
1441 kfree(mhi_cntrl->mhi_chan); in mhi_ep_register_controller()
1453 struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; in mhi_ep_unregister_controller()
1455 destroy_workqueue(mhi_cntrl->wq); in mhi_ep_unregister_controller()
1457 free_irq(mhi_cntrl->irq, mhi_cntrl); in mhi_ep_unregister_controller()
1459 kfree(mhi_cntrl->mhi_cmd); in mhi_ep_unregister_controller()
1460 kfree(mhi_cntrl->mhi_chan); in mhi_ep_unregister_controller()
1462 device_del(&mhi_dev->dev); in mhi_ep_unregister_controller()
1463 put_device(&mhi_dev->dev); in mhi_ep_unregister_controller()
1465 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); in mhi_ep_unregister_controller()
1472 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); in mhi_ep_driver_probe()
1473 struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; in mhi_ep_driver_probe()
1474 struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; in mhi_ep_driver_probe()
1476 ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; in mhi_ep_driver_probe()
1477 dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; in mhi_ep_driver_probe()
1479 return mhi_drv->probe(mhi_dev, mhi_dev->id); in mhi_ep_driver_probe()
1485 struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); in mhi_ep_driver_remove()
1491 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_ep_driver_remove()
1496 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_ep_driver_remove()
1501 mutex_lock(&mhi_chan->lock); in mhi_ep_driver_remove()
1503 if (mhi_chan->xfer_cb) { in mhi_ep_driver_remove()
1504 result.transaction_status = -ENOTCONN; in mhi_ep_driver_remove()
1506 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); in mhi_ep_driver_remove()
1509 mhi_chan->state = MHI_CH_STATE_DISABLED; in mhi_ep_driver_remove()
1510 mhi_chan->xfer_cb = NULL; in mhi_ep_driver_remove()
1511 mutex_unlock(&mhi_chan->lock); in mhi_ep_driver_remove()
1515 mhi_drv->remove(mhi_dev); in mhi_ep_driver_remove()
1522 struct device_driver *driver = &mhi_drv->driver; in __mhi_ep_driver_register()
1524 if (!mhi_drv->probe || !mhi_drv->remove) in __mhi_ep_driver_register()
1525 return -EINVAL; in __mhi_ep_driver_register()
1528 if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) in __mhi_ep_driver_register()
1529 return -EINVAL; in __mhi_ep_driver_register()
1531 driver->bus = &mhi_ep_bus_type; in __mhi_ep_driver_register()
1532 driver->owner = owner; in __mhi_ep_driver_register()
1533 driver->probe = mhi_ep_driver_probe; in __mhi_ep_driver_register()
1534 driver->remove = mhi_ep_driver_remove; in __mhi_ep_driver_register()
1542 driver_unregister(&mhi_drv->driver); in mhi_ep_driver_unregister()
1551 mhi_dev->name); in mhi_ep_uevent()
1564 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_ep_match()
1567 for (id = mhi_drv->id_table; id->chan[0]; id++) in mhi_ep_match()
1568 if (!strcmp(mhi_dev->name, id->chan)) { in mhi_ep_match()
1569 mhi_dev->id = id; in mhi_ep_match()
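
mhi_ep_match() walks the driver's id_table until the zeroed terminator entry and binds on the first channel-name match. A minimal userspace equivalent, with the id struct reduced to just the name field:

#include <stdio.h>
#include <string.h>

struct device_id { char chan[32]; };

/* Return the matching table entry, or NULL at the zeroed terminator. */
static const struct device_id *match_id(const struct device_id *id_table,
					const char *name)
{
	for (const struct device_id *id = id_table; id->chan[0]; id++)
		if (!strcmp(name, id->chan))
			return id;
	return NULL;
}

int main(void)
{
	static const struct device_id table[] = {
		{ .chan = "LOOPBACK" },
		{ .chan = "IP_SW0" },
		{ "" }	/* terminator */
	};

	printf("%s\n", match_id(table, "IP_SW0") ? "bound" : "no driver");
	printf("%s\n", match_id(table, "DIAG") ? "bound" : "no driver");
	return 0;
}
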