Lines Matching +full:ipa +full:- +full:setup +full:- +full:ready
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2018-2020 Linaro Ltd.
26 * DOC: The IPA Generic Software Interface
28 * The generic software interface (GSI) is an integral component of the IPA,
29 * providing a well-defined communication layer between the AP subsystem
30 * and the IPA core. The modem uses the GSI layer as well.
32 *    --------             ---------
33 *    |      |             |       |
34 *    |  AP  +<---.   .----+ Modem |
35 *    |      +--. |   | .->+       |
36 *    |      |  | |   | |  |       |
37 *    --------  | |   | |  ---------
38 *              v |   v |
39 *            --+-+---+-+--
40 *            |    GSI    |
41 *            |-----------|
42 *            |           |
43 *            |    IPA    |
44 *            |           |
45 *            -------------
48 * (EEs), which are independent operating environments that use the IPA for
52 * of data to or from the IPA. A channel is implemented as a ring buffer,
53 * with a DRAM-resident array of "transfer elements" (TREs) available to
54 * describe transfers to or from other EEs through the IPA. A transfer
55 * element can also contain an immediate command, requesting the IPA perform
58 * Each TRE refers to a block of data--also located in DRAM. After writing one
59 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
65 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
83 * Note that all GSI registers are little-endian, which is the assumed
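Each ring element is a fixed-size TRE (GSI_RING_ELEMENT_SIZE bytes). As a rough sketch of what one carries (the real definition lives elsewhere in the driver; the field names below are illustrative assumptions, not the authoritative layout):

/* Illustrative sketch of a 16-byte transfer ring element (TRE).
 * Field names are assumptions based on the description above, not
 * necessarily the driver's exact definition.
 */
struct example_gsi_tre {
	__le64 addr;		/* DMA address of the data block in DRAM */
	__le16 len_opcode;	/* transfer length, or an immediate command opcode */
	__le16 reserved;
	__le32 flags;		/* chain/interrupt/type flags */
};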
88 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
167 /** gsi_channel_scratch_gpi - GPI protocol scratch register
186 /** gsi_channel_scratch - channel scratch configuration area
188 * The exact interpretation of this register is protocol-specific.
229 return channel - &channel->gsi->channel[0]; in gsi_channel_id()
236 gsi->event_enable_bitmap |= BIT(evt_ring_id); in gsi_irq_ieob_enable()
237 val = gsi->event_enable_bitmap; in gsi_irq_ieob_enable()
238 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_ieob_enable()
245 gsi->event_enable_bitmap &= ~BIT(evt_ring_id); in gsi_irq_ieob_disable()
246 val = gsi->event_enable_bitmap; in gsi_irq_ieob_disable()
247 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_ieob_disable()
255 /* We don't use inter-EE channel or event interrupts */ in gsi_irq_enable()
259 iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); in gsi_irq_enable()
261 val = GENMASK(gsi->channel_count - 1, 0); in gsi_irq_enable()
262 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); in gsi_irq_enable()
264 val = GENMASK(gsi->evt_ring_count - 1, 0); in gsi_irq_enable()
265 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); in gsi_irq_enable()
268 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_enable()
271 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); in gsi_irq_enable()
275 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); in gsi_irq_enable()
281 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); in gsi_irq_disable()
282 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); in gsi_irq_disable()
283 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_disable()
284 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); in gsi_irq_disable()
285 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); in gsi_irq_disable()
286 iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); in gsi_irq_disable()
293 return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE; in gsi_ring_virt()
296 /* Return the 32-bit DMA address associated with a ring index */
299 return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; in gsi_ring_addr()
302 /* Return the ring index of a 32-bit ring offset */
305 return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE; in gsi_ring_index()
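The three helpers above map between a ring index, the element's CPU virtual address, and the low 32 bits of its bus address. A sketch of the round trip, with invented numbers (16-byte elements, a 64-element ring at bus address 0x10000000):

/* Sketch only, not driver code: the index/address round trip for the
 * helpers above.  Numbers are invented: GSI_RING_ELEMENT_SIZE == 16,
 * ring->count == 64, ring->addr == 0x10000000.
 */
static void ring_index_math_example(struct gsi_ring *ring)
{
	u32 index = 70;			/* past the end; wraps modulo count */
	void *virt = gsi_ring_virt(ring, index);	/* ring->virt + (70 % 64) * 16 */
	u32 addr = gsi_ring_addr(ring, index % ring->count);
						/* 0x10000000 + 6 * 16 == 0x10000060 */

	/* gsi_ring_addr() does not wrap, so callers reduce the index first */
	WARN_ON(gsi_ring_index(ring, addr) != index % ring->count);
	(void)virt;
}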
317 iowrite32(val, gsi->virt + reg); in gsi_command()
328 val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); in gsi_evt_ring_state()
337 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in evt_ring_command()
338 struct completion *completion = &evt_ring->completion; in evt_ring_command()
339 struct device *dev = gsi->dev; in evt_ring_command()
349 opcode, evt_ring_id, evt_ring->state); in evt_ring_command()
351 return -ETIMEDOUT; in evt_ring_command()
357 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_alloc_command()
361 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_alloc_command()
362 if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { in gsi_evt_ring_alloc_command()
363 dev_err(gsi->dev, "bad event ring state %u before alloc\n", in gsi_evt_ring_alloc_command()
364 evt_ring->state); in gsi_evt_ring_alloc_command()
365 return -EINVAL; in gsi_evt_ring_alloc_command()
369 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { in gsi_evt_ring_alloc_command()
370 dev_err(gsi->dev, "bad event ring state %u after alloc\n", in gsi_evt_ring_alloc_command()
371 evt_ring->state); in gsi_evt_ring_alloc_command()
372 ret = -EIO; in gsi_evt_ring_alloc_command()
381 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_reset_command()
382 enum gsi_evt_ring_state state = evt_ring->state; in gsi_evt_ring_reset_command()
387 dev_err(gsi->dev, "bad event ring state %u before reset\n", in gsi_evt_ring_reset_command()
388 evt_ring->state); in gsi_evt_ring_reset_command()
393 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) in gsi_evt_ring_reset_command()
394 dev_err(gsi->dev, "bad event ring state %u after reset\n", in gsi_evt_ring_reset_command()
395 evt_ring->state); in gsi_evt_ring_reset_command()
398 /* Issue a hardware de-allocation request for an allocated event ring */
401 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_de_alloc_command()
404 if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { in gsi_evt_ring_de_alloc_command()
405 dev_err(gsi->dev, "bad event ring state %u before dealloc\n", in gsi_evt_ring_de_alloc_command()
406 evt_ring->state); in gsi_evt_ring_de_alloc_command()
411 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) in gsi_evt_ring_de_alloc_command()
412 dev_err(gsi->dev, "bad event ring state %u after dealloc\n", in gsi_evt_ring_de_alloc_command()
413 evt_ring->state); in gsi_evt_ring_de_alloc_command()
420 void *virt = channel->gsi->virt; in gsi_channel_state()
432 struct completion *completion = &channel->completion; in gsi_channel_command()
434 struct gsi *gsi = channel->gsi; in gsi_channel_command()
435 struct device *dev = gsi->dev; in gsi_channel_command()
447 return -ETIMEDOUT; in gsi_channel_command()
453 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_alloc_command()
454 struct device *dev = gsi->dev; in gsi_channel_alloc_command()
462 return -EINVAL; in gsi_channel_alloc_command()
471 ret = -EIO; in gsi_channel_alloc_command()
480 struct device *dev = channel->gsi->dev; in gsi_channel_start_command()
488 return -EINVAL; in gsi_channel_start_command()
497 ret = -EIO; in gsi_channel_start_command()
506 struct device *dev = channel->gsi->dev; in gsi_channel_stop_command()
521 return -EINVAL; in gsi_channel_stop_command()
533 return -EAGAIN; in gsi_channel_stop_command()
537 return -EIO; in gsi_channel_stop_command()
543 struct device *dev = channel->gsi->dev; in gsi_channel_reset_command()
567 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_de_alloc_command()
568 struct device *dev = gsi->dev; in gsi_channel_de_alloc_command()
593 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; in gsi_evt_ring_doorbell()
596 ring->index = index; /* Next unused entry */ in gsi_evt_ring_doorbell()
599 val = gsi_ring_addr(ring, (index - 1) % ring->count); in gsi_evt_ring_doorbell()
600 iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id)); in gsi_evt_ring_doorbell()
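Note that the doorbell is rung with the bus address of the last element written, not the next free slot; the ring's own index has already been advanced to the next unused entry. A worked example with invented numbers:

/* Illustration only: a 16-element event ring at bus address 0x20000000.
 * After filling the whole ring, index == 16 (next unused entry), so the
 * doorbell write above carries the address of element 15:
 *
 *	gsi_ring_addr(ring, (16 - 1) % 16) == 0x20000000 + 15 * 16
 *					   == 0x200000f0
 */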
606 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_program()
607 size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE; in gsi_evt_ring_program()
613 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
616 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
618 /* The context 2 and 3 registers store the low-order and in gsi_evt_ring_program()
619 * high-order 32 bits of the address of the event ring, in gsi_evt_ring_program()
622 val = evt_ring->ring.addr & GENMASK(31, 0); in gsi_evt_ring_program()
623 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
625 val = evt_ring->ring.addr >> 32; in gsi_evt_ring_program()
626 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
631 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
634 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
635 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
636 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
639 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
640 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
649 struct gsi_trans_info *trans_info = &channel->trans_info; in gsi_channel_trans_last()
652 spin_lock_bh(&trans_info->spinlock); in gsi_channel_trans_last()
654 if (!list_empty(&trans_info->complete)) in gsi_channel_trans_last()
655 trans = list_last_entry(&trans_info->complete, in gsi_channel_trans_last()
657 else if (!list_empty(&trans_info->polled)) in gsi_channel_trans_last()
658 trans = list_last_entry(&trans_info->polled, in gsi_channel_trans_last()
665 refcount_inc(&trans->refcount); in gsi_channel_trans_last()
667 spin_unlock_bh(&trans_info->spinlock); in gsi_channel_trans_last()
680 wait_for_completion(&trans->completion); in gsi_channel_trans_quiesce()
690 napi_disable(&channel->napi); in gsi_channel_freeze()
692 gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id); in gsi_channel_freeze()
698 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); in gsi_channel_thaw()
700 napi_enable(&channel->napi); in gsi_channel_thaw()
706 size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; in gsi_channel_program()
710 struct gsi *gsi = channel->gsi; in gsi_channel_program()
715 channel->tre_ring.index = 0; in gsi_channel_program()
719 if (channel->toward_ipa) in gsi_channel_program()
721 val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); in gsi_channel_program()
723 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); in gsi_channel_program()
726 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); in gsi_channel_program()
728 /* The context 2 and 3 registers store the low-order and in gsi_channel_program()
729 * high-order 32 bits of the address of the channel ring, in gsi_channel_program()
732 val = channel->tre_ring.addr & GENMASK(31, 0); in gsi_channel_program()
733 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); in gsi_channel_program()
735 val = channel->tre_ring.addr >> 32; in gsi_channel_program()
736 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); in gsi_channel_program()
738 /* Command channel gets low weighted round-robin priority */ in gsi_channel_program()
739 if (channel->command) in gsi_channel_program()
749 if (!channel->use_prefetch) in gsi_channel_program()
752 iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); in gsi_channel_program()
756 gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) * in gsi_channel_program()
758 gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; in gsi_channel_program()
761 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id)); in gsi_channel_program()
764 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id)); in gsi_channel_program()
767 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id)); in gsi_channel_program()
773 val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); in gsi_channel_program()
775 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); in gsi_channel_program()
788 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_start()
791 mutex_lock(&gsi->mutex); in gsi_channel_start()
795 mutex_unlock(&gsi->mutex); in gsi_channel_start()
805 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_stop()
812 retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES; in gsi_channel_stop()
814 mutex_lock(&gsi->mutex); in gsi_channel_stop()
818 if (ret != -EAGAIN) in gsi_channel_stop()
821 } while (retries--); in gsi_channel_stop()
823 mutex_unlock(&gsi->mutex); in gsi_channel_stop()
835 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_reset()
837 mutex_lock(&gsi->mutex); in gsi_channel_reset()
841 if (legacy && !channel->toward_ipa) in gsi_channel_reset()
847 mutex_unlock(&gsi->mutex); in gsi_channel_reset()
853 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_suspend()
866 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_resume()
877 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
899 byte_count = channel->byte_count - channel->queued_byte_count; in gsi_channel_tx_queued()
900 trans_count = channel->trans_count - channel->queued_trans_count; in gsi_channel_tx_queued()
901 channel->queued_byte_count = channel->byte_count; in gsi_channel_tx_queued()
902 channel->queued_trans_count = channel->trans_count; in gsi_channel_tx_queued()
904 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_queued()
909 * gsi_channel_tx_update() - Report completed TX transfers
927 * Calling this each time we learn of a newly-completed transaction
935 u64 byte_count = trans->byte_count + trans->len; in gsi_channel_tx_update()
936 u64 trans_count = trans->trans_count + 1; in gsi_channel_tx_update()
938 byte_count -= channel->compl_byte_count; in gsi_channel_tx_update()
939 channel->compl_byte_count += byte_count; in gsi_channel_tx_update()
940 trans_count -= channel->compl_trans_count; in gsi_channel_tx_update()
941 channel->compl_trans_count += trans_count; in gsi_channel_tx_update()
943 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_update()
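Taken together, the lines above report only the delta since the last report: the transaction carries snapshots of the channel byte and transaction counts taken when it was committed, and the channel's compl_* counters remember how much has already been reported. A worked example with invented numbers (assuming that snapshot interpretation of trans->byte_count and trans->trans_count):

/* Worked example (invented numbers) of the delta bookkeeping above.
 * A completing transaction snapshotted trans->byte_count == 3000 and
 * trans->trans_count == 8 when it was committed, and carries
 * trans->len == 500 bytes.  If the last report left
 * compl_byte_count == 2500 and compl_trans_count == 7:
 *
 *	byte_count  = (3000 + 500) - 2500 == 1000  newly completed bytes
 *	trans_count = (8 + 1)      - 7    ==    2  newly completed transactions
 *
 * The completed counters then advance to 3500 and 9, so the next report
 * starts from there.
 */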
952 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET); in gsi_isr_chan_ctrl()
953 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); in gsi_isr_chan_ctrl()
961 channel = &gsi->channel[channel_id]; in gsi_isr_chan_ctrl()
963 complete(&channel->completion); in gsi_isr_chan_ctrl()
972 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET); in gsi_isr_evt_ctrl()
973 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); in gsi_isr_evt_ctrl()
981 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_isr_evt_ctrl()
982 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_isr_evt_ctrl()
984 complete(&evt_ring->completion); in gsi_isr_evt_ctrl()
993 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); in gsi_isr_glob_chan_err()
994 complete(&gsi->channel[channel_id].completion); in gsi_isr_glob_chan_err()
999 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", in gsi_isr_glob_chan_err()
1008 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_isr_glob_evt_err()
1009 u32 channel_id = gsi_channel_id(evt_ring->channel); in gsi_isr_glob_evt_err()
1011 complete(&evt_ring->completion); in gsi_isr_glob_evt_err()
1012 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", in gsi_isr_glob_evt_err()
1018 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", in gsi_isr_glob_evt_err()
1032 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_isr_glob_err()
1033 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_isr_glob_err()
1034 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); in gsi_isr_glob_err()
1046 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); in gsi_isr_glob_err()
1055 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_isr_gp_int1()
1058 dev_err(gsi->dev, "global INT1 generic result %u\n", result); in gsi_isr_gp_int1()
1060 complete(&gsi->completion); in gsi_isr_gp_int1()
1063 /* Inter-EE interrupt handler */
1068 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); in gsi_isr_glob_ee()
1073 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); in gsi_isr_glob_ee()
1083 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); in gsi_isr_glob_ee()
1091 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); in gsi_isr_ieob()
1092 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); in gsi_isr_ieob()
1100 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); in gsi_isr_ieob()
1107 struct device *dev = gsi->dev; in gsi_isr_general()
1110 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); in gsi_isr_general()
1111 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); in gsi_isr_general()
1118 * gsi_isr() - Top level GSI interrupt service routine
1131 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { in gsi_isr()
1155 dev_err(gsi->dev, in gsi_isr()
1163 dev_err(gsi->dev, "interrupt flood\n"); in gsi_isr()
1179 tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); in gsi_event_trans()
1180 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); in gsi_event_trans()
1186 * gsi_evt_ring_rx_update() - Record lengths of received data
1206 struct gsi_channel *channel = evt_ring->channel; in gsi_evt_ring_rx_update()
1207 struct gsi_ring *ring = &evt_ring->ring; in gsi_evt_ring_rx_update()
1216 trans_info = &channel->trans_info; in gsi_evt_ring_rx_update()
1218 /* We'll start with the oldest un-processed event. RX channels in gsi_evt_ring_rx_update()
1219 * replenish receive buffers in single-TRE transactions, so we in gsi_evt_ring_rx_update()
1223 old_index = ring->index; in gsi_evt_ring_rx_update()
1230 event_avail = ring->count - old_index % ring->count; in gsi_evt_ring_rx_update()
1233 trans->len = __le16_to_cpu(event->len); in gsi_evt_ring_rx_update()
1234 byte_count += trans->len; in gsi_evt_ring_rx_update()
1237 if (--event_avail) in gsi_evt_ring_rx_update()
1241 trans = gsi_trans_pool_next(&trans_info->pool, trans); in gsi_evt_ring_rx_update()
1245 channel->byte_count += byte_count; in gsi_evt_ring_rx_update()
1246 channel->trans_count++; in gsi_evt_ring_rx_update()
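The event_avail computation above counts how many events can be consumed before the event ring wraps back to element zero. A worked example with invented numbers:

/* Worked example (numbers invented): a 16-element event ring with
 * old_index == 13 and 5 new events to process.
 *
 *	event_avail = 16 - 13 % 16 == 3	  events before the ring wraps
 *
 * The loop consumes the events at indices 13, 14 and 15; when
 * --event_avail reaches zero the cursor moves back to element 0 and the
 * remaining 2 events are taken from the start of the ring.
 */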
1253 struct device *dev = gsi->dev; in gsi_ring_alloc()
1257 ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); in gsi_ring_alloc()
1258 if (ring->virt && addr % size) { in gsi_ring_alloc()
1259 dma_free_coherent(dev, size, ring->virt, ring->addr); in gsi_ring_alloc()
1260 dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", in gsi_ring_alloc()
1262 return -EINVAL; /* Not a good error value, but distinct */ in gsi_ring_alloc()
1263 } else if (!ring->virt) { in gsi_ring_alloc()
1264 return -ENOMEM; in gsi_ring_alloc()
1266 ring->addr = addr; in gsi_ring_alloc()
1267 ring->count = count; in gsi_ring_alloc()
1272 /* Free a previously-allocated ring */
1275 size_t size = ring->count * GSI_RING_ELEMENT_SIZE; in gsi_ring_free()
1277 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); in gsi_ring_free()
1285 if (gsi->event_bitmap == ~0U) { in gsi_evt_ring_id_alloc()
1286 dev_err(gsi->dev, "event rings exhausted\n"); in gsi_evt_ring_id_alloc()
1287 return -ENOSPC; in gsi_evt_ring_id_alloc()
1290 evt_ring_id = ffz(gsi->event_bitmap); in gsi_evt_ring_id_alloc()
1291 gsi->event_bitmap |= BIT(evt_ring_id); in gsi_evt_ring_id_alloc()
1296 /* Free a previously-allocated event ring id */
1299 gsi->event_bitmap &= ~BIT(evt_ring_id); in gsi_evt_ring_id_free()
1302 /* Ring a channel doorbell, reporting the first un-filled entry */
1305 struct gsi_ring *tre_ring = &channel->tre_ring; in gsi_channel_doorbell()
1307 struct gsi *gsi = channel->gsi; in gsi_channel_doorbell()
1311 val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count); in gsi_channel_doorbell()
1312 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); in gsi_channel_doorbell()
1318 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_update()
1319 struct gsi *gsi = channel->gsi; in gsi_channel_update()
1326 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_update()
1327 ring = &evt_ring->ring; in gsi_channel_update()
1333 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); in gsi_channel_update()
1334 if (index == ring->index % ring->count) in gsi_channel_update()
1341 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); in gsi_channel_update()
1342 refcount_inc(&trans->refcount); in gsi_channel_update()
1349 if (channel->toward_ipa) in gsi_channel_update()
1357 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); in gsi_channel_update()
1363 * gsi_channel_poll_one() - Return a single completed transaction on a channel
1393 * gsi_channel_poll() - NAPI poll function for a channel
1421 napi_complete(&channel->napi); in gsi_channel_poll()
1422 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); in gsi_channel_poll()
1435 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); in gsi_event_bitmap_init()
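Event ring ids come from a small bitmap: gsi_event_bitmap_init() pre-marks every id at or above evt_ring_max as in use, so gsi_evt_ring_id_alloc() (above) can take the lowest clear bit with ffz() and never hand out an id the hardware doesn't implement. A worked example with an invented limit:

/* Worked example (illustrative): with evt_ring_max == 20, the initial
 * 32-bit bitmap has bits 20..31 set, so only ids 0..19 are allocatable.
 *
 *	bitmap == 0xfff00000		after gsi_event_bitmap_init(20)
 *	ffz(bitmap) == 0		first allocation returns id 0
 *	bitmap |= BIT(0)  ->  0xfff00001
 *	ffz(bitmap) == 1		next allocation returns id 1
 *
 * Once every usable bit is set the bitmap equals ~0U and allocation
 * fails with -ENOSPC.
 */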
1442 /* Setup function for event rings */
1454 /* Setup function for a single channel */
1458 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_setup_one()
1459 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_setup_one()
1462 if (!channel->gsi) in gsi_channel_setup_one()
1477 if (channel->toward_ipa) in gsi_channel_setup_one()
1478 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1481 netif_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1496 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_teardown_one()
1497 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_teardown_one()
1499 if (!channel->gsi) in gsi_channel_teardown_one()
1502 netif_napi_del(&channel->napi); in gsi_channel_teardown_one()
1513 struct completion *completion = &gsi->completion; in gsi_generic_command()
1517 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_generic_command()
1519 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_generic_command()
1529 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", in gsi_generic_command()
1532 return -ETIMEDOUT; in gsi_generic_command()
1547 dev_err(gsi->dev, "error %d halting modem channel %u\n", in gsi_modem_channel_halt()
1551 /* Setup function for channels */
1561 mutex_lock(&gsi->mutex); in gsi_channel_setup()
1567 } while (++channel_id < gsi->channel_count); in gsi_channel_setup()
1571 struct gsi_channel *channel = &gsi->channel[channel_id++]; in gsi_channel_setup()
1573 if (!channel->gsi) in gsi_channel_setup()
1576 dev_err(gsi->dev, "channel %u not supported by hardware\n", in gsi_channel_setup()
1577 channel_id - 1); in gsi_channel_setup()
1578 channel_id = gsi->channel_count; in gsi_channel_setup()
1583 mask = gsi->modem_channel_bitmap; in gsi_channel_setup()
1595 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1601 mask ^= gsi->modem_channel_bitmap; in gsi_channel_setup()
1611 while (channel_id--) in gsi_channel_setup()
1614 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1625 u32 mask = gsi->modem_channel_bitmap; in gsi_channel_teardown()
1628 mutex_lock(&gsi->mutex); in gsi_channel_teardown()
1638 channel_id = gsi->channel_count - 1; in gsi_channel_teardown()
1641 while (channel_id--); in gsi_channel_teardown()
1643 mutex_unlock(&gsi->mutex); in gsi_channel_teardown()
1649 /* Setup function for GSI. GSI firmware must be loaded and initialized */
1652 struct device *dev = gsi->dev; in gsi_setup()
1656 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); in gsi_setup()
1659 return -EIO; in gsi_setup()
1662 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); in gsi_setup()
1664 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); in gsi_setup()
1665 if (!gsi->channel_count) { in gsi_setup()
1667 return -EINVAL; in gsi_setup()
1669 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) { in gsi_setup()
1672 GSI_CHANNEL_COUNT_MAX, gsi->channel_count); in gsi_setup()
1673 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; in gsi_setup()
1676 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); in gsi_setup()
1677 if (!gsi->evt_ring_count) { in gsi_setup()
1679 return -EINVAL; in gsi_setup()
1681 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) { in gsi_setup()
1684 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count); in gsi_setup()
1685 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; in gsi_setup()
1689 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_setup()
1692 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); in gsi_setup()
1706 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_init()
1713 channel->evt_ring_id = ret; in gsi_channel_evt_ring_init()
1715 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; in gsi_channel_evt_ring_init()
1716 evt_ring->channel = channel; in gsi_channel_evt_ring_init()
1718 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); in gsi_channel_evt_ring_init()
1722 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", in gsi_channel_evt_ring_init()
1725 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); in gsi_channel_evt_ring_init()
1733 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_evt_ring_exit()
1734 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_exit()
1737 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_evt_ring_exit()
1738 gsi_ring_free(gsi, &evt_ring->ring); in gsi_channel_evt_ring_exit()
1747 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); in gsi_evt_ring_init()
1748 gsi->event_enable_bitmap = 0; in gsi_evt_ring_init()
1750 init_completion(&gsi->evt_ring[evt_ring_id].completion); in gsi_evt_ring_init()
1764 u32 channel_id = data->channel_id; in gsi_channel_data_valid()
1765 struct device *dev = gsi->dev; in gsi_channel_data_valid()
1774 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { in gsi_channel_data_valid()
1775 dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id); in gsi_channel_data_valid()
1779 if (!data->channel.tlv_count || in gsi_channel_data_valid()
1780 data->channel.tlv_count > GSI_TLV_MAX) { in gsi_channel_data_valid()
1782 channel_id, data->channel.tlv_count, GSI_TLV_MAX); in gsi_channel_data_valid()
1786 /* We have to allow at least one maximally-sized transaction to in gsi_channel_data_valid()
1791 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) { in gsi_channel_data_valid()
1793 channel_id, data->channel.tlv_count, in gsi_channel_data_valid()
1794 data->channel.tre_count); in gsi_channel_data_valid()
1798 if (!is_power_of_2(data->channel.tre_count)) { in gsi_channel_data_valid()
1800 channel_id, data->channel.tre_count); in gsi_channel_data_valid()
1804 if (!is_power_of_2(data->channel.event_count)) { in gsi_channel_data_valid()
1806 channel_id, data->channel.event_count); in gsi_channel_data_valid()
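A worked example of the validation constraints checked above, with an invented TLV count:

/* Worked example (invented numbers): with tlv_count == 20, a channel
 * needs
 *
 *	tre_count >= 2 * 20 - 1 == 39
 *
 * and both tre_count and event_count must be powers of 2, so the
 * smallest acceptable TRE ring would hold 64 elements.
 */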
1824 return -EINVAL; in gsi_channel_init_one()
1827 if (data->channel.tre_count > data->channel.event_count) { in gsi_channel_init_one()
1828 tre_count = data->channel.event_count; in gsi_channel_init_one()
1829 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", in gsi_channel_init_one()
1830 data->channel_id, tre_count); in gsi_channel_init_one()
1832 tre_count = data->channel.tre_count; in gsi_channel_init_one()
1835 channel = &gsi->channel[data->channel_id]; in gsi_channel_init_one()
1838 channel->gsi = gsi; in gsi_channel_init_one()
1839 channel->toward_ipa = data->toward_ipa; in gsi_channel_init_one()
1840 channel->command = command; in gsi_channel_init_one()
1841 channel->use_prefetch = command && prefetch; in gsi_channel_init_one()
1842 channel->tlv_count = data->channel.tlv_count; in gsi_channel_init_one()
1843 channel->tre_count = tre_count; in gsi_channel_init_one()
1844 channel->event_count = data->channel.event_count; in gsi_channel_init_one()
1845 init_completion(&channel->completion); in gsi_channel_init_one()
1851 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); in gsi_channel_init_one()
1853 dev_err(gsi->dev, "error %d allocating channel %u ring\n", in gsi_channel_init_one()
1854 ret, data->channel_id); in gsi_channel_init_one()
1858 ret = gsi_channel_trans_init(gsi, data->channel_id); in gsi_channel_init_one()
1863 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); in gsi_channel_init_one()
1872 gsi_ring_free(gsi, &channel->tre_ring); in gsi_channel_init_one()
1876 channel->gsi = NULL; /* Mark it not (fully) initialized */ in gsi_channel_init_one()
1884 if (!channel->gsi) in gsi_channel_exit_one()
1887 if (channel->command) in gsi_channel_exit_one()
1890 gsi_ring_free(channel->gsi, &channel->tre_ring); in gsi_channel_exit_one()
1914 gsi->modem_channel_bitmap |= in gsi_channel_init()
1927 while (i--) { in gsi_channel_init()
1931 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); in gsi_channel_init()
1934 gsi_channel_exit_one(&gsi->channel[data->channel_id]); in gsi_channel_init()
1944 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1; in gsi_channel_exit()
1947 gsi_channel_exit_one(&gsi->channel[channel_id]); in gsi_channel_exit()
1948 while (channel_id--); in gsi_channel_exit()
1949 gsi->modem_channel_bitmap = 0; in gsi_channel_exit()
1954 /* Init function for GSI. GSI hardware does not need to be "ready" */
1959 struct device *dev = &pdev->dev; in gsi_init()
1967 gsi->dev = dev; in gsi_init()
1973 init_dummy_netdev(&gsi->dummy_dev); in gsi_init()
1978 return ret ? : -EINVAL; in gsi_init()
1987 gsi->irq = irq; in gsi_init()
1993 ret = -ENODEV; in gsi_init()
1998 if (res->start > U32_MAX || size > U32_MAX - res->start) { in gsi_init()
2000 ret = -EINVAL; in gsi_init()
2004 gsi->virt = ioremap(res->start, size); in gsi_init()
2005 if (!gsi->virt) { in gsi_init()
2007 ret = -ENOMEM; in gsi_init()
2015 mutex_init(&gsi->mutex); in gsi_init()
2016 init_completion(&gsi->completion); in gsi_init()
2021 iounmap(gsi->virt); in gsi_init()
2023 free_irq(gsi->irq, gsi); in gsi_init()
2031 mutex_destroy(&gsi->mutex); in gsi_exit()
2033 free_irq(gsi->irq, gsi); in gsi_exit()
2034 iounmap(gsi->virt); in gsi_exit()
2045 * would be (tre_count - 1).
2053 * a pool to avoid crossing that power-of-2 boundary, and this can
2059 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_tre_max()
2061 /* Hardware limit is channel->tre_count - 1 */ in gsi_channel_tre_max()
2062 return channel->tre_count - (channel->tlv_count - 1); in gsi_channel_tre_max()
2068 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_trans_tre_max()
2070 return channel->tlv_count; in gsi_channel_trans_tre_max()
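To make the arithmetic of these two limits concrete, a worked example with an invented channel configuration:

/* Worked example (invented configuration): tre_count == 256 TREs in the
 * ring, tlv_count == 20 TREs per transaction at most.
 *
 *	gsi_channel_tre_max()	    == 256 - (20 - 1) == 237
 *	gsi_channel_trans_tre_max() == 20
 *
 * So up to 237 TREs may be outstanding on the channel at once, and no
 * single transaction may use more than 20 of them.
 */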