Lines Matching full:channel
53 * of data to or from the IPA. A channel is implemented as a ring buffer,
60 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
64 * Each channel has a GSI "event ring" associated with it. An event ring
65 * is implemented very much like a channel ring, but is always directed from
66 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
67 * events by adding an entry to the event ring associated with the channel.
70 * to the channel TRE whose completion the event represents.
72 * Each TRE in a channel ring has a set of flags. One flag indicates whether
74 * an interrupt) in the channel's event ring. Other flags allow transfer
77 * to signal completion of channel transfers.
79 * Elements in channel and event rings are completed (or consumed) strictly
116 * on a channel (in bytes). This determines the amount of prefetch
118 * the TLV FIFO for the channel.
121 * should update the channel doorbell. We configure this to equal
132 /** gsi_channel_scratch - channel scratch configuration area
153 /* Code assumes the sizes of channel and event ring elements are in gsi_validate_build()
165 /* The channel element size must fit in this field */ in gsi_validate_build()
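The comment fragments above describe a channel as a ring of TREs whose writer advances an index and then rings a doorbell reporting the first unfilled entry (see also the gsi_channel_doorbell() fragment further down). The stand-alone sketch below models only that index arithmetic; the demo_* names are invented here, and the real driver derives a ring address from the index before writing the doorbell register.

	#include <stdint.h>
	#include <stdio.h>

	struct demo_ring {
		uint32_t count;		/* number of ring elements; a power of 2 */
		uint32_t index;		/* index of the first unfilled element */
	};

	/* Queue n elements and return the index the doorbell would report:
	 * the first element not yet handed to the reader, modulo the ring size.
	 */
	static uint32_t demo_ring_fill(struct demo_ring *ring, uint32_t n)
	{
		ring->index = (ring->index + n) % ring->count;

		return ring->index;
	}

	int main(void)
	{
		struct demo_ring ring = { .count = 8, .index = 0 };

		printf("doorbell after queueing 5 TREs: %u\n", demo_ring_fill(&ring, 5));
		printf("doorbell after 5 more (wraps):  %u\n", demo_ring_fill(&ring, 5));

		return 0;
	}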
172 /* Return the channel id associated with a given channel */
173 static u32 gsi_channel_id(struct gsi_channel *channel) in gsi_channel_id() argument
175 return channel - &channel->gsi->channel[0]; in gsi_channel_id()
178 /* An initialized channel has a non-null GSI pointer */
179 static bool gsi_channel_initialized(struct gsi_channel *channel) in gsi_channel_initialized() argument
181 return !!channel->gsi; in gsi_channel_initialized()
227 /* Channel commands are performed one at a time. Their completion is
228 * signaled by the channel control GSI interrupt type, which is only
229 * enabled when we issue a channel command. Only the channel being
246 /* Disable channel control interrupts */
262 /* Enable the interrupt type if this is the first channel enabled */ in gsi_irq_ieob_enable_one()
273 /* Disable the interrupt type if this was the last enabled channel */ in gsi_irq_ieob_disable()
465 /* Fetch the current state of a channel from hardware */
466 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) in gsi_channel_state() argument
468 u32 channel_id = gsi_channel_id(channel); in gsi_channel_state()
469 void __iomem *virt = channel->gsi->virt; in gsi_channel_state()
477 /* Issue a channel command and wait for it to complete */
479 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) in gsi_channel_command() argument
481 struct completion *completion = &channel->completion; in gsi_channel_command()
482 u32 channel_id = gsi_channel_id(channel); in gsi_channel_command()
483 struct gsi *gsi = channel->gsi; in gsi_channel_command()
500 dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", in gsi_channel_command()
501 opcode, channel_id, gsi_channel_state(channel)); in gsi_channel_command()
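The gsi_channel_command() fragments above show the issue-a-command-and-wait-for-completion pattern, with a timeout message that also reports the channel state. A stand-alone model of that shape follows; in the driver the wait blocks on channel->completion, which gsi_isr_chan_ctrl() (later in this listing) completes from the channel control interrupt, whereas this sketch substitutes a flag and a bounded polling loop so it can run by itself. All demo_* names are invented.

	#include <stdbool.h>
	#include <stdio.h>

	static bool demo_command_done;

	/* Stand-in for the channel control interrupt handler */
	static void demo_isr_chan_ctrl(void)
	{
		demo_command_done = true;
	}

	static void demo_hw_write_command(unsigned int channel_id, unsigned int opcode)
	{
		printf("issuing command %u for channel %u\n", opcode, channel_id);
		demo_isr_chan_ctrl();	/* pretend the command completes at once */
	}

	/* Issue a command, then wait (here: poll with a bounded count) for the
	 * completion; report a timeout if it never arrives.
	 */
	static int demo_channel_command(unsigned int channel_id, unsigned int opcode)
	{
		int tries = 1000;	/* stand-in for the driver's timeout */

		demo_command_done = false;
		demo_hw_write_command(channel_id, opcode);

		while (tries--)
			if (demo_command_done)
				return 0;

		fprintf(stderr, "command %u for channel %u timed out\n",
			opcode, channel_id);

		return -1;
	}

	int main(void)
	{
		return demo_channel_command(0, 1);
	}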
504 /* Allocate GSI channel in NOT_ALLOCATED state */
507 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_alloc_command() local
511 /* Get initial channel state */ in gsi_channel_alloc_command()
512 state = gsi_channel_state(channel); in gsi_channel_alloc_command()
514 dev_err(dev, "channel %u bad state %u before alloc\n", in gsi_channel_alloc_command()
519 gsi_channel_command(channel, GSI_CH_ALLOCATE); in gsi_channel_alloc_command()
521 /* If successful the channel state will have changed */ in gsi_channel_alloc_command()
522 state = gsi_channel_state(channel); in gsi_channel_alloc_command()
526 dev_err(dev, "channel %u bad state %u after alloc\n", in gsi_channel_alloc_command()
532 /* Start an ALLOCATED channel */
533 static int gsi_channel_start_command(struct gsi_channel *channel) in gsi_channel_start_command() argument
535 struct device *dev = channel->gsi->dev; in gsi_channel_start_command()
538 state = gsi_channel_state(channel); in gsi_channel_start_command()
541 dev_err(dev, "channel %u bad state %u before start\n", in gsi_channel_start_command()
542 gsi_channel_id(channel), state); in gsi_channel_start_command()
546 gsi_channel_command(channel, GSI_CH_START); in gsi_channel_start_command()
548 /* If successful the channel state will have changed */ in gsi_channel_start_command()
549 state = gsi_channel_state(channel); in gsi_channel_start_command()
553 dev_err(dev, "channel %u bad state %u after start\n", in gsi_channel_start_command()
554 gsi_channel_id(channel), state); in gsi_channel_start_command()
559 /* Stop a GSI channel in STARTED state */
560 static int gsi_channel_stop_command(struct gsi_channel *channel) in gsi_channel_stop_command() argument
562 struct device *dev = channel->gsi->dev; in gsi_channel_stop_command()
565 state = gsi_channel_state(channel); in gsi_channel_stop_command()
567 /* Channel could have entered STOPPED state since last call in gsi_channel_stop_command()
575 dev_err(dev, "channel %u bad state %u before stop\n", in gsi_channel_stop_command()
576 gsi_channel_id(channel), state); in gsi_channel_stop_command()
580 gsi_channel_command(channel, GSI_CH_STOP); in gsi_channel_stop_command()
582 /* If successful the channel state will have changed */ in gsi_channel_stop_command()
583 state = gsi_channel_state(channel); in gsi_channel_stop_command()
591 dev_err(dev, "channel %u bad state %u after stop\n", in gsi_channel_stop_command()
592 gsi_channel_id(channel), state); in gsi_channel_stop_command()
597 /* Reset a GSI channel in ALLOCATED or ERROR state. */
598 static void gsi_channel_reset_command(struct gsi_channel *channel) in gsi_channel_reset_command() argument
600 struct device *dev = channel->gsi->dev; in gsi_channel_reset_command()
606 state = gsi_channel_state(channel); in gsi_channel_reset_command()
609 /* No need to reset a channel already in ALLOCATED state */ in gsi_channel_reset_command()
611 dev_err(dev, "channel %u bad state %u before reset\n", in gsi_channel_reset_command()
612 gsi_channel_id(channel), state); in gsi_channel_reset_command()
616 gsi_channel_command(channel, GSI_CH_RESET); in gsi_channel_reset_command()
618 /* If successful the channel state will have changed */ in gsi_channel_reset_command()
619 state = gsi_channel_state(channel); in gsi_channel_reset_command()
621 dev_err(dev, "channel %u bad state %u after reset\n", in gsi_channel_reset_command()
622 gsi_channel_id(channel), state); in gsi_channel_reset_command()
625 /* Deallocate an ALLOCATED GSI channel */
628 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_de_alloc_command() local
632 state = gsi_channel_state(channel); in gsi_channel_de_alloc_command()
634 dev_err(dev, "channel %u bad state %u before dealloc\n", in gsi_channel_de_alloc_command()
639 gsi_channel_command(channel, GSI_CH_DE_ALLOC); in gsi_channel_de_alloc_command()
641 /* If successful the channel state will have changed */ in gsi_channel_de_alloc_command()
642 state = gsi_channel_state(channel); in gsi_channel_de_alloc_command()
645 dev_err(dev, "channel %u bad state %u after dealloc\n", in gsi_channel_de_alloc_command()
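Taken together, the alloc/start/stop/reset/dealloc helpers above each require a particular channel state before issuing their command and verify the state afterward. The sketch below summarizes that state machine as it can be read from these comments; the driver's real enum names differ and it may recognize additional states (and, per the resume fragment later on, a STOPPED channel can be started again), so treat this purely as a reading aid.

	#include <stdio.h>

	enum demo_state { NOT_ALLOCATED, ALLOCATED, STARTED, STOPPED, ERROR };

	struct demo_transition {
		const char *command;
		enum demo_state before;	/* state required before the command */
		enum demo_state after;	/* state expected after it completes */
	};

	static const struct demo_transition demo_transitions[] = {
		{ "ALLOCATE", NOT_ALLOCATED, ALLOCATED },
		{ "START",    ALLOCATED,     STARTED },	/* a STOPPED channel may also be restarted */
		{ "STOP",     STARTED,       STOPPED },	/* an already-STOPPED channel is tolerated */
		{ "RESET",    ERROR,         ALLOCATED },	/* a channel already ALLOCATED needs no reset */
		{ "DE_ALLOC", ALLOCATED,     NOT_ALLOCATED },
	};

	int main(void)
	{
		static const char * const names[] = {
			"NOT_ALLOCATED", "ALLOCATED", "STARTED", "STOPPED", "ERROR",
		};
		size_t i;

		for (i = 0; i < sizeof(demo_transitions) / sizeof(demo_transitions[0]); i++)
			printf("%-9s %-13s -> %s\n", demo_transitions[i].command,
			       names[demo_transitions[i].before],
			       names[demo_transitions[i].after]);

		return 0;
	}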
693 val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */ in gsi_evt_ring_program()
709 /* Find the transaction whose completion indicates a channel is quiesced */
710 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) in gsi_channel_trans_last() argument
712 struct gsi_trans_info *trans_info = &channel->trans_info; in gsi_channel_trans_last()
721 if (channel->toward_ipa) { in gsi_channel_trans_last()
751 /* Wait for transaction activity on a channel to complete */
752 static void gsi_channel_trans_quiesce(struct gsi_channel *channel) in gsi_channel_trans_quiesce() argument
757 trans = gsi_channel_trans_last(channel); in gsi_channel_trans_quiesce()
764 /* Program a channel for use; there is no gsi_channel_deprogram() */
765 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) in gsi_channel_program() argument
767 size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; in gsi_channel_program()
768 u32 channel_id = gsi_channel_id(channel); in gsi_channel_program()
771 struct gsi *gsi = channel->gsi; in gsi_channel_program()
775 /* Arbitrarily pick TRE 0 as the first channel element to use */ in gsi_channel_program()
776 channel->tre_ring.index = 0; in gsi_channel_program()
780 if (channel->toward_ipa) in gsi_channel_program()
782 val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); in gsi_channel_program()
790 * high-order 32 bits of the address of the channel ring, in gsi_channel_program()
793 val = lower_32_bits(channel->tre_ring.addr); in gsi_channel_program()
795 val = upper_32_bits(channel->tre_ring.addr); in gsi_channel_program()
798 /* Command channel gets low weighted round-robin priority */ in gsi_channel_program()
799 if (channel->command) in gsi_channel_program()
810 * on all but the AP command channel. in gsi_channel_program()
812 if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { in gsi_channel_program()
852 static int __gsi_channel_start(struct gsi_channel *channel, bool resume) in __gsi_channel_start() argument
854 struct gsi *gsi = channel->gsi; in __gsi_channel_start()
863 ret = gsi_channel_start_command(channel); in __gsi_channel_start()
870 /* Start an allocated GSI channel */
873 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_start() local
877 napi_enable(&channel->napi); in gsi_channel_start()
878 gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); in gsi_channel_start()
880 ret = __gsi_channel_start(channel, false); in gsi_channel_start()
882 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); in gsi_channel_start()
883 napi_disable(&channel->napi); in gsi_channel_start()
889 static int gsi_channel_stop_retry(struct gsi_channel *channel) in gsi_channel_stop_retry() argument
895 ret = gsi_channel_stop_command(channel); in gsi_channel_stop_retry()
904 static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend) in __gsi_channel_stop() argument
906 struct gsi *gsi = channel->gsi; in __gsi_channel_stop()
910 gsi_channel_trans_quiesce(channel); in __gsi_channel_stop()
918 ret = gsi_channel_stop_retry(channel); in __gsi_channel_stop()
925 /* Stop a started channel */
928 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_stop() local
931 ret = __gsi_channel_stop(channel, false); in gsi_channel_stop()
936 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); in gsi_channel_stop()
937 napi_disable(&channel->napi); in gsi_channel_stop()
942 /* Reset and reconfigure a channel (possibly enabling the doorbell engine) */
945 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_reset() local
949 gsi_channel_reset_command(channel); in gsi_channel_reset()
951 if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) in gsi_channel_reset()
952 gsi_channel_reset_command(channel); in gsi_channel_reset()
954 gsi_channel_program(channel, doorbell); in gsi_channel_reset()
955 gsi_channel_trans_cancel_pending(channel); in gsi_channel_reset()
960 /* Stop a started channel for suspend */
963 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_suspend() local
966 ret = __gsi_channel_stop(channel, true); in gsi_channel_suspend()
971 napi_synchronize(&channel->napi); in gsi_channel_suspend()
976 /* Resume a suspended channel (starting if stopped) */
979 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_resume() local
981 return __gsi_channel_start(channel, true); in gsi_channel_resume()
997 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
998 * @channel: Channel for which to report
1004 * For each channel we track the number of transactions used and bytes of
1010 * Calling this each time we ring the channel doorbell allows us to
1014 void gsi_channel_tx_queued(struct gsi_channel *channel) in gsi_channel_tx_queued() argument
1019 byte_count = channel->byte_count - channel->queued_byte_count; in gsi_channel_tx_queued()
1020 trans_count = channel->trans_count - channel->queued_trans_count; in gsi_channel_tx_queued()
1021 channel->queued_byte_count = channel->byte_count; in gsi_channel_tx_queued()
1022 channel->queued_trans_count = channel->trans_count; in gsi_channel_tx_queued()
1024 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_queued()
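gsi_channel_tx_queued() reports only what was queued since the previous doorbell by subtracting the saved queued_* snapshots from the running byte and transaction totals, then refreshing the snapshots. Assuming the counters are unsigned, that subtraction remains correct even after the running totals wrap, as this small stand-alone example shows (demo_* names are invented, and a 32-bit counter is used only to force a wrap).

	#include <stdint.h>
	#include <stdio.h>

	struct demo_counts {
		uint32_t byte_count;		/* running total */
		uint32_t queued_byte_count;	/* snapshot taken at the last doorbell */
	};

	/* Return the bytes queued since the last doorbell and refresh the snapshot */
	static uint32_t demo_report_queued(struct demo_counts *c)
	{
		uint32_t delta = c->byte_count - c->queued_byte_count;

		c->queued_byte_count = c->byte_count;

		return delta;
	}

	int main(void)
	{
		struct demo_counts c = {
			.byte_count = UINT32_MAX - 10,
			.queued_byte_count = UINT32_MAX - 10,
		};

		c.byte_count += 100;	/* running total wraps past UINT32_MAX */
		printf("queued since last doorbell: %u bytes\n", demo_report_queued(&c));

		return 0;
	}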
1030 * @channel: Channel that has completed transmitting packets
1034 * over a TX channel since the given transaction was committed. Report this
1037 * At the time a transaction is committed, we record its channel's
1053 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans) in gsi_channel_tx_update() argument
1058 byte_count -= channel->compl_byte_count; in gsi_channel_tx_update()
1059 channel->compl_byte_count += byte_count; in gsi_channel_tx_update()
1060 trans_count -= channel->compl_trans_count; in gsi_channel_tx_update()
1061 channel->compl_trans_count += trans_count; in gsi_channel_tx_update()
1063 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_update()
1067 /* Channel control interrupt handler */
1077 struct gsi_channel *channel; in gsi_isr_chan_ctrl() local
1081 channel = &gsi->channel[channel_id]; in gsi_isr_chan_ctrl()
1083 complete(&channel->completion); in gsi_isr_chan_ctrl()
1107 /* Global channel error interrupt handler */
1112 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); in gsi_isr_glob_chan_err()
1113 complete(&gsi->channel[channel_id].completion); in gsi_isr_glob_chan_err()
1118 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", in gsi_isr_glob_chan_err()
1128 u32 channel_id = gsi_channel_id(evt_ring->channel); in gsi_isr_glob_evt_err()
1131 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", in gsi_isr_glob_evt_err()
1178 * have no way of knowing the channel's state at any given time. in gsi_isr_gp_int1()
1181 * when shutting down, but it's possible the channel isn't running in gsi_isr_gp_int1()
1183 * that case, but it's harmless (the channel is already halted). in gsi_isr_gp_int1()
1247 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); in gsi_isr_ieob()
1333 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, in gsi_event_trans() argument
1341 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); in gsi_event_trans()
1343 return gsi_channel_trans_mapped(channel, tre_index); in gsi_event_trans()
1348 * @evt_ring: Event ring associated with channel that received packets
1367 struct gsi_channel *channel = evt_ring->channel; in gsi_evt_ring_rx_update() local
1377 trans_info = &channel->trans_info; in gsi_evt_ring_rx_update()
1386 trans = gsi_event_trans(channel, event); in gsi_evt_ring_rx_update()
1406 channel->byte_count += byte_count; in gsi_evt_ring_rx_update()
1407 channel->trans_count++; in gsi_evt_ring_rx_update()
1461 /* Ring a channel doorbell, reporting the first un-filled entry */
1462 void gsi_channel_doorbell(struct gsi_channel *channel) in gsi_channel_doorbell() argument
1464 struct gsi_ring *tre_ring = &channel->tre_ring; in gsi_channel_doorbell()
1465 u32 channel_id = gsi_channel_id(channel); in gsi_channel_doorbell()
1466 struct gsi *gsi = channel->gsi; in gsi_channel_doorbell()
1475 static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel) in gsi_channel_update() argument
1477 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_update()
1478 struct gsi *gsi = channel->gsi; in gsi_channel_update()
1500 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); in gsi_channel_update()
1508 if (channel->toward_ipa) in gsi_channel_update()
1509 gsi_channel_tx_update(channel, trans); in gsi_channel_update()
1516 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); in gsi_channel_update()
1520 return gsi_channel_trans_complete(channel); in gsi_channel_update()
1524 * gsi_channel_poll_one() - Return a single completed transaction on a channel
1525 * @channel: Channel to be polled
1529 * This function returns the first entry on a channel's completed transaction
1535 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) in gsi_channel_poll_one() argument
1540 trans = gsi_channel_trans_complete(channel); in gsi_channel_poll_one()
1542 trans = gsi_channel_update(channel); in gsi_channel_poll_one()
1551 * gsi_channel_poll() - NAPI poll function for a channel
1552 * @napi: NAPI structure for the channel
1564 struct gsi_channel *channel; in gsi_channel_poll() local
1567 channel = container_of(napi, struct gsi_channel, napi); in gsi_channel_poll()
1571 trans = gsi_channel_poll_one(channel); in gsi_channel_poll()
1578 gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id); in gsi_channel_poll()
1597 /* Setup function for a single channel */
1600 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_setup_one() local
1601 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_setup_one()
1604 if (!gsi_channel_initialized(channel)) in gsi_channel_setup_one()
1617 gsi_channel_program(channel, true); in gsi_channel_setup_one()
1619 if (channel->toward_ipa) in gsi_channel_setup_one()
1620 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1623 netif_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1638 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_teardown_one() local
1639 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_teardown_one()
1641 if (!gsi_channel_initialized(channel)) in gsi_channel_teardown_one()
1644 netif_napi_del(&channel->napi); in gsi_channel_teardown_one()
1662 * halt a modem channel) and only from this function. So we in gsi_generic_command()
1686 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", in gsi_generic_command()
1709 dev_err(gsi->dev, "error %d halting modem channel %u\n", in gsi_modem_channel_halt()
1732 struct gsi_channel *channel = &gsi->channel[channel_id++]; in gsi_channel_setup() local
1734 if (!gsi_channel_initialized(channel)) in gsi_channel_setup()
1738 dev_err(gsi->dev, "channel %u not supported by hardware\n", in gsi_channel_setup()
1851 /* Get # supported channel and event rings; there is no gsi_ring_teardown() */
1939 /* Initialize a channel's event ring */
1940 static int gsi_channel_evt_ring_init(struct gsi_channel *channel) in gsi_channel_evt_ring_init() argument
1942 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_init()
1949 channel->evt_ring_id = ret; in gsi_channel_evt_ring_init()
1951 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; in gsi_channel_evt_ring_init()
1952 evt_ring->channel = channel; in gsi_channel_evt_ring_init()
1954 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); in gsi_channel_evt_ring_init()
1958 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", in gsi_channel_evt_ring_init()
1959 ret, gsi_channel_id(channel)); in gsi_channel_evt_ring_init()
1961 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); in gsi_channel_evt_ring_init()
1967 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) in gsi_channel_evt_ring_exit() argument
1969 u32 evt_ring_id = channel->evt_ring_id; in gsi_channel_evt_ring_exit()
1970 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_exit()
1996 /* Make sure channel ids are in the range driver supports */ in gsi_channel_data_valid()
1998 dev_err(dev, "bad channel id %u; must be less than %u\n", in gsi_channel_data_valid()
2008 if (!data->channel.tlv_count || in gsi_channel_data_valid()
2009 data->channel.tlv_count > GSI_TLV_MAX) { in gsi_channel_data_valid()
2010 dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n", in gsi_channel_data_valid()
2011 channel_id, data->channel.tlv_count, GSI_TLV_MAX); in gsi_channel_data_valid()
2020 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) { in gsi_channel_data_valid()
2021 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", in gsi_channel_data_valid()
2022 channel_id, data->channel.tlv_count, in gsi_channel_data_valid()
2023 data->channel.tre_count); in gsi_channel_data_valid()
2027 if (!is_power_of_2(data->channel.tre_count)) { in gsi_channel_data_valid()
2028 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", in gsi_channel_data_valid()
2029 channel_id, data->channel.tre_count); in gsi_channel_data_valid()
2033 if (!is_power_of_2(data->channel.event_count)) { in gsi_channel_data_valid()
2034 dev_err(dev, "channel %u bad event_count %u; not power of 2\n", in gsi_channel_data_valid()
2035 channel_id, data->channel.event_count); in gsi_channel_data_valid()
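The gsi_channel_data_valid() fragments above enforce four constraints on a channel's configuration data: tlv_count must be nonzero and no larger than GSI_TLV_MAX, tre_count must be at least 2 * tlv_count - 1, and both tre_count and event_count must be powers of 2. The stand-alone restatement below mirrors those checks; DEMO_TLV_MAX is a placeholder, since the real value of GSI_TLV_MAX is not shown in these fragments.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_TLV_MAX	10	/* placeholder for GSI_TLV_MAX */

	static bool demo_is_power_of_2(uint32_t n)
	{
		return n && !(n & (n - 1));
	}

	static bool demo_channel_data_valid(uint32_t tlv_count, uint32_t tre_count,
					    uint32_t event_count)
	{
		if (!tlv_count || tlv_count > DEMO_TLV_MAX)
			return false;			/* bad tlv_count */

		if (tre_count < 2 * tlv_count - 1)
			return false;			/* TLV count too big for TRE count */

		if (!demo_is_power_of_2(tre_count))
			return false;			/* bad tre_count */

		if (!demo_is_power_of_2(event_count))
			return false;			/* bad event_count */

		return true;
	}

	int main(void)
	{
		printf("tlv=3 tre=8 evt=8 -> %d\n", demo_channel_data_valid(3, 8, 8));
		printf("tlv=3 tre=4 evt=8 -> %d\n", demo_channel_data_valid(3, 4, 8));

		return 0;
	}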
2042 /* Init function for a single channel */
2047 struct gsi_channel *channel; in gsi_channel_init_one() local
2055 if (data->channel.tre_count > data->channel.event_count) { in gsi_channel_init_one()
2056 tre_count = data->channel.event_count; in gsi_channel_init_one()
2057 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", in gsi_channel_init_one()
2060 tre_count = data->channel.tre_count; in gsi_channel_init_one()
2063 channel = &gsi->channel[data->channel_id]; in gsi_channel_init_one()
2064 memset(channel, 0, sizeof(*channel)); in gsi_channel_init_one()
2066 channel->gsi = gsi; in gsi_channel_init_one()
2067 channel->toward_ipa = data->toward_ipa; in gsi_channel_init_one()
2068 channel->command = command; in gsi_channel_init_one()
2069 channel->tlv_count = data->channel.tlv_count; in gsi_channel_init_one()
2070 channel->tre_count = tre_count; in gsi_channel_init_one()
2071 channel->event_count = data->channel.event_count; in gsi_channel_init_one()
2072 init_completion(&channel->completion); in gsi_channel_init_one()
2074 ret = gsi_channel_evt_ring_init(channel); in gsi_channel_init_one()
2078 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); in gsi_channel_init_one()
2080 dev_err(gsi->dev, "error %d allocating channel %u ring\n", in gsi_channel_init_one()
2092 ret = ipa_cmd_pool_init(channel, tre_max); in gsi_channel_init_one()
2097 gsi_channel_trans_exit(channel); in gsi_channel_init_one()
2099 gsi_ring_free(gsi, &channel->tre_ring); in gsi_channel_init_one()
2101 gsi_channel_evt_ring_exit(channel); in gsi_channel_init_one()
2103 channel->gsi = NULL; /* Mark it not (fully) initialized */ in gsi_channel_init_one()
2109 static void gsi_channel_exit_one(struct gsi_channel *channel) in gsi_channel_exit_one() argument
2111 if (!gsi_channel_initialized(channel)) in gsi_channel_exit_one()
2114 if (channel->command) in gsi_channel_exit_one()
2115 ipa_cmd_pool_exit(channel); in gsi_channel_exit_one()
2116 gsi_channel_trans_exit(channel); in gsi_channel_exit_one()
2117 gsi_ring_free(channel->gsi, &channel->tre_ring); in gsi_channel_exit_one()
2118 gsi_channel_evt_ring_exit(channel); in gsi_channel_exit_one()
2164 gsi_channel_exit_one(&gsi->channel[data->channel_id]); in gsi_channel_init()
2176 gsi_channel_exit_one(&gsi->channel[channel_id]); in gsi_channel_exit()
2198 * for the channel NAPI contexts to be associated with. in gsi_init()
2259 /* The maximum number of outstanding TREs on a channel. This limits
2260 * a channel's maximum number of transactions outstanding (worst case
2263 * The absolute limit is the number of TREs in the channel's TRE ring,
2281 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_tre_max() local
2283 /* Hardware limit is channel->tre_count - 1 */ in gsi_channel_tre_max()
2284 return channel->tre_count - (channel->tlv_count - 1); in gsi_channel_tre_max()
2287 /* Returns the maximum number of TREs in a single transaction for a channel */
2290 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_trans_tre_max() local
2292 return channel->tlv_count; in gsi_channel_trans_tre_max()
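The two helpers above bound transaction usage: gsi_channel_tre_max() allows tre_count - (tlv_count - 1) outstanding TREs (the hardware ceiling being tre_count - 1, per the comment), and gsi_channel_trans_tre_max() caps a single transaction at tlv_count TREs. Requiring one maximally sized transaction to fit within that first limit rearranges to tre_count >= 2 * tlv_count - 1, which matches the check in gsi_channel_data_valid() earlier in this listing. A small numeric illustration with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tre_count = 256;	/* TRE ring size (power of 2) */
		unsigned int tlv_count = 16;	/* TLV FIFO size */

		unsigned int tre_max = tre_count - (tlv_count - 1);

		printf("outstanding TREs allowed:              %u\n", tre_max);
		printf("TREs in a single transaction (max):    %u\n", tlv_count);
		/* Worst case is one TRE per transaction, so tre_max is also the
		 * maximum number of outstanding transactions.
		 */
		printf("outstanding transactions (worst case): %u\n", tre_max);

		return 0;
	}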