
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2020 Linaro Ltd.
12 #include <linux/dma-direction.h>
16 #include "ipa.h"
26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
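
A quick editorial derivation (not part of the file above) of what that overhead
macro works out to, assuming the usual skbuff.h definitions
SKB_WITH_OVERHEAD(X) == X - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and
SKB_MAX_ORDER(X, 0) == SKB_WITH_OVERHEAD(PAGE_SIZE - X):

	IPA_RX_BUFFER_OVERHEAD
		== PAGE_SIZE - SKB_WITH_OVERHEAD(PAGE_SIZE - NET_SKB_PAD)
		== NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

That is, the overhead is exactly the skb headroom plus the shared-info trailer:
the part of the one-page receive buffer that can never hold payload.
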
36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
42 /** enum ipa_status_opcode - status element opcode hardware values */
50 /** enum ipa_status_exception - status element exception type */
81 * IPA hardware as a number of KB. We don't use "hard byte in ipa_endpoint_validate_build()
102 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
107 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
113 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
114 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
117 data->endpoint_id); in ipa_endpoint_data_valid_one()
124 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
125 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
129 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
138 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
143 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
146 data->endpoint_id); in ipa_endpoint_data_valid_one()
151 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
152 /* ...make sure it has status enabled. */ in ipa_endpoint_data_valid_one()
153 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
155 "status not enabled for endpoint %u\n", in ipa_endpoint_data_valid_one()
156 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
162 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
163 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
167 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
175 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
183 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
187 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
208 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
212 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
217 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_data_valid()
225 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
233 /* Allocate a transaction to use on a non-command endpoint */
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
238 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
241 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
247 * Note that suspend is not supported starting with IPA v4.0.
252 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
258 /* Suspend is not supported for IPA v4.0+. Delay doesn't work in ipa_endpoint_init_ctrl()
259 * correctly on IPA v4.2. in ipa_endpoint_init_ctrl()
261 * if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
262 * assert(ipa->version != IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
264 * assert(ipa->version == IPA_VERSION_3_5_1); in ipa_endpoint_init_ctrl()
266 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; in ipa_endpoint_init_ctrl()
268 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
273 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
283 /* assert(endpoint->toward_ipa); */ in ipa_endpoint_program_delay()
285 /* Delay mode doesn't work properly for IPA v4.2 */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
292 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
297 /* assert(mask & ipa->available); */ in ipa_endpoint_aggr_active()
298 offset = ipa_reg_state_aggr_active_offset(ipa->version); in ipa_endpoint_aggr_active()
299 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_aggr_active()
306 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
309 /* assert(mask & ipa->available); */ in ipa_endpoint_force_close()
310 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); in ipa_endpoint_force_close()
314 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
319 * issue in IPA version 3.5.1 where the suspend interrupt will not be
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
326 if (!endpoint->data->aggregation) in ipa_endpoint_suspend_aggr()
336 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
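
A sketch of how the helpers above combine for this IPA v3.5.1 workaround
(reconstructed for illustration; the listing shows only fragments of the
function body): if the endpoint aggregates and currently has an aggregation
window open, force that window closed and then simulate the SUSPEND interrupt
the hardware fails to generate.

static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do unless an aggregation window is currently open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force the window closed, then emulate the suspend interrupt */
	ipa_endpoint_force_close(endpoint);
	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
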
339 /* Returns previous suspend state (true means suspend was enabled) */
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
346 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
348 /* assert(!endpoint->toward_ipa); */ in ipa_endpoint_program_suspend()
353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
363 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
367 /* DELAY mode doesn't work correctly on IPA v4.2 */ in ipa_endpoint_modem_pause_all()
368 if (ipa->version == IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
374 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
378 if (endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
386 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
388 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
393 * bound on that by assuming all initialized endpoints are modem->IPA. in ipa_endpoint_modem_exception_reset_all()
398 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
400 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
402 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
414 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
436 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_cfg()
440 if (endpoint->data->checksum) { in ipa_endpoint_init_cfg()
441 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
446 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
469 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
470 * packet size field, and we have the IPA hardware populate both for each
477 * The mux_id comes from a 4-byte metadata value supplied with each packet
479 * value that we want, in its low-order byte. A bitmask defined in the
487 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr()
490 if (endpoint->data->qmap) { in ipa_endpoint_init_hdr()
494 if (endpoint->toward_ipa && endpoint->data->checksum) in ipa_endpoint_init_hdr()
499 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
502 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
506 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
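
For reference, the QMAP header the comments above describe is the RmNet MAP
header: a byte of flag/pad bits, a one-byte mux_id, and a two-byte big-endian
packet length. The layout below is an illustrative sketch, not the canonical
definition (which lives in <linux/if_rmnet.h>), and the mask assumes the
mux_id sits in the low-order byte of the 4-byte metadata word, as the
ipa_endpoint_init_hdr() comment above notes; the names here are made up for
illustration.

struct qmap_header_sketch {		/* illustrative only */
	u8	pad_flags;	/* command/data flag and pad length */
	u8	mux_id;		/* selects the logical rmnet channel */
	__be16	pkt_len;	/* packet length, big-endian */
};

/* Picking the mux_id out of the modem-supplied metadata word */
#define QMAP_MUX_ID_MASK	0x000000ff	/* hypothetical name */
static inline u8 qmap_mux_id(u32 metadata)
{
	return metadata & QMAP_MUX_ID_MASK;
}
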
525 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr_ext()
526 u32 pad_align = endpoint->data->rx.pad_align; in ipa_endpoint_init_hdr_ext()
537 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
545 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
554 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
558 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
559 return; /* Register not valid for TX endpoints */ in ipa_endpoint_init_hdr_metadata_mask()
564 if (endpoint->data->qmap) in ipa_endpoint_init_hdr_metadata_mask()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
572 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_mode()
575 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
576 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_mode()
578 if (endpoint->data->dma_mode) { in ipa_endpoint_init_mode()
579 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; in ipa_endpoint_init_mode()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
601 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
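
The listing omits the tail of ipa_aggr_size_kb(); below is a sketch of the
whole conversion, on the assumption that the remaining step simply scales the
result to the 1 KB units the aggregation register expects.

static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* "Soft" byte limit: leave room for a full MTU of data plus
	 * skb overhead *after* the aggregation limit is crossed.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The hardware field is programmed in units of 1024 bytes */
	return rx_buffer_size / SZ_1K;
}
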
608 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_aggr()
611 if (endpoint->data->aggregation) { in ipa_endpoint_init_aggr()
612 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
627 if (endpoint->data->rx.aggr_close_eof) in ipa_endpoint_init_aggr()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
645 /* The head-of-line blocking timer is defined as a tick count, where each
646 * tick represents 128 cycles of the IPA core clock. Return the value
650 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) in ipa_reg_init_hol_block_timer_val() argument
663 rate = ipa_clock_rate(ipa); in ipa_reg_init_hol_block_timer_val()
665 /* ...but we still need to fit into a 32-bit register */ in ipa_reg_init_hol_block_timer_val()
668 /* IPA v3.5.1 just records the tick count */ in ipa_reg_init_hol_block_timer_val()
669 if (ipa->version == IPA_VERSION_3_5_1) in ipa_reg_init_hol_block_timer_val()
672 /* For IPA v4.2, the tick count is represented by base and in ipa_reg_init_hol_block_timer_val()
673 * scale fields within the 32-bit timer register, where: in ipa_reg_init_hol_block_timer_val()
682 scale = high > width ? high - width : 0; in ipa_reg_init_hol_block_timer_val()
685 ticks += 1 << (scale - 1); in ipa_reg_init_hol_block_timer_val()
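
A worked sketch of the tick conversion described above: with one tick equal to
128 IPA core clock cycles, a timeout of "microseconds" maps to
microseconds * rate / (128 * USEC_PER_SEC) ticks. The helper name and the use
of div_u64() are illustrative; the clock rate would come from ipa_clock_rate()
as in the fragment above.

/* Convert a microsecond timeout to HOL-block timer ticks (sketch) */
static u64 hol_block_ticks(u32 microseconds, u32 core_clock_hz)
{
	/* One tick is 128 core clock cycles; do the multiply in 64 bits
	 * to avoid intermediate overflow before dividing.
	 */
	return div_u64((u64)microseconds * core_clock_hz,
		       128 * USEC_PER_SEC);
}

For IPA v3.5.1 that raw tick count is written to the register directly; for
IPA v4.2 it is then re-encoded into the base and scale fields as the fragments
above show.
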
701 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
707 val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); in ipa_endpoint_init_hol_block_timer()
708 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_timer()
714 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_enable()
720 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
723 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
728 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
730 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
740 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_deaggr()
743 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
744 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_deaggr()
748 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ in ipa_endpoint_init_deaggr()
751 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
756 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_seq()
757 u32 seq_type = endpoint->seq_type; in ipa_endpoint_init_seq()
760 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
761 return; /* Register not valid for RX endpoints */ in ipa_endpoint_init_seq()
770 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
774 * ipa_endpoint_skb_tx() - Transmit a socket buffer
790 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
791 if (1 + nr_frags > endpoint->trans_tre_max) { in ipa_endpoint_skb_tx()
793 return -E2BIG; in ipa_endpoint_skb_tx()
799 return -EBUSY; in ipa_endpoint_skb_tx()
804 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
813 return -ENOMEM; in ipa_endpoint_skb_tx()
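
A reconstruction (for illustration) of the TRE budgeting the fragments above
imply: a transaction needs one TRE for the skb's linear data plus one per page
fragment, and if that exceeds what the endpoint allows per transaction the skb
is linearized into a single buffer first.

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;	/* can't linearize; give up */
		nr_frags = 0;		/* now a single linear buffer */
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;		/* no transaction available */
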
818 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
819 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
825 if (endpoint->data->status_enable) { in ipa_endpoint_status()
827 if (endpoint->toward_ipa) { in ipa_endpoint_status()
831 name = endpoint->data->tx.status_endpoint; in ipa_endpoint_status()
832 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
838 /* The next field is present for IPA v4.0 and above */ in ipa_endpoint_status()
842 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_status()
856 return -ENOMEM; in ipa_endpoint_replenish_one()
864 len = IPA_RX_BUFFER_SIZE - offset; in ipa_endpoint_replenish_one()
869 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
871 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { in ipa_endpoint_replenish_one()
873 endpoint->replenish_ready = 0; in ipa_endpoint_replenish_one()
885 return -ENOMEM; in ipa_endpoint_replenish_one()
889 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
902 if (!endpoint->replenish_enabled) { in ipa_endpoint_replenish()
904 atomic_add(count, &endpoint->replenish_saved); in ipa_endpoint_replenish()
909 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) in ipa_endpoint_replenish()
913 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
919 backlog = atomic_inc_return(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
922 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
930 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
931 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
932 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
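
A brief editorial note on the accounting the fragments above imply (an
inferred invariant, stated for illustration): replenish_backlog counts receive
buffers the endpoint still owes the hardware, and replenish_saved holds that
count while replenishing is disabled, so roughly

	posted_buffers + replenish_backlog + replenish_saved
		== gsi_channel_tre_max(gsi, endpoint->channel_id)

A backlog equal to the channel's full TRE count therefore means no buffer is
posted at all, which is exactly the case that schedules the delayed retry
above.
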
938 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
942 endpoint->replenish_enabled = true; in ipa_endpoint_replenish_enable()
943 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) in ipa_endpoint_replenish_enable()
944 atomic_add(saved, &endpoint->replenish_backlog); in ipa_endpoint_replenish_enable()
947 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); in ipa_endpoint_replenish_enable()
948 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) in ipa_endpoint_replenish_enable()
956 endpoint->replenish_enabled = false; in ipa_endpoint_replenish_disable()
957 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
958 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
979 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
980 skb->truesize += extra; in ipa_endpoint_skb_copy()
984 if (endpoint->netdev) in ipa_endpoint_skb_copy()
985 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
996 if (!endpoint->netdev) in ipa_endpoint_skb_build()
999 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ in ipa_endpoint_skb_build()
1008 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1034 if (!ipa_status_format_packet(status->opcode)) in ipa_endpoint_status_skip()
1036 if (!status->pkt_len) in ipa_endpoint_status_skip()
1038 endpoint_id = u32_get_bits(status->endp_dst_idx, in ipa_endpoint_status_skip()
1040 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1052 if (status->exception) in ipa_status_drop_packet()
1053 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; in ipa_status_drop_packet()
1056 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); in ipa_status_drop_packet()
1065 u32 unused = IPA_RX_BUFFER_SIZE - total_len; in ipa_endpoint_status_parse()
1074 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1083 resid -= sizeof(*status); in ipa_endpoint_status_parse()
1090 * account for that. And if checksum offload is enabled in ipa_endpoint_status_parse()
1094 align = endpoint->data->rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1095 len = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1097 if (endpoint->data->checksum) in ipa_endpoint_status_parse()
1107 u32 len2 = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1115 resid -= len; in ipa_endpoint_status_parse()
1133 if (trans->cancelled) in ipa_endpoint_rx_complete()
1137 page = trans->data; in ipa_endpoint_rx_complete()
1138 if (endpoint->data->status_enable) in ipa_endpoint_rx_complete()
1139 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_rx_complete()
1140 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_rx_complete()
1141 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_rx_complete()
1147 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1156 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1157 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1160 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1161 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1167 struct page *page = trans->data; in ipa_endpoint_trans_release()
1174 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1185 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); in ipa_endpoint_default_route_set()
1188 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1190 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1194 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1199 * taken to ensure the IPA pipeline is properly cleared.
1205 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1207 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1218 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1222 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1231 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1234 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1240 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1244 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1254 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1259 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1261 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1263 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1267 /* Finally, reset and reconfigure the channel again (re-enabling the in ipa_endpoint_reset_rx_aggr()
1272 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset_rx_aggr()
1273 gsi_channel_reset(gsi, endpoint->channel_id, legacy); in ipa_endpoint_reset_rx_aggr()
1280 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1293 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1294 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1299 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1303 * IPA v3.5.1 enables the doorbell engine. Newer versions do not. in ipa_endpoint_reset()
1305 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset()
1306 special = !endpoint->toward_ipa && endpoint->data->aggregation; in ipa_endpoint_reset()
1310 gsi_channel_reset(&ipa->gsi, channel_id, legacy); in ipa_endpoint_reset()
1313 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
1315 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1320 if (endpoint->toward_ipa) in ipa_endpoint_program()
1337 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1338 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1341 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1343 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1345 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1346 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1350 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1351 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1352 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1356 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1363 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1364 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1365 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1368 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1371 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1373 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1375 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1376 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1379 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1380 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1382 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1384 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1389 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1390 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1394 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1397 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1402 /* IPA v3.5.1 doesn't use channel stop for suspend */ in ipa_endpoint_suspend_one()
1403 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1404 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); in ipa_endpoint_suspend_one()
1407 endpoint->channel_id); in ipa_endpoint_suspend_one()
1412 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1413 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1417 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1420 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1423 /* IPA v3.5.1 doesn't use channel start for resume */ in ipa_endpoint_resume_one()
1424 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1425 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); in ipa_endpoint_resume_one()
1428 endpoint->channel_id); in ipa_endpoint_resume_one()
1429 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1433 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1435 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1438 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1439 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1441 ipa_cmd_tag_process(ipa); in ipa_endpoint_suspend()
1443 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1444 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1447 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1449 if (!ipa->setup_complete) in ipa_endpoint_resume()
1452 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1453 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1455 if (ipa->modem_netdev) in ipa_endpoint_resume()
1456 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1461 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1462 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1465 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1468 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); in ipa_endpoint_setup_one()
1469 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1473 endpoint->replenish_enabled = false; in ipa_endpoint_setup_one()
1474 atomic_set(&endpoint->replenish_saved, in ipa_endpoint_setup_one()
1475 gsi_channel_tre_max(gsi, endpoint->channel_id)); in ipa_endpoint_setup_one()
1476 atomic_set(&endpoint->replenish_backlog, 0); in ipa_endpoint_setup_one()
1477 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1483 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1488 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1490 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1491 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1496 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1498 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1500 ipa->set_up = 0; in ipa_endpoint_setup()
1506 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1510 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1512 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1519 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1521 ipa->set_up = 0; in ipa_endpoint_teardown()
1524 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1526 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1538 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); in ipa_endpoint_config()
1540 /* Our RX is an IPA producer */ in ipa_endpoint_config()
1546 return -EINVAL; in ipa_endpoint_config()
1548 rx_mask = GENMASK(max - 1, rx_base); in ipa_endpoint_config()
1550 /* Our TX is an IPA consumer */ in ipa_endpoint_config()
1552 tx_mask = GENMASK(max - 1, 0); in ipa_endpoint_config()
1554 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
1557 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1559 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1560 ret = -EINVAL; /* Report other errors too */ in ipa_endpoint_config()
1563 initialized = ipa->initialized; in ipa_endpoint_config()
1571 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1572 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { in ipa_endpoint_config()
1575 ret = -EINVAL; in ipa_endpoint_config()
1582 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1584 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1587 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
1592 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1594 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
1595 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1596 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1598 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1599 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1600 endpoint->seq_type = data->endpoint.seq_type; in ipa_endpoint_init_one()
1601 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1602 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1603 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1604 endpoint->data = &data->endpoint.config; in ipa_endpoint_init_one()
1606 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1611 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1616 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
1618 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1625 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1627 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1628 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1632 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
1638 if (!ipa_endpoint_data_valid(ipa, count, data)) in ipa_endpoint_init()
1641 ipa->initialized = 0; in ipa_endpoint_init()
1648 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
1650 if (data->endpoint.filter_support) in ipa_endpoint_init()
1651 filter_map |= BIT(data->endpoint_id); in ipa_endpoint_init()
1654 if (!ipa_filter_map_valid(ipa, filter_map)) in ipa_endpoint_init()
1657 return filter_map; /* Non-zero bitmask */ in ipa_endpoint_init()
1660 ipa_endpoint_exit(ipa); in ipa_endpoint_init()