Lines Matching "ipa-ap-to-modem"
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2022 Linaro Ltd.
12 #include <linux/dma-direction.h>
16 #include "ipa.h"
27 #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
30 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
32 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
37 /** enum ipa_status_opcode - status element opcode hardware values */
45 /** enum ipa_status_exception - status element exception type */
73 /* Compute the aggregation size value to use for a given buffer size */
81 * after that limit to receive a full MTU of data plus overhead. in ipa_aggr_size_kb()
84 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
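The two fragments above are the heart of ipa_aggr_size_kb(): with a soft aggregation limit, room must be left for one more full-MTU packet (plus per-buffer overhead) to arrive after the limit is crossed. A minimal sketch of the whole helper, assuming the remainder of the function only converts the adjusted size to kilobytes (the hardware field is expressed in KB):

	/* Sketch: soft limits reserve an extra MTU plus overhead before the
	 * byte count is converted to the kilobyte units the aggregation
	 * register expects.
	 */
	static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
	{
		if (!aggr_hard_limit)
			rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

		/* The byte limit is encoded as a number of kilobytes */
		return rx_buffer_size / SZ_1K;
	}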
91 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
96 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
102 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
109 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
112 data->endpoint_id); in ipa_endpoint_data_valid_one()
116 /* Nothing more to check for non-AP RX */ in ipa_endpoint_data_valid_one()
117 if (data->ee_id != GSI_EE_AP) in ipa_endpoint_data_valid_one()
120 rx_config = &data->endpoint.config.rx; in ipa_endpoint_data_valid_one()
123 buffer_size = rx_config->buffer_size; in ipa_endpoint_data_valid_one()
127 data->endpoint_id, buffer_size, limit); in ipa_endpoint_data_valid_one()
131 if (!data->endpoint.config.aggregation) { in ipa_endpoint_data_valid_one()
135 if (rx_config->aggr_time_limit) { in ipa_endpoint_data_valid_one()
138 data->endpoint_id); in ipa_endpoint_data_valid_one()
142 if (rx_config->aggr_hard_limit) { in ipa_endpoint_data_valid_one()
144 data->endpoint_id); in ipa_endpoint_data_valid_one()
148 if (rx_config->aggr_close_eof) { in ipa_endpoint_data_valid_one()
150 data->endpoint_id); in ipa_endpoint_data_valid_one()
154 return result; /* Nothing more to check */ in ipa_endpoint_data_valid_one()
163 aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_data_valid_one()
164 rx_config->aggr_hard_limit); in ipa_endpoint_data_valid_one()
165 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_data_valid_one()
170 data->endpoint_id, aggr_size, limit); in ipa_endpoint_data_valid_one()
175 return true; /* Nothing more to check for RX */ in ipa_endpoint_data_valid_one()
178 /* Starting with IPA v4.5 sequencer replication is obsolete */ in ipa_endpoint_data_valid_one()
179 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
180 if (data->endpoint.config.tx.seq_rep_type) { in ipa_endpoint_data_valid_one()
181 dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n", in ipa_endpoint_data_valid_one()
182 data->endpoint_id); in ipa_endpoint_data_valid_one()
187 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
188 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
192 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
201 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
205 /* ...and has to be an RX endpoint... */ in ipa_endpoint_data_valid_one()
206 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
209 data->endpoint_id); in ipa_endpoint_data_valid_one()
213 /* ...and if it's to be an AP endpoint... */ in ipa_endpoint_data_valid_one()
214 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
216 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
219 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
225 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
226 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
230 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
238 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
246 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
250 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
269 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
273 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
278 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_data_valid()
284 /* Allocate a transaction to use on a non-command endpoint */
288 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
289 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
292 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
298 * Note that suspend is not supported starting with IPA v4.0, and
299 * delay mode should not be used starting with IPA v4.2.
304 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
312 if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
313 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
315 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
317 reg = ipa_reg(ipa, ENDP_INIT_CTRL); in ipa_endpoint_init_ctrl()
318 offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
319 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
321 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; in ipa_endpoint_init_ctrl()
329 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
339 /* Delay mode should not be used for IPA v4.2+ */ in ipa_endpoint_program_delay()
340 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
341 WARN_ON(!endpoint->toward_ipa); in ipa_endpoint_program_delay()
348 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
349 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
353 WARN_ON(!(mask & ipa->available)); in ipa_endpoint_aggr_active()
355 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); in ipa_endpoint_aggr_active()
356 val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); in ipa_endpoint_aggr_active()
363 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
364 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
367 WARN_ON(!(mask & ipa->available)); in ipa_endpoint_force_close()
369 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); in ipa_endpoint_force_close()
370 iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg)); in ipa_endpoint_force_close()
374 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
375 * @endpoint: Endpoint on which to emulate a suspend
377 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
378 * with an open aggregation frame. This is to work around a hardware
379 * issue in IPA version 3.5.1 where the suspend interrupt will not be
384 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
386 if (!endpoint->config.aggregation) in ipa_endpoint_suspend_aggr()
389 /* Nothing to do if the endpoint doesn't have aggregation open */ in ipa_endpoint_suspend_aggr()
396 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
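Pieced together, the workaround is small: if the endpoint aggregates and currently has an aggregation frame open, force the frame closed and then simulate the suspend interrupt the hardware failed to raise. A sketch using only helpers visible elsewhere in this listing:

	static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
	{
		struct ipa *ipa = endpoint->ipa;

		if (!endpoint->config.aggregation)
			return;

		/* Nothing to do if the endpoint doesn't have aggregation open */
		if (!ipa_endpoint_aggr_active(endpoint))
			return;

		/* Force close aggregation, then emulate the missing interrupt */
		ipa_endpoint_force_close(endpoint);

		ipa_interrupt_simulate_suspend(ipa->interrupt);
	}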
405 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
406 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
408 WARN_ON(endpoint->toward_ipa); in ipa_endpoint_program_suspend()
413 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
422 /* Put all modem RX endpoints into suspend mode, and stop transmission
423 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
424 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
427 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
432 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
434 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
437 if (!endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
439 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
442 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
443 endpoint->channel_id, in ipa_endpoint_modem_pause_all()
448 /* Reset all modem endpoints to use the default exception endpoint */
449 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
451 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
455 /* We need one command per modem TX endpoint, plus the commands in ipa_endpoint_modem_exception_reset_all()
458 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
459 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
461 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
462 "no transaction to reset modem exception endpoints\n"); in ipa_endpoint_modem_exception_reset_all()
463 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
474 /* We only reset modem TX endpoints */ in ipa_endpoint_modem_exception_reset_all()
475 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
476 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
479 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_modem_exception_reset_all()
493 ipa_cmd_pipeline_clear_wait(ipa); in ipa_endpoint_modem_exception_reset_all()
500 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_cfg()
501 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg() local
506 reg = ipa_reg(ipa, ENDP_INIT_CFG); in ipa_endpoint_init_cfg()
508 if (endpoint->config.checksum) { in ipa_endpoint_init_cfg()
509 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
511 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
514 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
532 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
537 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_nat()
538 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat() local
542 if (!endpoint->toward_ipa) in ipa_endpoint_init_nat()
545 reg = ipa_reg(ipa, ENDP_INIT_NAT); in ipa_endpoint_init_nat()
548 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
557 if (!endpoint->config.checksum) in ipa_qmap_header_size()
561 /* Checksum header inserted for AP TX endpoints only */ in ipa_qmap_header_size()
562 if (endpoint->toward_ipa) in ipa_qmap_header_size()
579 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_header_size_encode()
586 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
601 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_metadata_offset_encode()
608 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
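Both encode helpers use the same split-field trick: the low-order bits of the value go in the original register field, and on IPA v4.5+ any bits above field_max spill into a companion *_MSB field. A sketch of the pattern for the header-size case (the HDR_LEN/HDR_LEN_MSB field names and the ipa_reg_field_max() helper are assumptions based on the fragments above):

	static u32 ipa_header_size_encode(enum ipa_version version,
					  const struct ipa_reg *reg, u32 header_size)
	{
		u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
		u32 val;

		/* We know field_max can be used as a mask (2^n - 1) */
		val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
		if (version < IPA_VERSION_4_5) {
			WARN_ON(header_size > field_max);
			return val;
		}

		/* IPA v4.5 adds a few more most-significant bits */
		header_size >>= hweight32(field_max);
		WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
		val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);

		return val;
	}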
617 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
621 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
622 * packet size field, and we have the IPA hardware populate both for each
624 * to use big endian format.
629 * The mux_id comes from a 4-byte metadata value supplied with each packet
630 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
631 * value that we want, in its low-order byte. A bitmask defined in the
632 * endpoint's METADATA_MASK register defines which byte within the modem
639 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr()
640 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr() local
644 reg = ipa_reg(ipa, ENDP_INIT_HDR); in ipa_endpoint_init_hdr()
645 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr()
646 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
652 /* Define how to fill fields in a received QMAP header */ in ipa_endpoint_init_hdr()
653 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
656 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
660 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
662 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
669 /* For QMAP TX, metadata offset is 0 (modem assumes this) */ in ipa_endpoint_init_hdr()
678 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
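For orientation, the QMAP header being described is the rmnet MAP header. A simplified view of its layout, following include/linux/if_rmnet.h:

	/* Simplified: flags/pad byte, 1-byte mux_id, big-endian packet length */
	struct rmnet_map_header {
		u8 flags;	/* command/data flag and pad length */
		u8 mux_id;	/* multiplexing channel ID */
		__be16 pkt_len;	/* packet length, including padding */
	};

The 2-byte pkt_len is why the HDR register is told to use big endian format, and the METADATA_MASK register is what isolates the mux_id byte from the modem-supplied 4-byte metadata value.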
683 u32 pad_align = endpoint->config.rx.pad_align; in ipa_endpoint_init_hdr_ext()
684 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_ext()
685 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext() local
689 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); in ipa_endpoint_init_hdr_ext()
690 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr_ext()
701 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
710 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
713 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
716 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
718 if (endpoint->config.qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
730 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
735 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
736 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask() local
741 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
744 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); in ipa_endpoint_init_hdr_metadata_mask()
748 if (endpoint->config.qmap) in ipa_endpoint_init_hdr_metadata_mask()
751 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
756 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode() local
761 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
764 reg = ipa_reg(ipa, ENDP_INIT_MODE); in ipa_endpoint_init_mode()
765 if (endpoint->config.dma_mode) { in ipa_endpoint_init_mode()
766 enum ipa_endpoint_name name = endpoint->config.dma_endpoint; in ipa_endpoint_init_mode()
767 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
776 offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_mode()
777 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
780 /* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
781 * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
782 * they're configured to have granularity 100 usec and 1 msec, respectively.
784 * The return value is the positive or negative Qtime value to use to
797 /* Have to use pulse generator 1 (millisecond granularity) */ in ipa_qtime_val()
801 return (int)-val; in ipa_qtime_val()
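The selection between the two pulse generators can be sketched as a single helper: try 100 microsecond granularity first, and if the value would overflow the field, fall back to millisecond granularity and return the value negated so the caller knows to select pulse generator 1 (a sketch assuming DIV_ROUND_CLOSEST() rounding):

	static int ipa_qtime_val(u32 microseconds, u32 max)
	{
		u32 val;

		/* Use 100 microsecond granularity if possible */
		val = DIV_ROUND_CLOSEST(microseconds, 100);
		if (val <= max)
			return (int)val;

		/* Have to use pulse generator 1 (millisecond granularity) */
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
		WARN_ON(val > max);

		return (int)-val;
	}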
804 /* Encode the aggregation timer limit (microseconds) based on IPA version */
805 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg, in aggr_time_limit_encode() argument
812 return 0; /* Nothing to compute if time limit is 0 */ in aggr_time_limit_encode()
815 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
819 /* Compute the Qtime limit value to use */ in aggr_time_limit_encode()
822 val = -ret; in aggr_time_limit_encode()
842 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_aggr()
843 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr() local
847 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_init_aggr()
848 if (endpoint->config.aggregation) { in ipa_endpoint_init_aggr()
849 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
854 rx_config = &endpoint->config.rx; in ipa_endpoint_init_aggr()
858 buffer_size = rx_config->buffer_size; in ipa_endpoint_init_aggr()
859 limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_init_aggr()
860 rx_config->aggr_hard_limit); in ipa_endpoint_init_aggr()
863 limit = rx_config->aggr_time_limit; in ipa_endpoint_init_aggr()
864 val |= aggr_time_limit_encode(ipa, reg, limit); in ipa_endpoint_init_aggr()
868 if (rx_config->aggr_close_eof) in ipa_endpoint_init_aggr()
876 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ in ipa_endpoint_init_aggr()
882 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
885 /* The head-of-line blocking timer is defined as a tick count. For
886 * IPA version 4.5 the tick count is based on the Qtimer, which is
887 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
888 * each tick represents 128 cycles of the IPA core clock.
891 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
893 static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg, in hol_block_timer_encode() argument
904 return 0; /* Nothing to compute if timer period is 0 */ in hol_block_timer_encode()
906 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
911 /* Compute the Qtime limit value to use */ in hol_block_timer_encode()
914 val = -ret; in hol_block_timer_encode()
924 /* Use 64 bit arithmetic to avoid overflow */ in hol_block_timer_encode()
925 rate = ipa_core_clock_rate(ipa); in hol_block_timer_encode()
928 /* We still need the result to fit into the field */ in hol_block_timer_encode()
931 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
932 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
935 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
936 * scale fields within the 32-bit timer register, where: in hol_block_timer_encode()
945 scale = high > width ? high - width : 0; in hol_block_timer_encode()
947 /* If we're scaling, round up to get a closer result */ in hol_block_timer_encode()
948 ticks += 1 << (scale - 1); in hol_block_timer_encode()
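For IPA v4.2 the encoding is effectively a tiny floating-point split: keep the most-significant bits of the tick count in the base field and record how far they were shifted in the scale field, so that ticks ~= base << scale. A standalone sketch of that split (names here are illustrative, not the driver's):

	/* Split ticks so that ticks ~= base << scale, with base fitting in a
	 * field of 'width' bits.  Rounds to the nearest representable value.
	 */
	static void hol_block_base_scale(u32 ticks, u32 width, u32 *base, u32 *scale)
	{
		u32 high = fls(ticks);		/* highest set bit, 1-based */

		*scale = high > width ? high - width : 0;
		if (*scale) {
			/* If we're scaling, round up to get a closer result */
			ticks += 1 << (*scale - 1);
			if (fls(ticks) != high)	/* rounding carried into a new bit */
				(*scale)++;
		}
		*base = ticks >> *scale;
	}

For example, 1000 ticks with a 5-bit base field gives scale = 5 and base = 31, i.e. 31 << 5 = 992 ticks.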
964 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
965 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
970 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); in ipa_endpoint_init_hol_block_timer()
971 val = hol_block_timer_encode(ipa, reg, microseconds); in ipa_endpoint_init_hol_block_timer()
973 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
979 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_en()
980 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en() local
985 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); in ipa_endpoint_init_hol_block_en()
989 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
991 /* When enabling, the register must be written twice for IPA v4.5+ */ in ipa_endpoint_init_hol_block_en()
992 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
993 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1009 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
1014 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
1016 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
1026 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_deaggr()
1027 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr() local
1031 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
1034 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); in ipa_endpoint_init_deaggr()
1040 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1045 u32 resource_group = endpoint->config.resource_group; in ipa_endpoint_init_rsrc_grp()
1046 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_rsrc_grp()
1047 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp() local
1051 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); in ipa_endpoint_init_rsrc_grp()
1054 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1059 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_seq()
1060 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq() local
1064 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
1067 reg = ipa_reg(ipa, ENDP_INIT_SEQ); in ipa_endpoint_init_seq()
1069 /* Low-order byte configures primary packet processing */ in ipa_endpoint_init_seq()
1070 val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type); in ipa_endpoint_init_seq()
1073 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1075 endpoint->config.tx.seq_rep_type); in ipa_endpoint_init_seq()
1077 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1081 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1083 * @skb: Socket buffer to send
1093 /* Make sure source endpoint's TLV FIFO has enough entries to in ipa_endpoint_skb_tx()
1097 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
1098 if (nr_frags > endpoint->skb_frag_max) { in ipa_endpoint_skb_tx()
1100 return -E2BIG; in ipa_endpoint_skb_tx()
1106 return -EBUSY; in ipa_endpoint_skb_tx()
1111 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
1120 return -ENOMEM; in ipa_endpoint_skb_tx()
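The TLV FIFO check above boils down to this: one TRE is needed for the skb's linear data plus one per page fragment, and if the fragment count exceeds what the endpoint's channel can take in a single transaction, the skb is linearized before giving up. A sketch of that portion (the surrounding transaction setup is elided):

	/* One TRE for the linear data, plus one per page fragment */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;		/* everything is linear now */
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;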
1125 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
1126 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
1130 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_status()
1131 if (endpoint->config.status_enable) { in ipa_endpoint_status()
1133 if (endpoint->toward_ipa) { in ipa_endpoint_status()
1137 name = endpoint->config.tx.status_endpoint; in ipa_endpoint_status()
1138 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1144 * packet (not present for IPA v4.5+) in ipa_endpoint_status()
1149 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1161 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_replenish_one()
1164 return -ENOMEM; in ipa_endpoint_replenish_one()
1166 /* Offset the buffer to make space for skb headroom */ in ipa_endpoint_replenish_one()
1168 len = buffer_size - offset; in ipa_endpoint_replenish_one()
1174 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
1180 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1181 * @endpoint: Endpoint to be replenished
1183 * The IPA hardware can hold a fixed number of receive buffers for an RX
1185 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1186 * more receive buffers can be supplied to the hardware. Replenishing for
1187 * an endpoint can be disabled, in which case buffers are not queued to
1194 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1198 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1209 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH); in ipa_endpoint_replenish()
1213 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1219 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1221 /* Whenever a receive buffer transaction completes we'll try to in ipa_endpoint_replenish()
1222 * replenish again. It's unlikely, but if we fail to supply even in ipa_endpoint_replenish()
1224 * If the hardware has no receive buffers queued, schedule work to in ipa_endpoint_replenish()
1227 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1228 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
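The two flag bits above form a simple reentrancy guard around the refill loop: ENABLED gates whether replenishing may happen at all, and ACTIVE (set with test_and_set_bit()) ensures only one caller refills at a time. A sketch of the shape of the loop (gsi_trans_commit() and gsi_trans_free() are assumptions about the GSI transaction API, and the retry path is abbreviated):

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Only one caller may replenish at a time */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans)) {
			gsi_trans_free(trans);	/* no page available; retry later */
			break;
		}

		/* Ring the channel doorbell once per full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);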
1234 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_enable()
1237 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1243 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_disable()
1261 if (!endpoint->netdev) in ipa_endpoint_skb_copy()
1268 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
1269 skb->truesize += extra; in ipa_endpoint_skb_copy()
1272 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1278 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_skb_build()
1281 /* Nothing to do if there's no netdev */ in ipa_endpoint_skb_build()
1282 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1285 WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD)); in ipa_endpoint_skb_build()
1294 /* Receive the buffer (or record drop if unable to build it) */ in ipa_endpoint_skb_build()
1295 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1321 if (!ipa_status_format_packet(status->opcode)) in ipa_endpoint_status_skip()
1323 if (!status->pkt_len) in ipa_endpoint_status_skip()
1325 endpoint_id = u8_get_bits(status->endp_dst_idx, in ipa_endpoint_status_skip()
1327 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1337 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag() local
1340 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK)) in ipa_endpoint_status_tag()
1343 /* The status contains a valid tag. We know the packet was sent to in ipa_endpoint_status_tag()
1345 * If the packet came from the AP->command TX endpoint we know in ipa_endpoint_status_tag()
1348 endpoint_id = u8_get_bits(status->endp_src_idx, in ipa_endpoint_status_tag()
1350 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag()
1351 if (endpoint_id == command_endpoint->endpoint_id) { in ipa_endpoint_status_tag()
1352 complete(&ipa->completion); in ipa_endpoint_status_tag()
1354 dev_err(&ipa->pdev->dev, in ipa_endpoint_status_tag()
1373 if (status->exception) in ipa_endpoint_status_drop()
1374 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; in ipa_endpoint_status_drop()
1376 /* Drop the packet if it fails to match a routing rule; otherwise no */ in ipa_endpoint_status_drop()
1377 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); in ipa_endpoint_status_drop()
1385 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_status_parse()
1387 u32 unused = buffer_size - total_len; in ipa_endpoint_status_parse()
1396 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1405 resid -= sizeof(*status); in ipa_endpoint_status_parse()
1411 * to pad packet data to an aligned boundary, account for that. in ipa_endpoint_status_parse()
1415 align = endpoint->config.rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1416 len = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1418 if (endpoint->config.checksum) in ipa_endpoint_status_parse()
1428 len2 = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1441 resid -= len; in ipa_endpoint_status_parse()
1450 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1453 if (trans->cancelled) in ipa_endpoint_trans_complete()
1457 page = trans->data; in ipa_endpoint_trans_complete()
1458 if (endpoint->config.status_enable) in ipa_endpoint_trans_complete()
1459 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_trans_complete()
1460 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_trans_complete()
1461 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_trans_complete()
1469 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1470 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1472 /* Nothing to do for command transactions */ in ipa_endpoint_trans_release()
1473 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1474 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1480 struct page *page = trans->data; in ipa_endpoint_trans_release()
1487 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1492 reg = ipa_reg(ipa, ROUTE); in ipa_endpoint_default_route_set()
1500 iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); in ipa_endpoint_default_route_set()
1503 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1505 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1509 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1510 * @endpoint: Endpoint to be reset
1514 * taken to ensure the IPA pipeline is properly cleared.
1520 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1521 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1522 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1532 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1536 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1545 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1548 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1554 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1558 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1562 /* Wait for aggregation to be closed on the channel */ in ipa_endpoint_reset_rx_aggr()
1568 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1573 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1575 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1577 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1581 /* Finally, reset and reconfigure the channel again (re-enabling in ipa_endpoint_reset_rx_aggr()
1582 * the doorbell engine if appropriate). Sleep for 1 millisecond to in ipa_endpoint_reset_rx_aggr()
1586 gsi_channel_reset(gsi, endpoint->channel_id, true); in ipa_endpoint_reset_rx_aggr()
1593 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1606 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1607 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1611 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1612 * is active, we need to handle things specially to recover. in ipa_endpoint_reset()
1613 * All other cases just need to reset the underlying GSI channel. in ipa_endpoint_reset()
1615 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1616 endpoint->config.aggregation; in ipa_endpoint_reset()
1620 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1623 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
1625 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1630 if (endpoint->toward_ipa) { in ipa_endpoint_program()
1631 /* Newer versions of IPA use GSI channel flow control in ipa_endpoint_program()
1632 * instead of endpoint DELAY mode to prevent sending data. in ipa_endpoint_program()
1633 * Flow control is disabled for newly-allocated channels, in ipa_endpoint_program()
1635 * for AP TX channels. in ipa_endpoint_program()
1637 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1640 /* Ensure suspend mode is off on all AP RX endpoints */ in ipa_endpoint_program()
1650 if (!endpoint->toward_ipa) { in ipa_endpoint_program()
1651 if (endpoint->config.rx.holb_drop) in ipa_endpoint_program()
1664 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1665 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1668 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1670 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1672 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1673 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1677 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1678 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1679 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1683 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1690 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1691 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1692 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1695 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1698 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1700 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1702 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1703 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1706 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1707 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1709 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1710 "error %d attempting to stop endpoint %u\n", ret, in ipa_endpoint_disable_one()
1711 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1716 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1717 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1720 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1723 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1728 ret = gsi_channel_suspend(gsi, endpoint->channel_id); in ipa_endpoint_suspend_one()
1731 endpoint->channel_id); in ipa_endpoint_suspend_one()
1736 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1737 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1740 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1743 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1746 ret = gsi_channel_resume(gsi, endpoint->channel_id); in ipa_endpoint_resume_one()
1749 endpoint->channel_id); in ipa_endpoint_resume_one()
1750 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1754 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1756 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1759 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1760 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1762 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1763 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1766 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1768 if (!ipa->setup_complete) in ipa_endpoint_resume()
1771 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1772 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1774 if (ipa->modem_netdev) in ipa_endpoint_resume()
1775 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1780 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1781 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1783 /* Only AP endpoints get set up */ in ipa_endpoint_setup_one()
1784 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1787 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; in ipa_endpoint_setup_one()
1788 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1792 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1793 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1794 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1800 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1805 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1807 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1808 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1813 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1815 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1817 ipa->set_up = 0; in ipa_endpoint_setup()
1823 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1827 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1829 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1836 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1838 ipa->set_up = 0; in ipa_endpoint_teardown()
1841 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1843 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1853 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported. in ipa_endpoint_config()
1860 * just set the available mask to support any endpoint, and in ipa_endpoint_config()
1863 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
1864 ipa->available = ~0; in ipa_endpoint_config()
1871 reg = ipa_reg(ipa, FLAVOR_0); in ipa_endpoint_config()
1872 val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); in ipa_endpoint_config()
1874 /* Our RX is an IPA producer */ in ipa_endpoint_config()
1880 return -EINVAL; in ipa_endpoint_config()
1882 rx_mask = GENMASK(max - 1, rx_base); in ipa_endpoint_config()
1884 /* Our TX is an IPA consumer */ in ipa_endpoint_config()
1886 tx_mask = GENMASK(max - 1, 0); in ipa_endpoint_config()
1888 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
1891 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1893 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1894 ret = -EINVAL; /* Report other errors too */ in ipa_endpoint_config()
1897 initialized = ipa->initialized; in ipa_endpoint_config()
1905 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1906 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { in ipa_endpoint_config()
1909 ret = -EINVAL; in ipa_endpoint_config()
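As a worked example of the mask arithmetic above: suppose FLAVOR_0 reports 11 consumer (TX) pipes and 20 producer (RX) pipes starting at endpoint 11. Then:

	/* Hypothetical FLAVOR_0 values, for illustration only */
	tx_mask = GENMASK(10, 0);	/* endpoints  0..10 may be TX (toward IPA) */
	rx_mask = GENMASK(30, 11);	/* endpoints 11..30 may be RX (from IPA) */
	ipa->available = rx_mask | tx_mask;

Every endpoint below rx_base must therefore be a TX endpoint, which is exactly what the (endpoint_id < rx_base) != endpoint->toward_ipa check verifies.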
1916 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1918 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1921 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
1926 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1928 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
1929 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1930 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1932 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1933 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1934 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1935 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1936 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1937 endpoint->config = data->endpoint.config; in ipa_endpoint_init_one()
1939 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1944 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1949 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
1951 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1958 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1960 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1961 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1965 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
1973 if (!ipa_endpoint_data_valid(ipa, count, data)) in ipa_endpoint_init()
1976 ipa->initialized = 0; in ipa_endpoint_init()
1983 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
1985 if (data->endpoint.filter_support) in ipa_endpoint_init()
1986 filter_map |= BIT(data->endpoint_id); in ipa_endpoint_init()
1987 if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) in ipa_endpoint_init()
1988 ipa->modem_tx_count++; in ipa_endpoint_init()
1991 if (!ipa_filter_map_valid(ipa, filter_map)) in ipa_endpoint_init()
1994 return filter_map; /* Non-zero bitmask */ in ipa_endpoint_init()
1997 ipa_endpoint_exit(ipa); in ipa_endpoint_init()