Lines matching full:endpoint (each entry: source line number, matched text, enclosing function)
90 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
92 "RX endpoint %u\n", in ipa_endpoint_data_valid_one()
100 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
101 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
103 dev_err(dev, "status endpoint name %u out of range " in ipa_endpoint_data_valid_one()
104 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
109 /* Status endpoint must be defined... */ in ipa_endpoint_data_valid_one()
112 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
113 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
118 /* ...and has to be an RX endpoint... */ in ipa_endpoint_data_valid_one()
121 "status endpoint for endpoint %u not RX\n", in ipa_endpoint_data_valid_one()
126 /* ...and if it's to be an AP endpoint... */ in ipa_endpoint_data_valid_one()
129 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
131 "status not enabled for endpoint %u\n", in ipa_endpoint_data_valid_one()
138 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
139 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
141 dev_err(dev, "DMA endpoint name %u out of range " in ipa_endpoint_data_valid_one()
142 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
149 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
150 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
203 dev_err(dev, "command TX endpoint not defined\n"); in ipa_endpoint_data_valid()
207 dev_err(dev, "LAN RX endpoint not defined\n"); in ipa_endpoint_data_valid()
211 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
215 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
226 /* Allocate a transaction to use on a non-command endpoint */
227 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_alloc() argument
230 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
231 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
234 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
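From these fragments, the allocator just maps the endpoint's direction onto a DMA direction and defers to the GSI layer. A minimal reconstruction (gsi_channel_trans_alloc() is the underlying GSI allocator in the mainline driver):

/* Allocate a transaction to use on a non-command endpoint; reconstruction */
static struct gsi_trans *
ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 channel_id = endpoint->channel_id;
        enum dma_data_direction direction;

        /* TX endpoints move data toward the device, RX away from it */
        direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}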
243 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) in ipa_endpoint_init_ctrl() argument
245 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
246 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl()
254 if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
259 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; in ipa_endpoint_init_ctrl()
275 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_delay() argument
277 WARN_ON(!endpoint->toward_ipa); in ipa_endpoint_program_delay()
280 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
281 (void)ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_delay()
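ENDP_INIT_CTRL_N holds both control bits: DELAY pauses a TX endpoint, SUSPEND pauses an RX one. A sketch of the read-modify-write implied by the fragments, returning the previous state so callers can tell whether anything changed; the IPA v4.2 guard in ipa_endpoint_program_delay() exists because delay mode misbehaves on that version. Treat this as a reconstruction, not the verbatim source.

static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
        u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        bool state;
        u32 mask;
        u32 val;

        /* Assert hardware support: per-endpoint suspend is gone in
         * IPA v4.0+, and delay doesn't work correctly on IPA v4.2.
         */
        if (endpoint->toward_ipa)
                WARN_ON(ipa->version == IPA_VERSION_4_2);
        else
                WARN_ON(ipa->version >= IPA_VERSION_4_0);

        /* TX endpoints use the DELAY bit, RX endpoints use SUSPEND */
        mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

        val = ioread32(ipa->reg_virt + offset);
        state = !!(val & mask);

        /* Only write the register if the state actually changes */
        if (suspend_delay != state) {
                val ^= mask;
                iowrite32(val, ipa->reg_virt + offset);
        }

        return state;   /* Previous state */
}

static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
        WARN_ON(!endpoint->toward_ipa); /* Delay mode is TX-only */

        /* Delay mode doesn't work properly for IPA v4.2 */
        if (endpoint->ipa->version != IPA_VERSION_4_2)
                (void)ipa_endpoint_init_ctrl(endpoint, enable);
}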
284 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) in ipa_endpoint_aggr_active() argument
286 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
287 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active()
299 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) in ipa_endpoint_force_close() argument
301 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
302 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close()
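Both helpers operate on the same per-endpoint bit position: one polls STATE_AGGR_ACTIVE, the other writes that bit to AGGR_FORCE_CLOSE. A reconstruction (register offset macro names as in mainline):

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 val;

        val = ioread32(ipa->reg_virt + IPA_REG_STATE_AGGR_ACTIVE_OFFSET);

        return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;

        /* Writing the endpoint's bit forces its aggregation frame closed */
        iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}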
311 * @endpoint: Endpoint on which to emulate a suspend
313 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
318 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_aggr() argument
320 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr()
322 if (!endpoint->data->aggregation) in ipa_endpoint_suspend_aggr()
325 /* Nothing to do if the endpoint doesn't have aggregation open */ in ipa_endpoint_suspend_aggr()
326 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_suspend_aggr()
330 ipa_endpoint_force_close(endpoint); in ipa_endpoint_suspend_aggr()
337 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_suspend() argument
341 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
344 WARN_ON(endpoint->toward_ipa); in ipa_endpoint_program_suspend()
346 suspended = ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_suspend()
353 ipa_endpoint_suspend_aggr(endpoint); in ipa_endpoint_program_suspend()
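Putting the suspend pieces together: for IPA v4.0+ there is no per-endpoint SUSPEND bit, so the helper is a no-op that just reports the requested state; on older hardware it flips the bit and then works around the open-aggregation case. A hedged sketch (ipa_interrupt_simulate_suspend() is the mainline helper that injects the missing interrupt):

static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
        bool suspended;

        if (endpoint->ipa->version >= IPA_VERSION_4_0)
                return enable;  /* No per-endpoint suspend on IPA v4.0+ */

        WARN_ON(endpoint->toward_ipa);  /* Suspend is RX-only */

        suspended = ipa_endpoint_init_ctrl(endpoint, enable);

        /* A client suspended with an open aggregation frame will not
         * generate a SUSPEND IPA interrupt; emulate one so the frame
         * gets closed (ipa_endpoint_suspend_aggr() force-closes it and
         * calls ipa_interrupt_simulate_suspend()).
         */
        if (enable)
                ipa_endpoint_suspend_aggr(endpoint);

        return suspended;
}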
368 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all() local
370 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
374 if (endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
375 ipa_endpoint_program_delay(endpoint, enable); in ipa_endpoint_modem_pause_all()
377 (void)ipa_endpoint_program_suspend(endpoint, enable); in ipa_endpoint_modem_pause_all()
381 /* Reset all modem endpoints to use the default exception endpoint */
388 /* We need one command per modem TX endpoint. We can get an upper in ipa_endpoint_modem_exception_reset_all()
403 struct ipa_endpoint *endpoint; in ipa_endpoint_modem_exception_reset_all() local
409 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
410 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
416 * means status is disabled on the endpoint, and as a in ipa_endpoint_modem_exception_reset_all()
432 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) in ipa_endpoint_init_cfg() argument
434 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_cfg()
439 if (endpoint->data->checksum) { in ipa_endpoint_init_cfg()
440 enum ipa_version version = endpoint->ipa->version; in ipa_endpoint_init_cfg()
442 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
465 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
468 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) in ipa_endpoint_init_nat() argument
473 if (!endpoint->toward_ipa) in ipa_endpoint_init_nat()
476 offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_nat()
479 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_nat()
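The NAT register is only meaningful for TX endpoints, and the driver programs every one of them to bypass NAT. A reconstruction:

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
        u32 offset;
        u32 val;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);

        /* The AP does no NAT; every TX endpoint bypasses it */
        val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}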
483 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) in ipa_qmap_header_size() argument
488 if (!endpoint->data->checksum) in ipa_qmap_header_size()
493 if (endpoint->toward_ipa) in ipa_qmap_header_size()
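The QMAP header size depends on whether checksum offload is in use and, before IPA v4.5, on direction: the upload checksum header is only inserted on TX. A sketch using the rmnet header structures from include/linux/if_rmnet.h:

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
        u32 header_size = sizeof(struct rmnet_map_header);

        /* Without checksum offload, we just have the MAP header */
        if (!endpoint->data->checksum)
                return header_size;

        if (version < IPA_VERSION_4_5) {
                /* Checksum header is used in the TX direction only */
                if (endpoint->toward_ipa)
                        header_size += sizeof(struct rmnet_map_ul_csum_header);
        } else {
                /* An inline (v5) checksum header is used both ways */
                header_size += sizeof(struct rmnet_map_v5_csum_header);
        }

        return header_size;
}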
504 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
505 * @endpoint: Endpoint pointer
519 * endpoint's METADATA_MASK register defines which byte within the modem
524 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr() argument
526 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr()
527 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr()
530 if (endpoint->data->qmap) { in ipa_endpoint_init_hdr()
534 header_size = ipa_qmap_header_size(version, endpoint); in ipa_endpoint_init_hdr()
538 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
566 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_ext() argument
568 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr_ext()
569 u32 pad_align = endpoint->data->rx.pad_align; in ipa_endpoint_init_hdr_ext()
570 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext()
581 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
589 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
597 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
610 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_metadata_mask() argument
612 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
616 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
622 if (endpoint->data->qmap) in ipa_endpoint_init_hdr_metadata_mask()
625 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
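The metadata mask register selects which bytes of the 32-bit QMAP metadata an RX endpoint passes up; for QMAP endpoints the driver keeps only the mux_id byte. A reconstruction (the mask constant is mainline's IPA_ENDPOINT_QMAP_METADATA_MASK, 0x000000ff; header fields are big endian, hence the cpu_to_be32()):

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 val = 0;
        u32 offset;

        if (endpoint->toward_ipa)
                return;         /* Register not valid for TX endpoints */

        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

        /* Note that HDR_ENDIANNESS indicates big endian header fields */
        if (endpoint->data->qmap)
                val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}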
628 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) in ipa_endpoint_init_mode() argument
630 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_mode()
633 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
636 if (endpoint->data->dma_mode) { in ipa_endpoint_init_mode()
637 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; in ipa_endpoint_init_mode()
640 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
649 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
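DMA mode turns a TX endpoint into a pass-through: everything it receives is handed to one fixed destination endpoint, resolved through the driver's name map. A reconstruction:

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
        u32 val;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        if (endpoint->data->dma_mode) {
                enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
                u32 dma_endpoint_id;

                dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

                val = u32_encode_bits(IPA_DMA, MODE_FMASK);
                val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
        } else {
                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
        }
        /* All other bits unspecified (and 0) */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}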
664 /* Encoded values for AGGR endpoint register fields */
717 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_aggr() argument
719 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_aggr()
720 enum ipa_version version = endpoint->ipa->version; in ipa_endpoint_init_aggr()
723 if (endpoint->data->aggregation) { in ipa_endpoint_init_aggr()
724 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
739 close_eof = endpoint->data->rx.aggr_close_eof; in ipa_endpoint_init_aggr()
756 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
848 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, in ipa_endpoint_init_hol_block_timer() argument
851 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
852 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer()
862 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_init_hol_block_enable() argument
864 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_enable()
870 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
878 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all() local
880 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
883 ipa_endpoint_init_hol_block_timer(endpoint, 0); in ipa_endpoint_modem_hol_block_clear_all()
884 ipa_endpoint_init_hol_block_enable(endpoint, true); in ipa_endpoint_modem_hol_block_clear_all()
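Clearing head-of-line blocking for the modem's RX endpoints is a two-step pattern: program a zero timeout, then set the enable bit. A sketch of the enable helper and the loop (IPA_ENDPOINT_MAX is the endpoint-table size in mainline):

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 offset;
        u32 val;

        val = enable ? HOL_BLOCK_EN_FMASK : 0;
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
        u32 i;

        for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[i];

                /* Only modem RX endpoints are affected */
                if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
                        continue;

                ipa_endpoint_init_hol_block_timer(endpoint, 0);
                ipa_endpoint_init_hol_block_enable(endpoint, true);
        }
}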
888 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_deaggr() argument
890 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_deaggr()
893 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
901 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
904 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) in ipa_endpoint_init_rsrc_grp() argument
906 u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_rsrc_grp()
907 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp()
910 val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); in ipa_endpoint_init_rsrc_grp()
914 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) in ipa_endpoint_init_seq() argument
916 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_seq()
919 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
923 val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); in ipa_endpoint_init_seq()
926 val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, in ipa_endpoint_init_seq()
929 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
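The SEQ register packs two sequencer type fields, one for primary and one for replicated packet processing; both come from the endpoint's TX configuration data. A reconstruction:

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        /* Low-order byte configures primary packet processing */
        val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);

        /* Second byte configures replicated packet processing */
        val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
                               SEQ_REP_TYPE_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}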
934 * @endpoint: Endpoint pointer
939 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) in ipa_endpoint_skb_tx() argument
945 /* Make sure source endpoint's TLV FIFO has enough entries to in ipa_endpoint_skb_tx()
950 if (1 + nr_frags > endpoint->trans_tre_max) { in ipa_endpoint_skb_tx()
956 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); in ipa_endpoint_skb_tx()
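The TX path needs one TRE for the skb's linear data plus one per page fragment; if that exceeds the channel's TLV FIFO depth (endpoint->trans_tre_max), the skb is linearized first. A hedged reconstruction of the whole function, using the GSI transaction helpers named in the fragments:

int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
        struct gsi_trans *trans;
        u32 nr_frags;
        int ret;

        /* Make sure source endpoint's TLV FIFO has enough entries to
         * hold the linear portion of the skb and all its fragments.
         * If not, see if we can linearize it before giving up.
         */
        nr_frags = skb_shinfo(skb)->nr_frags;
        if (1 + nr_frags > endpoint->trans_tre_max) {
                if (skb_linearize(skb))
                        return -E2BIG;
                nr_frags = 0;
        }

        trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
        if (!trans)
                return -EBUSY;

        ret = gsi_trans_skb_add(trans, skb);
        if (ret)
                goto err_trans_free;
        trans->data = skb;      /* transaction owns skb now */

        gsi_trans_commit(trans, !netdev_xmit_more());

        return 0;

err_trans_free:
        gsi_trans_free(trans);

        return -ENOMEM;
}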
975 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) in ipa_endpoint_status() argument
977 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
978 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status()
984 if (endpoint->data->status_enable) { in ipa_endpoint_status()
986 if (endpoint->toward_ipa) { in ipa_endpoint_status()
990 name = endpoint->data->tx.status_endpoint; in ipa_endpoint_status()
1005 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_one() argument
1018 trans = ipa_endpoint_trans_alloc(endpoint, 1); in ipa_endpoint_replenish_one()
1031 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { in ipa_endpoint_replenish_one()
1033 endpoint->replenish_ready = 0; in ipa_endpoint_replenish_one()
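Each receive buffer becomes a single-TRE transaction, and the hardware doorbell is rung only once every IPA_REPLENISH_BATCH commits to amortize its cost. A reconstruction, with buffer size and headroom constants as in mainline (IPA_RX_BUFFER_SIZE, NET_SKB_PAD):

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
        struct gsi_trans *trans;
        bool doorbell = false;
        struct page *page;
        u32 offset;
        u32 len;
        int ret;

        page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
        if (!page)
                return -ENOMEM;

        trans = ipa_endpoint_trans_alloc(endpoint, 1);
        if (!trans)
                goto err_free_pages;

        /* Offset the buffer to make space for skb headroom */
        offset = NET_SKB_PAD;
        len = IPA_RX_BUFFER_SIZE - offset;

        ret = gsi_trans_page_add(trans, page, len, offset);
        if (ret)
                goto err_trans_free;
        trans->data = page;     /* transaction owns page now */

        /* Ring the doorbell once per batch of committed buffers */
        if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
                doorbell = true;
                endpoint->replenish_ready = 0;
        }

        gsi_trans_commit(trans, doorbell);

        return 0;

err_trans_free:
        gsi_trans_free(trans);
err_free_pages:
        __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

        return -ENOMEM;
}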
1049 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1050 * @endpoint: Endpoint to be replenished
1054 * endpoint, based on the number of entries in the underlying channel ring
1055 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1057 * an endpoint can be disabled, in which case requests to replenish a
1061 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) in ipa_endpoint_replenish() argument
1066 if (!endpoint->replenish_enabled) { in ipa_endpoint_replenish()
1068 atomic_inc(&endpoint->replenish_saved); in ipa_endpoint_replenish()
1072 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) in ipa_endpoint_replenish()
1073 if (ipa_endpoint_replenish_one(endpoint)) in ipa_endpoint_replenish()
1076 atomic_inc(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
1082 backlog = atomic_inc_return(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
1085 atomic_inc(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
1093 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
1094 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1095 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
1099 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_enable() argument
1101 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
1105 endpoint->replenish_enabled = true; in ipa_endpoint_replenish_enable()
1106 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) in ipa_endpoint_replenish_enable()
1107 atomic_add(saved, &endpoint->replenish_backlog); in ipa_endpoint_replenish_enable()
1110 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); in ipa_endpoint_replenish_enable()
1111 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) in ipa_endpoint_replenish_enable()
1112 ipa_endpoint_replenish(endpoint, false); in ipa_endpoint_replenish_enable()
1115 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_disable() argument
1119 endpoint->replenish_enabled = false; in ipa_endpoint_replenish_disable()
1120 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
1121 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
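While replenishing is disabled, would-be backlog counts accumulate in replenish_saved; enabling moves them back to replenish_backlog and kicks a replenish if the hardware ring is completely empty. A sketch of the two transitions, reconstructed from the fragments above:

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 max_backlog;
        u32 saved;

        endpoint->replenish_enabled = true;
        while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
                atomic_add(saved, &endpoint->replenish_backlog);

        /* Start replenishing if hardware currently has no buffers */
        max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
        if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
                ipa_endpoint_replenish(endpoint, false);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
        u32 backlog;

        endpoint->replenish_enabled = false;
        while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
                atomic_add(backlog, &endpoint->replenish_saved);
}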
1127 struct ipa_endpoint *endpoint; in ipa_endpoint_replenish_work() local
1129 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); in ipa_endpoint_replenish_work()
1131 ipa_endpoint_replenish(endpoint, false); in ipa_endpoint_replenish_work()
1134 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_copy() argument
1147 if (endpoint->netdev) in ipa_endpoint_skb_copy()
1148 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1153 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_build() argument
1159 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1172 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1193 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, in ipa_endpoint_status_skip() argument
1204 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1210 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint, in ipa_endpoint_status_tag() argument
1214 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag()
1221 * this endpoint (already verified by ipa_endpoint_status_skip()). in ipa_endpoint_status_tag()
1222 * If the packet came from the AP->command TX endpoint we know in ipa_endpoint_status_tag()
1232 "unexpected tagged packet from endpoint %u\n", in ipa_endpoint_status_tag()
1240 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, in ipa_endpoint_status_drop() argument
1246 if (ipa_endpoint_status_tag(endpoint, status)) in ipa_endpoint_status_drop()
1259 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, in ipa_endpoint_status_parse() argument
1272 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1279 if (ipa_endpoint_status_skip(endpoint, status)) { in ipa_endpoint_status_parse()
1291 align = endpoint->data->rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1294 if (endpoint->data->checksum) in ipa_endpoint_status_parse()
1297 if (!ipa_endpoint_status_drop(endpoint, status)) { in ipa_endpoint_status_parse()
1312 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); in ipa_endpoint_status_parse()
1322 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_tx_complete() argument
1328 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_rx_complete() argument
1333 ipa_endpoint_replenish(endpoint, true); in ipa_endpoint_rx_complete()
1340 if (endpoint->data->status_enable) in ipa_endpoint_rx_complete()
1341 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_rx_complete()
1342 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_rx_complete()
1346 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_complete() argument
1349 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1350 ipa_endpoint_tx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1352 ipa_endpoint_rx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1355 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_release() argument
1358 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1359 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release()
1362 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1396 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1397 * @endpoint: Endpoint to be reset
1399 * If aggregation is active on an RX endpoint when a reset is performed
1405 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_reset_rx_aggr() argument
1407 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1408 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr()
1428 ipa_endpoint_force_close(endpoint); in ipa_endpoint_reset_rx_aggr()
1435 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1438 suspended = ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_reset_rx_aggr()
1441 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1445 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1452 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1458 if (ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1459 dev_err(dev, "endpoint %u still active during reset\n", in ipa_endpoint_reset_rx_aggr()
1460 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1462 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1464 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1473 gsi_channel_reset(gsi, endpoint->channel_id, true); in ipa_endpoint_reset_rx_aggr()
1480 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1483 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_reset_rx_aggr()
1491 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) in ipa_endpoint_reset() argument
1493 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1494 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset()
1498 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1502 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1503 endpoint->data->aggregation; in ipa_endpoint_reset()
1504 if (special && ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset()
1505 ret = ipa_endpoint_reset_rx_aggr(endpoint); in ipa_endpoint_reset()
1511 "error %d resetting channel %u for endpoint %u\n", in ipa_endpoint_reset()
1512 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1515 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) in ipa_endpoint_program() argument
1517 if (endpoint->toward_ipa) in ipa_endpoint_program()
1518 ipa_endpoint_program_delay(endpoint, false); in ipa_endpoint_program()
1520 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_program()
1521 ipa_endpoint_init_cfg(endpoint); in ipa_endpoint_program()
1522 ipa_endpoint_init_nat(endpoint); in ipa_endpoint_program()
1523 ipa_endpoint_init_hdr(endpoint); in ipa_endpoint_program()
1524 ipa_endpoint_init_hdr_ext(endpoint); in ipa_endpoint_program()
1525 ipa_endpoint_init_hdr_metadata_mask(endpoint); in ipa_endpoint_program()
1526 ipa_endpoint_init_mode(endpoint); in ipa_endpoint_program()
1527 ipa_endpoint_init_aggr(endpoint); in ipa_endpoint_program()
1528 ipa_endpoint_init_deaggr(endpoint); in ipa_endpoint_program()
1529 ipa_endpoint_init_rsrc_grp(endpoint); in ipa_endpoint_program()
1530 ipa_endpoint_init_seq(endpoint); in ipa_endpoint_program()
1531 ipa_endpoint_status(endpoint); in ipa_endpoint_program()
1534 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_enable_one() argument
1536 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one()
1540 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1543 "error %d starting %cX channel %u for endpoint %u\n", in ipa_endpoint_enable_one()
1544 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1545 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1549 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1551 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1552 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_enable_one()
1555 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1560 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_disable_one() argument
1562 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1563 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one()
1572 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1573 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_disable_one()
1575 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1579 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1582 "error %d attempting to stop endpoint %u\n", ret, in ipa_endpoint_disable_one()
1583 endpoint->endpoint_id); in ipa_endpoint_disable_one()
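Disabling an endpoint clears its bit in ipa->enabled, quiesces the RX side (replenishing off, SUSPEND interrupt disabled), then stops the GSI channel. A reconstruction (ipa_interrupt_suspend_disable() is the mainline interrupt helper):

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        struct gsi *gsi = &ipa->gsi;
        int ret;

        if (!(ipa->enabled & mask))
                return;

        ipa->enabled ^= mask;

        if (!endpoint->toward_ipa) {
                ipa_endpoint_replenish_disable(endpoint);
                ipa_interrupt_suspend_disable(ipa->interrupt,
                                              endpoint->endpoint_id);
        }

        /* Note that if stop fails, the channel's state is not well-defined */
        ret = gsi_channel_stop(gsi, endpoint->channel_id);
        if (ret)
                dev_err(&ipa->pdev->dev,
                        "error %d attempting to stop endpoint %u\n", ret,
                        endpoint->endpoint_id);
}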
1586 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_one() argument
1588 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1589 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1592 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1595 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1596 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_suspend_one()
1597 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_suspend_one()
1600 ret = gsi_channel_suspend(gsi, endpoint->channel_id); in ipa_endpoint_suspend_one()
1603 endpoint->channel_id); in ipa_endpoint_suspend_one()
1606 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) in ipa_endpoint_resume_one() argument
1608 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1609 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1612 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1615 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1616 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_resume_one()
1618 ret = gsi_channel_resume(gsi, endpoint->channel_id); in ipa_endpoint_resume_one()
1621 endpoint->channel_id); in ipa_endpoint_resume_one()
1622 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1623 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_resume_one()
1652 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) in ipa_endpoint_setup_one() argument
1654 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1655 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1658 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1661 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); in ipa_endpoint_setup_one()
1662 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1666 endpoint->replenish_enabled = false; in ipa_endpoint_setup_one()
1667 atomic_set(&endpoint->replenish_saved, in ipa_endpoint_setup_one()
1668 gsi_channel_tre_max(gsi, endpoint->channel_id)); in ipa_endpoint_setup_one()
1669 atomic_set(&endpoint->replenish_backlog, 0); in ipa_endpoint_setup_one()
1670 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1674 ipa_endpoint_program(endpoint); in ipa_endpoint_setup_one()
1676 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1679 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) in ipa_endpoint_teardown_one() argument
1681 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1683 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1684 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1686 ipa_endpoint_reset(endpoint); in ipa_endpoint_teardown_one()
1699 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1712 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1730 * endpoint numbers started with 0 and RX endpoints had numbers in ipa_endpoint_config()
1735 * just set the available mask to support any endpoint, and in ipa_endpoint_config()
1766 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", in ipa_endpoint_config()
1774 struct ipa_endpoint *endpoint; in ipa_endpoint_config() local
1779 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1780 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { in ipa_endpoint_config()
1781 dev_err(dev, "endpoint id %u wrong direction\n", in ipa_endpoint_config()
1798 struct ipa_endpoint *endpoint; in ipa_endpoint_init_one() local
1800 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1803 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1804 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1806 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1807 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1808 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1809 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1810 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1811 endpoint->data = &data->endpoint.config; in ipa_endpoint_init_one()
1813 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
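Initialization is pure bookkeeping: record the endpoint in the channel and name maps, copy identity fields from the configuration data, and mark it in the initialized bitmap. A reconstruction stitched from the fragments above:

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
                                  const struct ipa_gsi_endpoint_data *data)
{
        struct ipa_endpoint *endpoint;

        endpoint = &ipa->endpoint[data->endpoint_id];

        if (data->ee_id == GSI_EE_AP)
                ipa->channel_map[data->channel_id] = endpoint;
        ipa->name_map[name] = endpoint;

        endpoint->ipa = ipa;
        endpoint->ee_id = data->ee_id;
        endpoint->channel_id = data->channel_id;
        endpoint->endpoint_id = data->endpoint_id;
        endpoint->toward_ipa = data->toward_ipa;
        endpoint->data = &data->endpoint.config;

        ipa->initialized |= BIT(endpoint->endpoint_id);
}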
1816 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) in ipa_endpoint_exit_one() argument
1818 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1820 memset(endpoint, 0, sizeof(*endpoint)); in ipa_endpoint_exit_one()
1832 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1857 if (data->endpoint.filter_support) in ipa_endpoint_init()