Lines matching full:endpoint (each result shows the source line number, the matching text, and the enclosing function)

114 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
116 "RX endpoint %u\n", in ipa_endpoint_data_valid_one()
124 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
125 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
127 dev_err(dev, "status endpoint name %u out of range " in ipa_endpoint_data_valid_one()
128 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
133 /* Status endpoint must be defined... */ in ipa_endpoint_data_valid_one()
136 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
137 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
142 /* ...and has to be an RX endpoint... */ in ipa_endpoint_data_valid_one()
145 "status endpoint for endpoint %u not RX\n", in ipa_endpoint_data_valid_one()
150 /* ...and if it's to be an AP endpoint... */ in ipa_endpoint_data_valid_one()
153 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
155 "status not enabled for endpoint %u\n", in ipa_endpoint_data_valid_one()
162 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
163 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
165 dev_err(dev, "DMA endpoint name %u out of range " in ipa_endpoint_data_valid_one()
166 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
173 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
174 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
200 dev_err(dev, "command TX endpoint not defined\n"); in ipa_endpoint_data_valid()
204 dev_err(dev, "LAN RX endpoint not defined\n"); in ipa_endpoint_data_valid()
208 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
212 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
233 /* Allocate a transaction to use on a non-command endpoint */
234 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_alloc() argument
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
238 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
241 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
250 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) in ipa_endpoint_init_ctrl() argument
252 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl()
261 * if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
266 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; in ipa_endpoint_init_ctrl()
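
ipa_endpoint_init_ctrl() picks ENDP_DELAY_FMASK for TX endpoints and ENDP_SUSPEND_FMASK for RX, then read-modify-writes the per-endpoint CTRL register. A minimal standalone model of that pattern follows; the bit positions and the in-memory register stand-in are assumptions for illustration. Returning the previous state matters because ipa_endpoint_program_suspend() (below) uses it to detect an endpoint that was not already suspended.

#include <stdbool.h>
#include <stdint.h>

#define ENDP_DELAY_BIT   (1u << 0)	/* assumed positions, illustration only */
#define ENDP_SUSPEND_BIT (1u << 1)

static uint32_t fake_ctrl_reg;		/* stands in for the mapped register */

static bool init_ctrl(bool toward_ipa, bool suspend_delay)
{
	uint32_t mask = toward_ipa ? ENDP_DELAY_BIT : ENDP_SUSPEND_BIT;
	uint32_t val = fake_ctrl_reg;	/* ioread32() in the driver */
	bool state = !!(val & mask);	/* previous delay/suspend state */

	if (suspend_delay != state) {
		val ^= mask;		/* toggle to the requested state */
		fake_ctrl_reg = val;	/* iowrite32() in the driver */
	}

	return state;
}
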
281 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_delay() argument
283 /* assert(endpoint->toward_ipa); */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
287 (void)ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_delay()
290 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) in ipa_endpoint_aggr_active() argument
292 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active()
304 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) in ipa_endpoint_force_close() argument
306 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close()
315 * @endpoint: Endpoint on which to emulate a suspend
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
322 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_aggr() argument
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr()
326 if (!endpoint->data->aggregation) in ipa_endpoint_suspend_aggr()
329 /* Nothing to do if the endpoint doesn't have aggregation open */ in ipa_endpoint_suspend_aggr()
330 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_suspend_aggr()
334 ipa_endpoint_force_close(endpoint); in ipa_endpoint_suspend_aggr()
341 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_suspend() argument
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
348 /* assert(!endpoint->toward_ipa); */ in ipa_endpoint_program_suspend()
350 suspended = ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_suspend()
357 ipa_endpoint_suspend_aggr(endpoint); in ipa_endpoint_program_suspend()
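
Pulling the ipa_endpoint_program_suspend() fragments together: only IPA v3.5.1 programs suspend at all, and an endpoint that was not already suspended may have an aggregation frame open, in which case no SUSPEND interrupt will ever arrive. This reconstruction is assembled from the lines above and recalled context, not copied verbatim:

static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change is made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A channel suspended with an open aggregation frame raises no
	 * SUSPEND interrupt; emulate it by force-closing the frame.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}
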
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all() local
374 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
378 if (endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
379 ipa_endpoint_program_delay(endpoint, enable); in ipa_endpoint_modem_pause_all()
381 (void)ipa_endpoint_program_suspend(endpoint, enable); in ipa_endpoint_modem_pause_all()
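
Reassembled, the pause loop is simple: every modem-owned endpoint gets TX "delay" mode or RX "suspend" mode. A reconstruction from the fragments above; the IPA_ENDPOINT_MAX loop bound is assumed:

void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;	/* only modem endpoints are paused */

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}
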
385 /* Reset all modem endpoints to use the default exception endpoint */
392 /* We need one command per modem TX endpoint. We can get an upper in ipa_endpoint_modem_exception_reset_all()
407 struct ipa_endpoint *endpoint; in ipa_endpoint_modem_exception_reset_all() local
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
414 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
420 * means status is disabled on the endpoint, and as a in ipa_endpoint_modem_exception_reset_all()
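
The truncated comment at line 392 sizes one command per modem TX endpoint so a single transaction can carry them all, and line 414 shows the ownership/direction filter. A standalone sketch of that count-then-batch shape, with a hypothetical bitmap and struct standing in for the driver's state:

#include <stdbool.h>
#include <stdint.h>

struct ep {
	bool is_modem;		/* owned by the modem execution environment */
	bool toward_ipa;	/* TX if true */
};

/* Upper bound on commands needed: one per modem TX endpoint */
static unsigned int modem_tx_count(uint32_t initialized, const struct ep *eps)
{
	unsigned int count = 0;

	while (initialized) {
		unsigned int id = (unsigned int)__builtin_ctz(initialized);

		initialized &= initialized - 1;	/* clear lowest set bit */
		if (eps[id].is_modem && eps[id].toward_ipa)
			count++;
	}

	return count;
}
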
434 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) in ipa_endpoint_init_cfg() argument
436 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_cfg()
440 if (endpoint->data->checksum) { in ipa_endpoint_init_cfg()
441 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
466 * @endpoint: Endpoint pointer
480 * endpoint's METADATA_MASK register defines which byte within the modem
485 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr() argument
487 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr()
490 if (endpoint->data->qmap) { in ipa_endpoint_init_hdr()
494 if (endpoint->toward_ipa && endpoint->data->checksum) in ipa_endpoint_init_hdr()
499 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
523 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_ext() argument
525 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr_ext()
526 u32 pad_align = endpoint->data->rx.pad_align; in ipa_endpoint_init_hdr_ext()
537 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
545 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
552 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_metadata_mask() argument
554 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
558 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
564 if (endpoint->data->qmap) in ipa_endpoint_init_hdr_metadata_mask()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
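
The HDR kernel-doc above (lines 465-480) says received QMAP metadata is a 32-bit value and that the endpoint's METADATA_MASK register selects which byte of it, the QMAP mux_id in the modem's case, is kept. A standalone illustration of that byte masking; the mask value here is an assumption, not the documented register layout:

#include <stdint.h>

static uint8_t qmap_mux_id(uint32_t metadata)
{
	uint32_t metadata_mask = 0x000000ff;	/* assumed: keep the mux_id byte */

	return (uint8_t)(metadata & metadata_mask);
}
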
570 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) in ipa_endpoint_init_mode() argument
572 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_mode()
575 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
578 if (endpoint->data->dma_mode) { in ipa_endpoint_init_mode()
579 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; in ipa_endpoint_init_mode()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
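
ipa_endpoint_init_mode() applies only to TX endpoints: in DMA mode the configured destination name is resolved through name_map to an endpoint ID and packed into the MODE register. A reconstruction from the fragments above; the u32_encode_bits() calls and field-mask names are recalled context and should be checked against the register definitions:

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register valid only for TX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
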
606 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_aggr() argument
608 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_aggr()
611 if (endpoint->data->aggregation) { in ipa_endpoint_init_aggr()
612 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
627 if (endpoint->data->rx.aggr_close_eof) in ipa_endpoint_init_aggr()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
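
ipa_endpoint_init_aggr() packs the RX aggregation parameters (enable/type, byte and time limits, close-on-EOF) into a single value before the iowrite32() at line 642. A standalone model of that pack-then-write pattern; every shift, width, and field name below is illustrative, not the hardware layout:

#include <stdint.h>

#define FIELD(val, shift, width) \
	(((uint32_t)(val) & ((1u << (width)) - 1)) << (shift))

static uint32_t aggr_reg_value(int enabled, unsigned int byte_limit,
			       unsigned int time_limit, int close_eof)
{
	uint32_t val = 0;

	if (!enabled)
		return val;			/* bypass: aggregation off */

	val |= FIELD(1, 0, 2);			/* AGGR_EN: general aggregation */
	val |= FIELD(byte_limit, 5, 5);		/* AGGR_BYTE_LIMIT */
	val |= FIELD(time_limit, 10, 5);	/* AGGR_TIME_LIMIT */
	if (close_eof)
		val |= FIELD(1, 21, 1);		/* AGGR_SW_EOF_ACTIVE */

	return val;
}
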
698 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, in ipa_endpoint_init_hol_block_timer() argument
701 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer()
712 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_init_hol_block_enable() argument
714 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_enable()
720 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
728 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all() local
730 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
733 ipa_endpoint_init_hol_block_timer(endpoint, 0); in ipa_endpoint_modem_hol_block_clear_all()
734 ipa_endpoint_init_hol_block_enable(endpoint, true); in ipa_endpoint_modem_hol_block_clear_all()
738 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_deaggr() argument
740 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_deaggr()
743 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
751 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
754 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) in ipa_endpoint_init_seq() argument
756 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_seq()
757 u32 seq_type = endpoint->seq_type; in ipa_endpoint_init_seq()
760 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
770 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
775 * @endpoint: Endpoint pointer
780 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) in ipa_endpoint_skb_tx() argument
786 /* Make sure source endpoint's TLV FIFO has enough entries to in ipa_endpoint_skb_tx()
791 if (1 + nr_frags > endpoint->trans_tre_max) { in ipa_endpoint_skb_tx()
797 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); in ipa_endpoint_skb_tx()
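
The guard at line 791 keeps a transaction within the source endpoint's TLV FIFO capacity: one TRE for the linear data plus one per page fragment, falling back to skb_linearize() when that exceeds trans_tre_max. The whole function, reconstructed from the fragments above plus recalled context; the helper names not shown in the listing (gsi_trans_skb_add(), gsi_trans_commit(), gsi_trans_free()) should be verified against the GSI code:

int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns the skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
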
816 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) in ipa_endpoint_status() argument
818 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
819 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status()
825 if (endpoint->data->status_enable) { in ipa_endpoint_status()
827 if (endpoint->toward_ipa) { in ipa_endpoint_status()
831 name = endpoint->data->tx.status_endpoint; in ipa_endpoint_status()
845 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_one() argument
858 trans = ipa_endpoint_trans_alloc(endpoint, 1); in ipa_endpoint_replenish_one()
871 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { in ipa_endpoint_replenish_one()
873 endpoint->replenish_ready = 0; in ipa_endpoint_replenish_one()
890 * @endpoint: Endpoint to be replenished
894 * for an endpoint. These are supplied to the hardware, which fills
897 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count) in ipa_endpoint_replenish() argument
902 if (!endpoint->replenish_enabled) { in ipa_endpoint_replenish()
904 atomic_add(count, &endpoint->replenish_saved); in ipa_endpoint_replenish()
909 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) in ipa_endpoint_replenish()
910 if (ipa_endpoint_replenish_one(endpoint)) in ipa_endpoint_replenish()
913 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
919 backlog = atomic_inc_return(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
922 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
930 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
931 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
932 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
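
The replenish machinery keeps two counters: replenish_backlog, buffers the hardware still needs, and replenish_saved, where that count is parked while replenishing is disabled (lines 902-913 and 943-958). A standalone C11 model of the bookkeeping; dec_not_zero() stands in for the atomic_dec_not_zero() guard at line 909, and the delayed-work rescheduling at line 932 is elided:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint backlog;	/* buffers still owed to the hardware */
static atomic_uint saved;	/* backlog parked while disabled */
static bool replenish_enabled;

static bool dec_not_zero(atomic_uint *v)
{
	unsigned int old = atomic_load(v);

	while (old && !atomic_compare_exchange_weak(v, &old, old - 1))
		;		/* retry; "old" is refreshed on CAS failure */

	return old != 0;
}

static bool replenish_one(void)
{
	return true;		/* supply one receive buffer (stub) */
}

static void replenish(unsigned int count)
{
	if (!replenish_enabled) {
		if (count)
			atomic_fetch_add(&saved, count);	/* park it */
		return;
	}

	while (dec_not_zero(&backlog))
		if (!replenish_one()) {
			/* Last one didn't succeed, so fix the backlog
			 * (cf. atomic_inc_return() at line 919) */
			atomic_fetch_add(&backlog, 1);
			break;
		}

	if (count)
		atomic_fetch_add(&backlog, count);
}
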
936 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_enable() argument
938 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
942 endpoint->replenish_enabled = true; in ipa_endpoint_replenish_enable()
943 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) in ipa_endpoint_replenish_enable()
944 atomic_add(saved, &endpoint->replenish_backlog); in ipa_endpoint_replenish_enable()
947 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); in ipa_endpoint_replenish_enable()
948 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) in ipa_endpoint_replenish_enable()
949 ipa_endpoint_replenish(endpoint, 0); in ipa_endpoint_replenish_enable()
952 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_disable() argument
956 endpoint->replenish_enabled = false; in ipa_endpoint_replenish_disable()
957 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
958 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
964 struct ipa_endpoint *endpoint; in ipa_endpoint_replenish_work() local
966 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); in ipa_endpoint_replenish_work()
968 ipa_endpoint_replenish(endpoint, 0); in ipa_endpoint_replenish_work()
971 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_copy() argument
984 if (endpoint->netdev) in ipa_endpoint_skb_copy()
985 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
990 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_build() argument
996 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1008 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1029 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, in ipa_endpoint_status_skip() argument
1040 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1061 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, in ipa_endpoint_status_parse() argument
1074 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1081 if (ipa_endpoint_status_skip(endpoint, status)) { in ipa_endpoint_status_parse()
1094 align = endpoint->data->rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1097 if (endpoint->data->checksum) in ipa_endpoint_status_parse()
1110 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); in ipa_endpoint_status_parse()
1120 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_tx_complete() argument
1126 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_rx_complete() argument
1131 ipa_endpoint_replenish(endpoint, 1); in ipa_endpoint_rx_complete()
1138 if (endpoint->data->status_enable) in ipa_endpoint_rx_complete()
1139 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_rx_complete()
1140 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_rx_complete()
1144 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_complete() argument
1147 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1148 ipa_endpoint_tx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1150 ipa_endpoint_rx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1153 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_release() argument
1156 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1157 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release()
1160 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1194 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1195 * @endpoint: Endpoint to be reset
1197 * If aggregation is active on an RX endpoint when a reset is performed
1203 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_reset_rx_aggr() argument
1205 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr()
1227 ipa_endpoint_force_close(endpoint); in ipa_endpoint_reset_rx_aggr()
1234 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1237 suspended = ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_reset_rx_aggr()
1240 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1244 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1251 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1257 if (ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1258 dev_err(dev, "endpoint %u still active during reset\n", in ipa_endpoint_reset_rx_aggr()
1259 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1261 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1263 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1273 gsi_channel_reset(gsi, endpoint->channel_id, legacy); in ipa_endpoint_reset_rx_aggr()
1280 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1283 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_reset_rx_aggr()
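
ipa_endpoint_reset_rx_aggr() works around aggregation being open during reset by pulling a single byte through the channel so the hardware closes the frame first. A condensed reconstruction from the fragments above; buffer allocation, DMA mapping, and some error unwinding are elided, and the retry budget and the final "legacy" reset flag are assumptions:

static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool legacy = endpoint->ipa->version == IPA_VERSION_3_5_1; /* assumed */
	dma_addr_t addr = 0;	/* stand-in for a DMA-mapped scratch buffer */
	int retries = 3;	/* assumed retry budget */
	bool suspended;
	int ret;

	ipa_endpoint_force_close(endpoint);	/* close any open frame */

	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start the channel and pull one byte through it */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_channel_stop;

	/* Wait for aggregation to close on the channel */
	while (ipa_endpoint_aggr_active(endpoint) && retries--)
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(&endpoint->ipa->pdev->dev,
			"endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, do the reset the caller actually asked for */
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);

	return ret;

err_channel_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
	goto out_suspend_again;
}
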
1291 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) in ipa_endpoint_reset() argument
1293 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1294 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset()
1299 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1306 special = !endpoint->toward_ipa && endpoint->data->aggregation; in ipa_endpoint_reset()
1307 if (special && ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset()
1308 ret = ipa_endpoint_reset_rx_aggr(endpoint); in ipa_endpoint_reset()
1314 "error %d resetting channel %u for endpoint %u\n", in ipa_endpoint_reset()
1315 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1318 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) in ipa_endpoint_program() argument
1320 if (endpoint->toward_ipa) in ipa_endpoint_program()
1321 ipa_endpoint_program_delay(endpoint, false); in ipa_endpoint_program()
1323 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_program()
1324 ipa_endpoint_init_cfg(endpoint); in ipa_endpoint_program()
1325 ipa_endpoint_init_hdr(endpoint); in ipa_endpoint_program()
1326 ipa_endpoint_init_hdr_ext(endpoint); in ipa_endpoint_program()
1327 ipa_endpoint_init_hdr_metadata_mask(endpoint); in ipa_endpoint_program()
1328 ipa_endpoint_init_mode(endpoint); in ipa_endpoint_program()
1329 ipa_endpoint_init_aggr(endpoint); in ipa_endpoint_program()
1330 ipa_endpoint_init_deaggr(endpoint); in ipa_endpoint_program()
1331 ipa_endpoint_init_seq(endpoint); in ipa_endpoint_program()
1332 ipa_endpoint_status(endpoint); in ipa_endpoint_program()
1335 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_enable_one() argument
1337 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one()
1341 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1344 "error %d starting %cX channel %u for endpoint %u\n", in ipa_endpoint_enable_one()
1345 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1346 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1350 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1352 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1353 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_enable_one()
1356 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1361 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_disable_one() argument
1363 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1364 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one()
1373 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1374 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_disable_one()
1376 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1380 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1383 "error %d attempting to stop endpoint %u\n", ret, in ipa_endpoint_disable_one()
1384 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1387 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_one() argument
1389 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1390 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1394 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1397 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1398 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_suspend_one()
1399 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_suspend_one()
1403 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1404 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); in ipa_endpoint_suspend_one()
1407 endpoint->channel_id); in ipa_endpoint_suspend_one()
1410 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) in ipa_endpoint_resume_one() argument
1412 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1413 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1417 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1420 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1421 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_resume_one()
1424 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1425 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); in ipa_endpoint_resume_one()
1428 endpoint->channel_id); in ipa_endpoint_resume_one()
1429 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1430 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_resume_one()
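
Both ipa_endpoint_suspend_one() and ipa_endpoint_resume_one() gate on the hardware version: only IPA v3.5.1 has a usable channel suspend, so other versions stop and later restart the channel instead (lines 1403 and 1424). The suspend side, reconstructed from the fragments above:

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;		/* endpoint not enabled; nothing to do */

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 suspends the channel; later versions stop it */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}
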
1459 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) in ipa_endpoint_setup_one() argument
1461 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1462 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1465 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1468 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); in ipa_endpoint_setup_one()
1469 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1473 endpoint->replenish_enabled = false; in ipa_endpoint_setup_one()
1474 atomic_set(&endpoint->replenish_saved, in ipa_endpoint_setup_one()
1475 gsi_channel_tre_max(gsi, endpoint->channel_id)); in ipa_endpoint_setup_one()
1476 atomic_set(&endpoint->replenish_backlog, 0); in ipa_endpoint_setup_one()
1477 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1481 ipa_endpoint_program(endpoint); in ipa_endpoint_setup_one()
1483 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1486 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) in ipa_endpoint_teardown_one() argument
1488 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1490 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1491 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1493 ipa_endpoint_reset(endpoint); in ipa_endpoint_teardown_one()
1506 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1519 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1558 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", in ipa_endpoint_config()
1566 struct ipa_endpoint *endpoint; in ipa_endpoint_config() local
1571 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1572 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { in ipa_endpoint_config()
1573 dev_err(dev, "endpoint id %u wrong direction\n", in ipa_endpoint_config()
1590 struct ipa_endpoint *endpoint; in ipa_endpoint_init_one() local
1592 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1595 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1596 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1598 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1599 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1600 endpoint->seq_type = data->endpoint.seq_type; in ipa_endpoint_init_one()
1601 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1602 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1603 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1604 endpoint->data = &data->endpoint.config; in ipa_endpoint_init_one()
1606 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1609 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) in ipa_endpoint_exit_one() argument
1611 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1613 memset(endpoint, 0, sizeof(*endpoint)); in ipa_endpoint_exit_one()
1625 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1650 if (data->endpoint.filter_support) in ipa_endpoint_init()