Lines Matching full:ipa
16 #include "ipa.h"
81 * IPA hardware as a number of KB. We don't use "hard byte in ipa_endpoint_validate_build()
102 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
107 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
183 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
187 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
217 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_data_valid()
225 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
247 * Note that suspend is not supported starting with IPA v4.0.
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
258 /* Suspend is not supported for IPA v4.0+. Delay doesn't work in ipa_endpoint_init_ctrl()
259 * correctly on IPA v4.2. in ipa_endpoint_init_ctrl()
262 * assert(ipa->version != IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
264 * assert(ipa->version == IPA_VERSION_3_5_1); in ipa_endpoint_init_ctrl()
268 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
273 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
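The lines around ipa_endpoint_init_ctrl() (253-273) show a read-modify-write of a per-endpoint control register: a suspend bit for RX endpoints (only meaningful through IPA v3.5.1) or a delay bit for TX endpoints (broken on v4.2). Below is a minimal stand-alone model of that pattern; the register array and the bit positions are illustrative assumptions, not the hardware layout.

/* Hypothetical stand-alone model of the read-modify-write pattern seen in
 * ipa_endpoint_init_ctrl(); bit positions and the register store are assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENDP_DELAY_BIT		(1u << 0)	/* TX: delay mode (assumed position) */
#define ENDP_SUSPEND_BIT	(1u << 4)	/* RX: suspend mode (assumed position) */

static uint32_t fake_regs[32];			/* stands in for ipa->reg_virt */

/* Set or clear the delay (TX) or suspend (RX) bit for one endpoint */
static bool endpoint_init_ctrl(uint32_t endpoint_id, bool toward_ipa, bool enable)
{
	uint32_t mask = toward_ipa ? ENDP_DELAY_BIT : ENDP_SUSPEND_BIT;
	uint32_t val = fake_regs[endpoint_id];	/* ioread32() in the driver */
	bool state = !!(val & mask);

	if (enable)
		val |= mask;
	else
		val &= ~mask;
	fake_regs[endpoint_id] = val;		/* iowrite32() in the driver */

	return state;				/* previous state, as the driver returns */
}

int main(void)
{
	bool was_enabled = endpoint_init_ctrl(3, false, true);

	printf("suspend previously %s\n", was_enabled ? "on" : "off");
	return 0;
}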
285 /* Delay mode doesn't work properly for IPA v4.2 */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
297 /* assert(mask & ipa->available); */ in ipa_endpoint_aggr_active()
298 offset = ipa_reg_state_aggr_active_offset(ipa->version); in ipa_endpoint_aggr_active()
299 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_aggr_active()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
309 /* assert(mask & ipa->available); */ in ipa_endpoint_force_close()
310 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); in ipa_endpoint_force_close()
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
319 * issue in IPA version 3.5.1 where the suspend interrupt will not be
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
336 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
346 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
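Lines 317-353 describe the IPA v3.5.1 quirk behind ipa_endpoint_suspend_aggr() and ipa_endpoint_program_suspend(): if an RX endpoint is suspended while aggregation is open, the hardware will not raise the SUSPEND interrupt, so the driver force-closes aggregation and simulates the interrupt itself. The following is a hedged sketch of that control flow; the helpers are stubs standing in for driver functions and the decision logic is a simplification, not the driver's exact code.

/* Simplified model of the v3.5.1 suspend workaround suggested by lines 317-353. */
#include <stdbool.h>
#include <stdio.h>

enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

struct endpoint {
	enum ipa_version version;
	bool toward_ipa;	/* true for TX (AP->IPA), false for RX */
	bool aggregation;	/* endpoint is configured for aggregation */
};

static bool aggr_active(struct endpoint *ep)           { return ep->aggregation; }
static void force_close(struct endpoint *ep)           { puts("force-close aggregation"); }
static void simulate_suspend_irq(struct endpoint *ep)  { puts("simulate SUSPEND interrupt"); }
static bool set_suspend_bit(struct endpoint *ep, bool on) { return false; /* prior state */ }

/* Returns the previous suspend state, mirroring the driver's convention */
static bool program_suspend(struct endpoint *ep, bool enable)
{
	bool suspended;

	if (ep->version != IPA_VERSION_3_5_1)
		return enable;		/* For IPA v4.0+, no change made */

	suspended = set_suspend_bit(ep, enable);

	/* If suspend was just enabled while aggregation was open, the hardware
	 * won't signal it; close aggregation and fake the interrupt instead.
	 */
	if (enable && !suspended && !ep->toward_ipa && aggr_active(ep)) {
		force_close(ep);
		simulate_suspend_irq(ep);
	}
	return suspended;
}

int main(void)
{
	struct endpoint ep = { IPA_VERSION_3_5_1, false, true };

	program_suspend(&ep, true);
	return 0;
}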
363 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
367 /* DELAY mode doesn't work correctly on IPA v4.2 */ in ipa_endpoint_modem_pause_all()
368 if (ipa->version == IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
386 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
388 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
393 * bound on that by assuming all initialized endpoints are modem->IPA. in ipa_endpoint_modem_exception_reset_all()
398 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
400 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
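In ipa_endpoint_modem_exception_reset_all() (lines 386-413) the command count is bounded by assuming every initialized endpoint is modem->IPA; since ipa->initialized is a bitmask, that bound is its population count, and the loop then walks the set bits. A small stand-alone illustration of that bitmask bookkeeping follows, using plain C in place of the kernel's hweight32()/__ffs() helpers.

/* Stand-alone illustration of the bitmask bookkeeping used throughout this
 * listing (ipa->initialized, ipa->enabled and ipa->set_up are u32 masks).
 */
#include <stdint.h>
#include <stdio.h>

/* Upper bound on commands: one per set bit (the kernel uses hweight32()) */
static unsigned int popcount32(uint32_t mask)
{
	unsigned int count = 0;

	while (mask) {
		mask &= mask - 1;	/* clear the lowest set bit */
		count++;
	}
	return count;
}

int main(void)
{
	uint32_t initialized = 0x0000061d;	/* example endpoint mask */

	printf("command bound: %u\n", popcount32(initialized));

	/* Walk endpoints the way the driver's while-loops do: peel off the
	 * lowest set bit (the kernel uses __ffs()) until the mask is empty.
	 */
	while (initialized) {
		uint32_t endpoint_id = __builtin_ctz(initialized);

		initialized ^= 1u << endpoint_id;
		printf("visit endpoint %u\n", endpoint_id);
	}
	return 0;
}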
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
470 * packet size field, and we have the IPA hardware populate both for each
502 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
506 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
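The ipa_endpoint_init_hdr() lines (470-520) describe telling the hardware where within the header it should write the metadata value and the packet length, all packed into one 32-bit register value. A generic, hedged sketch of that field-packing style is shown below; the shifts and widths are made up for illustration and do not reproduce the IPA register layout.

/* Generic field-packing sketch in the style of ipa_endpoint_init_hdr();
 * all field positions below are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD(val, shift, width)  (((uint32_t)(val) & ((1u << (width)) - 1)) << (shift))

int main(void)
{
	uint32_t val = 0;

	val |= FIELD(4, 0, 6);	/* header length in bytes (assumed field) */
	val |= FIELD(1, 6, 1);	/* metadata offset valid (assumed field) */
	val |= FIELD(0, 7, 6);	/* where IPA will write the metadata value (assumed) */
	val |= FIELD(1, 13, 1);	/* packet-length offset valid (assumed field) */
	val |= FIELD(2, 14, 6);	/* where IPA will write the length (assumed) */

	printf("INIT_HDR value: 0x%08x\n", val);	/* then iowrite32() in the driver */
	return 0;
}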
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
646 * tick represents 128 cycles of the IPA core clock. Return the value
650 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) in ipa_reg_init_hol_block_timer_val() argument
663 rate = ipa_clock_rate(ipa); in ipa_reg_init_hol_block_timer_val()
668 /* IPA v3.5.1 just records the tick count */ in ipa_reg_init_hol_block_timer_val()
669 if (ipa->version == IPA_VERSION_3_5_1) in ipa_reg_init_hol_block_timer_val()
672 /* For IPA v4.2, the tick count is represented by base and in ipa_reg_init_hol_block_timer_val()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
707 val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); in ipa_endpoint_init_hol_block_timer()
708 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_timer()
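ipa_reg_init_hol_block_timer_val() (lines 646-672) converts a microsecond timeout into ticks of the IPA core clock, where one tick is 128 clock cycles; IPA v3.5.1 programs the raw tick count, while v4.2 splits it into a base and scale. The arithmetic for the plain tick count is ticks = microseconds * rate / (128 * 1,000,000). A minimal sketch of that conversion follows, assuming a clock rate in Hz; the v4.2 base/exponent encoding mentioned in the listing is not modeled.

/* Microseconds -> IPA core-clock ticks, one tick = 128 core clock cycles. */
#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_TICK		128u
#define USEC_PER_SEC		1000000u

static uint32_t hol_block_timer_ticks(uint32_t core_clock_hz, uint32_t microseconds)
{
	/* 64-bit intermediate so rate * microseconds cannot overflow */
	uint64_t cycles = (uint64_t)core_clock_hz * microseconds / USEC_PER_SEC;

	/* Round to the nearest whole tick */
	return (uint32_t)((cycles + CYCLES_PER_TICK / 2) / CYCLES_PER_TICK);
}

int main(void)
{
	/* e.g. a 100 MHz core clock (illustrative rate) and a 500 us timeout */
	printf("%u ticks\n", hol_block_timer_ticks(100000000u, 500u));
	return 0;
}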
720 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
723 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
728 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
751 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
770 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
819 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
832 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
838 /* The next field is present for IPA v4.0 and above */ in ipa_endpoint_status()
842 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_status()
930 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
938 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
1074 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1157 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1160 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1174 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1185 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); in ipa_endpoint_default_route_set()
1188 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1190 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1199 * taken to ensure the IPA pipeline is properly cleared.
1205 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1207 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1272 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset_rx_aggr()
1294 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1299 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1303 * IPA v3.5.1 enables the doorbell engine. Newer versions do not. in ipa_endpoint_reset()
1305 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset()
1310 gsi_channel_reset(&ipa->gsi, channel_id, legacy); in ipa_endpoint_reset()
1313 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
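The ipa_endpoint_reset() lines (1294-1313) show two version-specific facts: on IPA v3.5.1 an RX endpoint reset while aggregation is active needs the special ipa_endpoint_reset_rx_aggr() path to clear the pipeline, and v3.5.1 is also the version whose GSI channel reset enables the doorbell engine (the legacy flag). Below is a hedged sketch of just that dispatch decision; both reset paths are stubs and the structure fields are simplified, so this is not the driver's exact logic.

/* Sketch of the reset dispatch implied by lines 1294-1313. */
#include <stdbool.h>
#include <stdio.h>

enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

struct endpoint {
	enum ipa_version version;
	unsigned int channel_id;
	bool toward_ipa;	/* false for an RX (IPA->AP) endpoint */
	bool aggregation;	/* endpoint configured for aggregation */
};

static bool aggr_active(const struct endpoint *ep)  { return ep->aggregation; }
static void reset_rx_aggr(const struct endpoint *ep) { puts("special RX-aggr reset"); }
static void channel_reset(unsigned int channel_id, bool legacy)
{
	printf("reset channel %u (doorbell engine %s)\n",
	       channel_id, legacy ? "enabled" : "disabled");
}

static void endpoint_reset(struct endpoint *ep)
{
	/* Only IPA v3.5.1 enables the doorbell engine on channel reset */
	bool legacy = ep->version == IPA_VERSION_3_5_1;
	/* RX endpoints with aggregation open need the special recovery path */
	bool special = legacy && !ep->toward_ipa && ep->aggregation;

	if (special && aggr_active(ep))
		reset_rx_aggr(ep);
	else
		channel_reset(ep->channel_id, legacy);
}

int main(void)
{
	struct endpoint ep = { IPA_VERSION_3_5_1, 7, false, true };

	endpoint_reset(&ep);
	return 0;
}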
1337 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1338 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1343 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1351 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1356 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1364 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1365 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1368 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1371 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1375 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1382 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1389 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1390 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1394 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1402 /* IPA v3.5.1 doesn't use channel stop for suspend */ in ipa_endpoint_suspend_one()
1403 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1412 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1413 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1417 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1423 /* IPA v3.5.1 doesn't use channel start for resume */ in ipa_endpoint_resume_one()
1424 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1433 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1435 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1438 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1439 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1441 ipa_cmd_tag_process(ipa); in ipa_endpoint_suspend()
1443 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1444 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1447 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1449 if (!ipa->setup_complete) in ipa_endpoint_resume()
1452 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1453 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1455 if (ipa->modem_netdev) in ipa_endpoint_resume()
1456 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1461 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1483 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1488 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1496 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1498 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1500 ipa->set_up = 0; in ipa_endpoint_setup()
1506 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1510 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1512 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1519 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1521 ipa->set_up = 0; in ipa_endpoint_teardown()
1524 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1526 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1538 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); in ipa_endpoint_config()
1540 /* Our RX is an IPA producer */ in ipa_endpoint_config()
1550 /* Our TX is an IPA consumer */ in ipa_endpoint_config()
1554 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
1557 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1559 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1563 initialized = ipa->initialized; in ipa_endpoint_config()
1571 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
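In ipa_endpoint_config() (lines 1524-1571) the driver reads the FLAVOR_0 register to learn which endpoint IDs the hardware provides, builds ipa->available as the union of an RX mask and a TX mask, and then complains if any initialized endpoint falls outside that set. A small model of the mask construction and the consistency check follows; the FLAVOR_0 field layout is not reproduced, so the RX/TX ranges are passed in as plain counts.

/* Model of the available/initialized consistency check in ipa_endpoint_config(). */
#include <stdint.h>
#include <stdio.h>

/* Mask with bits [base, base + count) set, e.g. range_mask(10, 4) == 0x3c00 */
static uint32_t range_mask(unsigned int base, unsigned int count)
{
	return ((count < 32 ? (1u << count) : 0u) - 1u) << base;
}

int main(void)
{
	unsigned int tx_count = 10;			/* TX endpoints start at ID 0 */
	unsigned int rx_base = 10, rx_count = 12;	/* RX endpoints follow */
	uint32_t available, initialized, bad;

	/* Our TX is an IPA consumer, our RX is an IPA producer */
	available = range_mask(0, tx_count) | range_mask(rx_base, rx_count);

	initialized = 0x0040061d;		/* example: one endpoint ID too high */
	bad = initialized & ~available;
	if (bad)
		printf("unavailable endpoint id(s) 0x%08x\n", bad);

	return 0;
}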
1582 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1584 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1587 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
1592 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1595 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1596 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1598 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1606 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1611 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1616 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
1618 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1625 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1627 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1628 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1632 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
1638 if (!ipa_endpoint_data_valid(ipa, count, data)) in ipa_endpoint_init()
1641 ipa->initialized = 0; in ipa_endpoint_init()
1648 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
1654 if (!ipa_filter_map_valid(ipa, filter_map)) in ipa_endpoint_init()
1660 ipa_endpoint_exit(ipa); in ipa_endpoint_init()