Lines matching full:gsi

18 #include "gsi.h"
28 * The generic software interface (GSI) is an integral component of the IPA,
30 * and the IPA core. The modem uses the GSI layer as well.
40 * | GSI |
51 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
63 * Each channel has a GSI "event ring" associated with it. An event ring
67 * The GSI then writes its doorbell for the event ring, causing the target
83 * Note that all GSI registers are little-endian, which is the assumed
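
The matches below are almost entirely 32-bit accesses against this little-endian register space. A minimal sketch of the access pattern, assuming only that gsi->virt holds the ioremap()ed base of the GSI register block (set up in gsi_init() near the end of this listing); the wrapper names are hypothetical, the driver calls ioread32()/iowrite32() directly:

    /* ioread32()/iowrite32() perform any byte swapping the CPU requires,
     * so the little-endian register layout is transparent to callers.
     */
    static u32 gsi_reg_read(struct gsi *gsi, u32 offset)
    {
            return ioread32(gsi->virt + offset);
    }

    static void gsi_reg_write(struct gsi *gsi, u32 val, u32 offset)
    {
            iowrite32(val, gsi->virt + offset);
    }
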
229 return channel - &channel->gsi->channel[0]; in gsi_channel_id()
232 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) in gsi_irq_ieob_enable() argument
236 gsi->event_enable_bitmap |= BIT(evt_ring_id); in gsi_irq_ieob_enable()
237 val = gsi->event_enable_bitmap; in gsi_irq_ieob_enable()
238 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_ieob_enable()
241 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) in gsi_irq_ieob_disable() argument
245 gsi->event_enable_bitmap &= ~BIT(evt_ring_id); in gsi_irq_ieob_disable()
246 val = gsi->event_enable_bitmap; in gsi_irq_ieob_disable()
247 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_ieob_disable()
251 static void gsi_irq_enable(struct gsi *gsi) in gsi_irq_enable() argument
259 iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); in gsi_irq_enable()
261 val = GENMASK(gsi->channel_count - 1, 0); in gsi_irq_enable()
262 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); in gsi_irq_enable()
264 val = GENMASK(gsi->evt_ring_count - 1, 0); in gsi_irq_enable()
265 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); in gsi_irq_enable()
268 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_enable()
271 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); in gsi_irq_enable()
275 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); in gsi_irq_enable()
279 static void gsi_irq_disable(struct gsi *gsi) in gsi_irq_disable() argument
281 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); in gsi_irq_disable()
282 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); in gsi_irq_disable()
283 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); in gsi_irq_disable()
284 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); in gsi_irq_disable()
285 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); in gsi_irq_disable()
286 iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); in gsi_irq_disable()
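
The two IEOB helpers above show the driver's cached-mask pattern: a shadow copy of the interrupt mask register (event_enable_bitmap) is updated in memory and then written out whole, avoiding a read-modify-write of the hardware register. A combined sketch, assuming the caller serializes mask updates (the helper name is hypothetical):

    static void gsi_irq_ieob_update(struct gsi *gsi, u32 evt_ring_id,
                                    bool enable)
    {
            /* Update the shadow bitmap, then mirror it to the mask
             * register; a set bit enables that ring's IEOB interrupt.
             */
            if (enable)
                    gsi->event_enable_bitmap |= BIT(evt_ring_id);
            else
                    gsi->event_enable_bitmap &= ~BIT(evt_ring_id);

            iowrite32(gsi->event_enable_bitmap,
                      gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
    }

Note also that gsi_irq_disable() clears the same mask registers gsi_irq_enable() writes, in the reverse order.
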
308 /* Issue a GSI command by writing a value to a register, then wait for
313 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) in gsi_command() argument
317 iowrite32(val, gsi->virt + reg); in gsi_command()
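
The body of gsi_command() is mostly elided by the match filter. A plausible completion of it, assuming a GSI_CMD_TIMEOUT constant in seconds (the name is an assumption): the value written to the command register causes the GSI to raise an interrupt, and the interrupt handler calls complete() on the matching completion (see gsi_isr_chan_ctrl() and friends below).

    static bool
    gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
    {
            unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT * 1000);

            reinit_completion(completion);

            iowrite32(val, gsi->virt + reg);

            /* Nonzero means the command completed before the timeout */
            return !!wait_for_completion_timeout(completion, timeout);
    }
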
324 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_state() argument
328 val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); in gsi_evt_ring_state()
334 static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id, in evt_ring_command() argument
337 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in evt_ring_command()
339 struct device *dev = gsi->dev; in evt_ring_command()
345 if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion)) in evt_ring_command()
348 dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", in evt_ring_command()
355 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_alloc_command() argument
357 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_alloc_command()
361 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_alloc_command()
363 dev_err(gsi->dev, "bad event ring state %u before alloc\n", in gsi_evt_ring_alloc_command()
368 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); in gsi_evt_ring_alloc_command()
370 dev_err(gsi->dev, "bad event ring state %u after alloc\n", in gsi_evt_ring_alloc_command()
378 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
379 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_reset_command() argument
381 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_reset_command()
387 dev_err(gsi->dev, "bad event ring state %u before reset\n", in gsi_evt_ring_reset_command()
392 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); in gsi_evt_ring_reset_command()
394 dev_err(gsi->dev, "bad event ring state %u after reset\n", in gsi_evt_ring_reset_command()
399 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_de_alloc_command() argument
401 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_de_alloc_command()
405 dev_err(gsi->dev, "bad event ring state %u before dealloc\n", in gsi_evt_ring_de_alloc_command()
410 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); in gsi_evt_ring_de_alloc_command()
412 dev_err(gsi->dev, "bad event ring state %u after dealloc\n", in gsi_evt_ring_de_alloc_command()
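
The three event ring operations share a state-checked command pattern: verify the ring's current state, issue the command, then verify the state it should have reached (the ISR path in gsi_isr_evt_ctrl() below refreshes evt_ring->state when a command completes). Sketched for the allocate case, with the state enum names assumed to follow the GSI_EVT_RING_STATE_* convention suggested by gsi_evt_ring_state(), and the resulting states inferred from the checks above:

    /* ALLOCATE: NOT_ALLOCATED -> ALLOCATED
     * RESET:    ALLOCATED or ERROR -> ALLOCATED
     * DE_ALLOC: ALLOCATED -> NOT_ALLOCATED
     */
    evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
    if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
            dev_err(gsi->dev, "bad event ring state %u before alloc\n",
                    evt_ring->state);
            return -EINVAL;
    }

    ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
    if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
            dev_err(gsi->dev, "bad event ring state %u after alloc\n",
                    evt_ring->state);
            ret = -EIO;
    }
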
420 void *virt = channel->gsi->virt; in gsi_channel_state()
434 struct gsi *gsi = channel->gsi; in gsi_channel_command() local
435 struct device *dev = gsi->dev; in gsi_channel_command()
441 if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion)) in gsi_channel_command()
444 dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", in gsi_channel_command()
450 /* Allocate GSI channel in NOT_ALLOCATED state */
451 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) in gsi_channel_alloc_command() argument
453 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_alloc_command()
454 struct device *dev = gsi->dev; in gsi_channel_alloc_command()
480 struct device *dev = channel->gsi->dev; in gsi_channel_start_command()
503 /* Stop a GSI channel in STARTED state */
506 struct device *dev = channel->gsi->dev; in gsi_channel_stop_command()
540 /* Reset a GSI channel in ALLOCATED or ERROR state. */
543 struct device *dev = channel->gsi->dev; in gsi_channel_reset_command()
564 /* Deallocate an ALLOCATED GSI channel */
565 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) in gsi_channel_de_alloc_command() argument
567 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_de_alloc_command()
568 struct device *dev = gsi->dev; in gsi_channel_de_alloc_command()
591 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) in gsi_evt_ring_doorbell() argument
593 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; in gsi_evt_ring_doorbell()
600 iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id)); in gsi_evt_ring_doorbell()
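
The doorbell value written above is the DMA address of a ring slot. A sketch of the address computation, assuming a fixed GSI_RING_ELEMENT_SIZE (16 bytes per element in this driver) and that ring->addr holds the ring's DMA base address:

    /* Return the 32-bit DMA address of a ring element; only the low
     * 32 bits go into the doorbell register, the upper bits having been
     * programmed into the ring's context registers at setup time.
     */
    static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
    {
            return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
    }
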
604 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_program() argument
606 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_program()
613 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
616 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
623 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
626 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
631 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
634 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
635 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
636 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
639 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
640 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id)); in gsi_evt_ring_program()
643 gsi_evt_ring_doorbell(gsi, evt_ring_id, 0); in gsi_evt_ring_program()
692 gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id); in gsi_channel_freeze()
698 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); in gsi_channel_thaw()
710 struct gsi *gsi = channel->gsi; in gsi_channel_program() local
723 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); in gsi_channel_program()
726 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); in gsi_channel_program()
733 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); in gsi_channel_program()
736 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); in gsi_channel_program()
752 iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); in gsi_channel_program()
756 gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) * in gsi_channel_program()
761 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id)); in gsi_channel_program()
764 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id)); in gsi_channel_program()
767 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id)); in gsi_channel_program()
773 val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); in gsi_channel_program()
775 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); in gsi_channel_program()
785 /* Start an allocated GSI channel */
786 int gsi_channel_start(struct gsi *gsi, u32 channel_id) in gsi_channel_start() argument
788 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_start()
791 mutex_lock(&gsi->mutex); in gsi_channel_start()
795 mutex_unlock(&gsi->mutex); in gsi_channel_start()
803 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) in gsi_channel_stop() argument
805 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_stop()
814 mutex_lock(&gsi->mutex); in gsi_channel_stop()
823 mutex_unlock(&gsi->mutex); in gsi_channel_stop()
833 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy) in gsi_channel_reset() argument
835 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_reset()
837 mutex_lock(&gsi->mutex); in gsi_channel_reset()
847 mutex_unlock(&gsi->mutex); in gsi_channel_reset()
851 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) in gsi_channel_suspend() argument
853 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_suspend()
856 return gsi_channel_stop(gsi, channel_id); in gsi_channel_suspend()
864 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) in gsi_channel_resume() argument
866 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_resume()
869 return gsi_channel_start(gsi, channel_id); in gsi_channel_resume()
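
When a channel is not fully stopped for suspend, the driver only freezes it (disabling its IEOB interrupt, per gsi_channel_freeze() above) and thaws it on resume. A sketch of the full pair, inferred from the freeze/thaw and stop/start calls visible above:

    int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
    {
            struct gsi_channel *channel = &gsi->channel[channel_id];

            if (stop)
                    return gsi_channel_stop(gsi, channel_id);

            /* The hardware channel keeps running; just stop servicing it */
            gsi_channel_freeze(channel);

            return 0;
    }

    int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
    {
            struct gsi_channel *channel = &gsi->channel[channel_id];

            if (start)
                    return gsi_channel_start(gsi, channel_id);

            gsi_channel_thaw(channel);

            return 0;
    }
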
904 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_queued()
943 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), in gsi_channel_tx_update()
948 static void gsi_isr_chan_ctrl(struct gsi *gsi) in gsi_isr_chan_ctrl() argument
952 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET); in gsi_isr_chan_ctrl()
953 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); in gsi_isr_chan_ctrl()
961 channel = &gsi->channel[channel_id]; in gsi_isr_chan_ctrl()
968 static void gsi_isr_evt_ctrl(struct gsi *gsi) in gsi_isr_evt_ctrl() argument
972 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET); in gsi_isr_evt_ctrl()
973 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); in gsi_isr_evt_ctrl()
981 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_isr_evt_ctrl()
982 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_isr_evt_ctrl()
990 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) in gsi_isr_glob_chan_err() argument
993 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); in gsi_isr_glob_chan_err()
994 complete(&gsi->channel[channel_id].completion); in gsi_isr_glob_chan_err()
999 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", in gsi_isr_glob_chan_err()
1005 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) in gsi_isr_glob_evt_err() argument
1008 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_isr_glob_evt_err()
1012 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", in gsi_isr_glob_evt_err()
1018 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", in gsi_isr_glob_evt_err()
1023 static void gsi_isr_glob_err(struct gsi *gsi) in gsi_isr_glob_err() argument
1032 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_isr_glob_err()
1033 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_isr_glob_err()
1034 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); in gsi_isr_glob_err()
1042 gsi_isr_glob_chan_err(gsi, ee, which, code); in gsi_isr_glob_err()
1044 gsi_isr_glob_evt_err(gsi, ee, which, code); in gsi_isr_glob_err()
1046 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); in gsi_isr_glob_err()
1050 static void gsi_isr_gp_int1(struct gsi *gsi) in gsi_isr_gp_int1() argument
1055 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_isr_gp_int1()
1058 dev_err(gsi->dev, "global INT1 generic result %u\n", result); in gsi_isr_gp_int1()
1060 complete(&gsi->completion); in gsi_isr_gp_int1()
1064 static void gsi_isr_glob_ee(struct gsi *gsi) in gsi_isr_glob_ee() argument
1068 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); in gsi_isr_glob_ee()
1071 gsi_isr_glob_err(gsi); in gsi_isr_glob_ee()
1073 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); in gsi_isr_glob_ee()
1079 gsi_isr_gp_int1(gsi); in gsi_isr_glob_ee()
1083 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); in gsi_isr_glob_ee()
1087 static void gsi_isr_ieob(struct gsi *gsi) in gsi_isr_ieob() argument
1091 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); in gsi_isr_ieob()
1092 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); in gsi_isr_ieob()
1099 gsi_irq_ieob_disable(gsi, evt_ring_id); in gsi_isr_ieob()
1100 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); in gsi_isr_ieob()
1105 static void gsi_isr_general(struct gsi *gsi) in gsi_isr_general() argument
1107 struct device *dev = gsi->dev; in gsi_isr_general()
1110 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); in gsi_isr_general()
1111 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); in gsi_isr_general()
1118 * gsi_isr() - Top level GSI interrupt service routine
1120 * @dev_id: GSI pointer supplied to request_irq()
1122 * This is the main handler function registered for the GSI IRQ. Each type
1127 struct gsi *gsi = dev_id; in gsi_isr() local
1131 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { in gsi_isr()
1132 /* intr_mask contains bitmask of pending GSI interrupts */ in gsi_isr()
1140 gsi_isr_chan_ctrl(gsi); in gsi_isr()
1143 gsi_isr_evt_ctrl(gsi); in gsi_isr()
1146 gsi_isr_glob_ee(gsi); in gsi_isr()
1149 gsi_isr_ieob(gsi); in gsi_isr()
1152 gsi_isr_general(gsi); in gsi_isr()
1155 dev_err(gsi->dev, in gsi_isr()
1163 dev_err(gsi->dev, "interrupt flood\n"); in gsi_isr()
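
The ISR repeatedly reads the pending-type register and dispatches each set bit to the handlers above, bailing out if the register never goes quiet. A sketch of the dispatch loop, with the type-bit names (CH_CTRL and friends) and the iteration cap GSI_ISR_MAX_ITER assumed:

    while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
            /* intr_mask contains bitmask of pending GSI interrupts */
            do {
                    u32 gsi_intr = BIT(__ffs(intr_mask));

                    intr_mask ^= gsi_intr;  /* Clear the bit being handled */

                    switch (gsi_intr) {
                    case CH_CTRL:
                            gsi_isr_chan_ctrl(gsi);
                            break;
                    case EV_CTRL:
                            gsi_isr_evt_ctrl(gsi);
                            break;
                    /* ... GLOB_EE, IEOB and GENERAL cases elided ... */
                    default:
                            dev_err(gsi->dev,
                                    "unrecognized interrupt type 0x%08x\n",
                                    gsi_intr);
                            break;
                    }
            } while (intr_mask);

            if (++cnt > GSI_ISR_MAX_ITER) {
                    dev_err(gsi->dev, "interrupt flood\n");
                    break;
            }
    }
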
1194 * This function is called whenever we learn that the GSI hardware has filled
1250 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) in gsi_ring_alloc() argument
1253 struct device *dev = gsi->dev; in gsi_ring_alloc()
1273 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) in gsi_ring_free() argument
1277 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); in gsi_ring_free()
1281 static int gsi_evt_ring_id_alloc(struct gsi *gsi) in gsi_evt_ring_id_alloc() argument
1285 if (gsi->event_bitmap == ~0U) { in gsi_evt_ring_id_alloc()
1286 dev_err(gsi->dev, "event rings exhausted\n"); in gsi_evt_ring_id_alloc()
1290 evt_ring_id = ffz(gsi->event_bitmap); in gsi_evt_ring_id_alloc()
1291 gsi->event_bitmap |= BIT(evt_ring_id); in gsi_evt_ring_id_alloc()
1297 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_id_free() argument
1299 gsi->event_bitmap &= ~BIT(evt_ring_id); in gsi_evt_ring_id_free()
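
Event ring IDs come from a simple bitmap allocator: ffz() finds the first zero bit (the lowest free ID), allocation sets that bit, and freeing clears it. A usage sketch, with any initially reserved bits assumed to come from gsi_event_bitmap_init() (seen in gsi_evt_ring_init() below):

    u32 id;

    if (gsi->event_bitmap == ~0U)
            return -ENOSPC;         /* All 32 possible IDs in use */

    id = ffz(gsi->event_bitmap);    /* Lowest clear bit == lowest free ID */
    gsi->event_bitmap |= BIT(id);   /* Claim it */
    ...
    gsi->event_bitmap &= ~BIT(id);  /* Release it */
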
1307 struct gsi *gsi = channel->gsi; in gsi_channel_doorbell() local
1312 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); in gsi_channel_doorbell()
1319 struct gsi *gsi = channel->gsi; in gsi_channel_update() local
1326 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_update()
1333 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); in gsi_channel_update()
1357 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); in gsi_channel_update()
1422 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); in gsi_channel_poll()
1443 static void gsi_evt_ring_setup(struct gsi *gsi) in gsi_evt_ring_setup() argument
1449 static void gsi_evt_ring_teardown(struct gsi *gsi) in gsi_evt_ring_teardown() argument
1455 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id, in gsi_channel_setup_one() argument
1458 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_setup_one()
1462 if (!channel->gsi) in gsi_channel_setup_one()
1465 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); in gsi_channel_setup_one()
1469 gsi_evt_ring_program(gsi, evt_ring_id); in gsi_channel_setup_one()
1471 ret = gsi_channel_alloc_command(gsi, channel_id); in gsi_channel_setup_one()
1478 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1481 netif_napi_add(&gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1488 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); in gsi_channel_setup_one()
1494 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) in gsi_channel_teardown_one() argument
1496 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_teardown_one()
1499 if (!channel->gsi) in gsi_channel_teardown_one()
1505 gsi_channel_de_alloc_command(gsi, channel_id); in gsi_channel_teardown_one()
1506 gsi_evt_ring_reset_command(gsi, evt_ring_id); in gsi_channel_teardown_one()
1507 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); in gsi_channel_teardown_one()
1510 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, in gsi_generic_command() argument
1513 struct completion *completion = &gsi->completion; in gsi_generic_command()
1517 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_generic_command()
1519 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); in gsi_generic_command()
1526 if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion)) in gsi_generic_command()
1529 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", in gsi_generic_command()
1535 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) in gsi_modem_channel_alloc() argument
1537 return gsi_generic_command(gsi, channel_id, in gsi_modem_channel_alloc()
1541 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) in gsi_modem_channel_halt() argument
1545 ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL); in gsi_modem_channel_halt()
1547 dev_err(gsi->dev, "error %d halting modem channel %u\n", in gsi_modem_channel_halt()
1552 static int gsi_channel_setup(struct gsi *gsi, bool legacy) in gsi_channel_setup() argument
1558 gsi_evt_ring_setup(gsi); in gsi_channel_setup()
1559 gsi_irq_enable(gsi); in gsi_channel_setup()
1561 mutex_lock(&gsi->mutex); in gsi_channel_setup()
1564 ret = gsi_channel_setup_one(gsi, channel_id, legacy); in gsi_channel_setup()
1567 } while (++channel_id < gsi->channel_count); in gsi_channel_setup()
1571 struct gsi_channel *channel = &gsi->channel[channel_id++]; in gsi_channel_setup()
1573 if (!channel->gsi) in gsi_channel_setup()
1576 dev_err(gsi->dev, "channel %u not supported by hardware\n", in gsi_channel_setup()
1578 channel_id = gsi->channel_count; in gsi_channel_setup()
1583 mask = gsi->modem_channel_bitmap; in gsi_channel_setup()
1587 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); in gsi_channel_setup()
1595 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1601 mask ^= gsi->modem_channel_bitmap; in gsi_channel_setup()
1607 gsi_modem_channel_halt(gsi, channel_id); in gsi_channel_setup()
1612 gsi_channel_teardown_one(gsi, channel_id); in gsi_channel_setup()
1614 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1616 gsi_irq_disable(gsi); in gsi_channel_setup()
1617 gsi_evt_ring_teardown(gsi); in gsi_channel_setup()
1623 static void gsi_channel_teardown(struct gsi *gsi) in gsi_channel_teardown() argument
1625 u32 mask = gsi->modem_channel_bitmap; in gsi_channel_teardown()
1628 mutex_lock(&gsi->mutex); in gsi_channel_teardown()
1635 gsi_modem_channel_halt(gsi, channel_id); in gsi_channel_teardown()
1638 channel_id = gsi->channel_count - 1; in gsi_channel_teardown()
1640 gsi_channel_teardown_one(gsi, channel_id); in gsi_channel_teardown()
1643 mutex_unlock(&gsi->mutex); in gsi_channel_teardown()
1645 gsi_irq_disable(gsi); in gsi_channel_teardown()
1646 gsi_evt_ring_teardown(gsi); in gsi_channel_teardown()
1649 /* Setup function for GSI. GSI firmware must be loaded and initialized */
1650 int gsi_setup(struct gsi *gsi, bool legacy) in gsi_setup() argument
1652 struct device *dev = gsi->dev; in gsi_setup()
1655 /* Here is where we first touch the GSI hardware */ in gsi_setup()
1656 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); in gsi_setup()
1658 dev_err(dev, "GSI has not been enabled\n"); in gsi_setup()
1662 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); in gsi_setup()
1664 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); in gsi_setup()
1665 if (!gsi->channel_count) { in gsi_setup()
1666 dev_err(dev, "GSI reports zero channels supported\n"); in gsi_setup()
1669 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) { in gsi_setup()
1672 GSI_CHANNEL_COUNT_MAX, gsi->channel_count); in gsi_setup()
1673 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; in gsi_setup()
1676 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); in gsi_setup()
1677 if (!gsi->evt_ring_count) { in gsi_setup()
1678 dev_err(dev, "GSI reports zero event rings supported\n"); in gsi_setup()
1681 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) { in gsi_setup()
1684 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count); in gsi_setup()
1685 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; in gsi_setup()
1689 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); in gsi_setup()
1692 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); in gsi_setup()
1694 return gsi_channel_setup(gsi, legacy); in gsi_setup()
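
Taken together with gsi_init() below, the setup path implies the driver's expected lifecycle. A usage sketch, as the IPA core would call it (ordering inferred from the comments at gsi_setup() above and gsi_init() below):

    /* Probe time; GSI hardware does not need to be "ready" yet */
    ret = gsi_init(gsi, pdev, prefetch, count, data, modem_alloc);

    /* After GSI firmware has been loaded and initialized */
    ret = gsi_setup(gsi, legacy);

    /* Per-channel operation */
    ret = gsi_channel_start(gsi, channel_id);
    ...
    ret = gsi_channel_stop(gsi, channel_id);

    /* Teardown mirrors setup */
    gsi_teardown(gsi);
    gsi_exit(gsi);
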
1698 void gsi_teardown(struct gsi *gsi) in gsi_teardown() argument
1700 gsi_channel_teardown(gsi); in gsi_teardown()
1706 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_init() local
1710 ret = gsi_evt_ring_id_alloc(gsi); in gsi_channel_evt_ring_init()
1715 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; in gsi_channel_evt_ring_init()
1718 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); in gsi_channel_evt_ring_init()
1722 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", in gsi_channel_evt_ring_init()
1725 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); in gsi_channel_evt_ring_init()
1734 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_exit() local
1737 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_evt_ring_exit()
1738 gsi_ring_free(gsi, &evt_ring->ring); in gsi_channel_evt_ring_exit()
1739 gsi_evt_ring_id_free(gsi, evt_ring_id); in gsi_channel_evt_ring_exit()
1743 static void gsi_evt_ring_init(struct gsi *gsi) in gsi_evt_ring_init() argument
1747 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); in gsi_evt_ring_init()
1748 gsi->event_enable_bitmap = 0; in gsi_evt_ring_init()
1750 init_completion(&gsi->evt_ring[evt_ring_id].completion); in gsi_evt_ring_init()
1755 static void gsi_evt_ring_exit(struct gsi *gsi) in gsi_evt_ring_exit() argument
1760 static bool gsi_channel_data_valid(struct gsi *gsi, in gsi_channel_data_valid() argument
1765 struct device *dev = gsi->dev; in gsi_channel_data_valid()
1815 static int gsi_channel_init_one(struct gsi *gsi, in gsi_channel_init_one() argument
1823 if (!gsi_channel_data_valid(gsi, data)) in gsi_channel_init_one()
1829 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", in gsi_channel_init_one()
1835 channel = &gsi->channel[data->channel_id]; in gsi_channel_init_one()
1838 channel->gsi = gsi; in gsi_channel_init_one()
1851 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); in gsi_channel_init_one()
1853 dev_err(gsi->dev, "error %d allocating channel %u ring\n", in gsi_channel_init_one()
1858 ret = gsi_channel_trans_init(gsi, data->channel_id); in gsi_channel_init_one()
1863 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); in gsi_channel_init_one()
1872 gsi_ring_free(gsi, &channel->tre_ring); in gsi_channel_init_one()
1876 channel->gsi = NULL; /* Mark it not (fully) initialized */ in gsi_channel_init_one()
1884 if (!channel->gsi) in gsi_channel_exit_one()
1890 gsi_ring_free(channel->gsi, &channel->tre_ring); in gsi_channel_exit_one()
1895 static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count, in gsi_channel_init() argument
1902 gsi_evt_ring_init(gsi); in gsi_channel_init()
1914 gsi->modem_channel_bitmap |= in gsi_channel_init()
1919 ret = gsi_channel_init_one(gsi, &data[i], command, prefetch); in gsi_channel_init()
1931 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); in gsi_channel_init()
1934 gsi_channel_exit_one(&gsi->channel[data->channel_id]); in gsi_channel_init()
1936 gsi_evt_ring_exit(gsi); in gsi_channel_init()
1942 static void gsi_channel_exit(struct gsi *gsi) in gsi_channel_exit() argument
1947 gsi_channel_exit_one(&gsi->channel[channel_id]); in gsi_channel_exit()
1949 gsi->modem_channel_bitmap = 0; in gsi_channel_exit()
1951 gsi_evt_ring_exit(gsi); in gsi_channel_exit()
1954 /* Init function for GSI. GSI hardware does not need to be "ready" */
1955 int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, in gsi_init() argument
1967 gsi->dev = dev; in gsi_init()
1969 /* The GSI layer performs NAPI on all endpoints. NAPI requires a in gsi_init()
1970 * network device structure, but the GSI layer does not have one, in gsi_init()
1973 init_dummy_netdev(&gsi->dummy_dev); in gsi_init()
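
The dummy net_device exists only so NAPI instances have something to attach to. A sketch of how gsi_channel_setup_one() above registers each channel's poll function against it (NAPI_POLL_WEIGHT is the kernel's default poll budget; the toward_ipa field distinguishing TX from RX channels is an assumption, as it does not appear among the matches):

    if (channel->toward_ipa)
            netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
                              gsi_channel_poll, NAPI_POLL_WEIGHT);
    else
            netif_napi_add(&gsi->dummy_dev, &channel->napi,
                           gsi_channel_poll, NAPI_POLL_WEIGHT);
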
1975 ret = platform_get_irq_byname(pdev, "gsi"); in gsi_init()
1977 dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); in gsi_init()
1982 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); in gsi_init()
1984 dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); in gsi_init()
1987 gsi->irq = irq; in gsi_init()
1989 /* Get GSI memory range and map it */ in gsi_init()
1990 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); in gsi_init()
1992 dev_err(dev, "DT error getting \"gsi\" memory property\n"); in gsi_init()
1999 dev_err(dev, "DT memory resource \"gsi\" out of range\n"); in gsi_init()
2004 gsi->virt = ioremap(res->start, size); in gsi_init()
2005 if (!gsi->virt) { in gsi_init()
2006 dev_err(dev, "unable to remap \"gsi\" memory\n"); in gsi_init()
2011 ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc); in gsi_init()
2015 mutex_init(&gsi->mutex); in gsi_init()
2016 init_completion(&gsi->completion); in gsi_init()
2021 iounmap(gsi->virt); in gsi_init()
2023 free_irq(gsi->irq, gsi); in gsi_init()
2029 void gsi_exit(struct gsi *gsi) in gsi_exit() argument
2031 mutex_destroy(&gsi->mutex); in gsi_exit()
2032 gsi_channel_exit(gsi); in gsi_exit()
2033 free_irq(gsi->irq, gsi); in gsi_exit()
2034 iounmap(gsi->virt); in gsi_exit()
2057 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) in gsi_channel_tre_max() argument
2059 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_tre_max()
2066 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id) in gsi_channel_trans_tre_max() argument
2068 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_trans_tre_max()