Lines Matching refs:efx
84 static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value, in ef4_write_buf_tbl() argument
87 ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, in ef4_write_buf_tbl()
98 int ef4_farch_test_registers(struct ef4_nic *efx, in ef4_farch_test_registers() argument
111 ef4_reado(efx, &original, address); in ef4_farch_test_registers()
122 ef4_writeo(efx, &reg, address); in ef4_farch_test_registers()
123 ef4_reado(efx, &buf, address); in ef4_farch_test_registers()
132 ef4_writeo(efx, &reg, address); in ef4_farch_test_registers()
133 ef4_reado(efx, &buf, address); in ef4_farch_test_registers()
139 ef4_writeo(efx, &original, address); in ef4_farch_test_registers()
145 netif_err(efx, hw, efx->net_dev, in ef4_farch_test_registers()
168 ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) in ef4_init_special_buffer() argument
181 netif_dbg(efx, probe, efx->net_dev, in ef4_init_special_buffer()
188 ef4_write_buf_tbl(efx, &buf_desc, index); in ef4_init_special_buffer()
194 ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) in ef4_fini_special_buffer() argument
203 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", in ef4_fini_special_buffer()
211 ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); in ef4_fini_special_buffer()
223 static int ef4_alloc_special_buffer(struct ef4_nic *efx, in ef4_alloc_special_buffer() argument
229 if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) in ef4_alloc_special_buffer()
235 buffer->index = efx->next_buffer_table; in ef4_alloc_special_buffer()
236 efx->next_buffer_table += buffer->entries; in ef4_alloc_special_buffer()
238 netif_dbg(efx, probe, efx->net_dev, in ef4_alloc_special_buffer()
249 ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer) in ef4_free_special_buffer() argument
254 netif_dbg(efx, hw, efx->net_dev, in ef4_free_special_buffer()
261 ef4_nic_free_buffer(efx, &buffer->buf); in ef4_free_special_buffer()
279 ef4_writed_page(tx_queue->efx, &reg, in ef4_farch_notify_tx_desc()
297 ef4_writeo_page(tx_queue->efx, &reg, in ef4_farch_push_tx_desc()
355 if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf)) in ef4_farch_tx_limit_len()
365 struct ef4_nic *efx = tx_queue->efx; in ef4_farch_tx_probe() local
369 return ef4_alloc_special_buffer(efx, &tx_queue->txd, in ef4_farch_tx_probe()
375 struct ef4_nic *efx = tx_queue->efx; in ef4_farch_tx_init() local
379 ef4_init_special_buffer(efx, &tx_queue->txd); in ef4_farch_tx_init()
396 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { in ef4_farch_tx_init()
403 ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, in ef4_farch_tx_init()
406 if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) { in ef4_farch_tx_init()
410 ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); in ef4_farch_tx_init()
415 ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); in ef4_farch_tx_init()
418 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { in ef4_farch_tx_init()
424 ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, in ef4_farch_tx_init()
431 struct ef4_nic *efx = tx_queue->efx; in ef4_farch_flush_tx_queue() local
440 ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); in ef4_farch_flush_tx_queue()
445 struct ef4_nic *efx = tx_queue->efx; in ef4_farch_tx_fini() local
450 ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, in ef4_farch_tx_fini()
454 ef4_fini_special_buffer(efx, &tx_queue->txd); in ef4_farch_tx_fini()
460 ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd); in ef4_farch_tx_remove()
481 rx_queue->efx->type->rx_buffer_padding, in ef4_farch_build_rx_desc()
491 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_rx_write() local
505 ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, in ef4_farch_rx_write()
511 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_rx_probe() local
515 return ef4_alloc_special_buffer(efx, &rx_queue->rxd, in ef4_farch_rx_probe()
522 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_rx_init() local
523 bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0; in ef4_farch_rx_init()
532 jumbo_en = !is_b0 || efx->rx_scatter; in ef4_farch_rx_init()
534 netif_dbg(efx, hw, efx->net_dev, in ef4_farch_rx_init()
542 ef4_init_special_buffer(efx, &rx_queue->rxd); in ef4_farch_rx_init()
559 ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in ef4_farch_rx_init()
565 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_flush_rx_queue() local
572 ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); in ef4_farch_flush_rx_queue()
578 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_rx_fini() local
582 ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, in ef4_farch_rx_fini()
586 ef4_fini_special_buffer(efx, &rx_queue->rxd); in ef4_farch_rx_fini()
592 ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd); in ef4_farch_rx_remove()
604 static bool ef4_farch_flush_wake(struct ef4_nic *efx) in ef4_farch_flush_wake() argument
609 return (atomic_read(&efx->active_queues) == 0 || in ef4_farch_flush_wake()
610 (atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT in ef4_farch_flush_wake()
611 && atomic_read(&efx->rxq_flush_pending) > 0)); in ef4_farch_flush_wake()
614 static bool ef4_check_tx_flush_complete(struct ef4_nic *efx) in ef4_check_tx_flush_complete() argument
621 ef4_for_each_channel(channel, efx) { in ef4_check_tx_flush_complete()
623 ef4_reado_table(efx, &txd_ptr_tbl, in ef4_check_tx_flush_complete()
629 netif_dbg(efx, hw, efx->net_dev, in ef4_check_tx_flush_complete()
638 netif_dbg(efx, hw, efx->net_dev, in ef4_check_tx_flush_complete()
658 static int ef4_farch_do_flush(struct ef4_nic *efx) in ef4_farch_do_flush() argument
666 ef4_for_each_channel(channel, efx) { in ef4_farch_do_flush()
672 atomic_inc(&efx->rxq_flush_pending); in ef4_farch_do_flush()
676 while (timeout && atomic_read(&efx->active_queues) > 0) { in ef4_farch_do_flush()
681 ef4_for_each_channel(channel, efx) { in ef4_farch_do_flush()
683 if (atomic_read(&efx->rxq_flush_outstanding) >= in ef4_farch_do_flush()
689 atomic_dec(&efx->rxq_flush_pending); in ef4_farch_do_flush()
690 atomic_inc(&efx->rxq_flush_outstanding); in ef4_farch_do_flush()
696 timeout = wait_event_timeout(efx->flush_wq, in ef4_farch_do_flush()
697 ef4_farch_flush_wake(efx), in ef4_farch_do_flush()
701 if (atomic_read(&efx->active_queues) && in ef4_farch_do_flush()
702 !ef4_check_tx_flush_complete(efx)) { in ef4_farch_do_flush()
703 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " in ef4_farch_do_flush()
704 "(rx %d+%d)\n", atomic_read(&efx->active_queues), in ef4_farch_do_flush()
705 atomic_read(&efx->rxq_flush_outstanding), in ef4_farch_do_flush()
706 atomic_read(&efx->rxq_flush_pending)); in ef4_farch_do_flush()
709 atomic_set(&efx->active_queues, 0); in ef4_farch_do_flush()
710 atomic_set(&efx->rxq_flush_pending, 0); in ef4_farch_do_flush()
711 atomic_set(&efx->rxq_flush_outstanding, 0); in ef4_farch_do_flush()
717 int ef4_farch_fini_dmaq(struct ef4_nic *efx) in ef4_farch_fini_dmaq() argument
725 if (efx->state != STATE_RECOVERY) { in ef4_farch_fini_dmaq()
727 if (efx->pci_dev->is_busmaster) { in ef4_farch_fini_dmaq()
728 efx->type->prepare_flush(efx); in ef4_farch_fini_dmaq()
729 rc = ef4_farch_do_flush(efx); in ef4_farch_fini_dmaq()
730 efx->type->finish_flush(efx); in ef4_farch_fini_dmaq()
733 ef4_for_each_channel(channel, efx) { in ef4_farch_fini_dmaq()
758 void ef4_farch_finish_flr(struct ef4_nic *efx) in ef4_farch_finish_flr() argument
760 atomic_set(&efx->rxq_flush_pending, 0); in ef4_farch_finish_flr()
761 atomic_set(&efx->rxq_flush_outstanding, 0); in ef4_farch_finish_flr()
762 atomic_set(&efx->active_queues, 0); in ef4_farch_finish_flr()
781 struct ef4_nic *efx = channel->efx; in ef4_farch_ev_read_ack() local
789 ef4_writed(efx, &reg, in ef4_farch_ev_read_ack()
790 efx->type->evq_rptr_tbl_base + in ef4_farch_ev_read_ack()
795 void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq, in ef4_farch_generate_event() argument
807 ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); in ef4_farch_generate_event()
817 ef4_farch_generate_event(channel->efx, channel->channel, &event); in ef4_farch_magic_event()
831 struct ef4_nic *efx = channel->efx; in ef4_farch_handle_tx_event() local
834 if (unlikely(READ_ONCE(efx->reset_pending))) in ef4_farch_handle_tx_event()
852 netif_tx_lock(efx->net_dev); in ef4_farch_handle_tx_event()
854 netif_tx_unlock(efx->net_dev); in ef4_farch_handle_tx_event()
856 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in ef4_farch_handle_tx_event()
858 netif_err(efx, tx_err, efx->net_dev, in ef4_farch_handle_tx_event()
872 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_handle_rx_not_ok() local
892 rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ? in ef4_farch_handle_rx_not_ok()
907 else if (!efx->loopback_selftest) { in ef4_farch_handle_rx_not_ok()
920 netif_dbg(efx, rx_err, efx->net_dev, in ef4_farch_handle_rx_not_ok()
951 struct ef4_nic *efx = rx_queue->efx; in ef4_farch_handle_rx_bad_index() local
963 netif_info(efx, rx_err, efx->net_dev, in ef4_farch_handle_rx_bad_index()
967 ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ? in ef4_farch_handle_rx_bad_index()
988 struct ef4_nic *efx = channel->efx; in ef4_farch_handle_rx_event() local
990 if (unlikely(READ_ONCE(efx->reset_pending))) in ef4_farch_handle_rx_event()
1091 ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) in ef4_farch_handle_tx_flush_done() argument
1097 if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) { in ef4_farch_handle_tx_flush_done()
1098 tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES, in ef4_farch_handle_tx_flush_done()
1112 ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event) in ef4_farch_handle_rx_flush_done() argument
1121 if (qid >= efx->n_channels) in ef4_farch_handle_rx_flush_done()
1123 channel = ef4_get_channel(efx, qid); in ef4_farch_handle_rx_flush_done()
1129 netif_info(efx, hw, efx->net_dev, in ef4_farch_handle_rx_flush_done()
1132 atomic_inc(&efx->rxq_flush_pending); in ef4_farch_handle_rx_flush_done()
1137 atomic_dec(&efx->rxq_flush_outstanding); in ef4_farch_handle_rx_flush_done()
1138 if (ef4_farch_flush_wake(efx)) in ef4_farch_handle_rx_flush_done()
1139 wake_up(&efx->flush_wq); in ef4_farch_handle_rx_flush_done()
1145 struct ef4_nic *efx = channel->efx; in ef4_farch_handle_drain_event() local
1147 WARN_ON(atomic_read(&efx->active_queues) == 0); in ef4_farch_handle_drain_event()
1148 atomic_dec(&efx->active_queues); in ef4_farch_handle_drain_event()
1149 if (ef4_farch_flush_wake(efx)) in ef4_farch_handle_drain_event()
1150 wake_up(&efx->flush_wq); in ef4_farch_handle_drain_event()
1156 struct ef4_nic *efx = channel->efx; in ef4_farch_handle_generated_event() local
1177 netif_dbg(efx, hw, efx->net_dev, "channel %d received " in ef4_farch_handle_generated_event()
1186 struct ef4_nic *efx = channel->efx; in ef4_farch_handle_driver_event() local
1195 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", in ef4_farch_handle_driver_event()
1197 ef4_farch_handle_tx_flush_done(efx, event); in ef4_farch_handle_driver_event()
1200 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", in ef4_farch_handle_driver_event()
1202 ef4_farch_handle_rx_flush_done(efx, event); in ef4_farch_handle_driver_event()
1205 netif_dbg(efx, hw, efx->net_dev, in ef4_farch_handle_driver_event()
1210 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_handle_driver_event()
1214 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_handle_driver_event()
1219 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_handle_driver_event()
1224 netif_err(efx, rx_err, efx->net_dev, in ef4_farch_handle_driver_event()
1227 atomic_inc(&efx->rx_reset); in ef4_farch_handle_driver_event()
1228 ef4_schedule_reset(efx, in ef4_farch_handle_driver_event()
1229 EF4_WORKAROUND_6555(efx) ? in ef4_farch_handle_driver_event()
1234 netif_err(efx, rx_err, efx->net_dev, in ef4_farch_handle_driver_event()
1238 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in ef4_farch_handle_driver_event()
1241 netif_err(efx, tx_err, efx->net_dev, in ef4_farch_handle_driver_event()
1245 ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR); in ef4_farch_handle_driver_event()
1248 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_handle_driver_event()
1258 struct ef4_nic *efx = channel->efx; in ef4_farch_ev_process() local
1278 netif_vdbg(channel->efx, intr, channel->efx->net_dev, in ef4_farch_ev_process()
1298 if (tx_packets > efx->txq_entries) { in ef4_farch_ev_process()
1310 if (efx->type->handle_global_event && in ef4_farch_ev_process()
1311 efx->type->handle_global_event(channel, &event)) in ef4_farch_ev_process()
1315 netif_err(channel->efx, hw, channel->efx->net_dev, in ef4_farch_ev_process()
1330 struct ef4_nic *efx = channel->efx; in ef4_farch_ev_probe() local
1334 return ef4_alloc_special_buffer(efx, &channel->eventq, in ef4_farch_ev_probe()
1341 struct ef4_nic *efx = channel->efx; in ef4_farch_ev_init() local
1343 netif_dbg(efx, hw, efx->net_dev, in ef4_farch_ev_init()
1349 ef4_init_special_buffer(efx, &channel->eventq); in ef4_farch_ev_init()
1359 ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in ef4_farch_ev_init()
1368 struct ef4_nic *efx = channel->efx; in ef4_farch_ev_fini() local
1372 ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, in ef4_farch_ev_fini()
1376 ef4_fini_special_buffer(efx, &channel->eventq); in ef4_farch_ev_fini()
1382 ef4_free_special_buffer(channel->efx, &channel->eventq); in ef4_farch_ev_remove()
1406 static inline void ef4_farch_interrupts(struct ef4_nic *efx, in ef4_farch_interrupts() argument
1412 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, in ef4_farch_interrupts()
1415 ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); in ef4_farch_interrupts()
1418 void ef4_farch_irq_enable_master(struct ef4_nic *efx) in ef4_farch_irq_enable_master() argument
1420 EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr)); in ef4_farch_irq_enable_master()
1423 ef4_farch_interrupts(efx, true, false); in ef4_farch_irq_enable_master()
1426 void ef4_farch_irq_disable_master(struct ef4_nic *efx) in ef4_farch_irq_disable_master() argument
1429 ef4_farch_interrupts(efx, false, false); in ef4_farch_irq_disable_master()
1436 int ef4_farch_irq_test_generate(struct ef4_nic *efx) in ef4_farch_irq_test_generate() argument
1438 ef4_farch_interrupts(efx, true, true); in ef4_farch_irq_test_generate()
1445 irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) in ef4_farch_fatal_interrupt() argument
1447 struct falcon_nic_data *nic_data = efx->nic_data; in ef4_farch_fatal_interrupt()
1448 ef4_oword_t *int_ker = efx->irq_status.addr; in ef4_farch_fatal_interrupt()
1452 ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); in ef4_farch_fatal_interrupt()
1455 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status " in ef4_farch_fatal_interrupt()
1465 ef4_reado(efx, &reg, FR_AZ_MEM_STAT); in ef4_farch_fatal_interrupt()
1466 netif_err(efx, hw, efx->net_dev, in ef4_farch_fatal_interrupt()
1472 pci_clear_master(efx->pci_dev); in ef4_farch_fatal_interrupt()
1473 if (ef4_nic_is_dual_func(efx)) in ef4_farch_fatal_interrupt()
1475 ef4_farch_irq_disable_master(efx); in ef4_farch_fatal_interrupt()
1478 if (efx->int_error_count == 0 || in ef4_farch_fatal_interrupt()
1479 time_after(jiffies, efx->int_error_expire)) { in ef4_farch_fatal_interrupt()
1480 efx->int_error_count = 0; in ef4_farch_fatal_interrupt()
1481 efx->int_error_expire = in ef4_farch_fatal_interrupt()
1484 if (++efx->int_error_count < EF4_MAX_INT_ERRORS) { in ef4_farch_fatal_interrupt()
1485 netif_err(efx, hw, efx->net_dev, in ef4_farch_fatal_interrupt()
1487 ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR); in ef4_farch_fatal_interrupt()
1489 netif_err(efx, hw, efx->net_dev, in ef4_farch_fatal_interrupt()
1492 ef4_schedule_reset(efx, RESET_TYPE_DISABLE); in ef4_farch_fatal_interrupt()
1503 struct ef4_nic *efx = dev_id; in ef4_farch_legacy_interrupt() local
1504 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); in ef4_farch_legacy_interrupt()
1505 ef4_oword_t *int_ker = efx->irq_status.addr; in ef4_farch_legacy_interrupt()
1513 ef4_readd(efx, &reg, FR_BZ_INT_ISR0); in ef4_farch_legacy_interrupt()
1520 if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) && in ef4_farch_legacy_interrupt()
1521 !efx->eeh_disabled_legacy_irq) { in ef4_farch_legacy_interrupt()
1522 disable_irq_nosync(efx->legacy_irq); in ef4_farch_legacy_interrupt()
1523 efx->eeh_disabled_legacy_irq = true; in ef4_farch_legacy_interrupt()
1527 if (queues & (1U << efx->irq_level) && soft_enabled) { in ef4_farch_legacy_interrupt()
1530 return ef4_farch_fatal_interrupt(efx); in ef4_farch_legacy_interrupt()
1531 efx->last_irq_cpu = raw_smp_processor_id(); in ef4_farch_legacy_interrupt()
1535 efx->irq_zero_count = 0; in ef4_farch_legacy_interrupt()
1539 ef4_for_each_channel(channel, efx) { in ef4_farch_legacy_interrupt()
1554 if (efx->irq_zero_count++ == 0) in ef4_farch_legacy_interrupt()
1559 ef4_for_each_channel(channel, efx) { in ef4_farch_legacy_interrupt()
1571 netif_vdbg(efx, intr, efx->net_dev, in ef4_farch_legacy_interrupt()
1588 struct ef4_nic *efx = context->efx; in ef4_farch_msi_interrupt() local
1589 ef4_oword_t *int_ker = efx->irq_status.addr; in ef4_farch_msi_interrupt()
1592 netif_vdbg(efx, intr, efx->net_dev, in ef4_farch_msi_interrupt()
1596 if (!likely(READ_ONCE(efx->irq_soft_enabled))) in ef4_farch_msi_interrupt()
1600 if (context->index == efx->irq_level) { in ef4_farch_msi_interrupt()
1603 return ef4_farch_fatal_interrupt(efx); in ef4_farch_msi_interrupt()
1604 efx->last_irq_cpu = raw_smp_processor_id(); in ef4_farch_msi_interrupt()
1608 ef4_schedule_channel_irq(efx->channel[context->index]); in ef4_farch_msi_interrupt()
1616 void ef4_farch_rx_push_indir_table(struct ef4_nic *efx) in ef4_farch_rx_push_indir_table() argument
1621 BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0); in ef4_farch_rx_push_indir_table()
1623 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != in ef4_farch_rx_push_indir_table()
1628 efx->rx_indir_table[i]); in ef4_farch_rx_push_indir_table()
1629 ef4_writed(efx, &dword, in ef4_farch_rx_push_indir_table()
1644 void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw) in ef4_farch_dimension_resources() argument
1651 buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE + in ef4_farch_dimension_resources()
1652 efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE + in ef4_farch_dimension_resources()
1653 efx->n_channels * EF4_MAX_EVQ_SIZE) in ef4_farch_dimension_resources()
1655 vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES); in ef4_farch_dimension_resources()
1657 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; in ef4_farch_dimension_resources()
1658 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; in ef4_farch_dimension_resources()
1661 u32 ef4_farch_fpga_ver(struct ef4_nic *efx) in ef4_farch_fpga_ver() argument
1664 ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); in ef4_farch_fpga_ver()
1668 void ef4_farch_init_common(struct ef4_nic *efx) in ef4_farch_init_common() argument
1673 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); in ef4_farch_init_common()
1674 ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); in ef4_farch_init_common()
1675 EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); in ef4_farch_init_common()
1676 ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); in ef4_farch_init_common()
1681 ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG); in ef4_farch_init_common()
1688 ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG); in ef4_farch_init_common()
1690 ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); in ef4_farch_init_common()
1695 EF4_INT_MODE_USE_MSI(efx), in ef4_farch_init_common()
1696 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); in ef4_farch_init_common()
1697 ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER); in ef4_farch_init_common()
1700 efx->irq_level = 0; in ef4_farch_init_common()
1713 ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); in ef4_farch_init_common()
1718 ef4_reado(efx, &temp, FR_AZ_TX_RESERVED); in ef4_farch_init_common()
1731 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) in ef4_farch_init_common()
1733 ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED); in ef4_farch_init_common()
1735 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { in ef4_farch_init_common()
1745 ef4_writeo(efx, &temp, FR_BZ_TX_PACE); in ef4_farch_init_common()
1823 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1870 static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx) in ef4_farch_filter_push_rx_config() argument
1872 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_push_rx_config()
1876 ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); in ef4_farch_filter_push_rx_config()
1930 } else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { in ef4_farch_filter_push_rx_config()
1938 efx->rx_scatter); in ef4_farch_filter_push_rx_config()
1941 ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); in ef4_farch_filter_push_rx_config()
1944 static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx) in ef4_farch_filter_push_tx_limits() argument
1946 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_push_tx_limits()
1950 ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG); in ef4_farch_filter_push_tx_limits()
1964 ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); in ef4_farch_filter_push_tx_limits()
2155 ef4_farch_filter_init_rx_auto(struct ef4_nic *efx, in ef4_farch_filter_init_rx_auto() argument
2163 (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) | in ef4_farch_filter_init_rx_auto()
2164 (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0)); in ef4_farch_filter_init_rx_auto()
2307 u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx) in ef4_farch_filter_get_rx_id_limit() argument
2309 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_get_rx_id_limit()
2323 s32 ef4_farch_filter_insert(struct ef4_nic *efx, in ef4_farch_filter_insert() argument
2327 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_insert()
2343 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_filter_insert()
2355 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_insert()
2385 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_insert()
2444 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_insert()
2449 ef4_farch_filter_push_tx_limits(efx); in ef4_farch_filter_insert()
2451 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_insert()
2454 ef4_writeo(efx, &filter, in ef4_farch_filter_insert()
2461 ef4_farch_filter_table_clear_entry(efx, table, in ef4_farch_filter_insert()
2465 netif_vdbg(efx, hw, efx->net_dev, in ef4_farch_filter_insert()
2471 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_insert()
2476 ef4_farch_filter_table_clear_entry(struct ef4_nic *efx, in ef4_farch_filter_table_clear_entry() argument
2489 ef4_writeo(efx, &filter, table->offset + table->step * filter_idx); in ef4_farch_filter_table_clear_entry()
2500 ef4_farch_filter_push_tx_limits(efx); in ef4_farch_filter_table_clear_entry()
2502 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_table_clear_entry()
2506 static int ef4_farch_filter_remove(struct ef4_nic *efx, in ef4_farch_filter_remove() argument
2518 ef4_farch_filter_init_rx_auto(efx, spec); in ef4_farch_filter_remove()
2519 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_remove()
2521 ef4_farch_filter_table_clear_entry(efx, table, filter_idx); in ef4_farch_filter_remove()
2527 int ef4_farch_filter_remove_safe(struct ef4_nic *efx, in ef4_farch_filter_remove_safe() argument
2531 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_remove_safe()
2548 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_remove_safe()
2549 rc = ef4_farch_filter_remove(efx, table, filter_idx, priority); in ef4_farch_filter_remove_safe()
2550 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_remove_safe()
2555 int ef4_farch_filter_get_safe(struct ef4_nic *efx, in ef4_farch_filter_get_safe() argument
2559 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_get_safe()
2576 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_get_safe()
2586 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_get_safe()
2592 ef4_farch_filter_table_clear(struct ef4_nic *efx, in ef4_farch_filter_table_clear() argument
2596 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_table_clear()
2600 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_table_clear()
2603 ef4_farch_filter_remove(efx, table, in ef4_farch_filter_table_clear()
2606 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_table_clear()
2609 int ef4_farch_filter_clear_rx(struct ef4_nic *efx, in ef4_farch_filter_clear_rx() argument
2612 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP, in ef4_farch_filter_clear_rx()
2614 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC, in ef4_farch_filter_clear_rx()
2616 ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF, in ef4_farch_filter_clear_rx()
2621 u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx, in ef4_farch_filter_count_rx_used() argument
2624 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_count_rx_used()
2630 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_count_rx_used()
2643 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_count_rx_used()
2648 s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx, in ef4_farch_filter_get_rx_ids() argument
2652 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_get_rx_ids()
2658 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_get_rx_ids()
2677 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_get_rx_ids()
2683 void ef4_farch_filter_table_restore(struct ef4_nic *efx) in ef4_farch_filter_table_restore() argument
2685 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_table_restore()
2691 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_table_restore()
2704 ef4_writeo(efx, &filter, in ef4_farch_filter_table_restore()
2709 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_table_restore()
2710 ef4_farch_filter_push_tx_limits(efx); in ef4_farch_filter_table_restore()
2712 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_table_restore()
2715 void ef4_farch_filter_table_remove(struct ef4_nic *efx) in ef4_farch_filter_table_remove() argument
2717 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_table_remove()
2727 int ef4_farch_filter_table_probe(struct ef4_nic *efx) in ef4_farch_filter_table_probe() argument
2736 efx->filter_state = state; in ef4_farch_filter_table_probe()
2738 if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) { in ef4_farch_filter_table_probe()
2770 ef4_farch_filter_init_rx_auto(efx, spec); in ef4_farch_filter_table_probe()
2775 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_table_probe()
2780 ef4_farch_filter_table_remove(efx); in ef4_farch_filter_table_probe()
2785 void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx) in ef4_farch_filter_update_rx_scatter() argument
2787 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_update_rx_scatter()
2793 spin_lock_bh(&efx->filter_lock); in ef4_farch_filter_update_rx_scatter()
2803 efx->n_rx_channels) in ef4_farch_filter_update_rx_scatter()
2806 if (efx->rx_scatter) in ef4_farch_filter_update_rx_scatter()
2818 ef4_writeo(efx, &filter, in ef4_farch_filter_update_rx_scatter()
2823 ef4_farch_filter_push_rx_config(efx); in ef4_farch_filter_update_rx_scatter()
2825 spin_unlock_bh(&efx->filter_lock); in ef4_farch_filter_update_rx_scatter()
2830 s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx, in ef4_farch_filter_rfs_insert() argument
2833 return ef4_farch_filter_insert(efx, gen_spec, true); in ef4_farch_filter_rfs_insert()
2836 bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id, in ef4_farch_filter_rfs_expire_one() argument
2839 struct ef4_farch_filter_state *state = efx->filter_state; in ef4_farch_filter_rfs_expire_one()
2845 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, in ef4_farch_filter_rfs_expire_one()
2847 ef4_farch_filter_table_clear_entry(efx, table, index); in ef4_farch_filter_rfs_expire_one()
2856 void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx) in ef4_farch_filter_sync_rx_mode() argument
2858 struct net_device *net_dev = efx->net_dev; in ef4_farch_filter_sync_rx_mode()
2860 union ef4_multicast_hash *mc_hash = &efx->multicast_hash; in ef4_farch_filter_sync_rx_mode()
2864 if (!ef4_dev_registered(efx)) in ef4_farch_filter_sync_rx_mode()
2869 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); in ef4_farch_filter_sync_rx_mode()
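
Most of the matches above share a single register-access idiom: populate an ef4_oword_t register image with EF4_POPULATE_OWORD_*() and push it to the NIC with ef4_writeo() (or fetch it first with ef4_reado() for a read-modify-write). The snippet below is only a minimal sketch restating the ef4_farch_init_common() calls already listed above; the wrapper function name is hypothetical.

    /* Sketch only: mirrors the EF4_POPULATE_OWORD_1()/ef4_writeo() calls
     * shown in ef4_farch_init_common() above; the wrapper name is made up. */
    static void example_push_dc_bases(struct ef4_nic *efx)
    {
            ef4_oword_t temp;

            /* Build the 128-bit register image, then write it in one call. */
            EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
            ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);

            EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
            ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
    }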