Lines Matching refs: efx

217 static int efx_soft_enable_interrupts(struct efx_nic *efx);
218 static void efx_soft_disable_interrupts(struct efx_nic *efx);
220 static void efx_remove_channels(struct efx_nic *efx);
222 static void efx_remove_port(struct efx_nic *efx);
224 static void efx_fini_napi(struct efx_nic *efx);
226 static void efx_fini_struct(struct efx_nic *efx);
227 static void efx_start_all(struct efx_nic *efx);
228 static void efx_stop_all(struct efx_nic *efx);
230 #define EFX_ASSERT_RESET_SERIALISED(efx) \ argument
232 if ((efx->state == STATE_READY) || \
233 (efx->state == STATE_RECOVERY) || \
234 (efx->state == STATE_DISABLED)) \
238 static int efx_check_disabled(struct efx_nic *efx) in efx_check_disabled() argument
240 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { in efx_check_disabled()
241 netif_err(efx, drv, efx->net_dev, in efx_check_disabled()
309 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) in efx_update_irq_mod() argument
311 int step = efx->irq_mod_step_us; in efx_update_irq_mod()
316 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
320 efx->irq_rx_moderation_us) { in efx_update_irq_mod()
322 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
334 struct efx_nic *efx = channel->efx; in efx_poll() local
337 netif_vdbg(efx, intr, efx->net_dev, in efx_poll()
345 efx->irq_rx_adaptive && in efx_poll()
347 efx_update_irq_mod(efx, channel); in efx_poll()
374 struct efx_nic *efx = channel->efx; in efx_probe_eventq() local
377 netif_dbg(efx, probe, efx->net_dev, in efx_probe_eventq()
382 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); in efx_probe_eventq()
392 struct efx_nic *efx = channel->efx; in efx_init_eventq() local
397 netif_dbg(efx, drv, efx->net_dev, in efx_init_eventq()
402 efx->type->push_irq_moderation(channel); in efx_init_eventq()
412 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_start_eventq()
438 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
447 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
461 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) in efx_alloc_channel() argument
472 channel->efx = efx; in efx_alloc_channel()
478 tx_queue->efx = efx; in efx_alloc_channel()
488 rx_queue->efx = efx; in efx_alloc_channel()
542 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
577 struct efx_nic *efx = channel->efx; in efx_get_channel_name() local
582 if (efx->tx_channel_offset == 0) { in efx_get_channel_name()
584 } else if (channel->channel < efx->tx_channel_offset) { in efx_get_channel_name()
588 number -= efx->tx_channel_offset; in efx_get_channel_name()
590 snprintf(buf, len, "%s%s-%d", efx->name, type, number); in efx_get_channel_name()
593 static void efx_set_channel_names(struct efx_nic *efx) in efx_set_channel_names() argument
597 efx_for_each_channel(channel, efx) in efx_set_channel_names()
599 efx->msi_context[channel->channel].name, in efx_set_channel_names()
600 sizeof(efx->msi_context[0].name)); in efx_set_channel_names()
603 static int efx_probe_channels(struct efx_nic *efx) in efx_probe_channels() argument
609 efx->next_buffer_table = 0; in efx_probe_channels()
616 efx_for_each_channel_rev(channel, efx) { in efx_probe_channels()
619 netif_err(efx, probe, efx->net_dev, in efx_probe_channels()
625 efx_set_channel_names(efx); in efx_probe_channels()
630 efx_remove_channels(efx); in efx_probe_channels()
638 static void efx_start_datapath(struct efx_nic *efx) in efx_start_datapath() argument
640 netdev_features_t old_features = efx->net_dev->features; in efx_start_datapath()
641 bool old_rx_scatter = efx->rx_scatter; in efx_start_datapath()
651 efx->rx_dma_len = (efx->rx_prefix_size + in efx_start_datapath()
652 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + in efx_start_datapath()
653 efx->type->rx_buffer_padding); in efx_start_datapath()
655 efx->rx_ip_align + efx->rx_dma_len); in efx_start_datapath()
657 efx->rx_scatter = efx->type->always_rx_scatter; in efx_start_datapath()
658 efx->rx_buffer_order = 0; in efx_start_datapath()
659 } else if (efx->type->can_rx_scatter) { in efx_start_datapath()
665 efx->rx_scatter = true; in efx_start_datapath()
666 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; in efx_start_datapath()
667 efx->rx_buffer_order = 0; in efx_start_datapath()
669 efx->rx_scatter = false; in efx_start_datapath()
670 efx->rx_buffer_order = get_order(rx_buf_len); in efx_start_datapath()
673 efx_rx_config_page_split(efx); in efx_start_datapath()
674 if (efx->rx_buffer_order) in efx_start_datapath()
675 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
677 efx->rx_dma_len, efx->rx_buffer_order, in efx_start_datapath()
678 efx->rx_pages_per_batch); in efx_start_datapath()
680 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
682 efx->rx_dma_len, efx->rx_page_buf_step, in efx_start_datapath()
683 efx->rx_bufs_per_page, efx->rx_pages_per_batch); in efx_start_datapath()
688 efx->net_dev->hw_features |= efx->net_dev->features; in efx_start_datapath()
689 efx->net_dev->hw_features &= ~efx->fixed_features; in efx_start_datapath()
690 efx->net_dev->features |= efx->fixed_features; in efx_start_datapath()
691 if (efx->net_dev->features != old_features) in efx_start_datapath()
692 netdev_features_change(efx->net_dev); in efx_start_datapath()
695 if (efx->rx_scatter != old_rx_scatter) in efx_start_datapath()
696 efx->type->filter_update_rx_scatter(efx); in efx_start_datapath()
705 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); in efx_start_datapath()
706 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; in efx_start_datapath()
709 efx_for_each_channel(channel, efx) { in efx_start_datapath()
712 atomic_inc(&efx->active_queues); in efx_start_datapath()
717 atomic_inc(&efx->active_queues); in efx_start_datapath()
726 efx_ptp_start_datapath(efx); in efx_start_datapath()
728 if (netif_device_present(efx->net_dev)) in efx_start_datapath()
729 netif_tx_wake_all_queues(efx->net_dev); in efx_start_datapath()
732 static void efx_stop_datapath(struct efx_nic *efx) in efx_stop_datapath() argument
739 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_datapath()
740 BUG_ON(efx->port_enabled); in efx_stop_datapath()
742 efx_ptp_stop_datapath(efx); in efx_stop_datapath()
745 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
750 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
763 rc = efx->type->fini_dmaq(efx); in efx_stop_datapath()
765 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); in efx_stop_datapath()
767 netif_dbg(efx, drv, efx->net_dev, in efx_stop_datapath()
771 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
784 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_channel()
795 static void efx_remove_channels(struct efx_nic *efx) in efx_remove_channels() argument
799 efx_for_each_channel(channel, efx) in efx_remove_channels()
804 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) in efx_realloc_channels() argument
811 rc = efx_check_disabled(efx); in efx_realloc_channels()
818 efx_for_each_channel(channel, efx) { in efx_realloc_channels()
837 efx_device_detach_sync(efx); in efx_realloc_channels()
838 efx_stop_all(efx); in efx_realloc_channels()
839 efx_soft_disable_interrupts(efx); in efx_realloc_channels()
843 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
844 channel = efx->channel[i]; in efx_realloc_channels()
855 old_rxq_entries = efx->rxq_entries; in efx_realloc_channels()
856 old_txq_entries = efx->txq_entries; in efx_realloc_channels()
857 efx->rxq_entries = rxq_entries; in efx_realloc_channels()
858 efx->txq_entries = txq_entries; in efx_realloc_channels()
859 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
860 channel = efx->channel[i]; in efx_realloc_channels()
861 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
866 efx->next_buffer_table = next_buffer_table; in efx_realloc_channels()
868 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
869 channel = efx->channel[i]; in efx_realloc_channels()
875 efx_init_napi_channel(efx->channel[i]); in efx_realloc_channels()
880 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
889 rc2 = efx_soft_enable_interrupts(efx); in efx_realloc_channels()
892 netif_err(efx, drv, efx->net_dev, in efx_realloc_channels()
894 efx_schedule_reset(efx, RESET_TYPE_DISABLE); in efx_realloc_channels()
896 efx_start_all(efx); in efx_realloc_channels()
897 efx_device_attach_if_not_resetting(efx); in efx_realloc_channels()
903 efx->rxq_entries = old_rxq_entries; in efx_realloc_channels()
904 efx->txq_entries = old_txq_entries; in efx_realloc_channels()
905 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
906 channel = efx->channel[i]; in efx_realloc_channels()
907 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
920 return channel->channel - channel->efx->tx_channel_offset < in efx_default_channel_want_txqs()
921 channel->efx->n_tx_channels; in efx_default_channel_want_txqs()
953 void efx_link_status_changed(struct efx_nic *efx) in efx_link_status_changed() argument
955 struct efx_link_state *link_state = &efx->link_state; in efx_link_status_changed()
961 if (!netif_running(efx->net_dev)) in efx_link_status_changed()
964 if (link_state->up != netif_carrier_ok(efx->net_dev)) { in efx_link_status_changed()
965 efx->n_link_state_changes++; in efx_link_status_changed()
968 netif_carrier_on(efx->net_dev); in efx_link_status_changed()
970 netif_carrier_off(efx->net_dev); in efx_link_status_changed()
975 netif_info(efx, link, efx->net_dev, in efx_link_status_changed()
978 efx->net_dev->mtu); in efx_link_status_changed()
980 netif_info(efx, link, efx->net_dev, "link down\n"); in efx_link_status_changed()
983 void efx_link_set_advertising(struct efx_nic *efx, in efx_link_set_advertising() argument
986 memcpy(efx->link_advertising, advertising, in efx_link_set_advertising()
989 efx->link_advertising[0] |= ADVERTISED_Autoneg; in efx_link_set_advertising()
991 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); in efx_link_set_advertising()
993 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); in efx_link_set_advertising()
995 efx->wanted_fc ^= EFX_FC_TX; in efx_link_set_advertising()
1001 void efx_link_clear_advertising(struct efx_nic *efx) in efx_link_clear_advertising() argument
1003 bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); in efx_link_clear_advertising()
1004 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); in efx_link_clear_advertising()
1007 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) in efx_link_set_wanted_fc() argument
1009 efx->wanted_fc = wanted_fc; in efx_link_set_wanted_fc()
1010 if (efx->link_advertising[0]) { in efx_link_set_wanted_fc()
1012 efx->link_advertising[0] |= (ADVERTISED_Pause | in efx_link_set_wanted_fc()
1015 efx->link_advertising[0] &= ~(ADVERTISED_Pause | in efx_link_set_wanted_fc()
1018 efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; in efx_link_set_wanted_fc()
1022 static void efx_fini_port(struct efx_nic *efx);
1027 void efx_mac_reconfigure(struct efx_nic *efx) in efx_mac_reconfigure() argument
1029 down_read(&efx->filter_sem); in efx_mac_reconfigure()
1030 efx->type->reconfigure_mac(efx); in efx_mac_reconfigure()
1031 up_read(&efx->filter_sem); in efx_mac_reconfigure()
1041 int __efx_reconfigure_port(struct efx_nic *efx) in __efx_reconfigure_port() argument
1046 WARN_ON(!mutex_is_locked(&efx->mac_lock)); in __efx_reconfigure_port()
1049 phy_mode = efx->phy_mode; in __efx_reconfigure_port()
1050 if (LOOPBACK_INTERNAL(efx)) in __efx_reconfigure_port()
1051 efx->phy_mode |= PHY_MODE_TX_DISABLED; in __efx_reconfigure_port()
1053 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; in __efx_reconfigure_port()
1055 rc = efx->type->reconfigure_port(efx); in __efx_reconfigure_port()
1058 efx->phy_mode = phy_mode; in __efx_reconfigure_port()
1065 int efx_reconfigure_port(struct efx_nic *efx) in efx_reconfigure_port() argument
1069 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reconfigure_port()
1071 mutex_lock(&efx->mac_lock); in efx_reconfigure_port()
1072 rc = __efx_reconfigure_port(efx); in efx_reconfigure_port()
1073 mutex_unlock(&efx->mac_lock); in efx_reconfigure_port()
1083 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); in efx_mac_work() local
1085 mutex_lock(&efx->mac_lock); in efx_mac_work()
1086 if (efx->port_enabled) in efx_mac_work()
1087 efx_mac_reconfigure(efx); in efx_mac_work()
1088 mutex_unlock(&efx->mac_lock); in efx_mac_work()
1091 static int efx_probe_port(struct efx_nic *efx) in efx_probe_port() argument
1095 netif_dbg(efx, probe, efx->net_dev, "create port\n"); in efx_probe_port()
1098 efx->phy_mode = PHY_MODE_SPECIAL; in efx_probe_port()
1101 rc = efx->type->probe_port(efx); in efx_probe_port()
1106 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); in efx_probe_port()
1111 static int efx_init_port(struct efx_nic *efx) in efx_init_port() argument
1115 netif_dbg(efx, drv, efx->net_dev, "init port\n"); in efx_init_port()
1117 mutex_lock(&efx->mac_lock); in efx_init_port()
1119 rc = efx->phy_op->init(efx); in efx_init_port()
1123 efx->port_initialized = true; in efx_init_port()
1127 efx_mac_reconfigure(efx); in efx_init_port()
1130 rc = efx->phy_op->reconfigure(efx); in efx_init_port()
1134 mutex_unlock(&efx->mac_lock); in efx_init_port()
1138 efx->phy_op->fini(efx); in efx_init_port()
1140 mutex_unlock(&efx->mac_lock); in efx_init_port()
1144 static void efx_start_port(struct efx_nic *efx) in efx_start_port() argument
1146 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); in efx_start_port()
1147 BUG_ON(efx->port_enabled); in efx_start_port()
1149 mutex_lock(&efx->mac_lock); in efx_start_port()
1150 efx->port_enabled = true; in efx_start_port()
1153 efx_mac_reconfigure(efx); in efx_start_port()
1155 mutex_unlock(&efx->mac_lock); in efx_start_port()
1163 static void efx_stop_port(struct efx_nic *efx) in efx_stop_port() argument
1165 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); in efx_stop_port()
1167 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_port()
1169 mutex_lock(&efx->mac_lock); in efx_stop_port()
1170 efx->port_enabled = false; in efx_stop_port()
1171 mutex_unlock(&efx->mac_lock); in efx_stop_port()
1174 netif_addr_lock_bh(efx->net_dev); in efx_stop_port()
1175 netif_addr_unlock_bh(efx->net_dev); in efx_stop_port()
1177 cancel_delayed_work_sync(&efx->monitor_work); in efx_stop_port()
1178 efx_selftest_async_cancel(efx); in efx_stop_port()
1179 cancel_work_sync(&efx->mac_work); in efx_stop_port()
1182 static void efx_fini_port(struct efx_nic *efx) in efx_fini_port() argument
1184 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); in efx_fini_port()
1186 if (!efx->port_initialized) in efx_fini_port()
1189 efx->phy_op->fini(efx); in efx_fini_port()
1190 efx->port_initialized = false; in efx_fini_port()
1192 efx->link_state.up = false; in efx_fini_port()
1193 efx_link_status_changed(efx); in efx_fini_port()
1196 static void efx_remove_port(struct efx_nic *efx) in efx_remove_port() argument
1198 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); in efx_remove_port()
1200 efx->type->remove_port(efx); in efx_remove_port()
1219 static void efx_associate(struct efx_nic *efx) in efx_associate() argument
1223 if (efx->primary == efx) { in efx_associate()
1226 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); in efx_associate()
1227 list_add_tail(&efx->node, &efx_primary_list); in efx_associate()
1231 if (efx_same_controller(efx, other)) { in efx_associate()
1235 pci_name(efx->pci_dev), in efx_associate()
1236 efx->net_dev->name); in efx_associate()
1238 &efx->secondary_list); in efx_associate()
1239 other->primary = efx; in efx_associate()
1246 if (efx_same_controller(efx, other)) { in efx_associate()
1247 netif_dbg(efx, probe, efx->net_dev, in efx_associate()
1251 list_add_tail(&efx->node, in efx_associate()
1253 efx->primary = other; in efx_associate()
1258 netif_dbg(efx, probe, efx->net_dev, in efx_associate()
1260 list_add_tail(&efx->node, &efx_unassociated_list); in efx_associate()
1264 static void efx_dissociate(struct efx_nic *efx) in efx_dissociate() argument
1268 list_del(&efx->node); in efx_dissociate()
1269 efx->primary = NULL; in efx_dissociate()
1271 list_for_each_entry_safe(other, next, &efx->secondary_list, node) { in efx_dissociate()
1281 static int efx_init_io(struct efx_nic *efx) in efx_init_io() argument
1283 struct pci_dev *pci_dev = efx->pci_dev; in efx_init_io()
1284 dma_addr_t dma_mask = efx->type->max_dma_mask; in efx_init_io()
1285 unsigned int mem_map_size = efx->type->mem_map_size(efx); in efx_init_io()
1288 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); in efx_init_io()
1290 bar = efx->type->mem_bar(efx); in efx_init_io()
1294 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1312 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1316 netif_dbg(efx, probe, efx->net_dev, in efx_init_io()
1319 efx->membase_phys = pci_resource_start(efx->pci_dev, bar); in efx_init_io()
1322 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1327 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); in efx_init_io()
1328 if (!efx->membase) { in efx_init_io()
1329 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1331 (unsigned long long)efx->membase_phys, mem_map_size); in efx_init_io()
1335 netif_dbg(efx, probe, efx->net_dev, in efx_init_io()
1337 (unsigned long long)efx->membase_phys, mem_map_size, in efx_init_io()
1338 efx->membase); in efx_init_io()
1343 pci_release_region(efx->pci_dev, bar); in efx_init_io()
1345 efx->membase_phys = 0; in efx_init_io()
1347 pci_disable_device(efx->pci_dev); in efx_init_io()
1352 static void efx_fini_io(struct efx_nic *efx) in efx_fini_io() argument
1356 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); in efx_fini_io()
1358 if (efx->membase) { in efx_fini_io()
1359 iounmap(efx->membase); in efx_fini_io()
1360 efx->membase = NULL; in efx_fini_io()
1363 if (efx->membase_phys) { in efx_fini_io()
1364 bar = efx->type->mem_bar(efx); in efx_fini_io()
1365 pci_release_region(efx->pci_dev, bar); in efx_fini_io()
1366 efx->membase_phys = 0; in efx_fini_io()
1370 if (!pci_vfs_assigned(efx->pci_dev)) in efx_fini_io()
1371 pci_disable_device(efx->pci_dev); in efx_fini_io()
1374 void efx_set_default_rx_indir_table(struct efx_nic *efx, in efx_set_default_rx_indir_table() argument
1381 ethtool_rxfh_indir_default(i, efx->rss_spread); in efx_set_default_rx_indir_table()
1384 static unsigned int efx_wanted_parallelism(struct efx_nic *efx) in efx_wanted_parallelism() argument
1394 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
1412 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, in efx_wanted_parallelism()
1422 if (efx->type->sriov_wanted) { in efx_wanted_parallelism()
1423 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && in efx_wanted_parallelism()
1424 count > efx_vf_size(efx)) { in efx_wanted_parallelism()
1425 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
1429 count, efx_vf_size(efx)); in efx_wanted_parallelism()
1430 count = efx_vf_size(efx); in efx_wanted_parallelism()
1441 static int efx_probe_interrupts(struct efx_nic *efx) in efx_probe_interrupts() argument
1448 if (efx->extra_channel_type[i]) in efx_probe_interrupts()
1451 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { in efx_probe_interrupts()
1455 n_channels = efx_wanted_parallelism(efx); in efx_probe_interrupts()
1459 n_channels = min(n_channels, efx->max_channels); in efx_probe_interrupts()
1463 rc = pci_enable_msix_range(efx->pci_dev, in efx_probe_interrupts()
1467 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1469 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) in efx_probe_interrupts()
1470 efx->interrupt_mode = EFX_INT_MODE_MSI; in efx_probe_interrupts()
1474 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1477 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1483 efx->n_channels = n_channels; in efx_probe_interrupts()
1487 efx->n_tx_channels = min(max(n_channels / 2, in efx_probe_interrupts()
1489 efx->max_tx_channels); in efx_probe_interrupts()
1490 efx->n_rx_channels = max(n_channels - in efx_probe_interrupts()
1491 efx->n_tx_channels, in efx_probe_interrupts()
1494 efx->n_tx_channels = min(n_channels, in efx_probe_interrupts()
1495 efx->max_tx_channels); in efx_probe_interrupts()
1496 efx->n_rx_channels = n_channels; in efx_probe_interrupts()
1498 for (i = 0; i < efx->n_channels; i++) in efx_probe_interrupts()
1499 efx_get_channel(efx, i)->irq = in efx_probe_interrupts()
1505 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { in efx_probe_interrupts()
1506 efx->n_channels = 1; in efx_probe_interrupts()
1507 efx->n_rx_channels = 1; in efx_probe_interrupts()
1508 efx->n_tx_channels = 1; in efx_probe_interrupts()
1509 rc = pci_enable_msi(efx->pci_dev); in efx_probe_interrupts()
1511 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; in efx_probe_interrupts()
1513 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1515 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) in efx_probe_interrupts()
1516 efx->interrupt_mode = EFX_INT_MODE_LEGACY; in efx_probe_interrupts()
1523 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { in efx_probe_interrupts()
1524 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); in efx_probe_interrupts()
1525 efx->n_rx_channels = 1; in efx_probe_interrupts()
1526 efx->n_tx_channels = 1; in efx_probe_interrupts()
1527 efx->legacy_irq = efx->pci_dev->irq; in efx_probe_interrupts()
1531 efx->n_extra_tx_channels = 0; in efx_probe_interrupts()
1532 j = efx->n_channels; in efx_probe_interrupts()
1534 if (!efx->extra_channel_type[i]) in efx_probe_interrupts()
1536 if (efx->interrupt_mode != EFX_INT_MODE_MSIX || in efx_probe_interrupts()
1537 efx->n_channels <= extra_channels) { in efx_probe_interrupts()
1538 efx->extra_channel_type[i]->handle_no_channel(efx); in efx_probe_interrupts()
1541 efx_get_channel(efx, j)->type = in efx_probe_interrupts()
1542 efx->extra_channel_type[i]; in efx_probe_interrupts()
1543 if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) in efx_probe_interrupts()
1544 efx->n_extra_tx_channels++; in efx_probe_interrupts()
1550 if (efx->type->sriov_wanted) { in efx_probe_interrupts()
1551 efx->rss_spread = ((efx->n_rx_channels > 1 || in efx_probe_interrupts()
1552 !efx->type->sriov_wanted(efx)) ? in efx_probe_interrupts()
1553 efx->n_rx_channels : efx_vf_size(efx)); in efx_probe_interrupts()
1557 efx->rss_spread = efx->n_rx_channels; in efx_probe_interrupts()
1563 static void efx_set_interrupt_affinity(struct efx_nic *efx) in efx_set_interrupt_affinity() argument
1568 efx_for_each_channel(channel, efx) { in efx_set_interrupt_affinity()
1570 pcibus_to_node(efx->pci_dev->bus)); in efx_set_interrupt_affinity()
1575 static void efx_clear_interrupt_affinity(struct efx_nic *efx) in efx_clear_interrupt_affinity() argument
1579 efx_for_each_channel(channel, efx) in efx_clear_interrupt_affinity()
1584 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) in efx_set_interrupt_affinity() argument
1589 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) in efx_clear_interrupt_affinity() argument
1594 static int efx_soft_enable_interrupts(struct efx_nic *efx) in efx_soft_enable_interrupts() argument
1599 BUG_ON(efx->state == STATE_DISABLED); in efx_soft_enable_interrupts()
1601 efx->irq_soft_enabled = true; in efx_soft_enable_interrupts()
1604 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1613 efx_mcdi_mode_event(efx); in efx_soft_enable_interrupts()
1618 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1629 static void efx_soft_disable_interrupts(struct efx_nic *efx) in efx_soft_disable_interrupts() argument
1633 if (efx->state == STATE_DISABLED) in efx_soft_disable_interrupts()
1636 efx_mcdi_mode_poll(efx); in efx_soft_disable_interrupts()
1638 efx->irq_soft_enabled = false; in efx_soft_disable_interrupts()
1641 if (efx->legacy_irq) in efx_soft_disable_interrupts()
1642 synchronize_irq(efx->legacy_irq); in efx_soft_disable_interrupts()
1644 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1654 efx_mcdi_flush_async(efx); in efx_soft_disable_interrupts()
1657 static int efx_enable_interrupts(struct efx_nic *efx) in efx_enable_interrupts() argument
1662 BUG_ON(efx->state == STATE_DISABLED); in efx_enable_interrupts()
1664 if (efx->eeh_disabled_legacy_irq) { in efx_enable_interrupts()
1665 enable_irq(efx->legacy_irq); in efx_enable_interrupts()
1666 efx->eeh_disabled_legacy_irq = false; in efx_enable_interrupts()
1669 efx->type->irq_enable_master(efx); in efx_enable_interrupts()
1671 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1679 rc = efx_soft_enable_interrupts(efx); in efx_enable_interrupts()
1687 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1694 efx->type->irq_disable_non_ev(efx); in efx_enable_interrupts()
1699 static void efx_disable_interrupts(struct efx_nic *efx) in efx_disable_interrupts() argument
1703 efx_soft_disable_interrupts(efx); in efx_disable_interrupts()
1705 efx_for_each_channel(channel, efx) { in efx_disable_interrupts()
1710 efx->type->irq_disable_non_ev(efx); in efx_disable_interrupts()
1713 static void efx_remove_interrupts(struct efx_nic *efx) in efx_remove_interrupts() argument
1718 efx_for_each_channel(channel, efx) in efx_remove_interrupts()
1720 pci_disable_msi(efx->pci_dev); in efx_remove_interrupts()
1721 pci_disable_msix(efx->pci_dev); in efx_remove_interrupts()
1724 efx->legacy_irq = 0; in efx_remove_interrupts()
1727 static void efx_set_channels(struct efx_nic *efx) in efx_set_channels() argument
1732 efx->tx_channel_offset = in efx_set_channels()
1734 efx->n_channels - efx->n_tx_channels : 0; in efx_set_channels()
1740 efx_for_each_channel(channel, efx) { in efx_set_channels()
1741 if (channel->channel < efx->n_rx_channels) in efx_set_channels()
1747 tx_queue->queue -= (efx->tx_channel_offset * in efx_set_channels()
1752 static int efx_probe_nic(struct efx_nic *efx) in efx_probe_nic() argument
1756 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); in efx_probe_nic()
1759 rc = efx->type->probe(efx); in efx_probe_nic()
1764 if (!efx->max_channels || !efx->max_tx_channels) { in efx_probe_nic()
1765 netif_err(efx, drv, efx->net_dev, in efx_probe_nic()
1775 rc = efx_probe_interrupts(efx); in efx_probe_nic()
1779 efx_set_channels(efx); in efx_probe_nic()
1782 rc = efx->type->dimension_resources(efx); in efx_probe_nic()
1788 efx_remove_interrupts(efx); in efx_probe_nic()
1792 if (efx->n_channels > 1) in efx_probe_nic()
1793 netdev_rss_key_fill(efx->rss_context.rx_hash_key, in efx_probe_nic()
1794 sizeof(efx->rss_context.rx_hash_key)); in efx_probe_nic()
1795 efx_set_default_rx_indir_table(efx, &efx->rss_context); in efx_probe_nic()
1797 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); in efx_probe_nic()
1798 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); in efx_probe_nic()
1801 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); in efx_probe_nic()
1802 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, in efx_probe_nic()
1808 efx_remove_interrupts(efx); in efx_probe_nic()
1810 efx->type->remove(efx); in efx_probe_nic()
1814 static void efx_remove_nic(struct efx_nic *efx) in efx_remove_nic() argument
1816 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); in efx_remove_nic()
1818 efx_remove_interrupts(efx); in efx_remove_nic()
1819 efx->type->remove(efx); in efx_remove_nic()
1822 static int efx_probe_filters(struct efx_nic *efx) in efx_probe_filters() argument
1826 init_rwsem(&efx->filter_sem); in efx_probe_filters()
1827 mutex_lock(&efx->mac_lock); in efx_probe_filters()
1828 down_write(&efx->filter_sem); in efx_probe_filters()
1829 rc = efx->type->filter_table_probe(efx); in efx_probe_filters()
1834 if (efx->type->offload_features & NETIF_F_NTUPLE) { in efx_probe_filters()
1838 efx_for_each_channel(channel, efx) { in efx_probe_filters()
1840 kcalloc(efx->type->max_rx_ip_filters, in efx_probe_filters()
1847 i < efx->type->max_rx_ip_filters; in efx_probe_filters()
1854 efx_for_each_channel(channel, efx) in efx_probe_filters()
1856 efx->type->filter_table_remove(efx); in efx_probe_filters()
1861 efx->rps_expire_index = efx->rps_expire_channel = 0; in efx_probe_filters()
1865 up_write(&efx->filter_sem); in efx_probe_filters()
1866 mutex_unlock(&efx->mac_lock); in efx_probe_filters()
1870 static void efx_remove_filters(struct efx_nic *efx) in efx_remove_filters() argument
1875 efx_for_each_channel(channel, efx) in efx_remove_filters()
1878 down_write(&efx->filter_sem); in efx_remove_filters()
1879 efx->type->filter_table_remove(efx); in efx_remove_filters()
1880 up_write(&efx->filter_sem); in efx_remove_filters()
1890 static int efx_probe_all(struct efx_nic *efx) in efx_probe_all() argument
1894 rc = efx_probe_nic(efx); in efx_probe_all()
1896 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); in efx_probe_all()
1900 rc = efx_probe_port(efx); in efx_probe_all()
1902 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); in efx_probe_all()
1907 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { in efx_probe_all()
1911 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; in efx_probe_all()
1914 rc = efx->type->vswitching_probe(efx); in efx_probe_all()
1916 netif_warn(efx, probe, efx->net_dev, in efx_probe_all()
1921 rc = efx_probe_filters(efx); in efx_probe_all()
1923 netif_err(efx, probe, efx->net_dev, in efx_probe_all()
1928 rc = efx_probe_channels(efx); in efx_probe_all()
1935 efx_remove_filters(efx); in efx_probe_all()
1938 efx->type->vswitching_remove(efx); in efx_probe_all()
1941 efx_remove_port(efx); in efx_probe_all()
1943 efx_remove_nic(efx); in efx_probe_all()
1955 static void efx_start_all(struct efx_nic *efx) in efx_start_all() argument
1957 EFX_ASSERT_RESET_SERIALISED(efx); in efx_start_all()
1958 BUG_ON(efx->state == STATE_DISABLED); in efx_start_all()
1962 if (efx->port_enabled || !netif_running(efx->net_dev) || in efx_start_all()
1963 efx->reset_pending) in efx_start_all()
1966 efx_start_port(efx); in efx_start_all()
1967 efx_start_datapath(efx); in efx_start_all()
1970 if (efx->type->monitor != NULL) in efx_start_all()
1971 queue_delayed_work(efx->workqueue, &efx->monitor_work, in efx_start_all()
1977 mutex_lock(&efx->mac_lock); in efx_start_all()
1978 if (efx->phy_op->poll(efx)) in efx_start_all()
1979 efx_link_status_changed(efx); in efx_start_all()
1980 mutex_unlock(&efx->mac_lock); in efx_start_all()
1982 efx->type->start_stats(efx); in efx_start_all()
1983 efx->type->pull_stats(efx); in efx_start_all()
1984 spin_lock_bh(&efx->stats_lock); in efx_start_all()
1985 efx->type->update_stats(efx, NULL, NULL); in efx_start_all()
1986 spin_unlock_bh(&efx->stats_lock); in efx_start_all()
1994 static void efx_stop_all(struct efx_nic *efx) in efx_stop_all() argument
1996 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_all()
1999 if (!efx->port_enabled) in efx_stop_all()
2005 efx->type->pull_stats(efx); in efx_stop_all()
2006 spin_lock_bh(&efx->stats_lock); in efx_stop_all()
2007 efx->type->update_stats(efx, NULL, NULL); in efx_stop_all()
2008 spin_unlock_bh(&efx->stats_lock); in efx_stop_all()
2009 efx->type->stop_stats(efx); in efx_stop_all()
2010 efx_stop_port(efx); in efx_stop_all()
2016 WARN_ON(netif_running(efx->net_dev) && in efx_stop_all()
2017 netif_device_present(efx->net_dev)); in efx_stop_all()
2018 netif_tx_disable(efx->net_dev); in efx_stop_all()
2020 efx_stop_datapath(efx); in efx_stop_all()
2023 static void efx_remove_all(struct efx_nic *efx) in efx_remove_all() argument
2025 efx_remove_channels(efx); in efx_remove_all()
2026 efx_remove_filters(efx); in efx_remove_all()
2028 efx->type->vswitching_remove(efx); in efx_remove_all()
2030 efx_remove_port(efx); in efx_remove_all()
2031 efx_remove_nic(efx); in efx_remove_all()
2039 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) in efx_usecs_to_ticks() argument
2043 if (usecs * 1000 < efx->timer_quantum_ns) in efx_usecs_to_ticks()
2045 return usecs * 1000 / efx->timer_quantum_ns; in efx_usecs_to_ticks()
2048 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) in efx_ticks_to_usecs() argument
2053 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); in efx_ticks_to_usecs()
2057 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, in efx_init_irq_moderation() argument
2064 EFX_ASSERT_RESET_SERIALISED(efx); in efx_init_irq_moderation()
2066 timer_max_us = efx->timer_max_ns / 1000; in efx_init_irq_moderation()
2071 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && in efx_init_irq_moderation()
2073 netif_err(efx, drv, efx->net_dev, "Channels are shared. " in efx_init_irq_moderation()
2078 efx->irq_rx_adaptive = rx_adaptive; in efx_init_irq_moderation()
2079 efx->irq_rx_moderation_us = rx_usecs; in efx_init_irq_moderation()
2080 efx_for_each_channel(channel, efx) { in efx_init_irq_moderation()
2090 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, in efx_get_irq_moderation() argument
2093 *rx_adaptive = efx->irq_rx_adaptive; in efx_get_irq_moderation()
2094 *rx_usecs = efx->irq_rx_moderation_us; in efx_get_irq_moderation()
2100 if (efx->tx_channel_offset == 0) { in efx_get_irq_moderation()
2105 tx_channel = efx->channel[efx->tx_channel_offset]; in efx_get_irq_moderation()
2119 struct efx_nic *efx = container_of(data, struct efx_nic, in efx_monitor() local
2122 netif_vdbg(efx, timer, efx->net_dev, in efx_monitor()
2125 BUG_ON(efx->type->monitor == NULL); in efx_monitor()
2130 if (mutex_trylock(&efx->mac_lock)) { in efx_monitor()
2131 if (efx->port_enabled) in efx_monitor()
2132 efx->type->monitor(efx); in efx_monitor()
2133 mutex_unlock(&efx->mac_lock); in efx_monitor()
2136 queue_delayed_work(efx->workqueue, &efx->monitor_work, in efx_monitor()
2151 struct efx_nic *efx = netdev_priv(net_dev); in efx_ioctl() local
2155 return efx_ptp_set_ts_config(efx, ifr); in efx_ioctl()
2157 return efx_ptp_get_ts_config(efx, ifr); in efx_ioctl()
2164 return mdio_mii_ioctl(&efx->mdio, data, cmd); in efx_ioctl()
2175 struct efx_nic *efx = channel->efx; in efx_init_napi_channel() local
2177 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
2182 static void efx_init_napi(struct efx_nic *efx) in efx_init_napi() argument
2186 efx_for_each_channel(channel, efx) in efx_init_napi()
2198 static void efx_fini_napi(struct efx_nic *efx) in efx_fini_napi() argument
2202 efx_for_each_channel(channel, efx) in efx_fini_napi()
2215 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_open() local
2218 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", in efx_net_open()
2221 rc = efx_check_disabled(efx); in efx_net_open()
2224 if (efx->phy_mode & PHY_MODE_SPECIAL) in efx_net_open()
2226 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) in efx_net_open()
2231 efx_link_status_changed(efx); in efx_net_open()
2233 efx_start_all(efx); in efx_net_open()
2234 if (efx->state == STATE_DISABLED || efx->reset_pending) in efx_net_open()
2235 netif_device_detach(efx->net_dev); in efx_net_open()
2236 efx_selftest_async_start(efx); in efx_net_open()
2246 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_stop() local
2248 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", in efx_net_stop()
2252 efx_stop_all(efx); in efx_net_stop()
2261 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_stats() local
2263 spin_lock_bh(&efx->stats_lock); in efx_net_stats()
2264 efx->type->update_stats(efx, NULL, stats); in efx_net_stats()
2265 spin_unlock_bh(&efx->stats_lock); in efx_net_stats()
2271 struct efx_nic *efx = netdev_priv(net_dev); in efx_watchdog() local
2273 netif_err(efx, tx_err, efx->net_dev, in efx_watchdog()
2275 efx->port_enabled); in efx_watchdog()
2277 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); in efx_watchdog()
2284 struct efx_nic *efx = netdev_priv(net_dev); in efx_change_mtu() local
2287 rc = efx_check_disabled(efx); in efx_change_mtu()
2291 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); in efx_change_mtu()
2293 efx_device_detach_sync(efx); in efx_change_mtu()
2294 efx_stop_all(efx); in efx_change_mtu()
2296 mutex_lock(&efx->mac_lock); in efx_change_mtu()
2298 efx_mac_reconfigure(efx); in efx_change_mtu()
2299 mutex_unlock(&efx->mac_lock); in efx_change_mtu()
2301 efx_start_all(efx); in efx_change_mtu()
2302 efx_device_attach_if_not_resetting(efx); in efx_change_mtu()
2308 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_mac_address() local
2315 netif_err(efx, drv, efx->net_dev, in efx_set_mac_address()
2324 if (efx->type->set_mac_address) { in efx_set_mac_address()
2325 rc = efx->type->set_mac_address(efx); in efx_set_mac_address()
2333 mutex_lock(&efx->mac_lock); in efx_set_mac_address()
2334 efx_mac_reconfigure(efx); in efx_set_mac_address()
2335 mutex_unlock(&efx->mac_lock); in efx_set_mac_address()
2343 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_rx_mode() local
2345 if (efx->port_enabled) in efx_set_rx_mode()
2346 queue_work(efx->workqueue, &efx->mac_work); in efx_set_rx_mode()
2352 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_features() local
2357 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); in efx_set_features()
2379 struct efx_nic *efx = netdev_priv(net_dev); in efx_get_phys_port_id() local
2381 if (efx->type->get_phys_port_id) in efx_get_phys_port_id()
2382 return efx->type->get_phys_port_id(efx, ppid); in efx_get_phys_port_id()
2390 struct efx_nic *efx = netdev_priv(net_dev); in efx_get_phys_port_name() local
2392 if (snprintf(name, len, "p%u", efx->port_num) >= len) in efx_get_phys_port_name()
2399 struct efx_nic *efx = netdev_priv(net_dev); in efx_vlan_rx_add_vid() local
2401 if (efx->type->vlan_rx_add_vid) in efx_vlan_rx_add_vid()
2402 return efx->type->vlan_rx_add_vid(efx, proto, vid); in efx_vlan_rx_add_vid()
2409 struct efx_nic *efx = netdev_priv(net_dev); in efx_vlan_rx_kill_vid() local
2411 if (efx->type->vlan_rx_kill_vid) in efx_vlan_rx_kill_vid()
2412 return efx->type->vlan_rx_kill_vid(efx, proto, vid); in efx_vlan_rx_kill_vid()
2431 struct efx_nic *efx = netdev_priv(dev); in efx_udp_tunnel_add() local
2442 if (efx->type->udp_tnl_add_port) in efx_udp_tunnel_add()
2443 (void)efx->type->udp_tnl_add_port(efx, tnl); in efx_udp_tunnel_add()
2448 struct efx_nic *efx = netdev_priv(dev); in efx_udp_tunnel_del() local
2459 if (efx->type->udp_tnl_del_port) in efx_udp_tunnel_del()
2460 (void)efx->type->udp_tnl_del_port(efx, tnl); in efx_udp_tunnel_del()
2494 static void efx_update_name(struct efx_nic *efx) in efx_update_name() argument
2496 strcpy(efx->name, efx->net_dev->name); in efx_update_name()
2497 efx_mtd_rename(efx); in efx_update_name()
2498 efx_set_channel_names(efx); in efx_update_name()
2520 struct efx_nic *efx = dev_get_drvdata(dev); in show_phy_type() local
2521 return sprintf(buf, "%d\n", efx->phy_type); in show_phy_type()
2529 struct efx_nic *efx = dev_get_drvdata(dev); in show_mcdi_log() local
2530 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); in show_mcdi_log()
2537 struct efx_nic *efx = dev_get_drvdata(dev); in set_mcdi_log() local
2538 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); in set_mcdi_log()
2547 static int efx_register_netdev(struct efx_nic *efx) in efx_register_netdev() argument
2549 struct net_device *net_dev = efx->net_dev; in efx_register_netdev()
2554 net_dev->irq = efx->pci_dev->irq; in efx_register_netdev()
2556 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) in efx_register_netdev()
2569 efx->state = STATE_READY; in efx_register_netdev()
2571 if (efx->reset_pending) { in efx_register_netdev()
2572 netif_err(efx, probe, efx->net_dev, in efx_register_netdev()
2581 efx_update_name(efx); in efx_register_netdev()
2590 efx_for_each_channel(channel, efx) { in efx_register_netdev()
2596 efx_associate(efx); in efx_register_netdev()
2600 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); in efx_register_netdev()
2602 netif_err(efx, drv, efx->net_dev, in efx_register_netdev()
2607 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); in efx_register_netdev()
2609 netif_err(efx, drv, efx->net_dev, in efx_register_netdev()
2619 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); in efx_register_netdev()
2623 efx_dissociate(efx); in efx_register_netdev()
2626 efx->state = STATE_UNINIT; in efx_register_netdev()
2628 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); in efx_register_netdev()
2632 static void efx_unregister_netdev(struct efx_nic *efx) in efx_unregister_netdev() argument
2634 if (!efx->net_dev) in efx_unregister_netdev()
2637 BUG_ON(netdev_priv(efx->net_dev) != efx); in efx_unregister_netdev()
2639 if (efx_dev_registered(efx)) { in efx_unregister_netdev()
2640 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); in efx_unregister_netdev()
2642 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); in efx_unregister_netdev()
2644 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); in efx_unregister_netdev()
2645 unregister_netdev(efx->net_dev); in efx_unregister_netdev()
2657 void efx_reset_down(struct efx_nic *efx, enum reset_type method) in efx_reset_down() argument
2659 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reset_down()
2662 efx->type->prepare_flr(efx); in efx_reset_down()
2664 efx_stop_all(efx); in efx_reset_down()
2665 efx_disable_interrupts(efx); in efx_reset_down()
2667 mutex_lock(&efx->mac_lock); in efx_reset_down()
2668 down_write(&efx->filter_sem); in efx_reset_down()
2669 mutex_lock(&efx->rss_lock); in efx_reset_down()
2670 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && in efx_reset_down()
2672 efx->phy_op->fini(efx); in efx_reset_down()
2673 efx->type->fini(efx); in efx_reset_down()
2681 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) in efx_reset_up() argument
2685 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reset_up()
2688 efx->type->finish_flr(efx); in efx_reset_up()
2691 rc = efx->type->init(efx); in efx_reset_up()
2693 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); in efx_reset_up()
2700 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && in efx_reset_up()
2702 rc = efx->phy_op->init(efx); in efx_reset_up()
2705 rc = efx->phy_op->reconfigure(efx); in efx_reset_up()
2707 netif_err(efx, drv, efx->net_dev, in efx_reset_up()
2711 rc = efx_enable_interrupts(efx); in efx_reset_up()
2716 rc = efx->type->vswitching_restore(efx); in efx_reset_up()
2718 netif_warn(efx, probe, efx->net_dev, in efx_reset_up()
2723 if (efx->type->rx_restore_rss_contexts) in efx_reset_up()
2724 efx->type->rx_restore_rss_contexts(efx); in efx_reset_up()
2725 mutex_unlock(&efx->rss_lock); in efx_reset_up()
2726 efx->type->filter_table_restore(efx); in efx_reset_up()
2727 up_write(&efx->filter_sem); in efx_reset_up()
2728 if (efx->type->sriov_reset) in efx_reset_up()
2729 efx->type->sriov_reset(efx); in efx_reset_up()
2731 mutex_unlock(&efx->mac_lock); in efx_reset_up()
2733 efx_start_all(efx); in efx_reset_up()
2735 if (efx->type->udp_tnl_push_ports) in efx_reset_up()
2736 efx->type->udp_tnl_push_ports(efx); in efx_reset_up()
2741 efx->port_initialized = false; in efx_reset_up()
2743 mutex_unlock(&efx->rss_lock); in efx_reset_up()
2744 up_write(&efx->filter_sem); in efx_reset_up()
2745 mutex_unlock(&efx->mac_lock); in efx_reset_up()
2755 int efx_reset(struct efx_nic *efx, enum reset_type method) in efx_reset() argument
2760 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", in efx_reset()
2763 efx_device_detach_sync(efx); in efx_reset()
2764 efx_reset_down(efx, method); in efx_reset()
2766 rc = efx->type->reset(efx, method); in efx_reset()
2768 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); in efx_reset()
2776 efx->reset_pending &= -(1 << (method + 1)); in efx_reset()
2778 __clear_bit(method, &efx->reset_pending); in efx_reset()
2784 pci_set_master(efx->pci_dev); in efx_reset()
2791 rc2 = efx_reset_up(efx, method, !disabled); in efx_reset()
2799 dev_close(efx->net_dev); in efx_reset()
2800 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); in efx_reset()
2801 efx->state = STATE_DISABLED; in efx_reset()
2803 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); in efx_reset()
2804 efx_device_attach_if_not_resetting(efx); in efx_reset()
2814 int efx_try_recovery(struct efx_nic *efx) in efx_try_recovery() argument
2822 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); in efx_try_recovery()
2833 static void efx_wait_for_bist_end(struct efx_nic *efx) in efx_wait_for_bist_end() argument
2838 if (efx_mcdi_poll_reboot(efx)) in efx_wait_for_bist_end()
2843 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); in efx_wait_for_bist_end()
2848 efx->mc_bist_for_other_fn = false; in efx_wait_for_bist_end()
2856 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); in efx_reset_work() local
2860 pending = READ_ONCE(efx->reset_pending); in efx_reset_work()
2864 efx_wait_for_bist_end(efx); in efx_reset_work()
2868 efx_try_recovery(efx)) in efx_reset_work()
2880 if (efx->state == STATE_READY) in efx_reset_work()
2881 (void)efx_reset(efx, method); in efx_reset_work()
2886 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) in efx_schedule_reset() argument
2890 if (efx->state == STATE_RECOVERY) { in efx_schedule_reset()
2891 netif_dbg(efx, drv, efx->net_dev, in efx_schedule_reset()
2908 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", in efx_schedule_reset()
2912 method = efx->type->map_reset_reason(type); in efx_schedule_reset()
2913 netif_dbg(efx, drv, efx->net_dev, in efx_schedule_reset()
2919 set_bit(method, &efx->reset_pending); in efx_schedule_reset()
2925 if (READ_ONCE(efx->state) != STATE_READY) in efx_schedule_reset()
2930 efx_mcdi_mode_poll(efx); in efx_schedule_reset()
2932 queue_work(reset_workqueue, &efx->reset_work); in efx_schedule_reset()
2975 int efx_port_dummy_op_int(struct efx_nic *efx) in efx_port_dummy_op_int() argument
2979 void efx_port_dummy_op_void(struct efx_nic *efx) {} in efx_port_dummy_op_void() argument
2981 static bool efx_port_dummy_op_poll(struct efx_nic *efx) in efx_port_dummy_op_poll() argument
3002 static int efx_init_struct(struct efx_nic *efx, in efx_init_struct() argument
3008 INIT_LIST_HEAD(&efx->node); in efx_init_struct()
3009 INIT_LIST_HEAD(&efx->secondary_list); in efx_init_struct()
3010 spin_lock_init(&efx->biu_lock); in efx_init_struct()
3012 INIT_LIST_HEAD(&efx->mtd_list); in efx_init_struct()
3014 INIT_WORK(&efx->reset_work, efx_reset_work); in efx_init_struct()
3015 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); in efx_init_struct()
3016 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); in efx_init_struct()
3017 efx->pci_dev = pci_dev; in efx_init_struct()
3018 efx->msg_enable = debug; in efx_init_struct()
3019 efx->state = STATE_UNINIT; in efx_init_struct()
3020 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); in efx_init_struct()
3022 efx->net_dev = net_dev; in efx_init_struct()
3023 efx->rx_prefix_size = efx->type->rx_prefix_size; in efx_init_struct()
3024 efx->rx_ip_align = in efx_init_struct()
3025 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; in efx_init_struct()
3026 efx->rx_packet_hash_offset = in efx_init_struct()
3027 efx->type->rx_hash_offset - efx->type->rx_prefix_size; in efx_init_struct()
3028 efx->rx_packet_ts_offset = in efx_init_struct()
3029 efx->type->rx_ts_offset - efx->type->rx_prefix_size; in efx_init_struct()
3030 INIT_LIST_HEAD(&efx->rss_context.list); in efx_init_struct()
3031 mutex_init(&efx->rss_lock); in efx_init_struct()
3032 spin_lock_init(&efx->stats_lock); in efx_init_struct()
3033 efx->vi_stride = EFX_DEFAULT_VI_STRIDE; in efx_init_struct()
3034 efx->num_mac_stats = MC_CMD_MAC_NSTATS; in efx_init_struct()
3036 mutex_init(&efx->mac_lock); in efx_init_struct()
3038 mutex_init(&efx->rps_mutex); in efx_init_struct()
3039 spin_lock_init(&efx->rps_hash_lock); in efx_init_struct()
3041 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, in efx_init_struct()
3042 sizeof(*efx->rps_hash_table), GFP_KERNEL); in efx_init_struct()
3044 efx->phy_op = &efx_dummy_phy_operations; in efx_init_struct()
3045 efx->mdio.dev = net_dev; in efx_init_struct()
3046 INIT_WORK(&efx->mac_work, efx_mac_work); in efx_init_struct()
3047 init_waitqueue_head(&efx->flush_wq); in efx_init_struct()
3050 efx->channel[i] = efx_alloc_channel(efx, i, NULL); in efx_init_struct()
3051 if (!efx->channel[i]) in efx_init_struct()
3053 efx->msi_context[i].efx = efx; in efx_init_struct()
3054 efx->msi_context[i].index = i; in efx_init_struct()
3058 if (WARN_ON_ONCE(efx->type->max_interrupt_mode > in efx_init_struct()
3059 efx->type->min_interrupt_mode)) { in efx_init_struct()
3063 efx->interrupt_mode = max(efx->type->max_interrupt_mode, in efx_init_struct()
3065 efx->interrupt_mode = min(efx->type->min_interrupt_mode, in efx_init_struct()
3069 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", in efx_init_struct()
3071 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); in efx_init_struct()
3072 if (!efx->workqueue) in efx_init_struct()
3078 efx_fini_struct(efx); in efx_init_struct()
3082 static void efx_fini_struct(struct efx_nic *efx) in efx_fini_struct() argument
3087 kfree(efx->rps_hash_table); in efx_fini_struct()
3091 kfree(efx->channel[i]); in efx_fini_struct()
3093 kfree(efx->vpd_sn); in efx_fini_struct()
3095 if (efx->workqueue) { in efx_fini_struct()
3096 destroy_workqueue(efx->workqueue); in efx_fini_struct()
3097 efx->workqueue = NULL; in efx_fini_struct()
3101 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) in efx_update_sw_stats() argument
3106 efx_for_each_channel(channel, efx) in efx_update_sw_stats()
3109 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); in efx_update_sw_stats()
3162 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, in efx_rps_hash_bucket() argument
3167 lockdep_assert_held(&efx->rps_hash_lock); in efx_rps_hash_bucket()
3168 if (!efx->rps_hash_table) in efx_rps_hash_bucket()
3170 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; in efx_rps_hash_bucket()
3173 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, in efx_rps_hash_find() argument
3180 head = efx_rps_hash_bucket(efx, spec); in efx_rps_hash_find()
3191 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, in efx_rps_hash_add() argument
3199 head = efx_rps_hash_bucket(efx, spec); in efx_rps_hash_add()
3218 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) in efx_rps_hash_del() argument
3224 head = efx_rps_hash_bucket(efx, spec); in efx_rps_hash_del()
3251 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) in efx_alloc_rss_context_entry() argument
3253 struct list_head *head = &efx->rss_context.list; in efx_alloc_rss_context_entry()
3257 WARN_ON(!mutex_is_locked(&efx->rss_lock)); in efx_alloc_rss_context_entry()
3284 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) in efx_find_rss_context_entry() argument
3286 struct list_head *head = &efx->rss_context.list; in efx_find_rss_context_entry()
3289 WARN_ON(!mutex_is_locked(&efx->rss_lock)); in efx_find_rss_context_entry()
3312 static void efx_pci_remove_main(struct efx_nic *efx) in efx_pci_remove_main() argument
3317 BUG_ON(efx->state == STATE_READY); in efx_pci_remove_main()
3318 cancel_work_sync(&efx->reset_work); in efx_pci_remove_main()
3320 efx_disable_interrupts(efx); in efx_pci_remove_main()
3321 efx_clear_interrupt_affinity(efx); in efx_pci_remove_main()
3322 efx_nic_fini_interrupt(efx); in efx_pci_remove_main()
3323 efx_fini_port(efx); in efx_pci_remove_main()
3324 efx->type->fini(efx); in efx_pci_remove_main()
3325 efx_fini_napi(efx); in efx_pci_remove_main()
3326 efx_remove_all(efx); in efx_pci_remove_main()
3335 struct efx_nic *efx; in efx_pci_remove() local
3337 efx = pci_get_drvdata(pci_dev); in efx_pci_remove()
3338 if (!efx) in efx_pci_remove()
3343 efx_dissociate(efx); in efx_pci_remove()
3344 dev_close(efx->net_dev); in efx_pci_remove()
3345 efx_disable_interrupts(efx); in efx_pci_remove()
3346 efx->state = STATE_UNINIT; in efx_pci_remove()
3349 if (efx->type->sriov_fini) in efx_pci_remove()
3350 efx->type->sriov_fini(efx); in efx_pci_remove()
3352 efx_unregister_netdev(efx); in efx_pci_remove()
3354 efx_mtd_remove(efx); in efx_pci_remove()
3356 efx_pci_remove_main(efx); in efx_pci_remove()
3358 efx_fini_io(efx); in efx_pci_remove()
3359 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); in efx_pci_remove()
3361 efx_fini_struct(efx); in efx_pci_remove()
3362 free_netdev(efx->net_dev); in efx_pci_remove()
3373 static void efx_probe_vpd_strings(struct efx_nic *efx) in efx_probe_vpd_strings() argument
3375 struct pci_dev *dev = efx->pci_dev; in efx_probe_vpd_strings()
3383 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); in efx_probe_vpd_strings()
3390 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); in efx_probe_vpd_strings()
3403 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); in efx_probe_vpd_strings()
3410 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); in efx_probe_vpd_strings()
3414 netif_info(efx, drv, efx->net_dev, in efx_probe_vpd_strings()
3421 netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); in efx_probe_vpd_strings()
3428 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); in efx_probe_vpd_strings()
3432 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); in efx_probe_vpd_strings()
3433 if (!efx->vpd_sn) in efx_probe_vpd_strings()
3436 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); in efx_probe_vpd_strings()
3443 static int efx_pci_probe_main(struct efx_nic *efx) in efx_pci_probe_main() argument
3448 rc = efx_probe_all(efx); in efx_pci_probe_main()
3452 efx_init_napi(efx); in efx_pci_probe_main()
3454 down_write(&efx->filter_sem); in efx_pci_probe_main()
3455 rc = efx->type->init(efx); in efx_pci_probe_main()
3456 up_write(&efx->filter_sem); in efx_pci_probe_main()
3458 netif_err(efx, probe, efx->net_dev, in efx_pci_probe_main()
3463 rc = efx_init_port(efx); in efx_pci_probe_main()
3465 netif_err(efx, probe, efx->net_dev, in efx_pci_probe_main()
3470 rc = efx_nic_init_interrupt(efx); in efx_pci_probe_main()
3474 efx_set_interrupt_affinity(efx); in efx_pci_probe_main()
3475 rc = efx_enable_interrupts(efx); in efx_pci_probe_main()
3482 efx_clear_interrupt_affinity(efx); in efx_pci_probe_main()
3483 efx_nic_fini_interrupt(efx); in efx_pci_probe_main()
3485 efx_fini_port(efx); in efx_pci_probe_main()
3487 efx->type->fini(efx); in efx_pci_probe_main()
3489 efx_fini_napi(efx); in efx_pci_probe_main()
3490 efx_remove_all(efx); in efx_pci_probe_main()
3495 static int efx_pci_probe_post_io(struct efx_nic *efx) in efx_pci_probe_post_io() argument
3497 struct net_device *net_dev = efx->net_dev; in efx_pci_probe_post_io()
3498 int rc = efx_pci_probe_main(efx); in efx_pci_probe_post_io()
3503 if (efx->type->sriov_init) { in efx_pci_probe_post_io()
3504 rc = efx->type->sriov_init(efx); in efx_pci_probe_post_io()
3506 netif_err(efx, probe, efx->net_dev, in efx_pci_probe_post_io()
3511 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | in efx_pci_probe_post_io()
3513 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) in efx_pci_probe_post_io()
3516 if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) in efx_pci_probe_post_io()
3523 net_dev->hw_features |= net_dev->features & ~efx->fixed_features; in efx_pci_probe_post_io()
3533 net_dev->features |= efx->fixed_features; in efx_pci_probe_post_io()
3535 rc = efx_register_netdev(efx); in efx_pci_probe_post_io()
3539 efx_pci_remove_main(efx); in efx_pci_probe_post_io()
3556 struct efx_nic *efx; in efx_pci_probe() local
3560 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, in efx_pci_probe()
3564 efx = netdev_priv(net_dev); in efx_pci_probe()
3565 efx->type = (const struct efx_nic_type *) entry->driver_data; in efx_pci_probe()
3566 efx->fixed_features |= NETIF_F_HIGHDMA; in efx_pci_probe()
3568 pci_set_drvdata(pci_dev, efx); in efx_pci_probe()
3570 rc = efx_init_struct(efx, pci_dev, net_dev); in efx_pci_probe()
3574 netif_info(efx, probe, efx->net_dev, in efx_pci_probe()
3577 if (!efx->type->is_vf) in efx_pci_probe()
3578 efx_probe_vpd_strings(efx); in efx_pci_probe()
3581 rc = efx_init_io(efx); in efx_pci_probe()
3585 rc = efx_pci_probe_post_io(efx); in efx_pci_probe()
3590 efx->reset_pending = 0; in efx_pci_probe()
3591 rc = efx_pci_probe_post_io(efx); in efx_pci_probe()
3600 efx->reset_pending = 0; in efx_pci_probe()
3601 rc = efx_pci_probe_post_io(efx); in efx_pci_probe()
3607 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); in efx_pci_probe()
3611 rc = efx_mtd_probe(efx); in efx_pci_probe()
3614 netif_warn(efx, probe, efx->net_dev, in efx_pci_probe()
3619 if (efx->type->udp_tnl_push_ports) in efx_pci_probe()
3620 efx->type->udp_tnl_push_ports(efx); in efx_pci_probe()
3625 efx_fini_io(efx); in efx_pci_probe()
3627 efx_fini_struct(efx); in efx_pci_probe()
3630 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); in efx_pci_probe()
3642 struct efx_nic *efx = pci_get_drvdata(dev); in efx_pci_sriov_configure() local
3644 if (efx->type->sriov_configure) { in efx_pci_sriov_configure()
3645 rc = efx->type->sriov_configure(efx, num_vfs); in efx_pci_sriov_configure()
3657 struct efx_nic *efx = dev_get_drvdata(dev); in efx_pm_freeze() local
3661 if (efx->state != STATE_DISABLED) { in efx_pm_freeze()
3662 efx->state = STATE_UNINIT; in efx_pm_freeze()
3664 efx_device_detach_sync(efx); in efx_pm_freeze()
3666 efx_stop_all(efx); in efx_pm_freeze()
3667 efx_disable_interrupts(efx); in efx_pm_freeze()
3678 struct efx_nic *efx = dev_get_drvdata(dev); in efx_pm_thaw() local
3682 if (efx->state != STATE_DISABLED) { in efx_pm_thaw()
3683 rc = efx_enable_interrupts(efx); in efx_pm_thaw()
3687 mutex_lock(&efx->mac_lock); in efx_pm_thaw()
3688 efx->phy_op->reconfigure(efx); in efx_pm_thaw()
3689 mutex_unlock(&efx->mac_lock); in efx_pm_thaw()
3691 efx_start_all(efx); in efx_pm_thaw()
3693 efx_device_attach_if_not_resetting(efx); in efx_pm_thaw()
3695 efx->state = STATE_READY; in efx_pm_thaw()
3697 efx->type->resume_wol(efx); in efx_pm_thaw()
3703 queue_work(reset_workqueue, &efx->reset_work); in efx_pm_thaw()
3716 struct efx_nic *efx = pci_get_drvdata(pci_dev); in efx_pm_poweroff() local
3718 efx->type->fini(efx); in efx_pm_poweroff()
3720 efx->reset_pending = 0; in efx_pm_poweroff()
3730 struct efx_nic *efx = pci_get_drvdata(pci_dev); in efx_pm_resume() local
3740 pci_set_master(efx->pci_dev); in efx_pm_resume()
3741 rc = efx->type->reset(efx, RESET_TYPE_ALL); in efx_pm_resume()
3744 down_write(&efx->filter_sem); in efx_pm_resume()
3745 rc = efx->type->init(efx); in efx_pm_resume()
3746 up_write(&efx->filter_sem); in efx_pm_resume()
3781 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_error_detected() local
3788 if (efx->state != STATE_DISABLED) { in efx_io_error_detected()
3789 efx->state = STATE_RECOVERY; in efx_io_error_detected()
3790 efx->reset_pending = 0; in efx_io_error_detected()
3792 efx_device_detach_sync(efx); in efx_io_error_detected()
3794 efx_stop_all(efx); in efx_io_error_detected()
3795 efx_disable_interrupts(efx); in efx_io_error_detected()
3815 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_slot_reset() local
3819 netif_err(efx, hw, efx->net_dev, in efx_io_slot_reset()
3830 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_resume() local
3835 if (efx->state == STATE_DISABLED) in efx_io_resume()
3838 rc = efx_reset(efx, RESET_TYPE_ALL); in efx_io_resume()
3840 netif_err(efx, hw, efx->net_dev, in efx_io_resume()
3843 efx->state = STATE_READY; in efx_io_resume()
3844 netif_dbg(efx, hw, efx->net_dev, in efx_io_resume()
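
Taken together, the references above show the driver's central convention: almost every function in efx.c either takes a struct efx_nic *efx as its first argument or recovers it from the net_device with netdev_priv(), and then logs through the netif_dbg()/netif_err() helpers gated by efx->msg_enable. The following is a minimal sketch of that pattern only; my_efx_example_open() and the cut-down struct efx_nic below are hypothetical stand-ins for illustration, not code from efx.c.

    /*
     * Sketch only (not from efx.c): shows the net_dev -> efx -> netif_dbg()
     * convention visible throughout the listing above. The struct layout is
     * a hypothetical cut-down stand-in with just the msg_enable field the
     * netif_*() logging macros require.
     */
    #include <linux/netdevice.h>
    #include <linux/smp.h>

    struct efx_nic {
            u32 msg_enable;         /* gates netif_dbg()/netif_err() output */
    };

    static int my_efx_example_open(struct net_device *net_dev)
    {
            /* Same pattern as efx_net_open(): net_dev -> private efx state. */
            struct efx_nic *efx = netdev_priv(net_dev);

            netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
                      raw_smp_processor_id());

            /*
             * A real handler would go on to call efx_check_disabled(),
             * efx_start_all() and so on, as the listing shows.
             */
            return 0;
    }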