Lines Matching full:channel
48 /* This is the weight assigned to each of the (per-channel) virtual
57 int efx_channel_dummy_op_int(struct efx_channel *channel) in efx_channel_dummy_op_int() argument
62 void efx_channel_dummy_op_void(struct efx_channel *channel) in efx_channel_dummy_op_void() argument
166 * We need a channel per event queue, plus a VI per tx queue. in efx_allocate_msix_channels()
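These hits appear to come from the Linux sfc Ethernet driver's channel code (efx_channels.c). The comment at line 166 states the resource accounting behind MSI-X allocation: every channel needs its own event queue (and therefore an interrupt vector), and every TX queue needs a VI. One hedged way to read that accounting, with invented helper names rather than the driver's actual logic:

#include <linux/kernel.h>

/* Hypothetical reading of the accounting in that comment (invented names,
 * not the driver's helpers): the channel count is capped both by MSI-X
 * vectors, because each channel owns an event queue, and by VIs, because
 * each TX queue a channel carries needs a VI of its own.
 */
static unsigned int example_max_channels(unsigned int msix_vecs,
					 unsigned int vis,
					 unsigned int txqs_per_channel)
{
	unsigned int vi_limited = vis / max(txqs_per_channel, 1U);

	return min(msix_vecs, vi_limited);
}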
283 /* Fall back to single channel MSI */ in efx_probe_interrupts()
371 struct efx_channel *channel; in efx_set_interrupt_affinity() local
374 efx_for_each_channel(channel, efx) { in efx_set_interrupt_affinity()
375 cpu = cpumask_local_spread(channel->channel, in efx_set_interrupt_affinity()
377 irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); in efx_set_interrupt_affinity()
383 struct efx_channel *channel; in efx_clear_interrupt_affinity() local
385 efx_for_each_channel(channel, efx) in efx_clear_interrupt_affinity()
386 irq_set_affinity_hint(channel->irq, NULL); in efx_clear_interrupt_affinity()
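The two affinity helpers above (lines 371-386) follow a common pattern: spread one IRQ affinity hint per channel across CPUs close to the device, and clear the hints again on teardown. A compact, self-contained sketch of that pattern, assuming a generic device context (struct example_dev and its fields are invented):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct example_dev {
	unsigned int n_irqs;
	unsigned int *irqs;	/* one IRQ number per queue/channel */
	int numa_node;		/* node hint for cpumask_local_spread() */
};

static void example_set_affinity(struct example_dev *dev)
{
	unsigned int i, cpu;

	for (i = 0; i < dev->n_irqs; i++) {
		/* pick the i-th CPU, preferring CPUs near the device */
		cpu = cpumask_local_spread(i, dev->numa_node);
		irq_set_affinity_hint(dev->irqs[i], cpumask_of(cpu));
	}
}

static void example_clear_affinity(struct example_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->n_irqs; i++)
		irq_set_affinity_hint(dev->irqs[i], NULL);
}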
402 struct efx_channel *channel; in efx_remove_interrupts() local
405 efx_for_each_channel(channel, efx) in efx_remove_interrupts()
406 channel->irq = 0; in efx_remove_interrupts()
419 * Event queue memory allocations are done only once. If the channel
421 * errors during channel reset and also simplifies interrupt handling.
423 int efx_probe_eventq(struct efx_channel *channel) in efx_probe_eventq() argument
425 struct efx_nic *efx = channel->efx; in efx_probe_eventq()
429 "chan %d create event queue\n", channel->channel); in efx_probe_eventq()
436 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; in efx_probe_eventq()
438 return efx_nic_probe_eventq(channel); in efx_probe_eventq()
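Line 436 sets eventq_mask to one less than the queue size, the usual power-of-two ring trick: when the size is a power of two, the read pointer wraps with a single AND against the mask. A small sketch of that sizing, assuming the requested entry count is rounded up to a power of two (EXAMPLE_MIN_EVQ_SIZE is an invented stand-in for EFX_MIN_EVQ_SIZE):

#include <linux/kernel.h>
#include <linux/log2.h>

#define EXAMPLE_MIN_EVQ_SIZE	512U	/* invented stand-in for EFX_MIN_EVQ_SIZE */

static unsigned int example_evq_mask(unsigned int wanted_entries)
{
	/* round up so (size - 1) is a valid wrap mask for the read pointer */
	unsigned int entries = roundup_pow_of_two(wanted_entries);

	return max(entries, EXAMPLE_MIN_EVQ_SIZE) - 1;
}

/* usage: read_ptr = (read_ptr + 1) & mask; */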
441 /* Prepare channel's event queue */
442 int efx_init_eventq(struct efx_channel *channel) in efx_init_eventq() argument
444 struct efx_nic *efx = channel->efx; in efx_init_eventq()
447 EFX_WARN_ON_PARANOID(channel->eventq_init); in efx_init_eventq()
450 "chan %d init event queue\n", channel->channel); in efx_init_eventq()
452 rc = efx_nic_init_eventq(channel); in efx_init_eventq()
454 efx->type->push_irq_moderation(channel); in efx_init_eventq()
455 channel->eventq_read_ptr = 0; in efx_init_eventq()
456 channel->eventq_init = true; in efx_init_eventq()
462 void efx_start_eventq(struct efx_channel *channel) in efx_start_eventq() argument
464 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_start_eventq()
465 "chan %d start event queue\n", channel->channel); in efx_start_eventq()
468 channel->enabled = true; in efx_start_eventq()
471 napi_enable(&channel->napi_str); in efx_start_eventq()
472 efx_nic_eventq_read_ack(channel); in efx_start_eventq()
476 void efx_stop_eventq(struct efx_channel *channel) in efx_stop_eventq() argument
478 if (!channel->enabled) in efx_stop_eventq()
481 napi_disable(&channel->napi_str); in efx_stop_eventq()
482 channel->enabled = false; in efx_stop_eventq()
485 void efx_fini_eventq(struct efx_channel *channel) in efx_fini_eventq() argument
487 if (!channel->eventq_init) in efx_fini_eventq()
490 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
491 "chan %d fini event queue\n", channel->channel); in efx_fini_eventq()
493 efx_nic_fini_eventq(channel); in efx_fini_eventq()
494 channel->eventq_init = false; in efx_fini_eventq()
497 void efx_remove_eventq(struct efx_channel *channel) in efx_remove_eventq() argument
499 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
500 "chan %d remove event queue\n", channel->channel); in efx_remove_eventq()
502 efx_nic_remove_eventq(channel); in efx_remove_eventq()
507 * Channel handling
515 struct efx_channel *channel; in efx_filter_rfs_expire() local
518 channel = container_of(dwork, struct efx_channel, filter_work); in efx_filter_rfs_expire()
519 time = jiffies - channel->rfs_last_expiry; in efx_filter_rfs_expire()
520 quota = channel->rfs_filter_count * time / (30 * HZ); in efx_filter_rfs_expire()
521 if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) in efx_filter_rfs_expire()
522 channel->rfs_last_expiry += time; in efx_filter_rfs_expire()
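The arithmetic at lines 519-521 paces ARFS filter expiry: the quota grows with both the installed-filter count and the time since the last pass, so on average the whole table is walked roughly once every 30 seconds, and nothing runs until at least 20 filters' worth of work has accrued. A worked sketch of the same formula (values are invented):

#include <linux/jiffies.h>

/* With ~600 installed filters and one second (HZ jiffies) since the last
 * pass, quota = 600 * HZ / (30 * HZ) = 20, exactly the threshold at which
 * an expiry pass is attempted in the lines above.
 */
static unsigned int example_arfs_quota(unsigned int filter_count,
				       unsigned int jiffies_elapsed)
{
	return filter_count * jiffies_elapsed / (30 * HZ);
}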
528 /* Allocate and initialise a channel structure. */
533 struct efx_channel *channel; in efx_alloc_channel() local
536 channel = kzalloc(sizeof(*channel), GFP_KERNEL); in efx_alloc_channel()
537 if (!channel) in efx_alloc_channel()
540 channel->efx = efx; in efx_alloc_channel()
541 channel->channel = i; in efx_alloc_channel()
542 channel->type = &efx_default_channel_type; in efx_alloc_channel()
545 tx_queue = &channel->tx_queue[j]; in efx_alloc_channel()
549 tx_queue->channel = channel; in efx_alloc_channel()
553 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); in efx_alloc_channel()
556 rx_queue = &channel->rx_queue; in efx_alloc_channel()
560 return channel; in efx_alloc_channel()
568 efx->channel[i] = efx_alloc_channel(efx, i); in efx_init_channels()
569 if (!efx->channel[i]) in efx_init_channels()
590 if (efx->channel[i]) { in efx_fini_channels()
591 kfree(efx->channel[i]); in efx_fini_channels()
592 efx->channel[i] = NULL; in efx_fini_channels()
596 /* Allocate and initialise a channel structure, copying parameters
597 * (but not resources) from an old channel structure.
603 struct efx_channel *channel; in efx_copy_channel() local
606 channel = kmalloc(sizeof(*channel), GFP_KERNEL); in efx_copy_channel()
607 if (!channel) in efx_copy_channel()
610 *channel = *old_channel; in efx_copy_channel()
612 channel->napi_dev = NULL; in efx_copy_channel()
613 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); in efx_copy_channel()
614 channel->napi_str.napi_id = 0; in efx_copy_channel()
615 channel->napi_str.state = 0; in efx_copy_channel()
616 memset(&channel->eventq, 0, sizeof(channel->eventq)); in efx_copy_channel()
619 tx_queue = &channel->tx_queue[j]; in efx_copy_channel()
620 if (tx_queue->channel) in efx_copy_channel()
621 tx_queue->channel = channel; in efx_copy_channel()
627 rx_queue = &channel->rx_queue; in efx_copy_channel()
632 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); in efx_copy_channel()
635 return channel; in efx_copy_channel()
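efx_copy_channel shows a copy-then-reset idiom: the whole structure is duplicated with a single struct assignment (line 610), then every field that refers to a live resource, such as the NAPI state and the event-queue buffer, is cleared so the copy can be probed from scratch. A minimal sketch of the idiom with an invented structure:

#include <linux/slab.h>

struct example_chan {
	int index;			/* plain parameters: safe to copy */
	unsigned int ring_size;
	void *dma_buf;			/* live resource: must not be shared */
};

static struct example_chan *example_copy_chan(const struct example_chan *old)
{
	struct example_chan *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;

	*new = *old;			/* copy parameters wholesale... */
	new->dma_buf = NULL;		/* ...but drop resource handles */
	return new;
}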
638 static int efx_probe_channel(struct efx_channel *channel) in efx_probe_channel() argument
644 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
645 "creating channel %d\n", channel->channel); in efx_probe_channel()
647 rc = channel->type->pre_probe(channel); in efx_probe_channel()
651 rc = efx_probe_eventq(channel); in efx_probe_channel()
655 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_probe_channel()
661 efx_for_each_channel_rx_queue(rx_queue, channel) { in efx_probe_channel()
667 channel->rx_list = NULL; in efx_probe_channel()
672 efx_remove_channel(channel); in efx_probe_channel()
676 void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) in efx_get_channel_name() argument
678 struct efx_nic *efx = channel->efx; in efx_get_channel_name()
682 number = channel->channel; in efx_get_channel_name()
701 struct efx_channel *channel; in efx_set_channel_names() local
703 efx_for_each_channel(channel, efx) in efx_set_channel_names()
704 channel->type->get_name(channel, in efx_set_channel_names()
705 efx->msi_context[channel->channel].name, in efx_set_channel_names()
711 struct efx_channel *channel; in efx_probe_channels() local
722 efx_for_each_channel_rev(channel, efx) { in efx_probe_channels()
723 rc = efx_probe_channel(channel); in efx_probe_channels()
726 "failed to create channel %d\n", in efx_probe_channels()
727 channel->channel); in efx_probe_channels()
740 void efx_remove_channel(struct efx_channel *channel) in efx_remove_channel() argument
745 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_channel()
746 "destroy chan %d\n", channel->channel); in efx_remove_channel()
748 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_remove_channel()
750 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_remove_channel()
752 efx_remove_eventq(channel); in efx_remove_channel()
753 channel->type->post_remove(channel); in efx_remove_channel()
758 struct efx_channel *channel; in efx_remove_channels() local
760 efx_for_each_channel(channel, efx) in efx_remove_channels()
761 efx_remove_channel(channel); in efx_remove_channels()
768 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; in efx_realloc_channels() local
780 efx_for_each_channel(channel, efx) { in efx_realloc_channels()
784 if (channel->type->copy) in efx_realloc_channels()
787 channel->eventq.index + in efx_realloc_channels()
788 channel->eventq.entries); in efx_realloc_channels()
789 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_realloc_channels()
793 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_realloc_channels()
806 channel = efx->channel[i]; in efx_realloc_channels()
807 if (channel->type->copy) in efx_realloc_channels()
808 channel = channel->type->copy(channel); in efx_realloc_channels()
809 if (!channel) { in efx_realloc_channels()
813 other_channel[i] = channel; in efx_realloc_channels()
816 /* Swap entry counts and channel pointers */ in efx_realloc_channels()
822 channel = efx->channel[i]; in efx_realloc_channels()
823 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
824 other_channel[i] = channel; in efx_realloc_channels()
831 channel = efx->channel[i]; in efx_realloc_channels()
832 if (!channel->type->copy) in efx_realloc_channels()
834 rc = efx_probe_channel(channel); in efx_realloc_channels()
837 efx_init_napi_channel(efx->channel[i]); in efx_realloc_channels()
841 /* Destroy unused channel structures */ in efx_realloc_channels()
843 channel = other_channel[i]; in efx_realloc_channels()
844 if (channel && channel->type->copy) { in efx_realloc_channels()
845 efx_fini_napi_channel(channel); in efx_realloc_channels()
846 efx_remove_channel(channel); in efx_realloc_channels()
847 kfree(channel); in efx_realloc_channels()
855 "unable to restart interrupts on channel reallocation\n"); in efx_realloc_channels()
868 channel = efx->channel[i]; in efx_realloc_channels()
869 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
870 other_channel[i] = channel; in efx_realloc_channels()
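efx_realloc_channels builds the replacement channels in a scratch array (other_channel[]), swaps it with efx->channel[] (lines 822-824), probes the new set, and on failure swaps back (lines 868-870) so the original channels survive intact. A condensed sketch of that swap-and-rollback shape with invented names:

/* Sketch only: live[] holds the current objects, scratch[] the copies.
 * Swapping pointer arrays makes both "commit" and "rollback" cheap pointer
 * moves rather than deep copies.
 */
static int example_swap_in(void *live[], void *scratch[], unsigned int n,
			   int (*probe)(void *))
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < n; i++) {	/* commit: old objects move to scratch[] */
		void *tmp = live[i];

		live[i] = scratch[i];
		scratch[i] = tmp;
	}

	for (i = 0; i < n && !rc; i++)
		rc = probe(live[i]);

	if (rc) {			/* rollback: restore the old objects */
		for (i = 0; i < n; i++) {
			void *tmp = live[i];

			live[i] = scratch[i];
			scratch[i] = tmp;
		}
	}

	return rc;
}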
882 netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", in efx_set_xdp_tx_queue()
883 tx_queue->channel->channel, tx_queue->label, in efx_set_xdp_tx_queue()
892 struct efx_channel *channel; in efx_set_channels() local
917 efx_for_each_channel(channel, efx) { in efx_set_channels()
918 if (channel->channel < efx->n_rx_channels) in efx_set_channels()
919 channel->rx_queue.core_index = channel->channel; in efx_set_channels()
921 channel->rx_queue.core_index = -1; in efx_set_channels()
923 if (channel->channel >= efx->tx_channel_offset) { in efx_set_channels()
924 if (efx_channel_is_xdp_tx(channel)) { in efx_set_channels()
925 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_set_channels()
932 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_set_channels()
934 netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n", in efx_set_channels()
935 channel->channel, tx_queue->label, in efx_set_channels()
940 * with no csum offload, which is the first one of the channel in efx_set_channels()
941 * (note: channel->tx_queue_by_type is not initialized yet) in efx_set_channels()
944 tx_queue = &channel->tx_queue[0]; in efx_set_channels()
974 bool efx_default_channel_want_txqs(struct efx_channel *channel) in efx_default_channel_want_txqs() argument
976 return channel->channel - channel->efx->tx_channel_offset < in efx_default_channel_want_txqs()
977 channel->efx->n_tx_channels; in efx_default_channel_want_txqs()
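The predicate at lines 976-977 encodes the layout used throughout this file: the channels that carry TX queues form one contiguous window starting at tx_channel_offset and running for n_tx_channels entries. A tiny sketch with invented numbers:

#include <linux/types.h>

/* tx_channel_offset == 4, n_tx_channels == 4 (invented numbers): channels
 * 4..7 return true, everything else false (for channels 0..3 the unsigned
 * subtraction wraps to a large value and fails the comparison).
 */
static bool example_wants_txqs(unsigned int channel, unsigned int offset,
			       unsigned int n_tx)
{
	return channel - offset < n_tx;
}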
986 struct efx_channel *channel, *end_channel; in efx_soft_enable_interrupts() local
994 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
995 if (!channel->type->keep_eventq) { in efx_soft_enable_interrupts()
996 rc = efx_init_eventq(channel); in efx_soft_enable_interrupts()
1000 efx_start_eventq(channel); in efx_soft_enable_interrupts()
1007 end_channel = channel; in efx_soft_enable_interrupts()
1008 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1009 if (channel == end_channel) in efx_soft_enable_interrupts()
1011 efx_stop_eventq(channel); in efx_soft_enable_interrupts()
1012 if (!channel->type->keep_eventq) in efx_soft_enable_interrupts()
1013 efx_fini_eventq(channel); in efx_soft_enable_interrupts()
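The error path at lines 1007-1013 uses the partial-unwind idiom: remember the channel that failed, then walk the list again from the start and undo only what was already brought up. A generic sketch of that shape (invented names, not the driver's code):

static int example_enable_all(void *objs[], unsigned int n,
			      int (*enable)(void *), void (*disable)(void *))
{
	unsigned int i, done;
	int rc;

	for (done = 0; done < n; done++) {
		rc = enable(objs[done]);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	/* objs[done] failed, so tear down only objs[0..done-1] */
	for (i = 0; i < done; i++)
		disable(objs[i]);
	return rc;
}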
1021 struct efx_channel *channel; in efx_soft_disable_interrupts() local
1034 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1035 if (channel->irq) in efx_soft_disable_interrupts()
1036 synchronize_irq(channel->irq); in efx_soft_disable_interrupts()
1038 efx_stop_eventq(channel); in efx_soft_disable_interrupts()
1039 if (!channel->type->keep_eventq) in efx_soft_disable_interrupts()
1040 efx_fini_eventq(channel); in efx_soft_disable_interrupts()
1049 struct efx_channel *channel, *end_channel; in efx_enable_interrupts() local
1062 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1063 if (channel->type->keep_eventq) { in efx_enable_interrupts()
1064 rc = efx_init_eventq(channel); in efx_enable_interrupts()
1077 end_channel = channel; in efx_enable_interrupts()
1078 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1079 if (channel == end_channel) in efx_enable_interrupts()
1081 if (channel->type->keep_eventq) in efx_enable_interrupts()
1082 efx_fini_eventq(channel); in efx_enable_interrupts()
1092 struct efx_channel *channel; in efx_disable_interrupts() local
1096 efx_for_each_channel(channel, efx) { in efx_disable_interrupts()
1097 if (channel->type->keep_eventq) in efx_disable_interrupts()
1098 efx_fini_eventq(channel); in efx_disable_interrupts()
1108 struct efx_channel *channel; in efx_start_channels() local
1110 efx_for_each_channel(channel, efx) { in efx_start_channels()
1111 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_start_channels()
1116 efx_for_each_channel_rx_queue(rx_queue, channel) { in efx_start_channels()
1119 efx_stop_eventq(channel); in efx_start_channels()
1121 efx_start_eventq(channel); in efx_start_channels()
1124 WARN_ON(channel->rx_pkt_n_frags); in efx_start_channels()
1132 struct efx_channel *channel; in efx_stop_channels() local
1136 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1137 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_stop_channels()
1141 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1148 if (efx_channel_has_rx_queue(channel)) { in efx_stop_channels()
1149 efx_stop_eventq(channel); in efx_stop_channels()
1150 efx_start_eventq(channel); in efx_stop_channels()
1164 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1165 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_stop_channels()
1167 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_stop_channels()
1178 /* Process channel's event queue
1181 * single channel. The caller must guarantee that this function will
1182 * never be concurrently called more than once on the same channel,
1185 static int efx_process_channel(struct efx_channel *channel, int budget) in efx_process_channel() argument
1191 if (unlikely(!channel->enabled)) in efx_process_channel()
1195 EFX_WARN_ON_PARANOID(channel->rx_list != NULL); in efx_process_channel()
1197 channel->rx_list = &rx_list; in efx_process_channel()
1199 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
1204 spent = efx_nic_process_eventq(channel, budget); in efx_process_channel()
1205 if (spent && efx_channel_has_rx_queue(channel)) { in efx_process_channel()
1207 efx_channel_get_rx_queue(channel); in efx_process_channel()
1209 efx_rx_flush_packet(channel); in efx_process_channel()
1214 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
1223 netif_receive_skb_list(channel->rx_list); in efx_process_channel()
1224 channel->rx_list = NULL; in efx_process_channel()
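Lines 1195-1224 show the RX batching idiom: for the duration of one poll, channel->rx_list points at a stack-local list, received skbs are queued onto it, and the whole batch is handed to the stack with one netif_receive_skb_list() call before the pointer is cleared. A self-contained sketch of that pattern (example_fetch_skb is an invented placeholder for however completed packets are pulled off the ring):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_poll_rx(struct napi_struct *napi, int budget,
			   struct sk_buff *(*example_fetch_skb)(void))
{
	LIST_HEAD(rx_list);		/* lives only for this poll */
	struct sk_buff *skb;
	int spent = 0;

	while (spent < budget && (skb = example_fetch_skb()) != NULL) {
		list_add_tail(&skb->list, &rx_list);
		spent++;
	}

	/* one pass into the stack for the whole batch */
	netif_receive_skb_list(&rx_list);
	return spent;
}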
1229 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) in efx_update_irq_mod() argument
1233 if (channel->irq_mod_score < irq_adapt_low_thresh) { in efx_update_irq_mod()
1234 if (channel->irq_moderation_us > step) { in efx_update_irq_mod()
1235 channel->irq_moderation_us -= step; in efx_update_irq_mod()
1236 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1238 } else if (channel->irq_mod_score > irq_adapt_high_thresh) { in efx_update_irq_mod()
1239 if (channel->irq_moderation_us < in efx_update_irq_mod()
1241 channel->irq_moderation_us += step; in efx_update_irq_mod()
1242 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1246 channel->irq_count = 0; in efx_update_irq_mod()
1247 channel->irq_mod_score = 0; in efx_update_irq_mod()
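efx_update_irq_mod adjusts interrupt moderation one step at a time with simple hysteresis: a score below the low threshold steps the delay down, a score above the high threshold steps it up, and both counters are reset afterwards. A generic sketch of that shape (thresholds and names are invented):

static void example_adapt_irq_delay(unsigned int score, unsigned int *usecs,
				    unsigned int step, unsigned int max_usecs)
{
	if (score < 100 && *usecs > step)		/* below low threshold */
		*usecs -= step;
	else if (score > 200 && *usecs < max_usecs)	/* above high threshold */
		*usecs += step;
}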
1257 struct efx_channel *channel = in efx_poll() local
1259 struct efx_nic *efx = channel->efx; in efx_poll()
1266 "channel %d NAPI poll executing on CPU %d\n", in efx_poll()
1267 channel->channel, raw_smp_processor_id()); in efx_poll()
1269 spent = efx_process_channel(channel, budget); in efx_poll()
1274 if (efx_channel_has_rx_queue(channel) && in efx_poll()
1276 unlikely(++channel->irq_count == 1000)) { in efx_poll()
1277 efx_update_irq_mod(efx, channel); in efx_poll()
1282 time = jiffies - channel->rfs_last_expiry; in efx_poll()
1284 if (channel->rfs_filter_count * time >= 600 * HZ) in efx_poll()
1285 mod_delayed_work(system_wq, &channel->filter_work, 0); in efx_poll()
1294 efx_nic_eventq_read_ack(channel); in efx_poll()
1300 void efx_init_napi_channel(struct efx_channel *channel) in efx_init_napi_channel() argument
1302 struct efx_nic *efx = channel->efx; in efx_init_napi_channel()
1304 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
1305 netif_napi_add(channel->napi_dev, &channel->napi_str, in efx_init_napi_channel()
1311 struct efx_channel *channel; in efx_init_napi() local
1313 efx_for_each_channel(channel, efx) in efx_init_napi()
1314 efx_init_napi_channel(channel); in efx_init_napi()
1317 void efx_fini_napi_channel(struct efx_channel *channel) in efx_fini_napi_channel() argument
1319 if (channel->napi_dev) in efx_fini_napi_channel()
1320 netif_napi_del(&channel->napi_str); in efx_fini_napi_channel()
1322 channel->napi_dev = NULL; in efx_fini_napi_channel()
1327 struct efx_channel *channel; in efx_fini_napi() local
1329 efx_for_each_channel(channel, efx) in efx_fini_napi()
1330 efx_fini_napi_channel(channel); in efx_fini_napi()
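Taken together, the init/fini and start/stop helpers in this listing implement the standard NAPI lifecycle: netif_napi_add() at init (line 1305), napi_enable()/napi_disable() around the running state (lines 471 and 481), and netif_napi_del() at teardown (line 1320). A minimal sketch of that pairing (example_napi_lifecycle is invented; the kernel this listing comes from still passes a weight to netif_napi_add(), which newer kernels have dropped):

#include <linux/netdevice.h>

static void example_napi_lifecycle(struct net_device *dev,
				   struct napi_struct *napi,
				   int (*poll)(struct napi_struct *, int))
{
	/* init: register the poll function (older kernels, as here, take a
	 * weight argument; newer ones do not)
	 */
	netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(napi);		/* start: allow the poll to be scheduled */

	napi_disable(napi);		/* stop: waits for any in-flight poll */
	netif_napi_del(napi);		/* fini: unregister */
}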