Lines matching full:channel
156 * We need a channel per event queue, plus a VI per tx queue. in efx_allocate_msix_channels()
273 /* Fall back to single channel MSI */ in efx_probe_interrupts()
364 struct efx_channel *channel; in efx_set_interrupt_affinity() local
372 efx_for_each_channel(channel, efx) { in efx_set_interrupt_affinity()
376 irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); in efx_set_interrupt_affinity()
382 struct efx_channel *channel; in efx_clear_interrupt_affinity() local
384 efx_for_each_channel(channel, efx) in efx_clear_interrupt_affinity()
385 irq_set_affinity_hint(channel->irq, NULL); in efx_clear_interrupt_affinity()
401 struct efx_channel *channel; in efx_remove_interrupts() local
404 efx_for_each_channel(channel, efx) in efx_remove_interrupts()
405 channel->irq = 0; in efx_remove_interrupts()
418 * Event queue memory allocations are done only once. If the channel
420 * errors during channel reset and also simplifies interrupt handling.
422 int efx_probe_eventq(struct efx_channel *channel) in efx_probe_eventq() argument
424 struct efx_nic *efx = channel->efx; in efx_probe_eventq()
428 "chan %d create event queue\n", channel->channel); in efx_probe_eventq()
435 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; in efx_probe_eventq()
437 return efx_nic_probe_eventq(channel); in efx_probe_eventq()
440 /* Prepare channel's event queue */
441 int efx_init_eventq(struct efx_channel *channel) in efx_init_eventq() argument
443 struct efx_nic *efx = channel->efx; in efx_init_eventq()
446 EFX_WARN_ON_PARANOID(channel->eventq_init); in efx_init_eventq()
449 "chan %d init event queue\n", channel->channel); in efx_init_eventq()
451 rc = efx_nic_init_eventq(channel); in efx_init_eventq()
453 efx->type->push_irq_moderation(channel); in efx_init_eventq()
454 channel->eventq_read_ptr = 0; in efx_init_eventq()
455 channel->eventq_init = true; in efx_init_eventq()
461 void efx_start_eventq(struct efx_channel *channel) in efx_start_eventq() argument
463 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_start_eventq()
464 "chan %d start event queue\n", channel->channel); in efx_start_eventq()
467 channel->enabled = true; in efx_start_eventq()
470 napi_enable(&channel->napi_str); in efx_start_eventq()
471 efx_nic_eventq_read_ack(channel); in efx_start_eventq()
475 void efx_stop_eventq(struct efx_channel *channel) in efx_stop_eventq() argument
477 if (!channel->enabled) in efx_stop_eventq()
480 napi_disable(&channel->napi_str); in efx_stop_eventq()
481 channel->enabled = false; in efx_stop_eventq()
484 void efx_fini_eventq(struct efx_channel *channel) in efx_fini_eventq() argument
486 if (!channel->eventq_init) in efx_fini_eventq()
489 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
490 "chan %d fini event queue\n", channel->channel); in efx_fini_eventq()
492 efx_nic_fini_eventq(channel); in efx_fini_eventq()
493 channel->eventq_init = false; in efx_fini_eventq()
496 void efx_remove_eventq(struct efx_channel *channel) in efx_remove_eventq() argument
498 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
499 "chan %d remove event queue\n", channel->channel); in efx_remove_eventq()
501 efx_nic_remove_eventq(channel); in efx_remove_eventq()
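
The comment matched at source lines 418-420 describes the split these helpers implement: event-queue memory is allocated exactly once (probe/remove), while initialisation is cheap and repeatable per channel reset (init/fini), with start/stop layered on top. A minimal user-space sketch of that lifecycle, using hypothetical names and flags standing in for the driver's eventq_init/enabled fields (not the driver's actual code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a channel's event-queue state. */
struct evq_model {
	void *mem;      /* allocated once at probe time  */
	bool  init;     /* mirrors channel->eventq_init  */
	bool  enabled;  /* mirrors channel->enabled      */
};

static int evq_probe(struct evq_model *q)   /* allocate once */
{
	q->mem = malloc(4096);
	return q->mem ? 0 : -1;
}

static void evq_init(struct evq_model *q)   /* (re)initialise per reset */
{
	assert(!q->init);
	q->init = true;
}

static void evq_start(struct evq_model *q)  { q->enabled = true;  }
static void evq_stop(struct evq_model *q)   { q->enabled = false; }
static void evq_fini(struct evq_model *q)   { q->init = false;    }
static void evq_remove(struct evq_model *q) { free(q->mem); q->mem = NULL; }

int main(void)
{
	struct evq_model q = { 0 };

	if (evq_probe(&q))   /* memory allocated exactly once        */
		return 1;
	evq_init(&q);        /* cheap, repeatable across resets      */
	evq_start(&q);
	evq_stop(&q);
	evq_fini(&q);
	evq_init(&q);        /* a "reset" re-runs only init/fini     */
	evq_fini(&q);
	evq_remove(&q);      /* memory freed exactly once            */
	printf("lifecycle ok\n");
	return 0;
}
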
506 * Channel handling
514 struct efx_channel *channel; in efx_filter_rfs_expire() local
517 channel = container_of(dwork, struct efx_channel, filter_work); in efx_filter_rfs_expire()
518 time = jiffies - channel->rfs_last_expiry; in efx_filter_rfs_expire()
519 quota = channel->rfs_filter_count * time / (30 * HZ); in efx_filter_rfs_expire()
520 if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) in efx_filter_rfs_expire()
521 channel->rfs_last_expiry += time; in efx_filter_rfs_expire()
527 /* Allocate and initialise a channel structure. */
532 struct efx_channel *channel; in efx_alloc_channel() local
535 channel = kzalloc(sizeof(*channel), GFP_KERNEL); in efx_alloc_channel()
536 if (!channel) in efx_alloc_channel()
539 channel->efx = efx; in efx_alloc_channel()
540 channel->channel = i; in efx_alloc_channel()
541 channel->type = &efx_default_channel_type; in efx_alloc_channel()
544 tx_queue = &channel->tx_queue[j]; in efx_alloc_channel()
548 tx_queue->channel = channel; in efx_alloc_channel()
552 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); in efx_alloc_channel()
555 rx_queue = &channel->rx_queue; in efx_alloc_channel()
559 return channel; in efx_alloc_channel()
567 efx->channel[i] = efx_alloc_channel(efx, i); in efx_init_channels()
568 if (!efx->channel[i]) in efx_init_channels()
589 if (efx->channel[i]) { in efx_fini_channels()
590 kfree(efx->channel[i]); in efx_fini_channels()
591 efx->channel[i] = NULL; in efx_fini_channels()
595 /* Allocate and initialise a channel structure, copying parameters
596 * (but not resources) from an old channel structure.
602 struct efx_channel *channel; in efx_copy_channel() local
605 channel = kmalloc(sizeof(*channel), GFP_KERNEL); in efx_copy_channel()
606 if (!channel) in efx_copy_channel()
609 *channel = *old_channel; in efx_copy_channel()
611 channel->napi_dev = NULL; in efx_copy_channel()
612 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); in efx_copy_channel()
613 channel->napi_str.napi_id = 0; in efx_copy_channel()
614 channel->napi_str.state = 0; in efx_copy_channel()
615 memset(&channel->eventq, 0, sizeof(channel->eventq)); in efx_copy_channel()
618 tx_queue = &channel->tx_queue[j]; in efx_copy_channel()
619 if (tx_queue->channel) in efx_copy_channel()
620 tx_queue->channel = channel; in efx_copy_channel()
626 rx_queue = &channel->rx_queue; in efx_copy_channel()
631 INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); in efx_copy_channel()
634 return channel; in efx_copy_channel()
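
As the comment matched at source lines 595-596 says, efx_copy_channel() copies parameters but not resources: the structure is duplicated wholesale, then every field that refers to live state (NAPI device, event-queue buffer) is cleared so the copy can be re-probed independently. A minimal sketch of that pattern with invented field names:

#include <stdlib.h>

/* Hypothetical channel with a mix of plain parameters and live resources. */
struct chan_model {
	int   index;          /* parameter: safe to copy      */
	int   irq_moderation; /* parameter: safe to copy      */
	void *evq_mem;        /* resource: must NOT be shared */
	void *napi_dev;       /* resource: must NOT be shared */
};

/* Copy parameters (but not resources) from an old channel. */
static struct chan_model *chan_copy(const struct chan_model *old)
{
	struct chan_model *dup = malloc(sizeof(*dup));

	if (!dup)
		return NULL;
	*dup = *old;           /* wholesale copy of the parameters...        */
	dup->evq_mem = NULL;   /* ...then drop the old channel's resources   */
	dup->napi_dev = NULL;  /* so the copy can be probed from scratch     */
	return dup;
}

int main(void)
{
	struct chan_model old = { .index = 3, .irq_moderation = 60,
				  .evq_mem = (void *)&old,
				  .napi_dev = (void *)&old };
	struct chan_model *dup = chan_copy(&old);

	free(dup);
	return 0;
}
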
637 static int efx_probe_channel(struct efx_channel *channel) in efx_probe_channel() argument
643 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
644 "creating channel %d\n", channel->channel); in efx_probe_channel()
646 rc = channel->type->pre_probe(channel); in efx_probe_channel()
650 rc = efx_probe_eventq(channel); in efx_probe_channel()
654 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_probe_channel()
660 efx_for_each_channel_rx_queue(rx_queue, channel) { in efx_probe_channel()
666 channel->rx_list = NULL; in efx_probe_channel()
671 efx_remove_channel(channel); in efx_probe_channel()
675 static void efx_get_channel_name(struct efx_channel *channel, char *buf, in efx_get_channel_name() argument
678 struct efx_nic *efx = channel->efx; in efx_get_channel_name()
682 number = channel->channel; in efx_get_channel_name()
701 struct efx_channel *channel; in efx_set_channel_names() local
703 efx_for_each_channel(channel, efx) in efx_set_channel_names()
704 channel->type->get_name(channel, in efx_set_channel_names()
705 efx->msi_context[channel->channel].name, in efx_set_channel_names()
711 struct efx_channel *channel; in efx_probe_channels() local
722 efx_for_each_channel_rev(channel, efx) { in efx_probe_channels()
723 rc = efx_probe_channel(channel); in efx_probe_channels()
726 "failed to create channel %d\n", in efx_probe_channels()
727 channel->channel); in efx_probe_channels()
740 void efx_remove_channel(struct efx_channel *channel) in efx_remove_channel() argument
745 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_channel()
746 "destroy chan %d\n", channel->channel); in efx_remove_channel()
748 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_remove_channel()
750 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_remove_channel()
752 efx_remove_eventq(channel); in efx_remove_channel()
753 channel->type->post_remove(channel); in efx_remove_channel()
758 struct efx_channel *channel; in efx_remove_channels() local
760 efx_for_each_channel(channel, efx) in efx_remove_channels()
761 efx_remove_channel(channel); in efx_remove_channels()
773 "Channel %u TXQ %u is XDP %u, HW %u\n", in efx_set_xdp_tx_queue()
774 tx_queue->channel->channel, tx_queue->label, in efx_set_xdp_tx_queue()
783 struct efx_channel *channel; in efx_set_xdp_channels() local
792 efx_for_each_channel(channel, efx) { in efx_set_xdp_channels()
793 if (channel->channel < efx->tx_channel_offset) in efx_set_xdp_channels()
796 if (efx_channel_is_xdp_tx(channel)) { in efx_set_xdp_channels()
797 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_set_xdp_channels()
805 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_set_xdp_channels()
808 "Channel %u TXQ %u is HW %u\n", in efx_set_xdp_channels()
809 channel->channel, tx_queue->label, in efx_set_xdp_channels()
815 * first one of the channel in efx_set_xdp_channels()
820 tx_queue = &channel->tx_queue[0]; in efx_set_xdp_channels()
847 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel, in efx_realloc_channels() local
861 efx_for_each_channel(channel, efx) { in efx_realloc_channels()
865 if (channel->type->copy) in efx_realloc_channels()
868 channel->eventq.index + in efx_realloc_channels()
869 channel->eventq.entries); in efx_realloc_channels()
870 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_realloc_channels()
874 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_realloc_channels()
887 channel = efx->channel[i]; in efx_realloc_channels()
888 if (channel->type->copy) in efx_realloc_channels()
889 channel = channel->type->copy(channel); in efx_realloc_channels()
890 if (!channel) { in efx_realloc_channels()
894 other_channel[i] = channel; in efx_realloc_channels()
897 /* Swap entry counts and channel pointers */ in efx_realloc_channels()
903 swap(efx->channel[i], other_channel[i]); in efx_realloc_channels()
909 channel = efx->channel[i]; in efx_realloc_channels()
910 if (!channel->type->copy) in efx_realloc_channels()
912 rc = efx_probe_channel(channel); in efx_realloc_channels()
915 efx_init_napi_channel(efx->channel[i]); in efx_realloc_channels()
921 /* Destroy unused channel structures */ in efx_realloc_channels()
923 channel = other_channel[i]; in efx_realloc_channels()
924 if (channel && channel->type->copy) { in efx_realloc_channels()
925 efx_fini_napi_channel(channel); in efx_realloc_channels()
926 efx_remove_channel(channel); in efx_realloc_channels()
927 kfree(channel); in efx_realloc_channels()
936 "unable to restart interrupts on channel reallocation\n"); in efx_realloc_channels()
949 swap(efx->channel[i], other_channel[i]); in efx_realloc_channels()
956 struct efx_channel *channel; in efx_set_channels() local
970 efx_for_each_channel(channel, efx) { in efx_set_channels()
971 if (channel->channel < efx->n_rx_channels) in efx_set_channels()
972 channel->rx_queue.core_index = channel->channel; in efx_set_channels()
974 channel->rx_queue.core_index = -1; in efx_set_channels()
985 static bool efx_default_channel_want_txqs(struct efx_channel *channel) in efx_default_channel_want_txqs() argument
987 return channel->channel - channel->efx->tx_channel_offset < in efx_default_channel_want_txqs()
988 channel->efx->n_tx_channels; in efx_default_channel_want_txqs()
997 struct efx_channel *channel, *end_channel; in efx_soft_enable_interrupts() local
1005 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1006 if (!channel->type->keep_eventq) { in efx_soft_enable_interrupts()
1007 rc = efx_init_eventq(channel); in efx_soft_enable_interrupts()
1011 efx_start_eventq(channel); in efx_soft_enable_interrupts()
1018 end_channel = channel; in efx_soft_enable_interrupts()
1019 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1020 if (channel == end_channel) in efx_soft_enable_interrupts()
1022 efx_stop_eventq(channel); in efx_soft_enable_interrupts()
1023 if (!channel->type->keep_eventq) in efx_soft_enable_interrupts()
1024 efx_fini_eventq(channel); in efx_soft_enable_interrupts()
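
efx_soft_enable_interrupts() walks every channel bringing its event queue up; on failure it records where it stopped (end_channel) and walks the list again, tearing down only the channels that were already started. A compact model of that partial-unwind idiom (illustrative names only):

#include <stdbool.h>

#define N_CH 8

struct ch_state { bool up; };

/* Pretend init that fails on one designated channel. */
static int init_one(struct ch_state *c, int idx, int fail_at)
{
	if (idx == fail_at)
		return -1;
	c->up = true;
	return 0;
}

/* Bring every channel up; on error, tear down only the ones that
 * made it, mirroring the end_channel unwind in the matched lines.
 */
static int enable_all(struct ch_state ch[N_CH], int fail_at)
{
	int i, end = -1;

	for (i = 0; i < N_CH; i++) {
		if (init_one(&ch[i], i, fail_at)) {
			end = i;		/* remember where we stopped */
			goto fail;
		}
	}
	return 0;

fail:
	for (i = 0; i < end; i++)		/* unwind [0, end) only */
		ch[i].up = false;
	return -1;
}

int main(void)
{
	struct ch_state ch[N_CH] = { { false } };

	return enable_all(ch, 5) == -1 ? 0 : 1;	/* fails at 5, unwinds 0..4 */
}
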
1032 struct efx_channel *channel; in efx_soft_disable_interrupts() local
1045 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1046 if (channel->irq) in efx_soft_disable_interrupts()
1047 synchronize_irq(channel->irq); in efx_soft_disable_interrupts()
1049 efx_stop_eventq(channel); in efx_soft_disable_interrupts()
1050 if (!channel->type->keep_eventq) in efx_soft_disable_interrupts()
1051 efx_fini_eventq(channel); in efx_soft_disable_interrupts()
1060 struct efx_channel *channel, *end_channel; in efx_enable_interrupts() local
1073 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1074 if (channel->type->keep_eventq) { in efx_enable_interrupts()
1075 rc = efx_init_eventq(channel); in efx_enable_interrupts()
1088 end_channel = channel; in efx_enable_interrupts()
1089 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1090 if (channel == end_channel) in efx_enable_interrupts()
1092 if (channel->type->keep_eventq) in efx_enable_interrupts()
1093 efx_fini_eventq(channel); in efx_enable_interrupts()
1103 struct efx_channel *channel; in efx_disable_interrupts() local
1107 efx_for_each_channel(channel, efx) { in efx_disable_interrupts()
1108 if (channel->type->keep_eventq) in efx_disable_interrupts()
1109 efx_fini_eventq(channel); in efx_disable_interrupts()
1119 struct efx_channel *channel; in efx_start_channels() local
1121 efx_for_each_channel_rev(channel, efx) { in efx_start_channels()
1122 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_start_channels()
1127 efx_for_each_channel_rx_queue(rx_queue, channel) { in efx_start_channels()
1130 efx_stop_eventq(channel); in efx_start_channels()
1132 efx_start_eventq(channel); in efx_start_channels()
1135 WARN_ON(channel->rx_pkt_n_frags); in efx_start_channels()
1143 struct efx_channel *channel; in efx_stop_channels() local
1147 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1148 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_stop_channels()
1152 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1159 if (efx_channel_has_rx_queue(channel)) { in efx_stop_channels()
1160 efx_stop_eventq(channel); in efx_stop_channels()
1161 efx_start_eventq(channel); in efx_stop_channels()
1175 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1176 efx_for_each_channel_rx_queue(rx_queue, channel) in efx_stop_channels()
1178 efx_for_each_channel_tx_queue(tx_queue, channel) in efx_stop_channels()
1189 /* Process channel's event queue
1192 * single channel. The caller must guarantee that this function will
1193 * never be concurrently called more than once on the same channel,
1196 static int efx_process_channel(struct efx_channel *channel, int budget) in efx_process_channel() argument
1202 if (unlikely(!channel->enabled)) in efx_process_channel()
1206 EFX_WARN_ON_PARANOID(channel->rx_list != NULL); in efx_process_channel()
1208 channel->rx_list = &rx_list; in efx_process_channel()
1210 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
1215 spent = efx_nic_process_eventq(channel, budget); in efx_process_channel()
1216 if (spent && efx_channel_has_rx_queue(channel)) { in efx_process_channel()
1218 efx_channel_get_rx_queue(channel); in efx_process_channel()
1220 efx_rx_flush_packet(channel); in efx_process_channel()
1225 efx_for_each_channel_tx_queue(tx_queue, channel) { in efx_process_channel()
1234 netif_receive_skb_list(channel->rx_list); in efx_process_channel()
1235 channel->rx_list = NULL; in efx_process_channel()
1240 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) in efx_update_irq_mod() argument
1244 if (channel->irq_mod_score < irq_adapt_low_thresh) { in efx_update_irq_mod()
1245 if (channel->irq_moderation_us > step) { in efx_update_irq_mod()
1246 channel->irq_moderation_us -= step; in efx_update_irq_mod()
1247 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1249 } else if (channel->irq_mod_score > irq_adapt_high_thresh) { in efx_update_irq_mod()
1250 if (channel->irq_moderation_us < in efx_update_irq_mod()
1252 channel->irq_moderation_us += step; in efx_update_irq_mod()
1253 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1257 channel->irq_count = 0; in efx_update_irq_mod()
1258 channel->irq_mod_score = 0; in efx_update_irq_mod()
1268 struct efx_channel *channel = in efx_poll() local
1270 struct efx_nic *efx = channel->efx; in efx_poll()
1277 "channel %d NAPI poll executing on CPU %d\n", in efx_poll()
1278 channel->channel, raw_smp_processor_id()); in efx_poll()
1280 spent = efx_process_channel(channel, budget); in efx_poll()
1285 if (efx_channel_has_rx_queue(channel) && in efx_poll()
1287 unlikely(++channel->irq_count == 1000)) { in efx_poll()
1288 efx_update_irq_mod(efx, channel); in efx_poll()
1293 time = jiffies - channel->rfs_last_expiry; in efx_poll()
1295 if (channel->rfs_filter_count * time >= 600 * HZ) in efx_poll()
1296 mod_delayed_work(system_wq, &channel->filter_work, 0); in efx_poll()
1305 efx_nic_eventq_read_ack(channel); in efx_poll()
1311 void efx_init_napi_channel(struct efx_channel *channel) in efx_init_napi_channel() argument
1313 struct efx_nic *efx = channel->efx; in efx_init_napi_channel()
1315 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
1316 netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll); in efx_init_napi_channel()
1321 struct efx_channel *channel; in efx_init_napi() local
1323 efx_for_each_channel(channel, efx) in efx_init_napi()
1324 efx_init_napi_channel(channel); in efx_init_napi()
1327 void efx_fini_napi_channel(struct efx_channel *channel) in efx_fini_napi_channel() argument
1329 if (channel->napi_dev) in efx_fini_napi_channel()
1330 netif_napi_del(&channel->napi_str); in efx_fini_napi_channel()
1332 channel->napi_dev = NULL; in efx_fini_napi_channel()
1337 struct efx_channel *channel; in efx_fini_napi() local
1339 efx_for_each_channel(channel, efx) in efx_fini_napi()
1340 efx_fini_napi_channel(channel); in efx_fini_napi()
1347 static int efx_channel_dummy_op_int(struct efx_channel *channel) in efx_channel_dummy_op_int() argument
1352 void efx_channel_dummy_op_void(struct efx_channel *channel) in efx_channel_dummy_op_void() argument