Lines matching refs: enic

114 static void enic_init_affinity_hint(struct enic *enic) in enic_init_affinity_hint() argument
116 int numa_node = dev_to_node(&enic->pdev->dev); in enic_init_affinity_hint()
119 for (i = 0; i < enic->intr_count; i++) { in enic_init_affinity_hint()
120 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || in enic_init_affinity_hint()
121 (cpumask_available(enic->msix[i].affinity_mask) && in enic_init_affinity_hint()
122 !cpumask_empty(enic->msix[i].affinity_mask))) in enic_init_affinity_hint()
124 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, in enic_init_affinity_hint()
127 enic->msix[i].affinity_mask); in enic_init_affinity_hint()
131 static void enic_free_affinity_hint(struct enic *enic) in enic_free_affinity_hint() argument
135 for (i = 0; i < enic->intr_count; i++) { in enic_free_affinity_hint()
136 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i)) in enic_free_affinity_hint()
138 free_cpumask_var(enic->msix[i].affinity_mask); in enic_free_affinity_hint()
142 static void enic_set_affinity_hint(struct enic *enic) in enic_set_affinity_hint() argument
147 for (i = 0; i < enic->intr_count; i++) { in enic_set_affinity_hint()
148 if (enic_is_err_intr(enic, i) || in enic_set_affinity_hint()
149 enic_is_notify_intr(enic, i) || in enic_set_affinity_hint()
150 !cpumask_available(enic->msix[i].affinity_mask) || in enic_set_affinity_hint()
151 cpumask_empty(enic->msix[i].affinity_mask)) in enic_set_affinity_hint()
153 err = irq_set_affinity_hint(enic->msix_entry[i].vector, in enic_set_affinity_hint()
154 enic->msix[i].affinity_mask); in enic_set_affinity_hint()
156 netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n", in enic_set_affinity_hint()
160 for (i = 0; i < enic->wq_count; i++) { in enic_set_affinity_hint()
161 int wq_intr = enic_msix_wq_intr(enic, i); in enic_set_affinity_hint()
163 if (cpumask_available(enic->msix[wq_intr].affinity_mask) && in enic_set_affinity_hint()
164 !cpumask_empty(enic->msix[wq_intr].affinity_mask)) in enic_set_affinity_hint()
165 netif_set_xps_queue(enic->netdev, in enic_set_affinity_hint()
166 enic->msix[wq_intr].affinity_mask, in enic_set_affinity_hint()
171 static void enic_unset_affinity_hint(struct enic *enic) in enic_unset_affinity_hint() argument
175 for (i = 0; i < enic->intr_count; i++) in enic_unset_affinity_hint()
176 irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); in enic_unset_affinity_hint()
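
The four affinity helpers above (114-176) follow the stock MSI-X affinity pattern: allocate one cpumask per vector, spread vectors across CPUs local to the device's NUMA node, publish each mask with irq_set_affinity_hint(), and mirror the WQ masks into XPS via netif_set_xps_queue(). A minimal sketch of that pattern, not the driver's exact code (enic additionally skips its error and notify vectors, and a failed hint is only logged, never fatal):

    /* Illustrative only: NUMA-local per-vector affinity hints. */
    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/interrupt.h>
    #include <linux/pci.h>
    #include <linux/printk.h>

    static void example_init_hints(struct pci_dev *pdev,
                                   struct msix_entry *entries,
                                   cpumask_var_t *masks, int nvec)
    {
            int node = dev_to_node(&pdev->dev);
            int i;

            for (i = 0; i < nvec; i++) {
                    if (!zalloc_cpumask_var(&masks[i], GFP_KERNEL))
                            continue;               /* hints are best effort */
                    cpumask_set_cpu(cpumask_local_spread(i, node), masks[i]);
                    if (irq_set_affinity_hint(entries[i].vector, masks[i]))
                            pr_warn("affinity hint failed for vector %d\n", i);
            }
    }
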
183 struct enic *enic = netdev_priv(netdev); in enic_udp_tunnel_set_port() local
186 spin_lock_bh(&enic->devcmd_lock); in enic_udp_tunnel_set_port()
188 err = vnic_dev_overlay_offload_cfg(enic->vdev, in enic_udp_tunnel_set_port()
194 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, in enic_udp_tunnel_set_port()
195 enic->vxlan.patch_level); in enic_udp_tunnel_set_port()
199 enic->vxlan.vxlan_udp_port_number = ntohs(ti->port); in enic_udp_tunnel_set_port()
201 spin_unlock_bh(&enic->devcmd_lock); in enic_udp_tunnel_set_port()
210 struct enic *enic = netdev_priv(netdev); in enic_udp_tunnel_unset_port() local
213 spin_lock_bh(&enic->devcmd_lock); in enic_udp_tunnel_unset_port()
215 err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, in enic_udp_tunnel_unset_port()
220 enic->vxlan.vxlan_udp_port_number = 0; in enic_udp_tunnel_unset_port()
223 spin_unlock_bh(&enic->devcmd_lock); in enic_udp_tunnel_unset_port()
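
Both udp_tunnel callbacks (183-223) wrap their firmware calls in spin_lock_bh(&enic->devcmd_lock), cache the port in host order (ntohs(ti->port)), and clear it back to 0 on unset. A sketch of that shape with hypothetical names (struct example_nic and fw_cfg stand in for struct enic and the vnic_dev_overlay_offload_*() calls):

    /* Illustrative devcmd pattern: BH-safe lock around a firmware call. */
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_nic {                    /* hypothetical stand-in */
            spinlock_t devcmd_lock;
            u16 vxlan_udp_port;             /* cached in host byte order */
    };

    static int example_set_port(struct example_nic *nic, __be16 port,
                                int (*fw_cfg)(struct example_nic *, u16))
    {
            int err;

            spin_lock_bh(&nic->devcmd_lock);        /* devcmds can run in BH */
            err = fw_cfg(nic, ntohs(port));
            if (!err)
                    nic->vxlan_udp_port = ntohs(port);  /* mirror hw state */
            spin_unlock_bh(&nic->devcmd_lock);
            return err;
    }
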
248 struct enic *enic = netdev_priv(dev); in enic_features_check() local
260 if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) in enic_features_check()
273 if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) in enic_features_check()
291 if (port != enic->vxlan.vxlan_udp_port_number) in enic_features_check()
300 int enic_is_dynamic(struct enic *enic) in enic_is_dynamic() argument
302 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; in enic_is_dynamic()
305 int enic_sriov_enabled(struct enic *enic) in enic_sriov_enabled() argument
307 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; in enic_sriov_enabled()
310 static int enic_is_sriov_vf(struct enic *enic) in enic_is_sriov_vf() argument
312 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; in enic_is_sriov_vf()
315 int enic_is_valid_vf(struct enic *enic, int vf) in enic_is_valid_vf() argument
318 return vf >= 0 && vf < enic->num_vfs; in enic_is_valid_vf()
326 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() local
329 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_wq_buf()
332 dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_wq_buf()
348 struct enic *enic = vnic_dev_priv(vdev); in enic_wq_service() local
350 spin_lock(&enic->wq_lock[q_number]); in enic_wq_service()
352 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service()
356 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && in enic_wq_service()
357 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service()
359 netif_wake_subqueue(enic->netdev, q_number); in enic_wq_service()
361 spin_unlock(&enic->wq_lock[q_number]); in enic_wq_service()
366 static bool enic_log_q_error(struct enic *enic) in enic_log_q_error() argument
372 for (i = 0; i < enic->wq_count; i++) { in enic_log_q_error()
373 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error()
376 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", in enic_log_q_error()
380 for (i = 0; i < enic->rq_count; i++) { in enic_log_q_error()
381 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error()
384 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", in enic_log_q_error()
391 static void enic_msglvl_check(struct enic *enic) in enic_msglvl_check() argument
393 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); in enic_msglvl_check()
395 if (msg_enable != enic->msg_enable) { in enic_msglvl_check()
396 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", in enic_msglvl_check()
397 enic->msg_enable, msg_enable); in enic_msglvl_check()
398 enic->msg_enable = msg_enable; in enic_msglvl_check()
402 static void enic_mtu_check(struct enic *enic) in enic_mtu_check() argument
404 u32 mtu = vnic_dev_mtu(enic->vdev); in enic_mtu_check()
405 struct net_device *netdev = enic->netdev; in enic_mtu_check()
407 if (mtu && mtu != enic->port_mtu) { in enic_mtu_check()
408 enic->port_mtu = mtu; in enic_mtu_check()
409 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_mtu_check()
413 schedule_work(&enic->change_mtu_work); in enic_mtu_check()
424 static void enic_link_check(struct enic *enic) in enic_link_check() argument
426 int link_status = vnic_dev_link_status(enic->vdev); in enic_link_check()
427 int carrier_ok = netif_carrier_ok(enic->netdev); in enic_link_check()
430 netdev_info(enic->netdev, "Link UP\n"); in enic_link_check()
431 netif_carrier_on(enic->netdev); in enic_link_check()
433 netdev_info(enic->netdev, "Link DOWN\n"); in enic_link_check()
434 netif_carrier_off(enic->netdev); in enic_link_check()
438 static void enic_notify_check(struct enic *enic) in enic_notify_check() argument
440 enic_msglvl_check(enic); in enic_notify_check()
441 enic_mtu_check(enic); in enic_notify_check()
442 enic_link_check(enic); in enic_notify_check()
450 struct enic *enic = netdev_priv(netdev); in enic_isr_legacy() local
456 vnic_intr_mask(&enic->intr[io_intr]); in enic_isr_legacy()
458 pba = vnic_intr_legacy_pba(enic->legacy_pba); in enic_isr_legacy()
460 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
465 enic_notify_check(enic); in enic_isr_legacy()
466 vnic_intr_return_all_credits(&enic->intr[notify_intr]); in enic_isr_legacy()
470 vnic_intr_return_all_credits(&enic->intr[err_intr]); in enic_isr_legacy()
471 enic_log_q_error(enic); in enic_isr_legacy()
473 schedule_work(&enic->reset); in enic_isr_legacy()
478 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_legacy()
480 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
487 struct enic *enic = data; in enic_isr_msi() local
505 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_msi()
521 struct enic *enic = data; in enic_isr_msix_err() local
522 unsigned int intr = enic_msix_err_intr(enic); in enic_isr_msix_err()
524 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_err()
526 if (enic_log_q_error(enic)) in enic_isr_msix_err()
528 schedule_work(&enic->reset); in enic_isr_msix_err()
535 struct enic *enic = data; in enic_isr_msix_notify() local
536 unsigned int intr = enic_msix_notify_intr(enic); in enic_isr_msix_notify()
538 enic_notify_check(enic); in enic_isr_msix_notify()
539 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_notify()
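
enic_isr_msix_err() and enic_isr_msix_notify() (521-539) share one shape: look up the vector index, return all interrupt credits so the vector re-arms, and defer any heavy work (the error path punts to schedule_work(&enic->reset)). A hedged sketch of that housekeeping-vector shape, with example_* helpers standing in for the vnic_intr/enic calls:

    /* Illustrative MSI-X housekeeping vector: re-arm, then defer work. */
    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct example_nic {                    /* hypothetical stand-in */
            struct work_struct reset_work;
    };

    bool example_log_q_error(struct example_nic *nic);
    void example_intr_return_all_credits(struct example_nic *nic);

    static irqreturn_t example_isr_err(int irq, void *data)
    {
            struct example_nic *nic = data;

            example_intr_return_all_credits(nic);   /* re-arm the vector */
            if (example_log_q_error(nic))
                    schedule_work(&nic->reset_work); /* never reset in hard IRQ */
            return IRQ_HANDLED;
    }
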
544 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument
554 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, in enic_queue_wq_skb_cont()
557 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_cont()
567 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument
577 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_vlan()
579 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_vlan()
591 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_vlan()
596 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_csum_l4() argument
608 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_csum_l4()
610 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_csum_l4()
623 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_csum_l4()
668 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_tso() argument
696 dma_addr = dma_map_single(&enic->pdev->dev, in enic_queue_wq_skb_tso()
699 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
722 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, in enic_queue_wq_skb_tso()
725 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
739 static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_encap() argument
756 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len, in enic_queue_wq_skb_encap()
758 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_encap()
766 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_encap()
771 static inline void enic_queue_wq_skb(struct enic *enic, in enic_queue_wq_skb() argument
784 } else if (enic->loop_enable) { in enic_queue_wq_skb()
785 vlan_tag = enic->loop_tag; in enic_queue_wq_skb()
790 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, in enic_queue_wq_skb()
794 err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
797 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
800 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
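
Every enic_queue_wq_skb_*() variant above (544-800) maps the skb head with dma_map_single() (fragments with skb_frag_dma_map()) and bails out through enic_dma_map_check() before posting a descriptor, so a mapping failure never reaches the WQ. The checked-map idiom reduced to a sketch (enic's own check also bumps an error counter):

    /* Illustrative checked DMA map for a TX head buffer. */
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    static int example_map_head(struct device *dev, struct sk_buff *skb,
                                unsigned int head_len, dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, skb->data, head_len, DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(dev, *dma)))
                    return -ENOMEM;         /* caller drops the skb */
            return 0;
    }
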
823 struct enic *enic = netdev_priv(netdev); in enic_hard_start_xmit() local
833 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; in enic_hard_start_xmit()
834 wq = &enic->wq[txq_map]; in enic_hard_start_xmit()
849 spin_lock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
856 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
860 enic_queue_wq_skb(enic, wq, skb); in enic_hard_start_xmit()
868 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
877 struct enic *enic = netdev_priv(netdev); in enic_get_stats() local
881 err = enic_dev_stats_dump(enic, &stats); in enic_get_stats()
898 net_stats->rx_over_errors = enic->rq_truncated_pkts; in enic_get_stats()
899 net_stats->rx_crc_errors = enic->rq_bad_fcs; in enic_get_stats()
905 struct enic *enic = netdev_priv(netdev); in enic_mc_sync() local
907 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { in enic_mc_sync()
916 enic_dev_add_addr(enic, mc_addr); in enic_mc_sync()
917 enic->mc_count++; in enic_mc_sync()
924 struct enic *enic = netdev_priv(netdev); in enic_mc_unsync() local
926 enic_dev_del_addr(enic, mc_addr); in enic_mc_unsync()
927 enic->mc_count--; in enic_mc_unsync()
934 struct enic *enic = netdev_priv(netdev); in enic_uc_sync() local
936 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { in enic_uc_sync()
945 enic_dev_add_addr(enic, uc_addr); in enic_uc_sync()
946 enic->uc_count++; in enic_uc_sync()
953 struct enic *enic = netdev_priv(netdev); in enic_uc_unsync() local
955 enic_dev_del_addr(enic, uc_addr); in enic_uc_unsync()
956 enic->uc_count--; in enic_uc_unsync()
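
enic_mc_sync()/enic_uc_sync() and their unsync twins (905-956) are the callbacks __dev_mc_sync()/__dev_uc_sync() expect: add on sync, delete on unsync, and refuse new entries once the perfect-filter table (ENIC_MULTICAST_PERFECT_FILTERS / ENIC_UNICAST_PERFECT_FILTERS) is full. A sketch of the sync pair with a hypothetical table size and example_hw_* helpers:

    /* Illustrative __dev_mc_sync() callback pair with a perfect-filter cap. */
    #include <linux/errno.h>
    #include <linux/netdevice.h>

    #define EXAMPLE_MC_FILTERS 32           /* hypothetical table size */

    struct example_nic { int mc_count; };   /* hypothetical stand-in */

    void example_hw_add_addr(struct example_nic *nic, const u8 *addr);
    void example_hw_del_addr(struct example_nic *nic, const u8 *addr);

    static int example_mc_sync(struct net_device *netdev, const u8 *addr)
    {
            struct example_nic *nic = netdev_priv(netdev);

            if (nic->mc_count == EXAMPLE_MC_FILTERS)
                    return -ENOSPC;         /* table full: refuse the entry */
            example_hw_add_addr(nic, addr);
            nic->mc_count++;
            return 0;
    }

    static int example_mc_unsync(struct net_device *netdev, const u8 *addr)
    {
            struct example_nic *nic = netdev_priv(netdev);

            example_hw_del_addr(nic, addr);
            nic->mc_count--;
            return 0;
    }
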
961 void enic_reset_addr_lists(struct enic *enic) in enic_reset_addr_lists() argument
963 struct net_device *netdev = enic->netdev; in enic_reset_addr_lists()
968 enic->mc_count = 0; in enic_reset_addr_lists()
969 enic->uc_count = 0; in enic_reset_addr_lists()
970 enic->flags = 0; in enic_reset_addr_lists()
975 struct enic *enic = netdev_priv(netdev); in enic_set_mac_addr() local
977 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_set_mac_addr()
992 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address_dynamic() local
997 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
998 err = enic_dev_del_station_addr(enic); in enic_set_mac_address_dynamic()
1007 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
1008 err = enic_dev_add_station_addr(enic); in enic_set_mac_address_dynamic()
1020 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address() local
1023 err = enic_dev_del_station_addr(enic); in enic_set_mac_address()
1031 return enic_dev_add_station_addr(enic); in enic_set_mac_address()
1037 struct enic *enic = netdev_priv(netdev); in enic_set_rx_mode() local
1049 if (enic->flags != flags) { in enic_set_rx_mode()
1050 enic->flags = flags; in enic_set_rx_mode()
1051 enic_dev_packet_filter(enic, directed, in enic_set_rx_mode()
1065 struct enic *enic = netdev_priv(netdev); in enic_tx_timeout() local
1066 schedule_work(&enic->tx_hang_reset); in enic_tx_timeout()
1071 struct enic *enic = netdev_priv(netdev); in enic_set_vf_mac() local
1075 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_mac()
1087 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_mac()
1098 struct enic *enic = netdev_priv(netdev); in enic_set_vf_port() local
1103 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_port()
1110 memcpy(&prev_pp, pp, sizeof(*enic->pp)); in enic_set_vf_port()
1111 memset(pp, 0, sizeof(*enic->pp)); in enic_set_vf_port()
1143 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_port()
1152 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); in enic_set_vf_port()
1187 struct enic *enic = netdev_priv(netdev); in enic_get_vf_port() local
1192 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_get_vf_port()
1199 err = enic_process_get_pp_request(enic, vf, pp->request, &response); in enic_get_vf_port()
1221 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf() local
1226 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_free_rq_buf()
1234 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf() local
1235 struct net_device *netdev = enic->netdev; in enic_rq_alloc_buf()
1252 dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, in enic_rq_alloc_buf()
1254 if (unlikely(enic_dma_map_check(enic, dma_addr))) { in enic_rq_alloc_buf()
1277 struct enic *enic = netdev_priv(netdev); in enic_rxcopybreak() local
1280 if (len > enic->rx_copybreak) in enic_rxcopybreak()
1285 dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len, in enic_rxcopybreak()
1297 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf() local
1298 struct net_device *netdev = enic->netdev; in enic_rq_indicate_buf()
1300 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_rq_indicate_buf()
1330 enic->rq_bad_fcs++; in enic_rq_indicate_buf()
1332 enic->rq_truncated_pkts++; in enic_rq_indicate_buf()
1335 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1350 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, in enic_rq_indicate_buf()
1373 if (enic->vxlan.vxlan_udp_port_number) { in enic_rq_indicate_buf()
1374 switch (enic->vxlan.patch_level) { in enic_rq_indicate_buf()
1411 skb_mark_napi_id(skb, &enic->napi[rq->index]); in enic_rq_indicate_buf()
1415 napi_gro_receive(&enic->napi[q_number], skb); in enic_rq_indicate_buf()
1416 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_rq_indicate_buf()
1424 dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1434 struct enic *enic = vnic_dev_priv(vdev); in enic_rq_service() local
1436 vnic_rq_service(&enic->rq[q_number], cq_desc, in enic_rq_service()
1443 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_set_int_moderation() argument
1445 unsigned int intr = enic_msix_rq_intr(enic, rq->index); in enic_set_int_moderation()
1446 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_set_int_moderation()
1450 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); in enic_set_int_moderation()
1455 static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_calc_int_moderation() argument
1457 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_calc_int_moderation()
1458 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_calc_int_moderation()
1504 struct enic *enic = netdev_priv(netdev); in enic_poll() local
1505 unsigned int cq_rq = enic_cq_rq(enic, 0); in enic_poll()
1506 unsigned int cq_wq = enic_cq_wq(enic, 0); in enic_poll()
1513 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, in enic_poll()
1517 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], in enic_poll()
1528 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1533 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_poll()
1541 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1545 enic_calc_int_moderation(enic, &enic->rq[0]); in enic_poll()
1553 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1554 enic_set_int_moderation(enic, &enic->rq[0]); in enic_poll()
1555 vnic_intr_unmask(&enic->intr[intr]); in enic_poll()
1562 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1564 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); in enic_free_rx_cpu_rmap()
1565 enic->netdev->rx_cpu_rmap = NULL; in enic_free_rx_cpu_rmap()
1568 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1572 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { in enic_set_rx_cpu_rmap()
1573 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); in enic_set_rx_cpu_rmap()
1574 if (unlikely(!enic->netdev->rx_cpu_rmap)) in enic_set_rx_cpu_rmap()
1576 for (i = 0; i < enic->rq_count; i++) { in enic_set_rx_cpu_rmap()
1577 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, in enic_set_rx_cpu_rmap()
1578 enic->msix_entry[i].vector); in enic_set_rx_cpu_rmap()
1580 enic_free_rx_cpu_rmap(enic); in enic_set_rx_cpu_rmap()
1589 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1593 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1602 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_wq() local
1603 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; in enic_poll_msix_wq()
1604 struct vnic_wq *wq = &enic->wq[wq_index]; in enic_poll_msix_wq()
1612 cq = enic_cq_wq(enic, wq_irq); in enic_poll_msix_wq()
1613 intr = enic_msix_wq_intr(enic, wq_irq); in enic_poll_msix_wq()
1614 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, in enic_poll_msix_wq()
1617 vnic_intr_return_credits(&enic->intr[intr], wq_work_done, in enic_poll_msix_wq()
1622 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_wq()
1632 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_rq() local
1633 unsigned int rq = (napi - &enic->napi[0]); in enic_poll_msix_rq()
1634 unsigned int cq = enic_cq_rq(enic, rq); in enic_poll_msix_rq()
1635 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_poll_msix_rq()
1644 work_done = vnic_cq_service(&enic->cq[cq], in enic_poll_msix_rq()
1653 vnic_intr_return_credits(&enic->intr[intr], in enic_poll_msix_rq()
1658 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_poll_msix_rq()
1666 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1670 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1678 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1679 enic_set_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1680 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_rq()
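
enic_poll() (1504-1555) and enic_poll_msix_rq() (1632-1680) share the canonical NAPI loop: service the completion queue up to the budget, return the interrupt credits, refill the RQ, and only when work_done < budget complete NAPI, apply adaptive coalescing, and unmask the vector. Its skeleton, in sketch form with example_* stand-ins:

    /* Illustrative NAPI poll: complete and unmask only under budget. */
    #include <linux/netdevice.h>

    struct example_rq { struct napi_struct napi; }; /* hypothetical */

    int  example_service_cq(struct example_rq *rq, int budget);
    void example_refill_rx(struct example_rq *rq);
    void example_intr_unmask(struct example_rq *rq);

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct example_rq *rq = container_of(napi, struct example_rq, napi);
            int work_done;

            work_done = example_service_cq(rq, budget);     /* <= budget */
            example_refill_rx(rq);                          /* repost buffers */

            if (work_done < budget && napi_complete_done(napi, work_done))
                    example_intr_unmask(rq);        /* re-enable the vector */

            return work_done;
    }
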
1688 struct enic *enic = from_timer(enic, t, notify_timer); in enic_notify_timer() local
1690 enic_notify_check(enic); in enic_notify_timer()
1692 mod_timer(&enic->notify_timer, in enic_notify_timer()
1696 static void enic_free_intr(struct enic *enic) in enic_free_intr() argument
1698 struct net_device *netdev = enic->netdev; in enic_free_intr()
1701 enic_free_rx_cpu_rmap(enic); in enic_free_intr()
1702 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_free_intr()
1704 free_irq(enic->pdev->irq, netdev); in enic_free_intr()
1707 free_irq(enic->pdev->irq, enic); in enic_free_intr()
1710 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_free_intr()
1711 if (enic->msix[i].requested) in enic_free_intr()
1712 free_irq(enic->msix_entry[i].vector, in enic_free_intr()
1713 enic->msix[i].devid); in enic_free_intr()
1720 static int enic_request_intr(struct enic *enic) in enic_request_intr() argument
1722 struct net_device *netdev = enic->netdev; in enic_request_intr()
1726 enic_set_rx_cpu_rmap(enic); in enic_request_intr()
1727 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_request_intr()
1731 err = request_irq(enic->pdev->irq, enic_isr_legacy, in enic_request_intr()
1737 err = request_irq(enic->pdev->irq, enic_isr_msi, in enic_request_intr()
1738 0, netdev->name, enic); in enic_request_intr()
1743 for (i = 0; i < enic->rq_count; i++) { in enic_request_intr()
1744 intr = enic_msix_rq_intr(enic, i); in enic_request_intr()
1745 snprintf(enic->msix[intr].devname, in enic_request_intr()
1746 sizeof(enic->msix[intr].devname), in enic_request_intr()
1748 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1749 enic->msix[intr].devid = &enic->napi[i]; in enic_request_intr()
1752 for (i = 0; i < enic->wq_count; i++) { in enic_request_intr()
1753 int wq = enic_cq_wq(enic, i); in enic_request_intr()
1755 intr = enic_msix_wq_intr(enic, i); in enic_request_intr()
1756 snprintf(enic->msix[intr].devname, in enic_request_intr()
1757 sizeof(enic->msix[intr].devname), in enic_request_intr()
1759 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1760 enic->msix[intr].devid = &enic->napi[wq]; in enic_request_intr()
1763 intr = enic_msix_err_intr(enic); in enic_request_intr()
1764 snprintf(enic->msix[intr].devname, in enic_request_intr()
1765 sizeof(enic->msix[intr].devname), in enic_request_intr()
1767 enic->msix[intr].isr = enic_isr_msix_err; in enic_request_intr()
1768 enic->msix[intr].devid = enic; in enic_request_intr()
1770 intr = enic_msix_notify_intr(enic); in enic_request_intr()
1771 snprintf(enic->msix[intr].devname, in enic_request_intr()
1772 sizeof(enic->msix[intr].devname), in enic_request_intr()
1774 enic->msix[intr].isr = enic_isr_msix_notify; in enic_request_intr()
1775 enic->msix[intr].devid = enic; in enic_request_intr()
1777 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_request_intr()
1778 enic->msix[i].requested = 0; in enic_request_intr()
1780 for (i = 0; i < enic->intr_count; i++) { in enic_request_intr()
1781 err = request_irq(enic->msix_entry[i].vector, in enic_request_intr()
1782 enic->msix[i].isr, 0, in enic_request_intr()
1783 enic->msix[i].devname, in enic_request_intr()
1784 enic->msix[i].devid); in enic_request_intr()
1786 enic_free_intr(enic); in enic_request_intr()
1789 enic->msix[i].requested = 1; in enic_request_intr()
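
enic_request_intr() (1720-1789) fills a per-vector table first (devname, isr, devid: a NAPI pointer for RQ/WQ vectors, the enic itself for err/notify) and only then requests every vector in one loop, freeing everything via enic_free_intr() if any request fails. The table-then-loop idiom with an explicit unwind, sketched with hypothetical fields:

    /* Illustrative MSI-X request loop that unwinds on failure. */
    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/pci.h>

    struct example_vec {                    /* hypothetical per-vector table */
            irq_handler_t isr;
            char devname[IFNAMSIZ + 8];
            void *devid;
    };

    struct example_nic {
            struct msix_entry *msix_entry;
            struct example_vec *vec;
    };

    static int example_request_vectors(struct example_nic *nic, int nvec)
    {
            int i, err;

            for (i = 0; i < nvec; i++) {
                    err = request_irq(nic->msix_entry[i].vector,
                                      nic->vec[i].isr, 0,
                                      nic->vec[i].devname,  /* "eth0-rx-0" style */
                                      nic->vec[i].devid);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)                /* free already-requested vectors */
                    free_irq(nic->msix_entry[i].vector, nic->vec[i].devid);
            return err;
    }
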
1801 static void enic_synchronize_irqs(struct enic *enic) in enic_synchronize_irqs() argument
1805 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_synchronize_irqs()
1808 synchronize_irq(enic->pdev->irq); in enic_synchronize_irqs()
1811 for (i = 0; i < enic->intr_count; i++) in enic_synchronize_irqs()
1812 synchronize_irq(enic->msix_entry[i].vector); in enic_synchronize_irqs()
1819 static void enic_set_rx_coal_setting(struct enic *enic) in enic_set_rx_coal_setting() argument
1823 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_set_rx_coal_setting()
1829 speed = vnic_dev_port_speed(enic->vdev); in enic_set_rx_coal_setting()
1842 for (index = 0; index < enic->rq_count; index++) in enic_set_rx_coal_setting()
1843 enic->cq[index].cur_rx_coal_timeval = in enic_set_rx_coal_setting()
1844 enic->config.intr_timer_usec; in enic_set_rx_coal_setting()
1849 static int enic_dev_notify_set(struct enic *enic) in enic_dev_notify_set() argument
1853 spin_lock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1854 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_notify_set()
1856 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1860 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1861 enic_msix_notify_intr(enic)); in enic_dev_notify_set()
1864 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); in enic_dev_notify_set()
1867 spin_unlock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1872 static void enic_notify_timer_start(struct enic *enic) in enic_notify_timer_start() argument
1874 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_notify_timer_start()
1876 mod_timer(&enic->notify_timer, jiffies); in enic_notify_timer_start()
1887 struct enic *enic = netdev_priv(netdev); in enic_open() local
1891 err = enic_request_intr(enic); in enic_open()
1896 enic_init_affinity_hint(enic); in enic_open()
1897 enic_set_affinity_hint(enic); in enic_open()
1899 err = enic_dev_notify_set(enic); in enic_open()
1906 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1908 vnic_rq_enable(&enic->rq[i]); in enic_open()
1909 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); in enic_open()
1911 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { in enic_open()
1918 for (i = 0; i < enic->wq_count; i++) in enic_open()
1919 vnic_wq_enable(&enic->wq[i]); in enic_open()
1921 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_open()
1922 enic_dev_add_station_addr(enic); in enic_open()
1928 for (i = 0; i < enic->rq_count; i++) in enic_open()
1929 napi_enable(&enic->napi[i]); in enic_open()
1931 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_open()
1932 for (i = 0; i < enic->wq_count; i++) in enic_open()
1933 napi_enable(&enic->napi[enic_cq_wq(enic, i)]); in enic_open()
1934 enic_dev_enable(enic); in enic_open()
1936 for (i = 0; i < enic->intr_count; i++) in enic_open()
1937 vnic_intr_unmask(&enic->intr[i]); in enic_open()
1939 enic_notify_timer_start(enic); in enic_open()
1940 enic_rfs_timer_start(enic); in enic_open()
1945 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1946 ret = vnic_rq_disable(&enic->rq[i]); in enic_open()
1948 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_open()
1950 enic_dev_notify_unset(enic); in enic_open()
1952 enic_unset_affinity_hint(enic); in enic_open()
1953 enic_free_intr(enic); in enic_open()
1961 struct enic *enic = netdev_priv(netdev); in enic_stop() local
1965 for (i = 0; i < enic->intr_count; i++) { in enic_stop()
1966 vnic_intr_mask(&enic->intr[i]); in enic_stop()
1967 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ in enic_stop()
1970 enic_synchronize_irqs(enic); in enic_stop()
1972 del_timer_sync(&enic->notify_timer); in enic_stop()
1973 enic_rfs_flw_tbl_free(enic); in enic_stop()
1975 enic_dev_disable(enic); in enic_stop()
1977 for (i = 0; i < enic->rq_count; i++) in enic_stop()
1978 napi_disable(&enic->napi[i]); in enic_stop()
1981 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_stop()
1982 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1983 napi_disable(&enic->napi[enic_cq_wq(enic, i)]); in enic_stop()
1986 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_stop()
1987 enic_dev_del_station_addr(enic); in enic_stop()
1989 for (i = 0; i < enic->wq_count; i++) { in enic_stop()
1990 err = vnic_wq_disable(&enic->wq[i]); in enic_stop()
1994 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1995 err = vnic_rq_disable(&enic->rq[i]); in enic_stop()
2000 enic_dev_notify_unset(enic); in enic_stop()
2001 enic_unset_affinity_hint(enic); in enic_stop()
2002 enic_free_intr(enic); in enic_stop()
2004 for (i = 0; i < enic->wq_count; i++) in enic_stop()
2005 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); in enic_stop()
2006 for (i = 0; i < enic->rq_count; i++) in enic_stop()
2007 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_stop()
2008 for (i = 0; i < enic->cq_count; i++) in enic_stop()
2009 vnic_cq_clean(&enic->cq[i]); in enic_stop()
2010 for (i = 0; i < enic->intr_count; i++) in enic_stop()
2011 vnic_intr_clean(&enic->intr[i]); in enic_stop()
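
enic_stop() (1961-2011) tears down in strict order: mask every interrupt and flush the posted write, synchronize_irq() each vector, stop the notify timer and NAPI, disable the WQs and RQs (checking each disable), and only then clean the rings. The ordering matters because a ring may only be cleaned once its queue is provably disabled. In miniature, with hypothetical helpers:

    /* Illustrative quiesce-then-clean teardown order. */
    #include <linux/interrupt.h>
    #include <linux/pci.h>

    struct example_nic {                    /* hypothetical stand-in */
            int nvec;
            struct msix_entry *msix_entry;
    };

    void example_intr_mask_and_flush(struct example_nic *nic, int i);
    void example_napi_disable_all(struct example_nic *nic);
    int  example_disable_queues(struct example_nic *nic);
    void example_clean_rings(struct example_nic *nic);

    static void example_stop(struct example_nic *nic)
    {
            int i;

            for (i = 0; i < nic->nvec; i++)
                    example_intr_mask_and_flush(nic, i);    /* flush the write */
            for (i = 0; i < nic->nvec; i++)
                    synchronize_irq(nic->msix_entry[i].vector);

            example_napi_disable_all(nic);
            if (example_disable_queues(nic))        /* must succeed first */
                    return;
            example_clean_rings(nic);               /* now safe: hw quiesced */
    }
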
2041 struct enic *enic = netdev_priv(netdev); in enic_change_mtu() local
2043 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_change_mtu()
2046 if (netdev->mtu > enic->port_mtu) in enic_change_mtu()
2049 netdev->mtu, enic->port_mtu); in enic_change_mtu()
2056 struct enic *enic = container_of(work, struct enic, change_mtu_work); in enic_change_mtu_work() local
2057 struct net_device *netdev = enic->netdev; in enic_change_mtu_work()
2058 int new_mtu = vnic_dev_mtu(enic->vdev); in enic_change_mtu_work()
2070 struct enic *enic = netdev_priv(netdev); in enic_poll_controller() local
2071 struct vnic_dev *vdev = enic->vdev; in enic_poll_controller()
2076 for (i = 0; i < enic->rq_count; i++) { in enic_poll_controller()
2077 intr = enic_msix_rq_intr(enic, i); in enic_poll_controller()
2078 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
2079 &enic->napi[i]); in enic_poll_controller()
2082 for (i = 0; i < enic->wq_count; i++) { in enic_poll_controller()
2083 intr = enic_msix_wq_intr(enic, i); in enic_poll_controller()
2084 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
2085 &enic->napi[enic_cq_wq(enic, i)]); in enic_poll_controller()
2090 enic_isr_msi(enic->pdev->irq, enic); in enic_poll_controller()
2093 enic_isr_legacy(enic->pdev->irq, netdev); in enic_poll_controller()
2134 static int enic_dev_open(struct enic *enic) in enic_dev_open() argument
2139 err = enic_dev_wait(enic->vdev, vnic_dev_open, in enic_dev_open()
2142 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", in enic_dev_open()
2148 static int enic_dev_soft_reset(struct enic *enic) in enic_dev_soft_reset() argument
2152 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, in enic_dev_soft_reset()
2155 netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n", in enic_dev_soft_reset()
2161 static int enic_dev_hang_reset(struct enic *enic) in enic_dev_hang_reset() argument
2165 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, in enic_dev_hang_reset()
2168 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", in enic_dev_hang_reset()
2174 int __enic_set_rsskey(struct enic *enic) in __enic_set_rsskey() argument
2180 rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev, in __enic_set_rsskey()
2189 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; in __enic_set_rsskey()
2191 spin_lock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2192 err = enic_set_rss_key(enic, in __enic_set_rsskey()
2195 spin_unlock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2197 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key), in __enic_set_rsskey()
2203 static int enic_set_rsskey(struct enic *enic) in enic_set_rsskey() argument
2205 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); in enic_set_rsskey()
2207 return __enic_set_rsskey(enic); in enic_set_rsskey()
2210 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) in enic_set_rsscpu() argument
2217 rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev, in enic_set_rsscpu()
2224 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; in enic_set_rsscpu()
2226 spin_lock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2227 err = enic_set_rss_cpu(enic, in enic_set_rsscpu()
2230 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2232 dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu), in enic_set_rsscpu()
2238 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, in enic_set_niccfg() argument
2248 spin_lock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2249 err = enic_set_nic_cfg(enic, in enic_set_niccfg()
2254 spin_unlock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2259 static int enic_set_rss_nic_cfg(struct enic *enic) in enic_set_rss_nic_cfg() argument
2261 struct device *dev = enic_get_dev(enic); in enic_set_rss_nic_cfg()
2267 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); in enic_set_rss_nic_cfg()
2269 spin_lock_bh(&enic->devcmd_lock); in enic_set_rss_nic_cfg()
2270 res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); in enic_set_rss_nic_cfg()
2271 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rss_nic_cfg()
2282 if (!enic_set_rsskey(enic)) { in enic_set_rss_nic_cfg()
2283 if (enic_set_rsscpu(enic, rss_hash_bits)) { in enic_set_rss_nic_cfg()
2294 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, in enic_set_rss_nic_cfg()
2298 static void enic_set_api_busy(struct enic *enic, bool busy) in enic_set_api_busy() argument
2300 spin_lock(&enic->enic_api_lock); in enic_set_api_busy()
2301 enic->enic_api_busy = busy; in enic_set_api_busy()
2302 spin_unlock(&enic->enic_api_lock); in enic_set_api_busy()
2307 struct enic *enic = container_of(work, struct enic, reset); in enic_reset() local
2309 if (!netif_running(enic->netdev)) in enic_reset()
2315 enic_set_api_busy(enic, true); in enic_reset()
2317 enic_stop(enic->netdev); in enic_reset()
2318 enic_dev_soft_reset(enic); in enic_reset()
2319 enic_reset_addr_lists(enic); in enic_reset()
2320 enic_init_vnic_resources(enic); in enic_reset()
2321 enic_set_rss_nic_cfg(enic); in enic_reset()
2322 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_reset()
2323 enic_open(enic->netdev); in enic_reset()
2326 enic_set_api_busy(enic, false); in enic_reset()
2328 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_reset()
2335 struct enic *enic = container_of(work, struct enic, tx_hang_reset); in enic_tx_hang_reset() local
2340 enic_set_api_busy(enic, true); in enic_tx_hang_reset()
2342 enic_dev_hang_notify(enic); in enic_tx_hang_reset()
2343 enic_stop(enic->netdev); in enic_tx_hang_reset()
2344 enic_dev_hang_reset(enic); in enic_tx_hang_reset()
2345 enic_reset_addr_lists(enic); in enic_tx_hang_reset()
2346 enic_init_vnic_resources(enic); in enic_tx_hang_reset()
2347 enic_set_rss_nic_cfg(enic); in enic_tx_hang_reset()
2348 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_tx_hang_reset()
2349 enic_open(enic->netdev); in enic_tx_hang_reset()
2352 enic_set_api_busy(enic, false); in enic_tx_hang_reset()
2354 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_tx_hang_reset()
2359 static int enic_set_intr_mode(struct enic *enic) in enic_set_intr_mode() argument
2361 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); in enic_set_intr_mode()
2362 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); in enic_set_intr_mode()
2375 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); in enic_set_intr_mode()
2377 enic->msix_entry[i].entry = i; in enic_set_intr_mode()
2382 if (ENIC_SETTING(enic, RSS) && in enic_set_intr_mode()
2383 enic->config.intr_mode < 1 && in enic_set_intr_mode()
2384 enic->rq_count >= n && in enic_set_intr_mode()
2385 enic->wq_count >= m && in enic_set_intr_mode()
2386 enic->cq_count >= n + m && in enic_set_intr_mode()
2387 enic->intr_count >= n + m + 2) { in enic_set_intr_mode()
2389 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2392 enic->rq_count = n; in enic_set_intr_mode()
2393 enic->wq_count = m; in enic_set_intr_mode()
2394 enic->cq_count = n + m; in enic_set_intr_mode()
2395 enic->intr_count = n + m + 2; in enic_set_intr_mode()
2397 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2404 if (enic->config.intr_mode < 1 && in enic_set_intr_mode()
2405 enic->rq_count >= 1 && in enic_set_intr_mode()
2406 enic->wq_count >= m && in enic_set_intr_mode()
2407 enic->cq_count >= 1 + m && in enic_set_intr_mode()
2408 enic->intr_count >= 1 + m + 2) { in enic_set_intr_mode()
2409 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2412 enic->rq_count = 1; in enic_set_intr_mode()
2413 enic->wq_count = m; in enic_set_intr_mode()
2414 enic->cq_count = 1 + m; in enic_set_intr_mode()
2415 enic->intr_count = 1 + m + 2; in enic_set_intr_mode()
2417 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2429 if (enic->config.intr_mode < 2 && in enic_set_intr_mode()
2430 enic->rq_count >= 1 && in enic_set_intr_mode()
2431 enic->wq_count >= 1 && in enic_set_intr_mode()
2432 enic->cq_count >= 2 && in enic_set_intr_mode()
2433 enic->intr_count >= 1 && in enic_set_intr_mode()
2434 !pci_enable_msi(enic->pdev)) { in enic_set_intr_mode()
2436 enic->rq_count = 1; in enic_set_intr_mode()
2437 enic->wq_count = 1; in enic_set_intr_mode()
2438 enic->cq_count = 2; in enic_set_intr_mode()
2439 enic->intr_count = 1; in enic_set_intr_mode()
2441 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); in enic_set_intr_mode()
2454 if (enic->config.intr_mode < 3 && in enic_set_intr_mode()
2455 enic->rq_count >= 1 && in enic_set_intr_mode()
2456 enic->wq_count >= 1 && in enic_set_intr_mode()
2457 enic->cq_count >= 2 && in enic_set_intr_mode()
2458 enic->intr_count >= 3) { in enic_set_intr_mode()
2460 enic->rq_count = 1; in enic_set_intr_mode()
2461 enic->wq_count = 1; in enic_set_intr_mode()
2462 enic->cq_count = 2; in enic_set_intr_mode()
2463 enic->intr_count = 3; in enic_set_intr_mode()
2465 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); in enic_set_intr_mode()
2470 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_set_intr_mode()
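
enic_set_intr_mode() (2359-2470) is a fallback ladder: first MSI-X sized for n RQs + m WQs + 2 extra vectors (error and notify), then MSI-X with a single RQ, then plain MSI, then INTx with exactly three interrupts, trimming the rq/wq/cq/intr counts to whatever mode sticks. The ladder in miniature, with hypothetical mode constants:

    /* Illustrative interrupt-mode fallback: MSI-X -> MSI -> INTx. */
    #include <linux/pci.h>

    enum example_intr_mode { EXAMPLE_MSIX, EXAMPLE_MSI, EXAMPLE_INTX };

    static enum example_intr_mode example_set_intr_mode(struct pci_dev *pdev,
                                                        struct msix_entry *entries,
                                                        int want)
    {
            /* all-or-nothing: min == max == want, as enic requests it */
            if (pci_enable_msix_range(pdev, entries, want, want) == want)
                    return EXAMPLE_MSIX;
            if (!pci_enable_msi(pdev))
                    return EXAMPLE_MSI;     /* single vector: 1 RQ, 1 WQ */
            return EXAMPLE_INTX;            /* legacy: io, err, notify */
    }
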
2475 static void enic_clear_intr_mode(struct enic *enic) in enic_clear_intr_mode() argument
2477 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_clear_intr_mode()
2479 pci_disable_msix(enic->pdev); in enic_clear_intr_mode()
2482 pci_disable_msi(enic->pdev); in enic_clear_intr_mode()
2488 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_clear_intr_mode()
2543 static void enic_dev_deinit(struct enic *enic) in enic_dev_deinit() argument
2547 for (i = 0; i < enic->rq_count; i++) in enic_dev_deinit()
2548 __netif_napi_del(&enic->napi[i]); in enic_dev_deinit()
2550 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_dev_deinit()
2551 for (i = 0; i < enic->wq_count; i++) in enic_dev_deinit()
2552 __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); in enic_dev_deinit()
2557 enic_free_vnic_resources(enic); in enic_dev_deinit()
2558 enic_clear_intr_mode(enic); in enic_dev_deinit()
2559 enic_free_affinity_hint(enic); in enic_dev_deinit()
2562 static void enic_kdump_kernel_config(struct enic *enic) in enic_kdump_kernel_config() argument
2565 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); in enic_kdump_kernel_config()
2566 enic->rq_count = 1; in enic_kdump_kernel_config()
2567 enic->wq_count = 1; in enic_kdump_kernel_config()
2568 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; in enic_kdump_kernel_config()
2569 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; in enic_kdump_kernel_config()
2570 enic->config.mtu = min_t(u16, 1500, enic->config.mtu); in enic_kdump_kernel_config()
2574 static int enic_dev_init(struct enic *enic) in enic_dev_init() argument
2576 struct device *dev = enic_get_dev(enic); in enic_dev_init()
2577 struct net_device *netdev = enic->netdev; in enic_dev_init()
2582 err = enic_dev_intr_coal_timer_info(enic); in enic_dev_init()
2586 vnic_dev_intr_coal_timer_info_default(enic->vdev); in enic_dev_init()
2592 err = enic_get_vnic_config(enic); in enic_dev_init()
2601 enic_get_res_counts(enic); in enic_dev_init()
2605 enic_kdump_kernel_config(enic); in enic_dev_init()
2611 err = enic_set_intr_mode(enic); in enic_dev_init()
2621 err = enic_alloc_vnic_resources(enic); in enic_dev_init()
2627 enic_init_vnic_resources(enic); in enic_dev_init()
2629 err = enic_set_rss_nic_cfg(enic); in enic_dev_init()
2635 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_init()
2637 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); in enic_dev_init()
2640 for (i = 0; i < enic->rq_count; i++) { in enic_dev_init()
2641 netif_napi_add(netdev, &enic->napi[i], in enic_dev_init()
2644 for (i = 0; i < enic->wq_count; i++) in enic_dev_init()
2645 netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], in enic_dev_init()
2653 enic_free_affinity_hint(enic); in enic_dev_init()
2654 enic_clear_intr_mode(enic); in enic_dev_init()
2655 enic_free_vnic_resources(enic); in enic_dev_init()
2660 static void enic_iounmap(struct enic *enic) in enic_iounmap() argument
2664 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) in enic_iounmap()
2665 if (enic->bar[i].vaddr) in enic_iounmap()
2666 iounmap(enic->bar[i].vaddr); in enic_iounmap()
2673 struct enic *enic; in enic_probe() local
2686 netdev = alloc_etherdev_mqs(sizeof(struct enic), in enic_probe()
2695 enic = netdev_priv(netdev); in enic_probe()
2696 enic->netdev = netdev; in enic_probe()
2697 enic->pdev = pdev; in enic_probe()
2747 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { in enic_probe()
2750 enic->bar[i].len = pci_resource_len(pdev, i); in enic_probe()
2751 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); in enic_probe()
2752 if (!enic->bar[i].vaddr) { in enic_probe()
2757 enic->bar[i].bus_addr = pci_resource_start(pdev, i); in enic_probe()
2763 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, in enic_probe()
2764 ARRAY_SIZE(enic->bar)); in enic_probe()
2765 if (!enic->vdev) { in enic_probe()
2771 err = vnic_devcmd_init(enic->vdev); in enic_probe()
2781 &enic->num_vfs); in enic_probe()
2782 if (enic->num_vfs) { in enic_probe()
2783 err = pci_enable_sriov(pdev, enic->num_vfs); in enic_probe()
2790 enic->priv_flags |= ENIC_SRIOV_ENABLED; in enic_probe()
2791 num_pps = enic->num_vfs; in enic_probe()
2797 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); in enic_probe()
2798 if (!enic->pp) { in enic_probe()
2806 err = enic_dev_open(enic); in enic_probe()
2815 spin_lock_init(&enic->devcmd_lock); in enic_probe()
2816 spin_lock_init(&enic->enic_api_lock); in enic_probe()
2822 err = enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_probe()
2844 if (!enic_is_dynamic(enic)) { in enic_probe()
2845 err = vnic_dev_init(enic->vdev, 0); in enic_probe()
2852 err = enic_dev_init(enic); in enic_probe()
2858 netif_set_real_num_tx_queues(netdev, enic->wq_count); in enic_probe()
2859 netif_set_real_num_rx_queues(netdev, enic->rq_count); in enic_probe()
2864 timer_setup(&enic->notify_timer, enic_notify_timer, 0); in enic_probe()
2866 enic_rfs_flw_tbl_init(enic); in enic_probe()
2867 enic_set_rx_coal_setting(enic); in enic_probe()
2868 INIT_WORK(&enic->reset, enic_reset); in enic_probe()
2869 INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); in enic_probe()
2870 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); in enic_probe()
2872 for (i = 0; i < enic->wq_count; i++) in enic_probe()
2873 spin_lock_init(&enic->wq_lock[i]); in enic_probe()
2878 enic->port_mtu = enic->config.mtu; in enic_probe()
2880 err = enic_set_mac_addr(netdev, enic->mac_addr); in enic_probe()
2886 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; in enic_probe()
2890 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; in enic_probe()
2892 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_probe()
2901 if (ENIC_SETTING(enic, LOOP)) { in enic_probe()
2903 enic->loop_enable = 1; in enic_probe()
2904 enic->loop_tag = enic->config.loop_tag; in enic_probe()
2905 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); in enic_probe()
2907 if (ENIC_SETTING(enic, TXCSUM)) in enic_probe()
2909 if (ENIC_SETTING(enic, TSO)) in enic_probe()
2912 if (ENIC_SETTING(enic, RSS)) in enic_probe()
2914 if (ENIC_SETTING(enic, RXCSUM)) in enic_probe()
2916 if (ENIC_SETTING(enic, VXLAN)) { in enic_probe()
2939 err = vnic_dev_get_supported_feature_ver(enic->vdev, in enic_probe()
2944 enic->vxlan.flags = (u8)a1; in enic_probe()
2950 enic->vxlan.patch_level = patch_level; in enic_probe()
2952 if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 || in enic_probe()
2953 enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) { in enic_probe()
2955 if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6) in enic_probe()
2975 netdev->mtu = enic->port_mtu; in enic_probe()
2982 enic->rx_copybreak = RX_COPYBREAK_DEFAULT; in enic_probe()
2987 enic_dev_deinit(enic); in enic_probe()
2989 vnic_dev_close(enic->vdev); in enic_probe()
2991 kfree(enic->pp); in enic_probe()
2994 if (enic_sriov_enabled(enic)) { in enic_probe()
2996 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_probe()
3000 vnic_dev_unregister(enic->vdev); in enic_probe()
3002 enic_iounmap(enic); in enic_probe()
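
The probe fragments end (2987-3002) in the standard goto-unwind ladder: each error label reverses exactly the steps that had already succeeded, in reverse order (enic_dev_deinit, vnic_dev_close, kfree(enic->pp), SR-IOV disable, vnic_dev_unregister, enic_iounmap). The shape, with hypothetical steps:

    /* Illustrative probe unwind: each label undoes one completed step. */
    #include <linux/pci.h>

    int  example_map_bars(struct pci_dev *pdev);
    int  example_register_vdev(struct pci_dev *pdev);
    int  example_open_dev(struct pci_dev *pdev);
    void example_unregister_vdev(struct pci_dev *pdev);
    void example_unmap_bars(struct pci_dev *pdev);

    static int example_probe(struct pci_dev *pdev)
    {
            int err;

            err = example_map_bars(pdev);
            if (err)
                    return err;
            err = example_register_vdev(pdev);
            if (err)
                    goto err_unmap;
            err = example_open_dev(pdev);
            if (err)
                    goto err_unregister;
            return 0;

    err_unregister:
            example_unregister_vdev(pdev);  /* reverse order of setup */
    err_unmap:
            example_unmap_bars(pdev);
            return err;
    }
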
3018 struct enic *enic = netdev_priv(netdev); in enic_remove() local
3020 cancel_work_sync(&enic->reset); in enic_remove()
3021 cancel_work_sync(&enic->change_mtu_work); in enic_remove()
3023 enic_dev_deinit(enic); in enic_remove()
3024 vnic_dev_close(enic->vdev); in enic_remove()
3026 if (enic_sriov_enabled(enic)) { in enic_remove()
3028 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_remove()
3031 kfree(enic->pp); in enic_remove()
3032 vnic_dev_unregister(enic->vdev); in enic_remove()
3033 enic_iounmap(enic); in enic_remove()