
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
110 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
127 #define FW4_CFNAME "cxgb4/t4-config.txt"
128 #define FW5_CFNAME "cxgb4/t5-config.txt"
129 #define FW6_CFNAME "cxgb4/t6-config.txt"
145 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
159 * offset by 2 bytes in order to have the IP headers line up on 4-byte
161 * a machine check fault if an attempt is made to access one of the 4-byte IP
162 * header fields on a non-4-byte boundary. And it's a major performance issue
165 * edge-case performance sensitive applications (like forwarding large volumes
167 * PCI-E Bus transfers enough to measurably affect performance.
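/*
 * Editorial sketch (not part of the driver source): the comment above explains
 * that ingress packet data is offset by 2 bytes so the IP header lands on a
 * 4-byte boundary.  With a standard 14-byte Ethernet header the arithmetic is:
 *
 *     2 (pad) + 14 (Ethernet header) = 16, a multiple of 4,
 *
 * so every 4-byte IP header field is naturally aligned and no unaligned
 * accesses (or machine check faults on strict architectures) occur.
 */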
171 /* TX Queue select used to determine what algorithm to use for selecting TX
200 switch (p->link_cfg.speed) { in link_report()
224 dev->name, p->link_cfg.speed); in link_report()
228 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, in link_report()
229 fc[p->link_cfg.fc]); in link_report()
238 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable()
239 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
242 /* We use a simple mapping of Port TX Queue Index to DCB in dcb_tx_queue_prio_enable()
243 * Priority when we're enabling DCB. in dcb_tx_queue_prio_enable()
245 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
252 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
259 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
261 -FW_CMD_MAX_TIMEOUT); in dcb_tx_queue_prio_enable()
264 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
265 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", in dcb_tx_queue_prio_enable()
266 enable ? "set" : "unset", pi->port_id, i, -err); in dcb_tx_queue_prio_enable()
268 txq->dcb_prio = enable ? value : 0; in dcb_tx_queue_prio_enable()
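/*
 * Editorial sketch (not from the driver): per the comment above, enabling DCB
 * programs each of the port's TX queues with a DCB Priority using a simple
 * one-to-one mapping of queue index to priority, and clears it again on
 * disable.  A hypothetical stand-alone version of that mapping:
 */
static void map_txq_index_to_dcb_prio(unsigned int *dcb_prio, int nqsets,
				      int enable)
{
	int i;

	for (i = 0; i < nqsets; i++)
		dcb_prio[i] = enable ? i : 0;	/* queue i carries priority i */
}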
276 if (!pi->dcb.enabled) in cxgb4_dcb_enabled()
279 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || in cxgb4_dcb_enabled()
280 (pi->dcb.state == CXGB4_DCB_STATE_HOST)); in cxgb4_dcb_enabled()
286 struct net_device *dev = adapter->port[port_id]; in t4_os_link_changed()
312 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
315 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
317 else if (pi->mod_type < ARRAY_SIZE(mod_str)) in t4_os_portmod_changed()
318 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); in t4_os_portmod_changed()
319 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
321 dev->name); in t4_os_portmod_changed()
322 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
324 dev->name); in t4_os_portmod_changed()
325 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) in t4_os_portmod_changed()
326 netdev_info(dev, "%s: transceiver module error\n", dev->name); in t4_os_portmod_changed()
329 dev->name, pi->mod_type); in t4_os_portmod_changed()
334 pi->link_cfg.redo_l1cfg = netif_running(dev); in t4_os_portmod_changed()
351 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash()
357 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
358 ucast |= is_unicast_ether_addr(entry->addr); in cxgb4_set_addr_hash()
359 vec |= (1ULL << hash_mac_addr(entry->addr)); in cxgb4_set_addr_hash()
361 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
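/*
 * Editorial sketch (not from the driver): cxgb4_set_addr_hash() above walks
 * the adapter's MAC list and builds a 64-bit hash-filter vector with one bit
 * per hashed address.  Simplified stand-alone version; toy_hash() is a
 * placeholder for the driver's real hash_mac_addr() and only needs to return
 * a value in 0..63:
 */
static unsigned int toy_hash(const unsigned char addr[6])
{
	unsigned int h = 0;
	int i;

	for (i = 0; i < 6; i++)		/* illustration only, not the real hash */
		h ^= addr[i];
	return h & 0x3f;
}

static unsigned long long build_hash_vec(const unsigned char (*addrs)[6],
					 int naddrs)
{
	unsigned long long vec = 0;
	int i;

	for (i = 0; i < naddrs; i++)
		vec |= 1ULL << toy_hash(addrs[i]);	/* one bit per address */
	return vec;
}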
368 struct adapter *adap = pi->adapter; in cxgb4_mac_sync()
383 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
394 return -ENOMEM; in cxgb4_mac_sync()
395 ether_addr_copy(new_entry->addr, mac_addr); in cxgb4_mac_sync()
396 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
406 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync()
414 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
415 if (ether_addr_equal(entry->addr, mac_addr)) { in cxgb4_mac_unsync()
416 list_del(&entry->list); in cxgb4_mac_unsync()
422 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
423 return ret < 0 ? -EINVAL : 0; in cxgb4_mac_unsync()
428 * If @mtu is -1 it is left unchanged.
433 struct adapter *adapter = pi->adapter; in set_rxmode()
438 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, in set_rxmode()
439 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in set_rxmode()
440 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, in set_rxmode()
445 * cxgb4_change_mac - Update match filter for a MAC address.
449 * or -1
465 struct adapter *adapter = pi->adapter; in cxgb4_change_mac()
469 ret = t4_change_mac(adapter, adapter->mbox, viid, in cxgb4_change_mac()
472 if (ret == -ENOMEM) { in cxgb4_change_mac()
476 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4_change_mac()
477 if (entry->iface_mac) { in cxgb4_change_mac()
478 ether_addr_copy(entry->addr, addr); in cxgb4_change_mac()
484 return -ENOMEM; in cxgb4_change_mac()
485 ether_addr_copy(new_entry->addr, addr); in cxgb4_change_mac()
486 new_entry->iface_mac = true; in cxgb4_change_mac()
487 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4_change_mac()
499 * link_start - enable a port
507 unsigned int mb = pi->adapter->mbox; in link_start()
514 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, in link_start()
515 dev->mtu, -1, -1, -1, in link_start()
516 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in link_start()
518 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in link_start()
519 dev->dev_addr, true, &pi->smt_idx); in link_start()
521 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, in link_start()
522 &pi->link_cfg); in link_start()
525 ret = t4_enable_pi_params(pi->adapter, mb, pi, true, in link_start()
537 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); in dcb_rpl()
538 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
545 /* If the DCB has become enabled or disabled on the port then we're in dcb_rpl()
546 * going to need to set up/tear down DCB Priority parameters for the in dcb_rpl()
559 u8 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
566 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { in fwevtq_handler()
568 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
571 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
579 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); in fwevtq_handler()
582 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
583 txq->restarts++; in fwevtq_handler()
584 if (txq->q_type == CXGB4_TXQ_ETH) { in fwevtq_handler()
588 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
593 tasklet_schedule(&oq->qresume_tsk); in fwevtq_handler()
599 const struct fw_port_cmd *pcmd = (const void *)p->data; in fwevtq_handler()
600 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); in fwevtq_handler()
602 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); in fwevtq_handler()
608 be32_to_cpu(pcmd->op_to_portid)); in fwevtq_handler()
612 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
614 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) in fwevtq_handler()
615 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) in fwevtq_handler()
626 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
629 if (p->type == 0) in fwevtq_handler()
630 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
634 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
638 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
642 filter_rpl(q->adap, p); in fwevtq_handler()
646 hash_filter_rpl(q->adap, p); in fwevtq_handler()
650 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
654 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
656 dev_err(q->adap->pdev_dev, in fwevtq_handler()
664 if (adapter->flags & CXGB4_USING_MSIX) { in disable_msi()
665 pci_disable_msix(adapter->pdev); in disable_msi()
666 adapter->flags &= ~CXGB4_USING_MSIX; in disable_msi()
667 } else if (adapter->flags & CXGB4_USING_MSI) { in disable_msi()
668 pci_disable_msi(adapter->pdev); in disable_msi()
669 adapter->flags &= ~CXGB4_USING_MSI; in disable_msi()
674 * Interrupt handler for non-data events used with MSI-X.
682 adap->swintr = 1; in t4_nondata_intr()
685 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
696 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
697 return -ENOMEM; in cxgb4_set_msix_aff()
700 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
705 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
720 struct sge *s = &adap->sge; in request_msix_queue_irqs()
724 if (s->fwevtq_msix_idx < 0) in request_msix_queue_irqs()
725 return -ENOMEM; in request_msix_queue_irqs()
727 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
729 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
730 &s->fw_evtq); in request_msix_queue_irqs()
735 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
736 err = request_irq(minfo->vec, in request_msix_queue_irqs()
738 minfo->desc, in request_msix_queue_irqs()
739 &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
743 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
744 &minfo->aff_mask, ethqidx); in request_msix_queue_irqs()
749 while (--ethqidx >= 0) { in request_msix_queue_irqs()
750 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
751 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in request_msix_queue_irqs()
752 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
754 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
760 struct sge *s = &adap->sge; in free_msix_queue_irqs()
764 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
766 minfo = s->ethrxq[i].msix; in free_msix_queue_irqs()
767 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in free_msix_queue_irqs()
768 free_irq(minfo->vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
786 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
788 dev_warn(adap->pdev_dev, in setup_ppod_edram()
791 return -1; in setup_ppod_edram()
795 return -1; in setup_ppod_edram()
797 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
799 dev_err(adap->pdev_dev, in setup_ppod_edram()
801 return -1; in setup_ppod_edram()
815 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hpfilter()
822 dev_err(adapter->pdev_dev, in adap_config_hpfilter()
829 struct adapter *adap = pi->adapter; in cxgb4_config_rss()
832 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
842 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
852 * cxgb4_write_rss - write the RSS table for a given port
862 struct adapter *adapter = pi->adapter; in cxgb4_write_rss()
867 rxq = &adapter->sge.ethrxq[pi->first_qset]; in cxgb4_write_rss()
868 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_write_rss()
870 return -ENOMEM; in cxgb4_write_rss()
873 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss()
876 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); in cxgb4_write_rss()
882 * setup_rss - configure RSS
895 for (j = 0; j < pi->rss_size; j++) in setup_rss()
896 pi->rss[j] = j % pi->nqsets; in setup_rss()
898 err = cxgb4_write_rss(pi, pi->rss); in setup_rss()
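/*
 * Editorial sketch (not from the driver): setup_rss() above fills each port's
 * RSS indirection table by spreading entries round-robin over the port's
 * queue sets, exactly as the j % nqsets loop shows:
 */
static void fill_rss_table(unsigned short *rss, int rss_size, int nqsets)
{
	int j;

	for (j = 0; j < rss_size; j++)
		rss[j] = j % nqsets;	/* entry j steers to queue set j mod nqsets */
}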
910 qid -= p->ingr_start; in rxq_to_chan()
911 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; in rxq_to_chan()
916 if (q->handler) in cxgb4_quiesce_rx()
917 napi_disable(&q->napi); in cxgb4_quiesce_rx()
927 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
928 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
940 struct sge *s = &adap->sge; in disable_interrupts()
942 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
944 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
946 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
949 free_irq(adap->pdev->irq, adap); in disable_interrupts()
957 if (q->handler) in cxgb4_enable_rx()
958 napi_enable(&q->napi); in cxgb4_enable_rx()
960 /* 0-increment GTS to start the timer and enable interrupts */ in cxgb4_enable_rx()
962 SEINTARM_V(q->intr_params) | in cxgb4_enable_rx()
963 INGRESSQID_V(q->cntxt_id)); in cxgb4_enable_rx()
973 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
974 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
987 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
988 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
991 /* Request MSI-X vector for non-data interrupt */ in setup_non_data_intr()
994 return -ENOMEM; in setup_non_data_intr()
996 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
997 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
998 "%s", adap->port[0]->name); in setup_non_data_intr()
1000 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1006 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1009 bitmap_zero(s->starving_fl, s->egr_sz); in setup_fw_sge_queues()
1010 bitmap_zero(s->txq_maperr, s->egr_sz); in setup_fw_sge_queues()
1012 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1013 s->fwevtq_msix_idx = -1; in setup_fw_sge_queues()
1016 return -ENOMEM; in setup_fw_sge_queues()
1018 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1019 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1020 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1022 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1023 NULL, NULL, NULL, -1); in setup_fw_sge_queues()
1026 msix = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1030 msix, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
1034 s->fwevtq_msix_idx = msix; in setup_fw_sge_queues()
1039 * setup_sge_queues - configure SGE Tx/Rx/response queues
1043 * We support multiple queue sets per port if we have MSI-X, otherwise
1049 struct sge *s = &adap->sge; in setup_sge_queues()
1054 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; in setup_sge_queues()
1056 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1057 msix = -((int)s->intrq.abs_id + 1); in setup_sge_queues()
1060 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1062 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1063 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
1065 for (j = 0; j < pi->nqsets; j++, q++) { in setup_sge_queues()
1073 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1074 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1075 "%s-Rx%d", dev->name, j); in setup_sge_queues()
1076 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1079 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1080 msix, &q->fl, in setup_sge_queues()
1084 pi->tx_chan)); in setup_sge_queues()
1087 q->rspq.idx = j; in setup_sge_queues()
1088 memset(&q->stats, 0, sizeof(q->stats)); in setup_sge_queues()
1091 q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1092 for (j = 0; j < pi->nqsets; j++, t++, q++) { in setup_sge_queues()
1095 q->rspq.cntxt_id, in setup_sge_queues()
1096 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1107 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id; in setup_sge_queues()
1109 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1110 s->fw_evtq.cntxt_id, cmplqid); in setup_sge_queues()
1115 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1116 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1117 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1118 , s->fw_evtq.cntxt_id, false); in setup_sge_queues()
1123 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1126 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1127 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); in setup_sge_queues()
1130 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1154 "TX Packet without VLAN Tag on DCB Link\n"); in cxgb_select_queue()
1159 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb_select_queue()
1160 txq = skb->priority & 0x7; in cxgb_select_queue()
1167 if (dev->num_tc) { in cxgb_select_queue()
1171 ver = ip_hdr(skb)->version; in cxgb_select_queue()
1172 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : in cxgb_select_queue()
1173 ip_hdr(skb)->protocol; in cxgb_select_queue()
1178 skb->encapsulation || in cxgb_select_queue()
1181 txq = txq % pi->nqsets; in cxgb_select_queue()
1191 while (unlikely(txq >= dev->real_num_tx_queues)) in cxgb_select_queue()
1192 txq -= dev->real_num_tx_queues; in cxgb_select_queue()
1197 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in cxgb_select_queue()
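/*
 * Editorial sketch (not from the driver): cxgb_select_queue() above keeps the
 * chosen TX queue inside the device's valid range by repeated subtraction,
 * which is equivalent to a modulo for non-negative values:
 */
static unsigned int clamp_txq(unsigned int txq, unsigned int real_num_tx_queues)
{
	while (txq >= real_num_tx_queues)
		txq -= real_num_tx_queues;	/* same result as txq % real_num_tx_queues */
	return txq;
}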
1204 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { in closest_timer()
1205 delta = time - s->timer_val[i]; in closest_timer()
1207 delta = -delta; in closest_timer()
1220 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { in closest_thres()
1221 delta = thres - s->counter_val[i]; in closest_thres()
1223 delta = -delta; in closest_thres()
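/*
 * Editorial sketch (not from the driver): closest_timer() and closest_thres()
 * above both pick the table index whose value is nearest the requested one by
 * minimising the absolute difference:
 */
static int closest_index(const unsigned int *table, int len, unsigned int want)
{
	unsigned int delta, min_delta = ~0U;
	int i, match = 0;

	for (i = 0; i < len; i++) {
		delta = want > table[i] ? want - table[i] : table[i] - want;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}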
1233 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1235 * @us: the hold-off time in us, or 0 to disable timer
1236 * @cnt: the hold-off packet count, or 0 to disable counter
1238 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1244 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params()
1253 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1254 if (q->desc && q->pktcnt_idx != new_idx) { in cxgb4_set_rspq_intr_params()
1259 FW_PARAMS_PARAM_YZ_V(q->cntxt_id); in cxgb4_set_rspq_intr_params()
1260 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1265 q->pktcnt_idx = new_idx; in cxgb4_set_rspq_intr_params()
1268 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1269 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); in cxgb4_set_rspq_intr_params()
1275 netdev_features_t changed = dev->features ^ features; in cxgb_set_features()
1282 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_set_features()
1283 pi->viid_mirror, -1, -1, -1, -1, in cxgb_set_features()
1286 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; in cxgb_set_features()
1292 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1293 return -1; in setup_debugfs()
1304 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1305 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1306 cxgb4_quiesce_rx(&mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1308 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1309 cxgb4_clear_msix_aff(mirror_rxq->msix->vec, in cxgb4_port_mirror_free_rxq()
1310 mirror_rxq->msix->aff_mask); in cxgb4_port_mirror_free_rxq()
1311 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1312 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1315 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1323 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1328 if (!pi->vi_mirror_count) in cxgb4_port_mirror_alloc_queues()
1331 if (s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_alloc_queues()
1334 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1336 return -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1338 s->mirror_rxq[pi->port_id] = mirror_rxq; in cxgb4_port_mirror_alloc_queues()
1340 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1341 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1343 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { in cxgb4_port_mirror_alloc_queues()
1344 mirror_rxq = &s->mirror_rxq[pi->port_id][i]; in cxgb4_port_mirror_alloc_queues()
1354 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1355 snprintf(mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1356 sizeof(mirror_rxq->msix->desc), in cxgb4_port_mirror_alloc_queues()
1357 "%s-mirrorrxq%d", dev->name, i); in cxgb4_port_mirror_alloc_queues()
1360 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1366 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; in cxgb4_port_mirror_alloc_queues()
1368 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1369 dev, msix, &mirror_rxq->fl, in cxgb4_port_mirror_alloc_queues()
1374 /* Setup MSI-X vectors for Mirror Rxqs */ in cxgb4_port_mirror_alloc_queues()
1375 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1376 ret = request_irq(mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1378 mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1379 &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1383 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1384 &mirror_rxq->msix->aff_mask, i); in cxgb4_port_mirror_alloc_queues()
1388 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1392 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1394 ret = -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1398 mirror_rxq = &s->mirror_rxq[pi->port_id][0]; in cxgb4_port_mirror_alloc_queues()
1399 for (i = 0; i < pi->rss_size; i++) in cxgb4_port_mirror_alloc_queues()
1400 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; in cxgb4_port_mirror_alloc_queues()
1402 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); in cxgb4_port_mirror_alloc_queues()
1410 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1413 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1416 while (rxqid-- > 0) in cxgb4_port_mirror_alloc_queues()
1418 &s->mirror_rxq[pi->port_id][rxqid]); in cxgb4_port_mirror_alloc_queues()
1420 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_alloc_queues()
1421 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_alloc_queues()
1429 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1432 if (!pi->vi_mirror_count) in cxgb4_port_mirror_free_queues()
1435 if (!s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_free_queues()
1438 for (i = 0; i < pi->nmirrorqsets; i++) in cxgb4_port_mirror_free_queues()
1440 &s->mirror_rxq[pi->port_id][i]); in cxgb4_port_mirror_free_queues()
1442 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_free_queues()
1443 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_free_queues()
1450 int ret, idx = -1; in cxgb4_port_mirror_start()
1452 if (!pi->vi_mirror_count) in cxgb4_port_mirror_start()
1460 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1461 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in cxgb4_port_mirror_start()
1462 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, in cxgb4_port_mirror_start()
1463 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in cxgb4_port_mirror_start()
1465 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1467 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1475 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, in cxgb4_port_mirror_start()
1476 dev->dev_addr, true, NULL); in cxgb4_port_mirror_start()
1478 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1480 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1491 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1495 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1497 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1507 if (!pi->vi_mirror_count) in cxgb4_port_mirror_stop()
1510 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1520 if (!pi->nmirrorqsets) in cxgb4_port_mirror_alloc()
1521 return -EOPNOTSUPP; in cxgb4_port_mirror_alloc()
1523 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1524 if (pi->viid_mirror) { in cxgb4_port_mirror_alloc()
1525 pi->vi_mirror_count++; in cxgb4_port_mirror_alloc()
1529 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1530 &pi->viid_mirror); in cxgb4_port_mirror_alloc()
1534 pi->vi_mirror_count = 1; in cxgb4_port_mirror_alloc()
1536 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1546 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1553 pi->vi_mirror_count = 0; in cxgb4_port_mirror_alloc()
1554 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1555 pi->viid_mirror = 0; in cxgb4_port_mirror_alloc()
1558 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1567 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1568 if (!pi->viid_mirror) in cxgb4_port_mirror_free()
1571 if (pi->vi_mirror_count > 1) { in cxgb4_port_mirror_free()
1572 pi->vi_mirror_count--; in cxgb4_port_mirror_free()
1579 pi->vi_mirror_count = 0; in cxgb4_port_mirror_free()
1580 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1581 pi->viid_mirror = 0; in cxgb4_port_mirror_free()
1584 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1588 * upper-layer driver support
1592 * Allocate an active-open TID and set it to the supplied value.
1596 int atid = -1; in cxgb4_alloc_atid()
1598 spin_lock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1599 if (t->afree) { in cxgb4_alloc_atid()
1600 union aopen_entry *p = t->afree; in cxgb4_alloc_atid()
1602 atid = (p - t->atid_tab) + t->atid_base; in cxgb4_alloc_atid()
1603 t->afree = p->next; in cxgb4_alloc_atid()
1604 p->data = data; in cxgb4_alloc_atid()
1605 t->atids_in_use++; in cxgb4_alloc_atid()
1607 spin_unlock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1613 * Release an active-open TID.
1617 union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; in cxgb4_free_atid()
1619 spin_lock_bh(&t->atid_lock); in cxgb4_free_atid()
1620 p->next = t->afree; in cxgb4_free_atid()
1621 t->afree = p; in cxgb4_free_atid()
1622 t->atids_in_use--; in cxgb4_free_atid()
1623 spin_unlock_bh(&t->atid_lock); in cxgb4_free_atid()
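/*
 * Editorial sketch (not from the driver): cxgb4_alloc_atid()/cxgb4_free_atid()
 * above manage active-open TIDs with an intrusive free list: a free entry
 * stores the pointer to the next free entry, an allocated entry stores the
 * caller's data.  Simplified single-threaded version for illustration:
 */
union aopen_entry_sketch {
	void *data;
	union aopen_entry_sketch *next;
};

struct atid_table_sketch {
	union aopen_entry_sketch *tab;		/* array of natids entries */
	union aopen_entry_sketch *afree;	/* head of the free list */
	unsigned int base;			/* first atid number */
};

static int atid_alloc_sketch(struct atid_table_sketch *t, void *data)
{
	union aopen_entry_sketch *p = t->afree;

	if (!p)
		return -1;			/* table exhausted */
	t->afree = p->next;
	p->data = data;
	return (int)(p - t->tab) + t->base;	/* array index + base = atid */
}

static void atid_free_sketch(struct atid_table_sketch *t, unsigned int atid)
{
	union aopen_entry_sketch *p = &t->tab[atid - t->base];

	p->next = t->afree;			/* push back onto the free list */
	t->afree = p;
}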
1634 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1636 stid = find_first_zero_bit(t->stid_bmap, t->nstids); in cxgb4_alloc_stid()
1637 if (stid < t->nstids) in cxgb4_alloc_stid()
1638 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_stid()
1640 stid = -1; in cxgb4_alloc_stid()
1642 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); in cxgb4_alloc_stid()
1644 stid = -1; in cxgb4_alloc_stid()
1647 t->stid_tab[stid].data = data; in cxgb4_alloc_stid()
1648 stid += t->stid_base; in cxgb4_alloc_stid()
1654 t->stids_in_use += 2; in cxgb4_alloc_stid()
1655 t->v6_stids_in_use += 2; in cxgb4_alloc_stid()
1657 t->stids_in_use++; in cxgb4_alloc_stid()
1660 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1671 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1673 stid = find_next_zero_bit(t->stid_bmap, in cxgb4_alloc_sftid()
1674 t->nstids + t->nsftids, t->nstids); in cxgb4_alloc_sftid()
1675 if (stid < (t->nstids + t->nsftids)) in cxgb4_alloc_sftid()
1676 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_sftid()
1678 stid = -1; in cxgb4_alloc_sftid()
1680 stid = -1; in cxgb4_alloc_sftid()
1683 t->stid_tab[stid].data = data; in cxgb4_alloc_sftid()
1684 stid -= t->nstids; in cxgb4_alloc_sftid()
1685 stid += t->sftid_base; in cxgb4_alloc_sftid()
1686 t->sftids_in_use++; in cxgb4_alloc_sftid()
1688 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1698 if (t->nsftids && (stid >= t->sftid_base)) { in cxgb4_free_stid()
1699 stid -= t->sftid_base; in cxgb4_free_stid()
1700 stid += t->nstids; in cxgb4_free_stid()
1702 stid -= t->stid_base; in cxgb4_free_stid()
1705 spin_lock_bh(&t->stid_lock); in cxgb4_free_stid()
1707 __clear_bit(stid, t->stid_bmap); in cxgb4_free_stid()
1709 bitmap_release_region(t->stid_bmap, stid, 1); in cxgb4_free_stid()
1710 t->stid_tab[stid].data = NULL; in cxgb4_free_stid()
1711 if (stid < t->nstids) { in cxgb4_free_stid()
1713 t->stids_in_use -= 2; in cxgb4_free_stid()
1714 t->v6_stids_in_use -= 2; in cxgb4_free_stid()
1716 t->stids_in_use--; in cxgb4_free_stid()
1719 t->sftids_in_use--; in cxgb4_free_stid()
1722 spin_unlock_bh(&t->stid_lock); in cxgb4_free_stid()
1748 void **p = &t->tid_tab[tid - t->tid_base]; in cxgb4_queue_tid_release()
1750 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1751 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1753 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1754 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1755 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1756 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1758 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1771 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1772 while (adap->tid_release_head) { in process_tid_release_list()
1773 void **p = adap->tid_release_head; in process_tid_release_list()
1775 p = (void *)p - chan; in process_tid_release_list()
1777 adap->tid_release_head = *p; in process_tid_release_list()
1779 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1785 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1787 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1789 adap->tid_release_task_busy = false; in process_tid_release_list()
1790 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
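/*
 * Editorial sketch (not from the driver): the deferred TID-release list above
 * tags each queued list pointer with the channel number in its low bits
 * (entries are pointer-aligned, so the low two bits are otherwise always
 * zero).  The tag/untag arithmetic, assuming chan < 4:
 */
#include <stdint.h>

static void *tag_release_ptr(void **p, unsigned int chan)
{
	return (void *)((uintptr_t)p | chan);		/* stash chan in the low bits */
}

static void **untag_release_ptr(void *tagged, unsigned int *chan)
{
	*chan = (uintptr_t)tagged & 3;			/* recover the channel */
	return (void **)((uintptr_t)tagged & ~(uintptr_t)3);
}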
1803 WARN_ON(tid_out_of_range(&adap->tids, tid)); in cxgb4_remove_tid()
1805 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1806 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1807 atomic_dec(&t->conns_in_use); in cxgb4_remove_tid()
1808 if (t->hash_base && (tid >= t->hash_base)) { in cxgb4_remove_tid()
1810 atomic_sub(2, &t->hash_tids_in_use); in cxgb4_remove_tid()
1812 atomic_dec(&t->hash_tids_in_use); in cxgb4_remove_tid()
1815 atomic_sub(2, &t->tids_in_use); in cxgb4_remove_tid()
1817 atomic_dec(&t->tids_in_use); in cxgb4_remove_tid()
1836 unsigned int max_ftids = t->nftids + t->nsftids; in tid_init()
1837 unsigned int natids = t->natids; in tid_init()
1844 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); in tid_init()
1845 ftid_bmap_size = BITS_TO_LONGS(t->nftids); in tid_init()
1846 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids); in tid_init()
1847 eotid_bmap_size = BITS_TO_LONGS(t->neotids); in tid_init()
1848 size = t->ntids * sizeof(*t->tid_tab) + in tid_init()
1849 natids * sizeof(*t->atid_tab) + in tid_init()
1850 t->nstids * sizeof(*t->stid_tab) + in tid_init()
1851 t->nsftids * sizeof(*t->stid_tab) + in tid_init()
1853 t->nhpftids * sizeof(*t->hpftid_tab) + in tid_init()
1855 max_ftids * sizeof(*t->ftid_tab) + in tid_init()
1857 t->neotids * sizeof(*t->eotid_tab) + in tid_init()
1860 t->tid_tab = kvzalloc(size, GFP_KERNEL); in tid_init()
1861 if (!t->tid_tab) in tid_init()
1862 return -ENOMEM; in tid_init()
1864 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; in tid_init()
1865 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; in tid_init()
1866 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; in tid_init()
1867 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; in tid_init()
1868 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids]; in tid_init()
1869 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size]; in tid_init()
1870 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; in tid_init()
1871 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; in tid_init()
1872 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; in tid_init()
1873 spin_lock_init(&t->stid_lock); in tid_init()
1874 spin_lock_init(&t->atid_lock); in tid_init()
1875 spin_lock_init(&t->ftid_lock); in tid_init()
1877 t->stids_in_use = 0; in tid_init()
1878 t->v6_stids_in_use = 0; in tid_init()
1879 t->sftids_in_use = 0; in tid_init()
1880 t->afree = NULL; in tid_init()
1881 t->atids_in_use = 0; in tid_init()
1882 atomic_set(&t->tids_in_use, 0); in tid_init()
1883 atomic_set(&t->conns_in_use, 0); in tid_init()
1884 atomic_set(&t->hash_tids_in_use, 0); in tid_init()
1885 atomic_set(&t->eotids_in_use, 0); in tid_init()
1889 while (--natids) in tid_init()
1890 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; in tid_init()
1891 t->afree = t->atid_tab; in tid_init()
1895 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); in tid_init()
1897 if (!t->stid_base && in tid_init()
1898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1899 __set_bit(0, t->stid_bmap); in tid_init()
1901 if (t->neotids) in tid_init()
1902 bitmap_zero(t->eotid_bmap, t->neotids); in tid_init()
1905 if (t->nhpftids) in tid_init()
1906 bitmap_zero(t->hpftid_bmap, t->nhpftids); in tid_init()
1907 bitmap_zero(t->ftid_bmap, t->nftids); in tid_init()
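/*
 * Editorial sketch (not from the driver): tid_init() above sizes a single
 * kvzalloc() region for all of its tables and bitmaps, then carves it up by
 * casting successive sub-ranges.  The same pattern in miniature, with one
 * pointer table and one bitmap carved from one calloc() block:
 */
#include <stdlib.h>

struct tid_carve_demo {
	void **tid_tab;		/* ntids entries */
	unsigned long *bmap;	/* one bit per tid */
	void *block;
};

static int tid_carve_demo_init(struct tid_carve_demo *d, unsigned int ntids)
{
	unsigned int bits_per_word = 8 * sizeof(unsigned long);
	unsigned int bmap_words = (ntids + bits_per_word - 1) / bits_per_word;
	size_t size = ntids * sizeof(*d->tid_tab) +
		      bmap_words * sizeof(unsigned long);

	d->block = calloc(1, size);
	if (!d->block)
		return -1;
	d->tid_tab = d->block;
	d->bmap = (unsigned long *)&d->tid_tab[ntids];	/* placed right after tid_tab */
	return 0;
}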
1912 * cxgb4_create_server - create an IP server
1935 return -ENOMEM; in cxgb4_create_server()
1941 req->local_port = sport; in cxgb4_create_server()
1942 req->peer_port = htons(0); in cxgb4_create_server()
1943 req->local_ip = sip; in cxgb4_create_server()
1944 req->peer_ip = htonl(0); in cxgb4_create_server()
1945 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1946 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server()
1947 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server()
1954 /* cxgb4_create_server6 - create an IPv6 server
1976 return -ENOMEM; in cxgb4_create_server6()
1982 req->local_port = sport; in cxgb4_create_server6()
1983 req->peer_port = htons(0); in cxgb4_create_server6()
1984 req->local_ip_hi = *(__be64 *)(sip->s6_addr); in cxgb4_create_server6()
1985 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); in cxgb4_create_server6()
1986 req->peer_ip_hi = cpu_to_be64(0); in cxgb4_create_server6()
1987 req->peer_ip_lo = cpu_to_be64(0); in cxgb4_create_server6()
1988 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1989 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server6()
1990 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server6()
2009 return -ENOMEM; in cxgb4_remove_server()
2014 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : in cxgb4_remove_server()
2022 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2036 while (i < NMTUS - 1 && mtus[i + 1] <= mtu) in cxgb4_best_mtu()
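/*
 * Editorial sketch (not from the driver): cxgb4_best_mtu() above walks the
 * ascending MTU table and returns the largest entry that does not exceed the
 * requested MTU (and optionally its index):
 */
static unsigned int best_mtu_sketch(const unsigned short *mtus, int nmtus,
				    unsigned short mtu, unsigned int *idx)
{
	unsigned int i = 0;

	while (i < (unsigned int)(nmtus - 1) && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;	/* table index of the chosen entry */
	return mtus[i];
}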
2045 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2067 unsigned short data_size_align_mask = data_size_align - 1; in cxgb4_best_aligned_mtu()
2075 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { in cxgb4_best_aligned_mtu()
2076 unsigned short data_size = mtus[mtu_idx] - header_size; in cxgb4_best_aligned_mtu()
2096 mtu_idx--; in cxgb4_best_aligned_mtu()
2103 mtu_idx - aligned_mtu_idx <= 1) in cxgb4_best_aligned_mtu()
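/*
 * Editorial sketch (not from the driver): cxgb4_best_aligned_mtu() above
 * prefers MTU entries whose data payload (MTU minus header size) is a
 * multiple of the requested alignment; with a power-of-two alignment that
 * test is a simple mask, matching the data_size_align_mask above:
 */
static int payload_is_aligned(unsigned short mtu, unsigned int header_size,
			      unsigned short data_size_align)
{
	unsigned short mask = data_size_align - 1;	/* alignment must be a power of two */

	return ((mtu - header_size) & mask) == 0;
}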
2116 * cxgb4_port_chan - get the HW channel of a port
2123 return netdev2pinfo(dev)->tx_chan; in cxgb4_port_chan()
2128 * cxgb4_port_e2cchan - get the HW c-channel of a port
2131 * Return the HW RX c-channel of the given port.
2135 return netdev2pinfo(dev)->rx_cchan; in cxgb4_port_e2cchan()
2146 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2158 * cxgb4_port_viid - get the VI id of a port
2165 return netdev2pinfo(dev)->viid; in cxgb4_port_viid()
2170 * cxgb4_port_idx - get the index of a port
2177 return netdev2pinfo(dev)->port_id; in cxgb4_port_idx()
2186 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2188 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2208 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2218 spin_lock(&adap->win0_lock); in read_eq_indices()
2222 spin_unlock(&adap->win0_lock); in read_eq_indices()
2246 delta = pidx - hw_pidx; in cxgb4_sync_txq_pidx()
2248 delta = size - hw_pidx + pidx; in cxgb4_sync_txq_pidx()
2250 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2274 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2302 memaddr = offset - edc0_end; in cxgb4_read_tpte()
2306 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2309 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2310 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2316 memaddr = offset - mc0_end; in cxgb4_read_tpte()
2327 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2329 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2333 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2335 return -EINVAL; in cxgb4_read_tpte()
2375 const struct net_device *netdev = neigh->dev; in check_neigh_update()
2379 parent = netdev->dev.parent; in check_neigh_update()
2380 if (parent && parent->driver == &cxgb4_driver.driver) in check_neigh_update()
2410 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2429 spin_lock_irqsave(&q->db_lock, flags); in disable_txq_db()
2430 q->db_disabled = 1; in disable_txq_db()
2431 spin_unlock_irqrestore(&q->db_lock, flags); in disable_txq_db()
2436 spin_lock_irq(&q->db_lock); in enable_txq_db()
2437 if (q->db_pidx_inc) { in enable_txq_db()
2443 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); in enable_txq_db()
2444 q->db_pidx_inc = 0; in enable_txq_db()
2446 q->db_disabled = 0; in enable_txq_db()
2447 spin_unlock_irq(&q->db_lock); in enable_txq_db()
2454 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2455 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2458 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2461 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2462 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in disable_dbs()
2464 disable_txq_db(&txq->q); in disable_dbs()
2469 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2476 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2477 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2480 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2483 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2484 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in enable_dbs()
2486 enable_txq_db(adap, &txq->q); in enable_dbs()
2491 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2498 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2499 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2511 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2525 spin_lock_irq(&q->db_lock); in sync_txq_pidx()
2526 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2529 if (q->db_pidx != hw_pidx) { in sync_txq_pidx()
2533 if (q->db_pidx >= hw_pidx) in sync_txq_pidx()
2534 delta = q->db_pidx - hw_pidx; in sync_txq_pidx()
2536 delta = q->size - hw_pidx + q->db_pidx; in sync_txq_pidx()
2538 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2544 QID_V(q->cntxt_id) | val); in sync_txq_pidx()
2547 q->db_disabled = 0; in sync_txq_pidx()
2548 q->db_pidx_inc = 0; in sync_txq_pidx()
2549 spin_unlock_irq(&q->db_lock); in sync_txq_pidx()
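/*
 * Editorial sketch (not from the driver): sync_txq_pidx() above computes how
 * many producer-index increments the hardware missed, allowing for the ring
 * wrapping past its end:
 */
static unsigned int ring_pidx_delta(unsigned int sw_pidx, unsigned int hw_pidx,
				    unsigned int ring_size)
{
	if (sw_pidx >= hw_pidx)
		return sw_pidx - hw_pidx;		/* no wrap */
	return ring_size - hw_pidx + sw_pidx;		/* wrapped past the end */
}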
2558 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2559 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2562 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2564 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2565 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in recover_all_queues()
2567 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2572 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2581 if (is_t4(adap->params.chip)) { in process_db_drop()
2589 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2600 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2604 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2606 /* Re-enable BAR2 WC */ in process_db_drop()
2610 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2616 if (is_t4(adap->params.chip)) { in t4_db_full()
2621 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2627 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2631 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2647 list_del(&adap->list_node); in detach_ulds()
2650 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2651 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2667 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2668 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2678 struct net_device *event_dev = ifa->idev->dev; in cxgb4_inet6addr_handler()
2686 if (event_dev->flags & IFF_MASTER) { in cxgb4_inet6addr_handler()
2690 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2694 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2706 parent = event_dev->dev.parent; in cxgb4_inet6addr_handler()
2708 if (parent && parent->driver == &cxgb4_driver.driver) { in cxgb4_inet6addr_handler()
2737 dev = adap->port[i]; in update_clip()
2751 * cxgb_up - enable the adapter
2762 struct sge *s = &adap->sge; in cxgb_up()
2773 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2774 if (s->nd_msix_idx < 0) { in cxgb_up()
2775 err = -ENOMEM; in cxgb_up()
2779 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2781 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2789 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2790 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2792 adap->port[0]->name, adap); in cxgb_up()
2800 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2810 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2812 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2822 cancel_work_sync(&adapter->tid_release_task); in cxgb_down()
2823 cancel_work_sync(&adapter->db_full_task); in cxgb_down()
2824 cancel_work_sync(&adapter->db_drop_task); in cxgb_down()
2825 adapter->tid_release_task_busy = false; in cxgb_down()
2826 adapter->tid_release_head = NULL; in cxgb_down()
2831 adapter->flags &= ~CXGB4_FULL_INIT_DONE; in cxgb_down()
2840 struct adapter *adapter = pi->adapter; in cxgb_open()
2845 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_open()
2862 if (pi->nmirrorqsets) { in cxgb_open()
2863 mutex_lock(&pi->vi_mirror_mutex); in cxgb_open()
2871 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2881 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2888 struct adapter *adapter = pi->adapter; in cxgb_close()
2893 ret = t4_enable_pi_params(adapter, adapter->pf, pi, in cxgb_close()
2902 if (pi->nmirrorqsets) { in cxgb_close()
2903 mutex_lock(&pi->vi_mirror_mutex); in cxgb_close()
2906 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_close()
2925 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2926 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2930 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2938 if (f->valid) in cxgb4_create_server_filter()
2942 memset(&f->fs, 0, sizeof(struct ch_filter_specification)); in cxgb4_create_server_filter()
2943 f->fs.val.lport = be16_to_cpu(sport); in cxgb4_create_server_filter()
2944 f->fs.mask.lport = ~0; in cxgb4_create_server_filter()
2948 f->fs.val.lip[i] = val[i]; in cxgb4_create_server_filter()
2949 f->fs.mask.lip[i] = ~0; in cxgb4_create_server_filter()
2951 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2952 f->fs.val.iport = port; in cxgb4_create_server_filter()
2953 f->fs.mask.iport = mask; in cxgb4_create_server_filter()
2957 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2958 f->fs.val.proto = IPPROTO_TCP; in cxgb4_create_server_filter()
2959 f->fs.mask.proto = ~0; in cxgb4_create_server_filter()
2962 f->fs.dirsteer = 1; in cxgb4_create_server_filter()
2963 f->fs.iq = queue; in cxgb4_create_server_filter()
2965 f->locked = 1; in cxgb4_create_server_filter()
2966 f->fs.rpttid = 1; in cxgb4_create_server_filter()
2971 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2991 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2992 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2994 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2996 f->locked = 0; in cxgb4_remove_server_filter()
3007 struct adapter *adapter = p->adapter; in cxgb_get_stats()
3013 spin_lock(&adapter->stats_lock); in cxgb_get_stats()
3015 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3018 t4_get_port_stats_offset(adapter, p->tx_chan, &stats, in cxgb_get_stats()
3019 &p->stats_base); in cxgb_get_stats()
3020 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3022 ns->tx_bytes = stats.tx_octets; in cxgb_get_stats()
3023 ns->tx_packets = stats.tx_frames; in cxgb_get_stats()
3024 ns->rx_bytes = stats.rx_octets; in cxgb_get_stats()
3025 ns->rx_packets = stats.rx_frames; in cxgb_get_stats()
3026 ns->multicast = stats.rx_mcast_frames; in cxgb_get_stats()
3029 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + in cxgb_get_stats()
3031 ns->rx_over_errors = 0; in cxgb_get_stats()
3032 ns->rx_crc_errors = stats.rx_fcs_err; in cxgb_get_stats()
3033 ns->rx_frame_errors = stats.rx_symbol_err; in cxgb_get_stats()
3034 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + in cxgb_get_stats()
3038 ns->rx_missed_errors = 0; in cxgb_get_stats()
3041 ns->tx_aborted_errors = 0; in cxgb_get_stats()
3042 ns->tx_carrier_errors = 0; in cxgb_get_stats()
3043 ns->tx_fifo_errors = 0; in cxgb_get_stats()
3044 ns->tx_heartbeat_errors = 0; in cxgb_get_stats()
3045 ns->tx_window_errors = 0; in cxgb_get_stats()
3047 ns->tx_errors = stats.tx_error_frames; in cxgb_get_stats()
3048 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + in cxgb_get_stats()
3049 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; in cxgb_get_stats()
3057 struct adapter *adapter = pi->adapter; in cxgb_ioctl()
3058 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; in cxgb_ioctl()
3062 if (pi->mdio_addr < 0) in cxgb_ioctl()
3063 return -EOPNOTSUPP; in cxgb_ioctl()
3064 data->phy_id = pi->mdio_addr; in cxgb_ioctl()
3068 if (mdio_phy_id_is_c45(data->phy_id)) { in cxgb_ioctl()
3069 prtad = mdio_phy_id_prtad(data->phy_id); in cxgb_ioctl()
3070 devad = mdio_phy_id_devad(data->phy_id); in cxgb_ioctl()
3071 } else if (data->phy_id < 32) { in cxgb_ioctl()
3072 prtad = data->phy_id; in cxgb_ioctl()
3074 data->reg_num &= 0x1f; in cxgb_ioctl()
3076 return -EINVAL; in cxgb_ioctl()
3078 mbox = pi->adapter->pf; in cxgb_ioctl()
3080 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3081 data->reg_num, &data->val_out); in cxgb_ioctl()
3083 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3084 data->reg_num, data->val_in); in cxgb_ioctl()
3087 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3088 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3089 -EFAULT : 0; in cxgb_ioctl()
3091 if (copy_from_user(&pi->tstamp_config, req->ifr_data, in cxgb_ioctl()
3092 sizeof(pi->tstamp_config))) in cxgb_ioctl()
3093 return -EFAULT; in cxgb_ioctl()
3095 if (!is_t4(adapter->params.chip)) { in cxgb_ioctl()
3096 switch (pi->tstamp_config.tx_type) { in cxgb_ioctl()
3101 return -ERANGE; in cxgb_ioctl()
3104 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3106 pi->rxtstamp = false; in cxgb_ioctl()
3110 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3114 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3122 pi->rxtstamp = true; in cxgb_ioctl()
3125 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3127 return -ERANGE; in cxgb_ioctl()
3130 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && in cxgb_ioctl()
3131 (pi->tstamp_config.rx_filter == in cxgb_ioctl()
3133 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) in cxgb_ioctl()
3134 pi->ptp_enable = false; in cxgb_ioctl()
3137 if (pi->tstamp_config.rx_filter != in cxgb_ioctl()
3141 pi->ptp_enable = true; in cxgb_ioctl()
3145 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3147 pi->rxtstamp = false; in cxgb_ioctl()
3150 pi->rxtstamp = true; in cxgb_ioctl()
3153 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3155 return -ERANGE; in cxgb_ioctl()
3158 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3159 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3160 -EFAULT : 0; in cxgb_ioctl()
3162 return -EOPNOTSUPP; in cxgb_ioctl()
3170 set_rxmode(dev, -1, false); in cxgb_set_rxmode()
3178 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_change_mtu()
3179 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); in cxgb_change_mtu()
3181 dev->mtu = new_mtu; in cxgb_change_mtu()
3204 adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev, in cxgb4_mgmt_fill_vf_station_mac_addr()
3206 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3210 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3226 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3228 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3229 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
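/*
 * Editorial sketch (not from the driver): cxgb4_mgmt_fill_vf_station_mac_addr()
 * above derives each VF's station MAC from a base address by writing
 * (pf * nvfs + vf) into the last octet, so every PF/VF pair gets a distinct
 * address (assuming the sum fits in one octet):
 */
static void derive_vf_mac(unsigned char mac[6], const unsigned char base[6],
			  unsigned int pf, unsigned int nvfs, unsigned int vf)
{
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = base[i];
	mac[5] = (unsigned char)(pf * nvfs + vf);	/* unique per PF/VF */
}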
3236 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac()
3241 dev_err(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3244 return -EINVAL; in cxgb4_mgmt_set_vf_mac()
3247 dev_info(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3251 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3259 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config()
3262 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3263 return -EINVAL; in cxgb4_mgmt_get_vf_config()
3264 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3266 ivi->vf = vf; in cxgb4_mgmt_get_vf_config()
3267 ivi->max_tx_rate = vfinfo->tx_rate; in cxgb4_mgmt_get_vf_config()
3268 ivi->min_tx_rate = 0; in cxgb4_mgmt_get_vf_config()
3269 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); in cxgb4_mgmt_get_vf_config()
3270 ivi->vlan = vfinfo->vlan; in cxgb4_mgmt_get_vf_config()
3271 ivi->linkstate = vfinfo->link_state; in cxgb4_mgmt_get_vf_config()
3281 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; in cxgb4_mgmt_get_phys_port_id()
3282 ppid->id_len = sizeof(phy_port_id); in cxgb4_mgmt_get_phys_port_id()
3283 memcpy(ppid->id, &phy_port_id, ppid->id_len); in cxgb4_mgmt_get_phys_port_id()
3291 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate()
3298 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3299 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3302 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3305 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3314 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3317 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3319 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3320 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3322 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3324 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3325 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3331 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3333 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3337 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3338 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3342 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3343 "Max tx rate %d for VF %d can't be > link-speed %u", in cxgb4_mgmt_set_vf_rate()
3345 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3350 pktsize = pktsize - sizeof(struct ethhdr) - 4; in cxgb4_mgmt_set_vf_rate()
3352 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); in cxgb4_mgmt_set_vf_rate()
3353 /* configure Traffic Class for rate-limiting */ in cxgb4_mgmt_set_vf_rate()
3359 pi->tx_chan, class_id, 0, in cxgb4_mgmt_set_vf_rate()
3362 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3364 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3366 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3374 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3377 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3379 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3380 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3382 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3383 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3384 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
3392 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan()
3395 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3396 return -EINVAL; in cxgb4_mgmt_set_vf_vlan()
3399 return -EPROTONOSUPPORT; in cxgb4_mgmt_set_vf_vlan()
3401 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3403 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3407 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3408 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3416 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state()
3420 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3421 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3437 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3442 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3445 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3447 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3448 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3451 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3462 if (!is_valid_ether_addr(addr->sa_data)) in cxgb_set_mac_addr()
3463 return -EADDRNOTAVAIL; in cxgb_set_mac_addr()
3465 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in cxgb_set_mac_addr()
3466 addr->sa_data, true, &pi->smt_idx); in cxgb_set_mac_addr()
3470 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); in cxgb_set_mac_addr()
3478 struct adapter *adap = pi->adapter; in cxgb_netpoll()
3480 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3482 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3484 for (i = pi->nqsets; i; i--, rx++) in cxgb_netpoll()
3485 t4_sge_intr_msix(0, &rx->rspq); in cxgb_netpoll()
3494 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate()
3502 return -ENOTSUPP; in cxgb_set_tx_maxrate()
3504 if (index < 0 || index > pi->nqsets - 1) in cxgb_set_tx_maxrate()
3505 return -EINVAL; in cxgb_set_tx_maxrate()
3507 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3508 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3511 return -EINVAL; in cxgb_set_tx_maxrate()
3516 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { in cxgb_set_tx_maxrate()
3517 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3519 index, e->idx, e->info.u.params.level); in cxgb_set_tx_maxrate()
3520 return -EBUSY; in cxgb_set_tx_maxrate()
3528 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3531 return -ERANGE; in cxgb_set_tx_maxrate()
3541 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3543 index, pi->port_id, err); in cxgb_set_tx_maxrate()
3557 p.u.params.channel = pi->tx_chan; in cxgb_set_tx_maxrate()
3562 p.u.params.pktsize = dev->mtu; in cxgb_set_tx_maxrate()
3566 return -ENOMEM; in cxgb_set_tx_maxrate()
3571 qe.class = e->idx; in cxgb_set_tx_maxrate()
3575 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3583 switch (cls_flower->command) { in cxgb_setup_tc_flower()
3591 return -EOPNOTSUPP; in cxgb_setup_tc_flower()
3598 switch (cls_u32->command) { in cxgb_setup_tc_cls_u32()
3605 return -EOPNOTSUPP; in cxgb_setup_tc_cls_u32()
3615 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3616 return -ENOMEM; in cxgb_setup_tc_matchall()
3618 switch (cls_matchall->command) { in cxgb_setup_tc_matchall()
3631 return -EOPNOTSUPP; in cxgb_setup_tc_matchall()
3641 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3642 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3644 pi->port_id); in cxgb_setup_tc_block_ingress_cb()
3645 return -EINVAL; in cxgb_setup_tc_block_ingress_cb()
3649 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3659 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3670 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3671 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3673 pi->port_id); in cxgb_setup_tc_block_egress_cb()
3674 return -EINVAL; in cxgb_setup_tc_block_egress_cb()
3678 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3687 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3695 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3696 return -ENOMEM; in cxgb_setup_tc_mqprio()
3710 pi->tc_block_shared = f->block_shared; in cxgb_setup_tc_block()
3711 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { in cxgb_setup_tc_block()
3732 return -EOPNOTSUPP; in cxgb_setup_tc()
3741 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_unset_port()
3745 switch (ti->type) { in cxgb_udp_tunnel_unset_port()
3747 adapter->vxlan_port = 0; in cxgb_udp_tunnel_unset_port()
3751 adapter->geneve_port = 0; in cxgb_udp_tunnel_unset_port()
3755 return -EINVAL; in cxgb_udp_tunnel_unset_port()
3761 if (!adapter->rawf_cnt) in cxgb_udp_tunnel_unset_port()
3765 ret = t4_free_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_unset_port()
3767 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_unset_port()
3768 1, pi->port_id, false); in cxgb_udp_tunnel_unset_port()
3784 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_set_port()
3788 switch (ti->type) { in cxgb_udp_tunnel_set_port()
3790 adapter->vxlan_port = ti->port; in cxgb_udp_tunnel_set_port()
3792 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); in cxgb_udp_tunnel_set_port()
3795 adapter->geneve_port = ti->port; in cxgb_udp_tunnel_set_port()
3797 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); in cxgb_udp_tunnel_set_port()
3800 return -EINVAL; in cxgb_udp_tunnel_set_port()
3812 ret = t4_alloc_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_set_port()
3815 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_set_port()
3816 1, pi->port_id, false); in cxgb_udp_tunnel_set_port()
3819 be16_to_cpu(ti->port)); in cxgb_udp_tunnel_set_port()
3841 struct adapter *adapter = pi->adapter; in cxgb_features_check()
3843 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in cxgb_features_check()
3847 if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) in cxgb_features_check()
3908 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); in cxgb4_mgmt_get_drvinfo()
3909 strlcpy(info->bus_info, pci_name(adapter->pdev), in cxgb4_mgmt_get_drvinfo()
3910 sizeof(info->bus_info)); in cxgb4_mgmt_get_drvinfo()
3929 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3937 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3948 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3949 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3961 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3967 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3968 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3974 adap->vres.ocq.start); in setup_memwin_rdma()
4007 if (!adapter->hma.sgt) in adap_free_hma_mem()
4010 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { in adap_free_hma_mem()
4011 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, in adap_free_hma_mem()
4012 adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL); in adap_free_hma_mem()
4013 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; in adap_free_hma_mem()
4016 for_each_sg(adapter->hma.sgt->sgl, iter, in adap_free_hma_mem()
4017 adapter->hma.sgt->orig_nents, i) { in adap_free_hma_mem()
4023 kfree(adapter->hma.phy_addr); in adap_free_hma_mem()
4024 sg_free_table(adapter->hma.sgt); in adap_free_hma_mem()
4025 kfree(adapter->hma.sgt); in adap_free_hma_mem()
4026 adapter->hma.sgt = NULL; in adap_free_hma_mem()
4045 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in adap_config_hma()
4051 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hma()
4061 dev_err(adapter->pdev_dev, in adap_config_hma()
4062 "HMA size %uMB beyond bounds(%u-%lu)MB\n", in adap_config_hma()
4064 return -EINVAL; in adap_config_hma()
4069 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL); in adap_config_hma()
4070 if (unlikely(!adapter->hma.sgt)) { in adap_config_hma()
4071 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n"); in adap_config_hma()
4072 return -ENOMEM; in adap_config_hma()
4074 sgt = adapter->hma.sgt; in adap_config_hma()
4077 sgt->orig_nents = (hma_size << 20) / (page_size << page_order); in adap_config_hma()
4078 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) { in adap_config_hma()
4079 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n"); in adap_config_hma()
4080 kfree(adapter->hma.sgt); in adap_config_hma()
4081 adapter->hma.sgt = NULL; in adap_config_hma()
4082 return -ENOMEM; in adap_config_hma()
4085 sgl = adapter->hma.sgt->sgl; in adap_config_hma()
4086 node = dev_to_node(adapter->pdev_dev); in adap_config_hma()
4087 for_each_sg(sgl, iter, sgt->orig_nents, i) { in adap_config_hma()
4091 dev_err(adapter->pdev_dev, in adap_config_hma()
4093 ret = -ENOMEM; in adap_config_hma()
4099 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents, in adap_config_hma()
4101 if (!sgt->nents) { in adap_config_hma()
4102 dev_err(adapter->pdev_dev, in adap_config_hma()
4104 ret = -ENOMEM; in adap_config_hma()
4107 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG; in adap_config_hma()
4109 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t), in adap_config_hma()
4111 if (unlikely(!adapter->hma.phy_addr)) in adap_config_hma()
4114 for_each_sg(sgl, iter, sgt->nents, i) { in adap_config_hma()
4116 adapter->hma.phy_addr[i] = sg_dma_address(iter); in adap_config_hma()
4119 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD); in adap_config_hma()
4128 eoc = (i == ncmds - 1) ? 1 : 0; in adap_config_hma()
4133 if (i == ncmds - 1) { in adap_config_hma()
4134 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD; in adap_config_hma()
4159 cpu_to_be64(adapter->hma.phy_addr[j + k]); in adap_config_hma()
4161 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd, in adap_config_hma()
4164 dev_err(adapter->pdev_dev, in adap_config_hma()
4171 dev_info(adapter->pdev_dev, in adap_config_hma()
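The loop above walks the DMA-mapped scatterlist in chunks of at most HMA_MAX_ADDR_IN_CMD addresses per hma_cmd mailbox command, flagging the last one with eoc. A minimal stand-alone sketch of that chunking arithmetic (illustrative names and sizes only; it prints the chunks instead of building firmware commands):

#include <stdio.h>

#define HMA_MAX_ADDR_IN_CMD 5	/* illustrative value, not the driver's */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void send_hma_addrs(const unsigned long long *addr, unsigned int nents)
{
	unsigned int ncmds = DIV_ROUND_UP(nents, HMA_MAX_ADDR_IN_CMD);
	unsigned int i, j, k = 0;

	for (i = 0; i < ncmds; i++) {
		/* The last command carries the remainder; if nents divides
		 * evenly, the remainder is 0 and the last command is full.
		 */
		unsigned int naddr = HMA_MAX_ADDR_IN_CMD;

		if (i == ncmds - 1 && nents % HMA_MAX_ADDR_IN_CMD)
			naddr = nents % HMA_MAX_ADDR_IN_CMD;

		printf("cmd %u (eoc=%d):", i, i == ncmds - 1);
		for (j = 0; j < naddr; j++)
			printf(" %#llx", addr[k++]);
		printf("\n");
	}
}

int main(void)
{
	unsigned long long addr[12];
	unsigned int i;

	for (i = 0; i < 12; i++)
		addr[i] = 0x100000ull * (i + 1);
	send_hma_addrs(addr, 12);	/* 12 entries -> 3 commands: 5, 5, 2 */
	return 0;
}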
4190 dev_err(adap->pdev_dev, in adap_init1()
4197 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4199 c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); in adap_init1()
4200 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4204 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4206 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4210 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4217 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4227 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4233 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4235 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4261 return t4_early_init(adap, adap->pf); in adap_init1()
4279 * them) but need to be explicitly set if we're using hard-coded
4283 * Configuration Files and hard-coded initialization ...
4288 * Fix up various Host-Dependent Parameters like Page Size, Cache in adap_init0_tweaks()
4298 dev_err(&adapter->pdev->dev, in adap_init0_tweaks()
4317 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4380 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4393 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4395 dev_warn(adap->pdev_dev, in adap_init0_phy()
4397 return -EOPNOTSUPP; in adap_init0_phy()
4405 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, in adap_init0_phy()
4406 adap->pdev_dev); in adap_init0_phy()
4414 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4416 phy_info->phy_fw_file, -ret); in adap_init0_phy()
4417 if (phy_info->phy_flash) { in adap_init0_phy()
4421 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4431 spin_lock_bh(&adap->win0_lock); in adap_init0_phy()
4432 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4433 (u8 *)phyf->data, phyf->size); in adap_init0_phy()
4434 spin_unlock_bh(&adap->win0_lock); in adap_init0_phy()
4436 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4437 -ret); in adap_init0_phy()
4441 if (phy_info->phy_fw_version) in adap_init0_phy()
4442 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, in adap_init0_phy()
4443 phyf->size); in adap_init0_phy()
4444 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4446 phy_info->phy_fw_file, new_phy_fw_ver); in adap_init0_phy()
4472 ret = t4_fw_reset(adapter, adapter->mbox, in adap_init0_config()
4478 /* If this is a 10Gb/s-BT adapter make sure the chip-external in adap_init0_config()
4479 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs in adap_init0_config()
4483 if (is_10gbt_device(adapter->pdev->device)) { in adap_init0_config()
4493 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { in adap_init0_config()
4504 dev_err(adapter->pdev_dev, "Device %d is not supported\n", in adap_init0_config()
4505 adapter->pdev->device); in adap_init0_config()
4506 ret = -EINVAL; in adap_init0_config()
4510 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); in adap_init0_config()
4522 if (cf->size >= FLASH_CFG_MAX_SIZE) in adap_init0_config()
4523 ret = -ENOMEM; in adap_init0_config()
4527 ret = t4_query_params(adapter, adapter->mbox, in adap_init0_config()
4528 adapter->pf, 0, 1, params, val); in adap_init0_config()
4540 size_t resid = cf->size & 0x3; in adap_init0_config()
4541 size_t size = cf->size & ~0x3; in adap_init0_config()
4542 __be32 *data = (__be32 *)cf->data; in adap_init0_config()
4547 spin_lock(&adapter->win0_lock); in adap_init0_config()
4565 spin_unlock(&adapter->win0_lock); in adap_init0_config()
4581 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_init0_config()
4588 dev_warn(adapter->pdev_dev, in adap_init0_config()
4608 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4617 if (ret == -ENOENT) { in adap_init0_config()
4624 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, in adap_init0_config()
4637 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ in adap_init0_config()
4649 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4665 dev_err(adapter->pdev_dev, in adap_init0_config()
4668 if (is_t6(adapter->params.chip)) { in adap_init0_config()
4672 dev_info(adapter->pdev_dev, "Successfully enabled " in adap_init0_config()
4680 ret = t4_fw_initialize(adapter, adapter->mbox); in adap_init0_config()
4687 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ in adap_init0_config()
4698 if (config_issued && ret != -ENOENT) in adap_init0_config()
4699 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", in adap_init0_config()
4700 config_name, -ret); in adap_init0_config()
4782 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4785 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4789 if (ret == adap->mbox) in adap_init0()
4790 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4805 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4815 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4817 dev_err(adap->pdev_dev, in adap_init0()
4819 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4820 return -EINVAL; in adap_init0()
4828 ret = -ENOMEM; in adap_init0()
4833 ret = request_firmware(&fw, fw_info->fw_mod_name, in adap_init0()
4834 adap->pdev_dev); in adap_init0()
4836 dev_err(adap->pdev_dev, in adap_init0()
4838 fw_info->fw_mod_name, ret); in adap_init0()
4840 fw_data = fw->data; in adap_init0()
4841 fw_size = fw->size; in adap_init0()
4862 dev_err(adap->pdev_dev, in adap_init0()
4865 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4867 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4869 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4877 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4884 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4894 if (ret == -ENOENT) { in adap_init0()
4895 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4900 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4901 "adapter, error %d\n", -ret); in adap_init0()
4912 dev_err(adap->pdev_dev, in adap_init0()
4928 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4940 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4944 adap->params.nports = hweight32(port_vec); in adap_init0()
4945 adap->params.portvec = port_vec; in adap_init0()
4960 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4964 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4966 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4967 adap->sge.dbqtimer_val); in adap_init0()
4971 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4973 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4974 adap->params.bypass = 1; in adap_init0()
4985 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4988 adap->sge.egr_start = val[0]; in adap_init0()
4989 adap->l2t_start = val[1]; in adap_init0()
4990 adap->l2t_end = val[2]; in adap_init0()
4991 adap->tids.ftid_base = val[3]; in adap_init0()
4992 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4993 adap->sge.ingr_start = val[5]; in adap_init0()
4995 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4998 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5003 adap->tids.hpftid_base = val[0]; in adap_init0()
5004 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
5011 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5014 adap->rawf_start = val[0]; in adap_init0()
5015 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5018 adap->tids.tid_base = in adap_init0()
5030 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5033 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5034 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5036 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5037 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5038 if (!adap->sge.egr_map) { in adap_init0()
5039 ret = -ENOMEM; in adap_init0()
5043 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5044 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5045 if (!adap->sge.ingr_map) { in adap_init0()
5046 ret = -ENOMEM; in adap_init0()
5053 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
5055 if (!adap->sge.starving_fl) { in adap_init0()
5056 ret = -ENOMEM; in adap_init0()
5060 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
5062 if (!adap->sge.txq_maperr) { in adap_init0()
5063 ret = -ENOMEM; in adap_init0()
5068 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
5070 if (!adap->sge.blocked_fl) { in adap_init0()
5071 ret = -ENOMEM; in adap_init0()
5078 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5081 adap->clipt_start = val[0]; in adap_init0()
5082 adap->clipt_end = val[1]; in adap_init0()
5086 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5092 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5094 adap->params.nsched_cls = val[0]; in adap_init0()
5100 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5105 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5106 adap->tids.aftid_base = val[0]; in adap_init0()
5107 adap->tids.aftid_end = val[1]; in adap_init0()
5117 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5125 if (is_t4(adap->params.chip)) { in adap_init0()
5126 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5129 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5131 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5136 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5138 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5141 if (is_t4(adap->params.chip)) { in adap_init0()
5142 adap->params.filter2_wr_support = 0; in adap_init0()
5145 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5147 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5155 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5157 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5167 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5177 adap->params.offload = 1; in adap_init0()
5182 /* query offload-related parameters */ in adap_init0()
5189 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5193 adap->tids.ntids = val[0]; in adap_init0()
5194 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5195 adap->tids.stid_base = val[1]; in adap_init0()
5196 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5206 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5207 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5208 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5209 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5210 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5211 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5212 adap->tids.ftid_base; in adap_init0()
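For a concrete feel of the split just above (made-up numbers, not taken from the hardware): if nftids were 496, DIV_ROUND_UP(496, 3) is 166, so the first 166 filter IDs remain normal filters (nftids becomes 166), the server filter region starts at ftid_base + 166, and nsftids becomes 496 - 166 = 330.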
5214 adap->vres.ddp.start = val[3]; in adap_init0()
5215 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5216 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5221 adap->num_ofld_uld += 1; in adap_init0()
5227 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5230 adap->tids.eotid_base = val[0]; in adap_init0()
5231 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5232 val[1] - val[0] + 1); in adap_init0()
5233 adap->params.ethofld = 1; in adap_init0()
5244 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5248 adap->vres.stag.start = val[0]; in adap_init0()
5249 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5250 adap->vres.rq.start = val[2]; in adap_init0()
5251 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5252 adap->vres.pbl.start = val[4]; in adap_init0()
5253 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5257 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5260 adap->vres.srq.start = val[0]; in adap_init0()
5261 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5263 if (adap->vres.srq.size) { in adap_init0()
5264 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5265 if (!adap->srq) in adap_init0()
5266 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5275 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5279 adap->vres.qp.start = val[0]; in adap_init0()
5280 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5281 adap->vres.cq.start = val[2]; in adap_init0()
5282 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5283 adap->vres.ocq.start = val[4]; in adap_init0()
5284 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5288 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5291 adap->params.max_ordird_qp = 8; in adap_init0()
5292 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5295 adap->params.max_ordird_qp = val[0]; in adap_init0()
5296 adap->params.max_ird_adapter = val[1]; in adap_init0()
5298 dev_info(adap->pdev_dev, in adap_init0()
5300 adap->params.max_ordird_qp, in adap_init0()
5301 adap->params.max_ird_adapter); in adap_init0()
5305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5307 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5311 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5313 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5314 adap->num_ofld_uld += 2; in adap_init0()
5319 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5323 adap->vres.iscsi.start = val[0]; in adap_init0()
5324 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5325 if (is_t6(adap->params.chip)) { in adap_init0()
5328 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5331 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5332 adap->vres.ppod_edram.size = in adap_init0()
5333 val[1] - val[0] + 1; in adap_init0()
5335 dev_info(adap->pdev_dev, in adap_init0()
5338 adap->vres.ppod_edram.size); in adap_init0()
5342 adap->num_ofld_uld += 2; in adap_init0()
5348 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5351 if (ret != -EINVAL) in adap_init0()
5354 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5356 adap->num_ofld_uld += 1; in adap_init0()
5362 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5366 adap->vres.key.start = val[0]; in adap_init0()
5367 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5368 adap->num_uld += 1; in adap_init0()
5370 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5378 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5384 * a multiple of 8 +/- 4 bytes apart near this popular MTU. in adap_init0()
5389 * options are in use, then we have a 20-byte IP header and a in adap_init0()
5390 * 20-byte TCP header. In this case, a 1500-byte MSS would in adap_init0()
5391 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes in adap_init0()
5394 * is a multiple of 8. On the other hand, if 12-byte TCP Time in adap_init0()
5400 if (adap->params.mtus[i] == 1492) { in adap_init0()
5401 adap->params.mtus[i] = 1488; in adap_init0()
5405 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5406 adap->params.b_wnd); in adap_init0()
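To spell out the arithmetic behind swapping 1492 for 1488 (plain arithmetic, not quoted from the source): with 20-byte IP and 20-byte TCP headers the payload is MTU - 40, so 1492 yields 1452 bytes (not a multiple of 8) while 1488 yields 1448 == 8 * 181; with 12-byte TCP timestamps the payload is MTU - 52, and the table's 1500 entry then yields 1448 as well, so whichever option combination is negotiated there is an MTU entry whose TCP payload is 8-byte aligned.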
5409 adap->flags |= CXGB4_FW_OK; in adap_init0()
5420 kfree(adap->sge.egr_map); in adap_init0()
5421 kfree(adap->sge.ingr_map); in adap_init0()
5422 kfree(adap->sge.starving_fl); in adap_init0()
5423 kfree(adap->sge.txq_maperr); in adap_init0()
5425 kfree(adap->sge.blocked_fl); in adap_init0()
5427 if (ret != -ETIMEDOUT && ret != -EIO) in adap_init0()
5428 t4_fw_bye(adap, adap->mbox); in adap_init0()
5444 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5446 spin_lock(&adap->stats_lock); in eeh_err_detected()
5448 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5454 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5456 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5459 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5461 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5479 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5481 dev_err(&pdev->dev, "Cannot reenable PCI " in eeh_slot_reset()
5485 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5492 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5494 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5496 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5504 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5508 pi->viid = ret; in eeh_slot_reset()
5509 pi->xact_addr_filt = -1; in eeh_slot_reset()
5513 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5514 pi->vivld = vivld; in eeh_slot_reset()
5515 pi->vin = vin; in eeh_slot_reset()
5518 pi->vivld = FW_VIID_VIVLD_G(pi->viid); in eeh_slot_reset()
5519 pi->vin = FW_VIID_VIN_G(pi->viid); in eeh_slot_reset()
5523 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5524 adap->params.b_wnd); in eeh_slot_reset()
5541 struct net_device *dev = adap->port[i]; in eeh_resume()
5558 if (adapter->pf != 4) in eeh_reset_prepare()
5561 adapter->flags &= ~CXGB4_FW_OK; in eeh_reset_prepare()
5566 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_prepare()
5567 cxgb_close(adapter->port[i]); in eeh_reset_prepare()
5574 if (adapter->flags & CXGB4_FULL_INIT_DONE) in eeh_reset_prepare()
5583 if (adapter->pf != 4) in eeh_reset_done()
5586 err = t4_wait_dev_ready(adapter->regs); in eeh_reset_done()
5588 dev_err(adapter->pdev_dev, in eeh_reset_done()
5597 dev_err(adapter->pdev_dev, in eeh_reset_done()
5604 if (adapter->flags & CXGB4_FW_OK) { in eeh_reset_done()
5605 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); in eeh_reset_done()
5607 dev_err(adapter->pdev_dev, in eeh_reset_done()
5615 dev_err(adapter->pdev_dev, in eeh_reset_done()
5624 dev_err(adapter->pdev_dev, in eeh_reset_done()
5630 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_done()
5631 cxgb_open(adapter->port[i]); in eeh_reset_done()
5649 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); in is_x_10g_port()
5665 struct sge *s = &adap->sge; in cfg_queues()
5671 adap->params.offload = 0; in cfg_queues()
5672 adap->params.crypto = 0; in cfg_queues()
5673 adap->params.ethofld = 0; in cfg_queues()
5688 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5689 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5690 niqflint--; in cfg_queues()
5691 neq = adap->params.pfres.neq / 2; in cfg_queues()
5694 if (avail_qsets < adap->params.nports) { in cfg_queues()
5695 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5696 avail_qsets, adap->params.nports); in cfg_queues()
5697 return -ENOMEM; in cfg_queues()
5702 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5706 /* We default to 1 queue per non-10G port and up to # of cores queues in cfg_queues()
5710 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5715 * own TX Queue in order to prevent Head-Of-Line Blocking. in cfg_queues()
5718 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5719 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5720 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5721 return -ENOMEM; in cfg_queues()
5724 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5730 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5731 q10g--; in cfg_queues()
5745 pi->first_qset = qidx; in cfg_queues()
5746 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; in cfg_queues()
5747 qidx += pi->nqsets; in cfg_queues()
5750 s->ethqsets = qidx; in cfg_queues()
5751 s->max_ethqsets = qidx; /* MSI-X may lower it later */ in cfg_queues()
5752 avail_qsets -= qidx; in cfg_queues()
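A small user-space sketch (illustrative values, simplified to the lines visible above) of that split: each non-10G port keeps q1g queue sets, the n10g faster ports share the remainder, and q10g is walked down until the total fits in the available Ethernet queue sets:

#include <stdio.h>

/* Illustrative only: distribute "avail" Ethernet queue sets across ports,
 * giving each non-10G port q1g sets and splitting the remainder across the
 * n10g faster ports, as in the fragment above.
 */
static unsigned int split_qsets(unsigned int avail, unsigned int nports,
				unsigned int n10g, unsigned int q1g)
{
	unsigned int q10g;

	if (!n10g)
		return q1g;

	q10g = (avail - (nports - n10g) * q1g) / n10g;
	while (q10g > 1 && (nports - n10g) * q1g + n10g * q10g > avail)
		q10g--;
	return q10g;
}

int main(void)
{
	/* e.g. 32 available sets, 4 ports, 2 of them 10G+, 1 set per 1G port */
	printf("q10g = %u\n", split_qsets(32, 4, 2, 1));	/* -> 15 */
	return 0;
}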
5759 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5761 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5762 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5763 adap->params.offload = 0; in cfg_queues()
5764 adap->params.crypto = 0; in cfg_queues()
5765 s->ofldqsets = 0; in cfg_queues()
5767 s->ofldqsets = adap->params.nports; in cfg_queues()
5769 s->ofldqsets = avail_uld_qsets; in cfg_queues()
5772 avail_qsets -= num_ulds * s->ofldqsets; in cfg_queues()
5779 if (avail_qsets < s->max_ethqsets) { in cfg_queues()
5780 adap->params.ethofld = 0; in cfg_queues()
5781 s->eoqsets = 0; in cfg_queues()
5783 s->eoqsets = s->max_ethqsets; in cfg_queues()
5785 avail_qsets -= s->eoqsets; in cfg_queues()
5793 if (avail_qsets >= s->max_ethqsets) in cfg_queues()
5794 s->mirrorqsets = s->max_ethqsets; in cfg_queues()
5795 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5796 s->mirrorqsets = adap->params.nports; in cfg_queues()
5798 s->mirrorqsets = 0; in cfg_queues()
5799 avail_qsets -= s->mirrorqsets; in cfg_queues()
5801 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { in cfg_queues()
5802 struct sge_eth_rxq *r = &s->ethrxq[i]; in cfg_queues()
5804 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5805 r->fl.size = 72; in cfg_queues()
5808 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) in cfg_queues()
5809 s->ethtxq[i].q.size = 1024; in cfg_queues()
5811 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) in cfg_queues()
5812 s->ctrlq[i].q.size = 512; in cfg_queues()
5814 if (!is_t4(adap->params.chip)) in cfg_queues()
5815 s->ptptxq.q.size = 8; in cfg_queues()
5817 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5818 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
5832 while (n < adap->sge.ethqsets) in reduce_ethqs()
5835 if (pi->nqsets > 1) { in reduce_ethqs()
5836 pi->nqsets--; in reduce_ethqs()
5837 adap->sge.ethqsets--; in reduce_ethqs()
5838 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5846 pi->first_qset = n; in reduce_ethqs()
5847 n += pi->nqsets; in reduce_ethqs()
5857 return -ENOMEM; in alloc_msix_info()
5859 adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec), in alloc_msix_info()
5861 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5863 return -ENOMEM; in alloc_msix_info()
5866 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5867 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5869 adap->msix_info = msix_info; in alloc_msix_info()
5875 kfree(adap->msix_bmap.msix_bmap); in free_msix_info()
5876 kfree(adap->msix_info); in free_msix_info()
5881 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5885 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5886 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); in cxgb4_get_msix_idx_from_bmap()
5887 if (msix_idx < bmap->mapsize) { in cxgb4_get_msix_idx_from_bmap()
5888 __set_bit(msix_idx, bmap->msix_bmap); in cxgb4_get_msix_idx_from_bmap()
5890 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5891 return -ENOSPC; in cxgb4_get_msix_idx_from_bmap()
5894 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5901 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5904 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
5905 __clear_bit(msix_idx, bmap->msix_bmap); in cxgb4_free_msix_idx_in_bmap()
5906 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
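cxgb4_get_msix_idx_from_bmap() and cxgb4_free_msix_idx_in_bmap() above form a tiny index allocator: find the first clear bit under the lock, set it and return its position (or -ENOSPC), then clear it again on free. A simplified single-threaded sketch of the same idea (hypothetical names, no locking):

#include <stdio.h>

#define MAPSIZE 32

static unsigned int bmap;	/* bit i set => index i is in use */

static int get_idx(void)
{
	int i;

	for (i = 0; i < MAPSIZE; i++) {
		if (!(bmap & (1u << i))) {
			bmap |= 1u << i;
			return i;
		}
	}
	return -1;		/* no free index (-ENOSPC in the driver) */
}

static void put_idx(int i)
{
	bmap &= ~(1u << i);
}

int main(void)
{
	int a = get_idx(), b = get_idx();

	printf("a=%d b=%d\n", a, b);		/* 0 and 1 */
	put_idx(a);
	printf("reused=%d\n", get_idx());	/* 0 again */
	return 0;
}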
5909 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5916 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5918 struct sge *s = &adap->sge; in enable_msix()
5923 want = s->max_ethqsets; in enable_msix()
5934 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5935 want += num_uld * s->ofldqsets; in enable_msix()
5941 want += s->eoqsets; in enable_msix()
5946 if (s->mirrorqsets) { in enable_msix()
5947 want += s->mirrorqsets; in enable_msix()
5957 return -ENOMEM; in enable_msix()
5962 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5967 want = s->max_ethqsets + EXTRA_VECS; in enable_msix()
5969 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5972 dev_info(adap->pdev_dev, in enable_msix()
5973 "Disabling MSI-X due to insufficient MSI-X vectors\n"); in enable_msix()
5978 dev_info(adap->pdev_dev, in enable_msix()
5979 "Disabling offload due to insufficient MSI-X vectors\n"); in enable_msix()
5980 adap->params.offload = 0; in enable_msix()
5981 adap->params.crypto = 0; in enable_msix()
5982 adap->params.ethofld = 0; in enable_msix()
5983 s->ofldqsets = 0; in enable_msix()
5984 s->eoqsets = 0; in enable_msix()
5985 s->mirrorqsets = 0; in enable_msix()
6002 if (s->mirrorqsets) in enable_msix()
6005 num_vec -= need; in enable_msix()
6008 ethqsets > s->max_ethqsets) in enable_msix()
6013 if (pi->nqsets < 2) in enable_msix()
6017 num_vec--; in enable_msix()
6020 num_vec--; in enable_msix()
6028 ofldqsets > s->ofldqsets) in enable_msix()
6032 num_vec -= uld_need; in enable_msix()
6036 if (s->mirrorqsets) { in enable_msix()
6039 mirrorqsets > s->mirrorqsets) in enable_msix()
6043 num_vec -= mirror_need; in enable_msix()
6047 ethqsets = s->max_ethqsets; in enable_msix()
6049 ofldqsets = s->ofldqsets; in enable_msix()
6051 eoqsets = s->eoqsets; in enable_msix()
6052 if (s->mirrorqsets) in enable_msix()
6053 mirrorqsets = s->mirrorqsets; in enable_msix()
6056 if (ethqsets < s->max_ethqsets) { in enable_msix()
6057 s->max_ethqsets = ethqsets; in enable_msix()
6062 s->ofldqsets = ofldqsets; in enable_msix()
6063 s->nqs_per_uld = s->ofldqsets; in enable_msix()
6067 s->eoqsets = eoqsets; in enable_msix()
6069 if (s->mirrorqsets) { in enable_msix()
6070 s->mirrorqsets = mirrorqsets; in enable_msix()
6073 pi->nmirrorqsets = s->mirrorqsets / nchan; in enable_msix()
6074 mutex_init(&pi->vi_mirror_mutex); in enable_msix()
6084 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6085 adap->msix_info[i].idx = i; in enable_msix()
6088 dev_info(adap->pdev_dev, in enable_msix()
6089 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", in enable_msix()
6090 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, in enable_msix()
6091 s->mirrorqsets); in enable_msix()
6097 pci_disable_msix(adap->pdev); in enable_msix()
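The vector accounting above follows the usual want/need pattern around pci_enable_msix_range(): request vectors for every configured queue set plus the extras, accept anything at or above the minimum, and on failure retry with only the Ethernet queue sets (dropping offload, ethofld and mirror queues) before giving up on MSI-X altogether. A rough user-space sketch of that control flow, with made-up numbers and a stand-in allocator rather than the PCI API:

#include <stdio.h>

/* Stand-in for pci_enable_msix_range(): pretend the platform can hand out
 * at most "platform_max" vectors; return the grant if it is >= minvec,
 * otherwise a negative error, mirroring the real API's contract.
 */
static int fake_enable_msix_range(int minvec, int maxvec, int platform_max)
{
	int got = maxvec < platform_max ? maxvec : platform_max;

	return got >= minvec ? got : -28;	/* -ENOSPC */
}

int main(void)
{
	int platform_max = 10;		/* hypothetical vector budget */
	int want = 2 + 16 + 8;		/* extras + eth qsets + offload qsets */
	int need = 2 + 4;		/* extras + one qset per port (4 ports) */
	int got = fake_enable_msix_range(need, want, platform_max);

	if (got < 0) {
		/* Fall back: drop the offload queues, try Ethernet only */
		want = 2 + 16;
		got = fake_enable_msix_range(need, want, platform_max);
	}
	if (got < 0)
		printf("no MSI-X, fall back to MSI/INTx\n");
	else
		printf("using %d MSI-X vectors\n", got);
	return 0;
}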
6111 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6118 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in init_rss()
6119 if (!pi->rss) in init_rss()
6120 return -ENOMEM; in init_rss()
6132 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", in print_adapter_info()
6134 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : in print_adapter_info()
6135 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), in print_adapter_info()
6136 is_offload(adapter) ? "Offload" : "non-Offload"); in print_adapter_info()
6144 const struct adapter *adap = pi->adapter; in print_port_info()
6146 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) in print_port_info()
6148 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) in print_port_info()
6150 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) in print_port_info()
6152 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) in print_port_info()
6154 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) in print_port_info()
6156 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) in print_port_info()
6158 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) in print_port_info()
6160 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) in print_port_info()
6162 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) in print_port_info()
6165 --bufp; in print_port_info()
6166 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); in print_port_info()
6169 dev->name, adap->params.vpd.id, adap->name, buf); in print_port_info()
6174 * - memory used for tables
6175 * - MSI/MSI-X
6176 * - net devices
6177 * - resources FW is holding for us
6183 kvfree(adapter->smt); in free_some_resources()
6184 kvfree(adapter->l2t); in free_some_resources()
6185 kvfree(adapter->srq); in free_some_resources()
6187 kvfree(adapter->tids.tid_tab); in free_some_resources()
6193 kfree(adapter->sge.egr_map); in free_some_resources()
6194 kfree(adapter->sge.ingr_map); in free_some_resources()
6195 kfree(adapter->sge.starving_fl); in free_some_resources()
6196 kfree(adapter->sge.txq_maperr); in free_some_resources()
6198 kfree(adapter->sge.blocked_fl); in free_some_resources()
6203 if (adapter->port[i]) { in free_some_resources()
6206 if (pi->viid != 0) in free_some_resources()
6207 t4_free_vi(adapter, adapter->mbox, adapter->pf, in free_some_resources()
6208 0, pi->viid); in free_some_resources()
6209 kfree(adap2pinfo(adapter, i)->rss); in free_some_resources()
6210 free_netdev(adapter->port[i]); in free_some_resources()
6212 if (adapter->flags & CXGB4_FW_OK) in free_some_resources()
6213 t4_fw_bye(adapter, adapter->pf); in free_some_resources()
6236 return -EINVAL; in t4_get_chip_type()
6242 dev->type = ARPHRD_NONE; in cxgb4_mgmt_setup()
6243 dev->mtu = 0; in cxgb4_mgmt_setup()
6244 dev->hard_header_len = 0; in cxgb4_mgmt_setup()
6245 dev->addr_len = 0; in cxgb4_mgmt_setup()
6246 dev->tx_queue_len = 0; in cxgb4_mgmt_setup()
6247 dev->flags |= IFF_NOARP; in cxgb4_mgmt_setup()
6248 dev->priv_flags |= IFF_NO_QUEUE; in cxgb4_mgmt_setup()
6251 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; in cxgb4_mgmt_setup()
6252 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; in cxgb4_mgmt_setup()
6262 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6265 dev_warn(&pdev->dev, "Device not initialized\n"); in cxgb4_iov_configure()
6266 return -EOPNOTSUPP; in cxgb4_iov_configure()
6273 dev_err(&pdev->dev, in cxgb4_iov_configure()
6274 "Cannot modify SR-IOV while VFs are assigned\n"); in cxgb4_iov_configure()
6277 /* Note that the upper-level code ensures that we're never called with in cxgb4_iov_configure()
6278 * a non-zero "num_vfs" when we already have VFs instantiated. But in cxgb4_iov_configure()
6282 return -EBUSY; in cxgb4_iov_configure()
6292 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6293 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6294 adap->port[0] = NULL; in cxgb4_iov_configure()
6297 adap->num_vfs = 0; in cxgb4_iov_configure()
6298 kfree(adap->vfinfo); in cxgb4_iov_configure()
6299 adap->vfinfo = NULL; in cxgb4_iov_configure()
6314 * parent bridge's PCI-E needs to support Alternative Routing in cxgb4_iov_configure()
6318 pbridge = pdev->bus->self; in cxgb4_iov_configure()
6328 …dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Fu… in cxgb4_iov_configure()
6329 pbridge->bus->number, PCI_SLOT(pbridge->devfn), in cxgb4_iov_configure()
6330 PCI_FUNC(pbridge->devfn)); in cxgb4_iov_configure()
6331 return -ENOTSUPP; in cxgb4_iov_configure()
6337 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6340 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6345 port = ffs(pmask) - 1; in cxgb4_iov_configure()
6347 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6348 adap->pf); in cxgb4_iov_configure()
6352 return -ENOMEM; in cxgb4_iov_configure()
6355 pi->adapter = adap; in cxgb4_iov_configure()
6356 pi->lport = port; in cxgb4_iov_configure()
6357 pi->tx_chan = port; in cxgb4_iov_configure()
6358 SET_NETDEV_DEV(netdev, &pdev->dev); in cxgb4_iov_configure()
6360 adap->port[0] = netdev; in cxgb4_iov_configure()
6361 pi->port_id = 0; in cxgb4_iov_configure()
6363 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6366 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6367 adap->port[0] = NULL; in cxgb4_iov_configure()
6371 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6373 if (!adap->vfinfo) { in cxgb4_iov_configure()
6374 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6375 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6376 adap->port[0] = NULL; in cxgb4_iov_configure()
6377 return -ENOMEM; in cxgb4_iov_configure()
6386 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6387 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6388 adap->port[0] = NULL; in cxgb4_iov_configure()
6389 kfree(adap->vfinfo); in cxgb4_iov_configure()
6390 adap->vfinfo = NULL; in cxgb4_iov_configure()
6395 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
6408 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6409 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6410 return -EOPNOTSUPP; in chcr_offload_state()
6412 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6413 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6415 return -EOPNOTSUPP; in chcr_offload_state()
6421 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6422 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6423 return -EOPNOTSUPP; in chcr_offload_state()
6425 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6426 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6428 return -EOPNOTSUPP; in chcr_offload_state()
6433 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6435 return -EOPNOTSUPP; in chcr_offload_state()
6462 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6485 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6503 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state()
6507 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_add_state()
6509 return -EBUSY; in cxgb4_xfrm_add_state()
6515 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x); in cxgb4_xfrm_add_state()
6525 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state()
6528 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6535 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6543 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state()
6546 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6553 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6561 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok()
6565 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6572 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6581 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state()
6584 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6591 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()
6625 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); in init_one()
6631 dev_err(&pdev->dev, "cannot enable PCI device\n"); in init_one()
6637 dev_err(&pdev->dev, "cannot map device registers\n"); in init_one()
6638 err = -ENOMEM; in init_one()
6644 err = -ENOMEM; in init_one()
6648 adapter->regs = regs; in init_one()
6658 dev_err(&pdev->dev, "Device %d is not supported\n", device_id); in init_one()
6666 adapter->pdev = pdev; in init_one()
6667 adapter->pdev_dev = &pdev->dev; in init_one()
6668 adapter->name = pci_name(pdev); in init_one()
6669 adapter->mbox = func; in init_one()
6670 adapter->pf = func; in init_one()
6671 adapter->params.chip = chip; in init_one()
6672 adapter->adap_idx = adap_idx; in init_one()
6673 adapter->msg_enable = DFLT_MSG_ENABLE; in init_one()
6674 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + in init_one()
6678 if (!adapter->mbox_log) { in init_one()
6679 err = -ENOMEM; in init_one()
6682 spin_lock_init(&adapter->mbox_lock); in init_one()
6683 INIT_LIST_HEAD(&adapter->mlist.list); in init_one()
6684 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; in init_one()
6687 if (func != ent->driver_data) { in init_one()
6689 pci_save_state(pdev); /* to restore SR-IOV later */ in init_one()
6697 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " in init_one()
6704 dev_err(&pdev->dev, "no usable DMA configuration\n"); in init_one()
6713 adapter->workq = create_singlethread_workqueue("cxgb4"); in init_one()
6714 if (!adapter->workq) { in init_one()
6715 err = -ENOMEM; in init_one()
6720 adapter->flags |= CXGB4_DEV_ENABLED; in init_one()
6721 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); in init_one()
6738 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; in init_one()
6740 spin_lock_init(&adapter->stats_lock); in init_one()
6741 spin_lock_init(&adapter->tid_release_lock); in init_one()
6742 spin_lock_init(&adapter->win0_lock); in init_one()
6744 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); in init_one()
6745 INIT_WORK(&adapter->db_full_task, process_db_full); in init_one()
6746 INIT_WORK(&adapter->db_drop_task, process_db_drop); in init_one()
6747 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err); in init_one()
6757 dev_warn(adapter->pdev_dev, in init_one()
6764 if (!is_t4(adapter->params.chip)) { in init_one()
6766 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * in init_one()
6767 adapter->pf); in init_one()
6778 dev_err(&pdev->dev, in init_one()
6780 err = -EINVAL; in init_one()
6783 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), in init_one()
6785 if (!adapter->bar2) { in init_one()
6786 dev_err(&pdev->dev, "cannot map device bar2 region\n"); in init_one()
6787 err = -ENOMEM; in init_one()
6795 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); in init_one()
6802 if (!is_t4(adapter->params.chip)) in init_one()
6804 (is_t5(adapter->params.chip) ? STATMODE_V(0) : in init_one()
6808 INIT_LIST_HEAD(&adapter->mac_hlist); in init_one()
6820 err = -ENOMEM; in init_one()
6824 SET_NETDEV_DEV(netdev, &pdev->dev); in init_one()
6826 adapter->port[i] = netdev; in init_one()
6828 pi->adapter = adapter; in init_one()
6829 pi->xact_addr_filt = -1; in init_one()
6830 pi->port_id = i; in init_one()
6831 netdev->irq = pdev->irq; in init_one()
6833 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | in init_one()
6840 netdev->hw_enc_features |= NETIF_F_IP_CSUM | in init_one()
6847 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in init_one()
6851 if (adapter->rawf_cnt) in init_one()
6852 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; in init_one()
6856 netdev->hw_features |= NETIF_F_HIGHDMA; in init_one()
6857 netdev->features |= netdev->hw_features; in init_one()
6858 netdev->vlan_features = netdev->features & VLAN_FEAT; in init_one()
6860 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) { in init_one()
6861 netdev->hw_features |= NETIF_F_HW_TLS_TX; in init_one()
6862 netdev->tlsdev_ops = &cxgb4_ktls_ops; in init_one()
6864 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); in init_one()
6868 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { in init_one()
6869 netdev->hw_enc_features |= NETIF_F_HW_ESP; in init_one()
6870 netdev->features |= NETIF_F_HW_ESP; in init_one()
6871 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; in init_one()
6875 netdev->priv_flags |= IFF_UNICAST_FLT; in init_one()
6877 /* MTU range: 81 - 9600 */ in init_one()
6878 netdev->min_mtu = 81; /* accommodate SACK */ in init_one()
6879 netdev->max_mtu = MAX_MTU; in init_one()
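The 81-byte floor above is presumably sized so that a TCP segment carrying the full 40 bytes of options (SACK blocks and friends) still fits at least one byte of payload: 20-byte IPv4 header + 60-byte TCP header + 1 byte of data = 81 (an inference from the "accommodate SACK" comment, not stated explicitly in the source).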
6881 netdev->netdev_ops = &cxgb4_netdev_ops; in init_one()
6883 netdev->dcbnl_ops = &cxgb4_dcb_ops; in init_one()
6894 if (adapter->flags & CXGB4_FW_OK) { in init_one()
6898 } else if (adapter->params.nports == 1) { in init_one()
6899 /* If we don't have a connection to the firmware -- possibly in init_one()
6900 * because of an error -- grab the raw VPD parameters so we in init_one()
6905 u8 *na = adapter->params.vpd.na; in init_one()
6907 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); in init_one()
6916 if (!(adapter->flags & CXGB4_FW_OK)) in init_one()
6926 adapter->smt = t4_init_smt(); in init_one()
6927 if (!adapter->smt) { in init_one()
6929 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); in init_one()
6932 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); in init_one()
6933 if (!adapter->l2t) { in init_one()
6935 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); in init_one()
6936 adapter->params.offload = 0; in init_one()
6945 dev_warn(&pdev->dev, in init_one()
6947 adapter->params.offload = 0; in init_one()
6949 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, in init_one()
6950 adapter->clipt_end); in init_one()
6951 if (!adapter->clipt) { in init_one()
6955 dev_warn(&pdev->dev, in init_one()
6957 adapter->params.offload = 0; in init_one()
6964 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); in init_one()
6965 if (!pi->sched_tbl) in init_one()
6966 dev_warn(&pdev->dev, in init_one()
6977 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); in init_one()
6979 adapter->tids.hash_base = v / 4; in init_one()
6981 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; in init_one()
6984 adapter->tids.hash_base = v; in init_one()
6989 if (tid_init(&adapter->tids) < 0) { in init_one()
6990 dev_warn(&pdev->dev, "could not allocate TID table, " in init_one()
6992 adapter->params.offload = 0; in init_one()
6994 adapter->tc_u32 = cxgb4_init_tc_u32(adapter); in init_one()
6995 if (!adapter->tc_u32) in init_one()
6996 dev_warn(&pdev->dev, in init_one()
7000 dev_warn(&pdev->dev, in init_one()
7004 dev_warn(&pdev->dev, in init_one()
7008 dev_warn(&pdev->dev, in init_one()
7011 dev_warn(&pdev->dev, in init_one()
7017 adapter->flags |= CXGB4_USING_MSIX; in init_one()
7019 adapter->flags |= CXGB4_USING_MSI; in init_one()
7035 dev_err(adapter->pdev_dev, in init_one()
7042 dev_err(adapter->pdev_dev, in init_one()
7056 adapter->port[i]->dev_port = pi->lport; in init_one()
7057 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); in init_one()
7058 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); in init_one()
7060 netif_carrier_off(adapter->port[i]); in init_one()
7062 err = register_netdev(adapter->port[i]); in init_one()
7065 adapter->chan_map[pi->tx_chan] = i; in init_one()
7066 print_port_info(adapter->port[i]); in init_one()
7069 dev_err(&pdev->dev, "could not register any net devices\n"); in init_one()
7073 dev_warn(&pdev->dev, "only %d net devices registered\n", i); in init_one()
7078 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), in init_one()
7084 pdev->needs_freset = 1; in init_one()
7089 if (!is_t4(adapter->params.chip)) in init_one()
7093 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) in init_one()
7102 if (adapter->flags & CXGB4_USING_MSIX) in init_one()
7104 if (adapter->num_uld || adapter->num_ofld_uld) in init_one()
7107 if (!is_t4(adapter->params.chip)) in init_one()
7108 iounmap(adapter->bar2); in init_one()
7110 if (adapter->workq) in init_one()
7111 destroy_workqueue(adapter->workq); in init_one()
7113 kfree(adapter->mbox_log); in init_one()
7140 adapter->flags |= CXGB4_SHUTTING_DOWN; in remove_one()
7142 if (adapter->pf == 4) { in remove_one()
7145 /* Tear down per-adapter Work Queue first since it can contain in remove_one()
7148 destroy_workqueue(adapter->workq); in remove_one()
7162 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in remove_one()
7163 unregister_netdev(adapter->port[i]); in remove_one()
7165 debugfs_remove_recursive(adapter->debugfs_root); in remove_one()
7167 if (!is_t4(adapter->params.chip)) in remove_one()
7172 if (adapter->flags & CXGB4_FULL_INIT_DONE) in remove_one()
7175 if (adapter->flags & CXGB4_USING_MSIX) in remove_one()
7177 if (adapter->num_uld || adapter->num_ofld_uld) in remove_one()
7180 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, in remove_one()
7182 list_del(&entry->list); in remove_one()
7189 if (!is_t4(adapter->params.chip)) in remove_one()
7190 iounmap(adapter->bar2); in remove_one()
7194 cxgb4_iov_configure(adapter->pdev, 0); in remove_one()
7197 iounmap(adapter->regs); in remove_one()
7199 if ((adapter->flags & CXGB4_DEV_ENABLED)) { in remove_one()
7201 adapter->flags &= ~CXGB4_DEV_ENABLED; in remove_one()
7204 kfree(adapter->mbox_log); in remove_one()
7227 adapter->flags |= CXGB4_SHUTTING_DOWN; in shutdown_one()
7229 if (adapter->pf == 4) { in shutdown_one()
7233 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in shutdown_one()
7234 cxgb_close(adapter->port[i]); in shutdown_one()
7249 if (adapter->flags & CXGB4_FW_OK) in shutdown_one()
7250 t4_fw_bye(adapter, adapter->mbox); in shutdown_one()