Lines Matching +full:dp +full:- +full:phy1

3  * Copyright (c) 2007-2013 Broadcom Corporation
37 #include <linux/dma-mapping.h>
77 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
78 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
79 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
112 static int mrrs = -1;
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_wr_ind()
368 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); in bnx2x_reg_wr_ind()
369 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_wr_ind()
377 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_rd_ind()
378 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); in bnx2x_reg_rd_ind()
379 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_rd_ind()
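
The two helpers above (bnx2x_reg_wr_ind / bnx2x_reg_rd_ind, lines 367-379) show the indirect GRC access pattern: the target address is written into the PCICFG_GRC_ADDRESS window, the data is then written to or read from PCICFG_GRC_DATA, and the window is pointed elsewhere again. A minimal sketch of the same pattern, assuming the PCICFG_* offsets from the listing; the final "park" offset is an assumption, since the third write's argument is truncated above:

	static void reg_wr_ind(struct pci_dev *pdev, u32 addr, u32 val)
	{
		/* open the configuration window at the target GRC address */
		pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, addr);
		/* push the data through the window */
		pci_write_config_dword(pdev, PCICFG_GRC_DATA, val);
		/* park the window on a harmless offset afterwards (assumed offset) */
		pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET);
	}

	static u32 reg_rd_ind(struct pci_dev *pdev, u32 addr)
	{
		u32 val;

		pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, addr);
		pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
		pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET);
		return val;
	}
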
394 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; in bnx2x_dp_dmae()
397 switch (dmae->opcode & DMAE_COMMAND_DST) { in bnx2x_dp_dmae()
400 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
403 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
404 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
405 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
406 dmae->comp_val); in bnx2x_dp_dmae()
408 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
411 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
412 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
413 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
414 dmae->comp_val); in bnx2x_dp_dmae()
418 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
421 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
422 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
423 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
424 dmae->comp_val); in bnx2x_dp_dmae()
426 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
429 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
430 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
431 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
432 dmae->comp_val); in bnx2x_dp_dmae()
436 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
439 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
440 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
441 dmae->comp_val); in bnx2x_dp_dmae()
443 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
446 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
447 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
448 dmae->comp_val); in bnx2x_dp_dmae()
453 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", in bnx2x_dp_dmae()
513 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, in bnx2x_prep_dmae_with_comp()
517 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
518 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
519 dmae->comp_val = DMAE_COMP_VAL; in bnx2x_prep_dmae_with_comp()
522 /* issue a dmae command over the init-channel and wait for completion */
531 /* Lock the dmae channel. Disable BHs to prevent a deadlock in bnx2x_issue_dmae_with_comp()
536 spin_lock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
549 (bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_issue_dmae_with_comp()
550 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_issue_dmae_with_comp()
555 cnt--; in bnx2x_issue_dmae_with_comp()
565 spin_unlock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
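
The prep/issue pair above (lines 513-565) shows the DMAE completion handshake: the command's completion address points at the driver's wb_comp write-back word, the expected completion value is DMAE_COMP_VAL, and after posting the command the caller spins under bp->dmae_lock (BHs disabled) until the chip writes that value back or the retry budget runs out. A simplified, standalone model of the wait loop; the error code, the completion constant and the per-iteration delay are assumptions, not the driver's exact values:

	#include <errno.h>
	#include <stdint.h>

	#define DMAE_COMP_VAL	0xD0D0D0D0u	/* illustrative; the driver defines the real constant */

	/* poll the write-back word the DMAE engine updates on completion */
	static int wait_dmae_comp(volatile uint32_t *wb_comp, int cnt)
	{
		while (*wb_comp != DMAE_COMP_VAL) {
			if (cnt-- <= 0)
				return -ETIMEDOUT;	/* illustrative error code */
			/* the driver pauses a few microseconds between reads */
		}
		return 0;
	}
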
576 if (!bp->dmae_ready) { in bnx2x_write_dmae()
611 if (!bp->dmae_ready) { in bnx2x_read_dmae()
655 len -= dmae_wr_max; in bnx2x_write_dmae_phys_len()
689 return -EINVAL; in bnx2x_get_assert_list_entry()
769 BNX2X_ERR("NO MCP - can not dump\n"); in bnx2x_fw_dump_lvl()
772 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", in bnx2x_fw_dump_lvl()
773 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fw_dump_lvl()
774 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fw_dump_lvl()
775 (bp->common.bc_ver & 0xff)); in bnx2x_fw_dump_lvl()
777 if (pci_channel_offline(bp->pdev)) { in bnx2x_fw_dump_lvl()
787 trace_shmem_base = bp->common.shmem_base; in bnx2x_fw_dump_lvl()
800 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; in bnx2x_fw_dump_lvl()
812 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; in bnx2x_fw_dump_lvl()
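
Two small computations recur in bnx2x_fw_dump_lvl above: the bootcode version is packed one field per byte of bp->common.bc_ver (lines 772-775), and the trace mark is rounded up to a dword boundary before the 0x08000000 bias is subtracted (line 812). A standalone model of both, with made-up input values; the MCPR_SCRATCH_BASE term from line 812 is left out for brevity:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t bc_ver = 0x00070d04;	/* example value only */
		uint32_t mark = 0x0801ff3e;	/* example raw mark only */

		/* one version field per byte: major.minor.rev */
		printf("bc %u.%u.%u\n",
		       (bc_ver & 0xff0000) >> 16,
		       (bc_ver & 0xff00) >> 8,
		       (bc_ver & 0xff));

		/* round up to a 4-byte boundary, then drop the 0x08000000 bias */
		mark = ((mark + 0x3) & ~0x3u) - 0x08000000;
		printf("mark offset 0x%08x\n", mark);
		return 0;
	}
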
870 DP(NETIF_MSG_IFDOWN, in bnx2x_hc_int_disable()
887 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); in bnx2x_igu_int_disable()
896 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_disable()
915 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_panic_dump()
916 bp->eth_stats.unrecoverable_error++; in bnx2x_panic_dump()
917 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); in bnx2x_panic_dump()
919 BNX2X_ERR("begin crash dump -----------------\n"); in bnx2x_panic_dump()
924 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_panic_dump()
928 bp->def_idx, bp->def_att_idx, bp->attn_state, in bnx2x_panic_dump()
929 bp->spq_prod_idx, bp->stats_counter); in bnx2x_panic_dump()
931 def_sb->atten_status_block.attn_bits, in bnx2x_panic_dump()
932 def_sb->atten_status_block.attn_bits_ack, in bnx2x_panic_dump()
933 def_sb->atten_status_block.status_block_id, in bnx2x_panic_dump()
934 def_sb->atten_status_block.attn_bits_index); in bnx2x_panic_dump()
938 def_sb->sp_sb.index_values[i], in bnx2x_panic_dump()
939 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); in bnx2x_panic_dump()
960 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
976 if (!bp->fp) in bnx2x_panic_dump()
979 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
984 i, fp->rx_bd_prod, fp->rx_bd_cons, in bnx2x_panic_dump()
985 fp->rx_comp_prod, in bnx2x_panic_dump()
986 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); in bnx2x_panic_dump()
988 fp->rx_sge_prod, fp->last_max_sge, in bnx2x_panic_dump()
989 le16_to_cpu(fp->fp_hc_idx)); in bnx2x_panic_dump()
994 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
997 txdata = *fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1020 fp->sb_running_index[j], in bnx2x_panic_dump()
1021 (j == HC_SB_MAX_SM - 1) ? ")" : " "); in bnx2x_panic_dump()
1026 fp->sb_index_values[j], in bnx2x_panic_dump()
1027 (j == loop - 1) ? ")" : " "); in bnx2x_panic_dump()
1044 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + in bnx2x_panic_dump()
1086 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); in bnx2x_panic_dump()
1088 u32 *data = (u32 *)&bp->eq_ring[i].message.data; in bnx2x_panic_dump()
1091 i, bp->eq_ring[i].message.opcode, in bnx2x_panic_dump()
1092 bp->eq_ring[i].message.error); in bnx2x_panic_dump()
1101 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1103 if (!bp->fp) in bnx2x_panic_dump()
1106 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
1109 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); in bnx2x_panic_dump()
1110 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); in bnx2x_panic_dump()
1112 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; in bnx2x_panic_dump()
1113 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; in bnx2x_panic_dump()
1116 i, j, rx_bd[1], rx_bd[0], sw_bd->data); in bnx2x_panic_dump()
1119 start = RX_SGE(fp->rx_sge_prod); in bnx2x_panic_dump()
1120 end = RX_SGE(fp->last_max_sge); in bnx2x_panic_dump()
1122 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; in bnx2x_panic_dump()
1123 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; in bnx2x_panic_dump()
1126 i, j, rx_sge[1], rx_sge[0], sw_page->page); in bnx2x_panic_dump()
1129 start = RCQ_BD(fp->rx_comp_cons - 10); in bnx2x_panic_dump()
1130 end = RCQ_BD(fp->rx_comp_cons + 503); in bnx2x_panic_dump()
1132 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; in bnx2x_panic_dump()
1141 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1143 if (!bp->fp) in bnx2x_panic_dump()
1147 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1149 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
1152 if (!txdata->tx_cons_sb) in bnx2x_panic_dump()
1155 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); in bnx2x_panic_dump()
1156 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); in bnx2x_panic_dump()
1159 &txdata->tx_buf_ring[j]; in bnx2x_panic_dump()
1162 i, cos, j, sw_bd->skb, in bnx2x_panic_dump()
1163 sw_bd->first_bd); in bnx2x_panic_dump()
1166 start = TX_BD(txdata->tx_bd_cons - 10); in bnx2x_panic_dump()
1167 end = TX_BD(txdata->tx_bd_cons + 254); in bnx2x_panic_dump()
1169 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; in bnx2x_panic_dump()
1179 int tmp_msg_en = bp->msg_enable; in bnx2x_panic_dump()
1182 bp->msg_enable |= NETIF_MSG_HW; in bnx2x_panic_dump()
1183 BNX2X_ERR("Idle check (1st round) ----------\n"); in bnx2x_panic_dump()
1185 BNX2X_ERR("Idle check (2nd round) ----------\n"); in bnx2x_panic_dump()
1187 bp->msg_enable = tmp_msg_en; in bnx2x_panic_dump()
1191 BNX2X_ERR("end crash dump -----------------\n"); in bnx2x_panic_dump()
1224 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1225 crd = crd_start = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1226 init_crd = REG_RD(bp, regs->init_crd); in bnx2x_pbf_pN_buf_flushed()
1228 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); in bnx2x_pbf_pN_buf_flushed()
1229 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1230 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1233 (init_crd - crd_start))) { in bnx2x_pbf_pN_buf_flushed()
1234 if (cur_cnt--) { in bnx2x_pbf_pN_buf_flushed()
1236 crd = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1237 crd_freed = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1239 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", in bnx2x_pbf_pN_buf_flushed()
1240 regs->pN); in bnx2x_pbf_pN_buf_flushed()
1241 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1242 regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1243 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1244 regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1248 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", in bnx2x_pbf_pN_buf_flushed()
1249 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_buf_flushed()
1259 occup = to_free = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1260 freed = freed_start = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1262 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1263 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1266 if (cur_cnt--) { in bnx2x_pbf_pN_cmd_flushed()
1268 occup = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1269 freed = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1271 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", in bnx2x_pbf_pN_cmd_flushed()
1272 regs->pN); in bnx2x_pbf_pN_cmd_flushed()
1273 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1274 regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1275 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1276 regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1280 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", in bnx2x_pbf_pN_cmd_flushed()
1281 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_cmd_flushed()
1290 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) in bnx2x_flr_clnup_reg_poll()
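
Line 1290 above is the heart of a generic FLR poll helper: read a register until it holds the expected value or the polling budget is exhausted, pausing FLR_WAIT_INTERVAL microseconds per iteration. A sketch of how the full helper plausibly reads, with parameter names assumed from the truncated line:

	static u32 flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				      u32 expected, u32 poll_count)
	{
		u32 cur_cnt = poll_count;
		u32 val;

		/* keep reading until the register reaches the expected value
		 * or the retry budget runs out */
		while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
			udelay(FLR_WAIT_INTERVAL);

		/* the caller compares the returned value against 'expected' */
		return val;
	}
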
1410 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); in bnx2x_send_final_clnup()
1415 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", in bnx2x_send_final_clnup()
1438 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ in bnx2x_poll_hw_usage_counters()
1445 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1452 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1459 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1486 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1489 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1492 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1495 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1498 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); in bnx2x_hw_enable_status()
1501 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1504 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1507 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", in bnx2x_hw_enable_status()
1515 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); in bnx2x_pf_flr_clnup()
1517 /* Re-enable PF target read access */ in bnx2x_pf_flr_clnup()
1521 DP(BNX2X_MSG_SP, "Polling usage counters\n"); in bnx2x_pf_flr_clnup()
1523 return -EBUSY; in bnx2x_pf_flr_clnup()
1529 return -EBUSY; in bnx2x_pf_flr_clnup()
1540 if (bnx2x_is_pcie_pending(bp->pdev)) in bnx2x_pf_flr_clnup()
1547 * Master enable - Due to WB DMAE writes performed before this in bnx2x_pf_flr_clnup()
1548 * register is re-initialized as part of the regular function init in bnx2x_pf_flr_clnup()
1560 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1561 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1562 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_hc_int_enable()
1583 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1595 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1597 (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_hc_int_enable()
1609 if (bp->port.pmf) in bnx2x_hc_int_enable()
1623 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1624 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1625 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_igu_int_enable()
1649 /* Clean previous status - need to configure igu prior to ack */ in bnx2x_igu_int_enable()
1657 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", in bnx2x_igu_int_enable()
1658 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_igu_int_enable()
1663 pci_intx(bp->pdev, true); in bnx2x_igu_int_enable()
1670 if (bp->port.pmf) in bnx2x_igu_int_enable()
1682 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_enable()
1690 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_int_disable_sync()
1699 synchronize_irq(bp->msix_table[0].vector); in bnx2x_int_disable_sync()
1704 synchronize_irq(bp->msix_table[offset++].vector); in bnx2x_int_disable_sync()
1706 synchronize_irq(bp->pdev->irq); in bnx2x_int_disable_sync()
1709 cancel_delayed_work(&bp->sp_task); in bnx2x_int_disable_sync()
1710 cancel_delayed_work(&bp->period_task); in bnx2x_int_disable_sync()
1728 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1733 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1743 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_trylock_hw_lock()
1751 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1757 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1773 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1789 /* Set the interrupt occurred bit for the sp-task to recognize it in bnx2x_schedule_sp_task()
1793 atomic_set(&bp->interrupt_occurred, 1); in bnx2x_schedule_sp_task()
1802 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); in bnx2x_schedule_sp_task()
1807 struct bnx2x *bp = fp->bp; in bnx2x_sp_event()
1808 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1809 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1813 DP(BNX2X_MSG_SP, in bnx2x_sp_event()
1815 fp->index, cid, command, bp->state, in bnx2x_sp_event()
1816 rr_cqe->ramrod_cqe.ramrod_type); in bnx2x_sp_event()
1827 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); in bnx2x_sp_event()
1832 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); in bnx2x_sp_event()
1837 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); in bnx2x_sp_event()
1842 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); in bnx2x_sp_event()
1847 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); in bnx2x_sp_event()
1852 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); in bnx2x_sp_event()
1857 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); in bnx2x_sp_event()
1863 command, fp->index); in bnx2x_sp_event()
1868 q_obj->complete_cmd(bp, q_obj, drv_cmd)) in bnx2x_sp_event()
1869 /* q_obj->complete_cmd() failure means that this was in bnx2x_sp_event()
1872 * In this case we don't want to increase the bp->spq_left in bnx2x_sp_event()
1883 atomic_inc(&bp->cq_spq_left); in bnx2x_sp_event()
1884 /* push the change in bp->spq_left towards the memory */ in bnx2x_sp_event()
1887 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); in bnx2x_sp_event()
1890 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { in bnx2x_sp_event()
1901 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); in bnx2x_sp_event()
1903 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_sp_event()
1923 DP(NETIF_MSG_INTR, "not our interrupt!\n"); in bnx2x_interrupt()
1926 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); in bnx2x_interrupt()
1929 if (unlikely(bp->panic)) in bnx2x_interrupt()
1934 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_interrupt()
1936 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); in bnx2x_interrupt()
1940 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_interrupt()
1941 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_interrupt()
1942 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_interrupt()
1953 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_interrupt()
1954 if (c_ops && (bp->cnic_eth_dev.drv_state & in bnx2x_interrupt()
1956 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_interrupt()
1976 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", in bnx2x_interrupt()
2000 return -EINVAL; in bnx2x_acquire_hw_lock()
2007 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_acquire_hw_lock()
2015 return -EEXIST; in bnx2x_acquire_hw_lock()
2029 return -EAGAIN; in bnx2x_acquire_hw_lock()
2048 return -EINVAL; in bnx2x_release_hw_lock()
2055 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_release_hw_lock()
2063 return -EFAULT; in bnx2x_release_hw_lock()
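
The (func - 6)*8 expression that appears at lines 1743, 2007 and 2055 selects the per-function HW lock control register: PCI functions 0-5 use one bank of consecutive registers and functions 6-7 spill into a second bank. A hedged sketch of the selection; only the MISC_REG_DRIVER_CONTROL_7 branch is visible in the listing, so the first bank's base register is an assumption:

	u32 hw_lock_control_reg;

	if (func <= 5)		/* first bank: one register per function, 8 bytes apart */
		hw_lock_control_reg = MISC_REG_DRIVER_CONTROL_1 + func * 8;	/* assumed base */
	else			/* functions 6-7 continue in a second bank */
		hw_lock_control_reg = MISC_REG_DRIVER_CONTROL_7 + (func - 6) * 8;
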
2083 return -EINVAL; in bnx2x_get_gpio()
2110 return -EINVAL; in bnx2x_set_gpio()
2119 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2120 "Set GPIO %d (shift %d) -> output low\n", in bnx2x_set_gpio()
2128 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2129 "Set GPIO %d (shift %d) -> output high\n", in bnx2x_set_gpio()
2137 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2138 "Set GPIO %d (shift %d) -> input\n", in bnx2x_set_gpio()
2170 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); in bnx2x_set_mult_gpio()
2176 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); in bnx2x_set_mult_gpio()
2182 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); in bnx2x_set_mult_gpio()
2189 rc = -EINVAL; in bnx2x_set_mult_gpio()
2213 return -EINVAL; in bnx2x_set_gpio_int()
2222 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2223 "Clear GPIO INT %d (shift %d) -> output low\n", in bnx2x_set_gpio_int()
2231 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2232 "Set GPIO INT %d (shift %d) -> output high\n", in bnx2x_set_gpio_int()
2256 return -EINVAL; in bnx2x_set_spio()
2265 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); in bnx2x_set_spio()
2272 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); in bnx2x_set_spio()
2279 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); in bnx2x_set_spio()
2298 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2300 switch (bp->link_vars.ieee_fc & in bnx2x_calc_fc_adv()
2303 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2308 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; in bnx2x_calc_fc_adv()
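
Lines 2298-2308 above rebuild the pause advertisement from the negotiated ieee_fc value: both bits are cleared first, then symmetric+asymmetric pause re-advertises ADVERTISED_Pause together with ADVERTISED_Asym_Pause, while the asymmetric-only case advertises ADVERTISED_Asym_Pause alone. A condensed model of the mapping; the mask and case-label names are paraphrased, not the driver's exact MDIO macros:

	adv &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);

	switch (ieee_fc & PAUSE_ADV_MASK) {	/* paraphrased mask name */
	case PAUSE_ADV_BOTH:			/* symmetric + asymmetric pause */
		adv |= ADVERTISED_Asym_Pause | ADVERTISED_Pause;
		break;
	case PAUSE_ADV_ASYMMETRIC:		/* asymmetric pause only */
		adv |= ADVERTISED_Asym_Pause;
		break;
	default:				/* no pause advertised */
		break;
	}
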
2322 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) in bnx2x_set_requested_fc()
2323 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; in bnx2x_set_requested_fc()
2325 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; in bnx2x_set_requested_fc()
2332 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { in bnx2x_init_dropless_fc()
2333 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_init_dropless_fc()
2341 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", in bnx2x_init_dropless_fc()
2348 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; in bnx2x_initial_phy_init()
2355 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2356 lp->loopback_mode = LOOPBACK_XGXS; in bnx2x_initial_phy_init()
2358 if (lp->req_line_speed[cfx_idx] < SPEED_20000) { in bnx2x_initial_phy_init()
2359 if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2361 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2363 else if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2365 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2368 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2374 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2375 lp->loopback_mode = LOOPBACK_EXT; in bnx2x_initial_phy_init()
2378 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_initial_phy_init()
2386 if (bp->link_vars.link_up) { in bnx2x_initial_phy_init()
2390 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_initial_phy_init()
2391 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; in bnx2x_initial_phy_init()
2394 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_initial_phy_init()
2395 return -EINVAL; in bnx2x_initial_phy_init()
2402 bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_link_set()
2409 BNX2X_ERR("Bootcode is missing - can not set link\n"); in bnx2x_link_set()
2416 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); in bnx2x__link_reset()
2419 BNX2X_ERR("Bootcode is missing - can not reset link\n"); in bnx2x__link_reset()
2425 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); in bnx2x_force_link_reset()
2435 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, in bnx2x_link_test()
2439 BNX2X_ERR("Bootcode is missing - can not test link\n"); in bnx2x_link_test()
2449 0 - if all the min_rates are 0.
2460 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_min()
2467 /* If min rate is zero - set it to 1 */ in bnx2x_calc_vn_min()
2473 input->vnic_min_rate[vn] = vn_min_rate; in bnx2x_calc_vn_min()
2476 /* if ETS or all min rates are zeros - disable fairness */ in bnx2x_calc_vn_min()
2478 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2480 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); in bnx2x_calc_vn_min()
2482 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2484 DP(NETIF_MSG_IFUP, in bnx2x_calc_vn_min()
2487 input->flags.cmng_enables |= in bnx2x_calc_vn_min()
2495 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_max()
2504 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; in bnx2x_calc_vn_max()
2510 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); in bnx2x_calc_vn_max()
2512 input->vnic_max_rate[vn] = vn_max_rate; in bnx2x_calc_vn_max()
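
Line 2504 above computes each VN's ceiling as a plain percentage of the negotiated link rate: vn_max_rate = line_speed * maxCfg / 100. A tiny standalone check of the arithmetic with illustrative numbers (a 10 Gb/s link and a 25% allocation):

	#include <stdio.h>

	int main(void)
	{
		unsigned int line_speed = 10000;	/* Mb/s, example link rate */
		unsigned int max_cfg = 25;		/* configured max bandwidth, percent */
		unsigned int vn_max_rate = line_speed * max_cfg / 100;

		printf("vn_max_rate = %u Mb/s\n", vn_max_rate);	/* prints 2500 */
		return 0;
	}
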
2549 bp->mf_config[vn] = in bnx2x_read_mf_cfg()
2552 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_read_mf_cfg()
2553 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); in bnx2x_read_mf_cfg()
2554 bp->flags |= MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2556 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); in bnx2x_read_mf_cfg()
2557 bp->flags &= ~MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2566 input.port_rate = bp->link_vars.line_speed; in bnx2x_cmng_fns_init()
2578 /* calculate and set min-max rate for each vn */ in bnx2x_cmng_fns_init()
2579 if (bp->port.pmf) in bnx2x_cmng_fns_init()
2587 bnx2x_init_cmng(&input, &bp->cmng); in bnx2x_cmng_fns_init()
2592 DP(NETIF_MSG_IFUP, in bnx2x_cmng_fns_init()
2606 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); in storm_memset_cmng()
2615 (u32 *)&cmng->vnic.vnic_max_rate[vn]); in storm_memset_cmng()
2621 (u32 *)&cmng->vnic.vnic_min_rate[vn]); in storm_memset_cmng()
2632 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_set_local_cmng()
2635 DP(NETIF_MSG_IFUP, in bnx2x_set_local_cmng()
2646 bnx2x_link_update(&bp->link_params, &bp->link_vars); in bnx2x_link_attn()
2650 if (bp->link_vars.link_up) { in bnx2x_link_attn()
2652 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { in bnx2x_link_attn()
2657 memset(&(pstats->mac_stx[0]), 0, in bnx2x_link_attn()
2660 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_link_attn()
2664 if (bp->link_vars.link_up && bp->link_vars.line_speed) in bnx2x_link_attn()
2675 if (bp->state != BNX2X_STATE_OPEN) in bnx2x__link_status_update()
2681 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); in bnx2x__link_status_update()
2682 if (bp->link_vars.link_up) in bnx2x__link_status_update()
2690 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | in bnx2x__link_status_update()
2702 bp->port.advertising[0] = bp->port.supported[0]; in bnx2x__link_status_update()
2704 bp->link_params.bp = bp; in bnx2x__link_status_update()
2705 bp->link_params.port = BP_PORT(bp); in bnx2x__link_status_update()
2706 bp->link_params.req_duplex[0] = DUPLEX_FULL; in bnx2x__link_status_update()
2707 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2708 bp->link_params.req_line_speed[0] = SPEED_10000; in bnx2x__link_status_update()
2709 bp->link_params.speed_cap_mask[0] = 0x7f0000; in bnx2x__link_status_update()
2710 bp->link_params.switch_cfg = SWITCH_CFG_10G; in bnx2x__link_status_update()
2711 bp->link_vars.mac_type = MAC_TYPE_BMAC; in bnx2x__link_status_update()
2712 bp->link_vars.line_speed = SPEED_10000; in bnx2x__link_status_update()
2713 bp->link_vars.link_status = in bnx2x__link_status_update()
2716 bp->link_vars.link_up = 1; in bnx2x__link_status_update()
2717 bp->link_vars.duplex = DUPLEX_FULL; in bnx2x__link_status_update()
2718 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2739 func_params.f_obj = &bp->func_obj; in bnx2x_afex_func_update()
2746 f_update_params->vif_id = vifid; in bnx2x_afex_func_update()
2747 f_update_params->afex_default_vlan = vlan_val; in bnx2x_afex_func_update()
2748 f_update_params->allowed_priorities = allowed_prio; in bnx2x_afex_func_update()
2771 func_params.f_obj = &bp->func_obj; in bnx2x_afex_handle_vif_list_cmd()
2775 update_params->afex_vif_list_command = cmd_type; in bnx2x_afex_handle_vif_list_cmd()
2776 update_params->vif_list_index = vif_index; in bnx2x_afex_handle_vif_list_cmd()
2777 update_params->func_bit_map = in bnx2x_afex_handle_vif_list_cmd()
2779 update_params->func_to_clear = 0; in bnx2x_afex_handle_vif_list_cmd()
2809 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2817 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2830 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2847 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2848 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2861 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2901 bp->afex_def_vlan_tag = vlan_val; in bnx2x_handle_afex_cmd()
2902 bp->afex_vlan_mode = vlan_mode; in bnx2x_handle_afex_cmd()
2904 /* notify link down because BP->flags is disabled */ in bnx2x_handle_afex_cmd()
2911 bp->afex_def_vlan_tag = -1; in bnx2x_handle_afex_cmd()
2923 func_params.f_obj = &bp->func_obj; in bnx2x_handle_update_svid_cmd()
2934 /* Re-learn the S-tag from shmem */ in bnx2x_handle_update_svid_cmd()
2938 bp->mf_ov = val; in bnx2x_handle_update_svid_cmd()
2944 /* Configure new S-tag in LLH */ in bnx2x_handle_update_svid_cmd()
2946 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2950 &switch_update_params->changes); in bnx2x_handle_update_svid_cmd()
2951 switch_update_params->vlan = bp->mf_ov; in bnx2x_handle_update_svid_cmd()
2954 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", in bnx2x_handle_update_svid_cmd()
2955 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2958 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", in bnx2x_handle_update_svid_cmd()
2959 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2976 bp->port.pmf = 1; in bnx2x_pmf_update()
2977 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); in bnx2x_pmf_update()
2981 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). in bnx2x_pmf_update()
2986 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_pmf_update()
2992 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_pmf_update()
3020 mutex_lock(&bp->fw_mb_mutex); in bnx2x_fw_command()
3021 seq = ++bp->fw_seq; in bnx2x_fw_command()
3025 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", in bnx2x_fw_command()
3037 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", in bnx2x_fw_command()
3049 mutex_unlock(&bp->fw_mb_mutex); in bnx2x_fw_command()
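
The bnx2x_fw_command lines above (3020-3049) show the driver-to-MCP mailbox handshake: take fw_mb_mutex, bump a sequence number, write the parameter and (command | seq) into the shared-memory mailbox, then poll the firmware's response header until its sequence field matches or a retry limit is hit. A simplified model of that flow; the mb_*() helpers, masks and delay are placeholders, not the driver's SHMEM accessors:

	int cnt = 0;
	u32 seq, rc;

	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;

	mb_write_param(bp, param);		/* placeholder for the shmem write */
	mb_write_header(bp, command | seq);	/* placeholder for the shmem write */

	do {
		msleep(10);			/* delay is an assumption */
		rc = mb_read_fw_header(bp);	/* placeholder for the shmem read */
	} while (((rc & SEQ_MASK) != seq) && (cnt++ < 500));

	if ((rc & SEQ_MASK) == seq)
		rc &= CODE_MASK;		/* keep only the response code */
	else
		rc = 0;				/* MCP never answered in time */

	mutex_unlock(&bp->fw_mb_mutex);
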
3071 storm_memset_func_cfg(bp, &tcfg, p->func_id); in bnx2x_func_init()
3075 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); in bnx2x_func_init()
3076 storm_memset_func_en(bp, p->func_id, 1); in bnx2x_func_init()
3079 if (p->spq_active) { in bnx2x_func_init()
3080 storm_memset_spq_addr(bp, p->spq_map, p->func_id); in bnx2x_func_init()
3082 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); in bnx2x_func_init()
3087 * bnx2x_get_common_flags - Return common flags
3093  * Return the flags that are common for the Tx-only and the normal connections.
3113 if (bp->flags & TX_SWITCHING) in bnx2x_get_common_flags()
3138 /* For FCoE - force usage of default priority (for afex) */ in bnx2x_get_q_flags()
3142 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_get_q_flags()
3145 if (fp->mode == TPA_MODE_GRO) in bnx2x_get_q_flags()
3168 gen_init->stat_id = bnx2x_stats_id(fp); in bnx2x_pf_q_prep_general()
3169 gen_init->spcl_id = fp->cl_id; in bnx2x_pf_q_prep_general()
3171 /* Always use mini-jumbo MTU for FCoE L2 ring */ in bnx2x_pf_q_prep_general()
3173 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; in bnx2x_pf_q_prep_general()
3175 gen_init->mtu = bp->dev->mtu; in bnx2x_pf_q_prep_general()
3177 gen_init->cos = cos; in bnx2x_pf_q_prep_general()
3179 gen_init->fp_hsi = ETH_FP_HSI_VERSION; in bnx2x_pf_q_prep_general()
3190 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_pf_rx_q_prep()
3191 pause->sge_th_lo = SGE_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3192 pause->sge_th_hi = SGE_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3195 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3196 pause->sge_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3200 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> in bnx2x_pf_rx_q_prep()
3202 max_sge = ((max_sge + PAGES_PER_SGE - 1) & in bnx2x_pf_rx_q_prep()
3203 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; in bnx2x_pf_rx_q_prep()
3207 /* pause - not for e1 */ in bnx2x_pf_rx_q_prep()
3209 pause->bd_th_lo = BD_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3210 pause->bd_th_hi = BD_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3212 pause->rcq_th_lo = RCQ_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3213 pause->rcq_th_hi = RCQ_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3218 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3219 pause->bd_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3220 bp->rx_ring_size); in bnx2x_pf_rx_q_prep()
3221 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3222 pause->rcq_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3225 pause->pri_map = 1; in bnx2x_pf_rx_q_prep()
3229 rxq_init->dscr_map = fp->rx_desc_mapping; in bnx2x_pf_rx_q_prep()
3230 rxq_init->sge_map = fp->rx_sge_mapping; in bnx2x_pf_rx_q_prep()
3231 rxq_init->rcq_map = fp->rx_comp_mapping; in bnx2x_pf_rx_q_prep()
3232 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; in bnx2x_pf_rx_q_prep()
3237 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - in bnx2x_pf_rx_q_prep()
3238 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; in bnx2x_pf_rx_q_prep()
3240 rxq_init->cl_qzone_id = fp->cl_qzone_id; in bnx2x_pf_rx_q_prep()
3241 rxq_init->tpa_agg_sz = tpa_agg_size; in bnx2x_pf_rx_q_prep()
3242 rxq_init->sge_buf_sz = sge_sz; in bnx2x_pf_rx_q_prep()
3243 rxq_init->max_sges_pkt = max_sge; in bnx2x_pf_rx_q_prep()
3244 rxq_init->rss_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3245 rxq_init->mcast_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3252 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); in bnx2x_pf_rx_q_prep()
3254 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; in bnx2x_pf_rx_q_prep()
3255 rxq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_rx_q_prep()
3258 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3260 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3265 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; in bnx2x_pf_rx_q_prep()
3266 rxq_init->silent_removal_mask = VLAN_VID_MASK; in bnx2x_pf_rx_q_prep()
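
Lines 3200-3203 above use the classic round-up-and-divide idiom: the MTU is converted to a page count, which is then rounded up to a whole number of SGE entries, each covering PAGES_PER_SGE pages (the mask form only works because PAGES_PER_SGE is a power of two). A standalone model of the idiom with illustrative constants:

	#include <stdio.h>

	#define PAGES_PER_SGE_SHIFT	2			/* illustrative value */
	#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)

	/* ceil(pages / PAGES_PER_SGE), written with the mask form from the listing */
	static unsigned int pages_to_sges(unsigned int pages)
	{
		return ((pages + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1))
			>> PAGES_PER_SGE_SHIFT;
	}

	int main(void)
	{
		printf("%u\n", pages_to_sges(1));	/* -> 1 */
		printf("%u\n", pages_to_sges(5));	/* -> 2 */
		return 0;
	}
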
3274 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; in bnx2x_pf_tx_q_prep()
3275 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; in bnx2x_pf_tx_q_prep()
3276 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; in bnx2x_pf_tx_q_prep()
3277 txq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_tx_q_prep()
3283 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_pf_tx_q_prep()
3286 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; in bnx2x_pf_tx_q_prep()
3287 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; in bnx2x_pf_tx_q_prep()
3314 func_init.spq_map = bp->spq_mapping; in bnx2x_pf_init()
3315 func_init.spq_prod = bp->spq_prod_idx; in bnx2x_pf_init()
3319 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); in bnx2x_pf_init()
3325 * re-calculated according to the actual link rate. in bnx2x_pf_init()
3327 bp->link_vars.line_speed = SPEED_10000; in bnx2x_pf_init()
3331 if (bp->port.pmf) in bnx2x_pf_init()
3332 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_pf_init()
3334 /* init Event Queue - PCI bus guarantees correct endianness */ in bnx2x_pf_init()
3335 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); in bnx2x_pf_init()
3336 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); in bnx2x_pf_init()
3337 eq_data.producer = bp->eq_prod; in bnx2x_pf_init()
3359 /* Tx queues should only be re-enabled */ in bnx2x_e1h_enable()
3360 netif_tx_wake_all_queues(bp->dev); in bnx2x_e1h_enable()
3373 &bp->slowpath->drv_info_to_mcp.ether_stat; in bnx2x_drv_info_ether_stat()
3375 &bp->sp_objs->mac_obj; in bnx2x_drv_info_ether_stat()
3378 strlcpy(ether_stat->version, DRV_MODULE_VERSION, in bnx2x_drv_info_ether_stat()
3390 memset(ether_stat->mac_local + i, 0, in bnx2x_drv_info_ether_stat()
3391 sizeof(ether_stat->mac_local[0])); in bnx2x_drv_info_ether_stat()
3392 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, in bnx2x_drv_info_ether_stat()
3394 ether_stat->mac_local + MAC_PAD, MAC_PAD, in bnx2x_drv_info_ether_stat()
3396 ether_stat->mtu_size = bp->dev->mtu; in bnx2x_drv_info_ether_stat()
3397 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_drv_info_ether_stat()
3398 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; in bnx2x_drv_info_ether_stat()
3399 if (bp->dev->features & NETIF_F_TSO) in bnx2x_drv_info_ether_stat()
3400 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; in bnx2x_drv_info_ether_stat()
3401 ether_stat->feature_flags |= bp->common.boot_mode; in bnx2x_drv_info_ether_stat()
3403 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; in bnx2x_drv_info_ether_stat()
3405 ether_stat->txq_size = bp->tx_ring_size; in bnx2x_drv_info_ether_stat()
3406 ether_stat->rxq_size = bp->rx_ring_size; in bnx2x_drv_info_ether_stat()
3409 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; in bnx2x_drv_info_ether_stat()
3415 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_fcoe_stat()
3417 &bp->slowpath->drv_info_to_mcp.fcoe_stat; in bnx2x_drv_info_fcoe_stat()
3422 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); in bnx2x_drv_info_fcoe_stat()
3424 fcoe_stat->qos_priority = in bnx2x_drv_info_fcoe_stat()
3425 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; in bnx2x_drv_info_fcoe_stat()
3430 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3434 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3438 &bp->fw_stats_data->fcoe; in bnx2x_drv_info_fcoe_stat()
3440 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3441 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3442 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3444 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3445 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3446 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3447 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3449 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3450 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3451 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3452 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3454 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3455 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3456 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3457 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3459 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3460 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3461 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3463 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3464 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3465 fcoe_q_tstorm_stats->rcv_ucast_pkts); in bnx2x_drv_info_fcoe_stat()
3467 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3468 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3469 fcoe_q_tstorm_stats->rcv_bcast_pkts); in bnx2x_drv_info_fcoe_stat()
3471 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3472 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3473 fcoe_q_tstorm_stats->rcv_mcast_pkts); in bnx2x_drv_info_fcoe_stat()
3475 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3476 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3477 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3479 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3480 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3481 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3482 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3484 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3485 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3486 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3487 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3489 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3490 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3491 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3492 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3494 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3495 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3496 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3498 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3499 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3500 fcoe_q_xstorm_stats->ucast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3502 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3503 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3504 fcoe_q_xstorm_stats->bcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3506 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3507 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3508 fcoe_q_xstorm_stats->mcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3517 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_iscsi_stat()
3519 &bp->slowpath->drv_info_to_mcp.iscsi_stat; in bnx2x_drv_info_iscsi_stat()
3524 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, in bnx2x_drv_info_iscsi_stat()
3527 iscsi_stat->qos_priority = in bnx2x_drv_info_iscsi_stat()
3528 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; in bnx2x_drv_info_iscsi_stat()
3546 DP(BNX2X_MSG_MCP, in bnx2x_config_mf_bw()
3551 if (bp->link_vars.link_up) { in bnx2x_config_mf_bw()
3555 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_config_mf_bw()
3566 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); in bnx2x_handle_eee_event()
3580 /* if drv_info version supported by MFW doesn't match - send NACK */ in bnx2x_handle_drv_info_req()
3590 mutex_lock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3592 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_handle_drv_info_req()
3606 /* if op code isn't supported - send NACK */ in bnx2x_handle_drv_info_req()
3626 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); in bnx2x_handle_drv_info_req()
3627 } else if (!bp->drv_info_mng_owner) { in bnx2x_handle_drv_info_req()
3645 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); in bnx2x_handle_drv_info_req()
3646 bp->drv_info_mng_owner = true; in bnx2x_handle_drv_info_req()
3650 mutex_unlock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3662 vals[0] -= '0'; in bnx2x_update_mng_version_utility()
3685 mutex_lock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3687 if (bp->drv_info_mng_owner) in bnx2x_update_mng_version()
3690 if (bp->state != BNX2X_STATE_OPEN) in bnx2x_update_mng_version()
3699 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3702 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; in bnx2x_update_mng_version()
3705 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3708 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; in bnx2x_update_mng_version()
3716 mutex_unlock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3718 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", in bnx2x_update_mng_version()
3738 /* Check & notify On-Chip dump. */ in bnx2x_update_mfw_dump()
3742 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); in bnx2x_update_mfw_dump()
3745 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); in bnx2x_update_mfw_dump()
3767 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); in bnx2x_oem_event()
3772 * where the bp->flags can change so it is done without any in bnx2x_oem_event()
3775 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_oem_event()
3776 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); in bnx2x_oem_event()
3777 bp->flags |= MF_FUNC_DIS; in bnx2x_oem_event()
3781 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); in bnx2x_oem_event()
3782 bp->flags &= ~MF_FUNC_DIS; in bnx2x_oem_event()
3807 struct eth_spe *next_spe = bp->spq_prod_bd; in bnx2x_sp_get_next()
3809 if (bp->spq_prod_bd == bp->spq_last_bd) { in bnx2x_sp_get_next()
3810 bp->spq_prod_bd = bp->spq; in bnx2x_sp_get_next()
3811 bp->spq_prod_idx = 0; in bnx2x_sp_get_next()
3812 DP(BNX2X_MSG_SP, "end of spq\n"); in bnx2x_sp_get_next()
3814 bp->spq_prod_bd++; in bnx2x_sp_get_next()
3815 bp->spq_prod_idx++; in bnx2x_sp_get_next()
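
bnx2x_sp_get_next above (lines 3807-3815) hands out the current slow-path queue producer slot and then advances the producer, wrapping back to the first BD when the last one is reached. The same shape as a generic ring-producer helper; the element and field names below are illustrative, not the driver's types:

	struct elem { int payload; };		/* stand-in for the real BD type */

	struct ring {
		struct elem	*base;		/* first slot in the ring */
		struct elem	*last;		/* last slot in the ring */
		struct elem	*prod_bd;	/* next slot to hand out */
		unsigned int	 prod_idx;	/* running producer index */
	};

	static struct elem *ring_get_next(struct ring *r)
	{
		struct elem *next = r->prod_bd;	/* slot returned to the caller */

		if (r->prod_bd == r->last) {	/* wrap at the end of the ring */
			r->prod_bd = r->base;
			r->prod_idx = 0;
		} else {
			r->prod_bd++;
			r->prod_idx++;
		}
		return next;
	}
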
3833 bp->spq_prod_idx); in bnx2x_sp_prod_update()
3837 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3857 * bnx2x_sp_post - place a single command on an SP ring
3878 if (unlikely(bp->panic)) { in bnx2x_sp_post()
3880 return -EIO; in bnx2x_sp_post()
3884 spin_lock_bh(&bp->spq_lock); in bnx2x_sp_post()
3887 if (!atomic_read(&bp->eq_spq_left)) { in bnx2x_sp_post()
3889 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3891 return -EBUSY; in bnx2x_sp_post()
3893 } else if (!atomic_read(&bp->cq_spq_left)) { in bnx2x_sp_post()
3895 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3897 return -EBUSY; in bnx2x_sp_post()
3903 spe->hdr.conn_and_cmd_data = in bnx2x_sp_post()
3907 /* In some cases, type may already contain the func-id in bnx2x_sp_post()
3920 spe->hdr.type = cpu_to_le16(type); in bnx2x_sp_post()
3922 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); in bnx2x_sp_post()
3923 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); in bnx2x_sp_post()
3931 atomic_dec(&bp->eq_spq_left); in bnx2x_sp_post()
3933 atomic_dec(&bp->cq_spq_left); in bnx2x_sp_post()
3935 DP(BNX2X_MSG_SP, in bnx2x_sp_post()
3937 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), in bnx2x_sp_post()
3938 (u32)(U64_LO(bp->spq_mapping) + in bnx2x_sp_post()
3939 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, in bnx2x_sp_post()
3941 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); in bnx2x_sp_post()
3944 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3965 rc = -EBUSY; in bnx2x_acquire_alr()
3982 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_update_dsb_idx()
3986 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { in bnx2x_update_dsb_idx()
3987 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; in bnx2x_update_dsb_idx()
3991 if (bp->def_idx != def_sb->sp_sb.running_index) { in bnx2x_update_dsb_idx()
3992 bp->def_idx = def_sb->sp_sb.running_index; in bnx2x_update_dsb_idx()
4016 if (bp->attn_state & asserted) in bnx2x_attn_int_asserted()
4022 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", in bnx2x_attn_int_asserted()
4025 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_asserted()
4030 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4031 bp->attn_state |= asserted; in bnx2x_attn_int_asserted()
4032 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4054 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4057 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); in bnx2x_attn_int_asserted()
4060 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); in bnx2x_attn_int_asserted()
4063 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4067 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); in bnx2x_attn_int_asserted()
4071 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); in bnx2x_attn_int_asserted()
4075 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); in bnx2x_attn_int_asserted()
4080 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); in bnx2x_attn_int_asserted()
4084 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); in bnx2x_attn_int_asserted()
4088 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); in bnx2x_attn_int_asserted()
4095 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_asserted()
4101 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, in bnx2x_attn_int_asserted()
4102 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_asserted()
4108 * NIG mask. This loop should exit after 2-3 iterations max. in bnx2x_attn_int_asserted()
4110 if (bp->common.int_block != INT_BLOCK_HC) { in bnx2x_attn_int_asserted()
4118 DP(NETIF_MSG_HW, in bnx2x_attn_int_asserted()
4142 …netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card … in bnx2x_fan_failure()
4170 bnx2x_hw_reset_phy(&bp->link_params); in bnx2x_attn_int_deasserted0()
4174 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { in bnx2x_attn_int_deasserted0()
4176 bnx2x_handle_module_detect_int(&bp->link_params); in bnx2x_attn_int_deasserted0()
4238 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4245 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4278 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, in bnx2x_attn_int_deasserted3()
4299 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) in bnx2x_attn_int_deasserted3()
4302 if (bp->port.pmf && in bnx2x_attn_int_deasserted3()
4304 bp->dcbx_enabled > 0) in bnx2x_attn_int_deasserted3()
4318 if (bp->link_vars.periodic_flags & in bnx2x_attn_int_deasserted3()
4322 bp->link_vars.periodic_flags &= in bnx2x_attn_int_deasserted3()
4358 BNX2X_ERR("GRC time-out 0x%08x\n", val); in bnx2x_attn_int_deasserted3()
4371 * 0-7 - Engine0 load counter.
4372 * 8-15 - Engine1 load counter.
4373 * 16 - Engine0 RESET_IN_PROGRESS bit.
4374 * 17 - Engine1 RESET_IN_PROGRESS bit.
4375 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4377 * 19 - Engine1 ONE_IS_LOADED.
4378  * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines
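
The bit layout documented above maps onto straightforward mask-and-shift extraction: engine 0's load counter sits in bits 0-7 and engine 1's in bits 8-15 of the recovery register, which is what the "load mask for engine %d" print at line 4576 reports. A standalone model of the extraction (the register read itself is elided; mask values follow the comment):

	#include <stdint.h>
	#include <stdio.h>

	#define ENGINE0_LOAD_MASK	0x000000ffu	/* bits 0-7  */
	#define ENGINE1_LOAD_MASK	0x0000ff00u	/* bits 8-15 */
	#define ENGINE1_LOAD_SHIFT	8

	static uint32_t engine_load_status(uint32_t glob_reg, int engine)
	{
		if (engine)
			return (glob_reg & ENGINE1_LOAD_MASK) >> ENGINE1_LOAD_SHIFT;
		return glob_reg & ENGINE0_LOAD_MASK;
	}

	int main(void)
	{
		uint32_t val = 0x00040302;	/* example register value */

		printf("engine0 load 0x%x, engine1 load 0x%x\n",
		       engine_load_status(val, 0), engine_load_status(val, 1));
		return 0;
	}
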
4430 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); in bnx2x_reset_is_global()
4503 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_set_pf_load()
4509 val1 |= (1 << bp->pf_num); in bnx2x_set_pf_load()
4522 * bnx2x_clear_pf_load - clear pf load mark
4540 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_clear_pf_load()
4546 val1 &= ~(1 << bp->pf_num); in bnx2x_clear_pf_load()
4572 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); in bnx2x_get_load_status()
4576 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", in bnx2x_get_load_status()
4971 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" in bnx2x_parity_attn()
4984 netdev_err(bp->dev, in bnx2x_parity_attn()
5009 * bnx2x_chk_parity_attn - checks for parity attentions.
5118 bp->recovery_state = BNX2X_RECOVERY_INIT; in bnx2x_attn_int_deasserted()
5119 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_attn_int_deasserted()
5142 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5147 group_mask = &bp->attn_group[index]; in bnx2x_attn_int_deasserted()
5149 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5151 group_mask->sig[0], group_mask->sig[1], in bnx2x_attn_int_deasserted()
5152 group_mask->sig[2], group_mask->sig[3], in bnx2x_attn_int_deasserted()
5153 group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5156 attn.sig[4] & group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5158 attn.sig[3] & group_mask->sig[3]); in bnx2x_attn_int_deasserted()
5160 attn.sig[1] & group_mask->sig[1]); in bnx2x_attn_int_deasserted()
5162 attn.sig[2] & group_mask->sig[2]); in bnx2x_attn_int_deasserted()
5164 attn.sig[0] & group_mask->sig[0]); in bnx2x_attn_int_deasserted()
5170 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_deasserted()
5177 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, in bnx2x_attn_int_deasserted()
5178 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_deasserted()
5181 if (~bp->attn_state & deasserted) in bnx2x_attn_int_deasserted()
5190 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", in bnx2x_attn_int_deasserted()
5193 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_deasserted()
5198 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5199 bp->attn_state &= ~deasserted; in bnx2x_attn_int_deasserted()
5200 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5206 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5208 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5210 u32 attn_state = bp->attn_state; in bnx2x_attn_int()
5216 DP(NETIF_MSG_HW, in bnx2x_attn_int()
5234 u32 igu_addr = bp->igu_base_addr; in bnx2x_igu_ack_sb()
5249 u8 err = elem->message.error; in bnx2x_cnic_handle_cfc_del()
5251 if (!bp->cnic_eth_dev.starting_cid || in bnx2x_cnic_handle_cfc_del()
5252 (cid < bp->cnic_eth_dev.starting_cid && in bnx2x_cnic_handle_cfc_del()
5253 cid != bp->cnic_eth_dev.iscsi_l2_cid)) in bnx2x_cnic_handle_cfc_del()
5256 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); in bnx2x_cnic_handle_cfc_del()
5275 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_handle_mcast_eqe()
5277 netif_addr_lock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5280 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); in bnx2x_handle_mcast_eqe()
5282 /* If there are pending mcast commands - send them */ in bnx2x_handle_mcast_eqe()
5283 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { in bnx2x_handle_mcast_eqe()
5290 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5298 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); in bnx2x_handle_classification_eqe()
5307 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); in bnx2x_handle_classification_eqe()
5309 vlan_mac_obj = &bp->iscsi_l2_mac_obj; in bnx2x_handle_classification_eqe()
5311 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; in bnx2x_handle_classification_eqe()
5315 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); in bnx2x_handle_classification_eqe()
5316 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; in bnx2x_handle_classification_eqe()
5319 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); in bnx2x_handle_classification_eqe()
5330 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); in bnx2x_handle_classification_eqe()
5335 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); in bnx2x_handle_classification_eqe()
5342 netif_addr_lock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5344 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_handle_rx_mode_eqe()
5347 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5350 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5353 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5356 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5362 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { in bnx2x_after_afex_vif_lists()
5363 DP(BNX2X_MSG_SP, in bnx2x_after_afex_vif_lists()
5365 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5367 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5368 } else if (elem->message.data.vif_list_event.echo == in bnx2x_after_afex_vif_lists()
5370 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); in bnx2x_after_afex_vif_lists()
5389 &q_update_params->update_flags); in bnx2x_after_function_update()
5391 &q_update_params->update_flags); in bnx2x_after_function_update()
5395 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { in bnx2x_after_function_update()
5396 q_update_params->silent_removal_value = 0; in bnx2x_after_function_update()
5397 q_update_params->silent_removal_mask = 0; in bnx2x_after_function_update()
5399 q_update_params->silent_removal_value = in bnx2x_after_function_update()
5400 (bp->afex_def_vlan_tag & VLAN_VID_MASK); in bnx2x_after_function_update()
5401 q_update_params->silent_removal_mask = VLAN_VID_MASK; in bnx2x_after_function_update()
5406 fp = &bp->fp[q]; in bnx2x_after_function_update()
5417 fp = &bp->fp[FCOE_IDX(bp)]; in bnx2x_after_function_update()
5425 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_after_function_update()
5434 /* If no FCoE ring - ACK MCP now */ in bnx2x_after_function_update()
5443 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); in bnx2x_cid_to_q_obj()
5448 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; in bnx2x_cid_to_q_obj()
5460 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; in bnx2x_eq_int()
5461 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; in bnx2x_eq_int()
5463 hw_cons = le16_to_cpu(*bp->eq_cons_sb); in bnx2x_eq_int()
5465 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. in bnx2x_eq_int()
5466 * When we get the next-page we need to adjust so the loop in bnx2x_eq_int()
5477 sw_cons = bp->eq_cons; in bnx2x_eq_int()
5478 sw_prod = bp->eq_prod; in bnx2x_eq_int()
5480 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", in bnx2x_eq_int()
5481 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); in bnx2x_eq_int()
5486 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; in bnx2x_eq_int()
5490 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", in bnx2x_eq_int()
5495 opcode = elem->message.opcode; in bnx2x_eq_int()
5501 &elem->message.data.vf_pf_event); in bnx2x_eq_int()
5507 bp->stats_comp++); in bnx2x_eq_int()
5519 cid = SW_CID(elem->message.data.cfc_del_event.cid); in bnx2x_eq_int()
5521 DP(BNX2X_MSG_SP, in bnx2x_eq_int()
5530 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) in bnx2x_eq_int()
5536 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); in bnx2x_eq_int()
5538 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5544 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); in bnx2x_eq_int()
5546 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5552 echo = elem->message.data.function_update_event.echo; in bnx2x_eq_int()
5554 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5556 if (f_obj->complete_cmd( in bnx2x_eq_int()
5563 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, in bnx2x_eq_int()
5565 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5578 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5583 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5585 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) in bnx2x_eq_int()
5591 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5593 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) in bnx2x_eq_int()
5599 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, in bnx2x_eq_int()
5601 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5607 switch (opcode | bp->state) { in bnx2x_eq_int()
5614 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", in bnx2x_eq_int()
5615 SW_CID(elem->message.data.eth_event.echo)); in bnx2x_eq_int()
5616 rss_raw->clear_pending(rss_raw); in bnx2x_eq_int()
5629 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); in bnx2x_eq_int()
5639 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); in bnx2x_eq_int()
5649 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); in bnx2x_eq_int()
5654 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", in bnx2x_eq_int()
5655 elem->message.opcode, bp->state); in bnx2x_eq_int()
5662 atomic_add(spqe_cnt, &bp->eq_spq_left); in bnx2x_eq_int()
5664 bp->eq_cons = sw_cons; in bnx2x_eq_int()
5665 bp->eq_prod = sw_prod; in bnx2x_eq_int()
5670 bnx2x_update_eq_prod(bp, bp->eq_prod); in bnx2x_eq_int()
5677 DP(BNX2X_MSG_SP, "sp task invoked\n"); in bnx2x_sp_task()
5681 if (atomic_read(&bp->interrupt_occurred)) { in bnx2x_sp_task()
5686 DP(BNX2X_MSG_SP, "status %x\n", status); in bnx2x_sp_task()
5687 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); in bnx2x_sp_task()
5688 atomic_set(&bp->interrupt_occurred, 0); in bnx2x_sp_task()
5702 /* Prevent local bottom-halves from running as in bnx2x_sp_task()
5712 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, in bnx2x_sp_task()
5713 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); in bnx2x_sp_task()
5720 DP(BNX2X_MSG_SP, in bnx2x_sp_task()
5724 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, in bnx2x_sp_task()
5725 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); in bnx2x_sp_task()
5728 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ in bnx2x_sp_task()
5730 &bp->sp_state)) { in bnx2x_sp_task()
5741 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, in bnx2x_msix_sp_int()
5745 if (unlikely(bp->panic)) in bnx2x_msix_sp_int()
5753 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_msix_sp_int()
5755 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_msix_sp_int()
5772 bp->fw_drv_pulse_wr_seq); in bnx2x_drv_pulse()
5779 if (!netif_running(bp->dev)) in bnx2x_timer()
5788 ++bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5789 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; in bnx2x_timer()
5790 drv_pulse = bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5800 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) in bnx2x_timer()
5805 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_timer()
5812 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_timer()
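The timer handler above advances the driver pulse counter modulo its sequence mask and treats the MCP as lagging when the masked difference of the two pulses exceeds 5. A standalone sketch of that wrap-safe comparison; the mask value below is an assumption chosen for illustration, not the firmware header constant:

#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff   /* assumed width of the sequence counters */
#define MAX_LAG 5

static int mcp_is_stale(unsigned int drv_pulse, unsigned int mcp_pulse)
{
        /* works across wrap-around because both counters stay within the mask */
        return ((drv_pulse - mcp_pulse) & PULSE_SEQ_MASK) > MAX_LAG;
}

int main(void)
{
        printf("%d\n", mcp_is_stale(3, 0x7ffe));        /* wrapped, lag 5 -> 0 */
        printf("%d\n", mcp_is_stale(10, 2));            /* lag 8 -> 1 */
        return 0;
}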
5834 /* helper: writes FP SP data to FW - data_size in dwords */
5915 hc_sm->igu_sb_id = igu_sb_id; in bnx2x_setup_ndsb_state_machine()
5916 hc_sm->igu_seg_id = igu_seg_id; in bnx2x_setup_ndsb_state_machine()
5917 hc_sm->timer_value = 0xFF; in bnx2x_setup_ndsb_state_machine()
5918 hc_sm->time_to_expire = 0xFFFFFFFF; in bnx2x_setup_ndsb_state_machine()
6004 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); in bnx2x_init_sb()
6006 /* write indices to HW - PCI guarantees endianity of regpairs */ in bnx2x_init_sb()
6028 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_init_def_sb()
6029 dma_addr_t mapping = bp->def_status_blk_mapping; in bnx2x_init_def_sb()
6044 igu_sp_sb_index = bp->igu_dsb_id; in bnx2x_init_def_sb()
6051 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; in bnx2x_init_def_sb()
6053 bp->attn_state = 0; in bnx2x_init_def_sb()
6063 bp->attn_group[index].sig[sindex] = in bnx2x_init_def_sb()
6072 bp->attn_group[index].sig[4] = REG_RD(bp, in bnx2x_init_def_sb()
6075 bp->attn_group[index].sig[4] = 0; in bnx2x_init_def_sb()
6078 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_def_sb()
6106 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); in bnx2x_init_def_sb()
6114 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, in bnx2x_update_coalesce()
6115 bp->tx_ticks, bp->rx_ticks); in bnx2x_update_coalesce()
6120 spin_lock_init(&bp->spq_lock); in bnx2x_init_sp_ring()
6121 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); in bnx2x_init_sp_ring()
6123 bp->spq_prod_idx = 0; in bnx2x_init_sp_ring()
6124 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; in bnx2x_init_sp_ring()
6125 bp->spq_prod_bd = bp->spq; in bnx2x_init_sp_ring()
6126 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; in bnx2x_init_sp_ring()
6134 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; in bnx2x_init_eq_ring()
6136 elem->next_page.addr.hi = in bnx2x_init_eq_ring()
6137 cpu_to_le32(U64_HI(bp->eq_mapping + in bnx2x_init_eq_ring()
6139 elem->next_page.addr.lo = in bnx2x_init_eq_ring()
6140 cpu_to_le32(U64_LO(bp->eq_mapping + in bnx2x_init_eq_ring()
6143 bp->eq_cons = 0; in bnx2x_init_eq_ring()
6144 bp->eq_prod = NUM_EQ_DESC; in bnx2x_init_eq_ring()
6145 bp->eq_cons_sb = BNX2X_EQ_INDEX; in bnx2x_init_eq_ring()
6147 atomic_set(&bp->eq_spq_left, in bnx2x_init_eq_ring()
6148 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); in bnx2x_init_eq_ring()
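The event-queue setup above chains pages by writing the DMA address of the next page into the last element of every page, which is why the consumer/producer arithmetic (see the hw_cons/sw_cons comment earlier in bnx2x_eq_int) has to skip one slot per page. A minimal sketch of that index math, with invented sizes in place of NUM_EQ_DESC/EQ_DESC_CNT_PAGE:

#include <stdio.h>

#define ELEMS_PER_PAGE   256
#define USABLE_PER_PAGE  (ELEMS_PER_PAGE - 1)   /* last slot = next-page link */
#define NUM_PAGES        2
#define RING_SIZE        (ELEMS_PER_PAGE * NUM_PAGES)

/* advance a software index, hopping over the next-page element */
static unsigned int ring_next_idx(unsigned int idx)
{
        idx++;
        if ((idx % ELEMS_PER_PAGE) == USABLE_PER_PAGE)
                idx++;                          /* skip the link element */
        return idx % RING_SIZE;
}

int main(void)
{
        unsigned int idx = USABLE_PER_PAGE - 1;

        idx = ring_next_idx(idx);       /* lands on the first slot of page 1 */
        printf("next index: %u\n", idx);
        return 0;
}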
6166 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; in bnx2x_set_q_rx_mode()
6169 ramrod_param.pstate = &bp->sp_state; in bnx2x_set_q_rx_mode()
6175 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_set_q_rx_mode()
6185 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); in bnx2x_set_q_rx_mode()
6217 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6233 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6264 return -EINVAL; in bnx2x_fill_accept_flags()
6281 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, in bnx2x_set_storm_rx_mode()
6289 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, in bnx2x_set_storm_rx_mode()
6335 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_igu_sb_id()
6340 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_fw_sb_id()
6345 if (CHIP_IS_E1x(fp->bp)) in bnx2x_fp_cl_id()
6346 return BP_L_ID(fp->bp) + fp->index; in bnx2x_fp_cl_id()
6353 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; in bnx2x_init_eth_fp()
6357 fp->rx_queue = fp_idx; in bnx2x_init_eth_fp()
6358 fp->cid = fp_idx; in bnx2x_init_eth_fp()
6359 fp->cl_id = bnx2x_fp_cl_id(fp); in bnx2x_init_eth_fp()
6360 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); in bnx2x_init_eth_fp()
6361 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); in bnx2x_init_eth_fp()
6363 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); in bnx2x_init_eth_fp()
6366 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); in bnx2x_init_eth_fp()
6369 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; in bnx2x_init_eth_fp()
6375 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); in bnx2x_init_eth_fp()
6379 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], in bnx2x_init_eth_fp()
6380 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), in bnx2x_init_eth_fp()
6383 cids[cos] = fp->txdata_ptr[cos]->cid; in bnx2x_init_eth_fp()
6390 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, in bnx2x_init_eth_fp()
6391 fp->fw_sb_id, fp->igu_sb_id); in bnx2x_init_eth_fp()
6393 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, in bnx2x_init_eth_fp()
6394 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_eth_fp()
6402 DP(NETIF_MSG_IFUP, in bnx2x_init_eth_fp()
6404 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_eth_fp()
6405 fp->igu_sb_id); in bnx2x_init_eth_fp()
6414 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; in bnx2x_init_tx_ring_one()
6416 tx_next_bd->addr_hi = in bnx2x_init_tx_ring_one()
6417 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6419 tx_next_bd->addr_lo = in bnx2x_init_tx_ring_one()
6420 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6424 *txdata->tx_cons_sb = cpu_to_le16(0); in bnx2x_init_tx_ring_one()
6426 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); in bnx2x_init_tx_ring_one()
6427 txdata->tx_db.data.zero_fill1 = 0; in bnx2x_init_tx_ring_one()
6428 txdata->tx_db.data.prod = 0; in bnx2x_init_tx_ring_one()
6430 txdata->tx_pkt_prod = 0; in bnx2x_init_tx_ring_one()
6431 txdata->tx_pkt_cons = 0; in bnx2x_init_tx_ring_one()
6432 txdata->tx_bd_prod = 0; in bnx2x_init_tx_ring_one()
6433 txdata->tx_bd_cons = 0; in bnx2x_init_tx_ring_one()
6434 txdata->tx_pkt = 0; in bnx2x_init_tx_ring_one()
6442 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); in bnx2x_init_tx_rings_cnic()
6451 for_each_cos_in_tx_queue(&bp->fp[i], cos) in bnx2x_init_tx_rings()
6452 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); in bnx2x_init_tx_rings()
6465 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; in bnx2x_init_fcoe_fp()
6468 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, in bnx2x_init_fcoe_fp()
6471 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); in bnx2x_init_fcoe_fp()
6483 /* No multi-CoS for FCoE L2 client */ in bnx2x_init_fcoe_fp()
6484 BUG_ON(fp->max_cos != 1); in bnx2x_init_fcoe_fp()
6486 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, in bnx2x_init_fcoe_fp()
6487 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_fcoe_fp()
6490 DP(NETIF_MSG_IFUP, in bnx2x_init_fcoe_fp()
6492 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_fcoe_fp()
6493 fp->igu_sb_id); in bnx2x_init_fcoe_fp()
6501 bnx2x_init_sb(bp, bp->cnic_sb_mapping, in bnx2x_nic_init_cnic()
6529 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, in bnx2x_pre_irq_nic_init()
6530 bp->common.shmem_base, in bnx2x_pre_irq_nic_init()
6531 bp->common.shmem2_base, BP_PORT(bp)); in bnx2x_pre_irq_nic_init()
6563 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, in bnx2x_gunzip_init()
6564 &bp->gunzip_mapping, GFP_KERNEL); in bnx2x_gunzip_init()
6565 if (bp->gunzip_buf == NULL) in bnx2x_gunzip_init()
6568 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); in bnx2x_gunzip_init()
6569 if (bp->strm == NULL) in bnx2x_gunzip_init()
6572 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); in bnx2x_gunzip_init()
6573 if (bp->strm->workspace == NULL) in bnx2x_gunzip_init()
6579 kfree(bp->strm); in bnx2x_gunzip_init()
6580 bp->strm = NULL; in bnx2x_gunzip_init()
6583 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_init()
6584 bp->gunzip_mapping); in bnx2x_gunzip_init()
6585 bp->gunzip_buf = NULL; in bnx2x_gunzip_init()
6588 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); in bnx2x_gunzip_init()
6589 return -ENOMEM; in bnx2x_gunzip_init()
6594 if (bp->strm) { in bnx2x_gunzip_end()
6595 vfree(bp->strm->workspace); in bnx2x_gunzip_end()
6596 kfree(bp->strm); in bnx2x_gunzip_end()
6597 bp->strm = NULL; in bnx2x_gunzip_end()
6600 if (bp->gunzip_buf) { in bnx2x_gunzip_end()
6601 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_end()
6602 bp->gunzip_mapping); in bnx2x_gunzip_end()
6603 bp->gunzip_buf = NULL; in bnx2x_gunzip_end()
6614 return -EINVAL; in bnx2x_gunzip()
6624 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; in bnx2x_gunzip()
6625 bp->strm->avail_in = len - n; in bnx2x_gunzip()
6626 bp->strm->next_out = bp->gunzip_buf; in bnx2x_gunzip()
6627 bp->strm->avail_out = FW_BUF_SIZE; in bnx2x_gunzip()
6629 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); in bnx2x_gunzip()
6633 rc = zlib_inflate(bp->strm, Z_FINISH); in bnx2x_gunzip()
6635 netdev_err(bp->dev, "Firmware decompression error: %s\n", in bnx2x_gunzip()
6636 bp->strm->msg); in bnx2x_gunzip()
6638 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); in bnx2x_gunzip()
6639 if (bp->gunzip_outlen & 0x3) in bnx2x_gunzip()
6640 netdev_err(bp->dev, in bnx2x_gunzip()
6642 bp->gunzip_outlen); in bnx2x_gunzip()
6643 bp->gunzip_outlen >>= 2; in bnx2x_gunzip()
6645 zlib_inflateEnd(bp->strm); in bnx2x_gunzip()
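bnx2x_gunzip above hands the firmware payload to zlib with a negative window size (-MAX_WBITS), i.e. a raw deflate stream with no zlib header, and then derives the output length in dwords from the leftover avail_out. A userspace sketch of the same call pattern using the ordinary zlib API rather than the kernel's zlib_ wrappers; buffer names and the sample blob are illustrative:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static int raw_inflate(const unsigned char *in, size_t in_len,
                       unsigned char *out, size_t out_len, size_t *produced)
{
        z_stream strm;
        int rc;

        memset(&strm, 0, sizeof(strm));
        strm.next_in = (unsigned char *)in;
        strm.avail_in = in_len;
        strm.next_out = out;
        strm.avail_out = out_len;

        rc = inflateInit2(&strm, -MAX_WBITS);   /* raw deflate, no header */
        if (rc != Z_OK)
                return -1;

        rc = inflate(&strm, Z_FINISH);          /* one-shot: whole buffer */
        *produced = out_len - strm.avail_out;   /* bytes actually written */
        inflateEnd(&strm);

        return (rc == Z_STREAM_END) ? 0 : -1;
}

int main(void)
{
        /* a stored (uncompressed) raw-deflate block carrying "hi" */
        const unsigned char blob[] = { 0x01, 0x02, 0x00, 0xfd, 0xff, 'h', 'i' };
        unsigned char out[16];
        size_t produced = 0;

        if (raw_inflate(blob, sizeof(blob), out, sizeof(out), &produced) == 0)
                printf("inflated %zu bytes: %.2s\n", produced, out);
        return 0;
}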
6670 /* NON-IP protocol */ in bnx2x_lb_pckt()
6717 count--; in bnx2x_int_mem_test()
6721 return -1; in bnx2x_int_mem_test()
6732 count--; in bnx2x_int_mem_test()
6736 return -2; in bnx2x_int_mem_test()
6747 DP(NETIF_MSG_HW, "part2\n"); in bnx2x_int_mem_test()
6773 count--; in bnx2x_int_mem_test()
6777 return -3; in bnx2x_int_mem_test()
6801 return -4; in bnx2x_int_mem_test()
6821 DP(NETIF_MSG_HW, "done\n"); in bnx2x_int_mem_test()
6904 bp->dmae_ready = 0; in bnx2x_setup_dmae()
6905 spin_lock_init(&bp->dmae_lock); in bnx2x_setup_dmae()
6913 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); in bnx2x_init_pxp()
6914 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); in bnx2x_init_pxp()
6916 if (bp->mrrs == -1) in bnx2x_init_pxp()
6919 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); in bnx2x_init_pxp()
6920 r_order = bp->mrrs; in bnx2x_init_pxp()
6952 bp->common.shmem_base, in bnx2x_setup_fan_failure_detection()
6953 bp->common.shmem2_base, in bnx2x_setup_fan_failure_detection()
6957 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); in bnx2x_setup_fan_failure_detection()
6993 shmem_base[0] = bp->common.shmem_base; in bnx2x__common_init_phy()
6994 shmem2_base[0] = bp->common.shmem2_base; in bnx2x__common_init_phy()
7003 bp->common.chip_id); in bnx2x__common_init_phy()
7039 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7047 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); in bnx2x_init_hw_common()
7073 * 4-port mode or 2-port mode we need to turn off master-enable in bnx2x_init_hw_common()
7075 * so, we disregard multi-function or not, and always disable in bnx2x_init_hw_common()
7116 return -EBUSY; in bnx2x_init_hw_common()
7121 return -EBUSY; in bnx2x_init_hw_common()
7131 * (i.e. vnic3) to start even if it is marked as "scan-off". in bnx2x_init_hw_common()
7133 * as "scan-off". Real-life scenario for example: if a driver is being in bnx2x_init_hw_common()
7134 * load-unloaded while func6,7 are down. This will cause the timer to access in bnx2x_init_hw_common()
7149 * dmae-operations (writing to pram for example.) in bnx2x_init_hw_common()
7159 * b. Wait 20msec. - note that this timeout is needed to make in bnx2x_init_hw_common()
7190 * PF-s might be dynamic. in bnx2x_init_hw_common()
7199 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_init_hw_common()
7236 } while (factor-- && (val != 1)); in bnx2x_init_hw_common()
7240 return -EBUSY; in bnx2x_init_hw_common()
7249 bp->dmae_ready = 1; in bnx2x_init_hw_common()
7268 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_common()
7289 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); in bnx2x_init_hw_common()
7302 /* Bit-map indicating which L2 hdrs may appear in bnx2x_init_hw_common()
7306 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7354 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7378 dev_alert(&bp->pdev->dev, in bnx2x_init_hw_common()
7422 /* in E3 this is done in the per-port section */ in bnx2x_init_hw_common()
7437 return -EBUSY; in bnx2x_init_hw_common()
7442 return -EBUSY; in bnx2x_init_hw_common()
7447 return -EBUSY; in bnx2x_init_hw_common()
7460 return -EBUSY; in bnx2x_init_hw_common()
7476 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_init_hw_common()
7485 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7496 /* In E2 2-PORT mode, same ext phy is used for the two paths */ in bnx2x_init_hw_common_chip()
7510 DP(NETIF_MSG_HW, "starting port init port %d\n", port); in bnx2x_init_hw_port()
7520 * attempted. Therefore we manually added the enable-master to the in bnx2x_init_hw_port()
7537 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_port()
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); in bnx2x_init_hw_port()
7553 else if (bp->dev->mtu > 4096) { in bnx2x_init_hw_port()
7554 if (bp->flags & ONE_PORT_FLAG) in bnx2x_init_hw_port()
7557 val = bp->dev->mtu; in bnx2x_init_hw_port()
7563 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); in bnx2x_init_hw_port()
7588 /* Ovlan exists only if we are in multi-function + in bnx2x_init_hw_port()
7589 * switch-dependent mode, in switch-independent there in bnx2x_init_hw_port()
7595 (bp->path_has_ovlan ? 7 : 6)); in bnx2x_init_hw_port()
7621 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); in bnx2x_init_hw_port()
7645 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use in bnx2x_init_hw_port()
7646 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF in bnx2x_init_hw_port()
7647 * bits 4-7 are used for "per vn group attention" */ in bnx2x_init_hw_port()
7667 /* Bit-map indicating which L2 hdrs may appear after the in bnx2x_init_hw_port()
7695 switch (bp->mf_mode) { in bnx2x_init_hw_port()
7726 bp->flags |= PTP_SUPPORTED; in bnx2x_init_hw_port()
7769 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7773 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7779 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) in bnx2x_igu_clear_sb_gen()
7783 DP(NETIF_MSG_HW, in bnx2x_igu_clear_sb_gen()
7804 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); in bnx2x_init_searcher()
7820 func_params.f_obj = &bp->func_obj; in bnx2x_func_switch_update()
7825 &switch_update_params->changes); in bnx2x_func_switch_update()
7828 &switch_update_params->changes); in bnx2x_func_switch_update()
7841 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7842 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_reset_nic_mode()
7871 BNX2X_ERR("Can't suspend tx-switching!\n"); in bnx2x_reset_nic_mode()
7879 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7880 bnx2x_set_rx_filter(&bp->link_params, 1); in bnx2x_reset_nic_mode()
7899 BNX2X_ERR("Can't resume tx-switching!\n"); in bnx2x_reset_nic_mode()
7903 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_reset_nic_mode()
7927 /* previous driver DMAE transaction may have occurred when pre-boot stage ended
7929 * the addresses of the transaction, resulting in was-error bit set in the pci
7930 * causing all hw-to-host pcie transactions to timeout. If this happened we want
7952 DP(NETIF_MSG_HW, "starting func init func %d\n", func); in bnx2x_init_hw_func()
7954 /* FLR cleanup - hmmm */ in bnx2x_init_hw_func()
7964 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
7975 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7984 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7986 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; in bnx2x_init_hw_func()
7987 ilt->lines[cdu_ilt_start + i].page_mapping = in bnx2x_init_hw_func()
7988 bp->context[i].cxt_mapping; in bnx2x_init_hw_func()
7989 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; in bnx2x_init_hw_func()
7997 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_init_hw_func()
8001 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); in bnx2x_init_hw_func()
8010 if (!(bp->flags & USING_MSIX_FLAG)) in bnx2x_init_hw_func()
8020 * Master enable - Due to WB DMAE writes performed before this in bnx2x_init_hw_func()
8021 * register is re-initialized as part of the regular function in bnx2x_init_hw_func()
8029 bp->dmae_ready = 1; in bnx2x_init_hw_func()
8089 bp->mf_ov); in bnx2x_init_hw_func()
8096 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
8121 * E2 mode: address 0-135 match to the mapping memory; in bnx2x_init_hw_func()
8122 * 136 - PF0 default prod; 137 - PF1 default prod; in bnx2x_init_hw_func()
8123 * 138 - PF2 default prod; 139 - PF3 default prod; in bnx2x_init_hw_func()
8124 * 140 - PF0 attn prod; 141 - PF1 attn prod; in bnx2x_init_hw_func()
8125 * 142 - PF2 attn prod; 143 - PF3 attn prod; in bnx2x_init_hw_func()
8126 * 144-147 reserved. in bnx2x_init_hw_func()
8128 * E1.5 mode - In backward compatible mode; in bnx2x_init_hw_func()
8132 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 in bnx2x_init_hw_func()
8135 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; in bnx2x_init_hw_func()
8136 * 132-135 C prods; 136-139 X prods; 140-143 T prods; in bnx2x_init_hw_func()
8137 * 144-147 attn prods; in bnx2x_init_hw_func()
8139 /* non-default-status-blocks */ in bnx2x_init_hw_func()
8142 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { in bnx2x_init_hw_func()
8143 prod_offset = (bp->igu_base_sb + sb_idx) * in bnx2x_init_hw_func()
8152 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, in bnx2x_init_hw_func()
8155 bp->igu_base_sb + sb_idx); in bnx2x_init_hw_func()
8158 /* default-status-blocks */ in bnx2x_init_hw_func()
8172 * igu prods come in chunks of E1HVN_MAX (4) - in bnx2x_init_hw_func()
8183 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8185 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8187 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8189 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8191 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8194 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8196 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8199 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); in bnx2x_init_hw_func()
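The long comment above spells out the E2-mode IGU producer layout: entries 0-135 back the NDSB mapping memory, 136-139 are the per-PF default producers and 140-143 the per-PF attention producers. A trivial sketch of that addressing (illustrative helpers, not driver functions):

#include <stdio.h>

static unsigned int igu_e2_def_prod(unsigned int pf)  { return 136 + pf; }
static unsigned int igu_e2_attn_prod(unsigned int pf) { return 140 + pf; }

int main(void)
{
        unsigned int pf;

        for (pf = 0; pf < 4; pf++)
                printf("PF%u: default prod %u, attn prod %u\n",
                       pf, igu_e2_def_prod(pf), igu_e2_attn_prod(pf));
        return 0;
}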
8202 rf-tool supports split-68 const */ in bnx2x_init_hw_func()
8225 DP(NETIF_MSG_HW, in bnx2x_init_hw_func()
8229 /* Clear "false" parity errors in MSI-X table */ in bnx2x_init_hw_func()
8253 bnx2x_phy_probe(&bp->link_params); in bnx2x_init_hw_func()
8263 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8266 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8269 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem_cnic()
8276 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_mem()
8277 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_mem()
8282 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, in bnx2x_free_mem()
8285 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, in bnx2x_free_mem()
8289 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, in bnx2x_free_mem()
8290 bp->context[i].size); in bnx2x_free_mem()
8293 BNX2X_FREE(bp->ilt->lines); in bnx2x_free_mem()
8295 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_free_mem()
8297 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, in bnx2x_free_mem()
8300 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem()
8309 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8311 if (!bp->cnic_sb.e2_sb) in bnx2x_alloc_mem_cnic()
8314 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8316 if (!bp->cnic_sb.e1x_sb) in bnx2x_alloc_mem_cnic()
8320 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem_cnic()
8322 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem_cnic()
8323 if (!bp->t2) in bnx2x_alloc_mem_cnic()
8328 bp->cnic_eth_dev.addr_drv_info_to_mcp = in bnx2x_alloc_mem_cnic()
8329 &bp->slowpath->drv_info_to_mcp; in bnx2x_alloc_mem_cnic()
8339 return -ENOMEM; in bnx2x_alloc_mem_cnic()
8346 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem()
8348 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem()
8349 if (!bp->t2) in bnx2x_alloc_mem()
8353 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, in bnx2x_alloc_mem()
8355 if (!bp->def_status_blk) in bnx2x_alloc_mem()
8358 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, in bnx2x_alloc_mem()
8360 if (!bp->slowpath) in bnx2x_alloc_mem()
8366 * 1. There are multiple entities allocating memory for context - in bnx2x_alloc_mem()
8369 * 2. Since CDU page-size is not a single 4KB page (which is the case in bnx2x_alloc_mem()
8371 * allocation of sub-page-size in the last entry. in bnx2x_alloc_mem()
8379 bp->context[i].size = min(CDU_ILT_PAGE_SZ, in bnx2x_alloc_mem()
8380 (context_size - allocated)); in bnx2x_alloc_mem()
8381 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, in bnx2x_alloc_mem()
8382 bp->context[i].size); in bnx2x_alloc_mem()
8383 if (!bp->context[i].vcxt) in bnx2x_alloc_mem()
8385 allocated += bp->context[i].size; in bnx2x_alloc_mem()
8387 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), in bnx2x_alloc_mem()
8389 if (!bp->ilt->lines) in bnx2x_alloc_mem()
8399 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_alloc_mem()
8400 if (!bp->spq) in bnx2x_alloc_mem()
8404 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, in bnx2x_alloc_mem()
8406 if (!bp->eq_ring) in bnx2x_alloc_mem()
8414 return -ENOMEM; in bnx2x_alloc_mem()
8449 if (rc == -EEXIST) { in bnx2x_set_mac_one()
8450 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_mac_one()
8485 if (rc == -EEXIST) { in bnx2x_set_vlan_one()
8487 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_vlan_one()
8501 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_clear_vlan_info()
8502 vlan->hw = false; in bnx2x_clear_vlan_info()
8504 bp->vlan_cnt = 0; in bnx2x_clear_vlan_info()
8509 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; in bnx2x_del_all_vlans()
8515 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); in bnx2x_del_all_vlans()
8538 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); in bnx2x_del_all_macs()
8550 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); in bnx2x_set_eth_mac()
8552 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8553 &bp->sp_objs->mac_obj, set, in bnx2x_set_eth_mac()
8556 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8557 bp->fp->index, set); in bnx2x_set_eth_mac()
8564 return bnx2x_setup_queue(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8566 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8570 * bnx2x_set_int_mode - configure interrupt mode
8574 * In case of MSI-X it will also try to enable MSI-X.
8582 return -EINVAL; in bnx2x_set_int_mode()
8598 /* failed to enable multiple MSI-X */ in bnx2x_set_int_mode()
8599 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", in bnx2x_set_int_mode()
8600 bp->num_queues, in bnx2x_set_int_mode()
8601 1 + bp->num_cnic_queues); in bnx2x_set_int_mode()
8609 bp->num_ethernet_queues = 1; in bnx2x_set_int_mode()
8610 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_int_mode()
8615 return -EINVAL; in bnx2x_set_int_mode()
8634 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); in bnx2x_ilt_set_info()
8635 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); in bnx2x_ilt_set_info()
8638 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; in bnx2x_ilt_set_info()
8639 ilt_client->client_num = ILT_CLIENT_CDU; in bnx2x_ilt_set_info()
8640 ilt_client->page_size = CDU_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8641 ilt_client->flags = ILT_CLIENT_SKIP_MEM; in bnx2x_ilt_set_info()
8642 ilt_client->start = line; in bnx2x_ilt_set_info()
8647 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8649 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", in bnx2x_ilt_set_info()
8650 ilt_client->start, in bnx2x_ilt_set_info()
8651 ilt_client->end, in bnx2x_ilt_set_info()
8652 ilt_client->page_size, in bnx2x_ilt_set_info()
8653 ilt_client->flags, in bnx2x_ilt_set_info()
8654 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8657 if (QM_INIT(bp->qm_cid_count)) { in bnx2x_ilt_set_info()
8658 ilt_client = &ilt->clients[ILT_CLIENT_QM]; in bnx2x_ilt_set_info()
8659 ilt_client->client_num = ILT_CLIENT_QM; in bnx2x_ilt_set_info()
8660 ilt_client->page_size = QM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8661 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8662 ilt_client->start = line; in bnx2x_ilt_set_info()
8665 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, in bnx2x_ilt_set_info()
8668 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8670 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8672 ilt_client->start, in bnx2x_ilt_set_info()
8673 ilt_client->end, in bnx2x_ilt_set_info()
8674 ilt_client->page_size, in bnx2x_ilt_set_info()
8675 ilt_client->flags, in bnx2x_ilt_set_info()
8676 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8681 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; in bnx2x_ilt_set_info()
8682 ilt_client->client_num = ILT_CLIENT_SRC; in bnx2x_ilt_set_info()
8683 ilt_client->page_size = SRC_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8684 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8685 ilt_client->start = line; in bnx2x_ilt_set_info()
8687 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8689 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8691 ilt_client->start, in bnx2x_ilt_set_info()
8692 ilt_client->end, in bnx2x_ilt_set_info()
8693 ilt_client->page_size, in bnx2x_ilt_set_info()
8694 ilt_client->flags, in bnx2x_ilt_set_info()
8695 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8698 ilt_client = &ilt->clients[ILT_CLIENT_TM]; in bnx2x_ilt_set_info()
8699 ilt_client->client_num = ILT_CLIENT_TM; in bnx2x_ilt_set_info()
8700 ilt_client->page_size = TM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8701 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8702 ilt_client->start = line; in bnx2x_ilt_set_info()
8704 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8706 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8708 ilt_client->start, in bnx2x_ilt_set_info()
8709 ilt_client->end, in bnx2x_ilt_set_info()
8710 ilt_client->page_size, in bnx2x_ilt_set_info()
8711 ilt_client->flags, in bnx2x_ilt_set_info()
8712 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
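bnx2x_ilt_set_info above carves the ILT into per-client line ranges using the same start/advance/end pattern for the CDU, QM, SRC and TM clients. A compact sketch of that bookkeeping with made-up page counts:

#include <stdio.h>

struct ilt_client_sketch {
        int start;
        int end;
};

/* hand 'pages' consecutive lines to a client and return the next free line */
static int ilt_claim(struct ilt_client_sketch *c, int line, int pages)
{
        c->start = line;
        line += pages;
        c->end = line - 1;
        return line;
}

int main(void)
{
        struct ilt_client_sketch cdu, qm;
        int line = 0;

        line = ilt_claim(&cdu, line, 8);
        line = ilt_claim(&qm, line, 4);
        printf("CDU %d-%d, QM %d-%d, next free %d\n",
               cdu.start, cdu.end, qm.start, qm.end, line);
        return 0;
}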
8719 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8726 * - HC configuration
8727 * - Queue's CDU context
8737 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8738 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8743 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8744 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8747 init_params->rx.hc_rate = bp->rx_ticks ? in bnx2x_pf_q_prep_init()
8748 (1000000 / bp->rx_ticks) : 0; in bnx2x_pf_q_prep_init()
8749 init_params->tx.hc_rate = bp->tx_ticks ? in bnx2x_pf_q_prep_init()
8750 (1000000 / bp->tx_ticks) : 0; in bnx2x_pf_q_prep_init()
8753 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = in bnx2x_pf_q_prep_init()
8754 fp->fw_sb_id; in bnx2x_pf_q_prep_init()
8760 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_q_prep_init()
8761 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; in bnx2x_pf_q_prep_init()
8765 init_params->max_cos = fp->max_cos; in bnx2x_pf_q_prep_init()
8767 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", in bnx2x_pf_q_prep_init()
8768 fp->index, init_params->max_cos); in bnx2x_pf_q_prep_init()
8771 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { in bnx2x_pf_q_prep_init()
8772 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; in bnx2x_pf_q_prep_init()
8773 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * in bnx2x_pf_q_prep_init()
8775 init_params->cxts[cos] = in bnx2x_pf_q_prep_init()
8776 &bp->context[cxt_index].vcxt[cxt_offset].eth; in bnx2x_pf_q_prep_init()
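The context lookup above splits a cid into an ILT page index and an offset within that page (cid / ILT_PAGE_CIDS and the remainder). A minimal sketch; the page capacity used here is an assumed value, not the real ILT_PAGE_CIDS:

#include <stdio.h>

#define PAGE_CIDS 1024          /* assumed contexts per ILT page */

int main(void)
{
        unsigned int cid = 2500;
        unsigned int page = cid / PAGE_CIDS;
        unsigned int off  = cid - page * PAGE_CIDS;

        printf("cid %u -> page %u, offset %u\n", cid, page, off);
        return 0;
}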
8788 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; in bnx2x_setup_tx_only()
8790 /* Set tx-only QUEUE flags: don't zero statistics */ in bnx2x_setup_tx_only()
8791 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); in bnx2x_setup_tx_only()
8794 tx_only_params->cid_index = tx_index; in bnx2x_setup_tx_only()
8797 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); in bnx2x_setup_tx_only()
8800 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); in bnx2x_setup_tx_only()
8802 DP(NETIF_MSG_IFUP, in bnx2x_setup_tx_only()
8803 …"preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp in bnx2x_setup_tx_only()
8804 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], in bnx2x_setup_tx_only()
8805 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, in bnx2x_setup_tx_only()
8806 tx_only_params->gen_params.spcl_id, tx_only_params->flags); in bnx2x_setup_tx_only()
8813 * bnx2x_setup_queue - setup queue
8820 * actually: 1) RESET->INIT 2) INIT->SETUP
8834 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); in bnx2x_setup_queue()
8838 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, in bnx2x_setup_queue()
8854 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); in bnx2x_setup_queue()
8858 DP(NETIF_MSG_IFUP, "init complete\n"); in bnx2x_setup_queue()
8864 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); in bnx2x_setup_queue()
8867 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, in bnx2x_setup_queue()
8870 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, in bnx2x_setup_queue()
8871 &setup_params->rxq_params); in bnx2x_setup_queue()
8873 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, in bnx2x_setup_queue()
8880 bp->fcoe_init = true; in bnx2x_setup_queue()
8885 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); in bnx2x_setup_queue()
8889 /* loop through the relevant tx-only indices */ in bnx2x_setup_queue()
8891 tx_index < fp->max_cos; in bnx2x_setup_queue()
8894 /* prepare and send tx-only ramrod*/ in bnx2x_setup_queue()
8899 fp->index, tx_index); in bnx2x_setup_queue()
8909 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_stop_queue()
8914 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); in bnx2x_stop_queue()
8920 /* close tx-only connections */ in bnx2x_stop_queue()
8922 tx_index < fp->max_cos; in bnx2x_stop_queue()
8926 txdata = fp->txdata_ptr[tx_index]; in bnx2x_stop_queue()
8928 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", in bnx2x_stop_queue()
8929 txdata->txq_index); in bnx2x_stop_queue()
8931 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8941 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8987 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_reset_func()
8989 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), in bnx2x_reset_func()
9009 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_reset_func()
9033 /* Timers workaround bug for E2: if this is vnic-3, in bnx2x_reset_func()
9041 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_reset_func()
9051 bp->dmae_ready = 0; in bnx2x_reset_func()
9077 DP(NETIF_MSG_IFDOWN, in bnx2x_reset_port()
9090 func_params.f_obj = &bp->func_obj; in bnx2x_reset_hw()
9105 func_params.f_obj = &bp->func_obj; in bnx2x_func_stop()
9129 * bnx2x_send_unload_req - request unload mode from the MCP.
9145 else if (bp->flags & NO_WOL_FLAG) in bnx2x_send_unload_req()
9148 else if (bp->wol) { in bnx2x_send_unload_req()
9150 u8 *mac_addr = bp->dev->dev_addr; in bnx2x_send_unload_req()
9151 struct pci_dev *pdev = bp->pdev; in bnx2x_send_unload_req()
9155 /* The mac address is written to entries 1-4 to in bnx2x_send_unload_req()
9168 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); in bnx2x_send_unload_req()
9170 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); in bnx2x_send_unload_req()
9183 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9186 bnx2x_load_count[path][0]--; in bnx2x_send_unload_req()
9187 bnx2x_load_count[path][1 + port]--; in bnx2x_send_unload_req()
9188 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9203 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9220 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_func_wait_started()
9222 if (!bp->port.pmf) in bnx2x_func_wait_started()
9229 * 2. Sync SP queue - this guarantees us that attention handling started in bnx2x_func_wait_started()
9233 * pending bit of transaction from STARTED-->TX_STOPPED, if we already in bnx2x_func_wait_started()
9235 * State will return to STARTED after completion of TX_STOPPED-->STARTED in bnx2x_func_wait_started()
9241 synchronize_irq(bp->msix_table[0].vector); in bnx2x_func_wait_started()
9243 synchronize_irq(bp->pdev->irq); in bnx2x_func_wait_started()
9248 while (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9249 BNX2X_F_STATE_STARTED && tout--) in bnx2x_func_wait_started()
9252 if (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9256 return -EBUSY; in bnx2x_func_wait_started()
9264 DP(NETIF_MSG_IFDOWN, in bnx2x_func_wait_started()
9265 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); in bnx2x_func_wait_started()
9267 func_params.f_obj = &bp->func_obj; in bnx2x_func_wait_started()
9271 /* STARTED-->TX_STOPPED */ in bnx2x_func_wait_started()
9275 /* TX_STOPPED-->STARTED */ in bnx2x_func_wait_started()
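bnx2x_func_wait_started above polls the function state with a bounded retry count before forcing the STARTED-->TX_STOPPED-->STARTED round trip. A generic sketch of that bounded-poll idiom; the callback, retry count and delay are all illustrative:

#include <stdio.h>
#include <unistd.h>

/* poll a condition, sleeping between attempts; 0 on success, -1 on timeout */
static int poll_until(int (*done)(void *), void *arg, int tries, int delay_ms)
{
        while (!done(arg) && tries--)
                usleep(delay_ms * 1000);
        return done(arg) ? 0 : -1;
}

static int always_ready(void *arg) { (void)arg; return 1; }

int main(void)
{
        printf("%d\n", poll_until(always_ready, NULL, 50, 20));
        return 0;
}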
9307 /* Called during unload, to stop PTP-related stuff */
9313 cancel_work_sync(&bp->ptp_task); in bnx2x_stop_ptp()
9315 if (bp->ptp_tx_skb) { in bnx2x_stop_ptp()
9316 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_stop_ptp()
9317 bp->ptp_tx_skb = NULL; in bnx2x_stop_ptp()
9323 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); in bnx2x_stop_ptp()
9336 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_chip_cleanup()
9339 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_chip_cleanup()
9350 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, in bnx2x_chip_cleanup()
9356 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, in bnx2x_chip_cleanup()
9381 netif_addr_lock_bh(bp->dev); in bnx2x_chip_cleanup()
9383 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_chip_cleanup()
9384 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_chip_cleanup()
9385 else if (bp->slowpath) in bnx2x_chip_cleanup()
9389 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_chip_cleanup()
9394 netif_addr_unlock_bh(bp->dev); in bnx2x_chip_cleanup()
9438 /* If SP settings didn't get completed so far - something in bnx2x_chip_cleanup()
9460 if (bp->flags & PTP_SUPPORTED) { in bnx2x_chip_cleanup()
9462 if (bp->ptp_clock) { in bnx2x_chip_cleanup()
9463 ptp_clock_unregister(bp->ptp_clock); in bnx2x_chip_cleanup()
9464 bp->ptp_clock = NULL; in bnx2x_chip_cleanup()
9483 if (!pci_channel_offline(bp->pdev)) { in bnx2x_chip_cleanup()
9497 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); in bnx2x_disable_close_the_gate()
9550 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", in bnx2x_set_234_gates()
9565 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9579 * bnx2x_reset_mcp_prep - prepare for MCP reset.
9591 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); in bnx2x_reset_mcp_prep()
9611 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9626 * initializes bp->common.shmem_base and waits for validity signature to appear
9634 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); in bnx2x_init_shmem()
9639 if (bp->common.shmem_base == 0xFFFFFFFF) { in bnx2x_init_shmem()
9640 bp->flags |= NO_MCP_FLAG; in bnx2x_init_shmem()
9641 return -ENODEV; in bnx2x_init_shmem()
9644 if (bp->common.shmem_base) { in bnx2x_init_shmem()
9656 return -ENODEV; in bnx2x_init_shmem()
9680 * - PCIE core
9681 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9683 * - IGU
9684 * - MISC (including AEU)
9685 * - GRC
9686 * - RBCN, RBCP
9731 * - all xxMACs are handled by the bnx2x_link code. in bnx2x_process_kill_chip_reset()
9784 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
9804 } while (cnt-- > 0); in bnx2x_er_poll_igu_vq()
9809 return -EBUSY; in bnx2x_er_poll_igu_vq()
9839 } while (cnt-- > 0); in bnx2x_process_kill()
9846 return -EAGAIN; in bnx2x_process_kill()
9856 return -EAGAIN; in bnx2x_process_kill()
9864 /* Wait for 1ms to empty GLUE and PCI-E core queues, in bnx2x_process_kill()
9889 return -EAGAIN; in bnx2x_process_kill()
9897 * reset state, re-enable attentions. */ in bnx2x_process_kill()
9908 /* if not going to reset MCP - load "fake" driver to reset HW while in bnx2x_leader_reset()
9916 rc = -EAGAIN; in bnx2x_leader_reset()
9922 rc = -EAGAIN; in bnx2x_leader_reset()
9928 rc = -EAGAIN; in bnx2x_leader_reset()
9937 rc = -EAGAIN; in bnx2x_leader_reset()
9956 bp->is_leader = 0; in bnx2x_leader_reset()
9964 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); in bnx2x_recovery_failed()
9967 netif_device_detach(bp->dev); in bnx2x_recovery_failed()
9978 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_recovery_failed()
9986 * will never be called when netif_running(bp->dev) is false.
9995 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { in bnx2x_parity_recover()
9999 vf->state = VF_LOST; in bnx2x_parity_recover()
10002 DP(NETIF_MSG_HW, "Handling parity\n"); in bnx2x_parity_recover()
10004 switch (bp->recovery_state) { in bnx2x_parity_recover()
10006 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); in bnx2x_parity_recover()
10022 bp->is_leader = 1; in bnx2x_parity_recover()
10026 /* If interface has been removed - break */ in bnx2x_parity_recover()
10030 bp->recovery_state = BNX2X_RECOVERY_WAIT; in bnx2x_parity_recover()
10040 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); in bnx2x_parity_recover()
10041 if (bp->is_leader) { in bnx2x_parity_recover()
10062 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10066 /* If all other functions got down - in bnx2x_parity_recover()
10079 * to continue as a non-leader. in bnx2x_parity_recover()
10083 } else { /* non-leader */ in bnx2x_parity_recover()
10095 bp->is_leader = 1; in bnx2x_parity_recover()
10099 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10110 &bp->sp_rtnl_task, in bnx2x_parity_recover()
10116 bp->eth_stats.recoverable_error; in bnx2x_parity_recover()
10118 bp->eth_stats.unrecoverable_error; in bnx2x_parity_recover()
10119 bp->recovery_state = in bnx2x_parity_recover()
10123 netdev_err(bp->dev, in bnx2x_parity_recover()
10126 netif_device_detach(bp->dev); in bnx2x_parity_recover()
10132 bp->recovery_state = in bnx2x_parity_recover()
10137 bp->eth_stats.recoverable_error = in bnx2x_parity_recover()
10139 bp->eth_stats.unrecoverable_error = in bnx2x_parity_recover()
10164 func_params.f_obj = &bp->func_obj; in bnx2x_udp_port_update()
10169 &switch_update_params->changes); in bnx2x_udp_port_update()
10171 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { in bnx2x_udp_port_update()
10172 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; in bnx2x_udp_port_update()
10173 switch_update_params->geneve_dst_port = geneve_port; in bnx2x_udp_port_update()
10176 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { in bnx2x_udp_port_update()
10177 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; in bnx2x_udp_port_update()
10178 switch_update_params->vxlan_dst_port = vxlan_port; in bnx2x_udp_port_update()
10181 /* Re-enable inner-rss for the offloaded UDP tunnels */ in bnx2x_udp_port_update()
10183 &switch_update_params->changes); in bnx2x_udp_port_update()
10190 DP(BNX2X_MSG_SP, in bnx2x_udp_port_update()
10203 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); in bnx2x_udp_tunnel_sync()
10229 if (!netif_running(bp->dev)) { in bnx2x_sp_rtnl_task()
10234 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { in bnx2x_sp_rtnl_task()
10244 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10253 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10264 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10268 bp->link_vars.link_up = 0; in bnx2x_sp_rtnl_task()
10269 bp->force_link_down = true; in bnx2x_sp_rtnl_task()
10270 netif_carrier_off(bp->dev); in bnx2x_sp_rtnl_task()
10271 BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); in bnx2x_sp_rtnl_task()
10278 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { in bnx2x_sp_rtnl_task()
10289 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10290 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); in bnx2x_sp_rtnl_task()
10291 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10298 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10299 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); in bnx2x_sp_rtnl_task()
10300 netif_device_detach(bp->dev); in bnx2x_sp_rtnl_task()
10301 bnx2x_close(bp->dev); in bnx2x_sp_rtnl_task()
10306 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10307 DP(BNX2X_MSG_SP, in bnx2x_sp_rtnl_task()
10308 "sending set mcast vf pf channel message from rtnl sp-task\n"); in bnx2x_sp_rtnl_task()
10309 bnx2x_vfpf_set_mcast(bp->dev); in bnx2x_sp_rtnl_task()
10312 &bp->sp_rtnl_state)){ in bnx2x_sp_rtnl_task()
10313 if (netif_carrier_ok(bp->dev)) { in bnx2x_sp_rtnl_task()
10319 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10320 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); in bnx2x_sp_rtnl_task()
10325 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10328 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10334 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10337 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10340 /* work which needs rtnl lock not-taken (as it takes the lock itself and in bnx2x_sp_rtnl_task()
10345 /* enable SR-IOV if applicable */ in bnx2x_sp_rtnl_task()
10347 &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10357 if (!netif_running(bp->dev)) in bnx2x_period_task()
10368 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and in bnx2x_period_task()
10372 if (bp->port.pmf) { in bnx2x_period_task()
10373 bnx2x_period_func(&bp->link_params, &bp->link_vars); in bnx2x_period_task()
10375 /* Re-queue task in 1 sec */ in bnx2x_period_task()
10376 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); in bnx2x_period_task()
10391 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; in bnx2x_get_pretend_reg()
10407 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; in bnx2x_prev_unload_close_umac()
10408 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); in bnx2x_prev_unload_close_umac()
10409 REG_WR(bp, vals->umac_addr[port], 0); in bnx2x_prev_unload_close_umac()
10445 vals->bmac_addr = base_addr + offset; in bnx2x_prev_unload_close_mac()
10446 vals->bmac_val[0] = wb_data[0]; in bnx2x_prev_unload_close_mac()
10447 vals->bmac_val[1] = wb_data[1]; in bnx2x_prev_unload_close_mac()
10449 REG_WR(bp, vals->bmac_addr, wb_data[0]); in bnx2x_prev_unload_close_mac()
10450 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); in bnx2x_prev_unload_close_mac()
10453 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; in bnx2x_prev_unload_close_mac()
10454 vals->emac_val = REG_RD(bp, vals->emac_addr); in bnx2x_prev_unload_close_mac()
10455 REG_WR(bp, vals->emac_addr, 0); in bnx2x_prev_unload_close_mac()
10466 vals->xmac_addr = base_addr + XMAC_REG_CTRL; in bnx2x_prev_unload_close_mac()
10467 vals->xmac_val = REG_RD(bp, vals->xmac_addr); in bnx2x_prev_unload_close_mac()
10468 REG_WR(bp, vals->xmac_addr, 0); in bnx2x_prev_unload_close_mac()
10495 /* UNDI marks its presence in DORQ - in bnx2x_prev_is_after_undi()
10518 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); in bnx2x_prev_unload_undi_inc()
10527 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", in bnx2x_prev_unload_undi_inc()
10537 return -EBUSY; in bnx2x_prev_mcp_done()
10549 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && in bnx2x_prev_path_get_entry()
10550 bp->pdev->bus->number == tmp_list->bus && in bnx2x_prev_path_get_entry()
10551 BP_PATH(bp) == tmp_list->path) in bnx2x_prev_path_get_entry()
10570 tmp_list->aer = 1; in bnx2x_prev_path_mark_eeh()
10592 if (tmp_list->aer) { in bnx2x_prev_is_path_marked()
10593 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", in bnx2x_prev_is_path_marked()
10615 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); in bnx2x_port_after_undi()
10636 if (!tmp_list->aer) { in bnx2x_prev_mark_path()
10637 BNX2X_ERR("Re-Marking the path.\n"); in bnx2x_prev_mark_path()
10639 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", in bnx2x_prev_mark_path()
10641 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10652 return -ENOMEM; in bnx2x_prev_mark_path()
10655 tmp_list->bus = bp->pdev->bus->number; in bnx2x_prev_mark_path()
10656 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); in bnx2x_prev_mark_path()
10657 tmp_list->path = BP_PATH(bp); in bnx2x_prev_mark_path()
10658 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10659 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; in bnx2x_prev_mark_path()
10666 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", in bnx2x_prev_mark_path()
10668 list_add(&tmp_list->list, &bnx2x_prev_list); in bnx2x_prev_mark_path()
10677 struct pci_dev *dev = bp->pdev; in bnx2x_do_flr()
10681 return -EINVAL; in bnx2x_do_flr()
10685 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { in bnx2x_do_flr()
10687 bp->common.bc_ver); in bnx2x_do_flr()
10688 return -EINVAL; in bnx2x_do_flr()
10692 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); in bnx2x_do_flr()
10772 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10773 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10774 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10775 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10804 timer_count--; in bnx2x_prev_unload_common()
10856 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); in bnx2x_prev_unload()
10882 rc = -EBUSY; in bnx2x_prev_unload()
10893 bnx2x_prev_path_get_entry(bp)->aer); in bnx2x_prev_unload()
10902 /* non-common reply from MCP might require looping */ in bnx2x_prev_unload()
10908 } while (--time_counter); in bnx2x_prev_unload()
10912 rc = -EPROBE_DEFER; in bnx2x_prev_unload()
10917 bp->link_params.feature_config_flags |= in bnx2x_prev_unload()
10931 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ in bnx2x_get_common_hwinfo()
10944 bp->common.chip_id = id; in bnx2x_get_common_hwinfo()
10949 bp->common.chip_id = (CHIP_NUM_57811 << 16) | in bnx2x_get_common_hwinfo()
10950 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10952 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | in bnx2x_get_common_hwinfo()
10953 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10954 bp->common.chip_id |= 0x1; in bnx2x_get_common_hwinfo()
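Per the field map in the comment above (chip num in bits 16-31, rev 12-15, metal 4-11, bond_id 0-3), the chip id can be unpacked with plain shifts and masks. A one-liner sketch using an arbitrary example value:

#include <stdio.h>

int main(void)
{
        unsigned int id = 0x168e1234;   /* arbitrary example value */

        printf("num 0x%x rev 0x%x metal 0x%x bond 0x%x\n",
               id >> 16, (id >> 12) & 0xf, (id >> 4) & 0xff, id & 0xf);
        return 0;
}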
10958 bp->db_size = (1 << BNX2X_DB_SHIFT); in bnx2x_get_common_hwinfo()
10968 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : in bnx2x_get_common_hwinfo()
10972 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ in bnx2x_get_common_hwinfo()
10974 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ in bnx2x_get_common_hwinfo()
10976 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ in bnx2x_get_common_hwinfo()
10977 bp->pfid = bp->pf_num; /* 0..7 */ in bnx2x_get_common_hwinfo()
10980 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); in bnx2x_get_common_hwinfo()
10982 bp->link_params.chip_id = bp->common.chip_id; in bnx2x_get_common_hwinfo()
10986 if ((bp->common.chip_id & 0x1) || in bnx2x_get_common_hwinfo()
10988 bp->flags |= ONE_PORT_FLAG; in bnx2x_get_common_hwinfo()
10993 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << in bnx2x_get_common_hwinfo()
10996 bp->common.flash_size, bp->common.flash_size); in bnx2x_get_common_hwinfo()
11000 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? in bnx2x_get_common_hwinfo()
11004 bp->link_params.shmem_base = bp->common.shmem_base; in bnx2x_get_common_hwinfo()
11005 bp->link_params.shmem2_base = bp->common.shmem2_base; in bnx2x_get_common_hwinfo()
11008 bp->link_params.lfa_base = in bnx2x_get_common_hwinfo()
11009 REG_RD(bp, bp->common.shmem2_base + in bnx2x_get_common_hwinfo()
11013 bp->link_params.lfa_base = 0; in bnx2x_get_common_hwinfo()
11015 bp->common.shmem_base, bp->common.shmem2_base); in bnx2x_get_common_hwinfo()
11017 if (!bp->common.shmem_base) { in bnx2x_get_common_hwinfo()
11019 bp->flags |= NO_MCP_FLAG; in bnx2x_get_common_hwinfo()
11023 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); in bnx2x_get_common_hwinfo()
11024 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); in bnx2x_get_common_hwinfo()
11026 bp->link_params.hw_led_mode = ((bp->common.hw_config & in bnx2x_get_common_hwinfo()
11030 bp->link_params.feature_config_flags = 0; in bnx2x_get_common_hwinfo()
11033 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11036 bp->link_params.feature_config_flags &= in bnx2x_get_common_hwinfo()
11040 bp->common.bc_ver = val; in bnx2x_get_common_hwinfo()
11048 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11052 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11055 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11058 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11062 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11066 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? in bnx2x_get_common_hwinfo()
11069 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? in bnx2x_get_common_hwinfo()
11072 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? in bnx2x_get_common_hwinfo()
11075 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? in bnx2x_get_common_hwinfo()
11083 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; in bnx2x_get_common_hwinfo()
11086 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; in bnx2x_get_common_hwinfo()
11089 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; in bnx2x_get_common_hwinfo()
11092 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; in bnx2x_get_common_hwinfo()
11096 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); in bnx2x_get_common_hwinfo()
11097 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; in bnx2x_get_common_hwinfo()
11100 (bp->flags & NO_WOL_FLAG) ? "not " : ""); in bnx2x_get_common_hwinfo()
11107 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", in bnx2x_get_common_hwinfo()
11121 bp->igu_base_sb = 0xff; in bnx2x_get_igu_cam_info()
11124 igu_sb_cnt = bp->igu_sb_cnt; in bnx2x_get_igu_cam_info()
11125 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * in bnx2x_get_igu_cam_info()
11128 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + in bnx2x_get_igu_cam_info()
11134 /* IGU in normal mode - read CAM */ in bnx2x_get_igu_cam_info()
11146 bp->igu_dsb_id = igu_sb_id; in bnx2x_get_igu_cam_info()
11148 if (bp->igu_base_sb == 0xff) in bnx2x_get_igu_cam_info()
11149 bp->igu_base_sb = igu_sb_id; in bnx2x_get_igu_cam_info()
11162 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); in bnx2x_get_igu_cam_info()
11167 return -EINVAL; in bnx2x_get_igu_cam_info()
11178 bp->port.supported[0] = 0; in bnx2x_link_settings_supported()
11179 bp->port.supported[1] = 0; in bnx2x_link_settings_supported()
11180 switch (bp->link_params.num_phys) { in bnx2x_link_settings_supported()
11182 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; in bnx2x_link_settings_supported()
11186 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11190 if (bp->link_params.multi_phy_config & in bnx2x_link_settings_supported()
11192 bp->port.supported[1] = in bnx2x_link_settings_supported()
11193 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11194 bp->port.supported[0] = in bnx2x_link_settings_supported()
11195 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11197 bp->port.supported[0] = in bnx2x_link_settings_supported()
11198 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11199 bp->port.supported[1] = in bnx2x_link_settings_supported()
11200 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11206 if (!(bp->port.supported[0] || bp->port.supported[1])) { in bnx2x_link_settings_supported()
11207 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", in bnx2x_link_settings_supported()
11216 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); in bnx2x_link_settings_supported()
11220 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11224 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11229 bp->port.link_config[0]); in bnx2x_link_settings_supported()
11233 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); in bnx2x_link_settings_supported()
11236 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11238 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; in bnx2x_link_settings_supported()
11240 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11242 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; in bnx2x_link_settings_supported()
11244 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11246 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; in bnx2x_link_settings_supported()
11248 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11250 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; in bnx2x_link_settings_supported()
11252 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11254 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | in bnx2x_link_settings_supported()
11257 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11259 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; in bnx2x_link_settings_supported()
11261 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11263 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; in bnx2x_link_settings_supported()
11265 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11267 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; in bnx2x_link_settings_supported()
11270 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], in bnx2x_link_settings_supported()
11271 bp->port.supported[1]); in bnx2x_link_settings_supported()
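Each SUPPORTED_* link mode is stripped when the matching NVRAM speed-capability bit is clear, and the same pattern repeats for every speed. A sketch of one case, with CAP_SPEED_1G standing in for the driver's real PORT_HW_CFG_* mask bit (it is not the actual constant):

#include <linux/bits.h>
#include <linux/ethtool.h>

#define CAP_SPEED_1G	BIT(3)	/* illustrative; not the driver's NVRAM bit */

static void prune_unsupported_speeds(u32 *supported, u32 speed_cap_mask)
{
	if (!(speed_cap_mask & CAP_SPEED_1G))
		*supported &= ~(SUPPORTED_1000baseT_Half |
				SUPPORTED_1000baseT_Full);
}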
11277 bp->port.advertising[0] = 0; in bnx2x_link_settings_requested()
11278 bp->port.advertising[1] = 0; in bnx2x_link_settings_requested()
11279 switch (bp->link_params.num_phys) { in bnx2x_link_settings_requested()
11289 bp->link_params.req_duplex[idx] = DUPLEX_FULL; in bnx2x_link_settings_requested()
11290 link_config = bp->port.link_config[idx]; in bnx2x_link_settings_requested()
11293 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { in bnx2x_link_settings_requested()
11294 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11296 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11297 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11298 if (bp->link_params.phy[EXT_PHY1].type == in bnx2x_link_settings_requested()
11300 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11305 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11307 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11315 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { in bnx2x_link_settings_requested()
11316 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11318 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11324 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11330 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { in bnx2x_link_settings_requested()
11331 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11333 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11335 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11341 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11347 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11349 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11351 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11357 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11363 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11365 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11367 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11369 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11375 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11381 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11383 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11385 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11388 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11390 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11392 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11397 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11403 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11405 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11407 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11413 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11419 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11421 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11423 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11426 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11428 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11430 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11436 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11441 bp->link_params.req_line_speed[idx] = SPEED_20000; in bnx2x_link_settings_requested()
11447 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11449 bp->port.advertising[idx] = in bnx2x_link_settings_requested()
11450 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11454 bp->link_params.req_flow_ctrl[idx] = (link_config & in bnx2x_link_settings_requested()
11456 if (bp->link_params.req_flow_ctrl[idx] == in bnx2x_link_settings_requested()
11458 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) in bnx2x_link_settings_requested()
11459 bp->link_params.req_flow_ctrl[idx] = in bnx2x_link_settings_requested()
11466 bp->link_params.req_line_speed[idx], in bnx2x_link_settings_requested()
11467 bp->link_params.req_duplex[idx], in bnx2x_link_settings_requested()
11468 bp->link_params.req_flow_ctrl[idx], in bnx2x_link_settings_requested()
11469 bp->port.advertising[idx]); in bnx2x_link_settings_requested()
11487 bp->link_params.bp = bp; in bnx2x_get_port_hwinfo()
11488 bp->link_params.port = port; in bnx2x_get_port_hwinfo()
11490 bp->link_params.lane_config = in bnx2x_get_port_hwinfo()
11493 bp->link_params.speed_cap_mask[0] = in bnx2x_get_port_hwinfo()
11497 bp->link_params.speed_cap_mask[1] = in bnx2x_get_port_hwinfo()
11501 bp->port.link_config[0] = in bnx2x_get_port_hwinfo()
11504 bp->port.link_config[1] = in bnx2x_get_port_hwinfo()
11507 bp->link_params.multi_phy_config = in bnx2x_get_port_hwinfo()
11513 bp->wol = (!(bp->flags & NO_WOL_FLAG) && in bnx2x_get_port_hwinfo()
11518 bp->flags |= NO_ISCSI_FLAG; in bnx2x_get_port_hwinfo()
11521 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_port_hwinfo()
11524 bp->link_params.lane_config, in bnx2x_get_port_hwinfo()
11525 bp->link_params.speed_cap_mask[0], in bnx2x_get_port_hwinfo()
11526 bp->port.link_config[0]); in bnx2x_get_port_hwinfo()
11528 bp->link_params.switch_cfg = (bp->port.link_config[0] & in bnx2x_get_port_hwinfo()
11530 bnx2x_phy_probe(&bp->link_params); in bnx2x_get_port_hwinfo()
11531 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); in bnx2x_get_port_hwinfo()
11544 bp->mdio.prtad = bp->port.phy_addr; in bnx2x_get_port_hwinfo()
11548 bp->mdio.prtad = in bnx2x_get_port_hwinfo()
11557 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | in bnx2x_get_port_hwinfo()
11561 bp->link_params.eee_mode = 0; in bnx2x_get_port_hwinfo()
11573 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11578 bp->cnic_eth_dev.max_iscsi_conn = in bnx2x_get_iscsi_info()
11583 bp->cnic_eth_dev.max_iscsi_conn); in bnx2x_get_iscsi_info()
11586 * If maximum allowed number of connections is zero - in bnx2x_get_iscsi_info()
11589 if (!bp->cnic_eth_dev.max_iscsi_conn) in bnx2x_get_iscsi_info()
11590 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11596 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_ext_wwn_info()
11598 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_ext_wwn_info()
11602 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_ext_wwn_info()
11604 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_ext_wwn_info()
11659 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11664 bp->cnic_eth_dev.max_fcoe_conn = in bnx2x_get_fcoe_info()
11669 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; in bnx2x_get_fcoe_info()
11673 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; in bnx2x_get_fcoe_info()
11678 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_fcoe_info()
11682 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_fcoe_info()
11688 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_fcoe_info()
11692 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_fcoe_info()
11707 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); in bnx2x_get_fcoe_info()
11710 * If maximum allowed number of connections is zero - in bnx2x_get_fcoe_info()
11713 if (!bp->cnic_eth_dev.max_fcoe_conn) { in bnx2x_get_fcoe_info()
11714 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11715 eth_zero_addr(bp->fip_mac); in bnx2x_get_fcoe_info()
11735 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; in bnx2x_get_cnic_mac_hwinfo()
11736 u8 *fip_mac = bp->fip_mac; in bnx2x_get_cnic_mac_hwinfo()
11755 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11767 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11770 bp->mf_ext_config = cfg; in bnx2x_get_cnic_mac_hwinfo()
11775 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11782 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11789 /* If this is a storage-only interface, use SAN mac as in bnx2x_get_cnic_mac_hwinfo()
11794 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11811 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11817 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11818 eth_zero_addr(bp->fip_mac); in bnx2x_get_cnic_mac_hwinfo()
11829 eth_zero_addr(bp->dev->dev_addr); in bnx2x_get_mac_hwinfo()
11833 eth_hw_addr_random(bp->dev); in bnx2x_get_mac_hwinfo()
11839 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); in bnx2x_get_mac_hwinfo()
11847 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); in bnx2x_get_mac_hwinfo()
11857 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); in bnx2x_get_mac_hwinfo()
11858 bp->flags |= HAS_PHYS_PORT_ID; in bnx2x_get_mac_hwinfo()
11861 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_mac_hwinfo()
11863 if (!is_valid_ether_addr(bp->dev->dev_addr)) in bnx2x_get_mac_hwinfo()
11864 dev_err(&bp->pdev->dev, in bnx2x_get_mac_hwinfo()
11867 bp->dev->dev_addr); in bnx2x_get_mac_hwinfo()
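The two SHMEM words read here (mac_upper / mac_lower) are packed big-endian into the six dev_addr bytes. A sketch of that packing, assuming the upper word carries the top 16 bits and the lower word the remaining 32; example_set_mac_buf is illustrative, not the driver's helper:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void example_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	__be16 hi = cpu_to_be16(mac_hi);
	__be32 lo = cpu_to_be32(mac_lo);

	memcpy(mac_buf, &hi, sizeof(hi));		/* bytes 0-1 */
	memcpy(mac_buf + sizeof(hi), &lo, sizeof(lo));	/* bytes 2-5 */
}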
11902 bp->mf_mode = MULTI_FUNCTION_SI; in validate_set_si_mode()
11903 bp->mf_config[BP_VN(bp)] = in validate_set_si_mode()
11918 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11920 return -EINVAL; in bnx2x_get_hwinfo()
11929 bp->common.int_block = INT_BLOCK_HC; in bnx2x_get_hwinfo()
11931 bp->igu_dsb_id = DEF_SB_IGU_ID; in bnx2x_get_hwinfo()
11932 bp->igu_base_sb = 0; in bnx2x_get_hwinfo()
11934 bp->common.int_block = INT_BLOCK_IGU; in bnx2x_get_hwinfo()
11951 tout--; in bnx2x_get_hwinfo()
11956 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11960 return -EPERM; in bnx2x_get_hwinfo()
11966 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; in bnx2x_get_hwinfo()
11977 * set base FW non-default (fast path) status block id, this value is in bnx2x_get_hwinfo()
11982 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); in bnx2x_get_hwinfo()
11984 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of in bnx2x_get_hwinfo()
11988 bp->base_fw_ndsb = bp->igu_base_sb; in bnx2x_get_hwinfo()
11991 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, in bnx2x_get_hwinfo()
11992 bp->igu_sb_cnt, bp->base_fw_ndsb); in bnx2x_get_hwinfo()
11997 bp->mf_ov = 0; in bnx2x_get_hwinfo()
11998 bp->mf_mode = 0; in bnx2x_get_hwinfo()
11999 bp->mf_sub_mode = 0; in bnx2x_get_hwinfo()
12004 bp->common.shmem2_base, SHMEM2_RD(bp, size), in bnx2x_get_hwinfo()
12008 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); in bnx2x_get_hwinfo()
12010 bp->common.mf_cfg_base = bp->common.shmem_base + in bnx2x_get_hwinfo()
12017 * for Switch-Independent mode; in bnx2x_get_hwinfo()
12018 * OVLAN must be legal for Switch-Dependent mode in bnx2x_get_hwinfo()
12021 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12037 bp->mf_mode = MULTI_FUNCTION_AFEX; in bnx2x_get_hwinfo()
12038 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12051 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12052 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12058 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12059 bp->mf_sub_mode = SUB_MF_MODE_BD; in bnx2x_get_hwinfo()
12060 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12071 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", in bnx2x_get_hwinfo()
12078 bp->dev->mtu = mtu_size; in bnx2x_get_hwinfo()
12082 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12083 bp->mf_sub_mode = SUB_MF_MODE_UFP; in bnx2x_get_hwinfo()
12084 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12089 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12098 bp->mf_sub_mode = in bnx2x_get_hwinfo()
12103 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12110 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12118 switch (bp->mf_mode) { in bnx2x_get_hwinfo()
12123 bp->mf_ov = val; in bnx2x_get_hwinfo()
12124 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12127 func, bp->mf_ov, bp->mf_ov); in bnx2x_get_hwinfo()
12128 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || in bnx2x_get_hwinfo()
12129 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { in bnx2x_get_hwinfo()
12130 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12131 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", in bnx2x_get_hwinfo()
12133 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12135 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12138 return -EPERM; in bnx2x_get_hwinfo()
12145 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", in bnx2x_get_hwinfo()
12150 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12153 return -EPERM; in bnx2x_get_hwinfo()
12164 !bp->path_has_ovlan && in bnx2x_get_hwinfo()
12166 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12172 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12178 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); in bnx2x_get_hwinfo()
12201 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); in bnx2x_read_fwinfo()
12202 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); in bnx2x_read_fwinfo()
12227 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, in bnx2x_read_fwinfo()
12228 block_end - BNX2X_VPD_LEN, in bnx2x_read_fwinfo()
12230 if (cnt < (block_end - BNX2X_VPD_LEN)) in bnx2x_read_fwinfo()
12264 memcpy(bp->fw_ver, &vpd_data[rodi], len); in bnx2x_read_fwinfo()
12265 bp->fw_ver[len] = ' '; in bnx2x_read_fwinfo()
12304 switch (bp->mf_mode) { in bnx2x_set_modes_bitmap()
12331 mutex_init(&bp->port.phy_mutex); in bnx2x_init_bp()
12332 mutex_init(&bp->fw_mb_mutex); in bnx2x_init_bp()
12333 mutex_init(&bp->drv_info_mutex); in bnx2x_init_bp()
12334 sema_init(&bp->stats_lock, 1); in bnx2x_init_bp()
12335 bp->drv_info_mng_owner = false; in bnx2x_init_bp()
12336 INIT_LIST_HEAD(&bp->vlan_reg); in bnx2x_init_bp()
12338 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); in bnx2x_init_bp()
12339 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); in bnx2x_init_bp()
12340 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); in bnx2x_init_bp()
12341 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); in bnx2x_init_bp()
12347 eth_zero_addr(bp->dev->dev_addr); in bnx2x_init_bp()
12363 bp->fw_seq = in bnx2x_init_bp()
12366 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_init_bp()
12376 dev_err(&bp->pdev->dev, "FPGA detected\n"); in bnx2x_init_bp()
12379 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); in bnx2x_init_bp()
12381 bp->disable_tpa = disable_tpa; in bnx2x_init_bp()
12382 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); in bnx2x_init_bp()
12384 bp->disable_tpa |= is_kdump_kernel(); in bnx2x_init_bp()
12387 if (bp->disable_tpa) { in bnx2x_init_bp()
12388 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12389 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12393 bp->dropless_fc = false; in bnx2x_init_bp()
12395 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); in bnx2x_init_bp()
12397 bp->mrrs = mrrs; in bnx2x_init_bp()
12399 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; in bnx2x_init_bp()
12401 bp->rx_ring_size = MAX_RX_AVAIL; in bnx2x_init_bp()
12404 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
12405 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
12407 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; in bnx2x_init_bp()
12409 timer_setup(&bp->timer, bnx2x_timer, 0); in bnx2x_init_bp()
12410 bp->timer.expires = jiffies + bp->current_interval; in bnx2x_init_bp()
12425 bp->cnic_base_cl_id = FP_SB_MAX_E1x; in bnx2x_init_bp()
12427 bp->cnic_base_cl_id = FP_SB_MAX_E2; in bnx2x_init_bp()
12431 bp->max_cos = 1; in bnx2x_init_bp()
12433 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; in bnx2x_init_bp()
12435 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; in bnx2x_init_bp()
12437 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; in bnx2x_init_bp()
12441 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); in bnx2x_init_bp()
12443 /* We need at least one default status block for slow-path events, in bnx2x_init_bp()
12448 bp->min_msix_vec_cnt = 1; in bnx2x_init_bp()
12450 bp->min_msix_vec_cnt = 3; in bnx2x_init_bp()
12452 bp->min_msix_vec_cnt = 2; in bnx2x_init_bp()
12453 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); in bnx2x_init_bp()
12455 bp->dump_preset_idx = 1; in bnx2x_init_bp()
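The coalescing defaults above round the 50/25 tick values down to a multiple of BNX2X_BTR, and the periodic timer is armed through the modern timer_setup() API, which passes the timer itself to the callback. A generic sketch of that timer pattern (poll_timer and the one-second interval are illustrative, not the driver's values):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_timer_fn(struct timer_list *t)
{
	/* periodic housekeeping goes here */
	mod_timer(&poll_timer, jiffies + HZ);	/* re-arm one second out */
}

static void poll_timer_start(void)
{
	timer_setup(&poll_timer, poll_timer_fn, 0);
	mod_timer(&poll_timer, jiffies + HZ);
}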
12474 bp->stats_init = true; in bnx2x_open()
12513 netdev_info(bp->dev, in bnx2x_open()
12520 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_open()
12525 return -EAGAIN; in bnx2x_open()
12530 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_open()
12556 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12567 list_del(&current_mcast_group->mcast_group_link); in bnx2x_free_mcast_macs_list()
12579 int mc_count = netdev_mc_count(bp->dev); in bnx2x_init_mcast_macs_list()
12582 INIT_LIST_HEAD(&p->mcast_list); in bnx2x_init_mcast_macs_list()
12583 netdev_for_each_mc_addr(ha, bp->dev) { in bnx2x_init_mcast_macs_list()
12591 return -ENOMEM; in bnx2x_init_mcast_macs_list()
12593 list_add(&current_mcast_group->mcast_group_link, in bnx2x_init_mcast_macs_list()
12596 mc_mac = &current_mcast_group->mcast_elems[offset]; in bnx2x_init_mcast_macs_list()
12597 mc_mac->mac = bnx2x_mc_addr(ha); in bnx2x_init_mcast_macs_list()
12598 list_add_tail(&mc_mac->link, &p->mcast_list); in bnx2x_init_mcast_macs_list()
12603 p->mcast_list_len = mc_count; in bnx2x_init_mcast_macs_list()
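The multicast list is built from page-sized groups so that one allocation holds many list elements; the macro further up computes how many elements fit after the group header. A sketch of that grouping loop under the same assumption, with illustrative type names (mc_group/mc_elem are not the driver's structs):

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/list.h>

struct mc_elem {
	struct list_head link;
	u8 *mac;
};

struct mc_group {
	struct list_head group_link;
	struct mc_elem elems[];
};

#define MC_ELEMS_PER_GROUP \
	((PAGE_SIZE - sizeof(struct mc_group)) / sizeof(struct mc_elem))

static int build_mc_list(struct net_device *dev,
			 struct list_head *groups, struct list_head *macs)
{
	struct netdev_hw_addr *ha;
	struct mc_group *grp = NULL;
	int offset = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (!offset) {		/* first entry, or current group full */
			grp = kzalloc(PAGE_SIZE, GFP_ATOMIC);
			if (!grp)
				return -ENOMEM;
			list_add(&grp->group_link, groups);
		}
		grp->elems[offset].mac = ha->addr;
		list_add_tail(&grp->elems[offset].link, macs);
		offset = (offset + 1) % MC_ELEMS_PER_GROUP;
	}
	return 0;
}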
12608  * bnx2x_set_uc_list - configure a new list of unicast MAC addresses.
12617 struct net_device *dev = bp->dev; in bnx2x_set_uc_list()
12619 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_set_uc_list()
12632 if (rc == -EEXIST) { in bnx2x_set_uc_list()
12633 DP(BNX2X_MSG_SP, in bnx2x_set_uc_list()
12655 struct net_device *dev = bp->dev; in bnx2x_set_mc_list_e1x()
12659 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list_e1x()
12691 struct net_device *dev = bp->dev; in bnx2x_set_mc_list()
12694 /* On older adapters, we need to flush and re-add filters */ in bnx2x_set_mc_list()
12698 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list()
12724 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12729 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_set_rx_mode()
12730 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); in bnx2x_set_rx_mode()
12743 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); in bnx2x_set_rx_mode_inner()
12745 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12747 if (bp->dev->flags & IFF_PROMISC) { in bnx2x_set_rx_mode_inner()
12749 } else if ((bp->dev->flags & IFF_ALLMULTI) || in bnx2x_set_rx_mode_inner()
12750 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && in bnx2x_set_rx_mode_inner()
12760 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12763 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12773 bp->rx_mode = rx_mode; in bnx2x_set_rx_mode_inner()
12776 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_set_rx_mode_inner()
12779 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { in bnx2x_set_rx_mode_inner()
12780 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_set_rx_mode_inner()
12781 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12787 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12790 * the VF needs to release the bottom-half lock prior to the in bnx2x_set_rx_mode_inner()
12793 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12806 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", in bnx2x_mdio_read()
12813 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); in bnx2x_mdio_read()
12815 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); in bnx2x_mdio_read()
12829 DP(NETIF_MSG_LINK, in bnx2x_mdio_write()
12837 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); in bnx2x_mdio_write()
12849 return -EAGAIN; in bnx2x_ioctl()
12855 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", in bnx2x_ioctl()
12856 mdio->phy_id, mdio->reg_num, mdio->val_in); in bnx2x_ioctl()
12857 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); in bnx2x_ioctl()
12869 if (!is_valid_ether_addr(dev->dev_addr)) { in bnx2x_validate_addr()
12870 		BNX2X_ERR("Invalid Ethernet address\n"); in bnx2x_validate_addr()
12871 return -EADDRNOTAVAIL; in bnx2x_validate_addr()
12881 if (!(bp->flags & HAS_PHYS_PORT_ID)) in bnx2x_get_phys_port_id()
12882 return -EOPNOTSUPP; in bnx2x_get_phys_port_id()
12884 ppid->id_len = sizeof(bp->phys_port_id); in bnx2x_get_phys_port_id()
12885 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); in bnx2x_get_phys_port_id()
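ndo_get_phys_port_id only has to report a stable per-port identifier: bail out when none was read from hardware, otherwise copy it into the caller's netdev_phys_item_id. A minimal sketch with an illustrative private struct (example_priv is not the driver's state):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

struct example_priv {			/* illustrative private state */
	bool has_port_id;
	u8 port_id[ETH_ALEN];
};

static int example_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_item_id *ppid)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->has_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(priv->port_id);
	memcpy(ppid->id, priv->port_id, ppid->id_len);
	return 0;
}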
12908 (skb_shinfo(skb)->gso_size > 9000) && in bnx2x_features_check()
12924 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, in __bnx2x_vlan_configure_vid()
12927 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); in __bnx2x_vlan_configure_vid()
12938 /* Configure all non-configured entries */ in bnx2x_vlan_configure_vid_list()
12939 list_for_each_entry(vlan, &bp->vlan_reg, link) { in bnx2x_vlan_configure_vid_list()
12940 if (vlan->hw) in bnx2x_vlan_configure_vid_list()
12943 if (bp->vlan_cnt >= bp->vlan_credit) in bnx2x_vlan_configure_vid_list()
12944 return -ENOBUFS; in bnx2x_vlan_configure_vid_list()
12946 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); in bnx2x_vlan_configure_vid_list()
12948 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12952 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12953 vlan->hw = true; in bnx2x_vlan_configure_vid_list()
12954 bp->vlan_cnt++; in bnx2x_vlan_configure_vid_list()
12966 if (bp->accept_any_vlan != need_accept_any_vlan) { in bnx2x_vlan_configure()
12967 bp->accept_any_vlan = need_accept_any_vlan; in bnx2x_vlan_configure()
12968 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", in bnx2x_vlan_configure()
12969 bp->accept_any_vlan ? "raised" : "cleared"); in bnx2x_vlan_configure()
12992 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); in bnx2x_vlan_rx_add_vid()
12996 return -ENOMEM; in bnx2x_vlan_rx_add_vid()
12998 vlan->vid = vid; in bnx2x_vlan_rx_add_vid()
12999 vlan->hw = false; in bnx2x_vlan_rx_add_vid()
13000 list_add_tail(&vlan->link, &bp->vlan_reg); in bnx2x_vlan_rx_add_vid()
13015 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
13017 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_vlan_rx_kill_vid()
13018 if (vlan->vid == vid) { in bnx2x_vlan_rx_kill_vid()
13024 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); in bnx2x_vlan_rx_kill_vid()
13025 return -EINVAL; in bnx2x_vlan_rx_kill_vid()
13028 if (netif_running(dev) && vlan->hw) { in bnx2x_vlan_rx_kill_vid()
13030 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
13031 bp->vlan_cnt--; in bnx2x_vlan_rx_kill_vid()
13034 list_del(&vlan->link); in bnx2x_vlan_rx_kill_vid()
13040 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); in bnx2x_vlan_rx_kill_vid()
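VIDs are tracked in a small list with an hw flag, so entries added while the interface is down (or while filter credit is exhausted) can be programmed later and cleanly removed. A sketch of the bookkeeping half of the add path, with illustrative names (vid_entry is not the driver's struct):

#include <linux/slab.h>
#include <linux/list.h>

struct vid_entry {
	struct list_head link;
	u16 vid;
	bool hw;	/* true once the VID is programmed into the HW filter */
};

static int track_vid_add(struct list_head *vlan_list, u16 vid)
{
	struct vid_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->vid = vid;
	e->hw = false;		/* programmed later, when credit allows */
	list_add_tail(&e->link, vlan_list);
	return 0;
}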
13080 struct device *dev = &bp->pdev->dev; in bnx2x_set_coherency_mask()
13085 return -EIO; in bnx2x_set_coherency_mask()
13093 if (bp->flags & AER_ENABLED) { in bnx2x_disable_pcie_error_reporting()
13094 pci_disable_pcie_error_reporting(bp->pdev); in bnx2x_disable_pcie_error_reporting()
13095 bp->flags &= ~AER_ENABLED; in bnx2x_disable_pcie_error_reporting()
13108 SET_NETDEV_DEV(dev, &pdev->dev); in bnx2x_init_dev()
13110 bp->dev = dev; in bnx2x_init_dev()
13111 bp->pdev = pdev; in bnx2x_init_dev()
13115 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13121 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13123 rc = -ENODEV; in bnx2x_init_dev()
13128 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); in bnx2x_init_dev()
13129 rc = -ENODEV; in bnx2x_init_dev()
13137 rc = -ENODEV; in bnx2x_init_dev()
13141 if (atomic_read(&pdev->enable_cnt) == 1) { in bnx2x_init_dev()
13144 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13154 if (!pdev->pm_cap) { in bnx2x_init_dev()
13155 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13157 rc = -EIO; in bnx2x_init_dev()
13163 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); in bnx2x_init_dev()
13164 rc = -EIO; in bnx2x_init_dev()
13172 dev->mem_start = pci_resource_start(pdev, 0); in bnx2x_init_dev()
13173 dev->base_addr = dev->mem_start; in bnx2x_init_dev()
13174 dev->mem_end = pci_resource_end(pdev, 0); in bnx2x_init_dev()
13176 dev->irq = pdev->irq; in bnx2x_init_dev()
13178 bp->regview = pci_ioremap_bar(pdev, 0); in bnx2x_init_dev()
13179 if (!bp->regview) { in bnx2x_init_dev()
13180 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13182 rc = -ENOMEM; in bnx2x_init_dev()
13192 bp->pf_num = PCI_FUNC(pdev->devfn); in bnx2x_init_dev()
13195 pci_read_config_dword(bp->pdev, in bnx2x_init_dev()
13197 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> in bnx2x_init_dev()
13200 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); in bnx2x_init_dev()
13203 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_init_dev()
13207 pdev->needs_freset = 1; in bnx2x_init_dev()
13212 bp->flags |= AER_ENABLED; in bnx2x_init_dev()
13233 /* Enable internal target-read (in case we are probed after PF in bnx2x_init_dev()
13242 dev->watchdog_timeo = TX_TIMEOUT; in bnx2x_init_dev()
13244 dev->netdev_ops = &bnx2x_netdev_ops; in bnx2x_init_dev()
13247 dev->priv_flags |= IFF_UNICAST_FLT; in bnx2x_init_dev()
13249 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13254 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13260 dev->hw_enc_features = in bnx2x_init_dev()
13268 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13272 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; in bnx2x_init_dev()
13275 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13280 bp->accept_any_vlan = true; in bnx2x_init_dev()
13282 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_dev()
13288 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; in bnx2x_init_dev()
13289 dev->features |= NETIF_F_HIGHDMA; in bnx2x_init_dev()
13290 if (dev->features & NETIF_F_LRO) in bnx2x_init_dev()
13291 dev->features &= ~NETIF_F_GRO_HW; in bnx2x_init_dev()
13294 dev->hw_features |= NETIF_F_LOOPBACK; in bnx2x_init_dev()
13297 dev->dcbnl_ops = &bnx2x_dcbnl_ops; in bnx2x_init_dev()
13300 /* MTU range, 46 - 9600 */ in bnx2x_init_dev()
13301 dev->min_mtu = ETH_MIN_PACKET_SIZE; in bnx2x_init_dev()
13302 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; in bnx2x_init_dev()
13305 bp->mdio.prtad = MDIO_PRTAD_NONE; in bnx2x_init_dev()
13306 bp->mdio.mmds = 0; in bnx2x_init_dev()
13307 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; in bnx2x_init_dev()
13308 bp->mdio.dev = dev; in bnx2x_init_dev()
13309 bp->mdio.mdio_read = bnx2x_mdio_read; in bnx2x_init_dev()
13310 bp->mdio.mdio_write = bnx2x_mdio_write; in bnx2x_init_dev()
13315 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_dev()
13327 const struct firmware *firmware = bp->firmware; in bnx2x_check_firmware()
13335 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { in bnx2x_check_firmware()
13337 return -EINVAL; in bnx2x_check_firmware()
13340 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; in bnx2x_check_firmware()
13348 if (offset + len > firmware->size) { in bnx2x_check_firmware()
13350 return -EINVAL; in bnx2x_check_firmware()
13355 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); in bnx2x_check_firmware()
13356 ops_offsets = (__force __be16 *)(firmware->data + offset); in bnx2x_check_firmware()
13357 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); in bnx2x_check_firmware()
13359 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { in bnx2x_check_firmware()
13362 return -EINVAL; in bnx2x_check_firmware()
13367 offset = be32_to_cpu(fw_hdr->fw_version.offset); in bnx2x_check_firmware()
13368 fw_ver = firmware->data + offset; in bnx2x_check_firmware()
13379 return -EINVAL; in bnx2x_check_firmware()
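Every section described by the firmware header is bounds-checked against the blob before it is used; offsets and lengths are stored big-endian in the file. A generic sketch of one such check (check_fw_section is illustrative, not the driver's helper):

#include <linux/firmware.h>
#include <asm/byteorder.h>

static int check_fw_section(const struct firmware *fw,
			    __be32 be_offset, __be32 be_len)
{
	u32 offset = be32_to_cpu(be_offset);
	u32 len = be32_to_cpu(be_len);

	/* reject sections that overflow or run past the end of the blob */
	if (offset + len < offset || offset + len > fw->size)
		return -EINVAL;
	return 0;
}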
13448 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13449 bp->arr = kmalloc(len, GFP_KERNEL); \
13450 if (!bp->arr) \
13452 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13453 (u8 *)bp->arr, len); \
13462 if (bp->firmware) in bnx2x_init_firmware()
13473 return -EINVAL; in bnx2x_init_firmware()
13477 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); in bnx2x_init_firmware()
13490 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; in bnx2x_init_firmware()
13494 rc = -ENOMEM; in bnx2x_init_firmware()
13505 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13506 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); in bnx2x_init_firmware()
13507 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13508 be32_to_cpu(fw_hdr->tsem_pram_data.offset); in bnx2x_init_firmware()
13509 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13510 be32_to_cpu(fw_hdr->usem_int_table_data.offset); in bnx2x_init_firmware()
13511 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13512 be32_to_cpu(fw_hdr->usem_pram_data.offset); in bnx2x_init_firmware()
13513 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13514 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); in bnx2x_init_firmware()
13515 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13516 be32_to_cpu(fw_hdr->xsem_pram_data.offset); in bnx2x_init_firmware()
13517 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13518 be32_to_cpu(fw_hdr->csem_int_table_data.offset); in bnx2x_init_firmware()
13519 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13520 be32_to_cpu(fw_hdr->csem_pram_data.offset); in bnx2x_init_firmware()
13527 kfree(bp->init_ops_offsets); in bnx2x_init_firmware()
13529 kfree(bp->init_ops); in bnx2x_init_firmware()
13531 kfree(bp->init_data); in bnx2x_init_firmware()
13533 release_firmware(bp->firmware); in bnx2x_init_firmware()
13534 bp->firmware = NULL; in bnx2x_init_firmware()
13541 kfree(bp->init_ops_offsets); in bnx2x_release_firmware()
13542 kfree(bp->init_ops); in bnx2x_release_firmware()
13543 kfree(bp->init_data); in bnx2x_release_firmware()
13544 release_firmware(bp->firmware); in bnx2x_release_firmware()
13545 bp->firmware = NULL; in bnx2x_release_firmware()
13570 bnx2x_init_func_obj(bp, &bp->func_obj, in bnx2x__init_func_obj()
13578 /* must be called after sriov-enable */
13593  * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13604 * If MSI-X is not supported - return number of SBs needed to support in bnx2x_get_num_non_def_sbs()
13607 if (!pdev->msix_cap) { in bnx2x_get_num_non_def_sbs()
13608 dev_info(&pdev->dev, "no msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13611 dev_info(&pdev->dev, "msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13620 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); in bnx2x_get_num_non_def_sbs()
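The MSI-X control word encodes the table size as N-1, so the usable vector count is the QSIZE field plus one; pci_msix_vec_count() wraps the same read. A minimal sketch of the derivation:

#include <linux/pci.h>

static int example_msix_table_size(struct pci_dev *pdev)
{
	u16 control;

	if (!pdev->msix_cap)		/* no MSI-X capability at all */
		return 0;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}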
13657 return -ENODEV; in set_max_cos_est()
13697 func_params.f_obj = &bp->func_obj; in bnx2x_send_update_drift_ramrod()
13701 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; in bnx2x_send_update_drift_ramrod()
13702 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_update_drift_ramrod()
13703 set_timesync_params->add_sub_drift_adjust_value = in bnx2x_send_update_drift_ramrod()
13705 set_timesync_params->drift_adjust_value = best_val; in bnx2x_send_update_drift_ramrod()
13706 set_timesync_params->drift_adjust_period = best_period; in bnx2x_send_update_drift_ramrod()
13719 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); in bnx2x_ptp_adjfreq()
13721 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjfreq()
13722 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjfreq()
13724 return -ENETDOWN; in bnx2x_ptp_adjfreq()
13728 ppb = -ppb; in bnx2x_ptp_adjfreq()
13748 dif1 = ppb - (val * 1000000 / period1); in bnx2x_ptp_adjfreq()
13752 dif1 = -dif1; in bnx2x_ptp_adjfreq()
13753 dif2 = ppb - (val * 1000000 / period2); in bnx2x_ptp_adjfreq()
13755 dif2 = -dif2; in bnx2x_ptp_adjfreq()
13770 return -EFAULT; in bnx2x_ptp_adjfreq()
13773 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, in bnx2x_ptp_adjfreq()
13783 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjtime()
13784 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjtime()
13786 return -ENETDOWN; in bnx2x_ptp_adjtime()
13789 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); in bnx2x_ptp_adjtime()
13791 timecounter_adjtime(&bp->timecounter, delta); in bnx2x_ptp_adjtime()
13801 if (!netif_running(bp->dev)) { in bnx2x_ptp_gettime()
13802 DP(BNX2X_MSG_PTP, in bnx2x_ptp_gettime()
13804 return -ENETDOWN; in bnx2x_ptp_gettime()
13807 ns = timecounter_read(&bp->timecounter); in bnx2x_ptp_gettime()
13809 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); in bnx2x_ptp_gettime()
13822 if (!netif_running(bp->dev)) { in bnx2x_ptp_settime()
13823 DP(BNX2X_MSG_PTP, in bnx2x_ptp_settime()
13825 return -ENETDOWN; in bnx2x_ptp_settime()
13830 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); in bnx2x_ptp_settime()
13832 /* Re-init the timecounter */ in bnx2x_ptp_settime()
13833 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); in bnx2x_ptp_settime()
13845 return -ENOTSUPP; in bnx2x_ptp_enable()
13851 bp->ptp_clock_info.owner = THIS_MODULE; in bnx2x_register_phc()
13852 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); in bnx2x_register_phc()
13853 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ in bnx2x_register_phc()
13854 bp->ptp_clock_info.n_alarm = 0; in bnx2x_register_phc()
13855 bp->ptp_clock_info.n_ext_ts = 0; in bnx2x_register_phc()
13856 bp->ptp_clock_info.n_per_out = 0; in bnx2x_register_phc()
13857 bp->ptp_clock_info.pps = 0; in bnx2x_register_phc()
13858 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; in bnx2x_register_phc()
13859 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; in bnx2x_register_phc()
13860 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; in bnx2x_register_phc()
13861 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; in bnx2x_register_phc()
13862 bp->ptp_clock_info.enable = bnx2x_ptp_enable; in bnx2x_register_phc()
13864 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); in bnx2x_register_phc()
13865 if (IS_ERR(bp->ptp_clock)) { in bnx2x_register_phc()
13866 bp->ptp_clock = NULL; in bnx2x_register_phc()
13883 * to forget previously living interfaces, allowing a proper re-load. in bnx2x_init_one()
13898 * initialization of bp->max_cos based on the chip versions AND chip in bnx2x_init_one()
13901 max_cos_est = set_max_cos_est(ent->driver_data); in bnx2x_init_one()
13904 is_vf = set_is_vf(ent->driver_data); in bnx2x_init_one()
13913 rss_count = max_non_def_sbs - cnic_cnt; in bnx2x_init_one()
13916 return -EINVAL; in bnx2x_init_one()
13929 return -ENOMEM; in bnx2x_init_one()
13933 bp->flags = 0; in bnx2x_init_one()
13935 bp->flags |= IS_VF_FLAG; in bnx2x_init_one()
13937 bp->igu_sb_cnt = max_non_def_sbs; in bnx2x_init_one()
13938 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; in bnx2x_init_one()
13939 bp->msg_enable = debug; in bnx2x_init_one()
13940 bp->cnic_support = cnic_cnt; in bnx2x_init_one()
13941 bp->cnic_probe = bnx2x_cnic_probe; in bnx2x_init_one()
13945 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); in bnx2x_init_one()
13962 /* Map doorbells here as we need the real value of bp->max_cos which in bnx2x_init_one()
13967 bp->doorbells = bnx2x_vf_doorbells(bp); in bnx2x_init_one()
13974 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13976 rc = -ENOMEM; in bnx2x_init_one()
13979 bp->doorbells = ioremap(pci_resource_start(pdev, 2), in bnx2x_init_one()
13982 if (!bp->doorbells) { in bnx2x_init_one()
13983 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13985 rc = -ENOMEM; in bnx2x_init_one()
13996 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { in bnx2x_init_one()
13997 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
13998 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
14009 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); in bnx2x_init_one()
14010 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); in bnx2x_init_one()
14014 bp->flags |= NO_FCOE_FLAG; in bnx2x_init_one()
14016 /* Set bp->num_queues for MSI-X mode*/ in bnx2x_init_one()
14019 /* Configure interrupt mode: try to enable MSI-X/MSI if in bnx2x_init_one()
14024 dev_err(&pdev->dev, "Cannot set interrupts\n"); in bnx2x_init_one()
14032 dev_err(&pdev->dev, "Cannot register net device\n"); in bnx2x_init_one()
14035 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); in bnx2x_init_one()
14040 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in bnx2x_init_one()
14044 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n", in bnx2x_init_one()
14045 board_info[ent->driver_data].name, in bnx2x_init_one()
14047 dev->base_addr, bp->pdev->irq, dev->dev_addr); in bnx2x_init_one()
14048 pcie_print_link_status(bp->pdev); in bnx2x_init_one()
14061 if (bp->regview) in bnx2x_init_one()
14062 iounmap(bp->regview); in bnx2x_init_one()
14064 if (IS_PF(bp) && bp->doorbells) in bnx2x_init_one()
14065 iounmap(bp->doorbells); in bnx2x_init_one()
14069 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_one()
14085 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in __bnx2x_remove()
14096 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) in __bnx2x_remove()
14099 /* Close the interface - either directly or implicitly */ in __bnx2x_remove()
14121 /* Disable MSI/MSI-X */ in __bnx2x_remove()
14129 cancel_delayed_work_sync(&bp->sp_rtnl_task); in __bnx2x_remove()
14137 pci_wake_from_d3(pdev, bp->wol); in __bnx2x_remove()
14143 if (bp->regview) in __bnx2x_remove()
14144 iounmap(bp->regview); in __bnx2x_remove()
14150 if (bp->doorbells) in __bnx2x_remove()
14151 iounmap(bp->doorbells); in __bnx2x_remove()
14161 if (atomic_read(&pdev->enable_cnt) == 1) in __bnx2x_remove()
14174 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_remove_one()
14184 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_eeh_nic_unload()
14186 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_eeh_nic_unload()
14197 netdev_reset_tc(bp->dev); in bnx2x_eeh_nic_unload()
14199 del_timer_sync(&bp->timer); in bnx2x_eeh_nic_unload()
14200 cancel_delayed_work_sync(&bp->sp_task); in bnx2x_eeh_nic_unload()
14201 cancel_delayed_work_sync(&bp->period_task); in bnx2x_eeh_nic_unload()
14203 if (!down_timeout(&bp->stats_lock, HZ / 10)) { in bnx2x_eeh_nic_unload()
14204 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_eeh_nic_unload()
14205 up(&bp->stats_lock); in bnx2x_eeh_nic_unload()
14210 netif_carrier_off(bp->dev); in bnx2x_eeh_nic_unload()
14216 * bnx2x_io_error_detected - called when PCI error is detected
14254 * bnx2x_io_slot_reset - called after the PCI bus has been reset
14257 * Restart the card from scratch, as if from a cold-boot.
14268 dev_err(&pdev->dev, in bnx2x_io_slot_reset()
14269 "Cannot re-enable PCI device after reset\n"); in bnx2x_io_slot_reset()
14282 BNX2X_ERR("IO slot reset --> driver unload\n"); in bnx2x_io_slot_reset()
14306 bp->sp_state = 0; in bnx2x_io_slot_reset()
14307 bp->port.pmf = 0; in bnx2x_io_slot_reset()
14317 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_io_slot_reset()
14321 bp->state = BNX2X_STATE_CLOSED; in bnx2x_io_slot_reset()
14330 * bnx2x_io_resume - called when traffic can start flowing again
14341 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_io_resume()
14342 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); in bnx2x_io_resume()
14348 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_io_resume()
14408 return -ENOMEM; in bnx2x_init()
14414 return -ENOMEM; in bnx2x_init()
14453 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14457  * Return 0 on success, -ENODEV if the ramrod doesn't complete.
14464 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, in bnx2x_set_iscsi_eth_mac_addr()
14465 &bp->iscsi_l2_mac_obj, true, in bnx2x_set_iscsi_eth_mac_addr()
14476 if (unlikely(bp->panic)) in bnx2x_cnic_sp_post()
14480 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14481 BUG_ON(bp->cnic_spq_pending < count); in bnx2x_cnic_sp_post()
14482 bp->cnic_spq_pending -= count; in bnx2x_cnic_sp_post()
14484 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { in bnx2x_cnic_sp_post()
14485 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) in bnx2x_cnic_sp_post()
14488 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) in bnx2x_cnic_sp_post()
14498 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - in bnx2x_cnic_sp_post()
14501 &bp->context[cxt_index]. in bnx2x_cnic_sp_post()
14514 if (!atomic_read(&bp->cq_spq_left)) in bnx2x_cnic_sp_post()
14517 atomic_dec(&bp->cq_spq_left); in bnx2x_cnic_sp_post()
14519 if (!atomic_read(&bp->eq_spq_left)) in bnx2x_cnic_sp_post()
14522 atomic_dec(&bp->eq_spq_left); in bnx2x_cnic_sp_post()
14525 if (bp->cnic_spq_pending >= in bnx2x_cnic_sp_post()
14526 bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_post()
14529 bp->cnic_spq_pending++; in bnx2x_cnic_sp_post()
14537 *spe = *bp->cnic_kwq_cons; in bnx2x_cnic_sp_post()
14539 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", in bnx2x_cnic_sp_post()
14540 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); in bnx2x_cnic_sp_post()
14542 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) in bnx2x_cnic_sp_post()
14543 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_cnic_sp_post()
14545 bp->cnic_kwq_cons++; in bnx2x_cnic_sp_post()
14548 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14558 if (unlikely(bp->panic)) { in bnx2x_cnic_sp_queue()
14560 return -EIO; in bnx2x_cnic_sp_queue()
14564 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && in bnx2x_cnic_sp_queue()
14565 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_cnic_sp_queue()
14567 return -EAGAIN; in bnx2x_cnic_sp_queue()
14570 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14575 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) in bnx2x_cnic_sp_queue()
14578 *bp->cnic_kwq_prod = *spe; in bnx2x_cnic_sp_queue()
14580 bp->cnic_kwq_pending++; in bnx2x_cnic_sp_queue()
14582 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", in bnx2x_cnic_sp_queue()
14583 spe->hdr.conn_and_cmd_data, spe->hdr.type, in bnx2x_cnic_sp_queue()
14584 spe->data.update_data_addr.hi, in bnx2x_cnic_sp_queue()
14585 spe->data.update_data_addr.lo, in bnx2x_cnic_sp_queue()
14586 bp->cnic_kwq_pending); in bnx2x_cnic_sp_queue()
14588 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) in bnx2x_cnic_sp_queue()
14589 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_cnic_sp_queue()
14591 bp->cnic_kwq_prod++; in bnx2x_cnic_sp_queue()
14594 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14596 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_queue()
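Slow-path entries that cannot be posted immediately are parked in a page-backed ring under the SPQ spinlock; the producer pointer wraps back to the base once it reaches the last slot. A sketch of that enqueue with illustrative struct and field names (eth_spe here is only a placeholder layout):

#include <linux/spinlock.h>
#include <linux/errno.h>

struct eth_spe { u64 data[8]; };	/* placeholder for the real SPE layout */

struct spe_ring {
	spinlock_t lock;
	struct eth_spe *base, *prod, *last;
	int pending, max;
};

static int spe_ring_post(struct spe_ring *r, const struct eth_spe *spe)
{
	spin_lock_bh(&r->lock);

	if (r->pending == r->max) {		/* ring full */
		spin_unlock_bh(&r->lock);
		return -EBUSY;
	}

	*r->prod = *spe;			/* copy the entry into the ring */
	r->pending++;

	if (r->prod == r->last)			/* wrap at the end of the page */
		r->prod = r->base;
	else
		r->prod++;

	spin_unlock_bh(&r->lock);
	return 0;
}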
14607 mutex_lock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14608 c_ops = rcu_dereference_protected(bp->cnic_ops, in bnx2x_cnic_ctl_send()
14609 lockdep_is_held(&bp->cnic_mutex)); in bnx2x_cnic_ctl_send()
14611 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send()
14612 mutex_unlock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14623 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_cnic_ctl_send_bh()
14625 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send_bh()
14670 * multicasts (in non-promiscuous mode only one Queue per in bnx2x_set_iscsi_eth_rx_mode()
14680 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14685 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14687 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_set_iscsi_eth_rx_mode()
14688 set_bit(sched_state, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14701 switch (ctl->cmd) { in bnx2x_drv_ctl()
14703 u32 index = ctl->data.io.offset; in bnx2x_drv_ctl()
14704 dma_addr_t addr = ctl->data.io.dma_addr; in bnx2x_drv_ctl()
14711 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14719 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_drv_ctl()
14723 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14724 cp->iscsi_l2_client_id, in bnx2x_drv_ctl()
14725 cp->iscsi_l2_cid, BP_FUNC(bp), in bnx2x_drv_ctl()
14729 &bp->sp_state, BNX2X_OBJ_TYPE_RX, in bnx2x_drv_ctl()
14730 &bp->macs_pool); in bnx2x_drv_ctl()
14774 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14779 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14782 atomic_add(count, &bp->cq_spq_left); in bnx2x_drv_ctl()
14787 int ulp_type = ctl->data.register_data.ulp_type; in bnx2x_drv_ctl()
14807 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) in bnx2x_drv_ctl()
14810 /* if reached here - should write fcoe capabilities */ in bnx2x_drv_ctl()
14816 host_addr = (u32 *) &(ctl->data.register_data. in bnx2x_drv_ctl()
14828 int ulp_type = ctl->data.ulp_type; in bnx2x_drv_ctl()
14846 BNX2X_ERR("unknown command %x\n", ctl->cmd); in bnx2x_drv_ctl()
14847 rc = -EINVAL; in bnx2x_drv_ctl()
14850 /* For storage-only interfaces, change driver state */ in bnx2x_drv_ctl()
14852 switch (ctl->drv_state) { in bnx2x_drv_ctl()
14868 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); in bnx2x_drv_ctl()
14881 int rc = -EINVAL; in bnx2x_get_fc_npiv()
14887 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14897 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); in bnx2x_get_fc_npiv()
14900 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); in bnx2x_get_fc_npiv()
14904 BNX2X_ERR("Failed to read FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14911 entries = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14913 tbl->fc_npiv_cfg.num_of_npiv = entries; in bnx2x_get_fc_npiv()
14915 if (!tbl->fc_npiv_cfg.num_of_npiv) { in bnx2x_get_fc_npiv()
14916 DP(BNX2X_MSG_MCP, in bnx2x_get_fc_npiv()
14917 "No FC-NPIV table [valid, simply not present]\n"); in bnx2x_get_fc_npiv()
14919 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { in bnx2x_get_fc_npiv()
14920 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n", in bnx2x_get_fc_npiv()
14921 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14924 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n", in bnx2x_get_fc_npiv()
14925 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14928 /* Copy the data into cnic-provided struct */ in bnx2x_get_fc_npiv()
14929 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14930 for (i = 0; i < cnic_tbl->count; i++) { in bnx2x_get_fc_npiv()
14931 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8); in bnx2x_get_fc_npiv()
14932 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8); in bnx2x_get_fc_npiv()
14943 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_irq_info()
14945 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_cnic_irq_info()
14946 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14947 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14948 cp->irq_arr[0].vector = bp->msix_table[1].vector; in bnx2x_setup_cnic_irq_info()
14950 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14951 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14954 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; in bnx2x_setup_cnic_irq_info()
14956 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; in bnx2x_setup_cnic_irq_info()
14958 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14959 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14960 cp->irq_arr[1].status_blk = bp->def_status_blk; in bnx2x_setup_cnic_irq_info()
14961 cp->irq_arr[1].status_blk_num = DEF_SB_ID; in bnx2x_setup_cnic_irq_info()
14962 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; in bnx2x_setup_cnic_irq_info()
14964 cp->num_irq = 2; in bnx2x_setup_cnic_irq_info()
14969 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_info()
14971 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_setup_cnic_info()
14973 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_setup_cnic_info()
14974 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_setup_cnic_info()
14975 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_setup_cnic_info()
14977 	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp in bnx2x_setup_cnic_info()
14978 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, in bnx2x_setup_cnic_info()
14979 cp->iscsi_l2_cid); in bnx2x_setup_cnic_info()
14982 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_setup_cnic_info()
14989 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_register_cnic()
14992 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); in bnx2x_register_cnic()
14996 return -EINVAL; in bnx2x_register_cnic()
15001 return -EOPNOTSUPP; in bnx2x_register_cnic()
15007 BNX2X_ERR("CNIC-related load failed\n"); in bnx2x_register_cnic()
15012 bp->cnic_enabled = true; in bnx2x_register_cnic()
15014 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); in bnx2x_register_cnic()
15015 if (!bp->cnic_kwq) in bnx2x_register_cnic()
15016 return -ENOMEM; in bnx2x_register_cnic()
15018 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_register_cnic()
15019 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_register_cnic()
15020 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; in bnx2x_register_cnic()
15022 bp->cnic_spq_pending = 0; in bnx2x_register_cnic()
15023 bp->cnic_kwq_pending = 0; in bnx2x_register_cnic()
15025 bp->cnic_data = data; in bnx2x_register_cnic()
15027 cp->num_irq = 0; in bnx2x_register_cnic()
15028 cp->drv_state |= CNIC_DRV_STATE_REGD; in bnx2x_register_cnic()
15029 cp->iro_arr = bp->iro_arr; in bnx2x_register_cnic()
15033 rcu_assign_pointer(bp->cnic_ops, ops); in bnx2x_register_cnic()
15044 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_unregister_cnic()
15046 mutex_lock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
15047 cp->drv_state = 0; in bnx2x_unregister_cnic()
15048 RCU_INIT_POINTER(bp->cnic_ops, NULL); in bnx2x_unregister_cnic()
15049 mutex_unlock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
15051 bp->cnic_enabled = false; in bnx2x_unregister_cnic()
15052 kfree(bp->cnic_kwq); in bnx2x_unregister_cnic()
15053 bp->cnic_kwq = NULL; in bnx2x_unregister_cnic()
15061 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_cnic_probe()
15063 /* If both iSCSI and FCoE are disabled - return NULL in in bnx2x_cnic_probe()
15070 cp->drv_owner = THIS_MODULE; in bnx2x_cnic_probe()
15071 cp->chip_id = CHIP_ID(bp); in bnx2x_cnic_probe()
15072 cp->pdev = bp->pdev; in bnx2x_cnic_probe()
15073 cp->io_base = bp->regview; in bnx2x_cnic_probe()
15074 cp->io_base2 = bp->doorbells; in bnx2x_cnic_probe()
15075 cp->max_kwqe_pending = 8; in bnx2x_cnic_probe()
15076 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; in bnx2x_cnic_probe()
15077 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_cnic_probe()
15079 cp->ctx_tbl_len = CNIC_ILT_LINES; in bnx2x_cnic_probe()
15080 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_cnic_probe()
15081 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; in bnx2x_cnic_probe()
15082 cp->drv_ctl = bnx2x_drv_ctl; in bnx2x_cnic_probe()
15083 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv; in bnx2x_cnic_probe()
15084 cp->drv_register_cnic = bnx2x_register_cnic; in bnx2x_cnic_probe()
15085 cp->drv_unregister_cnic = bnx2x_unregister_cnic; in bnx2x_cnic_probe()
15086 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_cnic_probe()
15087 cp->iscsi_l2_client_id = in bnx2x_cnic_probe()
15089 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_cnic_probe()
15092 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_cnic_probe()
15095 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; in bnx2x_cnic_probe()
15098 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; in bnx2x_cnic_probe()
15102 cp->ctx_blk_size, in bnx2x_cnic_probe()
15103 cp->ctx_tbl_offset, in bnx2x_cnic_probe()
15104 cp->ctx_tbl_len, in bnx2x_cnic_probe()
15105 cp->starting_cid); in bnx2x_cnic_probe()
15111 struct bnx2x *bp = fp->bp; in bnx2x_rx_ustorm_prods_offset()
15117 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); in bnx2x_rx_ustorm_prods_offset()
15119 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); in bnx2x_rx_ustorm_prods_offset()
15126 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
15134 return -1; in bnx2x_pretend_func()
15154 * still not complete, may indicate an error state - bail out then. in bnx2x_ptp_task()
15177 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_ptp_task()
15181 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); in bnx2x_ptp_task()
15183 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_ptp_task()
15186 DP(BNX2X_MSG_PTP, in bnx2x_ptp_task()
15189 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_ptp_task()
15192 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_ptp_task()
15193 bp->ptp_tx_skb = NULL; in bnx2x_ptp_task()
15211 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_set_rx_ts()
15213 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); in bnx2x_set_rx_ts()
15215 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_set_rx_ts()
15232 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); in bnx2x_cyclecounter_read()
15239 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); in bnx2x_init_cyclecounter()
15240 bp->cyclecounter.read = bnx2x_cyclecounter_read; in bnx2x_init_cyclecounter()
15241 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); in bnx2x_init_cyclecounter()
15242 bp->cyclecounter.shift = 0; in bnx2x_init_cyclecounter()
15243 bp->cyclecounter.mult = 1; in bnx2x_init_cyclecounter()
15256 func_params.f_obj = &bp->func_obj; in bnx2x_send_reset_timesync_ramrod()
15260 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; in bnx2x_send_reset_timesync_ramrod()
15261 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_reset_timesync_ramrod()
15282 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_enable_ptp_packets()
15317 if (!bp->hwtstamp_ioctl_called) in bnx2x_configure_ptp_filters()
15324 switch (bp->tx_type) { in bnx2x_configure_ptp_filters()
15326 bp->flags |= TX_TIMESTAMPING_EN; in bnx2x_configure_ptp_filters()
15332 BNX2X_ERR("One-step timestamping is not supported\n"); in bnx2x_configure_ptp_filters()
15333 return -ERANGE; in bnx2x_configure_ptp_filters()
15340 switch (bp->rx_filter) { in bnx2x_configure_ptp_filters()
15346 bp->rx_filter = HWTSTAMP_FILTER_NONE; in bnx2x_configure_ptp_filters()
15351 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; in bnx2x_configure_ptp_filters()
15359 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; in bnx2x_configure_ptp_filters()
15367 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; in bnx2x_configure_ptp_filters()
15376 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in bnx2x_configure_ptp_filters()
15400 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); in bnx2x_hwtstamp_ioctl()
15402 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in bnx2x_hwtstamp_ioctl()
15403 return -EFAULT; in bnx2x_hwtstamp_ioctl()
15405 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", in bnx2x_hwtstamp_ioctl()
15410 return -EINVAL; in bnx2x_hwtstamp_ioctl()
15413 bp->hwtstamp_ioctl_called = true; in bnx2x_hwtstamp_ioctl()
15414 bp->tx_type = config.tx_type; in bnx2x_hwtstamp_ioctl()
15415 bp->rx_filter = config.rx_filter; in bnx2x_hwtstamp_ioctl()
15421 config.rx_filter = bp->rx_filter; in bnx2x_hwtstamp_ioctl()
15423 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in bnx2x_hwtstamp_ioctl()
15424 -EFAULT : 0; in bnx2x_hwtstamp_ioctl()
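The SIOCSHWTSTAMP handler follows the standard handshake: copy the user's hwtstamp_config in, validate or trim it to what the MAC can do, then copy back what was actually enabled. A minimal sketch of that shape (the filter programming itself is elided, and example_hwtstamp_set is not the driver's function):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int example_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;	/* e.g. one-step TX stamping unsupported */

	/* ... program RX filters, possibly downgrading config.rx_filter ... */

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}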
15433 /* Reset PTP event detection rules - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15443 	/* Disable PTP packets to host - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15451 /* Enable the free-running counter */ in bnx2x_configure_ptp()
15460 return -EFAULT; in bnx2x_configure_ptp()
15472 /* Called during load to initialize PTP-related state */
15485 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); in bnx2x_init_ptp()
15491 if (!bp->timecounter_init_done) { in bnx2x_init_ptp()
15493 timecounter_init(&bp->timecounter, &bp->cyclecounter, in bnx2x_init_ptp()
15495 bp->timecounter_init_done = true; in bnx2x_init_ptp()
15498 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); in bnx2x_init_ptp()