3 * Copyright (c) 2007-2013 Broadcom Corporation
37 #include <linux/dma-mapping.h>
77 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
78 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
79 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
112 static int mrrs = -1;
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_wr_ind()
368 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); in bnx2x_reg_wr_ind()
369 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_wr_ind()
377 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); in bnx2x_reg_rd_ind()
378 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); in bnx2x_reg_rd_ind()
379 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_reg_rd_ind()
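The two helpers above access chip registers indirectly through the PCI configuration window: the target GRC address is written to PCICFG_GRC_ADDRESS, the data register at PCICFG_GRC_DATA is then written or read, and the window is pointed back at a harmless offset. A minimal reconstruction of that pattern is sketched below; the restore offset (PCICFG_VENDOR_ID_OFFSET) and the struct bnx2x layout are assumptions based on mainline bnx2x, not visible in the fragments.

/* Sketch, not verbatim driver code: assumes <linux/pci.h> and a struct bnx2x
 * that carries the pci_dev pointer in bp->pdev, as the fragments suggest.
 * The restore to PCICFG_VENDOR_ID_OFFSET is an assumption. */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	return val;
}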
394 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; in bnx2x_dp_dmae()
397 switch (dmae->opcode & DMAE_COMMAND_DST) { in bnx2x_dp_dmae()
400 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
403 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
404 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
405 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
406 dmae->comp_val); in bnx2x_dp_dmae()
408 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
411 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
412 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, in bnx2x_dp_dmae()
413 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
414 dmae->comp_val); in bnx2x_dp_dmae()
418 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
421 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
422 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
423 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
424 dmae->comp_val); in bnx2x_dp_dmae()
426 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
429 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
430 dmae->len, dmae->dst_addr_lo >> 2, in bnx2x_dp_dmae()
431 dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
432 dmae->comp_val); in bnx2x_dp_dmae()
436 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
439 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, in bnx2x_dp_dmae()
440 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
441 dmae->comp_val); in bnx2x_dp_dmae()
443 DP(msglvl, "DMAE: opcode 0x%08x\n" in bnx2x_dp_dmae()
446 dmae->opcode, dmae->src_addr_lo >> 2, in bnx2x_dp_dmae()
447 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, in bnx2x_dp_dmae()
448 dmae->comp_val); in bnx2x_dp_dmae()
453 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", in bnx2x_dp_dmae()
513 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, in bnx2x_prep_dmae_with_comp()
517 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
518 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); in bnx2x_prep_dmae_with_comp()
519 dmae->comp_val = DMAE_COMP_VAL; in bnx2x_prep_dmae_with_comp()
522 /* issue a dmae command over the init-channel and wait for completion */
531 /* Lock the dmae channel. Disable BHs to prevent a dead-lock in bnx2x_issue_dmae_with_comp()
536 spin_lock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
549 (bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_issue_dmae_with_comp()
550 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_issue_dmae_with_comp()
555 cnt--; in bnx2x_issue_dmae_with_comp()
565 spin_unlock_bh(&bp->dmae_lock); in bnx2x_issue_dmae_with_comp()
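bnx2x_prep_dmae_with_comp() and bnx2x_issue_dmae_with_comp() together implement the usual post-and-wait scheme: the command's completion address points at the wb_comp word in slow-path memory, comp_val is the value the DMAE engine will write there, and the issuer takes dmae_lock (with BHs disabled) so only one command uses the init channel at a time, then polls the completion word with a bounded budget. A plain-C model of the polling half, with post_dmae() and udelay_fn() as hypothetical stand-ins for the driver's primitives:

#include <stdint.h>

/* Sketch only: models the bounded wait for the DMAE completion word.
 * In the driver this runs under bp->dmae_lock with the real delay and
 * post primitives; here they are passed in as stand-ins. */
static int issue_dmae_and_wait(volatile uint32_t *wb_comp, uint32_t comp_val,
			       int poll_count,
			       void (*post_dmae)(void),
			       void (*udelay_fn)(unsigned int))
{
	int cnt = poll_count;

	*wb_comp = 0;          /* arm the completion word before posting */
	post_dmae();           /* hand the command to the DMAE engine */

	while (*wb_comp != comp_val) {
		if (!cnt--)
			return -1;     /* engine never wrote the completion value */
		udelay_fn(5);
	}
	return 0;
}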
576 if (!bp->dmae_ready) { in bnx2x_write_dmae()
611 if (!bp->dmae_ready) { in bnx2x_read_dmae()
655 len -= dmae_wr_max; in bnx2x_write_dmae_phys_len()
689 return -EINVAL; in bnx2x_get_assert_list_entry()
769 BNX2X_ERR("NO MCP - can not dump\n"); in bnx2x_fw_dump_lvl()
772 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", in bnx2x_fw_dump_lvl()
773 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fw_dump_lvl()
774 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fw_dump_lvl()
775 (bp->common.bc_ver & 0xff)); in bnx2x_fw_dump_lvl()
777 if (pci_channel_offline(bp->pdev)) { in bnx2x_fw_dump_lvl()
787 trace_shmem_base = bp->common.shmem_base; in bnx2x_fw_dump_lvl()
800 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; in bnx2x_fw_dump_lvl()
812 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; in bnx2x_fw_dump_lvl()
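Two small pieces of arithmetic from bnx2x_fw_dump_lvl() are worth spelling out: the bootcode version is packed one byte per field in bc_ver, and the trace "mark" read from shmem is rounded up to a dword boundary and rebased into the MCP scratch area. A standalone example with made-up values (the 0x08000000 bias and the rounding come from the fragment; the scratch base and sample numbers are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bc_ver = 0x00070d04;    /* invented packed version -> 7.13.4 */
	uint32_t mark = 0x0800a002;      /* invented raw mark read from shmem */
	uint32_t scratch_base = 0xa0000; /* hypothetical MCPR_SCRATCH_BASE(bp) */

	printf("bc %u.%u.%u\n",
	       (bc_ver & 0xff0000) >> 16,
	       (bc_ver & 0xff00) >> 8,
	       (bc_ver & 0xff));

	/* round the mark up to the next dword, then rebase it into scratch space */
	mark = scratch_base + ((mark + 0x3) & ~0x3) - 0x08000000;
	printf("mark 0x%08x\n", mark);
	return 0;
}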
870 DP(NETIF_MSG_IFDOWN, in bnx2x_hc_int_disable()
887 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); in bnx2x_igu_int_disable()
896 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_disable()
915 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_panic_dump()
916 bp->eth_stats.unrecoverable_error++; in bnx2x_panic_dump()
917 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); in bnx2x_panic_dump()
919 BNX2X_ERR("begin crash dump -----------------\n"); in bnx2x_panic_dump()
924 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_panic_dump()
928 bp->def_idx, bp->def_att_idx, bp->attn_state, in bnx2x_panic_dump()
929 bp->spq_prod_idx, bp->stats_counter); in bnx2x_panic_dump()
931 def_sb->atten_status_block.attn_bits, in bnx2x_panic_dump()
932 def_sb->atten_status_block.attn_bits_ack, in bnx2x_panic_dump()
933 def_sb->atten_status_block.status_block_id, in bnx2x_panic_dump()
934 def_sb->atten_status_block.attn_bits_index); in bnx2x_panic_dump()
938 def_sb->sp_sb.index_values[i], in bnx2x_panic_dump()
939 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); in bnx2x_panic_dump()
960 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
976 if (!bp->fp) in bnx2x_panic_dump()
979 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
984 i, fp->rx_bd_prod, fp->rx_bd_cons, in bnx2x_panic_dump()
985 fp->rx_comp_prod, in bnx2x_panic_dump()
986 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); in bnx2x_panic_dump()
988 fp->rx_sge_prod, fp->last_max_sge, in bnx2x_panic_dump()
989 le16_to_cpu(fp->fp_hc_idx)); in bnx2x_panic_dump()
994 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
997 txdata = *fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1020 fp->sb_running_index[j], in bnx2x_panic_dump()
1021 (j == HC_SB_MAX_SM - 1) ? ")" : " "); in bnx2x_panic_dump()
1026 fp->sb_index_values[j], in bnx2x_panic_dump()
1027 (j == loop - 1) ? ")" : " "); in bnx2x_panic_dump()
1044 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + in bnx2x_panic_dump()
1086 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); in bnx2x_panic_dump()
1088 u32 *data = (u32 *)&bp->eq_ring[i].message.data; in bnx2x_panic_dump()
1091 i, bp->eq_ring[i].message.opcode, in bnx2x_panic_dump()
1092 bp->eq_ring[i].message.error); in bnx2x_panic_dump()
1101 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1103 if (!bp->fp) in bnx2x_panic_dump()
1106 if (!fp->rx_cons_sb) in bnx2x_panic_dump()
1109 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); in bnx2x_panic_dump()
1110 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); in bnx2x_panic_dump()
1112 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; in bnx2x_panic_dump()
1113 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; in bnx2x_panic_dump()
1116 i, j, rx_bd[1], rx_bd[0], sw_bd->data); in bnx2x_panic_dump()
1119 start = RX_SGE(fp->rx_sge_prod); in bnx2x_panic_dump()
1120 end = RX_SGE(fp->last_max_sge); in bnx2x_panic_dump()
1122 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; in bnx2x_panic_dump()
1123 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; in bnx2x_panic_dump()
1126 i, j, rx_sge[1], rx_sge[0], sw_page->page); in bnx2x_panic_dump()
1129 start = RCQ_BD(fp->rx_comp_cons - 10); in bnx2x_panic_dump()
1130 end = RCQ_BD(fp->rx_comp_cons + 503); in bnx2x_panic_dump()
1132 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; in bnx2x_panic_dump()
1141 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_panic_dump()
1143 if (!bp->fp) in bnx2x_panic_dump()
1147 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_panic_dump()
1149 if (!fp->txdata_ptr[cos]) in bnx2x_panic_dump()
1152 if (!txdata->tx_cons_sb) in bnx2x_panic_dump()
1155 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); in bnx2x_panic_dump()
1156 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); in bnx2x_panic_dump()
1159 &txdata->tx_buf_ring[j]; in bnx2x_panic_dump()
1162 i, cos, j, sw_bd->skb, in bnx2x_panic_dump()
1163 sw_bd->first_bd); in bnx2x_panic_dump()
1166 start = TX_BD(txdata->tx_bd_cons - 10); in bnx2x_panic_dump()
1167 end = TX_BD(txdata->tx_bd_cons + 254); in bnx2x_panic_dump()
1169 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; in bnx2x_panic_dump()
1179 int tmp_msg_en = bp->msg_enable; in bnx2x_panic_dump()
1182 bp->msg_enable |= NETIF_MSG_HW; in bnx2x_panic_dump()
1183 BNX2X_ERR("Idle check (1st round) ----------\n"); in bnx2x_panic_dump()
1185 BNX2X_ERR("Idle check (2nd round) ----------\n"); in bnx2x_panic_dump()
1187 bp->msg_enable = tmp_msg_en; in bnx2x_panic_dump()
1191 BNX2X_ERR("end crash dump -----------------\n"); in bnx2x_panic_dump()
1224 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1225 crd = crd_start = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1226 init_crd = REG_RD(bp, regs->init_crd); in bnx2x_pbf_pN_buf_flushed()
1228 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); in bnx2x_pbf_pN_buf_flushed()
1229 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1230 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1233 (init_crd - crd_start))) { in bnx2x_pbf_pN_buf_flushed()
1234 if (cur_cnt--) { in bnx2x_pbf_pN_buf_flushed()
1236 crd = REG_RD(bp, regs->crd); in bnx2x_pbf_pN_buf_flushed()
1237 crd_freed = REG_RD(bp, regs->crd_freed); in bnx2x_pbf_pN_buf_flushed()
1239 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", in bnx2x_pbf_pN_buf_flushed()
1240 regs->pN); in bnx2x_pbf_pN_buf_flushed()
1241 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1242 regs->pN, crd); in bnx2x_pbf_pN_buf_flushed()
1243 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", in bnx2x_pbf_pN_buf_flushed()
1244 regs->pN, crd_freed); in bnx2x_pbf_pN_buf_flushed()
1248 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", in bnx2x_pbf_pN_buf_flushed()
1249 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_buf_flushed()
1259 occup = to_free = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1260 freed = freed_start = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1262 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1263 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1266 if (cur_cnt--) { in bnx2x_pbf_pN_cmd_flushed()
1268 occup = REG_RD(bp, regs->lines_occup); in bnx2x_pbf_pN_cmd_flushed()
1269 freed = REG_RD(bp, regs->lines_freed); in bnx2x_pbf_pN_cmd_flushed()
1271 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", in bnx2x_pbf_pN_cmd_flushed()
1272 regs->pN); in bnx2x_pbf_pN_cmd_flushed()
1273 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1274 regs->pN, occup); in bnx2x_pbf_pN_cmd_flushed()
1275 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", in bnx2x_pbf_pN_cmd_flushed()
1276 regs->pN, freed); in bnx2x_pbf_pN_cmd_flushed()
1280 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", in bnx2x_pbf_pN_cmd_flushed()
1281 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); in bnx2x_pbf_pN_cmd_flushed()
1290 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) in bnx2x_flr_clnup_reg_poll()
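bnx2x_flr_clnup_reg_poll() above is the generic bounded-poll idiom used throughout the FLR cleanup path: keep re-reading the register until it holds the expected value or the budget is exhausted, and let the caller compare the returned value against the expectation. A sketch with read_reg()/sleep_us() as hypothetical stand-ins for REG_RD()/usleep_range(), and the per-iteration delay assumed:

#include <stdint.h>

static uint32_t poll_reg_until(uint32_t (*read_reg)(uint32_t), uint32_t reg,
			       uint32_t expected, int poll_count,
			       void (*sleep_us)(unsigned int))
{
	uint32_t val;
	int cur_cnt = poll_count;

	/* stop as soon as the register matches, or when the budget runs out */
	while ((val = read_reg(reg)) != expected && cur_cnt--)
		sleep_us(20);   /* stands in for FLR_WAIT_INTERVAL; value assumed */

	return val;   /* caller treats val != expected as a timeout */
}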
1409 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); in bnx2x_send_final_clnup()
1414 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", in bnx2x_send_final_clnup()
1437 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ in bnx2x_poll_hw_usage_counters()
1444 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1451 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1458 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ in bnx2x_poll_hw_usage_counters()
1485 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1488 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); in bnx2x_hw_enable_status()
1491 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1494 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); in bnx2x_hw_enable_status()
1497 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); in bnx2x_hw_enable_status()
1500 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1503 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); in bnx2x_hw_enable_status()
1506 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", in bnx2x_hw_enable_status()
1514 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); in bnx2x_pf_flr_clnup()
1516 /* Re-enable PF target read access */ in bnx2x_pf_flr_clnup()
1520 DP(BNX2X_MSG_SP, "Polling usage counters\n"); in bnx2x_pf_flr_clnup()
1522 return -EBUSY; in bnx2x_pf_flr_clnup()
1528 return -EBUSY; in bnx2x_pf_flr_clnup()
1539 if (bnx2x_is_pcie_pending(bp->pdev)) in bnx2x_pf_flr_clnup()
1546 * Master enable - Due to WB DMAE writes performed before this in bnx2x_pf_flr_clnup()
1547 * register is re-initialized as part of the regular function init in bnx2x_pf_flr_clnup()
1559 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1560 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_hc_int_enable()
1561 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_hc_int_enable()
1582 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1594 DP(NETIF_MSG_IFUP, in bnx2x_hc_int_enable()
1596 (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_hc_int_enable()
1608 if (bp->port.pmf) in bnx2x_hc_int_enable()
1622 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1623 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; in bnx2x_igu_int_enable()
1624 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; in bnx2x_igu_int_enable()
1648 /* Clean previous status - need to configure igu prior to ack */ in bnx2x_igu_int_enable()
1656 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", in bnx2x_igu_int_enable()
1657 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); in bnx2x_igu_int_enable()
1662 pci_intx(bp->pdev, true); in bnx2x_igu_int_enable()
1669 if (bp->port.pmf) in bnx2x_igu_int_enable()
1681 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_int_enable()
1689 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_int_disable_sync()
1698 synchronize_irq(bp->msix_table[0].vector); in bnx2x_int_disable_sync()
1703 synchronize_irq(bp->msix_table[offset++].vector); in bnx2x_int_disable_sync()
1705 synchronize_irq(bp->pdev->irq); in bnx2x_int_disable_sync()
1708 cancel_delayed_work(&bp->sp_task); in bnx2x_int_disable_sync()
1709 cancel_delayed_work(&bp->period_task); in bnx2x_int_disable_sync()
1727 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1732 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1742 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_trylock_hw_lock()
1750 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, in bnx2x_trylock_hw_lock()
1756 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1772 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1788 /* Set the interrupt occurred bit for the sp-task to recognize it in bnx2x_schedule_sp_task()
1792 atomic_set(&bp->interrupt_occurred, 1); in bnx2x_schedule_sp_task()
1801 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); in bnx2x_schedule_sp_task()
1806 struct bnx2x *bp = fp->bp; in bnx2x_sp_event()
1807 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1808 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); in bnx2x_sp_event()
1812 DP(BNX2X_MSG_SP, in bnx2x_sp_event()
1814 fp->index, cid, command, bp->state, in bnx2x_sp_event()
1815 rr_cqe->ramrod_cqe.ramrod_type); in bnx2x_sp_event()
1826 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); in bnx2x_sp_event()
1831 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); in bnx2x_sp_event()
1836 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); in bnx2x_sp_event()
1841 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); in bnx2x_sp_event()
1846 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); in bnx2x_sp_event()
1851 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); in bnx2x_sp_event()
1856 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); in bnx2x_sp_event()
1862 command, fp->index); in bnx2x_sp_event()
1867 q_obj->complete_cmd(bp, q_obj, drv_cmd)) in bnx2x_sp_event()
1868 /* q_obj->complete_cmd() failure means that this was in bnx2x_sp_event()
1871 * In this case we don't want to increase the bp->spq_left in bnx2x_sp_event()
1882 atomic_inc(&bp->cq_spq_left); in bnx2x_sp_event()
1883 /* push the change in bp->spq_left and towards the memory */ in bnx2x_sp_event()
1886 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); in bnx2x_sp_event()
1889 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { in bnx2x_sp_event()
1900 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); in bnx2x_sp_event()
1902 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_sp_event()
1922 DP(NETIF_MSG_INTR, "not our interrupt!\n"); in bnx2x_interrupt()
1925 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); in bnx2x_interrupt()
1928 if (unlikely(bp->panic)) in bnx2x_interrupt()
1933 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_interrupt()
1935 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); in bnx2x_interrupt()
1939 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_interrupt()
1940 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_interrupt()
1941 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_interrupt()
1952 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_interrupt()
1953 if (c_ops && (bp->cnic_eth_dev.drv_state & in bnx2x_interrupt()
1955 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_interrupt()
1975 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", in bnx2x_interrupt()
1999 return -EINVAL; in bnx2x_acquire_hw_lock()
2006 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_acquire_hw_lock()
2014 return -EEXIST; in bnx2x_acquire_hw_lock()
2028 return -EAGAIN; in bnx2x_acquire_hw_lock()
2047 return -EINVAL; in bnx2x_release_hw_lock()
2054 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); in bnx2x_release_hw_lock()
2062 return -EFAULT; in bnx2x_release_hw_lock()
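The acquire/release fragments above suggest how the hardware resource locks work: each function has a DRIVER_CONTROL register with one bit per resource. Acquire writes the resource bit to a "set" register and reads the control register back to see whether the hardware granted it (-EEXIST if it was already held, -EAGAIN if the grant never appears); release checks that the bit is actually held (-EFAULT otherwise) and writes it to the "clear" register. The sketch below models that handshake; the +4 offset for the set register and the reg_rd()/reg_wr() stand-ins are assumptions, not taken from the fragments.

#include <stdint.h>
#include <errno.h>

int hw_lock_try_acquire(uint32_t (*reg_rd)(uint32_t),
			void (*reg_wr)(uint32_t, uint32_t),
			uint32_t control_reg, uint32_t resource_bit)
{
	uint32_t lock_status;

	/* already ours? then the caller's bookkeeping is out of sync */
	lock_status = reg_rd(control_reg);
	if (lock_status & resource_bit)
		return -EEXIST;

	reg_wr(control_reg + 4, resource_bit);   /* request the lock (offset assumed) */
	lock_status = reg_rd(control_reg);
	return (lock_status & resource_bit) ? 0 : -EAGAIN;
}

int hw_lock_release(uint32_t (*reg_rd)(uint32_t),
		    void (*reg_wr)(uint32_t, uint32_t),
		    uint32_t control_reg, uint32_t resource_bit)
{
	if (!(reg_rd(control_reg) & resource_bit))
		return -EFAULT;               /* releasing a lock we do not hold */

	reg_wr(control_reg, resource_bit);    /* clear the bit to release */
	return 0;
}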
2082 return -EINVAL; in bnx2x_get_gpio()
2109 return -EINVAL; in bnx2x_set_gpio()
2118 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2119 "Set GPIO %d (shift %d) -> output low\n", in bnx2x_set_gpio()
2127 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2128 "Set GPIO %d (shift %d) -> output high\n", in bnx2x_set_gpio()
2136 DP(NETIF_MSG_LINK, in bnx2x_set_gpio()
2137 "Set GPIO %d (shift %d) -> input\n", in bnx2x_set_gpio()
2169 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); in bnx2x_set_mult_gpio()
2175 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); in bnx2x_set_mult_gpio()
2181 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); in bnx2x_set_mult_gpio()
2188 rc = -EINVAL; in bnx2x_set_mult_gpio()
2212 return -EINVAL; in bnx2x_set_gpio_int()
2221 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2222 "Clear GPIO INT %d (shift %d) -> output low\n", in bnx2x_set_gpio_int()
2230 DP(NETIF_MSG_LINK, in bnx2x_set_gpio_int()
2231 "Set GPIO INT %d (shift %d) -> output high\n", in bnx2x_set_gpio_int()
2255 return -EINVAL; in bnx2x_set_spio()
2264 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); in bnx2x_set_spio()
2271 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); in bnx2x_set_spio()
2278 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); in bnx2x_set_spio()
2297 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2299 switch (bp->link_vars.ieee_fc & in bnx2x_calc_fc_adv()
2302 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | in bnx2x_calc_fc_adv()
2307 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; in bnx2x_calc_fc_adv()
2321 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) in bnx2x_set_requested_fc()
2322 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; in bnx2x_set_requested_fc()
2324 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; in bnx2x_set_requested_fc()
2331 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { in bnx2x_init_dropless_fc()
2332 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_init_dropless_fc()
2340 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", in bnx2x_init_dropless_fc()
2347 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; in bnx2x_initial_phy_init()
2354 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2355 lp->loopback_mode = LOOPBACK_XGXS; in bnx2x_initial_phy_init()
2357 if (lp->req_line_speed[cfx_idx] < SPEED_20000) { in bnx2x_initial_phy_init()
2358 if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2360 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2362 else if (lp->speed_cap_mask[cfx_idx] & in bnx2x_initial_phy_init()
2364 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2367 lp->req_line_speed[cfx_idx] = in bnx2x_initial_phy_init()
2373 struct link_params *lp = &bp->link_params; in bnx2x_initial_phy_init()
2374 lp->loopback_mode = LOOPBACK_EXT; in bnx2x_initial_phy_init()
2377 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_initial_phy_init()
2385 if (bp->link_vars.link_up) { in bnx2x_initial_phy_init()
2389 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_initial_phy_init()
2390 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; in bnx2x_initial_phy_init()
2393 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_initial_phy_init()
2394 return -EINVAL; in bnx2x_initial_phy_init()
2401 bnx2x_phy_init(&bp->link_params, &bp->link_vars); in bnx2x_link_set()
2408 BNX2X_ERR("Bootcode is missing - can not set link\n"); in bnx2x_link_set()
2415 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); in bnx2x__link_reset()
2418 BNX2X_ERR("Bootcode is missing - can not reset link\n"); in bnx2x__link_reset()
2424 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); in bnx2x_force_link_reset()
2434 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, in bnx2x_link_test()
2438 BNX2X_ERR("Bootcode is missing - can not test link\n"); in bnx2x_link_test()
2448 0 - if all the min_rates are 0.
2459 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_min()
2466 /* If min rate is zero - set it to 1 */ in bnx2x_calc_vn_min()
2472 input->vnic_min_rate[vn] = vn_min_rate; in bnx2x_calc_vn_min()
2475 /* if ETS or all min rates are zeros - disable fairness */ in bnx2x_calc_vn_min()
2477 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2479 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); in bnx2x_calc_vn_min()
2481 input->flags.cmng_enables &= in bnx2x_calc_vn_min()
2483 DP(NETIF_MSG_IFUP, in bnx2x_calc_vn_min()
2486 input->flags.cmng_enables |= in bnx2x_calc_vn_min()
2494 u32 vn_cfg = bp->mf_config[vn]; in bnx2x_calc_vn_max()
2503 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; in bnx2x_calc_vn_max()
2509 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); in bnx2x_calc_vn_max()
2511 input->vnic_max_rate[vn] = vn_max_rate; in bnx2x_calc_vn_max()
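The min/max calculations above reduce to simple arithmetic: the max rate is the configured percentage (maxCfg) of the current line speed, and a min rate of zero is bumped to a small floor so the fairness algorithm has something to work with. A worked example (the floor value and the sample numbers are illustrative):

#include <stdio.h>

#define DEF_MIN_RATE 1   /* assumed floor, in Mbps */

int main(void)
{
	unsigned int line_speed = 10000;   /* Mbps */
	unsigned int maxCfg = 25;          /* example: this VN may use 25% of the port */
	unsigned int vn_min_rate = 0;      /* example: unconfigured min rate */
	unsigned int vn_max_rate;

	if (vn_min_rate == 0)
		vn_min_rate = DEF_MIN_RATE;

	vn_max_rate = (line_speed * maxCfg) / 100;

	printf("vn_min_rate %u Mbps, vn_max_rate %u Mbps\n",
	       vn_min_rate, vn_max_rate);
	return 0;
}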
2548 bp->mf_config[vn] = in bnx2x_read_mf_cfg()
2551 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_read_mf_cfg()
2552 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); in bnx2x_read_mf_cfg()
2553 bp->flags |= MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2555 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); in bnx2x_read_mf_cfg()
2556 bp->flags &= ~MF_FUNC_DIS; in bnx2x_read_mf_cfg()
2565 input.port_rate = bp->link_vars.line_speed; in bnx2x_cmng_fns_init()
2577 /* calculate and set min-max rate for each vn */ in bnx2x_cmng_fns_init()
2578 if (bp->port.pmf) in bnx2x_cmng_fns_init()
2586 bnx2x_init_cmng(&input, &bp->cmng); in bnx2x_cmng_fns_init()
2591 DP(NETIF_MSG_IFUP, in bnx2x_cmng_fns_init()
2605 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); in storm_memset_cmng()
2614 (u32 *)&cmng->vnic.vnic_max_rate[vn]); in storm_memset_cmng()
2620 (u32 *)&cmng->vnic.vnic_min_rate[vn]); in storm_memset_cmng()
2631 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_set_local_cmng()
2634 DP(NETIF_MSG_IFUP, in bnx2x_set_local_cmng()
2645 bnx2x_link_update(&bp->link_params, &bp->link_vars); in bnx2x_link_attn()
2649 if (bp->link_vars.link_up) { in bnx2x_link_attn()
2651 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { in bnx2x_link_attn()
2656 memset(&(pstats->mac_stx[0]), 0, in bnx2x_link_attn()
2659 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_link_attn()
2663 if (bp->link_vars.link_up && bp->link_vars.line_speed) in bnx2x_link_attn()
2674 if (bp->state != BNX2X_STATE_OPEN) in bnx2x__link_status_update()
2680 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); in bnx2x__link_status_update()
2681 if (bp->link_vars.link_up) in bnx2x__link_status_update()
2689 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | in bnx2x__link_status_update()
2701 bp->port.advertising[0] = bp->port.supported[0]; in bnx2x__link_status_update()
2703 bp->link_params.bp = bp; in bnx2x__link_status_update()
2704 bp->link_params.port = BP_PORT(bp); in bnx2x__link_status_update()
2705 bp->link_params.req_duplex[0] = DUPLEX_FULL; in bnx2x__link_status_update()
2706 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2707 bp->link_params.req_line_speed[0] = SPEED_10000; in bnx2x__link_status_update()
2708 bp->link_params.speed_cap_mask[0] = 0x7f0000; in bnx2x__link_status_update()
2709 bp->link_params.switch_cfg = SWITCH_CFG_10G; in bnx2x__link_status_update()
2710 bp->link_vars.mac_type = MAC_TYPE_BMAC; in bnx2x__link_status_update()
2711 bp->link_vars.line_speed = SPEED_10000; in bnx2x__link_status_update()
2712 bp->link_vars.link_status = in bnx2x__link_status_update()
2715 bp->link_vars.link_up = 1; in bnx2x__link_status_update()
2716 bp->link_vars.duplex = DUPLEX_FULL; in bnx2x__link_status_update()
2717 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; in bnx2x__link_status_update()
2738 func_params.f_obj = &bp->func_obj; in bnx2x_afex_func_update()
2745 f_update_params->vif_id = vifid; in bnx2x_afex_func_update()
2746 f_update_params->afex_default_vlan = vlan_val; in bnx2x_afex_func_update()
2747 f_update_params->allowed_priorities = allowed_prio; in bnx2x_afex_func_update()
2770 func_params.f_obj = &bp->func_obj; in bnx2x_afex_handle_vif_list_cmd()
2774 update_params->afex_vif_list_command = cmd_type; in bnx2x_afex_handle_vif_list_cmd()
2775 update_params->vif_list_index = vif_index; in bnx2x_afex_handle_vif_list_cmd()
2776 update_params->func_bit_map = in bnx2x_afex_handle_vif_list_cmd()
2778 update_params->func_to_clear = 0; in bnx2x_afex_handle_vif_list_cmd()
2808 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2816 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2829 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2846 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2847 DP(BNX2X_MSG_MCP, in bnx2x_handle_afex_cmd()
2860 bp->mf_config[BP_VN(bp)] = mf_config; in bnx2x_handle_afex_cmd()
2900 bp->afex_def_vlan_tag = vlan_val; in bnx2x_handle_afex_cmd()
2901 bp->afex_vlan_mode = vlan_mode; in bnx2x_handle_afex_cmd()
2903 /* notify link down because BP->flags is disabled */ in bnx2x_handle_afex_cmd()
2910 bp->afex_def_vlan_tag = -1; in bnx2x_handle_afex_cmd()
2922 func_params.f_obj = &bp->func_obj; in bnx2x_handle_update_svid_cmd()
2933 /* Re-learn the S-tag from shmem */ in bnx2x_handle_update_svid_cmd()
2937 bp->mf_ov = val; in bnx2x_handle_update_svid_cmd()
2943 /* Configure new S-tag in LLH */ in bnx2x_handle_update_svid_cmd()
2945 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2949 &switch_update_params->changes); in bnx2x_handle_update_svid_cmd()
2950 switch_update_params->vlan = bp->mf_ov; in bnx2x_handle_update_svid_cmd()
2953 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", in bnx2x_handle_update_svid_cmd()
2954 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2957 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", in bnx2x_handle_update_svid_cmd()
2958 bp->mf_ov); in bnx2x_handle_update_svid_cmd()
2975 bp->port.pmf = 1; in bnx2x_pmf_update()
2976 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); in bnx2x_pmf_update()
2980 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). in bnx2x_pmf_update()
2985 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); in bnx2x_pmf_update()
2991 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_pmf_update()
3019 mutex_lock(&bp->fw_mb_mutex); in bnx2x_fw_command()
3020 seq = ++bp->fw_seq; in bnx2x_fw_command()
3024 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", in bnx2x_fw_command()
3036 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", in bnx2x_fw_command()
3048 mutex_unlock(&bp->fw_mb_mutex); in bnx2x_fw_command()
3070 storm_memset_func_cfg(bp, &tcfg, p->func_id); in bnx2x_func_init()
3074 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); in bnx2x_func_init()
3075 storm_memset_func_en(bp, p->func_id, 1); in bnx2x_func_init()
3078 if (p->spq_active) { in bnx2x_func_init()
3079 storm_memset_spq_addr(bp, p->spq_map, p->func_id); in bnx2x_func_init()
3081 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); in bnx2x_func_init()
3086 * bnx2x_get_common_flags - Return common flags
3092 * Return the flags that are common for the Tx-only and not normal connections.
3112 if (bp->flags & TX_SWITCHING) in bnx2x_get_common_flags()
3137 /* For FCoE - force usage of default priority (for afex) */ in bnx2x_get_q_flags()
3141 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_get_q_flags()
3144 if (fp->mode == TPA_MODE_GRO) in bnx2x_get_q_flags()
3167 gen_init->stat_id = bnx2x_stats_id(fp); in bnx2x_pf_q_prep_general()
3168 gen_init->spcl_id = fp->cl_id; in bnx2x_pf_q_prep_general()
3170 /* Always use mini-jumbo MTU for FCoE L2 ring */ in bnx2x_pf_q_prep_general()
3172 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; in bnx2x_pf_q_prep_general()
3174 gen_init->mtu = bp->dev->mtu; in bnx2x_pf_q_prep_general()
3176 gen_init->cos = cos; in bnx2x_pf_q_prep_general()
3178 gen_init->fp_hsi = ETH_FP_HSI_VERSION; in bnx2x_pf_q_prep_general()
3189 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_pf_rx_q_prep()
3190 pause->sge_th_lo = SGE_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3191 pause->sge_th_hi = SGE_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3194 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3195 pause->sge_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3199 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> in bnx2x_pf_rx_q_prep()
3201 max_sge = ((max_sge + PAGES_PER_SGE - 1) & in bnx2x_pf_rx_q_prep()
3202 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; in bnx2x_pf_rx_q_prep()
3206 /* pause - not for e1 */ in bnx2x_pf_rx_q_prep()
3208 pause->bd_th_lo = BD_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3209 pause->bd_th_hi = BD_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3211 pause->rcq_th_lo = RCQ_TH_LO(bp); in bnx2x_pf_rx_q_prep()
3212 pause->rcq_th_hi = RCQ_TH_HI(bp); in bnx2x_pf_rx_q_prep()
3217 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3218 pause->bd_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3219 bp->rx_ring_size); in bnx2x_pf_rx_q_prep()
3220 WARN_ON(bp->dropless_fc && in bnx2x_pf_rx_q_prep()
3221 pause->rcq_th_hi + FW_PREFETCH_CNT > in bnx2x_pf_rx_q_prep()
3224 pause->pri_map = 1; in bnx2x_pf_rx_q_prep()
3228 rxq_init->dscr_map = fp->rx_desc_mapping; in bnx2x_pf_rx_q_prep()
3229 rxq_init->sge_map = fp->rx_sge_mapping; in bnx2x_pf_rx_q_prep()
3230 rxq_init->rcq_map = fp->rx_comp_mapping; in bnx2x_pf_rx_q_prep()
3231 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; in bnx2x_pf_rx_q_prep()
3236 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - in bnx2x_pf_rx_q_prep()
3237 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; in bnx2x_pf_rx_q_prep()
3239 rxq_init->cl_qzone_id = fp->cl_qzone_id; in bnx2x_pf_rx_q_prep()
3240 rxq_init->tpa_agg_sz = tpa_agg_size; in bnx2x_pf_rx_q_prep()
3241 rxq_init->sge_buf_sz = sge_sz; in bnx2x_pf_rx_q_prep()
3242 rxq_init->max_sges_pkt = max_sge; in bnx2x_pf_rx_q_prep()
3243 rxq_init->rss_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3244 rxq_init->mcast_engine_id = BP_FUNC(bp); in bnx2x_pf_rx_q_prep()
3251 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); in bnx2x_pf_rx_q_prep()
3253 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; in bnx2x_pf_rx_q_prep()
3254 rxq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_rx_q_prep()
3257 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3259 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_rx_q_prep()
3264 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; in bnx2x_pf_rx_q_prep()
3265 rxq_init->silent_removal_mask = VLAN_VID_MASK; in bnx2x_pf_rx_q_prep()
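One computation earlier in bnx2x_pf_rx_q_prep() deserves a worked example: max_sges_pkt first rounds the MTU up to whole SGE pages, then rounds that page count up to a multiple of PAGES_PER_SGE and converts it into SGE entries. The standalone sketch below uses assumed values for the page size and PAGES_PER_SGE purely to show the two rounding steps:

#include <stdio.h>

#define SGE_PAGE_SIZE        4096u
#define SGE_PAGE_SHIFT       12
#define SGE_PAGE_ALIGN(x)    (((x) + SGE_PAGE_SIZE - 1) & ~(SGE_PAGE_SIZE - 1))
#define PAGES_PER_SGE_SHIFT  0        /* assumed: one page per SGE entry */
#define PAGES_PER_SGE        (1u << PAGES_PER_SGE_SHIFT)

int main(void)
{
	unsigned int mtu = 9000;   /* jumbo-frame example */
	unsigned int max_sge;

	max_sge = SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT;              /* pages needed */
	max_sge = ((max_sge + PAGES_PER_SGE - 1) &
		   ~(PAGES_PER_SGE - 1)) >> PAGES_PER_SGE_SHIFT;      /* SGE entries */

	printf("mtu %u -> %u SGEs per packet\n", mtu, max_sge);
	return 0;
}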
3273 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; in bnx2x_pf_tx_q_prep()
3274 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; in bnx2x_pf_tx_q_prep()
3275 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; in bnx2x_pf_tx_q_prep()
3276 txq_init->fw_sb_id = fp->fw_sb_id; in bnx2x_pf_tx_q_prep()
3282 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_pf_tx_q_prep()
3285 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; in bnx2x_pf_tx_q_prep()
3286 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; in bnx2x_pf_tx_q_prep()
3313 func_init.spq_map = bp->spq_mapping; in bnx2x_pf_init()
3314 func_init.spq_prod = bp->spq_prod_idx; in bnx2x_pf_init()
3318 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); in bnx2x_pf_init()
3324 * re-calculated according to the actual link rate. in bnx2x_pf_init()
3326 bp->link_vars.line_speed = SPEED_10000; in bnx2x_pf_init()
3330 if (bp->port.pmf) in bnx2x_pf_init()
3331 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_pf_init()
3333 /* init Event Queue - PCI bus guarantees correct endianity*/ in bnx2x_pf_init()
3334 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); in bnx2x_pf_init()
3335 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); in bnx2x_pf_init()
3336 eq_data.producer = bp->eq_prod; in bnx2x_pf_init()
3358 /* Tx queue should be only re-enabled */ in bnx2x_e1h_enable()
3359 netif_tx_wake_all_queues(bp->dev); in bnx2x_e1h_enable()
3372 &bp->slowpath->drv_info_to_mcp.ether_stat; in bnx2x_drv_info_ether_stat()
3374 &bp->sp_objs->mac_obj; in bnx2x_drv_info_ether_stat()
3377 strlcpy(ether_stat->version, DRV_MODULE_VERSION, in bnx2x_drv_info_ether_stat()
3389 memset(ether_stat->mac_local + i, 0, in bnx2x_drv_info_ether_stat()
3390 sizeof(ether_stat->mac_local[0])); in bnx2x_drv_info_ether_stat()
3391 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, in bnx2x_drv_info_ether_stat()
3393 ether_stat->mac_local + MAC_PAD, MAC_PAD, in bnx2x_drv_info_ether_stat()
3395 ether_stat->mtu_size = bp->dev->mtu; in bnx2x_drv_info_ether_stat()
3396 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_drv_info_ether_stat()
3397 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; in bnx2x_drv_info_ether_stat()
3398 if (bp->dev->features & NETIF_F_TSO) in bnx2x_drv_info_ether_stat()
3399 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; in bnx2x_drv_info_ether_stat()
3400 ether_stat->feature_flags |= bp->common.boot_mode; in bnx2x_drv_info_ether_stat()
3402 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; in bnx2x_drv_info_ether_stat()
3404 ether_stat->txq_size = bp->tx_ring_size; in bnx2x_drv_info_ether_stat()
3405 ether_stat->rxq_size = bp->rx_ring_size; in bnx2x_drv_info_ether_stat()
3408 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; in bnx2x_drv_info_ether_stat()
3414 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_fcoe_stat()
3416 &bp->slowpath->drv_info_to_mcp.fcoe_stat; in bnx2x_drv_info_fcoe_stat()
3421 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); in bnx2x_drv_info_fcoe_stat()
3423 fcoe_stat->qos_priority = in bnx2x_drv_info_fcoe_stat()
3424 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; in bnx2x_drv_info_fcoe_stat()
3429 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3433 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. in bnx2x_drv_info_fcoe_stat()
3437 &bp->fw_stats_data->fcoe; in bnx2x_drv_info_fcoe_stat()
3439 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3440 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3441 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3443 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3444 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3445 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3446 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3448 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3449 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3450 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3451 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3453 ADD_64_LE(fcoe_stat->rx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3454 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, in bnx2x_drv_info_fcoe_stat()
3455 fcoe_stat->rx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3456 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); in bnx2x_drv_info_fcoe_stat()
3458 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3459 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3460 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3462 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3463 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3464 fcoe_q_tstorm_stats->rcv_ucast_pkts); in bnx2x_drv_info_fcoe_stat()
3466 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3467 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3468 fcoe_q_tstorm_stats->rcv_bcast_pkts); in bnx2x_drv_info_fcoe_stat()
3470 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3471 fcoe_stat->rx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3472 fcoe_q_tstorm_stats->rcv_mcast_pkts); in bnx2x_drv_info_fcoe_stat()
3474 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3475 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3476 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); in bnx2x_drv_info_fcoe_stat()
3478 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3479 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3480 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3481 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3483 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3484 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3485 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3486 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3488 ADD_64_LE(fcoe_stat->tx_bytes_hi, in bnx2x_drv_info_fcoe_stat()
3489 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, in bnx2x_drv_info_fcoe_stat()
3490 fcoe_stat->tx_bytes_lo, in bnx2x_drv_info_fcoe_stat()
3491 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); in bnx2x_drv_info_fcoe_stat()
3493 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3494 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3495 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); in bnx2x_drv_info_fcoe_stat()
3497 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3498 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3499 fcoe_q_xstorm_stats->ucast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3501 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3502 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3503 fcoe_q_xstorm_stats->bcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
3505 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, in bnx2x_drv_info_fcoe_stat()
3506 fcoe_stat->tx_frames_lo, in bnx2x_drv_info_fcoe_stat()
3507 fcoe_q_xstorm_stats->mcast_pkts_sent); in bnx2x_drv_info_fcoe_stat()
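The long ADD_64_LE() chains above accumulate firmware counters that are kept as separate 32-bit hi/lo words (the LE variant also converts the little-endian firmware value first). The core operation is a 32-bit add on the low word with an explicit carry into the high word; a plain-C model of that idea, not the driver macro itself:

#include <stdio.h>
#include <stdint.h>

static void add_64(uint32_t *hi, uint32_t *lo, uint32_t add_hi, uint32_t add_lo)
{
	uint32_t old_lo = *lo;

	*lo += add_lo;
	*hi += add_hi + (*lo < old_lo);   /* carry out of the low word */
}

int main(void)
{
	uint32_t rx_bytes_hi = 0, rx_bytes_lo = 0xfffffff0;

	add_64(&rx_bytes_hi, &rx_bytes_lo, 0, 0x20);   /* e.g. a byte count from tstorm */
	printf("rx_bytes = 0x%08x%08x\n", rx_bytes_hi, rx_bytes_lo);
	return 0;
}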
3516 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; in bnx2x_drv_info_iscsi_stat()
3518 &bp->slowpath->drv_info_to_mcp.iscsi_stat; in bnx2x_drv_info_iscsi_stat()
3523 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, in bnx2x_drv_info_iscsi_stat()
3526 iscsi_stat->qos_priority = in bnx2x_drv_info_iscsi_stat()
3527 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; in bnx2x_drv_info_iscsi_stat()
3545 DP(BNX2X_MSG_MCP, in bnx2x_config_mf_bw()
3550 if (bp->link_vars.link_up) { in bnx2x_config_mf_bw()
3554 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); in bnx2x_config_mf_bw()
3565 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); in bnx2x_handle_eee_event()
3579 /* if drv_info version supported by MFW doesn't match - send NACK */ in bnx2x_handle_drv_info_req()
3589 mutex_lock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3591 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_handle_drv_info_req()
3605 /* if op code isn't supported - send NACK */ in bnx2x_handle_drv_info_req()
3625 DP(BNX2X_MSG_MCP, "Management does not support indication\n"); in bnx2x_handle_drv_info_req()
3626 } else if (!bp->drv_info_mng_owner) { in bnx2x_handle_drv_info_req()
3644 DP(BNX2X_MSG_MCP, "Management did not release indication\n"); in bnx2x_handle_drv_info_req()
3645 bp->drv_info_mng_owner = true; in bnx2x_handle_drv_info_req()
3649 mutex_unlock(&bp->drv_info_mutex); in bnx2x_handle_drv_info_req()
3661 vals[0] -= '0'; in bnx2x_update_mng_version_utility()
3684 mutex_lock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3686 if (bp->drv_info_mng_owner) in bnx2x_update_mng_version()
3689 if (bp->state != BNX2X_STATE_OPEN) in bnx2x_update_mng_version()
3698 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3701 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; in bnx2x_update_mng_version()
3704 memset(&bp->slowpath->drv_info_to_mcp, 0, in bnx2x_update_mng_version()
3707 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; in bnx2x_update_mng_version()
3715 mutex_unlock(&bp->drv_info_mutex); in bnx2x_update_mng_version()
3717 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", in bnx2x_update_mng_version()
3737 /* Check & notify On-Chip dump. */ in bnx2x_update_mfw_dump()
3741 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); in bnx2x_update_mfw_dump()
3744 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); in bnx2x_update_mfw_dump()
3766 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); in bnx2x_oem_event()
3771 * where the bp->flags can change so it is done without any in bnx2x_oem_event()
3774 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { in bnx2x_oem_event()
3775 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); in bnx2x_oem_event()
3776 bp->flags |= MF_FUNC_DIS; in bnx2x_oem_event()
3780 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); in bnx2x_oem_event()
3781 bp->flags &= ~MF_FUNC_DIS; in bnx2x_oem_event()
3806 struct eth_spe *next_spe = bp->spq_prod_bd; in bnx2x_sp_get_next()
3808 if (bp->spq_prod_bd == bp->spq_last_bd) { in bnx2x_sp_get_next()
3809 bp->spq_prod_bd = bp->spq; in bnx2x_sp_get_next()
3810 bp->spq_prod_idx = 0; in bnx2x_sp_get_next()
3811 DP(BNX2X_MSG_SP, "end of spq\n"); in bnx2x_sp_get_next()
3813 bp->spq_prod_bd++; in bnx2x_sp_get_next()
3814 bp->spq_prod_idx++; in bnx2x_sp_get_next()
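bnx2x_sp_get_next() above is a textbook ring-producer advance: return the current producer entry, then either wrap the producer back to the ring base (resetting the index) or step it forward. A self-contained sketch with an illustrative entry type and ring size standing in for the driver's eth_spe ring:

#define SPQ_ENTRIES 8   /* illustrative ring size */

struct spe { unsigned int data; };

struct spq_ring {
	struct spe entries[SPQ_ENTRIES];
	struct spe *prod_bd;     /* next entry to hand out */
	struct spe *last_bd;     /* last usable entry before wrapping */
	unsigned int prod_idx;
};

static struct spe *spq_get_next(struct spq_ring *q)
{
	struct spe *next = q->prod_bd;

	if (q->prod_bd == q->last_bd) {       /* end of ring: wrap to the base */
		q->prod_bd = q->entries;
		q->prod_idx = 0;
	} else {
		q->prod_bd++;
		q->prod_idx++;
	}
	return next;
}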
3832 bp->spq_prod_idx); in bnx2x_sp_prod_update()
3836 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3856 * bnx2x_sp_post - place a single command on an SP ring
3877 if (unlikely(bp->panic)) { in bnx2x_sp_post()
3879 return -EIO; in bnx2x_sp_post()
3883 spin_lock_bh(&bp->spq_lock); in bnx2x_sp_post()
3886 if (!atomic_read(&bp->eq_spq_left)) { in bnx2x_sp_post()
3888 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3890 return -EBUSY; in bnx2x_sp_post()
3892 } else if (!atomic_read(&bp->cq_spq_left)) { in bnx2x_sp_post()
3894 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3896 return -EBUSY; in bnx2x_sp_post()
3902 spe->hdr.conn_and_cmd_data = in bnx2x_sp_post()
3906 /* In some cases, type may already contain the func-id in bnx2x_sp_post()
3919 spe->hdr.type = cpu_to_le16(type); in bnx2x_sp_post()
3921 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); in bnx2x_sp_post()
3922 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); in bnx2x_sp_post()
3930 atomic_dec(&bp->eq_spq_left); in bnx2x_sp_post()
3932 atomic_dec(&bp->cq_spq_left); in bnx2x_sp_post()
3934 DP(BNX2X_MSG_SP, in bnx2x_sp_post()
3936 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), in bnx2x_sp_post()
3937 (u32)(U64_LO(bp->spq_mapping) + in bnx2x_sp_post()
3938 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, in bnx2x_sp_post()
3940 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); in bnx2x_sp_post()
3943 spin_unlock_bh(&bp->spq_lock); in bnx2x_sp_post()
3964 rc = -EBUSY; in bnx2x_acquire_alr()
3981 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_update_dsb_idx()
3985 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { in bnx2x_update_dsb_idx()
3986 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; in bnx2x_update_dsb_idx()
3990 if (bp->def_idx != def_sb->sp_sb.running_index) { in bnx2x_update_dsb_idx()
3991 bp->def_idx = def_sb->sp_sb.running_index; in bnx2x_update_dsb_idx()
4015 if (bp->attn_state & asserted) in bnx2x_attn_int_asserted()
4021 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", in bnx2x_attn_int_asserted()
4024 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_asserted()
4029 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4030 bp->attn_state |= asserted; in bnx2x_attn_int_asserted()
4031 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_asserted()
4053 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4056 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); in bnx2x_attn_int_asserted()
4059 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); in bnx2x_attn_int_asserted()
4062 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); in bnx2x_attn_int_asserted()
4066 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); in bnx2x_attn_int_asserted()
4070 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); in bnx2x_attn_int_asserted()
4074 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); in bnx2x_attn_int_asserted()
4079 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); in bnx2x_attn_int_asserted()
4083 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); in bnx2x_attn_int_asserted()
4087 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); in bnx2x_attn_int_asserted()
4094 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_asserted()
4100 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, in bnx2x_attn_int_asserted()
4101 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_asserted()
4107 * NIG mask. This loop should exit after 2-3 iterations max. in bnx2x_attn_int_asserted()
4109 if (bp->common.int_block != INT_BLOCK_HC) { in bnx2x_attn_int_asserted()
4117 DP(NETIF_MSG_HW, in bnx2x_attn_int_asserted()
4141 …netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card … in bnx2x_fan_failure()
4169 bnx2x_hw_reset_phy(&bp->link_params); in bnx2x_attn_int_deasserted0()
4173 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { in bnx2x_attn_int_deasserted0()
4175 bnx2x_handle_module_detect_int(&bp->link_params); in bnx2x_attn_int_deasserted0()
4237 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4244 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); in bnx2x_attn_int_deasserted2()
4277 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, in bnx2x_attn_int_deasserted3()
4298 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) in bnx2x_attn_int_deasserted3()
4301 if (bp->port.pmf && in bnx2x_attn_int_deasserted3()
4303 bp->dcbx_enabled > 0) in bnx2x_attn_int_deasserted3()
4317 if (bp->link_vars.periodic_flags & in bnx2x_attn_int_deasserted3()
4321 bp->link_vars.periodic_flags &= in bnx2x_attn_int_deasserted3()
4357 BNX2X_ERR("GRC time-out 0x%08x\n", val); in bnx2x_attn_int_deasserted3()
4370 * 0-7 - Engine0 load counter.
4371 * 8-15 - Engine1 load counter.
4372 * 16 - Engine0 RESET_IN_PROGRESS bit.
4373 * 17 - Engine1 RESET_IN_PROGRESS bit.
4374 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4376 * 19 - Engine1 ONE_IS_LOADED.
4377 * 20 - Chip reset flow bit. When set non-leader must wait for both engines
4429 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); in bnx2x_reset_is_global()
4502 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_set_pf_load()
4508 val1 |= (1 << bp->pf_num); in bnx2x_set_pf_load()
4521 * bnx2x_clear_pf_load - clear pf load mark
4539 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); in bnx2x_clear_pf_load()
4545 val1 &= ~(1 << bp->pf_num); in bnx2x_clear_pf_load()
4571 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); in bnx2x_get_load_status()
4575 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", in bnx2x_get_load_status()
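The recovery-register fragments above (the bit-layout comment, bnx2x_set_pf_load(), bnx2x_clear_pf_load() and bnx2x_get_load_status()) all manipulate the same shared word: one bit per PF for each engine, plus the reset and ONE_IS_LOADED flags described in the layout comment. The sketch below models just the per-PF load bits with the register as a plain variable; the exact return semantics of the clear path are an assumption:

#include <stdbool.h>

#define ENGINE0_LOAD_MASK 0x000000ff   /* bits 0-7: engine 0 PF load bits */
#define ENGINE1_LOAD_MASK 0x0000ff00   /* bits 8-15: engine 1 PF load bits */

static unsigned int gen_reg;   /* stands in for the shared recovery register */

static void set_pf_load(int pf_num)
{
	gen_reg |= (1u << pf_num);            /* mark this PF as loaded */
}

static bool clear_pf_load(int pf_num)
{
	gen_reg &= ~(1u << pf_num);           /* mark this PF as unloaded */
	/* assumed semantics: report whether any PF is still loaded */
	return !(gen_reg & (ENGINE0_LOAD_MASK | ENGINE1_LOAD_MASK));
}

static unsigned int get_load_status(int engine)
{
	unsigned int mask = engine ? ENGINE1_LOAD_MASK : ENGINE0_LOAD_MASK;
	return gen_reg & mask;                /* non-zero while the engine has loaded PFs */
}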
4970 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" in bnx2x_parity_attn()
4983 netdev_err(bp->dev, in bnx2x_parity_attn()
5008 * bnx2x_chk_parity_attn - checks for parity attentions.
5117 bp->recovery_state = BNX2X_RECOVERY_INIT; in bnx2x_attn_int_deasserted()
5118 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_attn_int_deasserted()
5141 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5146 group_mask = &bp->attn_group[index]; in bnx2x_attn_int_deasserted()
5148 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", in bnx2x_attn_int_deasserted()
5150 group_mask->sig[0], group_mask->sig[1], in bnx2x_attn_int_deasserted()
5151 group_mask->sig[2], group_mask->sig[3], in bnx2x_attn_int_deasserted()
5152 group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5155 attn.sig[4] & group_mask->sig[4]); in bnx2x_attn_int_deasserted()
5157 attn.sig[3] & group_mask->sig[3]); in bnx2x_attn_int_deasserted()
5159 attn.sig[1] & group_mask->sig[1]); in bnx2x_attn_int_deasserted()
5161 attn.sig[2] & group_mask->sig[2]); in bnx2x_attn_int_deasserted()
5163 attn.sig[0] & group_mask->sig[0]); in bnx2x_attn_int_deasserted()
5169 if (bp->common.int_block == INT_BLOCK_HC) in bnx2x_attn_int_deasserted()
5176 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, in bnx2x_attn_int_deasserted()
5177 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); in bnx2x_attn_int_deasserted()
5180 if (~bp->attn_state & deasserted) in bnx2x_attn_int_deasserted()
5189 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", in bnx2x_attn_int_deasserted()
5192 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); in bnx2x_attn_int_deasserted()
5197 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5198 bp->attn_state &= ~deasserted; in bnx2x_attn_int_deasserted()
5199 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); in bnx2x_attn_int_deasserted()
5205 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5207 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. in bnx2x_attn_int()
5209 u32 attn_state = bp->attn_state; in bnx2x_attn_int()
5215 DP(NETIF_MSG_HW, in bnx2x_attn_int()
5233 u32 igu_addr = bp->igu_base_addr; in bnx2x_igu_ack_sb()
5248 u8 err = elem->message.error; in bnx2x_cnic_handle_cfc_del()
5250 if (!bp->cnic_eth_dev.starting_cid || in bnx2x_cnic_handle_cfc_del()
5251 (cid < bp->cnic_eth_dev.starting_cid && in bnx2x_cnic_handle_cfc_del()
5252 cid != bp->cnic_eth_dev.iscsi_l2_cid)) in bnx2x_cnic_handle_cfc_del()
5255 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); in bnx2x_cnic_handle_cfc_del()
5274 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_handle_mcast_eqe()
5276 netif_addr_lock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5279 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); in bnx2x_handle_mcast_eqe()
5281 /* If there are pending mcast commands - send them */ in bnx2x_handle_mcast_eqe()
5282 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { in bnx2x_handle_mcast_eqe()
5289 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_mcast_eqe()
5297 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo); in bnx2x_handle_classification_eqe()
5306 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); in bnx2x_handle_classification_eqe()
5308 vlan_mac_obj = &bp->iscsi_l2_mac_obj; in bnx2x_handle_classification_eqe()
5310 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; in bnx2x_handle_classification_eqe()
5314 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); in bnx2x_handle_classification_eqe()
5315 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; in bnx2x_handle_classification_eqe()
5318 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); in bnx2x_handle_classification_eqe()
5329 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); in bnx2x_handle_classification_eqe()
5334 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); in bnx2x_handle_classification_eqe()
5341 netif_addr_lock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5343 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_handle_rx_mode_eqe()
5346 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5349 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5352 &bp->sp_state)) in bnx2x_handle_rx_mode_eqe()
5355 netif_addr_unlock_bh(bp->dev); in bnx2x_handle_rx_mode_eqe()
5361 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { in bnx2x_after_afex_vif_lists()
5362 DP(BNX2X_MSG_SP, in bnx2x_after_afex_vif_lists()
5364 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5366 elem->message.data.vif_list_event.func_bit_map); in bnx2x_after_afex_vif_lists()
5367 } else if (elem->message.data.vif_list_event.echo == in bnx2x_after_afex_vif_lists()
5369 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); in bnx2x_after_afex_vif_lists()
5388 &q_update_params->update_flags); in bnx2x_after_function_update()
5390 &q_update_params->update_flags); in bnx2x_after_function_update()
5394 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { in bnx2x_after_function_update()
5395 q_update_params->silent_removal_value = 0; in bnx2x_after_function_update()
5396 q_update_params->silent_removal_mask = 0; in bnx2x_after_function_update()
5398 q_update_params->silent_removal_value = in bnx2x_after_function_update()
5399 (bp->afex_def_vlan_tag & VLAN_VID_MASK); in bnx2x_after_function_update()
5400 q_update_params->silent_removal_mask = VLAN_VID_MASK; in bnx2x_after_function_update()
5405 fp = &bp->fp[q]; in bnx2x_after_function_update()
5416 fp = &bp->fp[FCOE_IDX(bp)]; in bnx2x_after_function_update()
5424 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); in bnx2x_after_function_update()
5433 /* If no FCoE ring - ACK MCP now */ in bnx2x_after_function_update()
5442 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); in bnx2x_cid_to_q_obj()
5447 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; in bnx2x_cid_to_q_obj()
5459 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; in bnx2x_eq_int()
5460 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; in bnx2x_eq_int()
5462 hw_cons = le16_to_cpu(*bp->eq_cons_sb); in bnx2x_eq_int()
5464 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. in bnx2x_eq_int()
5465 * when we get the next-page we need to adjust so the loop in bnx2x_eq_int()
5476 sw_cons = bp->eq_cons; in bnx2x_eq_int()
5477 sw_prod = bp->eq_prod; in bnx2x_eq_int()
5479 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", in bnx2x_eq_int()
5480 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); in bnx2x_eq_int()
5485 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; in bnx2x_eq_int()
5489 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", in bnx2x_eq_int()
5494 opcode = elem->message.opcode; in bnx2x_eq_int()
5500 &elem->message.data.vf_pf_event); in bnx2x_eq_int()
5506 bp->stats_comp++); in bnx2x_eq_int()
5518 cid = SW_CID(elem->message.data.cfc_del_event.cid); in bnx2x_eq_int()
5520 DP(BNX2X_MSG_SP, in bnx2x_eq_int()
5529 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) in bnx2x_eq_int()
5535 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); in bnx2x_eq_int()
5537 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5543 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); in bnx2x_eq_int()
5545 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5551 echo = elem->message.data.function_update_event.echo; in bnx2x_eq_int()
5553 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5555 if (f_obj->complete_cmd( in bnx2x_eq_int()
5562 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, in bnx2x_eq_int()
5564 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5577 f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5582 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5584 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) in bnx2x_eq_int()
5590 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, in bnx2x_eq_int()
5592 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) in bnx2x_eq_int()
5598 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, in bnx2x_eq_int()
5600 if (f_obj->complete_cmd(bp, f_obj, in bnx2x_eq_int()
5606 switch (opcode | bp->state) { in bnx2x_eq_int()
5613 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", in bnx2x_eq_int()
5614 SW_CID(elem->message.data.eth_event.echo)); in bnx2x_eq_int()
5615 rss_raw->clear_pending(rss_raw); in bnx2x_eq_int()
5628 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); in bnx2x_eq_int()
5638 DP(BNX2X_MSG_SP, "got mcast ramrod\n"); in bnx2x_eq_int()
5648 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); in bnx2x_eq_int()
5653 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", in bnx2x_eq_int()
5654 elem->message.opcode, bp->state); in bnx2x_eq_int()
5661 atomic_add(spqe_cnt, &bp->eq_spq_left); in bnx2x_eq_int()
5663 bp->eq_cons = sw_cons; in bnx2x_eq_int()
5664 bp->eq_prod = sw_prod; in bnx2x_eq_int()
5669 bnx2x_update_eq_prod(bp, bp->eq_prod); in bnx2x_eq_int()
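/*
 * Illustrative sketch, not part of the original source: the range comment
 * at the top of bnx2x_eq_int() exists because the last slot of every EQ
 * page holds a next-page pointer rather than a real event, so the software
 * consumer must skip it when advancing.  Taking the ranges in that comment
 * at face value (a 256-entry page whose slot 255 is the next-page element),
 * index 254 advances straight to 256.  The macro name below is
 * illustrative, not the driver's exact definition:
 */
#define SKETCH_NEXT_EQ_IDX(x) \
	((((x) % EQ_DESC_CNT_PAGE) == (EQ_DESC_CNT_PAGE - 2)) ? \
	 (x) + 2 : (x) + 1)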
5676 DP(BNX2X_MSG_SP, "sp task invoked\n"); in bnx2x_sp_task()
5680 if (atomic_read(&bp->interrupt_occurred)) { in bnx2x_sp_task()
5685 DP(BNX2X_MSG_SP, "status %x\n", status); in bnx2x_sp_task()
5686 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); in bnx2x_sp_task()
5687 atomic_set(&bp->interrupt_occurred, 0); in bnx2x_sp_task()
5701 /* Prevent local bottom-halves from running as in bnx2x_sp_task()
5711 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, in bnx2x_sp_task()
5712 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); in bnx2x_sp_task()
5719 DP(BNX2X_MSG_SP, in bnx2x_sp_task()
5723 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, in bnx2x_sp_task()
5724 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); in bnx2x_sp_task()
5727 /* afex - poll to check if VIFSET_ACK should be sent to MFW */ in bnx2x_sp_task()
5729 &bp->sp_state)) { in bnx2x_sp_task()
5740 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, in bnx2x_msix_sp_int()
5744 if (unlikely(bp->panic)) in bnx2x_msix_sp_int()
5752 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_msix_sp_int()
5754 c_ops->cnic_handler(bp->cnic_data, NULL); in bnx2x_msix_sp_int()
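/*
 * Illustrative sketch, not part of the original source: the CNIC handler
 * above is reached through an RCU-protected ops pointer, so the
 * dereference and the call belong inside an RCU read-side critical
 * section.  The general shape of that dispatch, restated as a standalone
 * helper (the helper name is illustrative only):
 */
static inline void sketch_call_cnic(struct bnx2x *bp)
{
	struct cnic_ops *c_ops;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		c_ops->cnic_handler(bp->cnic_data, NULL);
	rcu_read_unlock();
}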
5771 bp->fw_drv_pulse_wr_seq); in bnx2x_drv_pulse()
5778 if (!netif_running(bp->dev)) in bnx2x_timer()
5787 ++bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5788 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; in bnx2x_timer()
5789 drv_pulse = bp->fw_drv_pulse_wr_seq; in bnx2x_timer()
5799 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) in bnx2x_timer()
5804 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_timer()
5811 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_timer()
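/*
 * Illustrative note, not part of the original source: the pulse exchange
 * in bnx2x_timer() is a wrap-safe heartbeat.  Both sequence numbers are
 * kept inside their sequence masks and the distance is computed modulo
 * the mask, so a recently wrapped driver counter still compares close to
 * the MCP's echo.  Assuming, purely for illustration, a 15-bit mask:
 *
 *	drv_pulse = 0x0002, mcp_pulse = 0x7ffe
 *	(drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK == 4   ->  in sync
 *
 * A delta above 5 means the MCP has stopped echoing the driver's pulses
 * and the mismatch is logged.
 */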
5833 /* helper: writes FP SP data to FW - data_size in dwords */
5914 hc_sm->igu_sb_id = igu_sb_id; in bnx2x_setup_ndsb_state_machine()
5915 hc_sm->igu_seg_id = igu_seg_id; in bnx2x_setup_ndsb_state_machine()
5916 hc_sm->timer_value = 0xFF; in bnx2x_setup_ndsb_state_machine()
5917 hc_sm->time_to_expire = 0xFFFFFFFF; in bnx2x_setup_ndsb_state_machine()
6003 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); in bnx2x_init_sb()
6005 /* write indices to HW - PCI guarantees endianness of regpairs */ in bnx2x_init_sb()
6027 struct host_sp_status_block *def_sb = bp->def_status_blk; in bnx2x_init_def_sb()
6028 dma_addr_t mapping = bp->def_status_blk_mapping; in bnx2x_init_def_sb()
6043 igu_sp_sb_index = bp->igu_dsb_id; in bnx2x_init_def_sb()
6050 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; in bnx2x_init_def_sb()
6052 bp->attn_state = 0; in bnx2x_init_def_sb()
6062 bp->attn_group[index].sig[sindex] = in bnx2x_init_def_sb()
6071 bp->attn_group[index].sig[4] = REG_RD(bp, in bnx2x_init_def_sb()
6074 bp->attn_group[index].sig[4] = 0; in bnx2x_init_def_sb()
6077 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_def_sb()
6105 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); in bnx2x_init_def_sb()
6113 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, in bnx2x_update_coalesce()
6114 bp->tx_ticks, bp->rx_ticks); in bnx2x_update_coalesce()
6119 spin_lock_init(&bp->spq_lock); in bnx2x_init_sp_ring()
6120 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); in bnx2x_init_sp_ring()
6122 bp->spq_prod_idx = 0; in bnx2x_init_sp_ring()
6123 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; in bnx2x_init_sp_ring()
6124 bp->spq_prod_bd = bp->spq; in bnx2x_init_sp_ring()
6125 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; in bnx2x_init_sp_ring()
6133 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; in bnx2x_init_eq_ring()
6135 elem->next_page.addr.hi = in bnx2x_init_eq_ring()
6136 cpu_to_le32(U64_HI(bp->eq_mapping + in bnx2x_init_eq_ring()
6138 elem->next_page.addr.lo = in bnx2x_init_eq_ring()
6139 cpu_to_le32(U64_LO(bp->eq_mapping + in bnx2x_init_eq_ring()
6142 bp->eq_cons = 0; in bnx2x_init_eq_ring()
6143 bp->eq_prod = NUM_EQ_DESC; in bnx2x_init_eq_ring()
6144 bp->eq_cons_sb = BNX2X_EQ_INDEX; in bnx2x_init_eq_ring()
6146 atomic_set(&bp->eq_spq_left, in bnx2x_init_eq_ring()
6147 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); in bnx2x_init_eq_ring()
6165 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; in bnx2x_set_q_rx_mode()
6168 ramrod_param.pstate = &bp->sp_state; in bnx2x_set_q_rx_mode()
6174 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); in bnx2x_set_q_rx_mode()
6184 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); in bnx2x_set_q_rx_mode()
6216 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6232 if (bp->accept_any_vlan) { in bnx2x_fill_accept_flags()
6263 return -EINVAL; in bnx2x_fill_accept_flags()
6280 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, in bnx2x_set_storm_rx_mode()
6288 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, in bnx2x_set_storm_rx_mode()
6334 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_igu_sb_id()
6339 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); in bnx2x_fp_fw_sb_id()
6344 if (CHIP_IS_E1x(fp->bp)) in bnx2x_fp_cl_id()
6345 return BP_L_ID(fp->bp) + fp->index; in bnx2x_fp_cl_id()
6352 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; in bnx2x_init_eth_fp()
6356 fp->rx_queue = fp_idx; in bnx2x_init_eth_fp()
6357 fp->cid = fp_idx; in bnx2x_init_eth_fp()
6358 fp->cl_id = bnx2x_fp_cl_id(fp); in bnx2x_init_eth_fp()
6359 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); in bnx2x_init_eth_fp()
6360 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); in bnx2x_init_eth_fp()
6362 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); in bnx2x_init_eth_fp()
6365 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); in bnx2x_init_eth_fp()
6368 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; in bnx2x_init_eth_fp()
6374 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); in bnx2x_init_eth_fp()
6378 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], in bnx2x_init_eth_fp()
6379 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), in bnx2x_init_eth_fp()
6382 cids[cos] = fp->txdata_ptr[cos]->cid; in bnx2x_init_eth_fp()
6389 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, in bnx2x_init_eth_fp()
6390 fp->fw_sb_id, fp->igu_sb_id); in bnx2x_init_eth_fp()
6392 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, in bnx2x_init_eth_fp()
6393 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_eth_fp()
6401 DP(NETIF_MSG_IFUP, in bnx2x_init_eth_fp()
6403 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_eth_fp()
6404 fp->igu_sb_id); in bnx2x_init_eth_fp()
6413 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; in bnx2x_init_tx_ring_one()
6415 tx_next_bd->addr_hi = in bnx2x_init_tx_ring_one()
6416 cpu_to_le32(U64_HI(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6418 tx_next_bd->addr_lo = in bnx2x_init_tx_ring_one()
6419 cpu_to_le32(U64_LO(txdata->tx_desc_mapping + in bnx2x_init_tx_ring_one()
6423 *txdata->tx_cons_sb = cpu_to_le16(0); in bnx2x_init_tx_ring_one()
6425 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); in bnx2x_init_tx_ring_one()
6426 txdata->tx_db.data.zero_fill1 = 0; in bnx2x_init_tx_ring_one()
6427 txdata->tx_db.data.prod = 0; in bnx2x_init_tx_ring_one()
6429 txdata->tx_pkt_prod = 0; in bnx2x_init_tx_ring_one()
6430 txdata->tx_pkt_cons = 0; in bnx2x_init_tx_ring_one()
6431 txdata->tx_bd_prod = 0; in bnx2x_init_tx_ring_one()
6432 txdata->tx_bd_cons = 0; in bnx2x_init_tx_ring_one()
6433 txdata->tx_pkt = 0; in bnx2x_init_tx_ring_one()
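/*
 * Illustrative sketch, not part of the original source: the assignments at
 * the top of bnx2x_init_tx_ring_one() chain the pages of the descriptor
 * ring into a circle - the last BD of page i carries the DMA address of
 * the next page (wrapping back to page 0).  Restated as a loop, with
 * NUM_TX_PAGES standing in for the driver's real page-count constant:
 *
 *	for (i = 1; i <= NUM_TX_PAGES; i++) {
 *		next_bd = &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
 *		off = BCM_PAGE_SIZE * (i % NUM_TX_PAGES);
 *		next_bd->addr_hi = cpu_to_le32(U64_HI(txdata->tx_desc_mapping + off));
 *		next_bd->addr_lo = cpu_to_le32(U64_LO(txdata->tx_desc_mapping + off));
 *	}
 */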
6441 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); in bnx2x_init_tx_rings_cnic()
6450 for_each_cos_in_tx_queue(&bp->fp[i], cos) in bnx2x_init_tx_rings()
6451 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); in bnx2x_init_tx_rings()
6464 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; in bnx2x_init_fcoe_fp()
6467 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, in bnx2x_init_fcoe_fp()
6470 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); in bnx2x_init_fcoe_fp()
6482 /* No multi-CoS for FCoE L2 client */ in bnx2x_init_fcoe_fp()
6483 BUG_ON(fp->max_cos != 1); in bnx2x_init_fcoe_fp()
6485 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, in bnx2x_init_fcoe_fp()
6486 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), in bnx2x_init_fcoe_fp()
6489 DP(NETIF_MSG_IFUP, in bnx2x_init_fcoe_fp()
6491 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, in bnx2x_init_fcoe_fp()
6492 fp->igu_sb_id); in bnx2x_init_fcoe_fp()
6500 bnx2x_init_sb(bp, bp->cnic_sb_mapping, in bnx2x_nic_init_cnic()
6528 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, in bnx2x_pre_irq_nic_init()
6529 bp->common.shmem_base, in bnx2x_pre_irq_nic_init()
6530 bp->common.shmem2_base, BP_PORT(bp)); in bnx2x_pre_irq_nic_init()
6562 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, in bnx2x_gunzip_init()
6563 &bp->gunzip_mapping, GFP_KERNEL); in bnx2x_gunzip_init()
6564 if (bp->gunzip_buf == NULL) in bnx2x_gunzip_init()
6567 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); in bnx2x_gunzip_init()
6568 if (bp->strm == NULL) in bnx2x_gunzip_init()
6571 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); in bnx2x_gunzip_init()
6572 if (bp->strm->workspace == NULL) in bnx2x_gunzip_init()
6578 kfree(bp->strm); in bnx2x_gunzip_init()
6579 bp->strm = NULL; in bnx2x_gunzip_init()
6582 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_init()
6583 bp->gunzip_mapping); in bnx2x_gunzip_init()
6584 bp->gunzip_buf = NULL; in bnx2x_gunzip_init()
6587 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); in bnx2x_gunzip_init()
6588 return -ENOMEM; in bnx2x_gunzip_init()
6593 if (bp->strm) { in bnx2x_gunzip_end()
6594 vfree(bp->strm->workspace); in bnx2x_gunzip_end()
6595 kfree(bp->strm); in bnx2x_gunzip_end()
6596 bp->strm = NULL; in bnx2x_gunzip_end()
6599 if (bp->gunzip_buf) { in bnx2x_gunzip_end()
6600 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, in bnx2x_gunzip_end()
6601 bp->gunzip_mapping); in bnx2x_gunzip_end()
6602 bp->gunzip_buf = NULL; in bnx2x_gunzip_end()
6613 return -EINVAL; in bnx2x_gunzip()
6623 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; in bnx2x_gunzip()
6624 bp->strm->avail_in = len - n; in bnx2x_gunzip()
6625 bp->strm->next_out = bp->gunzip_buf; in bnx2x_gunzip()
6626 bp->strm->avail_out = FW_BUF_SIZE; in bnx2x_gunzip()
6628 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); in bnx2x_gunzip()
6632 rc = zlib_inflate(bp->strm, Z_FINISH); in bnx2x_gunzip()
6634 netdev_err(bp->dev, "Firmware decompression error: %s\n", in bnx2x_gunzip()
6635 bp->strm->msg); in bnx2x_gunzip()
6637 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); in bnx2x_gunzip()
6638 if (bp->gunzip_outlen & 0x3) in bnx2x_gunzip()
6639 netdev_err(bp->dev, in bnx2x_gunzip()
6641 bp->gunzip_outlen); in bnx2x_gunzip()
6642 bp->gunzip_outlen >>= 2; in bnx2x_gunzip()
6644 zlib_inflateEnd(bp->strm); in bnx2x_gunzip()
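/*
 * Illustrative sketch, not part of the original source: bnx2x_gunzip() is
 * a one-shot use of the kernel zlib API.  The negative window size asks
 * for a raw deflate stream, which is why the caller skips the 'n' header
 * bytes of the gzip framing by hand before handing the payload to zlib.
 * A minimal, self-contained restatement (helper name and parameter list
 * are illustrative; error reporting trimmed):
 */
static int sketch_raw_inflate(z_stream *strm, const u8 *in, int in_len,
			      u8 *out, int out_len)
{
	int rc;

	/* strm->workspace must already point at zlib_inflate_workspacesize()
	 * bytes, as set up in bnx2x_gunzip_init() above.
	 */
	strm->next_in = (typeof(strm->next_in))in;
	strm->avail_in = in_len;
	strm->next_out = out;
	strm->avail_out = out_len;

	rc = zlib_inflateInit2(strm, -MAX_WBITS);	/* raw deflate, no zlib header */
	if (rc != Z_OK)
		return rc;
	rc = zlib_inflate(strm, Z_FINISH);		/* single pass */
	zlib_inflateEnd(strm);
	return (rc == Z_OK || rc == Z_STREAM_END) ? 0 : rc;
}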
6669 /* NON-IP protocol */ in bnx2x_lb_pckt()
6716 count--; in bnx2x_int_mem_test()
6720 return -1; in bnx2x_int_mem_test()
6731 count--; in bnx2x_int_mem_test()
6735 return -2; in bnx2x_int_mem_test()
6746 DP(NETIF_MSG_HW, "part2\n"); in bnx2x_int_mem_test()
6772 count--; in bnx2x_int_mem_test()
6776 return -3; in bnx2x_int_mem_test()
6800 return -4; in bnx2x_int_mem_test()
6820 DP(NETIF_MSG_HW, "done\n"); in bnx2x_int_mem_test()
6903 bp->dmae_ready = 0; in bnx2x_setup_dmae()
6904 spin_lock_init(&bp->dmae_lock); in bnx2x_setup_dmae()
6912 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); in bnx2x_init_pxp()
6913 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); in bnx2x_init_pxp()
6915 if (bp->mrrs == -1) in bnx2x_init_pxp()
6918 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); in bnx2x_init_pxp()
6919 r_order = bp->mrrs; in bnx2x_init_pxp()
6951 bp->common.shmem_base, in bnx2x_setup_fan_failure_detection()
6952 bp->common.shmem2_base, in bnx2x_setup_fan_failure_detection()
6956 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); in bnx2x_setup_fan_failure_detection()
6992 shmem_base[0] = bp->common.shmem_base; in bnx2x__common_init_phy()
6993 shmem2_base[0] = bp->common.shmem2_base; in bnx2x__common_init_phy()
7002 bp->common.chip_id); in bnx2x__common_init_phy()
7038 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7046 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); in bnx2x_init_hw_common()
7072 * 4-port mode or 2-port mode we need to turn off master-enable in bnx2x_init_hw_common()
7074 * so, we disregard multi-function or not, and always disable in bnx2x_init_hw_common()
7115 return -EBUSY; in bnx2x_init_hw_common()
7120 return -EBUSY; in bnx2x_init_hw_common()
7130 * (i.e. vnic3) to start even if it is marked as "scan-off". in bnx2x_init_hw_common()
7132 * as "scan-off". Real-life scenario for example: if a driver is being in bnx2x_init_hw_common()
7133 * load-unloaded while func6,7 are down. This will cause the timer to access in bnx2x_init_hw_common()
7148 * dmae-operations (writing to pram for example.) in bnx2x_init_hw_common()
7158 * b. Wait 20msec. - note that this timeout is needed to make in bnx2x_init_hw_common()
7189 * PF-s might be dynamic. in bnx2x_init_hw_common()
7198 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_init_hw_common()
7235 } while (factor-- && (val != 1)); in bnx2x_init_hw_common()
7239 return -EBUSY; in bnx2x_init_hw_common()
7248 bp->dmae_ready = 1; in bnx2x_init_hw_common()
7267 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_common()
7288 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); in bnx2x_init_hw_common()
7301 /* Bit-map indicating which L2 hdrs may appear in bnx2x_init_hw_common()
7305 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7353 bp->path_has_ovlan ? 7 : 6); in bnx2x_init_hw_common()
7377 dev_alert(&bp->pdev->dev, in bnx2x_init_hw_common()
7421 /* in E3 this is done in the per-port section */ in bnx2x_init_hw_common()
7436 return -EBUSY; in bnx2x_init_hw_common()
7441 return -EBUSY; in bnx2x_init_hw_common()
7446 return -EBUSY; in bnx2x_init_hw_common()
7459 return -EBUSY; in bnx2x_init_hw_common()
7475 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); in bnx2x_init_hw_common()
7484 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7495 /* In E2 2-PORT mode, same ext phy is used for the two paths */ in bnx2x_init_hw_common_chip()
7509 DP(NETIF_MSG_HW, "starting port init port %d\n", port); in bnx2x_init_hw_port()
7519 * attempted. Therefore we manually added the enable-master to the in bnx2x_init_hw_port()
7536 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); in bnx2x_init_hw_port()
7551 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); in bnx2x_init_hw_port()
7552 else if (bp->dev->mtu > 4096) { in bnx2x_init_hw_port()
7553 if (bp->flags & ONE_PORT_FLAG) in bnx2x_init_hw_port()
7556 val = bp->dev->mtu; in bnx2x_init_hw_port()
7562 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); in bnx2x_init_hw_port()
7587 /* Ovlan exists only if we are in multi-function + in bnx2x_init_hw_port()
7588 * switch-dependent mode, in switch-independent there in bnx2x_init_hw_port()
7594 (bp->path_has_ovlan ? 7 : 6)); in bnx2x_init_hw_port()
7620 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); in bnx2x_init_hw_port()
7644 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use in bnx2x_init_hw_port()
7645 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF in bnx2x_init_hw_port()
7646 * bits 4-7 are used for "per vn group attention" */ in bnx2x_init_hw_port()
7666 /* Bit-map indicating which L2 hdrs may appear after the in bnx2x_init_hw_port()
7694 switch (bp->mf_mode) { in bnx2x_init_hw_port()
7725 bp->flags |= PTP_SUPPORTED; in bnx2x_init_hw_port()
7768 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7772 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", in bnx2x_igu_clear_sb_gen()
7778 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) in bnx2x_igu_clear_sb_gen()
7782 DP(NETIF_MSG_HW, in bnx2x_igu_clear_sb_gen()
7803 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); in bnx2x_init_searcher()
7819 func_params.f_obj = &bp->func_obj; in bnx2x_func_switch_update()
7824 &switch_update_params->changes); in bnx2x_func_switch_update()
7827 &switch_update_params->changes); in bnx2x_func_switch_update()
7840 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7841 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_reset_nic_mode()
7870 BNX2X_ERR("Can't suspend tx-switching!\n"); in bnx2x_reset_nic_mode()
7878 if (bp->mf_mode == SINGLE_FUNCTION) { in bnx2x_reset_nic_mode()
7879 bnx2x_set_rx_filter(&bp->link_params, 1); in bnx2x_reset_nic_mode()
7898 BNX2X_ERR("Can't resume tx-switching!\n"); in bnx2x_reset_nic_mode()
7902 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_reset_nic_mode()
7926 /* previous driver DMAE transaction may have occurred when pre-boot stage ended
7928 * the addresses of the transaction, resulting in was-error bit set in the pci
7929 * causing all hw-to-host pcie transactions to timeout. If this happened we want
7951 DP(NETIF_MSG_HW, "starting func init func %d\n", func); in bnx2x_init_hw_func()
7953 /* FLR cleanup - hmmm */ in bnx2x_init_hw_func()
7963 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
7974 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7983 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; in bnx2x_init_hw_func()
7985 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; in bnx2x_init_hw_func()
7986 ilt->lines[cdu_ilt_start + i].page_mapping = in bnx2x_init_hw_func()
7987 bp->context[i].cxt_mapping; in bnx2x_init_hw_func()
7988 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; in bnx2x_init_hw_func()
7996 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); in bnx2x_init_hw_func()
8000 DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); in bnx2x_init_hw_func()
8009 if (!(bp->flags & USING_MSIX_FLAG)) in bnx2x_init_hw_func()
8019 * Master enable - Due to WB DMAE writes performed before this in bnx2x_init_hw_func()
8020 * register is re-initialized as part of the regular function in bnx2x_init_hw_func()
8028 bp->dmae_ready = 1; in bnx2x_init_hw_func()
8088 bp->mf_ov); in bnx2x_init_hw_func()
8095 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_init_hw_func()
8120 * E2 mode: address 0-135 match to the mapping memory; in bnx2x_init_hw_func()
8121 * 136 - PF0 default prod; 137 - PF1 default prod; in bnx2x_init_hw_func()
8122 * 138 - PF2 default prod; 139 - PF3 default prod; in bnx2x_init_hw_func()
8123 * 140 - PF0 attn prod; 141 - PF1 attn prod; in bnx2x_init_hw_func()
8124 * 142 - PF2 attn prod; 143 - PF3 attn prod; in bnx2x_init_hw_func()
8125 * 144-147 reserved. in bnx2x_init_hw_func()
8127 * E1.5 mode - In backward compatible mode; in bnx2x_init_hw_func()
8131 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 in bnx2x_init_hw_func()
8134 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; in bnx2x_init_hw_func()
8135 * 132-135 C prods; 136-139 X prods; 140-143 T prods; in bnx2x_init_hw_func()
8136 * 144-147 attn prods; in bnx2x_init_hw_func()
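/*
 * Illustrative restatement, not part of the original source, of the E2
 * producer layout described above as arithmetic: for physical function pf
 * (0..3), the default-status-block producer sits at slot 136 + pf and its
 * attention producer at slot 140 + pf of the IGU producer memory, while
 * slots 0-135 mirror the mapping memory and 144-147 are reserved.
 * Purely illustrative helpers:
 *
 *	static inline int sketch_e2_igu_def_prod(int pf)  { return 136 + pf; }
 *	static inline int sketch_e2_igu_attn_prod(int pf) { return 140 + pf; }
 */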
8138 /* non-default-status-blocks */ in bnx2x_init_hw_func()
8141 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { in bnx2x_init_hw_func()
8142 prod_offset = (bp->igu_base_sb + sb_idx) * in bnx2x_init_hw_func()
8151 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, in bnx2x_init_hw_func()
8154 bp->igu_base_sb + sb_idx); in bnx2x_init_hw_func()
8157 /* default-status-blocks */ in bnx2x_init_hw_func()
8171 * igu prods come in chunks of E1HVN_MAX (4) - in bnx2x_init_hw_func()
8182 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8184 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8186 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8188 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8190 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8193 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8195 bnx2x_ack_sb(bp, bp->igu_dsb_id, in bnx2x_init_hw_func()
8198 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); in bnx2x_init_hw_func()
8201 rf-tool supports split-68 const */ in bnx2x_init_hw_func()
8224 DP(NETIF_MSG_HW, in bnx2x_init_hw_func()
8228 /* Clear "false" parity errors in MSI-X table */ in bnx2x_init_hw_func()
8252 bnx2x_phy_probe(&bp->link_params); in bnx2x_init_hw_func()
8262 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8265 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, in bnx2x_free_mem_cnic()
8268 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem_cnic()
8275 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_mem()
8276 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_mem()
8281 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, in bnx2x_free_mem()
8284 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, in bnx2x_free_mem()
8288 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, in bnx2x_free_mem()
8289 bp->context[i].size); in bnx2x_free_mem()
8292 BNX2X_FREE(bp->ilt->lines); in bnx2x_free_mem()
8294 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_free_mem()
8296 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, in bnx2x_free_mem()
8299 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); in bnx2x_free_mem()
8308 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8310 if (!bp->cnic_sb.e2_sb) in bnx2x_alloc_mem_cnic()
8313 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, in bnx2x_alloc_mem_cnic()
8315 if (!bp->cnic_sb.e1x_sb) in bnx2x_alloc_mem_cnic()
8319 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem_cnic()
8321 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem_cnic()
8322 if (!bp->t2) in bnx2x_alloc_mem_cnic()
8327 bp->cnic_eth_dev.addr_drv_info_to_mcp = in bnx2x_alloc_mem_cnic()
8328 &bp->slowpath->drv_info_to_mcp; in bnx2x_alloc_mem_cnic()
8338 return -ENOMEM; in bnx2x_alloc_mem_cnic()
8345 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { in bnx2x_alloc_mem()
8347 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); in bnx2x_alloc_mem()
8348 if (!bp->t2) in bnx2x_alloc_mem()
8352 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, in bnx2x_alloc_mem()
8354 if (!bp->def_status_blk) in bnx2x_alloc_mem()
8357 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, in bnx2x_alloc_mem()
8359 if (!bp->slowpath) in bnx2x_alloc_mem()
8365 * 1. There are multiple entities allocating memory for context - in bnx2x_alloc_mem()
8368 * 2. Since CDU page-size is not a single 4KB page (which is the case in bnx2x_alloc_mem()
8370 * allocation of sub-page-size in the last entry. in bnx2x_alloc_mem()
8378 bp->context[i].size = min(CDU_ILT_PAGE_SZ, in bnx2x_alloc_mem()
8379 (context_size - allocated)); in bnx2x_alloc_mem()
8380 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, in bnx2x_alloc_mem()
8381 bp->context[i].size); in bnx2x_alloc_mem()
8382 if (!bp->context[i].vcxt) in bnx2x_alloc_mem()
8384 allocated += bp->context[i].size; in bnx2x_alloc_mem()
8386 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), in bnx2x_alloc_mem()
8388 if (!bp->ilt->lines) in bnx2x_alloc_mem()
8398 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); in bnx2x_alloc_mem()
8399 if (!bp->spq) in bnx2x_alloc_mem()
8403 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, in bnx2x_alloc_mem()
8405 if (!bp->eq_ring) in bnx2x_alloc_mem()
8413 return -ENOMEM; in bnx2x_alloc_mem()
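/*
 * Illustrative sketch, not part of the original source: the context loop
 * in bnx2x_alloc_mem() above splits one logical context area into
 * per-ILT-line chunks, so only the final chunk may be smaller than
 * CDU_ILT_PAGE_SZ.  The size bookkeeping reduces to:
 *
 *	remaining = context_size;
 *	for (i = 0; remaining > 0; i++) {
 *		chunk = min(CDU_ILT_PAGE_SZ, remaining);
 *		bp->context[i].size = chunk;
 *		remaining -= chunk;
 *	}
 *
 * The last iteration gets whatever remainder is left over.
 */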
8448 if (rc == -EEXIST) { in bnx2x_set_mac_one()
8449 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_mac_one()
8484 if (rc == -EEXIST) { in bnx2x_set_vlan_one()
8486 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); in bnx2x_set_vlan_one()
8500 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_clear_vlan_info()
8501 vlan->hw = false; in bnx2x_clear_vlan_info()
8503 bp->vlan_cnt = 0; in bnx2x_clear_vlan_info()
8508 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; in bnx2x_del_all_vlans()
8514 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags); in bnx2x_del_all_vlans()
8537 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); in bnx2x_del_all_macs()
8549 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); in bnx2x_set_eth_mac()
8551 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8552 &bp->sp_objs->mac_obj, set, in bnx2x_set_eth_mac()
8555 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, in bnx2x_set_eth_mac()
8556 bp->fp->index, set); in bnx2x_set_eth_mac()
8563 return bnx2x_setup_queue(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8565 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); in bnx2x_setup_leading()
8569 * bnx2x_set_int_mode - configure interrupt mode
8573 * In case of MSI-X it will also try to enable MSI-X.
8581 return -EINVAL; in bnx2x_set_int_mode()
8597 /* failed to enable multiple MSI-X */ in bnx2x_set_int_mode()
8598 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", in bnx2x_set_int_mode()
8599 bp->num_queues, in bnx2x_set_int_mode()
8600 1 + bp->num_cnic_queues); in bnx2x_set_int_mode()
8608 bp->num_ethernet_queues = 1; in bnx2x_set_int_mode()
8609 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_int_mode()
8614 return -EINVAL; in bnx2x_set_int_mode()
8633 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); in bnx2x_ilt_set_info()
8634 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); in bnx2x_ilt_set_info()
8637 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; in bnx2x_ilt_set_info()
8638 ilt_client->client_num = ILT_CLIENT_CDU; in bnx2x_ilt_set_info()
8639 ilt_client->page_size = CDU_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8640 ilt_client->flags = ILT_CLIENT_SKIP_MEM; in bnx2x_ilt_set_info()
8641 ilt_client->start = line; in bnx2x_ilt_set_info()
8646 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8648 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", in bnx2x_ilt_set_info()
8649 ilt_client->start, in bnx2x_ilt_set_info()
8650 ilt_client->end, in bnx2x_ilt_set_info()
8651 ilt_client->page_size, in bnx2x_ilt_set_info()
8652 ilt_client->flags, in bnx2x_ilt_set_info()
8653 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8656 if (QM_INIT(bp->qm_cid_count)) { in bnx2x_ilt_set_info()
8657 ilt_client = &ilt->clients[ILT_CLIENT_QM]; in bnx2x_ilt_set_info()
8658 ilt_client->client_num = ILT_CLIENT_QM; in bnx2x_ilt_set_info()
8659 ilt_client->page_size = QM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8660 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8661 ilt_client->start = line; in bnx2x_ilt_set_info()
8664 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, in bnx2x_ilt_set_info()
8667 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8669 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8671 ilt_client->start, in bnx2x_ilt_set_info()
8672 ilt_client->end, in bnx2x_ilt_set_info()
8673 ilt_client->page_size, in bnx2x_ilt_set_info()
8674 ilt_client->flags, in bnx2x_ilt_set_info()
8675 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8680 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; in bnx2x_ilt_set_info()
8681 ilt_client->client_num = ILT_CLIENT_SRC; in bnx2x_ilt_set_info()
8682 ilt_client->page_size = SRC_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8683 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8684 ilt_client->start = line; in bnx2x_ilt_set_info()
8686 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8688 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8690 ilt_client->start, in bnx2x_ilt_set_info()
8691 ilt_client->end, in bnx2x_ilt_set_info()
8692 ilt_client->page_size, in bnx2x_ilt_set_info()
8693 ilt_client->flags, in bnx2x_ilt_set_info()
8694 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
8697 ilt_client = &ilt->clients[ILT_CLIENT_TM]; in bnx2x_ilt_set_info()
8698 ilt_client->client_num = ILT_CLIENT_TM; in bnx2x_ilt_set_info()
8699 ilt_client->page_size = TM_ILT_PAGE_SZ; in bnx2x_ilt_set_info()
8700 ilt_client->flags = 0; in bnx2x_ilt_set_info()
8701 ilt_client->start = line; in bnx2x_ilt_set_info()
8703 ilt_client->end = line - 1; in bnx2x_ilt_set_info()
8705 DP(NETIF_MSG_IFUP, in bnx2x_ilt_set_info()
8707 ilt_client->start, in bnx2x_ilt_set_info()
8708 ilt_client->end, in bnx2x_ilt_set_info()
8709 ilt_client->page_size, in bnx2x_ilt_set_info()
8710 ilt_client->flags, in bnx2x_ilt_set_info()
8711 ilog2(ilt_client->page_size >> 12)); in bnx2x_ilt_set_info()
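/*
 * Illustrative sketch, not part of the original source: every ILT client
 * in bnx2x_ilt_set_info() consumes a consecutive run of lines from the
 * same running counter, so each client's bookkeeping reduces to:
 *
 *	client->start = line;
 *	line += DIV_ROUND_UP(bytes_needed, client->page_size);
 *	client->end = line - 1;			(inclusive last line)
 *
 * e.g. a client needing 3 pages and starting at line 10 gets start = 10,
 * end = 12, and the next client begins at line 13.
 */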
8718 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8725 * - HC configuration
8726 * - Queue's CDU context
8736 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8737 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8742 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); in bnx2x_pf_q_prep_init()
8743 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); in bnx2x_pf_q_prep_init()
8746 init_params->rx.hc_rate = bp->rx_ticks ? in bnx2x_pf_q_prep_init()
8747 (1000000 / bp->rx_ticks) : 0; in bnx2x_pf_q_prep_init()
8748 init_params->tx.hc_rate = bp->tx_ticks ? in bnx2x_pf_q_prep_init()
8749 (1000000 / bp->tx_ticks) : 0; in bnx2x_pf_q_prep_init()
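/*
 * Illustrative note, not part of the original source: hc_rate converts
 * the coalescing period, given in microseconds, into an events-per-second
 * figure: hc_rate = 1000000 / ticks, with 0 ticks disabling coalescing.
 * For example rx_ticks = 25 us gives hc_rate = 40000, i.e. roughly 40000
 * coalescing intervals per second for that queue.
 */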
8752 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = in bnx2x_pf_q_prep_init()
8753 fp->fw_sb_id; in bnx2x_pf_q_prep_init()
8759 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; in bnx2x_pf_q_prep_init()
8760 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; in bnx2x_pf_q_prep_init()
8764 init_params->max_cos = fp->max_cos; in bnx2x_pf_q_prep_init()
8766 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", in bnx2x_pf_q_prep_init()
8767 fp->index, init_params->max_cos); in bnx2x_pf_q_prep_init()
8770 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { in bnx2x_pf_q_prep_init()
8771 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; in bnx2x_pf_q_prep_init()
8772 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * in bnx2x_pf_q_prep_init()
8774 init_params->cxts[cos] = in bnx2x_pf_q_prep_init()
8775 &bp->context[cxt_index].vcxt[cxt_offset].eth; in bnx2x_pf_q_prep_init()
8787 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; in bnx2x_setup_tx_only()
8789 /* Set tx-only QUEUE flags: don't zero statistics */ in bnx2x_setup_tx_only()
8790 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); in bnx2x_setup_tx_only()
8793 tx_only_params->cid_index = tx_index; in bnx2x_setup_tx_only()
8796 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); in bnx2x_setup_tx_only()
8799 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); in bnx2x_setup_tx_only()
8801 DP(NETIF_MSG_IFUP, in bnx2x_setup_tx_only()
8802 …"preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp… in bnx2x_setup_tx_only()
8803 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], in bnx2x_setup_tx_only()
8804 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, in bnx2x_setup_tx_only()
8805 tx_only_params->gen_params.spcl_id, tx_only_params->flags); in bnx2x_setup_tx_only()
8812 * bnx2x_setup_queue - setup queue
8819 * actually: 1) RESET->INIT 2) INIT->SETUP
8833 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); in bnx2x_setup_queue()
8837 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, in bnx2x_setup_queue()
8853 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); in bnx2x_setup_queue()
8857 DP(NETIF_MSG_IFUP, "init complete\n"); in bnx2x_setup_queue()
8863 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); in bnx2x_setup_queue()
8866 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, in bnx2x_setup_queue()
8869 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, in bnx2x_setup_queue()
8870 &setup_params->rxq_params); in bnx2x_setup_queue()
8872 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, in bnx2x_setup_queue()
8879 bp->fcoe_init = true; in bnx2x_setup_queue()
8884 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); in bnx2x_setup_queue()
8888 /* loop through the relevant tx-only indices */ in bnx2x_setup_queue()
8890 tx_index < fp->max_cos; in bnx2x_setup_queue()
8893 /* prepare and send tx-only ramrod*/ in bnx2x_setup_queue()
8898 fp->index, tx_index); in bnx2x_setup_queue()
8908 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_stop_queue()
8913 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); in bnx2x_stop_queue()
8919 /* close tx-only connections */ in bnx2x_stop_queue()
8921 tx_index < fp->max_cos; in bnx2x_stop_queue()
8925 txdata = fp->txdata_ptr[tx_index]; in bnx2x_stop_queue()
8927 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", in bnx2x_stop_queue()
8928 txdata->txq_index); in bnx2x_stop_queue()
8930 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8940 /* send halt terminate on tx-only connection */ in bnx2x_stop_queue()
8986 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_reset_func()
8988 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), in bnx2x_reset_func()
9008 if (bp->common.int_block == INT_BLOCK_HC) { in bnx2x_reset_func()
9032 /* Timer block workaround for an E2 bug: if this is vnic-3, in bnx2x_reset_func()
9040 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; in bnx2x_reset_func()
9050 bp->dmae_ready = 0; in bnx2x_reset_func()
9076 DP(NETIF_MSG_IFDOWN, in bnx2x_reset_port()
9089 func_params.f_obj = &bp->func_obj; in bnx2x_reset_hw()
9104 func_params.f_obj = &bp->func_obj; in bnx2x_func_stop()
9128 * bnx2x_send_unload_req - request unload mode from the MCP.
9144 else if (bp->flags & NO_WOL_FLAG) in bnx2x_send_unload_req()
9147 else if (bp->wol) { in bnx2x_send_unload_req()
9149 u8 *mac_addr = bp->dev->dev_addr; in bnx2x_send_unload_req()
9150 struct pci_dev *pdev = bp->pdev; in bnx2x_send_unload_req()
9154 /* The mac address is written to entries 1-4 to in bnx2x_send_unload_req()
9167 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); in bnx2x_send_unload_req()
9169 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); in bnx2x_send_unload_req()
9182 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9185 bnx2x_load_count[path][0]--; in bnx2x_send_unload_req()
9186 bnx2x_load_count[path][1 + port]--; in bnx2x_send_unload_req()
9187 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", in bnx2x_send_unload_req()
9202 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9219 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; in bnx2x_func_wait_started()
9221 if (!bp->port.pmf) in bnx2x_func_wait_started()
9228 * 2. Sync SP queue - this guarantees us that attention handling started in bnx2x_func_wait_started()
9232 * pending bit of transaction from STARTED-->TX_STOPPED, if we already in bnx2x_func_wait_started()
9234 * State will return to STARTED after completion of TX_STOPPED-->STARTED in bnx2x_func_wait_started()
9240 synchronize_irq(bp->msix_table[0].vector); in bnx2x_func_wait_started()
9242 synchronize_irq(bp->pdev->irq); in bnx2x_func_wait_started()
9247 while (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9248 BNX2X_F_STATE_STARTED && tout--) in bnx2x_func_wait_started()
9251 if (bnx2x_func_get_state(bp, &bp->func_obj) != in bnx2x_func_wait_started()
9255 return -EBUSY; in bnx2x_func_wait_started()
9263 DP(NETIF_MSG_IFDOWN, in bnx2x_func_wait_started()
9264 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); in bnx2x_func_wait_started()
9266 func_params.f_obj = &bp->func_obj; in bnx2x_func_wait_started()
9270 /* STARTED-->TX_STOPPED */ in bnx2x_func_wait_started()
9274 /* TX_STOPPED-->STARTED */ in bnx2x_func_wait_started()
9306 /* Called during unload, to stop PTP-related stuff */
9312 cancel_work_sync(&bp->ptp_task); in bnx2x_stop_ptp()
9314 if (bp->ptp_tx_skb) { in bnx2x_stop_ptp()
9315 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_stop_ptp()
9316 bp->ptp_tx_skb = NULL; in bnx2x_stop_ptp()
9322 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); in bnx2x_stop_ptp()
9335 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_chip_cleanup()
9338 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_chip_cleanup()
9349 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, in bnx2x_chip_cleanup()
9355 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, in bnx2x_chip_cleanup()
9380 netif_addr_lock_bh(bp->dev); in bnx2x_chip_cleanup()
9382 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_chip_cleanup()
9383 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_chip_cleanup()
9384 else if (bp->slowpath) in bnx2x_chip_cleanup()
9388 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_chip_cleanup()
9393 netif_addr_unlock_bh(bp->dev); in bnx2x_chip_cleanup()
9437 /* If SP settings didn't get completed so far - something in bnx2x_chip_cleanup()
9459 if (bp->flags & PTP_SUPPORTED) { in bnx2x_chip_cleanup()
9461 if (bp->ptp_clock) { in bnx2x_chip_cleanup()
9462 ptp_clock_unregister(bp->ptp_clock); in bnx2x_chip_cleanup()
9463 bp->ptp_clock = NULL; in bnx2x_chip_cleanup()
9482 if (!pci_channel_offline(bp->pdev)) { in bnx2x_chip_cleanup()
9496 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); in bnx2x_disable_close_the_gate()
9549 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", in bnx2x_set_234_gates()
9564 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9578 * bnx2x_reset_mcp_prep - prepare for MCP reset.
9590 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); in bnx2x_reset_mcp_prep()
9610 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9625 * initializes bp->common.shmem_base and waits for validity signature to appear
9633 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); in bnx2x_init_shmem()
9638 if (bp->common.shmem_base == 0xFFFFFFFF) { in bnx2x_init_shmem()
9639 bp->flags |= NO_MCP_FLAG; in bnx2x_init_shmem()
9640 return -ENODEV; in bnx2x_init_shmem()
9643 if (bp->common.shmem_base) { in bnx2x_init_shmem()
9655 return -ENODEV; in bnx2x_init_shmem()
9679 * - PCIE core
9680 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9682 * - IGU
9683 * - MISC (including AEU)
9684 * - GRC
9685 * - RBCN, RBCP
9730 * - all xxMACs are handled by the bnx2x_link code. in bnx2x_process_kill_chip_reset()
9783 * bnx2x_er_poll_igu_vq - poll for pending writes bit.
9803 } while (cnt-- > 0); in bnx2x_er_poll_igu_vq()
9808 return -EBUSY; in bnx2x_er_poll_igu_vq()
9838 } while (cnt-- > 0); in bnx2x_process_kill()
9845 return -EAGAIN; in bnx2x_process_kill()
9855 return -EAGAIN; in bnx2x_process_kill()
9863 /* Wait for 1ms to empty GLUE and PCI-E core queues, in bnx2x_process_kill()
9888 return -EAGAIN; in bnx2x_process_kill()
9896 * reset state, re-enable attentions. */ in bnx2x_process_kill()
9907 /* if not going to reset MCP - load "fake" driver to reset HW while in bnx2x_leader_reset()
9915 rc = -EAGAIN; in bnx2x_leader_reset()
9921 rc = -EAGAIN; in bnx2x_leader_reset()
9927 rc = -EAGAIN; in bnx2x_leader_reset()
9936 rc = -EAGAIN; in bnx2x_leader_reset()
9955 bp->is_leader = 0; in bnx2x_leader_reset()
9963 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); in bnx2x_recovery_failed()
9966 netif_device_detach(bp->dev); in bnx2x_recovery_failed()
9977 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_recovery_failed()
9985 * will never be called when netif_running(bp->dev) is false.
9994 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { in bnx2x_parity_recover()
9998 vf->state = VF_LOST; in bnx2x_parity_recover()
10001 DP(NETIF_MSG_HW, "Handling parity\n"); in bnx2x_parity_recover()
10003 switch (bp->recovery_state) { in bnx2x_parity_recover()
10005 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); in bnx2x_parity_recover()
10021 bp->is_leader = 1; in bnx2x_parity_recover()
10025 /* If interface has been removed - break */ in bnx2x_parity_recover()
10029 bp->recovery_state = BNX2X_RECOVERY_WAIT; in bnx2x_parity_recover()
10039 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); in bnx2x_parity_recover()
10040 if (bp->is_leader) { in bnx2x_parity_recover()
10061 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10065 /* If all other functions got down - in bnx2x_parity_recover()
10078 * to continue as a non-leader. in bnx2x_parity_recover()
10082 } else { /* non-leader */ in bnx2x_parity_recover()
10094 bp->is_leader = 1; in bnx2x_parity_recover()
10098 schedule_delayed_work(&bp->sp_rtnl_task, in bnx2x_parity_recover()
10109 &bp->sp_rtnl_task, in bnx2x_parity_recover()
10115 bp->eth_stats.recoverable_error; in bnx2x_parity_recover()
10117 bp->eth_stats.unrecoverable_error; in bnx2x_parity_recover()
10118 bp->recovery_state = in bnx2x_parity_recover()
10122 netdev_err(bp->dev, in bnx2x_parity_recover()
10125 netif_device_detach(bp->dev); in bnx2x_parity_recover()
10131 bp->recovery_state = in bnx2x_parity_recover()
10136 bp->eth_stats.recoverable_error = in bnx2x_parity_recover()
10138 bp->eth_stats.unrecoverable_error = in bnx2x_parity_recover()
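/*
 * Summary note, not part of the original source: bnx2x_parity_recover()
 * is a small state machine.  From BNX2X_RECOVERY_INIT each function tries
 * to become the recovery leader; the leader drives the global reset (see
 * bnx2x_process_kill() and bnx2x_leader_reset() above) while the other
 * functions move to BNX2X_RECOVERY_WAIT and re-schedule the sp_rtnl task
 * until the reset completes.  If the parity was flagged unrecoverable,
 * the state becomes BNX2X_RECOVERY_FAILED and the netdev is detached
 * instead of reloaded.
 */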
10163 func_params.f_obj = &bp->func_obj; in bnx2x_udp_port_update()
10168 &switch_update_params->changes); in bnx2x_udp_port_update()
10170 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { in bnx2x_udp_port_update()
10171 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; in bnx2x_udp_port_update()
10172 switch_update_params->geneve_dst_port = geneve_port; in bnx2x_udp_port_update()
10175 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { in bnx2x_udp_port_update()
10176 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; in bnx2x_udp_port_update()
10177 switch_update_params->vxlan_dst_port = vxlan_port; in bnx2x_udp_port_update()
10180 /* Re-enable inner-rss for the offloaded UDP tunnels */ in bnx2x_udp_port_update()
10182 &switch_update_params->changes); in bnx2x_udp_port_update()
10189 DP(BNX2X_MSG_SP, in bnx2x_udp_port_update()
10202 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); in bnx2x_udp_tunnel_sync()
10228 if (!netif_running(bp->dev)) { in bnx2x_sp_rtnl_task()
10233 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { in bnx2x_sp_rtnl_task()
10243 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10252 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10263 bp->sp_rtnl_state = 0; in bnx2x_sp_rtnl_task()
10267 bp->link_vars.link_up = 0; in bnx2x_sp_rtnl_task()
10268 bp->force_link_down = true; in bnx2x_sp_rtnl_task()
10269 netif_carrier_off(bp->dev); in bnx2x_sp_rtnl_task()
10270 BNX2X_ERR("Indicating link is down due to Tx-timeout\n"); in bnx2x_sp_rtnl_task()
10277 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { in bnx2x_sp_rtnl_task()
10288 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10289 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); in bnx2x_sp_rtnl_task()
10290 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10297 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10298 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); in bnx2x_sp_rtnl_task()
10299 netif_device_detach(bp->dev); in bnx2x_sp_rtnl_task()
10300 bnx2x_close(bp->dev); in bnx2x_sp_rtnl_task()
10305 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10306 DP(BNX2X_MSG_SP, in bnx2x_sp_rtnl_task()
10307 "sending set mcast vf pf channel message from rtnl sp-task\n"); in bnx2x_sp_rtnl_task()
10308 bnx2x_vfpf_set_mcast(bp->dev); in bnx2x_sp_rtnl_task()
10311 &bp->sp_rtnl_state)){ in bnx2x_sp_rtnl_task()
10312 if (netif_carrier_ok(bp->dev)) { in bnx2x_sp_rtnl_task()
10318 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10319 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); in bnx2x_sp_rtnl_task()
10324 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10327 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10333 &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10336 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) in bnx2x_sp_rtnl_task()
10339 /* work which needs rtnl lock not-taken (as it takes the lock itself and in bnx2x_sp_rtnl_task()
10344 /* enable SR-IOV if applicable */ in bnx2x_sp_rtnl_task()
10346 &bp->sp_rtnl_state)) { in bnx2x_sp_rtnl_task()
10356 if (!netif_running(bp->dev)) in bnx2x_period_task()
10367 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and in bnx2x_period_task()
10371 if (bp->port.pmf) { in bnx2x_period_task()
10372 bnx2x_period_func(&bp->link_params, &bp->link_vars); in bnx2x_period_task()
10374 /* Re-queue task in 1 sec */ in bnx2x_period_task()
10375 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); in bnx2x_period_task()
10390 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; in bnx2x_get_pretend_reg()
10406 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; in bnx2x_prev_unload_close_umac()
10407 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); in bnx2x_prev_unload_close_umac()
10408 REG_WR(bp, vals->umac_addr[port], 0); in bnx2x_prev_unload_close_umac()
10444 vals->bmac_addr = base_addr + offset; in bnx2x_prev_unload_close_mac()
10445 vals->bmac_val[0] = wb_data[0]; in bnx2x_prev_unload_close_mac()
10446 vals->bmac_val[1] = wb_data[1]; in bnx2x_prev_unload_close_mac()
10448 REG_WR(bp, vals->bmac_addr, wb_data[0]); in bnx2x_prev_unload_close_mac()
10449 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); in bnx2x_prev_unload_close_mac()
10452 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; in bnx2x_prev_unload_close_mac()
10453 vals->emac_val = REG_RD(bp, vals->emac_addr); in bnx2x_prev_unload_close_mac()
10454 REG_WR(bp, vals->emac_addr, 0); in bnx2x_prev_unload_close_mac()
10465 vals->xmac_addr = base_addr + XMAC_REG_CTRL; in bnx2x_prev_unload_close_mac()
10466 vals->xmac_val = REG_RD(bp, vals->xmac_addr); in bnx2x_prev_unload_close_mac()
10467 REG_WR(bp, vals->xmac_addr, 0); in bnx2x_prev_unload_close_mac()
10494 /* UNDI marks its presence in DORQ - in bnx2x_prev_is_after_undi()
10517 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); in bnx2x_prev_unload_undi_inc()
10526 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", in bnx2x_prev_unload_undi_inc()
10536 return -EBUSY; in bnx2x_prev_mcp_done()
10548 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && in bnx2x_prev_path_get_entry()
10549 bp->pdev->bus->number == tmp_list->bus && in bnx2x_prev_path_get_entry()
10550 BP_PATH(bp) == tmp_list->path) in bnx2x_prev_path_get_entry()
10569 tmp_list->aer = 1; in bnx2x_prev_path_mark_eeh()
10591 if (tmp_list->aer) { in bnx2x_prev_is_path_marked()
10592 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", in bnx2x_prev_is_path_marked()
10614 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); in bnx2x_port_after_undi()
10635 if (!tmp_list->aer) { in bnx2x_prev_mark_path()
10636 BNX2X_ERR("Re-Marking the path.\n"); in bnx2x_prev_mark_path()
10638 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", in bnx2x_prev_mark_path()
10640 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10651 return -ENOMEM; in bnx2x_prev_mark_path()
10654 tmp_list->bus = bp->pdev->bus->number; in bnx2x_prev_mark_path()
10655 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); in bnx2x_prev_mark_path()
10656 tmp_list->path = BP_PATH(bp); in bnx2x_prev_mark_path()
10657 tmp_list->aer = 0; in bnx2x_prev_mark_path()
10658 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; in bnx2x_prev_mark_path()
10665 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", in bnx2x_prev_mark_path()
10667 list_add(&tmp_list->list, &bnx2x_prev_list); in bnx2x_prev_mark_path()
10676 struct pci_dev *dev = bp->pdev; in bnx2x_do_flr()
10680 return -EINVAL; in bnx2x_do_flr()
10684 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { in bnx2x_do_flr()
10686 bp->common.bc_ver); in bnx2x_do_flr()
10687 return -EINVAL; in bnx2x_do_flr()
10691 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); in bnx2x_do_flr()
10771 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10772 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10773 bnx2x_set_rx_filter(&bp->link_params, 0); in bnx2x_prev_unload_common()
10774 bp->link_params.port ^= 1; in bnx2x_prev_unload_common()
10803 timer_count--; in bnx2x_prev_unload_common()
10855 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); in bnx2x_prev_unload()
10881 rc = -EBUSY; in bnx2x_prev_unload()
10892 bnx2x_prev_path_get_entry(bp)->aer); in bnx2x_prev_unload()
10901 /* non-common reply from MCP might require looping */ in bnx2x_prev_unload()
10907 } while (--time_counter); in bnx2x_prev_unload()
10911 rc = -EPROBE_DEFER; in bnx2x_prev_unload()
10916 bp->link_params.feature_config_flags |= in bnx2x_prev_unload()
10930 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ in bnx2x_get_common_hwinfo()
10943 bp->common.chip_id = id; in bnx2x_get_common_hwinfo()
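/*
 * Illustrative sketch, not part of the original source: given the chip_id
 * layout noted above (num in bits 16-31, rev 12-15, metal 4-11, bond_id
 * 0-3), the fields can be recovered with shifts and masks.  Helper names
 * are illustrative only:
 */
static inline u16 sketch_chip_num(u32 chip_id)   { return chip_id >> 16; }
static inline u8  sketch_chip_rev(u32 chip_id)   { return (chip_id >> 12) & 0xf; }
static inline u8  sketch_chip_metal(u32 chip_id) { return (chip_id >> 4) & 0xff; }
static inline u8  sketch_bond_id(u32 chip_id)    { return chip_id & 0xf; }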
10948 bp->common.chip_id = (CHIP_NUM_57811 << 16) | in bnx2x_get_common_hwinfo()
10949 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10951 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | in bnx2x_get_common_hwinfo()
10952 (bp->common.chip_id & 0x0000FFFF); in bnx2x_get_common_hwinfo()
10953 bp->common.chip_id |= 0x1; in bnx2x_get_common_hwinfo()
10957 bp->db_size = (1 << BNX2X_DB_SHIFT); in bnx2x_get_common_hwinfo()
10967 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : in bnx2x_get_common_hwinfo()
10971 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ in bnx2x_get_common_hwinfo()
10973 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ in bnx2x_get_common_hwinfo()
10975 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ in bnx2x_get_common_hwinfo()
10976 bp->pfid = bp->pf_num; /* 0..7 */ in bnx2x_get_common_hwinfo()
10979 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); in bnx2x_get_common_hwinfo()
10981 bp->link_params.chip_id = bp->common.chip_id; in bnx2x_get_common_hwinfo()
10985 if ((bp->common.chip_id & 0x1) || in bnx2x_get_common_hwinfo()
10987 bp->flags |= ONE_PORT_FLAG; in bnx2x_get_common_hwinfo()
10992 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << in bnx2x_get_common_hwinfo()
10995 bp->common.flash_size, bp->common.flash_size); in bnx2x_get_common_hwinfo()
10999 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? in bnx2x_get_common_hwinfo()
11003 bp->link_params.shmem_base = bp->common.shmem_base; in bnx2x_get_common_hwinfo()
11004 bp->link_params.shmem2_base = bp->common.shmem2_base; in bnx2x_get_common_hwinfo()
11007 bp->link_params.lfa_base = in bnx2x_get_common_hwinfo()
11008 REG_RD(bp, bp->common.shmem2_base + in bnx2x_get_common_hwinfo()
11012 bp->link_params.lfa_base = 0; in bnx2x_get_common_hwinfo()
11014 bp->common.shmem_base, bp->common.shmem2_base); in bnx2x_get_common_hwinfo()
11016 if (!bp->common.shmem_base) { in bnx2x_get_common_hwinfo()
11018 bp->flags |= NO_MCP_FLAG; in bnx2x_get_common_hwinfo()
11022 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); in bnx2x_get_common_hwinfo()
11023 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); in bnx2x_get_common_hwinfo()
11025 bp->link_params.hw_led_mode = ((bp->common.hw_config & in bnx2x_get_common_hwinfo()
11029 bp->link_params.feature_config_flags = 0; in bnx2x_get_common_hwinfo()
11032 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11035 bp->link_params.feature_config_flags &= in bnx2x_get_common_hwinfo()
11039 bp->common.bc_ver = val; in bnx2x_get_common_hwinfo()
11047 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11051 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11054 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11057 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11061 bp->link_params.feature_config_flags |= in bnx2x_get_common_hwinfo()
11065 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? in bnx2x_get_common_hwinfo()
11068 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? in bnx2x_get_common_hwinfo()
11071 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? in bnx2x_get_common_hwinfo()
11074 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? in bnx2x_get_common_hwinfo()
11082 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; in bnx2x_get_common_hwinfo()
11085 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; in bnx2x_get_common_hwinfo()
11088 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; in bnx2x_get_common_hwinfo()
11091 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; in bnx2x_get_common_hwinfo()
11095 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); in bnx2x_get_common_hwinfo()
11096 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; in bnx2x_get_common_hwinfo()
11099 (bp->flags & NO_WOL_FLAG) ? "not " : ""); in bnx2x_get_common_hwinfo()
11106 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", in bnx2x_get_common_hwinfo()
11120 bp->igu_base_sb = 0xff; in bnx2x_get_igu_cam_info()
11123 igu_sb_cnt = bp->igu_sb_cnt; in bnx2x_get_igu_cam_info()
11124 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * in bnx2x_get_igu_cam_info()
11127 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + in bnx2x_get_igu_cam_info()
11133 /* IGU in normal mode - read CAM */ in bnx2x_get_igu_cam_info()
11145 bp->igu_dsb_id = igu_sb_id; in bnx2x_get_igu_cam_info()
11147 if (bp->igu_base_sb == 0xff) in bnx2x_get_igu_cam_info()
11148 bp->igu_base_sb = igu_sb_id; in bnx2x_get_igu_cam_info()
11161 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); in bnx2x_get_igu_cam_info()
11166 return -EINVAL; in bnx2x_get_igu_cam_info()
11177 bp->port.supported[0] = 0; in bnx2x_link_settings_supported()
11178 bp->port.supported[1] = 0; in bnx2x_link_settings_supported()
11179 switch (bp->link_params.num_phys) { in bnx2x_link_settings_supported()
11181 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; in bnx2x_link_settings_supported()
11185 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11189 if (bp->link_params.multi_phy_config & in bnx2x_link_settings_supported()
11191 bp->port.supported[1] = in bnx2x_link_settings_supported()
11192 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11193 bp->port.supported[0] = in bnx2x_link_settings_supported()
11194 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11196 bp->port.supported[0] = in bnx2x_link_settings_supported()
11197 bp->link_params.phy[EXT_PHY1].supported; in bnx2x_link_settings_supported()
11198 bp->port.supported[1] = in bnx2x_link_settings_supported()
11199 bp->link_params.phy[EXT_PHY2].supported; in bnx2x_link_settings_supported()
11205 if (!(bp->port.supported[0] || bp->port.supported[1])) { in bnx2x_link_settings_supported()
11206 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", in bnx2x_link_settings_supported()
11215 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); in bnx2x_link_settings_supported()
11219 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11223 bp->port.phy_addr = REG_RD( in bnx2x_link_settings_supported()
11228 bp->port.link_config[0]); in bnx2x_link_settings_supported()
11232 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); in bnx2x_link_settings_supported()
11235 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11237 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; in bnx2x_link_settings_supported()
11239 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11241 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; in bnx2x_link_settings_supported()
11243 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11245 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; in bnx2x_link_settings_supported()
11247 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11249 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; in bnx2x_link_settings_supported()
11251 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11253 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | in bnx2x_link_settings_supported()
11256 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11258 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; in bnx2x_link_settings_supported()
11260 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11262 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; in bnx2x_link_settings_supported()
11264 if (!(bp->link_params.speed_cap_mask[idx] & in bnx2x_link_settings_supported()
11266 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; in bnx2x_link_settings_supported()
11269 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], in bnx2x_link_settings_supported()
11270 bp->port.supported[1]); in bnx2x_link_settings_supported()
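/*
 * Editor's note: illustrative sketch only, not part of bnx2x. It restates the
 * pattern above: every link mode whose bit is absent from the NVRAM
 * speed_cap_mask is cleared from the ethtool SUPPORTED_* bitmap. The CAP_/SUP_
 * names and the standalone helper are hypothetical.
 */
#include <stdint.h>

#define CAP_1G  (1u << 0)	/* hypothetical capability-mask bit */
#define CAP_10G (1u << 1)
#define SUP_1G  (1u << 2)	/* hypothetical SUPPORTED_* bit */
#define SUP_10G (1u << 3)

static uint32_t mask_supported(uint32_t supported, uint32_t cap_mask)
{
	if (!(cap_mask & CAP_1G))
		supported &= ~SUP_1G;
	if (!(cap_mask & CAP_10G))
		supported &= ~SUP_10G;
	return supported;
}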
11276 bp->port.advertising[0] = 0; in bnx2x_link_settings_requested()
11277 bp->port.advertising[1] = 0; in bnx2x_link_settings_requested()
11278 switch (bp->link_params.num_phys) { in bnx2x_link_settings_requested()
11288 bp->link_params.req_duplex[idx] = DUPLEX_FULL; in bnx2x_link_settings_requested()
11289 link_config = bp->port.link_config[idx]; in bnx2x_link_settings_requested()
11292 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { in bnx2x_link_settings_requested()
11293 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11295 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11296 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11297 if (bp->link_params.phy[EXT_PHY1].type == in bnx2x_link_settings_requested()
11299 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11304 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11306 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11314 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { in bnx2x_link_settings_requested()
11315 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11317 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11323 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11329 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { in bnx2x_link_settings_requested()
11330 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11332 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11334 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11340 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11346 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11348 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11350 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11356 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11362 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11364 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11366 bp->link_params.req_duplex[idx] = in bnx2x_link_settings_requested()
11368 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11374 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11380 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11382 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11384 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11387 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11389 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11391 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11396 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11402 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11404 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11406 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11412 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11418 if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11420 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11422 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11425 } else if (bp->port.supported[idx] & in bnx2x_link_settings_requested()
11427 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11429 bp->port.advertising[idx] |= in bnx2x_link_settings_requested()
11435 bp->link_params.speed_cap_mask[idx]); in bnx2x_link_settings_requested()
11440 bp->link_params.req_line_speed[idx] = SPEED_20000; in bnx2x_link_settings_requested()
11446 bp->link_params.req_line_speed[idx] = in bnx2x_link_settings_requested()
11448 bp->port.advertising[idx] = in bnx2x_link_settings_requested()
11449 bp->port.supported[idx]; in bnx2x_link_settings_requested()
11453 bp->link_params.req_flow_ctrl[idx] = (link_config & in bnx2x_link_settings_requested()
11455 if (bp->link_params.req_flow_ctrl[idx] == in bnx2x_link_settings_requested()
11457 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) in bnx2x_link_settings_requested()
11458 bp->link_params.req_flow_ctrl[idx] = in bnx2x_link_settings_requested()
11465 bp->link_params.req_line_speed[idx], in bnx2x_link_settings_requested()
11466 bp->link_params.req_duplex[idx], in bnx2x_link_settings_requested()
11467 bp->link_params.req_flow_ctrl[idx], in bnx2x_link_settings_requested()
11468 bp->port.advertising[idx]); in bnx2x_link_settings_requested()
11486 bp->link_params.bp = bp; in bnx2x_get_port_hwinfo()
11487 bp->link_params.port = port; in bnx2x_get_port_hwinfo()
11489 bp->link_params.lane_config = in bnx2x_get_port_hwinfo()
11492 bp->link_params.speed_cap_mask[0] = in bnx2x_get_port_hwinfo()
11496 bp->link_params.speed_cap_mask[1] = in bnx2x_get_port_hwinfo()
11500 bp->port.link_config[0] = in bnx2x_get_port_hwinfo()
11503 bp->port.link_config[1] = in bnx2x_get_port_hwinfo()
11506 bp->link_params.multi_phy_config = in bnx2x_get_port_hwinfo()
11512 bp->wol = (!(bp->flags & NO_WOL_FLAG) && in bnx2x_get_port_hwinfo()
11517 bp->flags |= NO_ISCSI_FLAG; in bnx2x_get_port_hwinfo()
11520 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_port_hwinfo()
11523 bp->link_params.lane_config, in bnx2x_get_port_hwinfo()
11524 bp->link_params.speed_cap_mask[0], in bnx2x_get_port_hwinfo()
11525 bp->port.link_config[0]); in bnx2x_get_port_hwinfo()
11527 bp->link_params.switch_cfg = (bp->port.link_config[0] & in bnx2x_get_port_hwinfo()
11529 bnx2x_phy_probe(&bp->link_params); in bnx2x_get_port_hwinfo()
11530 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); in bnx2x_get_port_hwinfo()
11543 bp->mdio.prtad = bp->port.phy_addr; in bnx2x_get_port_hwinfo()
11547 bp->mdio.prtad = in bnx2x_get_port_hwinfo()
11556 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | in bnx2x_get_port_hwinfo()
11560 bp->link_params.eee_mode = 0; in bnx2x_get_port_hwinfo()
11572 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11577 bp->cnic_eth_dev.max_iscsi_conn = in bnx2x_get_iscsi_info()
11582 bp->cnic_eth_dev.max_iscsi_conn); in bnx2x_get_iscsi_info()
11585 * If maximum allowed number of connections is zero - in bnx2x_get_iscsi_info()
11588 if (!bp->cnic_eth_dev.max_iscsi_conn) in bnx2x_get_iscsi_info()
11589 bp->flags |= no_flags; in bnx2x_get_iscsi_info()
11595 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_ext_wwn_info()
11597 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_ext_wwn_info()
11601 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_ext_wwn_info()
11603 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_ext_wwn_info()
11658 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11663 bp->cnic_eth_dev.max_fcoe_conn = in bnx2x_get_fcoe_info()
11668 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; in bnx2x_get_fcoe_info()
11672 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; in bnx2x_get_fcoe_info()
11677 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = in bnx2x_get_fcoe_info()
11681 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = in bnx2x_get_fcoe_info()
11687 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = in bnx2x_get_fcoe_info()
11691 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = in bnx2x_get_fcoe_info()
11706 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); in bnx2x_get_fcoe_info()
11709 * If maximum allowed number of connections is zero - in bnx2x_get_fcoe_info()
11712 if (!bp->cnic_eth_dev.max_fcoe_conn) { in bnx2x_get_fcoe_info()
11713 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_fcoe_info()
11714 eth_zero_addr(bp->fip_mac); in bnx2x_get_fcoe_info()
11734 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; in bnx2x_get_cnic_mac_hwinfo()
11735 u8 *fip_mac = bp->fip_mac; in bnx2x_get_cnic_mac_hwinfo()
11754 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11766 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11769 bp->mf_ext_config = cfg; in bnx2x_get_cnic_mac_hwinfo()
11774 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11781 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11788 /* If this is a storage-only interface, use SAN mac as in bnx2x_get_cnic_mac_hwinfo()
11793 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); in bnx2x_get_cnic_mac_hwinfo()
11810 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11816 bp->flags |= NO_FCOE_FLAG; in bnx2x_get_cnic_mac_hwinfo()
11817 eth_zero_addr(bp->fip_mac); in bnx2x_get_cnic_mac_hwinfo()
11828 eth_zero_addr(bp->dev->dev_addr); in bnx2x_get_mac_hwinfo()
11832 eth_hw_addr_random(bp->dev); in bnx2x_get_mac_hwinfo()
11838 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); in bnx2x_get_mac_hwinfo()
11846 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); in bnx2x_get_mac_hwinfo()
11856 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); in bnx2x_get_mac_hwinfo()
11857 bp->flags |= HAS_PHYS_PORT_ID; in bnx2x_get_mac_hwinfo()
11860 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); in bnx2x_get_mac_hwinfo()
11862 if (!is_valid_ether_addr(bp->dev->dev_addr)) in bnx2x_get_mac_hwinfo()
11863 dev_err(&bp->pdev->dev, in bnx2x_get_mac_hwinfo()
11866 bp->dev->dev_addr); in bnx2x_get_mac_hwinfo()
11901 bp->mf_mode = MULTI_FUNCTION_SI; in validate_set_si_mode()
11902 bp->mf_config[BP_VN(bp)] = in validate_set_si_mode()
11917 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11919 return -EINVAL; in bnx2x_get_hwinfo()
11928 bp->common.int_block = INT_BLOCK_HC; in bnx2x_get_hwinfo()
11930 bp->igu_dsb_id = DEF_SB_IGU_ID; in bnx2x_get_hwinfo()
11931 bp->igu_base_sb = 0; in bnx2x_get_hwinfo()
11933 bp->common.int_block = INT_BLOCK_IGU; in bnx2x_get_hwinfo()
11950 tout--; in bnx2x_get_hwinfo()
11955 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
11959 return -EPERM; in bnx2x_get_hwinfo()
11965 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; in bnx2x_get_hwinfo()
11976 * set base FW non-default (fast path) status block id, this value is in bnx2x_get_hwinfo()
11981 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); in bnx2x_get_hwinfo()
11983 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of in bnx2x_get_hwinfo()
11987 bp->base_fw_ndsb = bp->igu_base_sb; in bnx2x_get_hwinfo()
11990 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, in bnx2x_get_hwinfo()
11991 bp->igu_sb_cnt, bp->base_fw_ndsb); in bnx2x_get_hwinfo()
11996 bp->mf_ov = 0; in bnx2x_get_hwinfo()
11997 bp->mf_mode = 0; in bnx2x_get_hwinfo()
11998 bp->mf_sub_mode = 0; in bnx2x_get_hwinfo()
12003 bp->common.shmem2_base, SHMEM2_RD(bp, size), in bnx2x_get_hwinfo()
12007 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); in bnx2x_get_hwinfo()
12009 bp->common.mf_cfg_base = bp->common.shmem_base + in bnx2x_get_hwinfo()
12016 * for Switch-Independent mode; in bnx2x_get_hwinfo()
12017 * OVLAN must be legal for Switch-Dependent mode in bnx2x_get_hwinfo()
12020 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12036 bp->mf_mode = MULTI_FUNCTION_AFEX; in bnx2x_get_hwinfo()
12037 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12050 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12051 bp->mf_config[vn] = MF_CFG_RD(bp, in bnx2x_get_hwinfo()
12057 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12058 bp->mf_sub_mode = SUB_MF_MODE_BD; in bnx2x_get_hwinfo()
12059 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12070 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", in bnx2x_get_hwinfo()
12077 bp->dev->mtu = mtu_size; in bnx2x_get_hwinfo()
12081 bp->mf_mode = MULTI_FUNCTION_SD; in bnx2x_get_hwinfo()
12082 bp->mf_sub_mode = SUB_MF_MODE_UFP; in bnx2x_get_hwinfo()
12083 bp->mf_config[vn] = in bnx2x_get_hwinfo()
12088 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12097 bp->mf_sub_mode = in bnx2x_get_hwinfo()
12102 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12109 bp->mf_config[vn] = 0; in bnx2x_get_hwinfo()
12117 switch (bp->mf_mode) { in bnx2x_get_hwinfo()
12122 bp->mf_ov = val; in bnx2x_get_hwinfo()
12123 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12126 func, bp->mf_ov, bp->mf_ov); in bnx2x_get_hwinfo()
12127 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || in bnx2x_get_hwinfo()
12128 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { in bnx2x_get_hwinfo()
12129 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12130 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", in bnx2x_get_hwinfo()
12132 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12134 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12137 return -EPERM; in bnx2x_get_hwinfo()
12144 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", in bnx2x_get_hwinfo()
12149 dev_err(&bp->pdev->dev, in bnx2x_get_hwinfo()
12152 return -EPERM; in bnx2x_get_hwinfo()
12163 !bp->path_has_ovlan && in bnx2x_get_hwinfo()
12165 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { in bnx2x_get_hwinfo()
12171 bp->path_has_ovlan = true; in bnx2x_get_hwinfo()
12177 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); in bnx2x_get_hwinfo()
12197 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); in bnx2x_read_fwinfo()
12199 vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len); in bnx2x_read_fwinfo()
12214 if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) { in bnx2x_read_fwinfo()
12215 memcpy(bp->fw_ver, &vpd_data[rodi], kw_len); in bnx2x_read_fwinfo()
12216 bp->fw_ver[kw_len] = ' '; in bnx2x_read_fwinfo()
12251 switch (bp->mf_mode) { in bnx2x_set_modes_bitmap()
12278 mutex_init(&bp->port.phy_mutex); in bnx2x_init_bp()
12279 mutex_init(&bp->fw_mb_mutex); in bnx2x_init_bp()
12280 mutex_init(&bp->drv_info_mutex); in bnx2x_init_bp()
12281 sema_init(&bp->stats_lock, 1); in bnx2x_init_bp()
12282 bp->drv_info_mng_owner = false; in bnx2x_init_bp()
12283 INIT_LIST_HEAD(&bp->vlan_reg); in bnx2x_init_bp()
12285 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); in bnx2x_init_bp()
12286 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); in bnx2x_init_bp()
12287 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); in bnx2x_init_bp()
12288 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); in bnx2x_init_bp()
12294 eth_zero_addr(bp->dev->dev_addr); in bnx2x_init_bp()
12310 bp->fw_seq = in bnx2x_init_bp()
12313 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_init_bp()
12323 dev_err(&bp->pdev->dev, "FPGA detected\n"); in bnx2x_init_bp()
12326 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); in bnx2x_init_bp()
12328 bp->disable_tpa = disable_tpa; in bnx2x_init_bp()
12329 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); in bnx2x_init_bp()
12331 bp->disable_tpa |= is_kdump_kernel(); in bnx2x_init_bp()
12334 if (bp->disable_tpa) { in bnx2x_init_bp()
12335 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12336 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); in bnx2x_init_bp()
12340 bp->dropless_fc = false; in bnx2x_init_bp()
12342 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); in bnx2x_init_bp()
12344 bp->mrrs = mrrs; in bnx2x_init_bp()
12346 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; in bnx2x_init_bp()
12348 bp->rx_ring_size = MAX_RX_AVAIL; in bnx2x_init_bp()
12351 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
12352 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; in bnx2x_init_bp()
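/*
 * Editor's note: illustrative sketch, not driver code. The default coalescing
 * ticks above are rounded down to a whole multiple of the timer resolution
 * (BNX2X_BTR) by integer division: (x / step) * step.
 */
static unsigned int round_down_to_step(unsigned int x, unsigned int step)
{
	return (x / step) * step;	/* e.g. x = 50, step = 4 -> 48 */
}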
12354 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; in bnx2x_init_bp()
12356 timer_setup(&bp->timer, bnx2x_timer, 0); in bnx2x_init_bp()
12357 bp->timer.expires = jiffies + bp->current_interval; in bnx2x_init_bp()
12372 bp->cnic_base_cl_id = FP_SB_MAX_E1x; in bnx2x_init_bp()
12374 bp->cnic_base_cl_id = FP_SB_MAX_E2; in bnx2x_init_bp()
12378 bp->max_cos = 1; in bnx2x_init_bp()
12380 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; in bnx2x_init_bp()
12382 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; in bnx2x_init_bp()
12384 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; in bnx2x_init_bp()
12388 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); in bnx2x_init_bp()
12390 /* We need at least one default status block for slow-path events, in bnx2x_init_bp()
12395 bp->min_msix_vec_cnt = 1; in bnx2x_init_bp()
12397 bp->min_msix_vec_cnt = 3; in bnx2x_init_bp()
12399 bp->min_msix_vec_cnt = 2; in bnx2x_init_bp()
12400 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); in bnx2x_init_bp()
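/*
 * Editor's note: illustrative sketch, not driver code. For a PF the minimum
 * MSI-X vector count above is one vector for the default (slow-path) status
 * block, one for the first fast-path queue, and one more when CNIC (storage
 * offload) support is compiled in; a VF only needs its fast-path vector.
 */
static int min_msix_vectors(int is_vf, int has_cnic)
{
	if (is_vf)
		return 1;			/* fast-path only */
	return 2 + (has_cnic ? 1 : 0);		/* def SB + fp (+ CNIC) */
}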
12402 bp->dump_preset_idx = 1; in bnx2x_init_bp()
12421 bp->stats_init = true; in bnx2x_open()
12460 netdev_info(bp->dev, in bnx2x_open()
12467 bp->recovery_state = BNX2X_RECOVERY_FAILED; in bnx2x_open()
12472 return -EAGAIN; in bnx2x_open()
12477 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_open()
12503 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12514 list_del(¤t_mcast_group->mcast_group_link); in bnx2x_free_mcast_macs_list()
12526 int mc_count = netdev_mc_count(bp->dev); in bnx2x_init_mcast_macs_list()
12529 INIT_LIST_HEAD(&p->mcast_list); in bnx2x_init_mcast_macs_list()
12530 netdev_for_each_mc_addr(ha, bp->dev) { in bnx2x_init_mcast_macs_list()
12538 return -ENOMEM; in bnx2x_init_mcast_macs_list()
12540 list_add(¤t_mcast_group->mcast_group_link, in bnx2x_init_mcast_macs_list()
12543 mc_mac = ¤t_mcast_group->mcast_elems[offset]; in bnx2x_init_mcast_macs_list()
12544 mc_mac->mac = bnx2x_mc_addr(ha); in bnx2x_init_mcast_macs_list()
12545 list_add_tail(&mc_mac->link, &p->mcast_list); in bnx2x_init_mcast_macs_list()
12550 p->mcast_list_len = mc_count; in bnx2x_init_mcast_macs_list()
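/*
 * Editor's note: illustrative sketch, not driver code. The list built above
 * packs multicast entries into page-sized groups, so each allocation is a
 * single page no matter how many addresses the stack hands us. All names
 * below are hypothetical.
 */
#include <stddef.h>
#include <stdlib.h>

struct mc_elem { unsigned char mac[6]; };

struct mc_group {			/* one page-sized container */
	struct mc_group *next;
	struct mc_elem elems[];
};

enum { PAGE_SZ = 4096 };
#define ELEMS_PER_GROUP \
	((PAGE_SZ - sizeof(struct mc_group)) / sizeof(struct mc_elem))

/* allocate enough page-sized groups to hold 'count' addresses */
static struct mc_group *alloc_groups(size_t count)
{
	struct mc_group *head = NULL;

	while (count) {
		struct mc_group *g = calloc(1, PAGE_SZ);
		size_t take = count < ELEMS_PER_GROUP ? count : ELEMS_PER_GROUP;

		if (!g)
			return head;	/* caller frees whatever was built */
		g->next = head;
		head = g;
		count -= take;
	}
	return head;
}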
12555 * bnx2x_set_uc_list - configure a new unicast MACs list.
12564 struct net_device *dev = bp->dev; in bnx2x_set_uc_list()
12566 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_set_uc_list()
12579 if (rc == -EEXIST) { in bnx2x_set_uc_list()
12580 DP(BNX2X_MSG_SP, in bnx2x_set_uc_list()
12602 struct net_device *dev = bp->dev; in bnx2x_set_mc_list_e1x()
12606 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list_e1x()
12638 struct net_device *dev = bp->dev; in bnx2x_set_mc_list()
12641 /* On older adapters, we need to flush and re-add filters */ in bnx2x_set_mc_list()
12645 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_set_mc_list()
12671 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12676 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_set_rx_mode()
12677 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); in bnx2x_set_rx_mode()
12690 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); in bnx2x_set_rx_mode_inner()
12692 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12694 if (bp->dev->flags & IFF_PROMISC) { in bnx2x_set_rx_mode_inner()
12696 } else if ((bp->dev->flags & IFF_ALLMULTI) || in bnx2x_set_rx_mode_inner()
12697 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && in bnx2x_set_rx_mode_inner()
12707 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12710 netif_addr_lock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12720 bp->rx_mode = rx_mode; in bnx2x_set_rx_mode_inner()
12723 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_set_rx_mode_inner()
12726 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { in bnx2x_set_rx_mode_inner()
12727 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); in bnx2x_set_rx_mode_inner()
12728 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12734 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12737 * the VF needs to release the bottom-half lock prior to the in bnx2x_set_rx_mode_inner()
12740 netif_addr_unlock_bh(bp->dev); in bnx2x_set_rx_mode_inner()
12753 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", in bnx2x_mdio_read()
12760 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); in bnx2x_mdio_read()
12762 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); in bnx2x_mdio_read()
12776 DP(NETIF_MSG_LINK, in bnx2x_mdio_write()
12784 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); in bnx2x_mdio_write()
12796 return -EAGAIN; in bnx2x_ioctl()
12802 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", in bnx2x_ioctl()
12803 mdio->phy_id, mdio->reg_num, mdio->val_in); in bnx2x_ioctl()
12804 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); in bnx2x_ioctl()
12816 if (!is_valid_ether_addr(dev->dev_addr)) { in bnx2x_validate_addr()
12817 BNX2X_ERR("Invalid Ethernet address\n"); in bnx2x_validate_addr()
12818 return -EADDRNOTAVAIL; in bnx2x_validate_addr()
12828 if (!(bp->flags & HAS_PHYS_PORT_ID)) in bnx2x_get_phys_port_id()
12829 return -EOPNOTSUPP; in bnx2x_get_phys_port_id()
12831 ppid->id_len = sizeof(bp->phys_port_id); in bnx2x_get_phys_port_id()
12832 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); in bnx2x_get_phys_port_id()
12855 (skb_shinfo(skb)->gso_size > 9000) && in bnx2x_features_check()
12871 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, in __bnx2x_vlan_configure_vid()
12874 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); in __bnx2x_vlan_configure_vid()
12885 /* Configure all non-configured entries */ in bnx2x_vlan_configure_vid_list()
12886 list_for_each_entry(vlan, &bp->vlan_reg, link) { in bnx2x_vlan_configure_vid_list()
12887 if (vlan->hw) in bnx2x_vlan_configure_vid_list()
12890 if (bp->vlan_cnt >= bp->vlan_credit) in bnx2x_vlan_configure_vid_list()
12891 return -ENOBUFS; in bnx2x_vlan_configure_vid_list()
12893 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); in bnx2x_vlan_configure_vid_list()
12895 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12899 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); in bnx2x_vlan_configure_vid_list()
12900 vlan->hw = true; in bnx2x_vlan_configure_vid_list()
12901 bp->vlan_cnt++; in bnx2x_vlan_configure_vid_list()
12913 if (bp->accept_any_vlan != need_accept_any_vlan) { in bnx2x_vlan_configure()
12914 bp->accept_any_vlan = need_accept_any_vlan; in bnx2x_vlan_configure()
12915 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", in bnx2x_vlan_configure()
12916 bp->accept_any_vlan ? "raised" : "cleared"); in bnx2x_vlan_configure()
12939 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); in bnx2x_vlan_rx_add_vid()
12943 return -ENOMEM; in bnx2x_vlan_rx_add_vid()
12945 vlan->vid = vid; in bnx2x_vlan_rx_add_vid()
12946 vlan->hw = false; in bnx2x_vlan_rx_add_vid()
12947 list_add_tail(&vlan->link, &bp->vlan_reg); in bnx2x_vlan_rx_add_vid()
12962 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
12964 list_for_each_entry(vlan, &bp->vlan_reg, link) in bnx2x_vlan_rx_kill_vid()
12965 if (vlan->vid == vid) { in bnx2x_vlan_rx_kill_vid()
12971 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); in bnx2x_vlan_rx_kill_vid()
12972 return -EINVAL; in bnx2x_vlan_rx_kill_vid()
12975 if (netif_running(dev) && vlan->hw) { in bnx2x_vlan_rx_kill_vid()
12977 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); in bnx2x_vlan_rx_kill_vid()
12978 bp->vlan_cnt--; in bnx2x_vlan_rx_kill_vid()
12981 list_del(&vlan->link); in bnx2x_vlan_rx_kill_vid()
12987 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); in bnx2x_vlan_rx_kill_vid()
13025 struct device *dev = &bp->pdev->dev; in bnx2x_set_coherency_mask()
13030 return -EIO; in bnx2x_set_coherency_mask()
13038 if (bp->flags & AER_ENABLED) { in bnx2x_disable_pcie_error_reporting()
13039 pci_disable_pcie_error_reporting(bp->pdev); in bnx2x_disable_pcie_error_reporting()
13040 bp->flags &= ~AER_ENABLED; in bnx2x_disable_pcie_error_reporting()
13053 SET_NETDEV_DEV(dev, &pdev->dev); in bnx2x_init_dev()
13055 bp->dev = dev; in bnx2x_init_dev()
13056 bp->pdev = pdev; in bnx2x_init_dev()
13060 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13066 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13068 rc = -ENODEV; in bnx2x_init_dev()
13073 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); in bnx2x_init_dev()
13074 rc = -ENODEV; in bnx2x_init_dev()
13082 rc = -ENODEV; in bnx2x_init_dev()
13086 if (atomic_read(&pdev->enable_cnt) == 1) { in bnx2x_init_dev()
13089 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13099 if (!pdev->pm_cap) { in bnx2x_init_dev()
13100 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13102 rc = -EIO; in bnx2x_init_dev()
13108 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); in bnx2x_init_dev()
13109 rc = -EIO; in bnx2x_init_dev()
13117 dev->mem_start = pci_resource_start(pdev, 0); in bnx2x_init_dev()
13118 dev->base_addr = dev->mem_start; in bnx2x_init_dev()
13119 dev->mem_end = pci_resource_end(pdev, 0); in bnx2x_init_dev()
13121 dev->irq = pdev->irq; in bnx2x_init_dev()
13123 bp->regview = pci_ioremap_bar(pdev, 0); in bnx2x_init_dev()
13124 if (!bp->regview) { in bnx2x_init_dev()
13125 dev_err(&bp->pdev->dev, in bnx2x_init_dev()
13127 rc = -ENOMEM; in bnx2x_init_dev()
13137 bp->pf_num = PCI_FUNC(pdev->devfn); in bnx2x_init_dev()
13140 pci_read_config_dword(bp->pdev, in bnx2x_init_dev()
13142 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> in bnx2x_init_dev()
13145 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); in bnx2x_init_dev()
13148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, in bnx2x_init_dev()
13152 pdev->needs_freset = 1; in bnx2x_init_dev()
13157 bp->flags |= AER_ENABLED; in bnx2x_init_dev()
13178 /* Enable internal target-read (in case we are probed after PF in bnx2x_init_dev()
13187 dev->watchdog_timeo = TX_TIMEOUT; in bnx2x_init_dev()
13189 dev->netdev_ops = &bnx2x_netdev_ops; in bnx2x_init_dev()
13192 dev->priv_flags |= IFF_UNICAST_FLT; in bnx2x_init_dev()
13194 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13199 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13205 dev->hw_enc_features = in bnx2x_init_dev()
13213 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | in bnx2x_init_dev()
13217 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; in bnx2x_init_dev()
13220 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bnx2x_init_dev()
13225 bp->accept_any_vlan = true; in bnx2x_init_dev()
13227 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_dev()
13233 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; in bnx2x_init_dev()
13234 dev->features |= NETIF_F_HIGHDMA; in bnx2x_init_dev()
13235 if (dev->features & NETIF_F_LRO) in bnx2x_init_dev()
13236 dev->features &= ~NETIF_F_GRO_HW; in bnx2x_init_dev()
13239 dev->hw_features |= NETIF_F_LOOPBACK; in bnx2x_init_dev()
13242 dev->dcbnl_ops = &bnx2x_dcbnl_ops; in bnx2x_init_dev()
13245 /* MTU range, 46 - 9600 */ in bnx2x_init_dev()
13246 dev->min_mtu = ETH_MIN_PACKET_SIZE; in bnx2x_init_dev()
13247 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE; in bnx2x_init_dev()
13250 bp->mdio.prtad = MDIO_PRTAD_NONE; in bnx2x_init_dev()
13251 bp->mdio.mmds = 0; in bnx2x_init_dev()
13252 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; in bnx2x_init_dev()
13253 bp->mdio.dev = dev; in bnx2x_init_dev()
13254 bp->mdio.mdio_read = bnx2x_mdio_read; in bnx2x_init_dev()
13255 bp->mdio.mdio_write = bnx2x_mdio_write; in bnx2x_init_dev()
13260 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_dev()
13272 const struct firmware *firmware = bp->firmware; in bnx2x_check_firmware()
13280 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { in bnx2x_check_firmware()
13282 return -EINVAL; in bnx2x_check_firmware()
13285 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; in bnx2x_check_firmware()
13293 if (offset + len > firmware->size) { in bnx2x_check_firmware()
13295 return -EINVAL; in bnx2x_check_firmware()
13300 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset); in bnx2x_check_firmware()
13301 ops_offsets = (__force __be16 *)(firmware->data + offset); in bnx2x_check_firmware()
13302 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op); in bnx2x_check_firmware()
13304 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { in bnx2x_check_firmware()
13307 return -EINVAL; in bnx2x_check_firmware()
13312 offset = be32_to_cpu(fw_hdr->fw_version.offset); in bnx2x_check_firmware()
13313 fw_ver = firmware->data + offset; in bnx2x_check_firmware()
13324 return -EINVAL; in bnx2x_check_firmware()
13393 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13394 bp->arr = kmalloc(len, GFP_KERNEL); \
13395 if (!bp->arr) \
13397 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13398 (u8 *)bp->arr, len); \
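/*
 * Editor's note: illustrative sketch, not driver code. The macro above copies
 * one section out of the firmware blob into a freshly allocated array through
 * a per-section conversion helper; the sketch shows one such helper that turns
 * big-endian 32-bit words into host order. Names are hypothetical.
 */
#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint32_t *copy_be32_section(const uint8_t *blob, size_t off, size_t len)
{
	size_t i, n = len / sizeof(uint32_t);
	uint32_t *out = malloc(len);

	if (!out)
		return NULL;
	for (i = 0; i < n; i++) {
		uint32_t be;

		memcpy(&be, blob + off + i * sizeof(be), sizeof(be));
		out[i] = ntohl(be);	/* firmware stores big-endian words */
	}
	return out;
}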
13407 if (bp->firmware) in bnx2x_init_firmware()
13418 return -EINVAL; in bnx2x_init_firmware()
13422 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); in bnx2x_init_firmware()
13435 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; in bnx2x_init_firmware()
13439 rc = -ENOMEM; in bnx2x_init_firmware()
13450 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13451 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); in bnx2x_init_firmware()
13452 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13453 be32_to_cpu(fw_hdr->tsem_pram_data.offset); in bnx2x_init_firmware()
13454 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13455 be32_to_cpu(fw_hdr->usem_int_table_data.offset); in bnx2x_init_firmware()
13456 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13457 be32_to_cpu(fw_hdr->usem_pram_data.offset); in bnx2x_init_firmware()
13458 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13459 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); in bnx2x_init_firmware()
13460 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13461 be32_to_cpu(fw_hdr->xsem_pram_data.offset); in bnx2x_init_firmware()
13462 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13463 be32_to_cpu(fw_hdr->csem_int_table_data.offset); in bnx2x_init_firmware()
13464 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + in bnx2x_init_firmware()
13465 be32_to_cpu(fw_hdr->csem_pram_data.offset); in bnx2x_init_firmware()
13472 kfree(bp->init_ops_offsets); in bnx2x_init_firmware()
13474 kfree(bp->init_ops); in bnx2x_init_firmware()
13476 kfree(bp->init_data); in bnx2x_init_firmware()
13478 release_firmware(bp->firmware); in bnx2x_init_firmware()
13479 bp->firmware = NULL; in bnx2x_init_firmware()
13486 kfree(bp->init_ops_offsets); in bnx2x_release_firmware()
13487 kfree(bp->init_ops); in bnx2x_release_firmware()
13488 kfree(bp->init_data); in bnx2x_release_firmware()
13489 release_firmware(bp->firmware); in bnx2x_release_firmware()
13490 bp->firmware = NULL; in bnx2x_release_firmware()
13515 bnx2x_init_func_obj(bp, &bp->func_obj, in bnx2x__init_func_obj()
13523 /* must be called after sriov-enable */
13538 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13549 * If MSI-X is not supported - return number of SBs needed to support in bnx2x_get_num_non_def_sbs()
13552 if (!pdev->msix_cap) { in bnx2x_get_num_non_def_sbs()
13553 dev_info(&pdev->dev, "no msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13556 dev_info(&pdev->dev, "msix capability found\n"); in bnx2x_get_num_non_def_sbs()
13565 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); in bnx2x_get_num_non_def_sbs()
13602 return -ENODEV; in set_max_cos_est()
13642 func_params.f_obj = &bp->func_obj; in bnx2x_send_update_drift_ramrod()
13646 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; in bnx2x_send_update_drift_ramrod()
13647 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_update_drift_ramrod()
13648 set_timesync_params->add_sub_drift_adjust_value = in bnx2x_send_update_drift_ramrod()
13650 set_timesync_params->drift_adjust_value = best_val; in bnx2x_send_update_drift_ramrod()
13651 set_timesync_params->drift_adjust_period = best_period; in bnx2x_send_update_drift_ramrod()
13664 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); in bnx2x_ptp_adjfreq()
13666 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjfreq()
13667 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjfreq()
13669 return -ENETDOWN; in bnx2x_ptp_adjfreq()
13673 ppb = -ppb; in bnx2x_ptp_adjfreq()
13693 dif1 = ppb - (val * 1000000 / period1); in bnx2x_ptp_adjfreq()
13697 dif1 = -dif1; in bnx2x_ptp_adjfreq()
13698 dif2 = ppb - (val * 1000000 / period2); in bnx2x_ptp_adjfreq()
13700 dif2 = -dif2; in bnx2x_ptp_adjfreq()
13715 return -EFAULT; in bnx2x_ptp_adjfreq()
13718 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, in bnx2x_ptp_adjfreq()
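/*
 * Editor's note: illustrative sketch, not driver code. The adjfreq logic above
 * approximates the requested drift (in parts per billion) with a register pair
 * (adjust value, adjust period) whose ratio val * 1000000 / period is closest
 * to ppb; a brute-force version of that search looks like this. The candidate
 * ranges are made up for illustration.
 */
#include <limits.h>
#include <stdlib.h>

static void pick_drift_pair(long ppb, long *best_val, long *best_period)
{
	long best_err = LONG_MAX;
	long val, period;

	for (val = 1; val <= 255; val++) {
		for (period = 1; period <= 255; period++) {
			long err = labs(ppb - val * 1000000 / period);

			if (err < best_err) {
				best_err = err;
				*best_val = val;
				*best_period = period;
			}
		}
	}
}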
13728 if (!netif_running(bp->dev)) { in bnx2x_ptp_adjtime()
13729 DP(BNX2X_MSG_PTP, in bnx2x_ptp_adjtime()
13731 return -ENETDOWN; in bnx2x_ptp_adjtime()
13734 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); in bnx2x_ptp_adjtime()
13736 timecounter_adjtime(&bp->timecounter, delta); in bnx2x_ptp_adjtime()
13746 if (!netif_running(bp->dev)) { in bnx2x_ptp_gettime()
13747 DP(BNX2X_MSG_PTP, in bnx2x_ptp_gettime()
13749 return -ENETDOWN; in bnx2x_ptp_gettime()
13752 ns = timecounter_read(&bp->timecounter); in bnx2x_ptp_gettime()
13754 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); in bnx2x_ptp_gettime()
13767 if (!netif_running(bp->dev)) { in bnx2x_ptp_settime()
13768 DP(BNX2X_MSG_PTP, in bnx2x_ptp_settime()
13770 return -ENETDOWN; in bnx2x_ptp_settime()
13775 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); in bnx2x_ptp_settime()
13777 /* Re-init the timecounter */ in bnx2x_ptp_settime()
13778 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); in bnx2x_ptp_settime()
13790 return -ENOTSUPP; in bnx2x_ptp_enable()
13796 bp->ptp_clock_info.owner = THIS_MODULE; in bnx2x_register_phc()
13797 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); in bnx2x_register_phc()
13798 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ in bnx2x_register_phc()
13799 bp->ptp_clock_info.n_alarm = 0; in bnx2x_register_phc()
13800 bp->ptp_clock_info.n_ext_ts = 0; in bnx2x_register_phc()
13801 bp->ptp_clock_info.n_per_out = 0; in bnx2x_register_phc()
13802 bp->ptp_clock_info.pps = 0; in bnx2x_register_phc()
13803 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; in bnx2x_register_phc()
13804 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; in bnx2x_register_phc()
13805 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; in bnx2x_register_phc()
13806 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; in bnx2x_register_phc()
13807 bp->ptp_clock_info.enable = bnx2x_ptp_enable; in bnx2x_register_phc()
13809 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); in bnx2x_register_phc()
13810 if (IS_ERR(bp->ptp_clock)) { in bnx2x_register_phc()
13811 bp->ptp_clock = NULL; in bnx2x_register_phc()
13828 * to forget previously living interfaces, allowing a proper re-load. in bnx2x_init_one()
13843 * initialization of bp->max_cos based on the chip versions AND chip in bnx2x_init_one()
13846 max_cos_est = set_max_cos_est(ent->driver_data); in bnx2x_init_one()
13849 is_vf = set_is_vf(ent->driver_data); in bnx2x_init_one()
13858 rss_count = max_non_def_sbs - cnic_cnt; in bnx2x_init_one()
13861 return -EINVAL; in bnx2x_init_one()
13874 return -ENOMEM; in bnx2x_init_one()
13878 bp->flags = 0; in bnx2x_init_one()
13880 bp->flags |= IS_VF_FLAG; in bnx2x_init_one()
13882 bp->igu_sb_cnt = max_non_def_sbs; in bnx2x_init_one()
13883 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; in bnx2x_init_one()
13884 bp->msg_enable = debug; in bnx2x_init_one()
13885 bp->cnic_support = cnic_cnt; in bnx2x_init_one()
13886 bp->cnic_probe = bnx2x_cnic_probe; in bnx2x_init_one()
13890 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); in bnx2x_init_one()
13907 /* Map doorbells here as we need the real value of bp->max_cos which in bnx2x_init_one()
13912 bp->doorbells = bnx2x_vf_doorbells(bp); in bnx2x_init_one()
13919 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13921 rc = -ENOMEM; in bnx2x_init_one()
13924 bp->doorbells = ioremap(pci_resource_start(pdev, 2), in bnx2x_init_one()
13927 if (!bp->doorbells) { in bnx2x_init_one()
13928 dev_err(&bp->pdev->dev, in bnx2x_init_one()
13930 rc = -ENOMEM; in bnx2x_init_one()
13941 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { in bnx2x_init_one()
13942 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
13943 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in bnx2x_init_one()
13954 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); in bnx2x_init_one()
13955 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); in bnx2x_init_one()
13959 bp->flags |= NO_FCOE_FLAG; in bnx2x_init_one()
13961 /* Set bp->num_queues for MSI-X mode */ in bnx2x_init_one()
13964 /* Configure interrupt mode: try to enable MSI-X/MSI if in bnx2x_init_one()
13969 dev_err(&pdev->dev, "Cannot set interrupts\n"); in bnx2x_init_one()
13977 dev_err(&pdev->dev, "Cannot register net device\n"); in bnx2x_init_one()
13980 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); in bnx2x_init_one()
13985 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in bnx2x_init_one()
13989 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n", in bnx2x_init_one()
13990 board_info[ent->driver_data].name, in bnx2x_init_one()
13992 dev->base_addr, bp->pdev->irq, dev->dev_addr); in bnx2x_init_one()
13993 pcie_print_link_status(bp->pdev); in bnx2x_init_one()
14006 if (bp->regview) in bnx2x_init_one()
14007 iounmap(bp->regview); in bnx2x_init_one()
14009 if (IS_PF(bp) && bp->doorbells) in bnx2x_init_one()
14010 iounmap(bp->doorbells); in bnx2x_init_one()
14014 if (atomic_read(&pdev->enable_cnt) == 1) in bnx2x_init_one()
14030 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); in __bnx2x_remove()
14041 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) in __bnx2x_remove()
14044 /* Close the interface - either directly or implicitly */ in __bnx2x_remove()
14066 /* Disable MSI/MSI-X */ in __bnx2x_remove()
14074 cancel_delayed_work_sync(&bp->sp_rtnl_task); in __bnx2x_remove()
14082 pci_wake_from_d3(pdev, bp->wol); in __bnx2x_remove()
14088 if (bp->regview) in __bnx2x_remove()
14089 iounmap(bp->regview); in __bnx2x_remove()
14095 if (bp->doorbells) in __bnx2x_remove()
14096 iounmap(bp->doorbells); in __bnx2x_remove()
14106 if (atomic_read(&pdev->enable_cnt) == 1) in __bnx2x_remove()
14119 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); in bnx2x_remove_one()
14129 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_eeh_nic_unload()
14131 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_eeh_nic_unload()
14142 netdev_reset_tc(bp->dev); in bnx2x_eeh_nic_unload()
14144 del_timer_sync(&bp->timer); in bnx2x_eeh_nic_unload()
14145 cancel_delayed_work_sync(&bp->sp_task); in bnx2x_eeh_nic_unload()
14146 cancel_delayed_work_sync(&bp->period_task); in bnx2x_eeh_nic_unload()
14148 if (!down_timeout(&bp->stats_lock, HZ / 10)) { in bnx2x_eeh_nic_unload()
14149 bp->stats_state = STATS_STATE_DISABLED; in bnx2x_eeh_nic_unload()
14150 up(&bp->stats_lock); in bnx2x_eeh_nic_unload()
14155 netif_carrier_off(bp->dev); in bnx2x_eeh_nic_unload()
14161 * bnx2x_io_error_detected - called when PCI error is detected
14199 * bnx2x_io_slot_reset - called after the PCI bus has been reset
14202 * Restart the card from scratch, as if from a cold-boot.
14213 dev_err(&pdev->dev, in bnx2x_io_slot_reset()
14214 "Cannot re-enable PCI device after reset\n"); in bnx2x_io_slot_reset()
14227 BNX2X_ERR("IO slot reset --> driver unload\n"); in bnx2x_io_slot_reset()
14251 bp->sp_state = 0; in bnx2x_io_slot_reset()
14252 bp->port.pmf = 0; in bnx2x_io_slot_reset()
14262 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_io_slot_reset()
14266 bp->state = BNX2X_STATE_CLOSED; in bnx2x_io_slot_reset()
14275 * bnx2x_io_resume - called when traffic can start flowing again
14286 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_io_resume()
14287 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); in bnx2x_io_resume()
14293 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_io_resume()
14353 return -ENOMEM; in bnx2x_init()
14359 return -ENOMEM; in bnx2x_init()
14398 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14402 * Return 0 if success, -ENODEV if ramrod doesn't return.
14409 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, in bnx2x_set_iscsi_eth_mac_addr()
14410 &bp->iscsi_l2_mac_obj, true, in bnx2x_set_iscsi_eth_mac_addr()
14421 if (unlikely(bp->panic)) in bnx2x_cnic_sp_post()
14425 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14426 BUG_ON(bp->cnic_spq_pending < count); in bnx2x_cnic_sp_post()
14427 bp->cnic_spq_pending -= count; in bnx2x_cnic_sp_post()
14429 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { in bnx2x_cnic_sp_post()
14430 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) in bnx2x_cnic_sp_post()
14433 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) in bnx2x_cnic_sp_post()
14443 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - in bnx2x_cnic_sp_post()
14446 &bp->context[cxt_index]. in bnx2x_cnic_sp_post()
14459 if (!atomic_read(&bp->cq_spq_left)) in bnx2x_cnic_sp_post()
14462 atomic_dec(&bp->cq_spq_left); in bnx2x_cnic_sp_post()
14464 if (!atomic_read(&bp->eq_spq_left)) in bnx2x_cnic_sp_post()
14467 atomic_dec(&bp->eq_spq_left); in bnx2x_cnic_sp_post()
14470 if (bp->cnic_spq_pending >= in bnx2x_cnic_sp_post()
14471 bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_post()
14474 bp->cnic_spq_pending++; in bnx2x_cnic_sp_post()
14482 *spe = *bp->cnic_kwq_cons; in bnx2x_cnic_sp_post()
14484 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", in bnx2x_cnic_sp_post()
14485 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); in bnx2x_cnic_sp_post()
14487 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) in bnx2x_cnic_sp_post()
14488 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_cnic_sp_post()
14490 bp->cnic_kwq_cons++; in bnx2x_cnic_sp_post()
14493 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_post()
14503 if (unlikely(bp->panic)) { in bnx2x_cnic_sp_queue()
14505 return -EIO; in bnx2x_cnic_sp_queue()
14509 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && in bnx2x_cnic_sp_queue()
14510 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { in bnx2x_cnic_sp_queue()
14512 return -EAGAIN; in bnx2x_cnic_sp_queue()
14515 spin_lock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14520 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) in bnx2x_cnic_sp_queue()
14523 *bp->cnic_kwq_prod = *spe; in bnx2x_cnic_sp_queue()
14525 bp->cnic_kwq_pending++; in bnx2x_cnic_sp_queue()
14527 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", in bnx2x_cnic_sp_queue()
14528 spe->hdr.conn_and_cmd_data, spe->hdr.type, in bnx2x_cnic_sp_queue()
14529 spe->data.update_data_addr.hi, in bnx2x_cnic_sp_queue()
14530 spe->data.update_data_addr.lo, in bnx2x_cnic_sp_queue()
14531 bp->cnic_kwq_pending); in bnx2x_cnic_sp_queue()
14533 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) in bnx2x_cnic_sp_queue()
14534 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_cnic_sp_queue()
14536 bp->cnic_kwq_prod++; in bnx2x_cnic_sp_queue()
14539 spin_unlock_bh(&bp->spq_lock); in bnx2x_cnic_sp_queue()
14541 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) in bnx2x_cnic_sp_queue()
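/*
 * Editor's note: illustrative sketch, not driver code. The KWQ bookkeeping
 * above is a bounded producer ring: entries are queued at a producer pointer
 * that wraps back to the base once it reaches the last slot, and a pending
 * count guards against overrun. Names are hypothetical.
 */
#include <stddef.h>

struct slot { int payload; };

struct ring {
	struct slot *base, *last, *prod;
	size_t pending, capacity;
};

/* returns 0 on success, -1 when the ring is already full */
static int ring_push(struct ring *r, struct slot v)
{
	if (r->pending == r->capacity)
		return -1;
	*r->prod = v;
	r->pending++;
	r->prod = (r->prod == r->last) ? r->base : r->prod + 1;
	return 0;
}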
14552 mutex_lock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14553 c_ops = rcu_dereference_protected(bp->cnic_ops, in bnx2x_cnic_ctl_send()
14554 lockdep_is_held(&bp->cnic_mutex)); in bnx2x_cnic_ctl_send()
14556 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send()
14557 mutex_unlock(&bp->cnic_mutex); in bnx2x_cnic_ctl_send()
14568 c_ops = rcu_dereference(bp->cnic_ops); in bnx2x_cnic_ctl_send_bh()
14570 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); in bnx2x_cnic_ctl_send_bh()
14615 * multicasts (in non-promiscuous mode only one Queue per in bnx2x_set_iscsi_eth_rx_mode()
14625 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14630 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14632 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) in bnx2x_set_iscsi_eth_rx_mode()
14633 set_bit(sched_state, &bp->sp_state); in bnx2x_set_iscsi_eth_rx_mode()
14646 switch (ctl->cmd) { in bnx2x_drv_ctl()
14648 u32 index = ctl->data.io.offset; in bnx2x_drv_ctl()
14649 dma_addr_t addr = ctl->data.io.dma_addr; in bnx2x_drv_ctl()
14656 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14664 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_drv_ctl()
14668 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14669 cp->iscsi_l2_client_id, in bnx2x_drv_ctl()
14670 cp->iscsi_l2_cid, BP_FUNC(bp), in bnx2x_drv_ctl()
14674 &bp->sp_state, BNX2X_OBJ_TYPE_RX, in bnx2x_drv_ctl()
14675 &bp->macs_pool); in bnx2x_drv_ctl()
14719 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, in bnx2x_drv_ctl()
14724 int count = ctl->data.credit.credit_count; in bnx2x_drv_ctl()
14727 atomic_add(count, &bp->cq_spq_left); in bnx2x_drv_ctl()
14732 int ulp_type = ctl->data.register_data.ulp_type; in bnx2x_drv_ctl()
14752 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) in bnx2x_drv_ctl()
14755 /* if reached here - should write fcoe capabilities */ in bnx2x_drv_ctl()
14761 host_addr = (u32 *) &(ctl->data.register_data. in bnx2x_drv_ctl()
14773 int ulp_type = ctl->data.ulp_type; in bnx2x_drv_ctl()
14791 BNX2X_ERR("unknown command %x\n", ctl->cmd); in bnx2x_drv_ctl()
14792 rc = -EINVAL; in bnx2x_drv_ctl()
14795 /* For storage-only interfaces, change driver state */ in bnx2x_drv_ctl()
14797 switch (ctl->drv_state) { in bnx2x_drv_ctl()
14813 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state); in bnx2x_drv_ctl()
14826 int rc = -EINVAL; in bnx2x_get_fc_npiv()
14832 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14842 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); in bnx2x_get_fc_npiv()
14845 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); in bnx2x_get_fc_npiv()
14849 BNX2X_ERR("Failed to read FC-NPIV table\n"); in bnx2x_get_fc_npiv()
14856 entries = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14858 tbl->fc_npiv_cfg.num_of_npiv = entries; in bnx2x_get_fc_npiv()
14860 if (!tbl->fc_npiv_cfg.num_of_npiv) { in bnx2x_get_fc_npiv()
14861 DP(BNX2X_MSG_MCP, in bnx2x_get_fc_npiv()
14862 "No FC-NPIV table [valid, simply not present]\n"); in bnx2x_get_fc_npiv()
14864 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { in bnx2x_get_fc_npiv()
14865 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n", in bnx2x_get_fc_npiv()
14866 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14869 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n", in bnx2x_get_fc_npiv()
14870 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv()
14873 /* Copy the data into cnic-provided struct */ in bnx2x_get_fc_npiv()
14874 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv()
14875 for (i = 0; i < cnic_tbl->count; i++) { in bnx2x_get_fc_npiv()
14876 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8); in bnx2x_get_fc_npiv()
14877 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8); in bnx2x_get_fc_npiv()
14888 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_irq_info()
14890 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_cnic_irq_info()
14891 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14892 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14893 cp->irq_arr[0].vector = bp->msix_table[1].vector; in bnx2x_setup_cnic_irq_info()
14895 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; in bnx2x_setup_cnic_irq_info()
14896 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; in bnx2x_setup_cnic_irq_info()
14899 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; in bnx2x_setup_cnic_irq_info()
14901 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; in bnx2x_setup_cnic_irq_info()
14903 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14904 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); in bnx2x_setup_cnic_irq_info()
14905 cp->irq_arr[1].status_blk = bp->def_status_blk; in bnx2x_setup_cnic_irq_info()
14906 cp->irq_arr[1].status_blk_num = DEF_SB_ID; in bnx2x_setup_cnic_irq_info()
14907 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; in bnx2x_setup_cnic_irq_info()
14909 cp->num_irq = 2; in bnx2x_setup_cnic_irq_info()
14914 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_setup_cnic_info()
14916 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_setup_cnic_info()
14918 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_setup_cnic_info()
14919 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_setup_cnic_info()
14920 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_setup_cnic_info()
14922 …DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp… in bnx2x_setup_cnic_info()
14923 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, in bnx2x_setup_cnic_info()
14924 cp->iscsi_l2_cid); in bnx2x_setup_cnic_info()
14927 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_setup_cnic_info()
14934 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_register_cnic()
14937 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); in bnx2x_register_cnic()
14941 return -EINVAL; in bnx2x_register_cnic()
14946 return -EOPNOTSUPP; in bnx2x_register_cnic()
14952 BNX2X_ERR("CNIC-related load failed\n"); in bnx2x_register_cnic()
14957 bp->cnic_enabled = true; in bnx2x_register_cnic()
14959 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); in bnx2x_register_cnic()
14960 if (!bp->cnic_kwq) in bnx2x_register_cnic()
14961 return -ENOMEM; in bnx2x_register_cnic()
14963 bp->cnic_kwq_cons = bp->cnic_kwq; in bnx2x_register_cnic()
14964 bp->cnic_kwq_prod = bp->cnic_kwq; in bnx2x_register_cnic()
14965 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; in bnx2x_register_cnic()
14967 bp->cnic_spq_pending = 0; in bnx2x_register_cnic()
14968 bp->cnic_kwq_pending = 0; in bnx2x_register_cnic()
14970 bp->cnic_data = data; in bnx2x_register_cnic()
14972 cp->num_irq = 0; in bnx2x_register_cnic()
14973 cp->drv_state |= CNIC_DRV_STATE_REGD; in bnx2x_register_cnic()
14974 cp->iro_arr = bp->iro_arr; in bnx2x_register_cnic()
14978 rcu_assign_pointer(bp->cnic_ops, ops); in bnx2x_register_cnic()
14989 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_unregister_cnic()
14991 mutex_lock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
14992 cp->drv_state = 0; in bnx2x_unregister_cnic()
14993 RCU_INIT_POINTER(bp->cnic_ops, NULL); in bnx2x_unregister_cnic()
14994 mutex_unlock(&bp->cnic_mutex); in bnx2x_unregister_cnic()
14996 bp->cnic_enabled = false; in bnx2x_unregister_cnic()
14997 kfree(bp->cnic_kwq); in bnx2x_unregister_cnic()
14998 bp->cnic_kwq = NULL; in bnx2x_unregister_cnic()
15006 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_cnic_probe()
15008 /* If both iSCSI and FCoE are disabled - return NULL in in bnx2x_cnic_probe()
15015 cp->drv_owner = THIS_MODULE; in bnx2x_cnic_probe()
15016 cp->chip_id = CHIP_ID(bp); in bnx2x_cnic_probe()
15017 cp->pdev = bp->pdev; in bnx2x_cnic_probe()
15018 cp->io_base = bp->regview; in bnx2x_cnic_probe()
15019 cp->io_base2 = bp->doorbells; in bnx2x_cnic_probe()
15020 cp->max_kwqe_pending = 8; in bnx2x_cnic_probe()
15021 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; in bnx2x_cnic_probe()
15022 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + in bnx2x_cnic_probe()
15024 cp->ctx_tbl_len = CNIC_ILT_LINES; in bnx2x_cnic_probe()
15025 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; in bnx2x_cnic_probe()
15026 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; in bnx2x_cnic_probe()
15027 cp->drv_ctl = bnx2x_drv_ctl; in bnx2x_cnic_probe()
15028 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv; in bnx2x_cnic_probe()
15029 cp->drv_register_cnic = bnx2x_register_cnic; in bnx2x_cnic_probe()
15030 cp->drv_unregister_cnic = bnx2x_unregister_cnic; in bnx2x_cnic_probe()
15031 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); in bnx2x_cnic_probe()
15032 cp->iscsi_l2_client_id = in bnx2x_cnic_probe()
15034 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); in bnx2x_cnic_probe()
15037 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; in bnx2x_cnic_probe()
15040 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; in bnx2x_cnic_probe()
15043 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; in bnx2x_cnic_probe()
15047 cp->ctx_blk_size, in bnx2x_cnic_probe()
15048 cp->ctx_tbl_offset, in bnx2x_cnic_probe()
15049 cp->ctx_tbl_len, in bnx2x_cnic_probe()
15050 cp->starting_cid); in bnx2x_cnic_probe()
15056 struct bnx2x *bp = fp->bp; in bnx2x_rx_ustorm_prods_offset()
15062 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); in bnx2x_rx_ustorm_prods_offset()
15064 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); in bnx2x_rx_ustorm_prods_offset()
15071 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
15079 return -1; in bnx2x_pretend_func()
15099 * still not complete, may indicate an error state - bail out then. in bnx2x_ptp_task()
15122 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_ptp_task()
15126 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); in bnx2x_ptp_task()
15128 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_ptp_task()
15131 DP(BNX2X_MSG_PTP, in bnx2x_ptp_task()
15134 bp->eth_stats.ptp_skip_tx_ts++; in bnx2x_ptp_task()
15137 dev_kfree_skb_any(bp->ptp_tx_skb); in bnx2x_ptp_task()
15138 bp->ptp_tx_skb = NULL; in bnx2x_ptp_task()
15156 ns = timecounter_cyc2time(&bp->timecounter, timestamp); in bnx2x_set_rx_ts()
15158 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); in bnx2x_set_rx_ts()
15160 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", in bnx2x_set_rx_ts()
15177 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); in bnx2x_cyclecounter_read()
15184 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); in bnx2x_init_cyclecounter()
15185 bp->cyclecounter.read = bnx2x_cyclecounter_read; in bnx2x_init_cyclecounter()
15186 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); in bnx2x_init_cyclecounter()
15187 bp->cyclecounter.shift = 0; in bnx2x_init_cyclecounter()
15188 bp->cyclecounter.mult = 1; in bnx2x_init_cyclecounter()
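/*
 * Editor's note: illustrative sketch, not driver code. The generic
 * cyclecounter conversion is ns = (cycles * mult) >> shift; with mult = 1 and
 * shift = 0 as set above it degenerates to a 1:1 mapping, i.e. the PHC's
 * free-running counter is already in nanoseconds.
 */
#include <stdint.h>

static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* identity for mult=1, shift=0 */
}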
15201 func_params.f_obj = &bp->func_obj; in bnx2x_send_reset_timesync_ramrod()
15205 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; in bnx2x_send_reset_timesync_ramrod()
15206 set_timesync_params->offset_cmd = TS_OFFSET_KEEP; in bnx2x_send_reset_timesync_ramrod()
15227 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_enable_ptp_packets()
15262 if (!bp->hwtstamp_ioctl_called) in bnx2x_configure_ptp_filters()
15269 switch (bp->tx_type) { in bnx2x_configure_ptp_filters()
15271 bp->flags |= TX_TIMESTAMPING_EN; in bnx2x_configure_ptp_filters()
15277 BNX2X_ERR("One-step timestamping is not supported\n"); in bnx2x_configure_ptp_filters()
15278 return -ERANGE; in bnx2x_configure_ptp_filters()
15285 switch (bp->rx_filter) { in bnx2x_configure_ptp_filters()
15291 bp->rx_filter = HWTSTAMP_FILTER_NONE; in bnx2x_configure_ptp_filters()
15296 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; in bnx2x_configure_ptp_filters()
15304 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; in bnx2x_configure_ptp_filters()
15312 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; in bnx2x_configure_ptp_filters()
15321 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in bnx2x_configure_ptp_filters()
15345 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); in bnx2x_hwtstamp_ioctl()
15347 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in bnx2x_hwtstamp_ioctl()
15348 return -EFAULT; in bnx2x_hwtstamp_ioctl()
15350 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", in bnx2x_hwtstamp_ioctl()
15355 return -EINVAL; in bnx2x_hwtstamp_ioctl()
15358 bp->hwtstamp_ioctl_called = true; in bnx2x_hwtstamp_ioctl()
15359 bp->tx_type = config.tx_type; in bnx2x_hwtstamp_ioctl()
15360 bp->rx_filter = config.rx_filter; in bnx2x_hwtstamp_ioctl()
15366 config.rx_filter = bp->rx_filter; in bnx2x_hwtstamp_ioctl()
15368 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in bnx2x_hwtstamp_ioctl()
15369 -EFAULT : 0; in bnx2x_hwtstamp_ioctl()
15378 /* Reset PTP event detection rules - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15388 /* Disable PTP packets to host - will be configured in the IOCTL */ in bnx2x_configure_ptp()
15396 /* Enable the free-running counter */ in bnx2x_configure_ptp()
15405 return -EFAULT; in bnx2x_configure_ptp()
15417 /* Called during load, to initialize PTP-related stuff */
15430 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); in bnx2x_init_ptp()
15436 if (!bp->timecounter_init_done) { in bnx2x_init_ptp()
15438 timecounter_init(&bp->timecounter, &bp->cyclecounter, in bnx2x_init_ptp()
15440 bp->timecounter_init_done = true; in bnx2x_init_ptp()
15443 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); in bnx2x_init_ptp()