Lines Matching refs:qdev

107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)  in ql_sem_trylock()  argument
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); in ql_sem_trylock()
141 ql_write32(qdev, SEM, sem_bits | sem_mask); in ql_sem_trylock()
142 return !(ql_read32(qdev, SEM) & sem_bits); in ql_sem_trylock()
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_spinlock() argument
149 if (!ql_sem_trylock(qdev, sem_mask)) in ql_sem_spinlock()
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
158 ql_write32(qdev, SEM, sem_mask); in ql_sem_unlock()
159 ql_read32(qdev, SEM); /* flush */ in ql_sem_unlock()
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) in ql_wait_reg_rdy() argument
173 temp = ql_read32(qdev, reg); in ql_wait_reg_rdy()
177 netif_alert(qdev, probe, qdev->ndev, in ql_wait_reg_rdy()
186 netif_alert(qdev, probe, qdev->ndev, in ql_wait_reg_rdy()
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) in ql_wait_cfg() argument
200 temp = ql_read32(qdev, CFG); in ql_wait_cfg()
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, in ql_write_cfg() argument
228 map = pci_map_single(qdev->pdev, ptr, size, direction); in ql_write_cfg()
229 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_write_cfg()
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); in ql_write_cfg()
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK); in ql_write_cfg()
238 status = ql_wait_cfg(qdev, bit); in ql_write_cfg()
240 netif_err(qdev, ifup, qdev->ndev, in ql_write_cfg()
245 ql_write32(qdev, ICB_L, (u32) map); in ql_write_cfg()
246 ql_write32(qdev, ICB_H, (u32) (map >> 32)); in ql_write_cfg()
250 ql_write32(qdev, CFG, (mask | value)); in ql_write_cfg()
255 status = ql_wait_cfg(qdev, bit); in ql_write_cfg()
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ in ql_write_cfg()
258 pci_unmap_single(qdev->pdev, map, size, direction); in ql_write_cfg()
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, in ql_get_mac_addr_reg() argument
274 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
282 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
288 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
296 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
303 ql_wait_reg_rdy(qdev, in ql_get_mac_addr_reg()
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_get_mac_addr_reg()
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, in ql_get_mac_addr_reg()
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA); in ql_get_mac_addr_reg()
322 netif_crit(qdev, ifup, qdev->ndev, in ql_get_mac_addr_reg()
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, in ql_set_mac_addr_reg() argument
347 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | in ql_set_mac_addr_reg()
354 ql_write32(qdev, MAC_ADDR_DATA, lower); in ql_set_mac_addr_reg()
356 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | in ql_set_mac_addr_reg()
364 ql_write32(qdev, MAC_ADDR_DATA, upper); in ql_set_mac_addr_reg()
366 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
380 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_set_mac_addr_reg()
387 ql_write32(qdev, MAC_ADDR_DATA, lower); in ql_set_mac_addr_reg()
389 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ in ql_set_mac_addr_reg()
396 ql_write32(qdev, MAC_ADDR_DATA, upper); in ql_set_mac_addr_reg()
398 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ in ql_set_mac_addr_reg()
410 (qdev-> in ql_set_mac_addr_reg()
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in ql_set_mac_addr_reg()
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output); in ql_set_mac_addr_reg()
428 ql_wait_reg_rdy(qdev, in ql_set_mac_addr_reg()
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ in ql_set_mac_addr_reg()
440 netif_crit(qdev, ifup, qdev->ndev, in ql_set_mac_addr_reg()
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set) in ql_set_mac_addr() argument
459 addr = &qdev->current_mac_addr[0]; in ql_set_mac_addr()
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_set_mac_addr()
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_set_mac_addr()
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in ql_set_mac_addr()
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr, in ql_set_mac_addr()
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); in ql_set_mac_addr()
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in ql_set_mac_addr()
475 netif_err(qdev, ifup, qdev->ndev, in ql_set_mac_addr()
480 void ql_link_on(struct ql_adapter *qdev) in ql_link_on() argument
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n"); in ql_link_on()
483 netif_carrier_on(qdev->ndev); in ql_link_on()
484 ql_set_mac_addr(qdev, 1); in ql_link_on()
487 void ql_link_off(struct ql_adapter *qdev) in ql_link_off() argument
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n"); in ql_link_off()
490 netif_carrier_off(qdev->ndev); in ql_link_off()
491 ql_set_mac_addr(qdev, 0); in ql_link_off()
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) in ql_get_routing_reg() argument
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); in ql_get_routing_reg()
505 ql_write32(qdev, RT_IDX, in ql_get_routing_reg()
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); in ql_get_routing_reg()
510 *value = ql_read32(qdev, RT_DATA); in ql_get_routing_reg()
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, in ql_set_routing_reg() argument
600 netif_err(qdev, ifup, qdev->ndev, in ql_set_routing_reg()
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); in ql_set_routing_reg()
611 ql_write32(qdev, RT_IDX, value); in ql_set_routing_reg()
612 ql_write32(qdev, RT_DATA, enable ? mask : 0); in ql_set_routing_reg()
618 static void ql_enable_interrupts(struct ql_adapter *qdev) in ql_enable_interrupts() argument
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); in ql_enable_interrupts()
623 static void ql_disable_interrupts(struct ql_adapter *qdev) in ql_disable_interrupts() argument
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); in ql_disable_interrupts()
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) in ql_enable_completion_interrupt() argument
638 struct intr_context *ctx = qdev->intr_context + intr; in ql_enable_completion_interrupt()
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { in ql_enable_completion_interrupt()
644 ql_write32(qdev, INTR_EN, in ql_enable_completion_interrupt()
646 var = ql_read32(qdev, STS); in ql_enable_completion_interrupt()
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_enable_completion_interrupt()
652 ql_write32(qdev, INTR_EN, in ql_enable_completion_interrupt()
654 var = ql_read32(qdev, STS); in ql_enable_completion_interrupt()
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_enable_completion_interrupt()
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) in ql_disable_completion_interrupt() argument
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) in ql_disable_completion_interrupt()
671 ctx = qdev->intr_context + intr; in ql_disable_completion_interrupt()
672 spin_lock(&qdev->hw_lock); in ql_disable_completion_interrupt()
674 ql_write32(qdev, INTR_EN, in ql_disable_completion_interrupt()
676 var = ql_read32(qdev, STS); in ql_disable_completion_interrupt()
679 spin_unlock(&qdev->hw_lock); in ql_disable_completion_interrupt()
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) in ql_enable_all_completion_interrupts() argument
686 for (i = 0; i < qdev->intr_count; i++) { in ql_enable_all_completion_interrupts()
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || in ql_enable_all_completion_interrupts()
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1); in ql_enable_all_completion_interrupts()
694 ql_enable_completion_interrupt(qdev, i); in ql_enable_all_completion_interrupts()
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) in ql_validate_flash() argument
703 __le16 *flash = (__le16 *)&qdev->flash; in ql_validate_flash()
705 status = strncmp((char *)&qdev->flash, str, 4); in ql_validate_flash()
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); in ql_validate_flash()
715 netif_err(qdev, ifup, qdev->ndev, in ql_validate_flash()
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) in ql_read_flash_word() argument
725 status = ql_wait_reg_rdy(qdev, in ql_read_flash_word()
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); in ql_read_flash_word()
732 status = ql_wait_reg_rdy(qdev, in ql_read_flash_word()
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); in ql_read_flash_word()
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev) in ql_get_8000_flash_params() argument
749 __le32 *p = (__le32 *)&qdev->flash; in ql_get_8000_flash_params()
756 if (!qdev->port) in ql_get_8000_flash_params()
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) in ql_get_8000_flash_params()
766 status = ql_read_flash_word(qdev, i+offset, p); in ql_get_8000_flash_params()
768 netif_err(qdev, ifup, qdev->ndev, in ql_get_8000_flash_params()
774 status = ql_validate_flash(qdev, in ql_get_8000_flash_params()
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); in ql_get_8000_flash_params()
786 if (qdev->flash.flash_params_8000.data_type1 == 2) in ql_get_8000_flash_params()
788 qdev->flash.flash_params_8000.mac_addr1, in ql_get_8000_flash_params()
789 qdev->ndev->addr_len); in ql_get_8000_flash_params()
792 qdev->flash.flash_params_8000.mac_addr, in ql_get_8000_flash_params()
793 qdev->ndev->addr_len); in ql_get_8000_flash_params()
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); in ql_get_8000_flash_params()
801 memcpy(qdev->ndev->dev_addr, in ql_get_8000_flash_params()
803 qdev->ndev->addr_len); in ql_get_8000_flash_params()
806 ql_sem_unlock(qdev, SEM_FLASH_MASK); in ql_get_8000_flash_params()
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev) in ql_get_8012_flash_params() argument
814 __le32 *p = (__le32 *)&qdev->flash; in ql_get_8012_flash_params()
821 if (qdev->port) in ql_get_8012_flash_params()
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) in ql_get_8012_flash_params()
828 status = ql_read_flash_word(qdev, i+offset, p); in ql_get_8012_flash_params()
830 netif_err(qdev, ifup, qdev->ndev, in ql_get_8012_flash_params()
837 status = ql_validate_flash(qdev, in ql_get_8012_flash_params()
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); in ql_get_8012_flash_params()
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { in ql_get_8012_flash_params()
851 memcpy(qdev->ndev->dev_addr, in ql_get_8012_flash_params()
852 qdev->flash.flash_params_8012.mac_addr, in ql_get_8012_flash_params()
853 qdev->ndev->addr_len); in ql_get_8012_flash_params()
856 ql_sem_unlock(qdev, SEM_FLASH_MASK); in ql_get_8012_flash_params()
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) in ql_write_xgmac_reg() argument
868 status = ql_wait_reg_rdy(qdev, in ql_write_xgmac_reg()
873 ql_write32(qdev, XGMAC_DATA, data); in ql_write_xgmac_reg()
875 ql_write32(qdev, XGMAC_ADDR, reg); in ql_write_xgmac_reg()
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) in ql_read_xgmac_reg() argument
887 status = ql_wait_reg_rdy(qdev, in ql_read_xgmac_reg()
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); in ql_read_xgmac_reg()
894 status = ql_wait_reg_rdy(qdev, in ql_read_xgmac_reg()
899 *data = ql_read32(qdev, XGMAC_DATA); in ql_read_xgmac_reg()
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) in ql_read_xgmac_reg64() argument
911 status = ql_read_xgmac_reg(qdev, reg, &lo); in ql_read_xgmac_reg64()
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi); in ql_read_xgmac_reg64()
925 static int ql_8000_port_initialize(struct ql_adapter *qdev) in ql_8000_port_initialize() argument
932 status = ql_mb_about_fw(qdev); in ql_8000_port_initialize()
935 status = ql_mb_get_fw_state(qdev); in ql_8000_port_initialize()
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); in ql_8000_port_initialize()
950 static int ql_8012_port_initialize(struct ql_adapter *qdev) in ql_8012_port_initialize() argument
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { in ql_8012_port_initialize()
959 netif_info(qdev, link, qdev->ndev, in ql_8012_port_initialize()
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); in ql_8012_port_initialize()
963 netif_crit(qdev, link, qdev->ndev, in ql_8012_port_initialize()
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); in ql_8012_port_initialize()
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); in ql_8012_port_initialize()
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); in ql_8012_port_initialize()
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); in ql_8012_port_initialize()
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data); in ql_8012_port_initialize()
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data); in ql_8012_port_initialize()
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data); in ql_8012_port_initialize()
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data); in ql_8012_port_initialize()
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); in ql_8012_port_initialize()
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); in ql_8012_port_initialize()
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); in ql_8012_port_initialize()
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask); in ql_8012_port_initialize()
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) in ql_lbq_block_size() argument
1027 return PAGE_SIZE << qdev->lbq_buf_order; in ql_lbq_block_size()
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, in ql_get_curr_lchunk() argument
1046 pci_dma_sync_single_for_cpu(qdev->pdev, in ql_get_curr_lchunk()
1055 == ql_lbq_block_size(qdev)) in ql_get_curr_lchunk()
1056 pci_unmap_page(qdev->pdev, in ql_get_curr_lchunk()
1058 ql_lbq_block_size(qdev), in ql_get_curr_lchunk()
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, in ql_get_next_chunk() argument
1096 qdev->lbq_buf_order); in ql_get_next_chunk()
1098 netif_err(qdev, drv, qdev->ndev, in ql_get_next_chunk()
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, in ql_get_next_chunk()
1104 0, ql_lbq_block_size(qdev), in ql_get_next_chunk()
1106 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_get_next_chunk()
1108 qdev->lbq_buf_order); in ql_get_next_chunk()
1110 netif_err(qdev, drv, qdev->ndev, in ql_get_next_chunk()
1127 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { in ql_get_next_chunk()
1138 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_lbq() argument
1148 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_lbq()
1152 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1154 netif_err(qdev, ifup, qdev->ndev, in ql_update_lbq()
1167 pci_dma_sync_single_for_device(qdev->pdev, map, in ql_update_lbq()
1183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_lbq()
1192 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_sbq() argument
1203 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_sbq()
1207 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_update_sbq()
1208 qdev->ndev, in ql_update_sbq()
1212 netdev_alloc_skb(qdev->ndev, in ql_update_sbq()
1219 map = pci_map_single(qdev->pdev, in ql_update_sbq()
1223 if (pci_dma_mapping_error(qdev->pdev, map)) { in ql_update_sbq()
1224 netif_err(qdev, ifup, qdev->ndev, in ql_update_sbq()
1249 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_update_sbq()
1257 static void ql_update_buffer_queues(struct ql_adapter *qdev, in ql_update_buffer_queues() argument
1260 ql_update_sbq(qdev, rx_ring); in ql_update_buffer_queues()
1261 ql_update_lbq(qdev, rx_ring); in ql_update_buffer_queues()
1267 static void ql_unmap_send(struct ql_adapter *qdev, in ql_unmap_send() argument
1283 netif_printk(qdev, tx_done, KERN_DEBUG, in ql_unmap_send()
1284 qdev->ndev, in ql_unmap_send()
1287 pci_unmap_single(qdev->pdev, in ql_unmap_send()
1294 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, in ql_unmap_send()
1296 pci_unmap_page(qdev->pdev, in ql_unmap_send()
1309 static int ql_map_send(struct ql_adapter *qdev, in ql_map_send() argument
1320 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in ql_map_send()
1326 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); in ql_map_send()
1328 err = pci_dma_mapping_error(qdev->pdev, map); in ql_map_send()
1330 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1372 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, in ql_map_send()
1375 err = pci_dma_mapping_error(qdev->pdev, map); in ql_map_send()
1377 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1400 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), in ql_map_send()
1403 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_map_send()
1405 netif_err(qdev, tx_queued, qdev->ndev, in ql_map_send()
1431 ql_unmap_send(qdev, tx_ring_desc, map_idx); in ql_map_send()
1436 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, in ql_categorize_rx_err() argument
1439 struct nic_stats *stats = &qdev->nic_stats; in ql_categorize_rx_err()
1471 static void ql_update_mac_hdr_len(struct ql_adapter *qdev, in ql_update_mac_hdr_len() argument
1477 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in ql_update_mac_hdr_len()
1491 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, in ql_process_mac_rx_gro_page() argument
1498 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page()
1503 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_gro_page()
1507 napi->dev = qdev->ndev; in ql_process_mac_rx_gro_page()
1511 netif_err(qdev, drv, qdev->ndev, in ql_process_mac_rx_gro_page()
1538 static void ql_process_mac_rx_page(struct ql_adapter *qdev, in ql_process_mac_rx_page() argument
1544 struct net_device *ndev = qdev->ndev; in ql_process_mac_rx_page()
1547 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page()
1563 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_page()
1568 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); in ql_process_mac_rx_page()
1574 netif_err(qdev, drv, qdev->ndev, in ql_process_mac_rx_page()
1580 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_page()
1599 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_page()
1610 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_process_mac_rx_page()
1611 qdev->ndev, in ql_process_mac_rx_page()
1631 static void ql_process_mac_rx_skb(struct ql_adapter *qdev, in ql_process_mac_rx_skb() argument
1637 struct net_device *ndev = qdev->ndev; in ql_process_mac_rx_skb()
1644 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); in ql_process_mac_rx_skb()
1651 pci_dma_sync_single_for_cpu(qdev->pdev, in ql_process_mac_rx_skb()
1658 pci_dma_sync_single_for_device(qdev->pdev, in ql_process_mac_rx_skb()
1666 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_skb()
1672 if (test_bit(QL_SELFTEST, &qdev->flags)) { in ql_process_mac_rx_skb()
1673 ql_check_lb_frame(qdev, skb); in ql_process_mac_rx_skb()
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1699 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1714 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_rx_skb()
1724 netif_printk(qdev, rx_status, KERN_DEBUG, in ql_process_mac_rx_skb()
1725 qdev->ndev, in ql_process_mac_rx_skb()
1758 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, in ql_build_rx_skb() argument
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1780 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1794 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1812 pci_dma_sync_single_for_cpu(qdev->pdev, in ql_build_rx_skb()
1819 pci_dma_sync_single_for_device(qdev->pdev, in ql_build_rx_skb()
1828 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1835 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1845 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1854 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1869 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1870 skb = netdev_alloc_skb(qdev->ndev, length); in ql_build_rx_skb()
1872 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1876 pci_unmap_page(qdev->pdev, in ql_build_rx_skb()
1882 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1892 ql_update_mac_hdr_len(qdev, ib_mac_rsp, in ql_build_rx_skb()
1911 pci_unmap_single(qdev->pdev, in ql_build_rx_skb()
1925 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1933 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_build_rx_skb()
1950 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
1958 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, in ql_process_mac_split_rx_intr() argument
1963 struct net_device *ndev = qdev->ndev; in ql_process_mac_split_rx_intr()
1968 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); in ql_process_mac_split_rx_intr()
1970 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
1978 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_split_rx_intr()
1993 if (test_bit(QL_SELFTEST, &qdev->flags)) { in ql_process_mac_split_rx_intr()
1994 ql_check_lb_frame(qdev, skb); in ql_process_mac_split_rx_intr()
2001 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", in ql_process_mac_split_rx_intr()
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2025 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2035 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_process_mac_split_rx_intr()
2053 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, in ql_process_mac_rx_intr() argument
2059 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? in ql_process_mac_rx_intr()
2069 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2076 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2084 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2090 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2096 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2104 static void ql_process_mac_tx_intr(struct ql_adapter *qdev, in ql_process_mac_tx_intr() argument
2111 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; in ql_process_mac_tx_intr()
2113 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); in ql_process_mac_tx_intr()
2124 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2128 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2132 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2136 netif_warn(qdev, tx_done, qdev->ndev, in ql_process_mac_tx_intr()
2144 void ql_queue_fw_error(struct ql_adapter *qdev) in ql_queue_fw_error() argument
2146 ql_link_off(qdev); in ql_queue_fw_error()
2147 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); in ql_queue_fw_error()
2150 void ql_queue_asic_error(struct ql_adapter *qdev) in ql_queue_asic_error() argument
2152 ql_link_off(qdev); in ql_queue_asic_error()
2153 ql_disable_interrupts(qdev); in ql_queue_asic_error()
2158 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_queue_asic_error()
2162 set_bit(QL_ASIC_RECOVERY, &qdev->flags); in ql_queue_asic_error()
2163 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); in ql_queue_asic_error()
2166 static void ql_process_chip_ae_intr(struct ql_adapter *qdev, in ql_process_chip_ae_intr() argument
2171 netif_err(qdev, rx_err, qdev->ndev, in ql_process_chip_ae_intr()
2173 ql_queue_fw_error(qdev); in ql_process_chip_ae_intr()
2177 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); in ql_process_chip_ae_intr()
2178 netdev_err(qdev->ndev, "This event shouldn't occur.\n"); in ql_process_chip_ae_intr()
2179 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2183 netdev_err(qdev->ndev, "Soft ECC error detected.\n"); in ql_process_chip_ae_intr()
2184 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2188 netdev_err(qdev->ndev, "PCI error occurred when reading " in ql_process_chip_ae_intr()
2191 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2195 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", in ql_process_chip_ae_intr()
2197 ql_queue_asic_error(qdev); in ql_process_chip_ae_intr()
2204 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_outbound_rx_ring() local
2213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_outbound_rx_ring()
2223 ql_process_mac_tx_intr(qdev, net_rsp); in ql_clean_outbound_rx_ring()
2226 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_outbound_rx_ring()
2237 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; in ql_clean_outbound_rx_ring()
2238 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { in ql_clean_outbound_rx_ring()
2244 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in ql_clean_outbound_rx_ring()
2252 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_inbound_rx_ring() local
2260 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_inbound_rx_ring()
2268 ql_process_mac_rx_intr(qdev, rx_ring, in ql_clean_inbound_rx_ring()
2274 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) in ql_clean_inbound_rx_ring()
2278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_clean_inbound_rx_ring()
2289 ql_update_buffer_queues(qdev, rx_ring); in ql_clean_inbound_rx_ring()
2297 struct ql_adapter *qdev = rx_ring->qdev; in ql_napi_poll_msix() local
2300 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; in ql_napi_poll_msix()
2302 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2307 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { in ql_napi_poll_msix()
2308 trx_ring = &qdev->rx_ring[i]; in ql_napi_poll_msix()
2315 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2327 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in ql_napi_poll_msix()
2335 ql_enable_completion_interrupt(qdev, rx_ring->irq); in ql_napi_poll_msix()
2342 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_mode() local
2345 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | in qlge_vlan_mode()
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); in qlge_vlan_mode()
2359 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_update_hw_vlan_features() local
2364 status = ql_adapter_down(qdev); in qlge_update_hw_vlan_features()
2366 netif_err(qdev, link, qdev->ndev, in qlge_update_hw_vlan_features()
2376 status = ql_adapter_up(qdev); in qlge_update_hw_vlan_features()
2378 netif_err(qdev, link, qdev->ndev, in qlge_update_hw_vlan_features()
2405 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) in __qlge_vlan_rx_add_vid() argument
2410 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, in __qlge_vlan_rx_add_vid()
2413 netif_err(qdev, ifup, qdev->ndev, in __qlge_vlan_rx_add_vid()
2420 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_rx_add_vid() local
2424 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_add_vid()
2428 err = __qlge_vlan_rx_add_vid(qdev, vid); in qlge_vlan_rx_add_vid()
2429 set_bit(vid, qdev->active_vlans); in qlge_vlan_rx_add_vid()
2431 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_add_vid()
2436 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) in __qlge_vlan_rx_kill_vid() argument
2441 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, in __qlge_vlan_rx_kill_vid()
2444 netif_err(qdev, ifup, qdev->ndev, in __qlge_vlan_rx_kill_vid()
2451 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_vlan_rx_kill_vid() local
2455 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_kill_vid()
2459 err = __qlge_vlan_rx_kill_vid(qdev, vid); in qlge_vlan_rx_kill_vid()
2460 clear_bit(vid, qdev->active_vlans); in qlge_vlan_rx_kill_vid()
2462 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_vlan_rx_kill_vid()
2467 static void qlge_restore_vlan(struct ql_adapter *qdev) in qlge_restore_vlan() argument
2472 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_restore_vlan()
2476 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) in qlge_restore_vlan()
2477 __qlge_vlan_rx_add_vid(qdev, vid); in qlge_restore_vlan()
2479 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_restore_vlan()
2498 struct ql_adapter *qdev = rx_ring->qdev; in qlge_isr() local
2499 struct intr_context *intr_context = &qdev->intr_context[0]; in qlge_isr()
2503 spin_lock(&qdev->hw_lock); in qlge_isr()
2504 if (atomic_read(&qdev->intr_context[0].irq_cnt)) { in qlge_isr()
2505 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, in qlge_isr()
2507 spin_unlock(&qdev->hw_lock); in qlge_isr()
2510 spin_unlock(&qdev->hw_lock); in qlge_isr()
2512 var = ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2518 ql_queue_asic_error(qdev); in qlge_isr()
2519 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); in qlge_isr()
2520 var = ql_read32(qdev, ERR_STS); in qlge_isr()
2521 netdev_err(qdev->ndev, "Resetting chip. " in qlge_isr()
2530 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { in qlge_isr()
2535 netif_err(qdev, intr, qdev->ndev, in qlge_isr()
2537 ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2538 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); in qlge_isr()
2540 qdev->workqueue, &qdev->mpi_work, 0); in qlge_isr()
2549 var = ql_read32(qdev, ISR1); in qlge_isr()
2551 netif_info(qdev, intr, qdev->ndev, in qlge_isr()
2553 ql_disable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2557 ql_enable_completion_interrupt(qdev, intr_context->intr); in qlge_isr()
2638 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_send() local
2643 tx_ring = &qdev->tx_ring[tx_ring_idx]; in qlge_send()
2649 netif_info(qdev, tx_queued, qdev->ndev, in qlge_send()
2671 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in qlge_send()
2684 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != in qlge_send()
2686 netif_err(qdev, tx_queued, qdev->ndev, in qlge_send()
2699 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, in qlge_send()
2712 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); in qlge_send()
2718 static void ql_free_shadow_space(struct ql_adapter *qdev) in ql_free_shadow_space() argument
2720 if (qdev->rx_ring_shadow_reg_area) { in ql_free_shadow_space()
2721 pci_free_consistent(qdev->pdev, in ql_free_shadow_space()
2723 qdev->rx_ring_shadow_reg_area, in ql_free_shadow_space()
2724 qdev->rx_ring_shadow_reg_dma); in ql_free_shadow_space()
2725 qdev->rx_ring_shadow_reg_area = NULL; in ql_free_shadow_space()
2727 if (qdev->tx_ring_shadow_reg_area) { in ql_free_shadow_space()
2728 pci_free_consistent(qdev->pdev, in ql_free_shadow_space()
2730 qdev->tx_ring_shadow_reg_area, in ql_free_shadow_space()
2731 qdev->tx_ring_shadow_reg_dma); in ql_free_shadow_space()
2732 qdev->tx_ring_shadow_reg_area = NULL; in ql_free_shadow_space()
2736 static int ql_alloc_shadow_space(struct ql_adapter *qdev) in ql_alloc_shadow_space() argument
2738 qdev->rx_ring_shadow_reg_area = in ql_alloc_shadow_space()
2739 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, in ql_alloc_shadow_space()
2740 &qdev->rx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2741 if (qdev->rx_ring_shadow_reg_area == NULL) { in ql_alloc_shadow_space()
2742 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_shadow_space()
2747 qdev->tx_ring_shadow_reg_area = in ql_alloc_shadow_space()
2748 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, in ql_alloc_shadow_space()
2749 &qdev->tx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2750 if (qdev->tx_ring_shadow_reg_area == NULL) { in ql_alloc_shadow_space()
2751 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_shadow_space()
2758 pci_free_consistent(qdev->pdev, in ql_alloc_shadow_space()
2760 qdev->rx_ring_shadow_reg_area, in ql_alloc_shadow_space()
2761 qdev->rx_ring_shadow_reg_dma); in ql_alloc_shadow_space()
2765 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_init_tx_ring() argument
2783 static void ql_free_tx_resources(struct ql_adapter *qdev, in ql_free_tx_resources() argument
2787 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_free_tx_resources()
2795 static int ql_alloc_tx_resources(struct ql_adapter *qdev, in ql_alloc_tx_resources() argument
2799 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2814 pci_free_consistent(qdev->pdev, tx_ring->wq_size, in ql_alloc_tx_resources()
2818 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); in ql_alloc_tx_resources()
2822 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_lbq_buffers() argument
2834 pci_unmap_page(qdev->pdev, in ql_free_lbq_buffers()
2836 ql_lbq_block_size(qdev), in ql_free_lbq_buffers()
2849 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, in ql_free_lbq_buffers()
2850 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); in ql_free_lbq_buffers()
2856 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_sbq_buffers() argument
2864 netif_err(qdev, ifup, qdev->ndev, in ql_free_sbq_buffers()
2869 pci_unmap_single(qdev->pdev, in ql_free_sbq_buffers()
2882 static void ql_free_rx_buffers(struct ql_adapter *qdev) in ql_free_rx_buffers() argument
2887 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_free_rx_buffers()
2888 rx_ring = &qdev->rx_ring[i]; in ql_free_rx_buffers()
2890 ql_free_lbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2892 ql_free_sbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2896 static void ql_alloc_rx_buffers(struct ql_adapter *qdev) in ql_alloc_rx_buffers() argument
2901 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_alloc_rx_buffers()
2902 rx_ring = &qdev->rx_ring[i]; in ql_alloc_rx_buffers()
2904 ql_update_buffer_queues(qdev, rx_ring); in ql_alloc_rx_buffers()
2908 static void ql_init_lbq_ring(struct ql_adapter *qdev, in ql_init_lbq_ring() argument
2925 static void ql_init_sbq_ring(struct ql_adapter *qdev, in ql_init_sbq_ring() argument
2942 static void ql_free_rx_resources(struct ql_adapter *qdev, in ql_free_rx_resources() argument
2947 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2959 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2971 pci_free_consistent(qdev->pdev, in ql_free_rx_resources()
2980 static int ql_alloc_rx_resources(struct ql_adapter *qdev, in ql_alloc_rx_resources() argument
2988 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, in ql_alloc_rx_resources()
2992 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); in ql_alloc_rx_resources()
3001 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, in ql_alloc_rx_resources()
3005 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_rx_resources()
3019 ql_init_sbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3027 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, in ql_alloc_rx_resources()
3031 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_rx_resources()
3044 ql_init_lbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3050 ql_free_rx_resources(qdev, rx_ring); in ql_alloc_rx_resources()
3054 static void ql_tx_ring_clean(struct ql_adapter *qdev) in ql_tx_ring_clean() argument
3064 for (j = 0; j < qdev->tx_ring_count; j++) { in ql_tx_ring_clean()
3065 tx_ring = &qdev->tx_ring[j]; in ql_tx_ring_clean()
3069 netif_err(qdev, ifdown, qdev->ndev, in ql_tx_ring_clean()
3073 ql_unmap_send(qdev, tx_ring_desc, in ql_tx_ring_clean()
3082 static void ql_free_mem_resources(struct ql_adapter *qdev) in ql_free_mem_resources() argument
3086 for (i = 0; i < qdev->tx_ring_count; i++) in ql_free_mem_resources()
3087 ql_free_tx_resources(qdev, &qdev->tx_ring[i]); in ql_free_mem_resources()
3088 for (i = 0; i < qdev->rx_ring_count; i++) in ql_free_mem_resources()
3089 ql_free_rx_resources(qdev, &qdev->rx_ring[i]); in ql_free_mem_resources()
3090 ql_free_shadow_space(qdev); in ql_free_mem_resources()
3093 static int ql_alloc_mem_resources(struct ql_adapter *qdev) in ql_alloc_mem_resources() argument
3098 if (ql_alloc_shadow_space(qdev)) in ql_alloc_mem_resources()
3101 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_alloc_mem_resources()
3102 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { in ql_alloc_mem_resources()
3103 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_mem_resources()
3109 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_alloc_mem_resources()
3110 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { in ql_alloc_mem_resources()
3111 netif_err(qdev, ifup, qdev->ndev, in ql_alloc_mem_resources()
3119 ql_free_mem_resources(qdev); in ql_alloc_mem_resources()
3127 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_start_rx_ring() argument
3130 void *shadow_reg = qdev->rx_ring_shadow_reg_area + in ql_start_rx_ring()
3132 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + in ql_start_rx_ring()
3135 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); in ql_start_rx_ring()
3234 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); in ql_start_rx_ring()
3235 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); in ql_start_rx_ring()
3241 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, in ql_start_rx_ring()
3243 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); in ql_start_rx_ring()
3244 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); in ql_start_rx_ring()
3247 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_start_rx_ring()
3250 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), in ql_start_rx_ring()
3253 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); in ql_start_rx_ring()
3259 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) in ql_start_tx_ring() argument
3263 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); in ql_start_tx_ring()
3264 void *shadow_reg = qdev->tx_ring_shadow_reg_area + in ql_start_tx_ring()
3266 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + in ql_start_tx_ring()
3294 ql_init_tx_ring(qdev, tx_ring); in ql_start_tx_ring()
3296 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, in ql_start_tx_ring()
3299 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); in ql_start_tx_ring()
3305 static void ql_disable_msix(struct ql_adapter *qdev) in ql_disable_msix() argument
3307 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_disable_msix()
3308 pci_disable_msix(qdev->pdev); in ql_disable_msix()
3309 clear_bit(QL_MSIX_ENABLED, &qdev->flags); in ql_disable_msix()
3310 kfree(qdev->msi_x_entry); in ql_disable_msix()
3311 qdev->msi_x_entry = NULL; in ql_disable_msix()
3312 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { in ql_disable_msix()
3313 pci_disable_msi(qdev->pdev); in ql_disable_msix()
3314 clear_bit(QL_MSI_ENABLED, &qdev->flags); in ql_disable_msix()
3322 static void ql_enable_msix(struct ql_adapter *qdev) in ql_enable_msix() argument
3331 qdev->msi_x_entry = kcalloc(qdev->intr_count, in ql_enable_msix()
3334 if (!qdev->msi_x_entry) { in ql_enable_msix()
3339 for (i = 0; i < qdev->intr_count; i++) in ql_enable_msix()
3340 qdev->msi_x_entry[i].entry = i; in ql_enable_msix()
3342 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry, in ql_enable_msix()
3343 1, qdev->intr_count); in ql_enable_msix()
3345 kfree(qdev->msi_x_entry); in ql_enable_msix()
3346 qdev->msi_x_entry = NULL; in ql_enable_msix()
3347 netif_warn(qdev, ifup, qdev->ndev, in ql_enable_msix()
3351 qdev->intr_count = err; in ql_enable_msix()
3352 set_bit(QL_MSIX_ENABLED, &qdev->flags); in ql_enable_msix()
3353 netif_info(qdev, ifup, qdev->ndev, in ql_enable_msix()
3355 qdev->intr_count); in ql_enable_msix()
3360 qdev->intr_count = 1; in ql_enable_msix()
3362 if (!pci_enable_msi(qdev->pdev)) { in ql_enable_msix()
3363 set_bit(QL_MSI_ENABLED, &qdev->flags); in ql_enable_msix()
3364 netif_info(qdev, ifup, qdev->ndev, in ql_enable_msix()
3370 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_enable_msix()
3383 static void ql_set_tx_vect(struct ql_adapter *qdev) in ql_set_tx_vect() argument
3386 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; in ql_set_tx_vect()
3388 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_set_tx_vect()
3390 for (vect = 0, j = 0, i = qdev->rss_ring_count; in ql_set_tx_vect()
3391 i < qdev->rx_ring_count; i++) { in ql_set_tx_vect()
3396 qdev->rx_ring[i].irq = vect; in ql_set_tx_vect()
3403 for (i = 0; i < qdev->rx_ring_count; i++) in ql_set_tx_vect()
3404 qdev->rx_ring[i].irq = 0; in ql_set_tx_vect()
3413 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) in ql_set_irq_mask() argument
3416 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; in ql_set_irq_mask()
3418 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_set_irq_mask()
3422 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); in ql_set_irq_mask()
3427 (1 << qdev->rx_ring[qdev->rss_ring_count + in ql_set_irq_mask()
3434 for (j = 0; j < qdev->rx_ring_count; j++) in ql_set_irq_mask()
3435 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); in ql_set_irq_mask()
3445 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) in ql_resolve_queues_to_irqs() argument
3448 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_resolve_queues_to_irqs()
3450 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { in ql_resolve_queues_to_irqs()
3455 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_resolve_queues_to_irqs()
3456 qdev->rx_ring[i].irq = i; in ql_resolve_queues_to_irqs()
3458 intr_context->qdev = qdev; in ql_resolve_queues_to_irqs()
3462 ql_set_irq_mask(qdev, intr_context); in ql_resolve_queues_to_irqs()
3487 qdev->ndev->name, i); in ql_resolve_queues_to_irqs()
3494 qdev->ndev->name, i); in ql_resolve_queues_to_irqs()
3503 intr_context->qdev = qdev; in ql_resolve_queues_to_irqs()
3519 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); in ql_resolve_queues_to_irqs()
3525 ql_set_irq_mask(qdev, intr_context); in ql_resolve_queues_to_irqs()
3530 ql_set_tx_vect(qdev); in ql_resolve_queues_to_irqs()
3533 static void ql_free_irq(struct ql_adapter *qdev) in ql_free_irq() argument
3536 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_free_irq()
3538 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_free_irq()
3540 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_free_irq()
3541 free_irq(qdev->msi_x_entry[i].vector, in ql_free_irq()
3542 &qdev->rx_ring[i]); in ql_free_irq()
3544 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); in ql_free_irq()
3548 ql_disable_msix(qdev); in ql_free_irq()
3551 static int ql_request_irq(struct ql_adapter *qdev) in ql_request_irq() argument
3555 struct pci_dev *pdev = qdev->pdev; in ql_request_irq()
3556 struct intr_context *intr_context = &qdev->intr_context[0]; in ql_request_irq()
3558 ql_resolve_queues_to_irqs(qdev); in ql_request_irq()
3560 for (i = 0; i < qdev->intr_count; i++, intr_context++) { in ql_request_irq()
3562 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { in ql_request_irq()
3563 status = request_irq(qdev->msi_x_entry[i].vector, in ql_request_irq()
3567 &qdev->rx_ring[i]); in ql_request_irq()
3569 netif_err(qdev, ifup, qdev->ndev, in ql_request_irq()
3575 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3577 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3579 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3582 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, in ql_request_irq()
3584 &qdev->rx_ring[0]); in ql_request_irq()
3588 &qdev-> in ql_request_irq()
3590 intr_context->name, &qdev->rx_ring[0]); in ql_request_irq()
3594 netif_err(qdev, ifup, qdev->ndev, in ql_request_irq()
3597 qdev->rx_ring[0].type == DEFAULT_Q ? in ql_request_irq()
3599 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : in ql_request_irq()
3600 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", in ql_request_irq()
3607 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n"); in ql_request_irq()
3608 ql_free_irq(qdev); in ql_request_irq()
3612 static int ql_start_rss(struct ql_adapter *qdev) in ql_start_rss() argument
3621 struct ricb *ricb = &qdev->ricb; in ql_start_rss()
3637 hash_id[i] = (i & (qdev->rss_ring_count - 1)); in ql_start_rss()
3642 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); in ql_start_rss()
3644 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); in ql_start_rss()
3650 static int ql_clear_routing_entries(struct ql_adapter *qdev) in ql_clear_routing_entries() argument
3654 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in ql_clear_routing_entries()
3659 status = ql_set_routing_reg(qdev, i, 0, 0); in ql_clear_routing_entries()
3661 netif_err(qdev, ifup, qdev->ndev, in ql_clear_routing_entries()
3666 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in ql_clear_routing_entries()
3671 static int ql_route_initialize(struct ql_adapter *qdev) in ql_route_initialize() argument
3676 status = ql_clear_routing_entries(qdev); in ql_route_initialize()
3680 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in ql_route_initialize()
3684 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, in ql_route_initialize()
3687 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3692 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, in ql_route_initialize()
3695 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3700 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); in ql_route_initialize()
3702 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3709 if (qdev->rss_ring_count > 1) { in ql_route_initialize()
3710 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, in ql_route_initialize()
3713 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3719 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, in ql_route_initialize()
3722 netif_err(qdev, ifup, qdev->ndev, in ql_route_initialize()
3725 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in ql_route_initialize()
3729 int ql_cam_route_initialize(struct ql_adapter *qdev) in ql_cam_route_initialize() argument
3737 set = ql_read32(qdev, STS); in ql_cam_route_initialize()
3738 set &= qdev->port_link_up; in ql_cam_route_initialize()
3739 status = ql_set_mac_addr(qdev, set); in ql_cam_route_initialize()
3741 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); in ql_cam_route_initialize()
3745 status = ql_route_initialize(qdev); in ql_cam_route_initialize()
3747 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); in ql_cam_route_initialize()
3752 static int ql_adapter_initialize(struct ql_adapter *qdev) in ql_adapter_initialize() argument
3763 ql_write32(qdev, SYS, mask | value); in ql_adapter_initialize()
3768 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { in ql_adapter_initialize()
3772 ql_write32(qdev, NIC_RCV_CFG, (mask | value)); in ql_adapter_initialize()
3775 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); in ql_adapter_initialize()
3785 ql_write32(qdev, FSC, mask | value); in ql_adapter_initialize()
3787 ql_write32(qdev, SPLT_HDR, SPLT_LEN); in ql_adapter_initialize()
3794 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); in ql_adapter_initialize()
3799 value = ql_read32(qdev, MGMT_RCV_CFG); in ql_adapter_initialize()
3804 ql_write32(qdev, MGMT_RCV_CFG, mask); in ql_adapter_initialize()
3805 ql_write32(qdev, MGMT_RCV_CFG, mask | value); in ql_adapter_initialize()
3808 if (qdev->pdev->subsystem_device == 0x0068 || in ql_adapter_initialize()
3809 qdev->pdev->subsystem_device == 0x0180) in ql_adapter_initialize()
3810 qdev->wol = WAKE_MAGIC; in ql_adapter_initialize()
3813 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_adapter_initialize()
3814 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); in ql_adapter_initialize()
3816 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3825 if (qdev->rss_ring_count > 1) { in ql_adapter_initialize()
3826 status = ql_start_rss(qdev); in ql_adapter_initialize()
3828 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); in ql_adapter_initialize()
3834 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_adapter_initialize()
3835 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); in ql_adapter_initialize()
3837 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3844 status = qdev->nic_ops->port_initialize(qdev); in ql_adapter_initialize()
3846 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); in ql_adapter_initialize()
3849 status = ql_cam_route_initialize(qdev); in ql_adapter_initialize()
3851 netif_err(qdev, ifup, qdev->ndev, in ql_adapter_initialize()
3857 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_initialize()
3858 napi_enable(&qdev->rx_ring[i].napi); in ql_adapter_initialize()
3864 static int ql_adapter_reset(struct ql_adapter *qdev) in ql_adapter_reset() argument
3871 status = ql_clear_routing_entries(qdev); in ql_adapter_reset()
3873 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); in ql_adapter_reset()
3880 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { in ql_adapter_reset()
3882 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); in ql_adapter_reset()
3885 ql_wait_fifo_empty(qdev); in ql_adapter_reset()
3887 clear_bit(QL_ASIC_RECOVERY, &qdev->flags); in ql_adapter_reset()
3889 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); in ql_adapter_reset()
3893 value = ql_read32(qdev, RST_FO); in ql_adapter_reset()
3900 netif_err(qdev, ifdown, qdev->ndev, in ql_adapter_reset()
3906 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); in ql_adapter_reset()
3912 struct ql_adapter *qdev = netdev_priv(ndev); in ql_display_dev_info() local
3914 netif_info(qdev, probe, qdev->ndev, in ql_display_dev_info()
3917 qdev->func, in ql_display_dev_info()
3918 qdev->port, in ql_display_dev_info()
3919 qdev->chip_rev_id & 0x0000000f, in ql_display_dev_info()
3920 qdev->chip_rev_id >> 4 & 0x0000000f, in ql_display_dev_info()
3921 qdev->chip_rev_id >> 8 & 0x0000000f, in ql_display_dev_info()
3922 qdev->chip_rev_id >> 12 & 0x0000000f); in ql_display_dev_info()
3923 netif_info(qdev, probe, qdev->ndev, in ql_display_dev_info()
3927 static int ql_wol(struct ql_adapter *qdev) in ql_wol() argument
3939 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | in ql_wol()
3941 netif_err(qdev, ifdown, qdev->ndev, in ql_wol()
3943 qdev->wol); in ql_wol()
3947 if (qdev->wol & WAKE_MAGIC) { in ql_wol()
3948 status = ql_mb_wol_set_magic(qdev, 1); in ql_wol()
3950 netif_err(qdev, ifdown, qdev->ndev, in ql_wol()
3952 qdev->ndev->name); in ql_wol()
3955 netif_info(qdev, drv, qdev->ndev, in ql_wol()
3957 qdev->ndev->name); in ql_wol()
3962 if (qdev->wol) { in ql_wol()
3964 status = ql_mb_wol_mode(qdev, wol); in ql_wol()
3965 netif_err(qdev, drv, qdev->ndev, in ql_wol()
3968 wol, qdev->ndev->name); in ql_wol()
3974 static void ql_cancel_all_work_sync(struct ql_adapter *qdev) in ql_cancel_all_work_sync() argument
3980 if (test_bit(QL_ADAPTER_UP, &qdev->flags)) in ql_cancel_all_work_sync()
3981 cancel_delayed_work_sync(&qdev->asic_reset_work); in ql_cancel_all_work_sync()
3982 cancel_delayed_work_sync(&qdev->mpi_reset_work); in ql_cancel_all_work_sync()
3983 cancel_delayed_work_sync(&qdev->mpi_work); in ql_cancel_all_work_sync()
3984 cancel_delayed_work_sync(&qdev->mpi_idc_work); in ql_cancel_all_work_sync()
3985 cancel_delayed_work_sync(&qdev->mpi_core_to_log); in ql_cancel_all_work_sync()
3986 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); in ql_cancel_all_work_sync()
3989 static int ql_adapter_down(struct ql_adapter *qdev) in ql_adapter_down() argument
3993 ql_link_off(qdev); in ql_adapter_down()
3995 ql_cancel_all_work_sync(qdev); in ql_adapter_down()
3997 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_down()
3998 napi_disable(&qdev->rx_ring[i].napi); in ql_adapter_down()
4000 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_down()
4002 ql_disable_interrupts(qdev); in ql_adapter_down()
4004 ql_tx_ring_clean(qdev); in ql_adapter_down()
4008 for (i = 0; i < qdev->rss_ring_count; i++) in ql_adapter_down()
4009 netif_napi_del(&qdev->rx_ring[i].napi); in ql_adapter_down()
4011 status = ql_adapter_reset(qdev); in ql_adapter_down()
4013 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", in ql_adapter_down()
4014 qdev->func); in ql_adapter_down()
4015 ql_free_rx_buffers(qdev); in ql_adapter_down()
4020 static int ql_adapter_up(struct ql_adapter *qdev) in ql_adapter_up() argument
4024 err = ql_adapter_initialize(qdev); in ql_adapter_up()
4026 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); in ql_adapter_up()
4029 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_up()
4030 ql_alloc_rx_buffers(qdev); in ql_adapter_up()
4034 if ((ql_read32(qdev, STS) & qdev->port_init) && in ql_adapter_up()
4035 (ql_read32(qdev, STS) & qdev->port_link_up)) in ql_adapter_up()
4036 ql_link_on(qdev); in ql_adapter_up()
4038 clear_bit(QL_ALLMULTI, &qdev->flags); in ql_adapter_up()
4039 clear_bit(QL_PROMISCUOUS, &qdev->flags); in ql_adapter_up()
4040 qlge_set_multicast_list(qdev->ndev); in ql_adapter_up()
4043 qlge_restore_vlan(qdev); in ql_adapter_up()
4045 ql_enable_interrupts(qdev); in ql_adapter_up()
4046 ql_enable_all_completion_interrupts(qdev); in ql_adapter_up()
4047 netif_tx_start_all_queues(qdev->ndev); in ql_adapter_up()
4051 ql_adapter_reset(qdev); in ql_adapter_up()
4055 static void ql_release_adapter_resources(struct ql_adapter *qdev) in ql_release_adapter_resources() argument
4057 ql_free_mem_resources(qdev); in ql_release_adapter_resources()
4058 ql_free_irq(qdev); in ql_release_adapter_resources()
4061 static int ql_get_adapter_resources(struct ql_adapter *qdev) in ql_get_adapter_resources() argument
4065 if (ql_alloc_mem_resources(qdev)) { in ql_get_adapter_resources()
4066 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); in ql_get_adapter_resources()
4069 status = ql_request_irq(qdev); in ql_get_adapter_resources()
4075 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_close() local
4081 if (test_bit(QL_EEH_FATAL, &qdev->flags)) { in qlge_close()
4082 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); in qlge_close()
4083 clear_bit(QL_EEH_FATAL, &qdev->flags); in qlge_close()
4091 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) in qlge_close()
4093 ql_adapter_down(qdev); in qlge_close()
4094 ql_release_adapter_resources(qdev); in qlge_close()
4098 static int ql_configure_rings(struct ql_adapter *qdev) in ql_configure_rings() argument
4104 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? in ql_configure_rings()
4107 qdev->lbq_buf_order = get_order(lbq_buf_len); in ql_configure_rings()
4116 qdev->intr_count = cpu_cnt; in ql_configure_rings()
4117 ql_enable_msix(qdev); in ql_configure_rings()
4119 qdev->rss_ring_count = qdev->intr_count; in ql_configure_rings()
4120 qdev->tx_ring_count = cpu_cnt; in ql_configure_rings()
4121 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; in ql_configure_rings()
4123 for (i = 0; i < qdev->tx_ring_count; i++) { in ql_configure_rings()
4124 tx_ring = &qdev->tx_ring[i]; in ql_configure_rings()
4126 tx_ring->qdev = qdev; in ql_configure_rings()
4128 tx_ring->wq_len = qdev->tx_ring_size; in ql_configure_rings()
4136 tx_ring->cq_id = qdev->rss_ring_count + i; in ql_configure_rings()
4139 for (i = 0; i < qdev->rx_ring_count; i++) { in ql_configure_rings()
4140 rx_ring = &qdev->rx_ring[i]; in ql_configure_rings()
4142 rx_ring->qdev = qdev; in ql_configure_rings()
4145 if (i < qdev->rss_ring_count) { in ql_configure_rings()
4149 rx_ring->cq_len = qdev->rx_ring_size; in ql_configure_rings()
4166 rx_ring->cq_len = qdev->tx_ring_size; in ql_configure_rings()
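
The ring arithmetic in ql_configure_rings() is the key to the rest of the listing: one RSS (inbound) completion ring and one TX ring per CPU, plus one outbound completion ring per TX ring, so rx_ring_count = rss_ring_count + tx_ring_count and each TX ring's cq_id points past the RSS rings. A sketch; the cpu_cnt computation, the MAX_CPUS cap and the LARGE_BUFFER_* names are assumptions, and most per-ring field setup is elided.

static int ql_configure_rings(struct ql_adapter *qdev)
{
        unsigned int cpu_cnt = min_t(unsigned int, MAX_CPUS, num_online_cpus());
        unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
                LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
        int i;

        qdev->lbq_buf_order = get_order(lbq_buf_len);

        /* One interrupt vector and one RSS ring per CPU. */
        qdev->intr_count = cpu_cnt;
        ql_enable_msix(qdev);
        qdev->rss_ring_count = qdev->intr_count;
        qdev->tx_ring_count = cpu_cnt;
        qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

        for (i = 0; i < qdev->tx_ring_count; i++) {
                struct tx_ring *tx_ring = &qdev->tx_ring[i];

                tx_ring->qdev = qdev;
                tx_ring->wq_len = qdev->tx_ring_size;
                /* Outbound completion rings follow the RSS rings in rx_ring[]. */
                tx_ring->cq_id = qdev->rss_ring_count + i;
        }

        for (i = 0; i < qdev->rx_ring_count; i++) {
                struct rx_ring *rx_ring = &qdev->rx_ring[i];

                rx_ring->qdev = qdev;
                if (i < qdev->rss_ring_count)
                        rx_ring->cq_len = qdev->rx_ring_size;   /* inbound/RSS ring */
                else
                        rx_ring->cq_len = qdev->tx_ring_size;   /* outbound completion ring */
        }
        return 0;
}
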
4184 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_open() local
4186 err = ql_adapter_reset(qdev); in qlge_open()
4190 err = ql_configure_rings(qdev); in qlge_open()
4194 err = ql_get_adapter_resources(qdev); in qlge_open()
4198 err = ql_adapter_up(qdev); in qlge_open()
4205 ql_release_adapter_resources(qdev); in qlge_open()
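
qlge_open() is a thin sequence over the helpers above: reset, size the rings, claim memory and IRQs, then bring the adapter up, releasing the resources again if anything past allocation fails. Sketch under the same qlge.h assumptions:

static int qlge_open(struct net_device *ndev)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        int err;

        err = ql_adapter_reset(qdev);
        if (err)
                return err;

        err = ql_configure_rings(qdev);
        if (err)
                return err;

        err = ql_get_adapter_resources(qdev);
        if (err)
                goto error_up;

        err = ql_adapter_up(qdev);
        if (err)
                goto error_up;

        return err;

error_up:
        ql_release_adapter_resources(qdev);
        return err;
}
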
4209 static int ql_change_rx_buffers(struct ql_adapter *qdev) in ql_change_rx_buffers() argument
4216 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { in ql_change_rx_buffers()
4219 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { in ql_change_rx_buffers()
4220 netif_err(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4226 netif_err(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4232 status = ql_adapter_down(qdev); in ql_change_rx_buffers()
4237 lbq_buf_len = (qdev->ndev->mtu > 1500) ? in ql_change_rx_buffers()
4239 qdev->lbq_buf_order = get_order(lbq_buf_len); in ql_change_rx_buffers()
4241 for (i = 0; i < qdev->rss_ring_count; i++) { in ql_change_rx_buffers()
4242 rx_ring = &qdev->rx_ring[i]; in ql_change_rx_buffers()
4247 status = ql_adapter_up(qdev); in ql_change_rx_buffers()
4253 netif_alert(qdev, ifup, qdev->ndev, in ql_change_rx_buffers()
4255 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_change_rx_buffers()
4256 dev_close(qdev->ndev); in ql_change_rx_buffers()
4262 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_change_mtu() local
4266 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); in qlge_change_mtu()
4268 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); in qlge_change_mtu()
4272 queue_delayed_work(qdev->workqueue, in qlge_change_mtu()
4273 &qdev->mpi_port_cfg_work, 3*HZ); in qlge_change_mtu()
4277 if (!netif_running(qdev->ndev)) { in qlge_change_mtu()
4281 status = ql_change_rx_buffers(qdev); in qlge_change_mtu()
4283 netif_err(qdev, ifup, qdev->ndev, in qlge_change_mtu()
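
An MTU change first queues mpi_port_cfg_work so the MPI firmware learns the new frame size, and only touches the data path if the interface is running, in which case ql_change_rx_buffers() performs the down/up cycle shown above to reallocate the large RX buffers at the new order. A sketch; the driver's exact 1500/9000 MTU validation is simplified here and the log strings are assumptions.

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;

        if (new_mtu > 1500)     /* simplified: the driver accepts only normal or jumbo */
                netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
        else
                netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");

        /* Let the MPI firmware pick up the new frame size a little later. */
        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 3 * HZ);

        ndev->mtu = new_mtu;
        if (!netif_running(qdev->ndev))
                return 0;

        /* A running interface needs its large RX buffers resized, which
         * means a full down/up cycle in ql_change_rx_buffers(). */
        status = ql_change_rx_buffers(qdev);
        if (status)
                netif_err(qdev, ifup, qdev->ndev, "Changing MTU failed.\n");
        return status;
}
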
4293 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_get_stats() local
4294 struct rx_ring *rx_ring = &qdev->rx_ring[0]; in qlge_get_stats()
4295 struct tx_ring *tx_ring = &qdev->tx_ring[0]; in qlge_get_stats()
4301 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { in qlge_get_stats()
4316 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { in qlge_get_stats()
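
qlge_get_stats() walks every RSS ring and every TX ring and folds their software counters into ndev->stats. Only the loop heads appear in the listing; the counter field names and the net_device_stats return convention below are assumptions used to illustrate the aggregation.

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        struct rx_ring *rx_ring = &qdev->rx_ring[0];
        struct tx_ring *tx_ring = &qdev->tx_ring[0];
        unsigned long pkts = 0, bytes = 0, errors = 0;
        int i;

        for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
                pkts += rx_ring->rx_packets;    /* assumed per-ring counter names */
                bytes += rx_ring->rx_bytes;
                errors += rx_ring->rx_errors;
        }
        ndev->stats.rx_packets = pkts;
        ndev->stats.rx_bytes = bytes;
        ndev->stats.rx_errors = errors;

        pkts = bytes = errors = 0;
        for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
                pkts += tx_ring->tx_packets;
                bytes += tx_ring->tx_bytes;
                errors += tx_ring->tx_errors;
        }
        ndev->stats.tx_packets = pkts;
        ndev->stats.tx_bytes = bytes;
        ndev->stats.tx_errors = errors;

        return &ndev->stats;
}
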
4329 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_set_multicast_list() local
4333 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); in qlge_set_multicast_list()
4341 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { in qlge_set_multicast_list()
4343 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { in qlge_set_multicast_list()
4344 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4347 set_bit(QL_PROMISCUOUS, &qdev->flags); in qlge_set_multicast_list()
4351 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { in qlge_set_multicast_list()
4353 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { in qlge_set_multicast_list()
4354 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4357 clear_bit(QL_PROMISCUOUS, &qdev->flags); in qlge_set_multicast_list()
4368 if (!test_bit(QL_ALLMULTI, &qdev->flags)) { in qlge_set_multicast_list()
4370 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { in qlge_set_multicast_list()
4371 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4374 set_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4378 if (test_bit(QL_ALLMULTI, &qdev->flags)) { in qlge_set_multicast_list()
4380 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { in qlge_set_multicast_list()
4381 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4384 clear_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4390 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4395 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, in qlge_set_multicast_list()
4397 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4399 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4404 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_multicast_list()
4406 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { in qlge_set_multicast_list()
4407 netif_err(qdev, hw, qdev->ndev, in qlge_set_multicast_list()
4410 set_bit(QL_ALLMULTI, &qdev->flags); in qlge_set_multicast_list()
4414 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); in qlge_set_multicast_list()
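
The promiscuous/all-multicast handling is a cached-flag pattern: a routing-index slot is rewritten only when the requested state differs from the QL_PROMISCUOUS / QL_ALLMULTI bit, all under the SEM_RT_IDX hardware semaphore. A sketch of just the promiscuous half; the routing helper is ql_set_routing_reg() in the driver (its name is truncated out of the listing) and the log strings are assumptions.

        /* Excerpt-style sketch from the void ndo_set_rx_mode handler. */
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;

        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return;
        if (ndev->flags & IFF_PROMISC) {
                if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1))
                                netif_err(qdev, hw, qdev->ndev,
                                          "Failed to set promiscuous mode.\n");
                        else
                                set_bit(QL_PROMISCUOUS, &qdev->flags);
                }
        } else {
                if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
                        if (ql_set_routing_reg
                            (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0))
                                netif_err(qdev, hw, qdev->ndev,
                                          "Failed to clear promiscuous mode.\n");
                        else
                                clear_bit(QL_PROMISCUOUS, &qdev->flags);
                }
        }
        /* ... ALLMULTI and per-address CAM programming follow the same shape ... */
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
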
4419 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_set_mac_address() local
4427 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); in qlge_set_mac_address()
4429 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_mac_address()
4432 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, in qlge_set_mac_address()
4433 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); in qlge_set_mac_address()
4435 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); in qlge_set_mac_address()
4436 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); in qlge_set_mac_address()
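
qlge_set_mac_address() keeps a private copy of the address (so it can be re-programmed after a reset) and writes the CAM entry under SEM_MAC_ADDR_MASK, indexing the slot by PCI function. A sketch; the sockaddr validation and the copy into ndev->dev_addr are standard ndo_set_mac_address boilerplate assumed here.

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int status;

        if (!is_valid_ether_addr(addr->sa_data))        /* assumed validation */
                return -EADDRNOTAVAIL;
        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);  /* assumed copy */
        /* Keep a private copy so the address can be restored after reset. */
        memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        /* CAM slot is indexed by PCI function so each port keeps its own entry. */
        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
                                     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        if (status)
                netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        return status;
}
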
4442 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_tx_timeout() local
4443 ql_queue_asic_error(qdev); in qlge_tx_timeout()
4448 struct ql_adapter *qdev = in ql_asic_reset_work() local
4452 status = ql_adapter_down(qdev); in ql_asic_reset_work()
4456 status = ql_adapter_up(qdev); in ql_asic_reset_work()
4461 clear_bit(QL_ALLMULTI, &qdev->flags); in ql_asic_reset_work()
4462 clear_bit(QL_PROMISCUOUS, &qdev->flags); in ql_asic_reset_work()
4463 qlge_set_multicast_list(qdev->ndev); in ql_asic_reset_work()
4468 netif_alert(qdev, ifup, qdev->ndev, in ql_asic_reset_work()
4471 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_asic_reset_work()
4472 dev_close(qdev->ndev); in ql_asic_reset_work()
4493 static int ql_get_alt_pcie_func(struct ql_adapter *qdev) in ql_get_alt_pcie_func() argument
4499 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, in ql_get_alt_pcie_func()
4509 if (qdev->func == nic_func1) in ql_get_alt_pcie_func()
4510 qdev->alt_func = nic_func2; in ql_get_alt_pcie_func()
4511 else if (qdev->func == nic_func2) in ql_get_alt_pcie_func()
4512 qdev->alt_func = nic_func1; in ql_get_alt_pcie_func()
4519 static int ql_get_board_info(struct ql_adapter *qdev) in ql_get_board_info() argument
4522 qdev->func = in ql_get_board_info()
4523 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; in ql_get_board_info()
4524 if (qdev->func > 3) in ql_get_board_info()
4527 status = ql_get_alt_pcie_func(qdev); in ql_get_board_info()
4531 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1; in ql_get_board_info()
4532 if (qdev->port) { in ql_get_board_info()
4533 qdev->xg_sem_mask = SEM_XGMAC1_MASK; in ql_get_board_info()
4534 qdev->port_link_up = STS_PL1; in ql_get_board_info()
4535 qdev->port_init = STS_PI1; in ql_get_board_info()
4536 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; in ql_get_board_info()
4537 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; in ql_get_board_info()
4539 qdev->xg_sem_mask = SEM_XGMAC0_MASK; in ql_get_board_info()
4540 qdev->port_link_up = STS_PL0; in ql_get_board_info()
4541 qdev->port_init = STS_PI0; in ql_get_board_info()
4542 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; in ql_get_board_info()
4543 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; in ql_get_board_info()
4545 qdev->chip_rev_id = ql_read32(qdev, REV_ID); in ql_get_board_info()
4546 qdev->device_id = qdev->pdev->device; in ql_get_board_info()
4547 if (qdev->device_id == QLGE_DEVICE_ID_8012) in ql_get_board_info()
4548 qdev->nic_ops = &qla8012_nic_ops; in ql_get_board_info()
4549 else if (qdev->device_id == QLGE_DEVICE_ID_8000) in ql_get_board_info()
4550 qdev->nic_ops = &qla8000_nic_ops; in ql_get_board_info()
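
ql_get_board_info() derives everything port-specific from the STS function id: the lower-numbered NIC function of the pair is port 0, and the port selects the XGMAC semaphore mask, link/init status bits and MPI mailbox addresses, while the PCI device id selects the nic_ops table. A sketch; the -EIO/-ENODEV error values are assumptions.

static int ql_get_board_info(struct ql_adapter *qdev)
{
        int status;

        qdev->func =
            (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
        if (qdev->func > 3)
                return -EIO;            /* assumption: only functions 0-3 are valid */

        status = ql_get_alt_pcie_func(qdev);
        if (status)
                return status;

        /* The lower-numbered NIC function of the pair owns port 0. */
        qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
        if (qdev->port) {
                qdev->xg_sem_mask = SEM_XGMAC1_MASK;
                qdev->port_link_up = STS_PL1;
                qdev->port_init = STS_PI1;
                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
        } else {
                qdev->xg_sem_mask = SEM_XGMAC0_MASK;
                qdev->port_link_up = STS_PL0;
                qdev->port_init = STS_PI0;
                qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
                qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
        }
        qdev->chip_rev_id = ql_read32(qdev, REV_ID);
        qdev->device_id = qdev->pdev->device;
        if (qdev->device_id == QLGE_DEVICE_ID_8012)
                qdev->nic_ops = &qla8012_nic_ops;
        else if (qdev->device_id == QLGE_DEVICE_ID_8000)
                qdev->nic_ops = &qla8000_nic_ops;
        else
                status = -ENODEV;       /* assumption: unknown device id */
        return status;
}
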
4557 struct ql_adapter *qdev = netdev_priv(ndev); in ql_release_all() local
4559 if (qdev->workqueue) { in ql_release_all()
4560 destroy_workqueue(qdev->workqueue); in ql_release_all()
4561 qdev->workqueue = NULL; in ql_release_all()
4564 if (qdev->reg_base) in ql_release_all()
4565 iounmap(qdev->reg_base); in ql_release_all()
4566 if (qdev->doorbell_area) in ql_release_all()
4567 iounmap(qdev->doorbell_area); in ql_release_all()
4568 vfree(qdev->mpi_coredump); in ql_release_all()
4575 struct ql_adapter *qdev = netdev_priv(ndev); in ql_init_device() local
4578 memset((void *)qdev, 0, sizeof(*qdev)); in ql_init_device()
4585 qdev->ndev = ndev; in ql_init_device()
4586 qdev->pdev = pdev; in ql_init_device()
4604 set_bit(QL_DMA64, &qdev->flags); in ql_init_device()
4620 qdev->reg_base = in ql_init_device()
4623 if (!qdev->reg_base) { in ql_init_device()
4629 qdev->doorbell_area_size = pci_resource_len(pdev, 3); in ql_init_device()
4630 qdev->doorbell_area = in ql_init_device()
4633 if (!qdev->doorbell_area) { in ql_init_device()
4639 err = ql_get_board_info(qdev); in ql_init_device()
4645 qdev->msg_enable = netif_msg_init(debug, default_msg); in ql_init_device()
4646 spin_lock_init(&qdev->hw_lock); in ql_init_device()
4647 spin_lock_init(&qdev->stats_lock); in ql_init_device()
4650 qdev->mpi_coredump = in ql_init_device()
4652 if (qdev->mpi_coredump == NULL) { in ql_init_device()
4657 set_bit(QL_FRC_COREDUMP, &qdev->flags); in ql_init_device()
4660 err = qdev->nic_ops->get_flash(qdev); in ql_init_device()
4667 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); in ql_init_device()
4670 qdev->tx_ring_size = NUM_TX_RING_ENTRIES; in ql_init_device()
4671 qdev->rx_ring_size = NUM_RX_RING_ENTRIES; in ql_init_device()
4674 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; in ql_init_device()
4675 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; in ql_init_device()
4676 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; in ql_init_device()
4677 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; in ql_init_device()
4682 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, in ql_init_device()
4684 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); in ql_init_device()
4685 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); in ql_init_device()
4686 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); in ql_init_device()
4687 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); in ql_init_device()
4688 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); in ql_init_device()
4689 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); in ql_init_device()
4690 init_completion(&qdev->ide_completion); in ql_init_device()
4691 mutex_init(&qdev->mpi_mutex); in ql_init_device()
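
The tail of ql_init_device() concentrates all MPI/firmware handling on one ordered, reclaim-safe workqueue, with a delayed_work per event type plus the IDC completion and the MPI mutex. An excerpt-style sketch of just that block; the allocation-failure handling is an assumption.

        /* One ordered workqueue serializes every MPI/firmware event. */
        qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
                                                  ndev->name);
        if (!qdev->workqueue)
                return -ENOMEM;         /* assumption: failure path not in the listing */

        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
        INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
        init_completion(&qdev->ide_completion);
        mutex_init(&qdev->mpi_mutex);
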
4723 struct ql_adapter *qdev = from_timer(qdev, t, timer); in ql_timer() local
4726 var = ql_read32(qdev, STS); in ql_timer()
4727 if (pci_channel_offline(qdev->pdev)) { in ql_timer()
4728 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); in ql_timer()
4732 mod_timer(&qdev->timer, jiffies + (5*HZ)); in ql_timer()
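
ql_timer() is a deferrable watchdog: it samples STS every five seconds and stops rearming itself as soon as the PCI channel is reported offline, leaving recovery to the EEH handlers below. Sketch:

static void ql_timer(struct timer_list *t)
{
        struct ql_adapter *qdev = from_timer(qdev, t, timer);
        u32 var;

        var = ql_read32(qdev, STS);
        if (pci_channel_offline(qdev->pdev)) {
                /* EEH in progress: do not rearm, the error handlers take over. */
                netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
                return;
        }

        mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
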
4739 struct ql_adapter *qdev = NULL; in qlge_probe() local
4754 qdev = netdev_priv(ndev); in qlge_probe()
4771 if (test_bit(QL_DMA64, &qdev->flags)) in qlge_probe()
4777 ndev->tx_queue_len = qdev->tx_ring_size; in qlge_probe()
4802 timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); in qlge_probe()
4803 mod_timer(&qdev->timer, jiffies + (5*HZ)); in qlge_probe()
4804 ql_link_off(qdev); in qlge_probe()
4806 atomic_set(&qdev->lb_count, 0); in qlge_probe()
4824 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_remove() local
4825 del_timer_sync(&qdev->timer); in qlge_remove()
4826 ql_cancel_all_work_sync(qdev); in qlge_remove()
4837 struct ql_adapter *qdev = netdev_priv(ndev); in ql_eeh_close() local
4845 ql_cancel_all_work_sync(qdev); in ql_eeh_close()
4847 for (i = 0; i < qdev->rss_ring_count; i++) in ql_eeh_close()
4848 netif_napi_del(&qdev->rx_ring[i].napi); in ql_eeh_close()
4850 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_eeh_close()
4851 ql_tx_ring_clean(qdev); in ql_eeh_close()
4852 ql_free_rx_buffers(qdev); in ql_eeh_close()
4853 ql_release_adapter_resources(qdev); in ql_eeh_close()
4864 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_error_detected() local
4871 del_timer_sync(&qdev->timer); in qlge_io_error_detected()
4879 del_timer_sync(&qdev->timer); in qlge_io_error_detected()
4881 set_bit(QL_EEH_FATAL, &qdev->flags); in qlge_io_error_detected()
4898 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_slot_reset() local
4904 netif_err(qdev, ifup, qdev->ndev, in qlge_io_slot_reset()
4910 if (ql_adapter_reset(qdev)) { in qlge_io_slot_reset()
4911 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); in qlge_io_slot_reset()
4912 set_bit(QL_EEH_FATAL, &qdev->flags); in qlge_io_slot_reset()
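
In the EEH slot-reset callback, a failed ql_adapter_reset() marks the adapter QL_EEH_FATAL so the eventual qlge_close() (see the flag test above) skips the normal teardown. A sketch of qlge_io_slot_reset(); the pci_* recovery calls around the reset are standard AER boilerplate assumed here.

static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);

        pci_restore_state(pdev);                /* assumption: standard AER recovery steps */
        if (pci_enable_device(pdev)) {
                netif_err(qdev, ifup, qdev->ndev,
                          "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);

        if (ql_adapter_reset(qdev)) {
                netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
                set_bit(QL_EEH_FATAL, &qdev->flags);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}
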
4922 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_io_resume() local
4928 netif_err(qdev, ifup, qdev->ndev, in qlge_io_resume()
4933 netif_err(qdev, ifup, qdev->ndev, in qlge_io_resume()
4936 mod_timer(&qdev->timer, jiffies + (5*HZ)); in qlge_io_resume()
4949 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_suspend() local
4953 del_timer_sync(&qdev->timer); in qlge_suspend()
4956 err = ql_adapter_down(qdev); in qlge_suspend()
4961 ql_wol(qdev); in qlge_suspend()
4977 struct ql_adapter *qdev = netdev_priv(ndev); in qlge_resume() local
4984 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); in qlge_resume()
4993 err = ql_adapter_up(qdev); in qlge_resume()
4998 mod_timer(&qdev->timer, jiffies + (5*HZ)); in qlge_resume()