Lines Matching full:cp

111  * also, we need to make cp->lock finer-grained.
161 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
230 static void cas_set_link_modes(struct cas *cp);
232 static inline void cas_lock_tx(struct cas *cp) in cas_lock_tx() argument
237 spin_lock_nested(&cp->tx_lock[i], i); in cas_lock_tx()
248 #define cas_lock_all_save(cp, flags) \ argument
250 struct cas *xxxcp = (cp); \
255 static inline void cas_unlock_tx(struct cas *cp) in cas_unlock_tx() argument
260 spin_unlock(&cp->tx_lock[i - 1]); in cas_unlock_tx()
263 #define cas_unlock_all_restore(cp, flags) \ argument
265 struct cas *xxxcp = (cp); \
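The cas_lock_tx()/cas_unlock_tx() fragments above show the driver's per-ring TX lock discipline: acquire in ascending ring order with spin_lock_nested() so lockdep treats each ring's lock as its own subclass, then release in strict descending order. A minimal sketch of that pattern (the names and ring count here are illustrative, not the driver's):

#include <linux/spinlock.h>

#define N_TX_RINGS_SKETCH 4	/* hypothetical ring count */

/* each lock is spin_lock_init()'d at probe time */
static spinlock_t tx_lock_sketch[N_TX_RINGS_SKETCH];

static inline void lock_tx_all(void)
{
	int i;

	/* ascending order; distinct lockdep subclass per ring keeps
	 * nested same-class acquisition from tripping lockdep */
	for (i = 0; i < N_TX_RINGS_SKETCH; i++)
		spin_lock_nested(&tx_lock_sketch[i], i);
}

static inline void unlock_tx_all(void)
{
	int i;

	/* release in reverse order of acquisition */
	for (i = N_TX_RINGS_SKETCH; i > 0; i--)
		spin_unlock(&tx_lock_sketch[i - 1]);
}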
270 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument
274 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); in cas_disable_irq()
279 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_disable_irq()
292 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
296 writel(INTRN_MASK_CLEAR_ALL, cp->regs + in cas_disable_irq()
303 static inline void cas_mask_intr(struct cas *cp) in cas_mask_intr() argument
308 cas_disable_irq(cp, i); in cas_mask_intr()
311 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument
314 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); in cas_enable_irq()
318 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_enable_irq()
330 writel(INTRN_MASK_RX_EN, cp->regs + in cas_enable_irq()
340 static inline void cas_unmask_intr(struct cas *cp) in cas_unmask_intr() argument
345 cas_enable_irq(cp, i); in cas_unmask_intr()
348 static inline void cas_entropy_gather(struct cas *cp) in cas_entropy_gather() argument
351 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_gather()
354 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
355 readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
360 static inline void cas_entropy_reset(struct cas *cp) in cas_entropy_reset() argument
363 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_reset()
367 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_entropy_reset()
368 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); in cas_entropy_reset()
369 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); in cas_entropy_reset()
372 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) in cas_entropy_reset()
373 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; in cas_entropy_reset()
380 static u16 cas_phy_read(struct cas *cp, int reg) in cas_phy_read() argument
386 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_read()
389 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_read()
394 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_read()
401 static int cas_phy_write(struct cas *cp, int reg, u16 val) in cas_phy_write() argument
407 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_write()
411 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_write()
416 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_write()
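cas_phy_read() and cas_phy_write() both follow the standard MDIO frame handshake: compose a command word (opcode, PHY address, register), write it to REG_MIF_FRAME, then poll that same register until the hardware reports completion. A hedged sketch of the shared poll step; MDIO_DONE_BIT, MDIO_DATA_MASK, the 10 us delay, and the retry budget are placeholders, not the driver's constants:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define MDIO_DONE_BIT	0x10000	/* stand-in for the frame turnaround bit */
#define MDIO_DATA_MASK	0xffff

/* write the frame, then spin on the register until the PHY is done */
static int mdio_frame_poll(void __iomem *frame_reg, u32 cmd, int tries)
{
	u32 val;

	writel(cmd, frame_reg);
	while (tries-- > 0) {
		udelay(10);
		val = readl(frame_reg);
		if (val & MDIO_DONE_BIT)
			return val & MDIO_DATA_MASK;
	}
	return -ETIMEDOUT;
}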
423 static void cas_phy_powerup(struct cas *cp) in cas_phy_powerup() argument
425 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerup()
430 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerup()
433 static void cas_phy_powerdown(struct cas *cp) in cas_phy_powerdown() argument
435 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerdown()
440 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerdown()
443 /* cp->lock held. note: the last put_page will free the buffer */
444 static int cas_page_free(struct cas *cp, cas_page_t *page) in cas_page_free() argument
446 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size, in cas_page_free()
448 __free_pages(page->buffer, cp->page_order); in cas_page_free()
464 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) in cas_page_alloc() argument
474 page->buffer = alloc_pages(flags, cp->page_order); in cas_page_alloc()
477 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0, in cas_page_alloc()
478 cp->page_size, DMA_FROM_DEVICE); in cas_page_alloc()
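cas_page_alloc() pairs a high-order page allocation with a streaming DMA mapping in the device-to-CPU direction; cas_page_free() undoes both. A hedged sketch of that pairing, with the dma_mapping_error() check any modern caller would want (struct and function names are illustrative):

#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct rx_page_sketch {		/* stand-in for cas_page_t */
	struct page *buffer;
	dma_addr_t dma_addr;
};

static int rx_page_setup(struct device *dev, struct rx_page_sketch *p,
			 int order, size_t size, gfp_t flags)
{
	p->buffer = alloc_pages(flags, order);
	if (!p->buffer)
		return -ENOMEM;

	/* map the whole buffer for device writes (RX fill) */
	p->dma_addr = dma_map_page(dev, p->buffer, 0, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, p->dma_addr)) {
		__free_pages(p->buffer, order);
		return -ENOMEM;
	}
	return 0;
}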
487 static void cas_spare_init(struct cas *cp) in cas_spare_init() argument
489 spin_lock(&cp->rx_inuse_lock); in cas_spare_init()
490 INIT_LIST_HEAD(&cp->rx_inuse_list); in cas_spare_init()
491 spin_unlock(&cp->rx_inuse_lock); in cas_spare_init()
493 spin_lock(&cp->rx_spare_lock); in cas_spare_init()
494 INIT_LIST_HEAD(&cp->rx_spare_list); in cas_spare_init()
495 cp->rx_spares_needed = RX_SPARE_COUNT; in cas_spare_init()
496 spin_unlock(&cp->rx_spare_lock); in cas_spare_init()
500 static void cas_spare_free(struct cas *cp) in cas_spare_free() argument
506 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
507 list_splice_init(&cp->rx_spare_list, &list); in cas_spare_free()
508 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
510 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
519 spin_lock(&cp->rx_inuse_lock); in cas_spare_free()
520 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
521 spin_unlock(&cp->rx_inuse_lock); in cas_spare_free()
523 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
524 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
525 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
528 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
533 static void cas_spare_recover(struct cas *cp, const gfp_t flags) in cas_spare_recover() argument
544 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
545 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_recover()
546 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
567 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
568 if (cp->rx_spares_needed > 0) { in cas_spare_recover()
569 list_add(elem, &cp->rx_spare_list); in cas_spare_recover()
570 cp->rx_spares_needed--; in cas_spare_recover()
571 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
573 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
574 cas_page_free(cp, page); in cas_spare_recover()
580 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
581 list_splice(&list, &cp->rx_inuse_list); in cas_spare_recover()
582 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
585 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
586 needed = cp->rx_spares_needed; in cas_spare_recover()
587 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
595 cas_page_t *spare = cas_page_alloc(cp, flags); in cas_spare_recover()
602 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
603 list_splice(&list, &cp->rx_spare_list); in cas_spare_recover()
604 cp->rx_spares_needed -= i; in cas_spare_recover()
605 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
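cas_spare_recover() shows a common lock-minimizing refill shape: sample rx_spares_needed under rx_spare_lock, drop the lock to do the (possibly sleeping) allocations, then splice the whole batch back and shrink the deficit in a single critical section. A minimal sketch under those assumptions (types and names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct rx_page_stub {		/* stand-in for cas_page_t */
	struct list_head list;
};

static void refill_spares(spinlock_t *lock, struct list_head *spares,
			  int *needed, gfp_t flags)
{
	LIST_HEAD(list);
	int i, n;

	spin_lock(lock);
	n = *needed;		/* sample the deficit */
	spin_unlock(lock);

	for (i = 0; i < n; i++) {	/* allocate with the lock dropped */
		struct rx_page_stub *p = kzalloc(sizeof(*p), flags);
		if (!p)
			break;
		list_add(&p->list, &list);
	}

	spin_lock(lock);
	list_splice(&list, spares);	/* one-shot batch insert */
	*needed -= i;
	spin_unlock(lock);
}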
609 static cas_page_t *cas_page_dequeue(struct cas *cp) in cas_page_dequeue() argument
614 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
615 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
617 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
618 cas_spare_recover(cp, GFP_ATOMIC); in cas_page_dequeue()
619 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
620 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
621 netif_err(cp, rx_err, cp->dev, in cas_page_dequeue()
623 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
628 entry = cp->rx_spare_list.next; in cas_page_dequeue()
630 recover = ++cp->rx_spares_needed; in cas_page_dequeue()
631 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
636 atomic_inc(&cp->reset_task_pending); in cas_page_dequeue()
637 atomic_inc(&cp->reset_task_pending_spare); in cas_page_dequeue()
638 schedule_work(&cp->reset_task); in cas_page_dequeue()
640 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); in cas_page_dequeue()
641 schedule_work(&cp->reset_task); in cas_page_dequeue()
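The two reset-scheduling variants above (an atomic_inc() counter pair versus an atomic_set() of a reason code, before the same schedule_work()) are alternatives of one deferral idiom: record why a chip reset is wanted, then punt the heavy lifting to process context. A hedged sketch using a single flag word rather than the driver's counter pair; repeated requests coalesce because schedule_work() is a no-op while the item is still queued:

#include <linux/workqueue.h>
#include <linux/atomic.h>

static atomic_t reset_pending = ATOMIC_INIT(0);
static struct work_struct reset_work;	/* INIT_WORK()'d at probe time */

static void request_reset(int cause)
{
	atomic_or(cause, &reset_pending);	/* remember the cause */
	schedule_work(&reset_work);		/* defer to process context */
}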
648 static void cas_mif_poll(struct cas *cp, const int enable) in cas_mif_poll() argument
652 cfg = readl(cp->regs + REG_MIF_CFG); in cas_mif_poll()
655 if (cp->phy_type & CAS_PHY_MII_MDIO1) in cas_mif_poll()
662 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); in cas_mif_poll()
665 cp->regs + REG_MIF_MASK); in cas_mif_poll()
666 writel(cfg, cp->regs + REG_MIF_CFG); in cas_mif_poll()
669 /* Must be invoked under cp->lock */
670 static void cas_begin_auto_negotiation(struct cas *cp, in cas_begin_auto_negotiation() argument
677 int oldstate = cp->lstate; in cas_begin_auto_negotiation()
683 lcntl = cp->link_cntl; in cas_begin_auto_negotiation()
685 cp->link_cntl = BMCR_ANENABLE; in cas_begin_auto_negotiation()
688 cp->link_cntl = 0; in cas_begin_auto_negotiation()
690 cp->link_cntl |= BMCR_SPEED100; in cas_begin_auto_negotiation()
692 cp->link_cntl |= CAS_BMCR_SPEED1000; in cas_begin_auto_negotiation()
694 cp->link_cntl |= BMCR_FULLDPLX; in cas_begin_auto_negotiation()
697 changed = (lcntl != cp->link_cntl); in cas_begin_auto_negotiation()
700 if (cp->lstate == link_up) { in cas_begin_auto_negotiation()
701 netdev_info(cp->dev, "PCS link down\n"); in cas_begin_auto_negotiation()
704 netdev_info(cp->dev, "link configuration changed\n"); in cas_begin_auto_negotiation()
707 cp->lstate = link_down; in cas_begin_auto_negotiation()
708 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_begin_auto_negotiation()
709 if (!cp->hw_running) in cas_begin_auto_negotiation()
718 netif_carrier_off(cp->dev); in cas_begin_auto_negotiation()
725 atomic_inc(&cp->reset_task_pending); in cas_begin_auto_negotiation()
726 atomic_inc(&cp->reset_task_pending_all); in cas_begin_auto_negotiation()
727 schedule_work(&cp->reset_task); in cas_begin_auto_negotiation()
728 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
729 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
733 if (cp->phy_type & CAS_PHY_SERDES) { in cas_begin_auto_negotiation()
734 u32 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
736 if (cp->link_cntl & BMCR_ANENABLE) { in cas_begin_auto_negotiation()
738 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
740 if (cp->link_cntl & BMCR_FULLDPLX) in cas_begin_auto_negotiation()
743 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
745 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
746 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
749 cas_mif_poll(cp, 0); in cas_begin_auto_negotiation()
750 ctl = cas_phy_read(cp, MII_BMCR); in cas_begin_auto_negotiation()
753 ctl |= cp->link_cntl; in cas_begin_auto_negotiation()
756 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
758 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
760 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
761 cas_phy_write(cp, MII_BMCR, ctl); in cas_begin_auto_negotiation()
762 cas_mif_poll(cp, 1); in cas_begin_auto_negotiation()
765 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
766 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
769 /* Must be invoked under cp->lock. */
770 static int cas_reset_mii_phy(struct cas *cp) in cas_reset_mii_phy() argument
775 cas_phy_write(cp, MII_BMCR, BMCR_RESET); in cas_reset_mii_phy()
778 val = cas_phy_read(cp, MII_BMCR); in cas_reset_mii_phy()
786 static void cas_saturn_firmware_init(struct cas *cp) in cas_saturn_firmware_init() argument
792 if (PHY_NS_DP83065 != cp->phy_id) in cas_saturn_firmware_init()
795 err = request_firmware(&fw, fw_name, &cp->pdev->dev); in cas_saturn_firmware_init()
806 cp->fw_load_addr = fw->data[1] << 8 | fw->data[0]; in cas_saturn_firmware_init()
807 cp->fw_size = fw->size - 2; in cas_saturn_firmware_init()
808 cp->fw_data = vmalloc(cp->fw_size); in cas_saturn_firmware_init()
809 if (!cp->fw_data) in cas_saturn_firmware_init()
811 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); in cas_saturn_firmware_init()
816 static void cas_saturn_firmware_load(struct cas *cp) in cas_saturn_firmware_load() argument
820 if (!cp->fw_data) in cas_saturn_firmware_load()
823 cas_phy_powerdown(cp); in cas_saturn_firmware_load()
826 cas_phy_write(cp, DP83065_MII_MEM, 0x0); in cas_saturn_firmware_load()
829 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); in cas_saturn_firmware_load()
830 cas_phy_write(cp, DP83065_MII_REGD, 0xbd); in cas_saturn_firmware_load()
831 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); in cas_saturn_firmware_load()
832 cas_phy_write(cp, DP83065_MII_REGD, 0x82); in cas_saturn_firmware_load()
833 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); in cas_saturn_firmware_load()
834 cas_phy_write(cp, DP83065_MII_REGD, 0x0); in cas_saturn_firmware_load()
835 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); in cas_saturn_firmware_load()
836 cas_phy_write(cp, DP83065_MII_REGD, 0x39); in cas_saturn_firmware_load()
839 cas_phy_write(cp, DP83065_MII_MEM, 0x1); in cas_saturn_firmware_load()
840 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); in cas_saturn_firmware_load()
841 for (i = 0; i < cp->fw_size; i++) in cas_saturn_firmware_load()
842 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); in cas_saturn_firmware_load()
845 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); in cas_saturn_firmware_load()
846 cas_phy_write(cp, DP83065_MII_REGD, 0x1); in cas_saturn_firmware_load()
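cas_saturn_firmware_init() and cas_saturn_firmware_load() split the work: init fetches the image with request_firmware(), reads a 16-bit little-endian load address out of the first two bytes, and caches the remainder; load later streams the cached bytes to the DP83065 one register write at a time. A hedged sketch of the fetch-and-cache half (error handling condensed, names illustrative):

#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

static int cache_phy_firmware(struct device *dev, const char *name,
			      u16 *load_addr, u8 **data, size_t *size)
{
	const struct firmware *fw;
	int err = request_firmware(&fw, name, dev);

	if (err)
		return err;
	if (fw->size < 2) {		/* need at least the load address */
		release_firmware(fw);
		return -EINVAL;
	}

	*load_addr = fw->data[1] << 8 | fw->data[0];	/* little-endian */
	*size = fw->size - 2;
	*data = vmalloc(*size);
	if (!*data) {
		release_firmware(fw);
		return -ENOMEM;
	}
	memcpy(*data, &fw->data[2], *size);
	release_firmware(fw);		/* cached copy outlives the fw blob */
	return 0;
}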
851 static void cas_phy_init(struct cas *cp) in cas_phy_init() argument
856 if (CAS_PHY_MII(cp->phy_type)) { in cas_phy_init()
858 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
860 cas_mif_poll(cp, 0); in cas_phy_init()
861 cas_reset_mii_phy(cp); /* take out of isolate mode */ in cas_phy_init()
863 if (PHY_LUCENT_B0 == cp->phy_id) { in cas_phy_init()
865 cas_phy_write(cp, LUCENT_MII_REG, 0x8000); in cas_phy_init()
866 cas_phy_write(cp, MII_BMCR, 0x00f1); in cas_phy_init()
867 cas_phy_write(cp, LUCENT_MII_REG, 0x0); in cas_phy_init()
869 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { in cas_phy_init()
871 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); in cas_phy_init()
872 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); in cas_phy_init()
873 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); in cas_phy_init()
874 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); in cas_phy_init()
875 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); in cas_phy_init()
876 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
877 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); in cas_phy_init()
878 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
879 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); in cas_phy_init()
880 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); in cas_phy_init()
881 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); in cas_phy_init()
883 } else if (PHY_BROADCOM_5411 == cp->phy_id) { in cas_phy_init()
884 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
885 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
888 cas_phy_write(cp, BROADCOM_MII_REG4, in cas_phy_init()
892 } else if (cp->cas_flags & CAS_FLAG_SATURN) { in cas_phy_init()
893 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? in cas_phy_init()
895 cp->regs + REG_SATURN_PCFG); in cas_phy_init()
901 if (PHY_NS_DP83065 == cp->phy_id) { in cas_phy_init()
902 cas_saturn_firmware_load(cp); in cas_phy_init()
904 cas_phy_powerup(cp); in cas_phy_init()
908 val = cas_phy_read(cp, MII_BMCR); in cas_phy_init()
910 cas_phy_write(cp, MII_BMCR, val); in cas_phy_init()
913 cas_phy_write(cp, MII_ADVERTISE, in cas_phy_init()
914 cas_phy_read(cp, MII_ADVERTISE) | in cas_phy_init()
920 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_phy_init()
924 val = cas_phy_read(cp, CAS_MII_1000_CTRL); in cas_phy_init()
927 cas_phy_write(cp, CAS_MII_1000_CTRL, val); in cas_phy_init()
936 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
939 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_phy_init()
940 writel(0, cp->regs + REG_SATURN_PCFG); in cas_phy_init()
943 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
945 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
950 if ((readl(cp->regs + REG_PCS_MII_CTRL) & in cas_phy_init()
955 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", in cas_phy_init()
956 readl(cp->regs + REG_PCS_STATE_MACHINE)); in cas_phy_init()
961 writel(0x0, cp->regs + REG_PCS_CFG); in cas_phy_init()
964 val = readl(cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
968 writel(val, cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
971 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); in cas_phy_init()
975 cp->regs + REG_PCS_SERDES_CTRL); in cas_phy_init()
980 static int cas_pcs_link_check(struct cas *cp) in cas_pcs_link_check() argument
989 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
991 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
999 netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); in cas_pcs_link_check()
1004 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); in cas_pcs_link_check()
1012 if (cp->lstate != link_up) { in cas_pcs_link_check()
1013 if (cp->opened) { in cas_pcs_link_check()
1014 cp->lstate = link_up; in cas_pcs_link_check()
1015 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_pcs_link_check()
1017 cas_set_link_modes(cp); in cas_pcs_link_check()
1018 netif_carrier_on(cp->dev); in cas_pcs_link_check()
1021 } else if (cp->lstate == link_up) { in cas_pcs_link_check()
1022 cp->lstate = link_down; in cas_pcs_link_check()
1024 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1025 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1039 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1040 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1041 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1043 cp->link_transition = LINK_TRANSITION_ON_FAILURE; in cas_pcs_link_check()
1045 netif_carrier_off(cp->dev); in cas_pcs_link_check()
1046 if (cp->opened) in cas_pcs_link_check()
1047 netif_info(cp, link, cp->dev, "PCS link down\n"); in cas_pcs_link_check()
1057 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { in cas_pcs_link_check()
1059 stat = readl(cp->regs + REG_PCS_SERDES_STATE); in cas_pcs_link_check()
1063 } else if (cp->lstate == link_down) { in cas_pcs_link_check()
1065 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1066 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1073 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1074 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1075 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1077 cp->link_transition = LINK_TRANSITION_STILL_FAILED; in cas_pcs_link_check()
1085 struct cas *cp, u32 status) in cas_pcs_interrupt() argument
1087 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); in cas_pcs_interrupt()
1091 return cas_pcs_link_check(cp); in cas_pcs_interrupt()
1095 struct cas *cp, u32 status) in cas_txmac_interrupt() argument
1097 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); in cas_txmac_interrupt()
1102 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_txmac_interrupt()
1112 spin_lock(&cp->stat_lock[0]); in cas_txmac_interrupt()
1115 cp->net_stats[0].tx_fifo_errors++; in cas_txmac_interrupt()
1120 cp->net_stats[0].tx_errors++; in cas_txmac_interrupt()
1127 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1130 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1131 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1135 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1136 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1138 spin_unlock(&cp->stat_lock[0]); in cas_txmac_interrupt()
1146 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) in cas_load_firmware() argument
1154 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); in cas_load_firmware()
1158 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); in cas_load_firmware()
1167 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); in cas_load_firmware()
1173 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); in cas_load_firmware()
1179 static void cas_init_rx_dma(struct cas *cp) in cas_init_rx_dma() argument
1181 u64 desc_dma = cp->block_dvma; in cas_init_rx_dma()
1190 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ in cas_init_rx_dma()
1192 writel(val, cp->regs + REG_RX_CFG); in cas_init_rx_dma()
1194 val = (unsigned long) cp->init_rxds[0] - in cas_init_rx_dma()
1195 (unsigned long) cp->init_block; in cas_init_rx_dma()
1196 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); in cas_init_rx_dma()
1197 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); in cas_init_rx_dma()
1198 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_init_rx_dma()
1200 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1204 val = (unsigned long) cp->init_rxds[1] - in cas_init_rx_dma()
1205 (unsigned long) cp->init_block; in cas_init_rx_dma()
1206 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); in cas_init_rx_dma()
1207 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1209 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + in cas_init_rx_dma()
1214 val = (unsigned long) cp->init_rxcs[0] - in cas_init_rx_dma()
1215 (unsigned long) cp->init_block; in cas_init_rx_dma()
1216 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); in cas_init_rx_dma()
1217 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); in cas_init_rx_dma()
1219 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1222 val = (unsigned long) cp->init_rxcs[i] - in cas_init_rx_dma()
1223 (unsigned long) cp->init_block; in cas_init_rx_dma()
1224 writel((desc_dma + val) >> 32, cp->regs + in cas_init_rx_dma()
1226 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1235 readl(cp->regs + REG_INTR_STATUS_ALIAS); in cas_init_rx_dma()
1236 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); in cas_init_rx_dma()
1237 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1239 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); in cas_init_rx_dma()
1244 cp->regs + REG_PLUS_ALIASN_CLEAR(1)); in cas_init_rx_dma()
1248 cp->regs + REG_PLUS_ALIASN_CLEAR(i)); in cas_init_rx_dma()
1253 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1255 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1256 writel(val, cp->regs + REG_RX_PAUSE_THRESH); in cas_init_rx_dma()
1260 writel(i, cp->regs + REG_RX_TABLE_ADDR); in cas_init_rx_dma()
1261 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); in cas_init_rx_dma()
1262 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); in cas_init_rx_dma()
1263 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); in cas_init_rx_dma()
1267 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); in cas_init_rx_dma()
1268 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); in cas_init_rx_dma()
1274 writel(val, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1276 writel(0x0, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1286 writel(val, cp->regs + REG_RX_AE_THRESH); in cas_init_rx_dma()
1287 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1289 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); in cas_init_rx_dma()
1295 writel(0x0, cp->regs + REG_RX_RED); in cas_init_rx_dma()
1299 if (cp->page_size == 0x1000) in cas_init_rx_dma()
1301 else if (cp->page_size == 0x2000) in cas_init_rx_dma()
1303 else if (cp->page_size == 0x4000) in cas_init_rx_dma()
1307 size = cp->dev->mtu + 64; in cas_init_rx_dma()
1308 if (size > cp->page_size) in cas_init_rx_dma()
1309 size = cp->page_size; in cas_init_rx_dma()
1320 cp->mtu_stride = 1 << (i + 10); in cas_init_rx_dma()
1323 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); in cas_init_rx_dma()
1325 writel(val, cp->regs + REG_RX_PAGE_SIZE); in cas_init_rx_dma()
1334 writel(val, cp->regs + REG_HP_CFG); in cas_init_rx_dma()
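The page-size programming near the end of cas_init_rx_dma() reduces to a small calculation: the MTU buffer stride is the smallest power-of-two bucket that holds mtu + 64, clamped to the page size, with the encoded index i satisfying stride == 1 << (i + 10), visible above in the mtu_stride and MTU_COUNT lines. A hedged reconstruction of that arithmetic (the bucket thresholds are inferred, not shown in the listing):

static unsigned int mtu_stride_sketch(unsigned int mtu,
				      unsigned int page_size)
{
	unsigned int size = mtu + 64;	/* frame plus offset slack */
	unsigned int i;

	if (size > page_size)		/* constrain to the page size */
		size = page_size;

	for (i = 0; i < 3; i++)		/* buckets: 1K, 2K, 4K, else 8K */
		if (size <= (0x400u << i))
			break;

	return 1u << (i + 10);
}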
1347 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) in cas_page_spare() argument
1349 cas_page_t *page = cp->rx_pages[1][index]; in cas_page_spare()
1355 new = cas_page_dequeue(cp); in cas_page_spare()
1357 spin_lock(&cp->rx_inuse_lock); in cas_page_spare()
1358 list_add(&page->list, &cp->rx_inuse_list); in cas_page_spare()
1359 spin_unlock(&cp->rx_inuse_lock); in cas_page_spare()
1365 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument
1368 cas_page_t **page0 = cp->rx_pages[0]; in cas_page_swap()
1369 cas_page_t **page1 = cp->rx_pages[1]; in cas_page_swap()
1373 cas_page_t *new = cas_page_spare(cp, index); in cas_page_swap()
1383 static void cas_clean_rxds(struct cas *cp) in cas_clean_rxds() argument
1386 struct cas_rx_desc *rxd = cp->init_rxds[0]; in cas_clean_rxds()
1392 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { in cas_clean_rxds()
1400 cas_page_t *page = cas_page_swap(cp, 0, i); in cas_clean_rxds()
1406 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; in cas_clean_rxds()
1407 cp->rx_last[0] = 0; in cas_clean_rxds()
1408 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); in cas_clean_rxds()
1411 static void cas_clean_rxcs(struct cas *cp) in cas_clean_rxcs() argument
1416 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1417 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1419 struct cas_rx_comp *rxc = cp->init_rxcs[i]; in cas_clean_rxcs()
1433 static int cas_rxmac_reset(struct cas *cp) in cas_rxmac_reset() argument
1435 struct net_device *dev = cp->dev; in cas_rxmac_reset()
1440 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_rxmac_reset()
1442 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) in cas_rxmac_reset()
1452 writel(0, cp->regs + REG_RX_CFG); in cas_rxmac_reset()
1454 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) in cas_rxmac_reset()
1466 writel(SW_RESET_RX, cp->regs + REG_SW_RESET); in cas_rxmac_reset()
1468 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) in cas_rxmac_reset()
1478 cas_clean_rxds(cp); in cas_rxmac_reset()
1479 cas_clean_rxcs(cp); in cas_rxmac_reset()
1482 cas_init_rx_dma(cp); in cas_rxmac_reset()
1485 val = readl(cp->regs + REG_RX_CFG); in cas_rxmac_reset()
1486 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); in cas_rxmac_reset()
1487 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); in cas_rxmac_reset()
1488 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_rxmac_reset()
1489 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_rxmac_reset()
1494 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, in cas_rxmac_interrupt() argument
1497 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); in cas_rxmac_interrupt()
1502 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); in cas_rxmac_interrupt()
1505 spin_lock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1507 cp->net_stats[0].rx_frame_errors += 0x10000; in cas_rxmac_interrupt()
1510 cp->net_stats[0].rx_crc_errors += 0x10000; in cas_rxmac_interrupt()
1513 cp->net_stats[0].rx_length_errors += 0x10000; in cas_rxmac_interrupt()
1516 cp->net_stats[0].rx_over_errors++; in cas_rxmac_interrupt()
1517 cp->net_stats[0].rx_fifo_errors++; in cas_rxmac_interrupt()
1523 spin_unlock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1527 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, in cas_mac_interrupt() argument
1530 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); in cas_mac_interrupt()
1535 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_mac_interrupt()
1543 cp->pause_entered++; in cas_mac_interrupt()
1546 cp->pause_last_time_recvd = (stat >> 16); in cas_mac_interrupt()
1552 /* Must be invoked under cp->lock. */
1553 static inline int cas_mdio_link_not_up(struct cas *cp) in cas_mdio_link_not_up() argument
1557 switch (cp->lstate) { in cas_mdio_link_not_up()
1559 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); in cas_mdio_link_not_up()
1560 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); in cas_mdio_link_not_up()
1561 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1562 cp->lstate = link_force_ok; in cas_mdio_link_not_up()
1563 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1567 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1574 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_mdio_link_not_up()
1576 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1577 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1578 cp->lstate = link_force_try; in cas_mdio_link_not_up()
1579 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1584 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1585 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1589 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1599 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1610 /* must be invoked with cp->lock held */
1611 static int cas_mii_link_check(struct cas *cp, const u16 bmsr) in cas_mii_link_check() argument
1621 if ((cp->lstate == link_force_try) && in cas_mii_link_check()
1622 (cp->link_cntl & BMCR_ANENABLE)) { in cas_mii_link_check()
1623 cp->lstate = link_force_ret; in cas_mii_link_check()
1624 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mii_link_check()
1625 cas_mif_poll(cp, 0); in cas_mii_link_check()
1626 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); in cas_mii_link_check()
1627 cp->timer_ticks = 5; in cas_mii_link_check()
1628 if (cp->opened) in cas_mii_link_check()
1629 netif_info(cp, link, cp->dev, in cas_mii_link_check()
1631 cas_phy_write(cp, MII_BMCR, in cas_mii_link_check()
1632 cp->link_fcntl | BMCR_ANENABLE | in cas_mii_link_check()
1634 cas_mif_poll(cp, 1); in cas_mii_link_check()
1636 } else if (cp->lstate != link_up) { in cas_mii_link_check()
1637 cp->lstate = link_up; in cas_mii_link_check()
1638 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_mii_link_check()
1640 if (cp->opened) { in cas_mii_link_check()
1641 cas_set_link_modes(cp); in cas_mii_link_check()
1642 netif_carrier_on(cp->dev); in cas_mii_link_check()
1652 if (cp->lstate == link_up) { in cas_mii_link_check()
1653 cp->lstate = link_down; in cas_mii_link_check()
1654 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_mii_link_check()
1656 netif_carrier_off(cp->dev); in cas_mii_link_check()
1657 if (cp->opened) in cas_mii_link_check()
1658 netif_info(cp, link, cp->dev, "Link down\n"); in cas_mii_link_check()
1661 } else if (++cp->timer_ticks > 10) in cas_mii_link_check()
1662 cas_mdio_link_not_up(cp); in cas_mii_link_check()
1667 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, in cas_mif_interrupt() argument
1670 u32 stat = readl(cp->regs + REG_MIF_STATUS); in cas_mif_interrupt()
1678 return cas_mii_link_check(cp, bmsr); in cas_mif_interrupt()
1681 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, in cas_pci_interrupt() argument
1684 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); in cas_pci_interrupt()
1690 stat, readl(cp->regs + REG_BIM_DIAG)); in cas_pci_interrupt()
1694 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) in cas_pci_interrupt()
1713 pci_errs = pci_status_get_and_clear_errors(cp->pdev); in cas_pci_interrupt()
1739 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, in cas_abnormal_irq() argument
1744 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1746 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1747 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1748 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1754 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1756 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1757 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1758 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1763 if (cas_pcs_interrupt(dev, cp, status)) in cas_abnormal_irq()
1768 if (cas_txmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1773 if (cas_rxmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1778 if (cas_mac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1783 if (cas_mif_interrupt(dev, cp, status)) in cas_abnormal_irq()
1788 if (cas_pci_interrupt(dev, cp, status)) in cas_abnormal_irq()
1795 atomic_inc(&cp->reset_task_pending); in cas_abnormal_irq()
1796 atomic_inc(&cp->reset_task_pending_all); in cas_abnormal_irq()
1798 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1800 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_abnormal_irq()
1802 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1812 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, in cas_calc_tabort() argument
1817 if (CAS_TABORT(cp) == 1) in cas_calc_tabort()
1824 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) in cas_tx_ringN() argument
1828 struct net_device *dev = cp->dev; in cas_tx_ringN()
1831 spin_lock(&cp->tx_lock[ring]); in cas_tx_ringN()
1832 txds = cp->init_txds[ring]; in cas_tx_ringN()
1833 skbs = cp->tx_skbs[ring]; in cas_tx_ringN()
1834 entry = cp->tx_old[ring]; in cas_tx_ringN()
1851 + cp->tx_tiny_use[ring][entry].nbufs + 1; in cas_tx_ringN()
1855 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, in cas_tx_ringN()
1859 cp->tx_tiny_use[ring][entry].nbufs = 0; in cas_tx_ringN()
1867 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_tx_ringN()
1872 if (cp->tx_tiny_use[ring][entry].used) { in cas_tx_ringN()
1873 cp->tx_tiny_use[ring][entry].used = 0; in cas_tx_ringN()
1878 spin_lock(&cp->stat_lock[ring]); in cas_tx_ringN()
1879 cp->net_stats[ring].tx_packets++; in cas_tx_ringN()
1880 cp->net_stats[ring].tx_bytes += skb->len; in cas_tx_ringN()
1881 spin_unlock(&cp->stat_lock[ring]); in cas_tx_ringN()
1884 cp->tx_old[ring] = entry; in cas_tx_ringN()
1891 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) in cas_tx_ringN()
1893 spin_unlock(&cp->tx_lock[ring]); in cas_tx_ringN()
1896 static void cas_tx(struct net_device *dev, struct cas *cp, in cas_tx() argument
1901 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); in cas_tx()
1903 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_tx()
1914 limit = readl(cp->regs + REG_TX_COMPN(ring)); in cas_tx()
1916 if (cp->tx_old[ring] != limit) in cas_tx()
1917 cas_tx_ringN(cp, ring, limit); in cas_tx()
1922 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, in cas_rx_process_pkt() argument
1943 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); in cas_rx_process_pkt()
1954 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1960 i += cp->crc_size; in cas_rx_process_pkt()
1961 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
1965 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
1980 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1983 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
1985 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
1992 i += cp->crc_size; in cas_rx_process_pkt()
1993 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
2001 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2007 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2029 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2030 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2032 hlen + cp->crc_size, in cas_rx_process_pkt()
2034 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2036 hlen + cp->crc_size, in cas_rx_process_pkt()
2048 RX_USED_ADD(page, hlen + cp->crc_size); in cas_rx_process_pkt()
2051 if (cp->crc_size) { in cas_rx_process_pkt()
2062 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2064 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
2066 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
2073 i += cp->crc_size; in cas_rx_process_pkt()
2074 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
2078 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2083 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2091 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2092 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2094 dlen + cp->crc_size, in cas_rx_process_pkt()
2097 memcpy(p, addr, dlen + cp->crc_size); in cas_rx_process_pkt()
2098 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2100 dlen + cp->crc_size, in cas_rx_process_pkt()
2103 RX_USED_ADD(page, dlen + cp->crc_size); in cas_rx_process_pkt()
2106 if (cp->crc_size) { in cas_rx_process_pkt()
2114 if (cp->crc_size) { in cas_rx_process_pkt()
2116 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, in cas_rx_process_pkt()
2121 skb->protocol = eth_type_trans(skb, cp->dev); in cas_rx_process_pkt()
2145 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, in cas_rx_flow_pkt() argument
2149 struct sk_buff_head *flow = &cp->rx_flows[flowid]; in cas_rx_flow_pkt()
2166 static void cas_post_page(struct cas *cp, const int ring, const int index) in cas_post_page() argument
2171 entry = cp->rx_old[ring]; in cas_post_page()
2173 new = cas_page_swap(cp, ring, index); in cas_post_page()
2174 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); in cas_post_page()
2175 cp->init_rxds[ring][entry].index = in cas_post_page()
2180 cp->rx_old[ring] = entry; in cas_post_page()
2186 writel(entry, cp->regs + REG_RX_KICK); in cas_post_page()
2188 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_page()
2189 writel(entry, cp->regs + REG_PLUS_RX_KICK1); in cas_post_page()
2194 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) in cas_post_rxds_ringN() argument
2198 cas_page_t **page = cp->rx_pages[ring]; in cas_post_rxds_ringN()
2200 entry = cp->rx_old[ring]; in cas_post_rxds_ringN()
2202 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_post_rxds_ringN()
2212 cas_page_t *new = cas_page_dequeue(cp); in cas_post_rxds_ringN()
2217 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); in cas_post_rxds_ringN()
2218 if (!timer_pending(&cp->link_timer)) in cas_post_rxds_ringN()
2219 mod_timer(&cp->link_timer, jiffies + in cas_post_rxds_ringN()
2221 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2222 cp->rx_last[ring] = num ? num - released : 0; in cas_post_rxds_ringN()
2225 spin_lock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2226 list_add(&page[entry]->list, &cp->rx_inuse_list); in cas_post_rxds_ringN()
2227 spin_unlock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2228 cp->init_rxds[ring][entry].buffer = in cas_post_rxds_ringN()
2241 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2247 writel(cluster, cp->regs + REG_RX_KICK); in cas_post_rxds_ringN()
2249 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_rxds_ringN()
2250 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); in cas_post_rxds_ringN()
2267 static int cas_rx_ringN(struct cas *cp, int ring, int budget) in cas_rx_ringN() argument
2269 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; in cas_rx_ringN()
2273 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_rx_ringN()
2276 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); in cas_rx_ringN()
2278 entry = cp->rx_new[ring]; in cas_rx_ringN()
2304 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2305 cp->net_stats[ring].rx_errors++; in cas_rx_ringN()
2307 cp->net_stats[ring].rx_length_errors++; in cas_rx_ringN()
2309 cp->net_stats[ring].rx_crc_errors++; in cas_rx_ringN()
2310 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2314 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2315 ++cp->net_stats[ring].rx_dropped; in cas_rx_ringN()
2316 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2320 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); in cas_rx_ringN()
2333 cas_rx_flow_pkt(cp, words, skb); in cas_rx_ringN()
2336 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2337 cp->net_stats[ring].rx_packets++; in cas_rx_ringN()
2338 cp->net_stats[ring].rx_bytes += len; in cas_rx_ringN()
2339 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2349 cas_post_page(cp, dring, i); in cas_rx_ringN()
2356 cas_post_page(cp, dring, i); in cas_rx_ringN()
2363 cas_post_page(cp, dring, i); in cas_rx_ringN()
2374 cp->rx_new[ring] = entry; in cas_rx_ringN()
2377 netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); in cas_rx_ringN()
2384 struct cas *cp, int ring) in cas_post_rxcs_ringN() argument
2386 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; in cas_post_rxcs_ringN()
2389 last = cp->rx_cur[ring]; in cas_post_rxcs_ringN()
2390 entry = cp->rx_new[ring]; in cas_post_rxcs_ringN()
2391 netif_printk(cp, intr, KERN_DEBUG, dev, in cas_post_rxcs_ringN()
2393 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); in cas_post_rxcs_ringN()
2400 cp->rx_cur[ring] = last; in cas_post_rxcs_ringN()
2403 writel(last, cp->regs + REG_RX_COMP_TAIL); in cas_post_rxcs_ringN()
2404 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) in cas_post_rxcs_ringN()
2405 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); in cas_post_rxcs_ringN()
2415 struct cas *cp, const u32 status, in cas_handle_irqN() argument
2419 cas_post_rxcs_ringN(dev, cp, ring); in cas_handle_irqN()
2425 struct cas *cp = netdev_priv(dev); in cas_interruptN() local
2427 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; in cas_interruptN()
2428 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); in cas_interruptN()
2434 spin_lock_irqsave(&cp->lock, flags); in cas_interruptN()
2437 cas_mask_intr(cp); in cas_interruptN()
2438 napi_schedule(&cp->napi); in cas_interruptN()
2440 cas_rx_ringN(cp, ring, 0); in cas_interruptN()
2446 cas_handle_irqN(dev, cp, status, ring); in cas_interruptN()
2447 spin_unlock_irqrestore(&cp->lock, flags); in cas_interruptN()
2454 static inline void cas_handle_irq1(struct cas *cp, const u32 status) in cas_handle_irq1() argument
2459 cas_post_rxds_ringN(cp, 1, 0); in cas_handle_irq1()
2460 spin_lock(&cp->stat_lock[1]); in cas_handle_irq1()
2461 cp->net_stats[1].rx_dropped++; in cas_handle_irq1()
2462 spin_unlock(&cp->stat_lock[1]); in cas_handle_irq1()
2466 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - in cas_handle_irq1()
2470 cas_post_rxcs_ringN(cp, 1); in cas_handle_irq1()
2477 struct cas *cp = netdev_priv(dev); in cas_interrupt1() local
2479 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_interrupt1()
2485 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt1()
2488 cas_mask_intr(cp); in cas_interrupt1()
2489 napi_schedule(&cp->napi); in cas_interrupt1()
2491 cas_rx_ringN(cp, 1, 0); in cas_interrupt1()
2496 cas_handle_irq1(cp, status); in cas_interrupt1()
2497 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt1()
2503 struct cas *cp, const u32 status) in cas_handle_irq() argument
2507 cas_abnormal_irq(dev, cp, status); in cas_handle_irq()
2513 cas_post_rxds_ringN(cp, 0, 0); in cas_handle_irq()
2514 spin_lock(&cp->stat_lock[0]); in cas_handle_irq()
2515 cp->net_stats[0].rx_dropped++; in cas_handle_irq()
2516 spin_unlock(&cp->stat_lock[0]); in cas_handle_irq()
2518 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - in cas_handle_irq()
2523 cas_post_rxcs_ringN(dev, cp, 0); in cas_handle_irq()
2529 struct cas *cp = netdev_priv(dev); in cas_interrupt() local
2531 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_interrupt()
2536 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt()
2538 cas_tx(dev, cp, status); in cas_interrupt()
2544 cas_mask_intr(cp); in cas_interrupt()
2545 napi_schedule(&cp->napi); in cas_interrupt()
2547 cas_rx_ringN(cp, 0, 0); in cas_interrupt()
2553 cas_handle_irq(dev, cp, status); in cas_interrupt()
2554 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt()
2562 struct cas *cp = container_of(napi, struct cas, napi); in cas_poll() local
2563 struct net_device *dev = cp->dev; in cas_poll()
2565 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_poll()
2568 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2569 cas_tx(dev, cp, status); in cas_poll()
2570 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2584 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); in cas_poll()
2594 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2596 cas_handle_irq(dev, cp, status); in cas_poll()
2600 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_poll()
2602 cas_handle_irq1(cp, status); in cas_poll()
2608 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); in cas_poll()
2610 cas_handle_irqN(dev, cp, status, 2); in cas_poll()
2616 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); in cas_poll()
2618 cas_handle_irqN(dev, cp, status, 3); in cas_poll()
2621 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2624 cas_unmask_intr(cp); in cas_poll()
2633 struct cas *cp = netdev_priv(dev); in cas_netpoll() local
2635 cas_disable_irq(cp, 0); in cas_netpoll()
2636 cas_interrupt(cp->pdev->irq, dev); in cas_netpoll()
2637 cas_enable_irq(cp, 0); in cas_netpoll()
2659 struct cas *cp = netdev_priv(dev); in cas_tx_timeout() local
2662 if (!cp->hw_running) { in cas_tx_timeout()
2668 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_tx_timeout()
2671 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_tx_timeout()
2674 readl(cp->regs + REG_TX_CFG), in cas_tx_timeout()
2675 readl(cp->regs + REG_MAC_TX_STATUS), in cas_tx_timeout()
2676 readl(cp->regs + REG_MAC_TX_CFG), in cas_tx_timeout()
2677 readl(cp->regs + REG_TX_FIFO_PKT_CNT), in cas_tx_timeout()
2678 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), in cas_tx_timeout()
2679 readl(cp->regs + REG_TX_FIFO_READ_PTR), in cas_tx_timeout()
2680 readl(cp->regs + REG_TX_SM_1), in cas_tx_timeout()
2681 readl(cp->regs + REG_TX_SM_2)); in cas_tx_timeout()
2684 readl(cp->regs + REG_RX_CFG), in cas_tx_timeout()
2685 readl(cp->regs + REG_MAC_RX_STATUS), in cas_tx_timeout()
2686 readl(cp->regs + REG_MAC_RX_CFG)); in cas_tx_timeout()
2689 readl(cp->regs + REG_HP_STATE_MACHINE), in cas_tx_timeout()
2690 readl(cp->regs + REG_HP_STATUS0), in cas_tx_timeout()
2691 readl(cp->regs + REG_HP_STATUS1), in cas_tx_timeout()
2692 readl(cp->regs + REG_HP_STATUS2)); in cas_tx_timeout()
2695 atomic_inc(&cp->reset_task_pending); in cas_tx_timeout()
2696 atomic_inc(&cp->reset_task_pending_all); in cas_tx_timeout()
2697 schedule_work(&cp->reset_task); in cas_tx_timeout()
2699 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_tx_timeout()
2700 schedule_work(&cp->reset_task); in cas_tx_timeout()
2713 static void cas_write_txd(struct cas *cp, int ring, int entry, in cas_write_txd() argument
2716 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; in cas_write_txd()
2727 static inline void *tx_tiny_buf(struct cas *cp, const int ring, in tx_tiny_buf() argument
2730 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_buf()
2733 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, in tx_tiny_map() argument
2736 cp->tx_tiny_use[ring][tentry].nbufs++; in tx_tiny_map()
2737 cp->tx_tiny_use[ring][entry].used = 1; in tx_tiny_map()
2738 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_map()
2741 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, in cas_xmit_tx_ringN() argument
2744 struct net_device *dev = cp->dev; in cas_xmit_tx_ringN()
2751 spin_lock_irqsave(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2754 if (TX_BUFFS_AVAIL(cp, ring) <= in cas_xmit_tx_ringN()
2755 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { in cas_xmit_tx_ringN()
2757 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2772 entry = cp->tx_new[ring]; in cas_xmit_tx_ringN()
2773 cp->tx_skbs[ring][entry] = skb; in cas_xmit_tx_ringN()
2777 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data), in cas_xmit_tx_ringN()
2781 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); in cas_xmit_tx_ringN()
2784 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2789 tx_tiny_buf(cp, ring, entry), tabort); in cas_xmit_tx_ringN()
2790 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2791 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, in cas_xmit_tx_ringN()
2794 cas_write_txd(cp, ring, entry, mapping, len, ctrl | in cas_xmit_tx_ringN()
2803 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, in cas_xmit_tx_ringN()
2806 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); in cas_xmit_tx_ringN()
2811 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2816 memcpy(tx_tiny_buf(cp, ring, entry), in cas_xmit_tx_ringN()
2820 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2824 cas_write_txd(cp, ring, entry, mapping, len, ctrl, in cas_xmit_tx_ringN()
2829 cp->tx_new[ring] = entry; in cas_xmit_tx_ringN()
2830 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) in cas_xmit_tx_ringN()
2833 netif_printk(cp, tx_queued, KERN_DEBUG, dev, in cas_xmit_tx_ringN()
2835 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); in cas_xmit_tx_ringN()
2836 writel(entry, cp->regs + REG_TX_KICKN(ring)); in cas_xmit_tx_ringN()
2837 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2843 struct cas *cp = netdev_priv(dev); in cas_start_xmit() local
2850 if (skb_padto(skb, cp->min_frame_size)) in cas_start_xmit()
2856 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) in cas_start_xmit()
2861 static void cas_init_tx_dma(struct cas *cp) in cas_init_tx_dma() argument
2863 u64 desc_dma = cp->block_dvma; in cas_init_tx_dma()
2871 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); in cas_init_tx_dma()
2872 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); in cas_init_tx_dma()
2885 off = (unsigned long) cp->init_txds[i] - in cas_init_tx_dma()
2886 (unsigned long) cp->init_block; in cas_init_tx_dma()
2889 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); in cas_init_tx_dma()
2890 writel((desc_dma + off) & 0xffffffff, cp->regs + in cas_init_tx_dma()
2896 writel(val, cp->regs + REG_TX_CFG); in cas_init_tx_dma()
2902 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2903 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2904 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2905 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2907 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2908 writel(0x800, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2909 writel(0x800, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2910 writel(0x800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2914 /* Must be invoked under cp->lock. */
2915 static inline void cas_init_dma(struct cas *cp) in cas_init_dma() argument
2917 cas_init_tx_dma(cp); in cas_init_dma()
2918 cas_init_rx_dma(cp); in cas_init_dma()
2921 static void cas_process_mc_list(struct cas *cp) in cas_process_mc_list() argument
2929 netdev_for_each_mc_addr(ha, cp->dev) { in cas_process_mc_list()
2935 cp->regs + REG_MAC_ADDRN(i*3 + 0)); in cas_process_mc_list()
2937 cp->regs + REG_MAC_ADDRN(i*3 + 1)); in cas_process_mc_list()
2939 cp->regs + REG_MAC_ADDRN(i*3 + 2)); in cas_process_mc_list()
2952 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_process_mc_list()
2955 /* Must be invoked under cp->lock. */
2956 static u32 cas_setup_multicast(struct cas *cp) in cas_setup_multicast() argument
2961 if (cp->dev->flags & IFF_PROMISC) { in cas_setup_multicast()
2964 } else if (cp->dev->flags & IFF_ALLMULTI) { in cas_setup_multicast()
2966 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_setup_multicast()
2970 cas_process_mc_list(cp); in cas_setup_multicast()
2977 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
2978 static void cas_clear_mac_err(struct cas *cp) in cas_clear_mac_err() argument
2980 writel(0, cp->regs + REG_MAC_COLL_NORMAL); in cas_clear_mac_err()
2981 writel(0, cp->regs + REG_MAC_COLL_FIRST); in cas_clear_mac_err()
2982 writel(0, cp->regs + REG_MAC_COLL_EXCESS); in cas_clear_mac_err()
2983 writel(0, cp->regs + REG_MAC_COLL_LATE); in cas_clear_mac_err()
2984 writel(0, cp->regs + REG_MAC_TIMER_DEFER); in cas_clear_mac_err()
2985 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); in cas_clear_mac_err()
2986 writel(0, cp->regs + REG_MAC_RECV_FRAME); in cas_clear_mac_err()
2987 writel(0, cp->regs + REG_MAC_LEN_ERR); in cas_clear_mac_err()
2988 writel(0, cp->regs + REG_MAC_ALIGN_ERR); in cas_clear_mac_err()
2989 writel(0, cp->regs + REG_MAC_FCS_ERR); in cas_clear_mac_err()
2990 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); in cas_clear_mac_err()
2994 static void cas_mac_reset(struct cas *cp) in cas_mac_reset() argument
2999 writel(0x1, cp->regs + REG_MAC_TX_RESET); in cas_mac_reset()
3000 writel(0x1, cp->regs + REG_MAC_RX_RESET); in cas_mac_reset()
3005 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) in cas_mac_reset()
3013 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) in cas_mac_reset()
3018 if (readl(cp->regs + REG_MAC_TX_RESET) | in cas_mac_reset()
3019 readl(cp->regs + REG_MAC_RX_RESET)) in cas_mac_reset()
3020 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", in cas_mac_reset()
3021 readl(cp->regs + REG_MAC_TX_RESET), in cas_mac_reset()
3022 readl(cp->regs + REG_MAC_RX_RESET), in cas_mac_reset()
3023 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_mac_reset()
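cas_mac_reset() performs the classic self-clearing-bit handshake twice, once per MAC direction: write the reset bit, then poll the same register until the hardware clears it, reporting failure if the retry budget runs out. A minimal sketch of one handshake (the 10 us delay and retry budget are illustrative):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int reset_block(void __iomem *reg, int tries)
{
	writel(0x1, reg);		/* self-clearing reset bit */
	while (tries-- > 0) {
		if (readl(reg) == 0)	/* hardware finished the reset */
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}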
3027 /* Must be invoked under cp->lock. */
3028 static void cas_init_mac(struct cas *cp) in cas_init_mac() argument
3030 unsigned char *e = &cp->dev->dev_addr[0]; in cas_init_mac()
3032 cas_mac_reset(cp); in cas_init_mac()
3035 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); in cas_init_mac()
3041 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) in cas_init_mac()
3042 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); in cas_init_mac()
3045 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); in cas_init_mac()
3047 writel(0x00, cp->regs + REG_MAC_IPG0); in cas_init_mac()
3048 writel(0x08, cp->regs + REG_MAC_IPG1); in cas_init_mac()
3049 writel(0x04, cp->regs + REG_MAC_IPG2); in cas_init_mac()
3052 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_init_mac()
3055 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); in cas_init_mac()
3064 cp->regs + REG_MAC_FRAMESIZE_MAX); in cas_init_mac()
3070 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) in cas_init_mac()
3071 writel(0x41, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3073 writel(0x07, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3074 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); in cas_init_mac()
3075 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); in cas_init_mac()
3076 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); in cas_init_mac()
3078 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); in cas_init_mac()
3080 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); in cas_init_mac()
3081 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); in cas_init_mac()
3082 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); in cas_init_mac()
3083 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); in cas_init_mac()
3084 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); in cas_init_mac()
3088 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); in cas_init_mac()
3090 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); in cas_init_mac()
3091 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); in cas_init_mac()
3092 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); in cas_init_mac()
3094 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); in cas_init_mac()
3095 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); in cas_init_mac()
3096 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); in cas_init_mac()
3098 cp->mac_rx_cfg = cas_setup_multicast(cp); in cas_init_mac()
3100 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3101 cas_clear_mac_err(cp); in cas_init_mac()
3102 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3108 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); in cas_init_mac()
3109 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); in cas_init_mac()
3114 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); in cas_init_mac()
3117 /* Must be invoked under cp->lock. */
3118 static void cas_init_pause_thresholds(struct cas *cp) in cas_init_pause_thresholds() argument
3123 if (cp->rx_fifo_size <= (2 * 1024)) { in cas_init_pause_thresholds()
3124 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; in cas_init_pause_thresholds()
3126 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; in cas_init_pause_thresholds()
3127 if (max_frame * 3 > cp->rx_fifo_size) { in cas_init_pause_thresholds()
3128 cp->rx_pause_off = 7104; in cas_init_pause_thresholds()
3129 cp->rx_pause_on = 960; in cas_init_pause_thresholds()
3131 int off = (cp->rx_fifo_size - (max_frame * 2)); in cas_init_pause_thresholds()
3133 cp->rx_pause_off = off; in cas_init_pause_thresholds()
3134 cp->rx_pause_on = on; in cas_init_pause_thresholds()
3163 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, in cas_get_vpd_info() argument
3166 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; in cas_get_vpd_info()
3182 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3287 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; in cas_get_vpd_info()
3328 addr = of_get_property(cp->of_node, "local-mac-address", NULL); in cas_get_vpd_info()
3343 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3348 static void cas_check_pci_invariants(struct cas *cp) in cas_check_pci_invariants() argument
3350 struct pci_dev *pdev = cp->pdev; in cas_check_pci_invariants()
3352 cp->cas_flags = 0; in cas_check_pci_invariants()
3356 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3358 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; in cas_check_pci_invariants()
3364 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; in cas_check_pci_invariants()
3367 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3374 cp->cas_flags |= CAS_FLAG_SATURN; in cas_check_pci_invariants()
3379 static int cas_check_invariants(struct cas *cp) in cas_check_invariants() argument
3381 struct pci_dev *pdev = cp->pdev; in cas_check_invariants()
3386 cp->page_order = 0; in cas_check_invariants()
3395 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; in cas_check_invariants()
3401 cp->page_size = (PAGE_SIZE << cp->page_order); in cas_check_invariants()
3404 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; in cas_check_invariants()
3405 cp->rx_fifo_size = RX_FIFO_SIZE; in cas_check_invariants()
3410 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, in cas_check_invariants()
3412 if (cp->phy_type & CAS_PHY_SERDES) { in cas_check_invariants()
3413 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
3418 cfg = readl(cp->regs + REG_MIF_CFG); in cas_check_invariants()
3420 cp->phy_type = CAS_PHY_MII_MDIO1; in cas_check_invariants()
3422 cp->phy_type = CAS_PHY_MII_MDIO0; in cas_check_invariants()
3425 cas_mif_poll(cp, 0); in cas_check_invariants()
3426 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_check_invariants()
3433 cp->phy_addr = i; in cas_check_invariants()
3434 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; in cas_check_invariants()
3435 phy_id |= cas_phy_read(cp, MII_PHYSID2); in cas_check_invariants()
3437 cp->phy_id = phy_id; in cas_check_invariants()
3443 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_check_invariants()
3448 cfg = cas_phy_read(cp, MII_BMSR); in cas_check_invariants()
3450 cas_phy_read(cp, CAS_MII_1000_EXTEND)) in cas_check_invariants()
3451 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
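The PHY probe in cas_check_invariants() walks all 32 MII addresses, combines PHYSID1/PHYSID2 into one 32-bit ID, and treats all-zeros or all-ones as an empty address. A standalone model of that loop, with cas_phy_read() replaced by a stub; the ID returned at address 1 is invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define MII_PHYSID1 0x02
#define MII_PHYSID2 0x03

/* stub standing in for cas_phy_read(): empty addresses float to 0xffff */
static uint16_t mii_read(int addr, int reg)
{
	if (addr == 1)
		return reg == MII_PHYSID1 ? 0x0141 : 0x0cc2;	/* fake ID */
	return 0xffff;
}

int main(void)
{
	for (int addr = 0; addr < 32; addr++) {
		uint32_t id = (uint32_t)mii_read(addr, MII_PHYSID1) << 16;
		id |= mii_read(addr, MII_PHYSID2);
		if (id != 0 && id != 0xffffffff) {	/* a real PHY answered */
			printf("PHY at addr %d, id 0x%08x\n", addr, (unsigned)id);
			return 0;
		}
	}
	printf("no PHY found\n");
	return 0;
}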
3455 /* Must be invoked under cp->lock. */
3456 static inline void cas_start_dma(struct cas *cp) in cas_start_dma() argument
3463 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; in cas_start_dma()
3464 writel(val, cp->regs + REG_TX_CFG); in cas_start_dma()
3465 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; in cas_start_dma()
3466 writel(val, cp->regs + REG_RX_CFG); in cas_start_dma()
3469 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; in cas_start_dma()
3470 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3471 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; in cas_start_dma()
3472 writel(val, cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3476 val = readl(cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3484 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3487 netdev_err(cp->dev, in cas_start_dma()
3489 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3490 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3496 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", in cas_start_dma()
3498 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3499 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3502 cas_unmask_intr(cp); /* enable interrupts */ in cas_start_dma()
3503 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_start_dma()
3504 writel(0, cp->regs + REG_RX_COMP_TAIL); in cas_start_dma()
3506 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_start_dma()
3509 cp->regs + REG_PLUS_RX_KICK1); in cas_start_dma()
3512 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); in cas_start_dma()
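The enable ordering in cas_start_dma() matters: DMA engines first, then the MAC, then the RX kick. A userspace model of that sequence with readl()/writel() stubbed over a plain array; register indices and bit values are invented for the model, not the hardware's:

#include <stdint.h>
#include <stdio.h>

enum { TX_CFG, RX_CFG, MAC_TX_CFG, MAC_RX_CFG, RX_KICK, NREGS };
#define DMA_EN 0x1	/* invented bit values */
#define MAC_EN 0x1

static uint32_t regs[NREGS];
static uint32_t rd(int r)             { return regs[r]; }
static void     wr(uint32_t v, int r) { regs[r] = v; }

int main(void)
{
	/* 1) DMA engines first, read-modify-write so other config bits survive */
	wr(rd(TX_CFG) | DMA_EN, TX_CFG);
	wr(rd(RX_CFG) | DMA_EN, RX_CFG);

	/* 2) then the MAC; the real driver follows this with a bounded poll
	 *    and reports "enabling mac failed" on timeout */
	wr(rd(MAC_TX_CFG) | MAC_EN, MAC_TX_CFG);
	wr(rd(MAC_RX_CFG) | MAC_EN, MAC_RX_CFG);

	/* 3) finally kick RX: hand the hardware all but 4 descriptors,
	 *    mirroring RX_DESC_RINGN_SIZE(0) - 4 above */
	wr(128 - 4, RX_KICK);

	printf("RX_KICK = %u\n", (unsigned)rd(RX_KICK));
	return 0;
}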
3516 /* Must be invoked under cp->lock. */
3517 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_pcs_link_mode() argument
3520 u32 val = readl(cp->regs + REG_PCS_MII_LPA); in cas_read_pcs_link_mode()
3528 /* Must be invoked under cp->lock. */
3529 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_mii_link_mode() argument
3539 val = cas_phy_read(cp, MII_LPA); in cas_read_mii_link_mode()
3551 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_read_mii_link_mode()
3552 val = cas_phy_read(cp, CAS_MII_1000_STATUS); in cas_read_mii_link_mode()
3563 * Must be invoked under cp->lock.
3565 static void cas_set_link_modes(struct cas *cp) in cas_set_link_modes() argument
3574 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3575 cas_mif_poll(cp, 0); in cas_set_link_modes()
3576 val = cas_phy_read(cp, MII_BMCR); in cas_set_link_modes()
3578 cas_read_mii_link_mode(cp, &full_duplex, &speed, in cas_set_link_modes()
3587 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_set_link_modes()
3590 cas_mif_poll(cp, 1); in cas_set_link_modes()
3593 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_set_link_modes()
3594 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); in cas_set_link_modes()
3601 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", in cas_set_link_modes()
3605 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3614 writel(val, cp->regs + REG_MAC_XIF_CFG); in cas_set_link_modes()
3636 cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3638 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3641 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3643 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3645 cp->crc_size = 4; in cas_set_link_modes()
3647 cp->min_frame_size = CAS_1000MB_MIN_FRAME; in cas_set_link_modes()
3650 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3655 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3658 cp->crc_size = 0; in cas_set_link_modes()
3659 cp->min_frame_size = CAS_MIN_MTU; in cas_set_link_modes()
3662 cp->crc_size = 4; in cas_set_link_modes()
3663 cp->min_frame_size = CAS_MIN_FRAME; in cas_set_link_modes()
3666 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3667 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3670 if (netif_msg_link(cp)) { in cas_set_link_modes()
3672 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", in cas_set_link_modes()
3673 cp->rx_fifo_size, in cas_set_link_modes()
3674 cp->rx_pause_off, in cas_set_link_modes()
3675 cp->rx_pause_on); in cas_set_link_modes()
3677 netdev_info(cp->dev, "TX pause enabled\n"); in cas_set_link_modes()
3679 netdev_info(cp->dev, "Pause is disabled\n"); in cas_set_link_modes()
3683 val = readl(cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3691 writel(val, cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3692 cas_start_dma(cp); in cas_set_link_modes()
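The duplex/speed decisions above reduce to a small table: gigabit half-duplex needs carrier extension and a 512-byte slot time (0x200), and FCS stripping is only enabled at full duplex (the half-duplex checksum workaround keeps the CRC, so crc_size stays 4). A standalone model of those choices; struct mode and pick_mode() are illustrative:

#include <stdio.h>

struct mode {
	int slot_time;	/* REG_MAC_SLOT_TIME value */
	int strip_fcs;	/* MAC_RX_CFG_STRIP_FCS set? */
	int crc_size;	/* trailing bytes software must ignore */
};

static struct mode pick_mode(int speed, int full_duplex)
{
	struct mode m;

	if (speed == 1000 && !full_duplex) {
		m.slot_time = 0x200;	/* carrier-extension slot */
		m.strip_fcs = 0;
		m.crc_size = 4;
	} else {
		m.slot_time = 0x40;
		m.strip_fcs = full_duplex;
		m.crc_size = full_duplex ? 0 : 4;
	}
	return m;
}

int main(void)
{
	int speeds[] = { 10, 100, 1000 };

	for (int s = 0; s < 3; s++)
		for (int fd = 0; fd <= 1; fd++) {
			struct mode m = pick_mode(speeds[s], fd);
			printf("%4d Mbps %s: slot=0x%x strip_fcs=%d crc=%d\n",
			       speeds[s], fd ? "full" : "half",
			       m.slot_time, m.strip_fcs, m.crc_size);
		}
	return 0;
}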
3695 /* Must be invoked under cp->lock. */
3696 static void cas_init_hw(struct cas *cp, int restart_link) in cas_init_hw() argument
3699 cas_phy_init(cp); in cas_init_hw()
3701 cas_init_pause_thresholds(cp); in cas_init_hw()
3702 cas_init_mac(cp); in cas_init_hw()
3703 cas_init_dma(cp); in cas_init_hw()
3707 cp->timer_ticks = 0; in cas_init_hw()
3708 cas_begin_auto_negotiation(cp, NULL); in cas_init_hw()
3709 } else if (cp->lstate == link_up) { in cas_init_hw()
3710 cas_set_link_modes(cp); in cas_init_hw()
3711 netif_carrier_on(cp->dev); in cas_init_hw()
3715 /* Must be invoked under cp->lock. On earlier Cassini boards,
3719 static void cas_hard_reset(struct cas *cp) in cas_hard_reset() argument
3721 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_hard_reset()
3723 pci_restore_state(cp->pdev); in cas_hard_reset()
3727 static void cas_global_reset(struct cas *cp, int blkflag) in cas_global_reset() argument
3732 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { in cas_global_reset()
3740 cp->regs + REG_SW_RESET); in cas_global_reset()
3742 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); in cas_global_reset()
3750 u32 val = readl(cp->regs + REG_SW_RESET); in cas_global_reset()
3755 netdev_err(cp->dev, "sw reset failed\n"); in cas_global_reset()
3760 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); in cas_global_reset()
3768 PCI_ERR_BIM_DMA_READ), cp->regs + in cas_global_reset()
3774 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_global_reset()
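The software reset in cas_global_reset() writes self-clearing TX|RX reset bits and then polls until the hardware drops them, bounded by a retry budget so a wedged chip cannot hang the caller. A model of that loop; read_reset() and its fake countdown stand in for real MMIO:

#include <stdint.h>
#include <stdio.h>

#define SW_RESET_TX 0x1	/* illustrative bit values */
#define SW_RESET_RX 0x2

static uint32_t sw_reset_reg = SW_RESET_TX | SW_RESET_RX;

static uint32_t read_reset(void)
{
	static int countdown = 5;	/* pretend hw finishes after 5 reads */

	if (countdown && --countdown == 0)
		sw_reset_reg &= ~(SW_RESET_TX | SW_RESET_RX);
	return sw_reset_reg;
}

int main(void)
{
	int limit = 15000;	/* STOP_TRIES-style budget */

	while (limit-- > 0) {
		if ((read_reset() & (SW_RESET_TX | SW_RESET_RX)) == 0) {
			printf("reset complete\n");
			return 0;
		}
		/* real driver: udelay(10) between polls */
	}
	printf("sw reset failed\n");	/* mirrors the netdev_err() above */
	return 1;
}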
3777 static void cas_reset(struct cas *cp, int blkflag) in cas_reset() argument
3781 cas_mask_intr(cp); in cas_reset()
3782 cas_global_reset(cp, blkflag); in cas_reset()
3783 cas_mac_reset(cp); in cas_reset()
3784 cas_entropy_reset(cp); in cas_reset()
3787 val = readl(cp->regs + REG_TX_CFG); in cas_reset()
3789 writel(val, cp->regs + REG_TX_CFG); in cas_reset()
3791 val = readl(cp->regs + REG_RX_CFG); in cas_reset()
3793 writel(val, cp->regs + REG_RX_CFG); in cas_reset()
3796 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || in cas_reset()
3798 cas_load_firmware(cp, CAS_HP_FIRMWARE); in cas_reset()
3800 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); in cas_reset()
3804 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3805 cas_clear_mac_err(cp); in cas_reset()
3806 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3810 static void cas_shutdown(struct cas *cp) in cas_shutdown() argument
3815 cp->hw_running = 0; in cas_shutdown()
3817 del_timer_sync(&cp->link_timer); in cas_shutdown()
3821 while (atomic_read(&cp->reset_task_pending_mtu) || in cas_shutdown()
3822 atomic_read(&cp->reset_task_pending_spare) || in cas_shutdown()
3823 atomic_read(&cp->reset_task_pending_all)) in cas_shutdown()
3827 while (atomic_read(&cp->reset_task_pending)) in cas_shutdown()
3831 cas_lock_all_save(cp, flags); in cas_shutdown()
3832 cas_reset(cp, 0); in cas_shutdown()
3833 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_shutdown()
3834 cas_phy_powerdown(cp); in cas_shutdown()
3835 cas_unlock_all_restore(cp, flags); in cas_shutdown()
3840 struct cas *cp = netdev_priv(dev); in cas_change_mtu() local
3848 atomic_inc(&cp->reset_task_pending); in cas_change_mtu()
3849 if ((cp->phy_type & CAS_PHY_SERDES)) { in cas_change_mtu()
3850 atomic_inc(&cp->reset_task_pending_all); in cas_change_mtu()
3852 atomic_inc(&cp->reset_task_pending_mtu); in cas_change_mtu()
3854 schedule_work(&cp->reset_task); in cas_change_mtu()
3856 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? in cas_change_mtu()
3859 schedule_work(&cp->reset_task); in cas_change_mtu()
3862 flush_work(&cp->reset_task); in cas_change_mtu()
3866 static void cas_clean_txd(struct cas *cp, int ring) in cas_clean_txd() argument
3868 struct cas_tx_desc *txd = cp->init_txds[ring]; in cas_clean_txd()
3869 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; in cas_clean_txd()
3892 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_clean_txd()
3902 if (cp->tx_tiny_use[ring][ent].used) in cas_clean_txd()
3910 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); in cas_clean_txd()
3914 static inline void cas_free_rx_desc(struct cas *cp, int ring) in cas_free_rx_desc() argument
3916 cas_page_t **page = cp->rx_pages[ring]; in cas_free_rx_desc()
3922 cas_page_free(cp, page[i]); in cas_free_rx_desc()
3928 static void cas_free_rxds(struct cas *cp) in cas_free_rxds() argument
3933 cas_free_rx_desc(cp, i); in cas_free_rxds()
3936 /* Must be invoked under cp->lock. */
3937 static void cas_clean_rings(struct cas *cp) in cas_clean_rings() argument
3942 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); in cas_clean_rings()
3943 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); in cas_clean_rings()
3945 cas_clean_txd(cp, i); in cas_clean_rings()
3948 memset(cp->init_block, 0, sizeof(struct cas_init_block)); in cas_clean_rings()
3949 cas_clean_rxds(cp); in cas_clean_rings()
3950 cas_clean_rxcs(cp); in cas_clean_rings()
3954 static inline int cas_alloc_rx_desc(struct cas *cp, int ring) in cas_alloc_rx_desc() argument
3956 cas_page_t **page = cp->rx_pages[ring]; in cas_alloc_rx_desc()
3961 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) in cas_alloc_rx_desc()
3967 static int cas_alloc_rxds(struct cas *cp) in cas_alloc_rxds() argument
3972 if (cas_alloc_rx_desc(cp, i) < 0) { in cas_alloc_rxds()
3973 cas_free_rxds(cp); in cas_alloc_rxds()
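cas_alloc_rxds() shows the all-or-nothing allocation pattern: populate every ring, and on the first failure tear down what was already built so the caller never sees a half-constructed state. A minimal standalone model:

#include <stdlib.h>
#include <stdio.h>

#define N_RINGS 4	/* illustrative; the driver sizes this per chip */

static void *rings[N_RINGS];

static void free_rings(void)
{
	for (int i = 0; i < N_RINGS; i++) {
		free(rings[i]);
		rings[i] = NULL;
	}
}

static int alloc_rings(void)
{
	for (int i = 0; i < N_RINGS; i++) {
		rings[i] = malloc(4096);
		if (!rings[i]) {
			free_rings();	/* roll back partial progress */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	printf("alloc_rings: %s\n", alloc_rings() ? "failed" : "ok");
	free_rings();
	return 0;
}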
3982 struct cas *cp = container_of(work, struct cas, reset_task); in cas_reset_task() local
3984 int pending = atomic_read(&cp->reset_task_pending); in cas_reset_task()
3986 int pending_all = atomic_read(&cp->reset_task_pending_all); in cas_reset_task()
3987 int pending_spare = atomic_read(&cp->reset_task_pending_spare); in cas_reset_task()
3988 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); in cas_reset_task()
3994 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
4002 if (cp->hw_running) { in cas_reset_task()
4006 netif_device_detach(cp->dev); in cas_reset_task()
4007 cas_lock_all_save(cp, flags); in cas_reset_task()
4009 if (cp->opened) { in cas_reset_task()
4014 cas_spare_recover(cp, GFP_ATOMIC); in cas_reset_task()
4032 cas_reset(cp, !(pending_all > 0)); in cas_reset_task()
4033 if (cp->opened) in cas_reset_task()
4034 cas_clean_rings(cp); in cas_reset_task()
4035 cas_init_hw(cp, (pending_all > 0)); in cas_reset_task()
4037 cas_reset(cp, !(pending == CAS_RESET_ALL)); in cas_reset_task()
4038 if (cp->opened) in cas_reset_task()
4039 cas_clean_rings(cp); in cas_reset_task()
4040 cas_init_hw(cp, pending == CAS_RESET_ALL); in cas_reset_task()
4044 cas_unlock_all_restore(cp, flags); in cas_reset_task()
4045 netif_device_attach(cp->dev); in cas_reset_task()
4048 atomic_sub(pending_all, &cp->reset_task_pending_all); in cas_reset_task()
4049 atomic_sub(pending_spare, &cp->reset_task_pending_spare); in cas_reset_task()
4050 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); in cas_reset_task()
4051 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
4053 atomic_set(&cp->reset_task_pending, 0); in cas_reset_task()
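The bookkeeping shared by cas_change_mtu() and cas_reset_task() works like this: each requester bumps an aggregate counter plus a per-reason counter, the worker snapshots the per-reason counts before acting, and afterwards subtracts exactly the snapshot, so a request that races in mid-reset survives for the next pass. A C11-atomics model of that scheme; names and the single-threaded harness are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending, pending_mtu, pending_all;

static void request_mtu_reset(void)
{
	atomic_fetch_add(&pending, 1);
	atomic_fetch_add(&pending_mtu, 1);
	/* real driver: schedule_work(&cp->reset_task) */
}

static void reset_worker(void)
{
	int mtu = atomic_load(&pending_mtu);	/* snapshot first */
	int all = atomic_load(&pending_all);

	/* ... perform the reset covering the reasons seen ... */

	atomic_fetch_sub(&pending_mtu, mtu);	/* subtract only the snapshot */
	atomic_fetch_sub(&pending_all, all);
	atomic_fetch_sub(&pending, 1);
}

int main(void)
{
	request_mtu_reset();
	request_mtu_reset();	/* races in before the worker runs */
	reset_worker();
	/* the leftover aggregate count is drained by the next worker pass */
	printf("still pending: %d (mtu %d)\n",
	       atomic_load(&pending), atomic_load(&pending_mtu));
	return 0;
}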
4059 struct cas *cp = from_timer(cp, t, link_timer); in cas_link_timer() local
4064 cp->link_transition_jiffies_valid && in cas_link_timer()
4065 ((jiffies - cp->link_transition_jiffies) > in cas_link_timer()
4071 cp->link_transition_jiffies_valid = 0; in cas_link_timer()
4074 if (!cp->hw_running) in cas_link_timer()
4077 spin_lock_irqsave(&cp->lock, flags); in cas_link_timer()
4078 cas_lock_tx(cp); in cas_link_timer()
4079 cas_entropy_gather(cp); in cas_link_timer()
4085 if (atomic_read(&cp->reset_task_pending_all) || in cas_link_timer()
4086 atomic_read(&cp->reset_task_pending_spare) || in cas_link_timer()
4087 atomic_read(&cp->reset_task_pending_mtu)) in cas_link_timer()
4090 if (atomic_read(&cp->reset_task_pending)) in cas_link_timer()
4095 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { in cas_link_timer()
4104 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { in cas_link_timer()
4108 cp->cas_flags &= ~rmask; in cas_link_timer()
4112 if (CAS_PHY_MII(cp->phy_type)) { in cas_link_timer()
4114 cas_mif_poll(cp, 0); in cas_link_timer()
4115 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4121 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4122 cas_mif_poll(cp, 1); in cas_link_timer()
4123 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ in cas_link_timer()
4124 reset = cas_mii_link_check(cp, bmsr); in cas_link_timer()
4126 reset = cas_pcs_link_check(cp); in cas_link_timer()
4133 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { in cas_link_timer()
4134 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); in cas_link_timer()
4140 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4146 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); in cas_link_timer()
4147 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); in cas_link_timer()
4148 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); in cas_link_timer()
4150 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4157 cas_hard_reset(cp); in cas_link_timer()
4163 atomic_inc(&cp->reset_task_pending); in cas_link_timer()
4164 atomic_inc(&cp->reset_task_pending_all); in cas_link_timer()
4165 schedule_work(&cp->reset_task); in cas_link_timer()
4167 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_link_timer()
4169 schedule_work(&cp->reset_task); in cas_link_timer()
4174 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_link_timer()
4175 cas_unlock_tx(cp); in cas_link_timer()
4176 spin_unlock_irqrestore(&cp->lock, flags); in cas_link_timer()
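cas_link_timer() is a self-rearming kernel timer: timer_setup() at probe time, from_timer() to recover the owning structure in the handler, and mod_timer() at the end of each run to rearm. A minimal sketch of that pattern (kernel-style, not compilable outside a kernel tree; struct foo and the one-second period are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list link_timer;
};

static void foo_link_timer(struct timer_list *t)
{
	struct foo *p = from_timer(p, t, link_timer);

	/* ... poll link state, kick recovery work if needed ... */

	mod_timer(&p->link_timer, jiffies + HZ);	/* rearm */
}

static void foo_start(struct foo *p)
{
	timer_setup(&p->link_timer, foo_link_timer, 0);
	mod_timer(&p->link_timer, jiffies + HZ);
}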
4182 static void cas_tx_tiny_free(struct cas *cp) in cas_tx_tiny_free() argument
4184 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_free()
4188 if (!cp->tx_tiny_bufs[i]) in cas_tx_tiny_free()
4192 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]); in cas_tx_tiny_free()
4193 cp->tx_tiny_bufs[i] = NULL; in cas_tx_tiny_free()
4197 static int cas_tx_tiny_alloc(struct cas *cp) in cas_tx_tiny_alloc() argument
4199 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_alloc()
4203 cp->tx_tiny_bufs[i] = in cas_tx_tiny_alloc()
4205 &cp->tx_tiny_dvma[i], GFP_KERNEL); in cas_tx_tiny_alloc()
4206 if (!cp->tx_tiny_bufs[i]) { in cas_tx_tiny_alloc()
4207 cas_tx_tiny_free(cp); in cas_tx_tiny_alloc()
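Behind cas_tx_tiny_alloc()/cas_tx_tiny_free() is the coherent-DMA buffer pattern: dma_alloc_coherent() returns both a CPU pointer and a bus address, and the same size/handle pair must be handed back to dma_free_coherent(). A sketch of that pairing (kernel-style; TX_TINY_BUF_BLOCK and struct foo are illustrative stand-ins):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

#define TX_TINY_BUF_BLOCK 8192	/* illustrative size */

struct foo {
	struct pci_dev *pdev;
	u8 *tiny_bufs;
	dma_addr_t tiny_dvma;
};

static int foo_tiny_alloc(struct foo *p)
{
	p->tiny_bufs = dma_alloc_coherent(&p->pdev->dev, TX_TINY_BUF_BLOCK,
					  &p->tiny_dvma, GFP_KERNEL);
	return p->tiny_bufs ? 0 : -ENOMEM;
}

static void foo_tiny_free(struct foo *p)
{
	if (!p->tiny_bufs)
		return;
	dma_free_coherent(&p->pdev->dev, TX_TINY_BUF_BLOCK,
			  p->tiny_bufs, p->tiny_dvma);	/* same size + handle */
	p->tiny_bufs = NULL;
}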
4217 struct cas *cp = netdev_priv(dev); in cas_open() local
4221 mutex_lock(&cp->pm_mutex); in cas_open()
4223 hw_was_up = cp->hw_running; in cas_open()
4226 * etc. state so it is safe to do this bit without cp->lock in cas_open()
4228 if (!cp->hw_running) { in cas_open()
4230 cas_lock_all_save(cp, flags); in cas_open()
4236 cas_reset(cp, 0); in cas_open()
4237 cp->hw_running = 1; in cas_open()
4238 cas_unlock_all_restore(cp, flags); in cas_open()
4242 if (cas_tx_tiny_alloc(cp) < 0) in cas_open()
4246 if (cas_alloc_rxds(cp) < 0) in cas_open()
4250 cas_spare_init(cp); in cas_open()
4251 cas_spare_recover(cp, GFP_KERNEL); in cas_open()
4258 if (request_irq(cp->pdev->irq, cas_interrupt, in cas_open()
4260 netdev_err(cp->dev, "failed to request irq!\n"); in cas_open()
4266 napi_enable(&cp->napi); in cas_open()
4269 cas_lock_all_save(cp, flags); in cas_open()
4270 cas_clean_rings(cp); in cas_open()
4271 cas_init_hw(cp, !hw_was_up); in cas_open()
4272 cp->opened = 1; in cas_open()
4273 cas_unlock_all_restore(cp, flags); in cas_open()
4276 mutex_unlock(&cp->pm_mutex); in cas_open()
4280 cas_spare_free(cp); in cas_open()
4281 cas_free_rxds(cp); in cas_open()
4283 cas_tx_tiny_free(cp); in cas_open()
4285 mutex_unlock(&cp->pm_mutex); in cas_open()
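The error labels at the end of cas_open() (spare buffers, RX descriptors, tiny buffers) follow the usual goto-unwind discipline: each label undoes exactly what the earlier steps acquired, in reverse order, so a failure at any step leaves no leaks. A standalone model; all foo_* helpers are illustrative stubs:

#include <stdio.h>

static int  foo_tiny_alloc(void)  { return 0; }
static void foo_tiny_free(void)   { }
static int  foo_alloc_rings(void) { return 0; }
static void foo_free_rings(void)  { }
static int  foo_request_irq(void) { return -1; }	/* force a failure */

static int foo_open(void)
{
	if (foo_tiny_alloc() < 0)
		return -1;
	if (foo_alloc_rings() < 0)
		goto err_tiny;
	if (foo_request_irq() < 0)
		goto err_rings;
	return 0;

err_rings:
	foo_free_rings();	/* undo foo_alloc_rings() */
err_tiny:
	foo_tiny_free();	/* undo foo_tiny_alloc() */
	return -1;
}

int main(void)
{
	printf("open: %s\n", foo_open() ? "failed, fully unwound" : "ok");
	return 0;
}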
4292 struct cas *cp = netdev_priv(dev); in cas_close() local
4295 napi_disable(&cp->napi); in cas_close()
4298 mutex_lock(&cp->pm_mutex); in cas_close()
4303 cas_lock_all_save(cp, flags); in cas_close()
4304 cp->opened = 0; in cas_close()
4305 cas_reset(cp, 0); in cas_close()
4306 cas_phy_init(cp); in cas_close()
4307 cas_begin_auto_negotiation(cp, NULL); in cas_close()
4308 cas_clean_rings(cp); in cas_close()
4309 cas_unlock_all_restore(cp, flags); in cas_close()
4311 free_irq(cp->pdev->irq, (void *) dev); in cas_close()
4312 cas_spare_free(cp); in cas_close()
4313 cas_free_rxds(cp); in cas_close()
4314 cas_tx_tiny_free(cp); in cas_close()
4315 mutex_unlock(&cp->pm_mutex); in cas_close()
4366 static void cas_read_regs(struct cas *cp, u8 *ptr, int len) in cas_read_regs() argument
4372 spin_lock_irqsave(&cp->lock, flags); in cas_read_regs()
4377 hval = cas_phy_read(cp, in cas_read_regs()
4381 val = readl(cp->regs + ethtool_register_table[i].offsets); in cas_read_regs()
4385 spin_unlock_irqrestore(&cp->lock, flags); in cas_read_regs()
4390 struct cas *cp = netdev_priv(dev); in cas_get_stats() local
4391 struct net_device_stats *stats = cp->net_stats; in cas_get_stats()
4397 if (!cp->hw_running) in cas_get_stats()
4408 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
4410 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; in cas_get_stats()
4412 readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff; in cas_get_stats()
4414 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; in cas_get_stats()
4416 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + in cas_get_stats()
4417 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); in cas_get_stats()
4420 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); in cas_get_stats()
4423 readl(cp->regs + REG_MAC_COLL_EXCESS); in cas_get_stats()
4424 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + in cas_get_stats()
4425 readl(cp->regs + REG_MAC_COLL_LATE); in cas_get_stats()
4427 cas_clear_mac_err(cp); in cas_get_stats()
4430 spin_lock(&cp->stat_lock[0]); in cas_get_stats()
4437 spin_unlock(&cp->stat_lock[0]); in cas_get_stats()
4440 spin_lock(&cp->stat_lock[i]); in cas_get_stats()
4453 spin_unlock(&cp->stat_lock[i]); in cas_get_stats()
4455 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
4462 struct cas *cp = netdev_priv(dev); in cas_set_multicast() local
4467 if (!cp->hw_running) in cas_set_multicast()
4470 spin_lock_irqsave(&cp->lock, flags); in cas_set_multicast()
4471 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4474 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4475 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { in cas_set_multicast()
4484 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4485 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { in cas_set_multicast()
4492 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); in cas_set_multicast()
4494 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4495 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_multicast()
4500 struct cas *cp = netdev_priv(dev); in cas_get_drvinfo() local
4503 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cas_get_drvinfo()
4509 struct cas *cp = netdev_priv(dev); in cas_get_link_ksettings() local
4518 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_get_link_ksettings()
4524 spin_lock_irqsave(&cp->lock, flags); in cas_get_link_ksettings()
4526 linkstate = cp->lstate; in cas_get_link_ksettings()
4527 if (CAS_PHY_MII(cp->phy_type)) { in cas_get_link_ksettings()
4529 cmd->base.phy_address = cp->phy_addr; in cas_get_link_ksettings()
4543 if (cp->hw_running) { in cas_get_link_ksettings()
4544 cas_mif_poll(cp, 0); in cas_get_link_ksettings()
4545 bmcr = cas_phy_read(cp, MII_BMCR); in cas_get_link_ksettings()
4546 cas_read_mii_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4548 cas_mif_poll(cp, 1); in cas_get_link_ksettings()
4557 if (cp->hw_running) { in cas_get_link_ksettings()
4559 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); in cas_get_link_ksettings()
4560 cas_read_pcs_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4564 spin_unlock_irqrestore(&cp->lock, flags); in cas_get_link_ksettings()
4594 if (cp->link_cntl & BMCR_ANENABLE) { in cas_get_link_ksettings()
4599 if (cp->link_cntl & BMCR_SPEED100) { in cas_get_link_ksettings()
4601 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { in cas_get_link_ksettings()
4604 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ? in cas_get_link_ksettings()
4620 struct cas *cp = netdev_priv(dev); in cas_set_link_ksettings() local
4638 spin_lock_irqsave(&cp->lock, flags); in cas_set_link_ksettings()
4639 cas_begin_auto_negotiation(cp, cmd); in cas_set_link_ksettings()
4640 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_link_ksettings()
4646 struct cas *cp = netdev_priv(dev); in cas_nway_reset() local
4649 if ((cp->link_cntl & BMCR_ANENABLE) == 0) in cas_nway_reset()
4653 spin_lock_irqsave(&cp->lock, flags); in cas_nway_reset()
4654 cas_begin_auto_negotiation(cp, NULL); in cas_nway_reset()
4655 spin_unlock_irqrestore(&cp->lock, flags); in cas_nway_reset()
4662 struct cas *cp = netdev_priv(dev); in cas_get_link() local
4663 return cp->lstate == link_up; in cas_get_link()
4668 struct cas *cp = netdev_priv(dev); in cas_get_msglevel() local
4669 return cp->msg_enable; in cas_get_msglevel()
4674 struct cas *cp = netdev_priv(dev); in cas_set_msglevel() local
4675 cp->msg_enable = value; in cas_set_msglevel()
4680 struct cas *cp = netdev_priv(dev); in cas_get_regs_len() local
4681 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS; in cas_get_regs_len()
4687 struct cas *cp = netdev_priv(dev); in cas_get_regs() local
4689 /* cas_read_regs handles locks (cp->lock). */ in cas_get_regs()
4690 cas_read_regs(cp, p, regs->len / sizeof(u32)); in cas_get_regs()
4712 struct cas *cp = netdev_priv(dev); in cas_get_ethtool_stats() local
4713 struct net_device_stats *stats = cas_get_stats(cp->dev); in cas_get_ethtool_stats()
4751 struct cas *cp = netdev_priv(dev); in cas_ioctl() local
4759 mutex_lock(&cp->pm_mutex); in cas_ioctl()
4762 data->phy_id = cp->phy_addr; in cas_ioctl()
4766 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4767 cas_mif_poll(cp, 0); in cas_ioctl()
4768 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); in cas_ioctl()
4769 cas_mif_poll(cp, 1); in cas_ioctl()
4770 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4775 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4776 cas_mif_poll(cp, 0); in cas_ioctl()
4777 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); in cas_ioctl()
4778 cas_mif_poll(cp, 1); in cas_ioctl()
4779 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4785 mutex_unlock(&cp->pm_mutex); in cas_ioctl()
4894 struct cas *cp; in cas_init_one() local
4915 dev = alloc_etherdev(sizeof(*cp)); in cas_init_one()
4988 cp = netdev_priv(dev); in cas_init_one()
4989 cp->pdev = pdev; in cas_init_one()
4992 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0; in cas_init_one()
4994 cp->dev = dev; in cas_init_one()
4995 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : in cas_init_one()
4999 cp->of_node = pci_device_to_OF_node(pdev); in cas_init_one()
5002 cp->link_transition = LINK_TRANSITION_UNKNOWN; in cas_init_one()
5003 cp->link_transition_jiffies_valid = 0; in cas_init_one()
5005 spin_lock_init(&cp->lock); in cas_init_one()
5006 spin_lock_init(&cp->rx_inuse_lock); in cas_init_one()
5007 spin_lock_init(&cp->rx_spare_lock); in cas_init_one()
5009 spin_lock_init(&cp->stat_lock[i]); in cas_init_one()
5010 spin_lock_init(&cp->tx_lock[i]); in cas_init_one()
5012 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); in cas_init_one()
5013 mutex_init(&cp->pm_mutex); in cas_init_one()
5015 timer_setup(&cp->link_timer, cas_link_timer, 0); in cas_init_one()
5021 atomic_set(&cp->reset_task_pending, 0); in cas_init_one()
5022 atomic_set(&cp->reset_task_pending_all, 0); in cas_init_one()
5023 atomic_set(&cp->reset_task_pending_spare, 0); in cas_init_one()
5024 atomic_set(&cp->reset_task_pending_mtu, 0); in cas_init_one()
5026 INIT_WORK(&cp->reset_task, cas_reset_task); in cas_init_one()
5030 cp->link_cntl = link_modes[link_mode]; in cas_init_one()
5032 cp->link_cntl = BMCR_ANENABLE; in cas_init_one()
5033 cp->lstate = link_down; in cas_init_one()
5034 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_init_one()
5035 netif_carrier_off(cp->dev); in cas_init_one()
5036 cp->timer_ticks = 0; in cas_init_one()
5039 cp->regs = pci_iomap(pdev, 0, casreg_len); in cas_init_one()
5040 if (!cp->regs) { in cas_init_one()
5044 cp->casreg_len = casreg_len; in cas_init_one()
5047 cas_check_pci_invariants(cp); in cas_init_one()
5048 cas_hard_reset(cp); in cas_init_one()
5049 cas_reset(cp, 0); in cas_init_one()
5050 if (cas_check_invariants(cp)) in cas_init_one()
5052 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_init_one()
5053 cas_saturn_firmware_init(cp); in cas_init_one()
5055 cp->init_block = in cas_init_one()
5057 &cp->block_dvma, GFP_KERNEL); in cas_init_one()
5058 if (!cp->init_block) { in cas_init_one()
5064 cp->init_txds[i] = cp->init_block->txds[i]; in cas_init_one()
5067 cp->init_rxds[i] = cp->init_block->rxds[i]; in cas_init_one()
5070 cp->init_rxcs[i] = cp->init_block->rxcs[i]; in cas_init_one()
5073 skb_queue_head_init(&cp->rx_flows[i]); in cas_init_one()
5080 netif_napi_add(dev, &cp->napi, cas_poll, 64); in cas_init_one()
5086 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) in cas_init_one()
5101 i = readl(cp->regs + REG_BIM_CFG); in cas_init_one()
5103 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", in cas_init_one()
5106 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, in cas_init_one()
5110 cp->hw_running = 1; in cas_init_one()
5111 cas_entropy_reset(cp); in cas_init_one()
5112 cas_phy_init(cp); in cas_init_one()
5113 cas_begin_auto_negotiation(cp, NULL); in cas_init_one()
5118 cp->init_block, cp->block_dvma); in cas_init_one()
5121 mutex_lock(&cp->pm_mutex); in cas_init_one()
5122 if (cp->hw_running) in cas_init_one()
5123 cas_shutdown(cp); in cas_init_one()
5124 mutex_unlock(&cp->pm_mutex); in cas_init_one()
5126 pci_iounmap(pdev, cp->regs); in cas_init_one()
5148 struct cas *cp; in cas_remove_one() local
5152 cp = netdev_priv(dev); in cas_remove_one()
5155 vfree(cp->fw_data); in cas_remove_one()
5157 mutex_lock(&cp->pm_mutex); in cas_remove_one()
5158 cancel_work_sync(&cp->reset_task); in cas_remove_one()
5159 if (cp->hw_running) in cas_remove_one()
5160 cas_shutdown(cp); in cas_remove_one()
5161 mutex_unlock(&cp->pm_mutex); in cas_remove_one()
5164 if (cp->orig_cacheline_size) { in cas_remove_one()
5169 cp->orig_cacheline_size); in cas_remove_one()
5173 cp->init_block, cp->block_dvma); in cas_remove_one()
5174 pci_iounmap(pdev, cp->regs); in cas_remove_one()
5183 struct cas *cp = netdev_priv(dev); in cas_suspend() local
5186 mutex_lock(&cp->pm_mutex); in cas_suspend()
5189 if (cp->opened) { in cas_suspend()
5192 cas_lock_all_save(cp, flags); in cas_suspend()
5199 cas_reset(cp, 0); in cas_suspend()
5200 cas_clean_rings(cp); in cas_suspend()
5201 cas_unlock_all_restore(cp, flags); in cas_suspend()
5204 if (cp->hw_running) in cas_suspend()
5205 cas_shutdown(cp); in cas_suspend()
5206 mutex_unlock(&cp->pm_mutex); in cas_suspend()
5214 struct cas *cp = netdev_priv(dev); in cas_resume() local
5218 mutex_lock(&cp->pm_mutex); in cas_resume()
5219 cas_hard_reset(cp); in cas_resume()
5220 if (cp->opened) { in cas_resume()
5222 cas_lock_all_save(cp, flags); in cas_resume()
5223 cas_reset(cp, 0); in cas_resume()
5224 cp->hw_running = 1; in cas_resume()
5225 cas_clean_rings(cp); in cas_resume()
5226 cas_init_hw(cp, 1); in cas_resume()
5227 cas_unlock_all_restore(cp, flags); in cas_resume()
5231 mutex_unlock(&cp->pm_mutex); in cas_resume()