Lines matching full:ss (occurrences of the per-slice state pointer ss, struct myri10ge_slice_state, in the myri10ge driver)

197 	struct myri10ge_slice_state *ss;  member
916 struct myri10ge_slice_state *ss; in myri10ge_reset() local
944 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry); in myri10ge_reset()
1001 ss = &mgp->ss[i]; in myri10ge_reset()
1002 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1003 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus); in myri10ge_reset()
1012 ss = &mgp->ss[i]; in myri10ge_reset()
1013 ss->irq_claim = in myri10ge_reset()
1033 ss = &mgp->ss[i]; in myri10ge_reset()
1035 ss->dca_tag = (__iomem __be32 *) in myri10ge_reset()
1038 ss->dca_tag = NULL; in myri10ge_reset()
1047 ss = &mgp->ss[i]; in myri10ge_reset()
1049 memset(ss->rx_done.entry, 0, bytes); in myri10ge_reset()
1050 ss->tx.req = 0; in myri10ge_reset()
1051 ss->tx.done = 0; in myri10ge_reset()
1052 ss->tx.pkt_start = 0; in myri10ge_reset()
1053 ss->tx.pkt_done = 0; in myri10ge_reset()
1054 ss->rx_big.cnt = 0; in myri10ge_reset()
1055 ss->rx_small.cnt = 0; in myri10ge_reset()
1056 ss->rx_done.idx = 0; in myri10ge_reset()
1057 ss->rx_done.cnt = 0; in myri10ge_reset()
1058 ss->tx.wake_queue = 0; in myri10ge_reset()
1059 ss->tx.stop_queue = 0; in myri10ge_reset()
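
The matches above cover the per-slice loop in myri10ge_reset(). A hedged sketch of that reinitialization, reassembled from the matching lines; the surrounding firmware command exchanges, declarations, and loop bounds are elided or assumed:

/* Sketch only: rewind per-slice ring state after a firmware reset.
 * 'bytes' is the completion-ring size computed earlier from
 * mgp->max_intr_slots. */
int i;

for (i = 0; i < mgp->num_slices; i++) {
        struct myri10ge_slice_state *ss = &mgp->ss[i];

        memset(ss->rx_done.entry, 0, bytes);    /* clear the event ring */
        ss->tx.req = 0;                         /* host TX producer */
        ss->tx.done = 0;                        /* NIC TX consumer  */
        ss->tx.pkt_start = 0;
        ss->tx.pkt_done = 0;
        ss->rx_big.cnt = 0;
        ss->rx_small.cnt = 0;
        ss->rx_done.idx = 0;
        ss->rx_done.cnt = 0;
        ss->tx.wake_queue = 0;
        ss->tx.stop_queue = 0;
}
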
1086 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) in myri10ge_write_dca() argument
1088 ss->cached_dca_tag = tag; in myri10ge_write_dca()
1089 put_be32(htonl(tag), ss->dca_tag); in myri10ge_write_dca()
1092 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss) in myri10ge_update_dca() argument
1097 if (cpu != ss->cpu) { in myri10ge_update_dca()
1098 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu); in myri10ge_update_dca()
1099 if (ss->cached_dca_tag != tag) in myri10ge_update_dca()
1100 myri10ge_write_dca(ss, cpu, tag); in myri10ge_update_dca()
1101 ss->cpu = cpu; in myri10ge_update_dca()
1111 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled) in myri10ge_setup_dca()
1127 mgp->ss[i].cpu = -1; in myri10ge_setup_dca()
1128 mgp->ss[i].cached_dca_tag = -1; in myri10ge_setup_dca()
1129 myri10ge_update_dca(&mgp->ss[i]); in myri10ge_setup_dca()
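
The DCA matches read as a small helper pair plus a setup loop. A hedged reconstruction of the helpers; the get_cpu()/put_cpu() bracketing does not contain ss and is assumed from context:

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
        ss->cached_dca_tag = tag;
        put_be32(htonl(tag), ss->dca_tag);      /* tell the NIC the new tag */
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
        int cpu = get_cpu();
        int tag;

        /* only re-read the tag when the polling CPU has changed */
        if (cpu != ss->cpu) {
                tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
                if (ss->cached_dca_tag != tag)
                        myri10ge_write_dca(ss, cpu, tag);
                ss->cpu = cpu;
        }
        put_cpu();
}

myri10ge_setup_dca() then primes each slice with cpu = -1 and cached_dca_tag = -1 so the first poll forces a tag write.
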
1306 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) in myri10ge_rx_done() argument
1308 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_rx_done()
1318 rx = &ss->rx_small; in myri10ge_rx_done()
1321 rx = &ss->rx_big; in myri10ge_rx_done()
1330 skb = napi_get_frags(&ss->napi); in myri10ge_rx_done()
1332 ss->stats.rx_dropped++; in myri10ge_rx_done()
1368 skb_record_rx_queue(skb, ss - &mgp->ss[0]); in myri10ge_rx_done()
1370 napi_gro_frags(&ss->napi); in myri10ge_rx_done()
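
The myri10ge_rx_done() matches trace the frag-based GRO receive path. A hedged fragment of that flow; the small_bytes threshold, page/frag attachment, and checksum handling are assumed or elided:

/* Sketch: choose the ring by frame length, build a frag-only skb,
 * and hand it to GRO; on skb exhaustion count a drop. */
if (len <= mgp->small_bytes)
        rx = &ss->rx_small;
else
        rx = &ss->rx_big;

skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
        ss->stats.rx_dropped++;
        /* the real code also returns the rx pages to the ring here */
        return 0;
}

/* ...attach the receive pages as skb frags, set skb->csum... */

skb_record_rx_queue(skb, ss - &mgp->ss[0]);     /* which slice it came from */
napi_gro_frags(&ss->napi);
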
1376 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) in myri10ge_tx_done() argument
1378 struct pci_dev *pdev = ss->mgp->pdev; in myri10ge_tx_done()
1379 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_tx_done()
1398 ss->stats.tx_bytes += skb->len; in myri10ge_tx_done()
1399 ss->stats.tx_packets++; in myri10ge_tx_done()
1415 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss); in myri10ge_tx_done()
1425 if ((ss->mgp->dev->real_num_tx_queues > 1) && in myri10ge_tx_done()
1438 ss->mgp->running == MYRI10GE_ETH_RUNNING) { in myri10ge_tx_done()
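
A hedged sketch of the completion walk behind the myri10ge_tx_done() matches; the DMA unmapping and the firmware queue_active handshake are elided, and dev_consume_skb_irq() is an assumption for the non-matching free:

/* Sketch: 'mcp_index' is the packet-completion count the NIC wrote to
 * host memory; free everything up to it, then wake the queue if it was
 * stopped and enough descriptors are free again. */
struct myri10ge_tx_buf *tx = &ss->tx;
struct netdev_queue *dev_queue;

while (tx->pkt_done != mcp_index) {
        int idx = tx->done & tx->mask;
        struct sk_buff *skb = tx->info[idx].skb;

        tx->info[idx].skb = NULL;
        if (tx->info[idx].last) {
                tx->pkt_done++;
                tx->info[idx].last = 0;
        }
        tx->done++;
        if (skb) {
                ss->stats.tx_bytes += skb->len;
                ss->stats.tx_packets++;
                dev_consume_skb_irq(skb);
        }
        /* dma_unmap_single()/dma_unmap_page() of the fragment elided */
}

dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
if (netif_tx_queue_stopped(dev_queue) &&
    tx->req - tx->done < (tx->mask >> 1) &&
    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
        tx->wake_queue++;
        netif_tx_wake_queue(dev_queue);
}
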
1445 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) in myri10ge_clean_rx_done() argument
1447 struct myri10ge_rx_done *rx_done = &ss->rx_done; in myri10ge_clean_rx_done()
1448 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_clean_rx_done()
1462 rx_ok = myri10ge_rx_done(ss, length, checksum); in myri10ge_clean_rx_done()
1471 ss->stats.rx_packets += rx_packets; in myri10ge_clean_rx_done()
1472 ss->stats.rx_bytes += rx_bytes; in myri10ge_clean_rx_done()
1475 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1476 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_clean_rx_done()
1478 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) in myri10ge_clean_rx_done()
1479 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_clean_rx_done()
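
The tail of myri10ge_clean_rx_done() implements the refill policy. A hedged sketch; the MXGEFW_PAD addend on the small-ring buffer size is assumed:

/* Sketch: fill_cnt counts pages posted to the NIC, cnt counts pages
 * consumed; if the gap drops below the threshold, top the ring up. */
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;

if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
        myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
                                mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
        myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
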
1486 struct mcp_irq_data *stats = mgp->ss[0].fw_stats; in myri10ge_check_statblock()
1521 struct myri10ge_slice_state *ss = in myri10ge_poll() local
1526 if (ss->mgp->dca_enabled) in myri10ge_poll()
1527 myri10ge_update_dca(ss); in myri10ge_poll()
1530 work_done = myri10ge_clean_rx_done(ss, budget); in myri10ge_poll()
1534 put_be32(htonl(3), ss->irq_claim); in myri10ge_poll()
1541 struct myri10ge_slice_state *ss = arg; in myri10ge_intr() local
1542 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_intr()
1543 struct mcp_irq_data *stats = ss->fw_stats; in myri10ge_intr()
1544 struct myri10ge_tx_buf *tx = &ss->tx; in myri10ge_intr()
1550 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { in myri10ge_intr()
1551 napi_schedule(&ss->napi); in myri10ge_intr()
1562 napi_schedule(&ss->napi); in myri10ge_intr()
1579 myri10ge_tx_done(ss, (int)send_done_count); in myri10ge_intr()
1592 if (ss == mgp->ss) in myri10ge_intr()
1595 put_be32(htonl(3), ss->irq_claim + 1); in myri10ge_intr()
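
myri10ge_intr() mostly just schedules the slice's NAPI context, and myri10ge_poll() re-arms the interrupt through irq_claim once it finishes under budget. A hedged sketch of the poll side; the CONFIG_MYRI10GE_DCA guard and the napi_complete_done() call do not match ss and are assumed:

static int myri10ge_poll(struct napi_struct *napi, int budget)
{
        struct myri10ge_slice_state *ss =
                container_of(napi, struct myri10ge_slice_state, napi);
        int work_done;

        /* keep the NIC's DCA tag pointing at the CPU doing the polling */
        if (ss->mgp->dca_enabled)
                myri10ge_update_dca(ss);

        /* drain up to 'budget' entries from this slice's completion ring */
        work_done = myri10ge_clean_rx_done(ss, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                put_be32(htonl(3), ss->irq_claim);      /* re-arm the slice irq */
        }
        return work_done;
}
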
1710 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; in myri10ge_get_ringparam()
1711 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; in myri10ge_get_ringparam()
1713 ring->tx_max_pending = mgp->ss[0].tx.mask + 1; in myri10ge_get_ringparam()
1791 struct myri10ge_slice_state *ss; in myri10ge_get_ethtool_stats() local
1812 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); in myri10ge_get_ethtool_stats()
1818 ss = &mgp->ss[0]; in myri10ge_get_ethtool_stats()
1819 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); in myri10ge_get_ethtool_stats()
1820 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); in myri10ge_get_ethtool_stats()
1822 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); in myri10ge_get_ethtool_stats()
1823 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); in myri10ge_get_ethtool_stats()
1824 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); in myri10ge_get_ethtool_stats()
1825 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); in myri10ge_get_ethtool_stats()
1826 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); in myri10ge_get_ethtool_stats()
1828 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); in myri10ge_get_ethtool_stats()
1829 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); in myri10ge_get_ethtool_stats()
1830 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); in myri10ge_get_ethtool_stats()
1831 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); in myri10ge_get_ethtool_stats()
1832 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); in myri10ge_get_ethtool_stats()
1835 ss = &mgp->ss[slice]; in myri10ge_get_ethtool_stats()
1837 data[i++] = (unsigned int)ss->tx.pkt_start; in myri10ge_get_ethtool_stats()
1838 data[i++] = (unsigned int)ss->tx.pkt_done; in myri10ge_get_ethtool_stats()
1839 data[i++] = (unsigned int)ss->tx.req; in myri10ge_get_ethtool_stats()
1840 data[i++] = (unsigned int)ss->tx.done; in myri10ge_get_ethtool_stats()
1841 data[i++] = (unsigned int)ss->rx_small.cnt; in myri10ge_get_ethtool_stats()
1842 data[i++] = (unsigned int)ss->rx_big.cnt; in myri10ge_get_ethtool_stats()
1843 data[i++] = (unsigned int)ss->tx.wake_queue; in myri10ge_get_ethtool_stats()
1844 data[i++] = (unsigned int)ss->tx.stop_queue; in myri10ge_get_ethtool_stats()
1845 data[i++] = (unsigned int)ss->tx.linearized; in myri10ge_get_ethtool_stats()
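
A hedged sketch of how those per-slice ethtool counters are emitted: after the slice-0 firmware counters, one fixed block of software counters per slice, giving ethtool -S a single flat array (the leading data[i++] = slice; marker is assumed, as it does not contain ss):

/* Sketch: append per-slice software counters after the firmware ones. */
for (slice = 0; slice < mgp->num_slices; slice++) {
        ss = &mgp->ss[slice];
        data[i++] = slice;
        data[i++] = (unsigned int)ss->tx.pkt_start;
        data[i++] = (unsigned int)ss->tx.pkt_done;
        data[i++] = (unsigned int)ss->tx.req;
        data[i++] = (unsigned int)ss->tx.done;
        data[i++] = (unsigned int)ss->rx_small.cnt;
        data[i++] = (unsigned int)ss->rx_big.cnt;
        data[i++] = (unsigned int)ss->tx.wake_queue;
        data[i++] = (unsigned int)ss->tx.stop_queue;
        data[i++] = (unsigned int)ss->tx.linearized;
}
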
1933 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) in myri10ge_allocate_rings() argument
1935 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_allocate_rings()
1944 slice = ss - mgp->ss; in myri10ge_allocate_rings()
1956 ss->tx.mask = tx_ring_entries - 1; in myri10ge_allocate_rings()
1957 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; in myri10ge_allocate_rings()
1964 * sizeof(*ss->tx.req_list); in myri10ge_allocate_rings()
1965 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1966 if (ss->tx.req_bytes == NULL) in myri10ge_allocate_rings()
1970 ss->tx.req_list = (struct mcp_kreq_ether_send *) in myri10ge_allocate_rings()
1971 ALIGN((unsigned long)ss->tx.req_bytes, 8); in myri10ge_allocate_rings()
1972 ss->tx.queue_active = 0; in myri10ge_allocate_rings()
1974 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); in myri10ge_allocate_rings()
1975 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1976 if (ss->rx_small.shadow == NULL) in myri10ge_allocate_rings()
1979 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); in myri10ge_allocate_rings()
1980 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1981 if (ss->rx_big.shadow == NULL) in myri10ge_allocate_rings()
1986 bytes = tx_ring_entries * sizeof(*ss->tx.info); in myri10ge_allocate_rings()
1987 ss->tx.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1988 if (ss->tx.info == NULL) in myri10ge_allocate_rings()
1991 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); in myri10ge_allocate_rings()
1992 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1993 if (ss->rx_small.info == NULL) in myri10ge_allocate_rings()
1996 bytes = rx_ring_entries * sizeof(*ss->rx_big.info); in myri10ge_allocate_rings()
1997 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); in myri10ge_allocate_rings()
1998 if (ss->rx_big.info == NULL) in myri10ge_allocate_rings()
2002 ss->rx_big.cnt = 0; in myri10ge_allocate_rings()
2003 ss->rx_small.cnt = 0; in myri10ge_allocate_rings()
2004 ss->rx_big.fill_cnt = 0; in myri10ge_allocate_rings()
2005 ss->rx_small.fill_cnt = 0; in myri10ge_allocate_rings()
2006 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2007 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_allocate_rings()
2008 ss->rx_small.watchdog_needed = 0; in myri10ge_allocate_rings()
2009 ss->rx_big.watchdog_needed = 0; in myri10ge_allocate_rings()
2011 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; in myri10ge_allocate_rings()
2013 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_allocate_rings()
2017 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { in myri10ge_allocate_rings()
2019 slice, ss->rx_small.fill_cnt); in myri10ge_allocate_rings()
2023 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); in myri10ge_allocate_rings()
2024 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { in myri10ge_allocate_rings()
2026 slice, ss->rx_big.fill_cnt); in myri10ge_allocate_rings()
2033 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_allocate_rings()
2034 int idx = i & ss->rx_big.mask; in myri10ge_allocate_rings()
2035 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_allocate_rings()
2037 put_page(ss->rx_big.info[idx].page); in myri10ge_allocate_rings()
2042 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_allocate_rings()
2043 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_allocate_rings()
2044 int idx = i & ss->rx_small.mask; in myri10ge_allocate_rings()
2045 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_allocate_rings()
2047 put_page(ss->rx_small.info[idx].page); in myri10ge_allocate_rings()
2050 kfree(ss->rx_big.info); in myri10ge_allocate_rings()
2053 kfree(ss->rx_small.info); in myri10ge_allocate_rings()
2056 kfree(ss->tx.info); in myri10ge_allocate_rings()
2059 kfree(ss->rx_big.shadow); in myri10ge_allocate_rings()
2062 kfree(ss->rx_small.shadow); in myri10ge_allocate_rings()
2065 kfree(ss->tx.req_bytes); in myri10ge_allocate_rings()
2066 ss->tx.req_bytes = NULL; in myri10ge_allocate_rings()
2067 ss->tx.req_list = NULL; in myri10ge_allocate_rings()
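
myri10ge_allocate_rings() follows the usual allocate-then-unwind shape. A hedged skeleton of that pattern; the label names are illustrative, not necessarily the driver's:

/* Sketch: allocate in order; on failure, fall into a chain of labels
 * that frees everything already allocated, in reverse order. */
bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_small.info == NULL)
        goto abort_with_tx_info;

bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_big.info == NULL)
        goto abort_with_rx_small_info;

/* ...initial rx page fill, which can also fail and unwind... */

return 0;

abort_with_rx_small_info:
        kfree(ss->rx_small.info);
abort_with_tx_info:
        kfree(ss->tx.info);
        /* ...earlier allocations freed the same way... */
        kfree(ss->tx.req_bytes);
        ss->tx.req_bytes = NULL;        /* marks the rings as unallocated */
        ss->tx.req_list = NULL;
        return status;
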
2073 static void myri10ge_free_rings(struct myri10ge_slice_state *ss) in myri10ge_free_rings() argument
2075 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_free_rings()
2081 if (ss->tx.req_list == NULL) in myri10ge_free_rings()
2084 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { in myri10ge_free_rings()
2085 idx = i & ss->rx_big.mask; in myri10ge_free_rings()
2086 if (i == ss->rx_big.fill_cnt - 1) in myri10ge_free_rings()
2087 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; in myri10ge_free_rings()
2088 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], in myri10ge_free_rings()
2090 put_page(ss->rx_big.info[idx].page); in myri10ge_free_rings()
2094 ss->rx_small.fill_cnt = ss->rx_small.cnt; in myri10ge_free_rings()
2095 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { in myri10ge_free_rings()
2096 idx = i & ss->rx_small.mask; in myri10ge_free_rings()
2097 if (i == ss->rx_small.fill_cnt - 1) in myri10ge_free_rings()
2098 ss->rx_small.info[idx].page_offset = in myri10ge_free_rings()
2100 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], in myri10ge_free_rings()
2102 put_page(ss->rx_small.info[idx].page); in myri10ge_free_rings()
2104 tx = &ss->tx; in myri10ge_free_rings()
2115 ss->stats.tx_dropped++; in myri10ge_free_rings()
2130 kfree(ss->rx_big.info); in myri10ge_free_rings()
2132 kfree(ss->rx_small.info); in myri10ge_free_rings()
2134 kfree(ss->tx.info); in myri10ge_free_rings()
2136 kfree(ss->rx_big.shadow); in myri10ge_free_rings()
2138 kfree(ss->rx_small.shadow); in myri10ge_free_rings()
2140 kfree(ss->tx.req_bytes); in myri10ge_free_rings()
2141 ss->tx.req_bytes = NULL; in myri10ge_free_rings()
2142 ss->tx.req_list = NULL; in myri10ge_free_rings()
2148 struct myri10ge_slice_state *ss; in myri10ge_request_irq() local
2180 ss = &mgp->ss[i]; in myri10ge_request_irq()
2181 snprintf(ss->irq_desc, sizeof(ss->irq_desc), in myri10ge_request_irq()
2184 myri10ge_intr, 0, ss->irq_desc, in myri10ge_request_irq()
2185 ss); in myri10ge_request_irq()
2192 &mgp->ss[i]); in myri10ge_request_irq()
2201 mgp->dev->name, &mgp->ss[0]); in myri10ge_request_irq()
2218 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); in myri10ge_free_irq()
2220 free_irq(pdev->irq, &mgp->ss[0]); in myri10ge_free_irq()
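
A hedged sketch of the per-slice MSI-X registration behind the myri10ge_request_irq() matches: one vector per slice, each named after its slice and passed the slice state as the cookie; the "%s:slice-%d" format and the inline unwind are assumptions. The non-MSI-X fallback registers a single handler against &mgp->ss[0], which is what the free_irq() matches undo:

struct myri10ge_slice_state *ss;
int i, status;

for (i = 0; i < mgp->num_slices; i++) {
        ss = &mgp->ss[i];
        snprintf(ss->irq_desc, sizeof(ss->irq_desc),
                 "%s:slice-%d", mgp->dev->name, i);
        status = request_irq(mgp->msix_vectors[i].vector,
                             myri10ge_intr, 0, ss->irq_desc, ss);
        if (status != 0) {
                /* unwind: release the vectors already requested */
                while (i > 0) {
                        i--;
                        free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
                }
                return status;
        }
}
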
2231 struct myri10ge_slice_state *ss; in myri10ge_get_txrx() local
2234 ss = &mgp->ss[slice]; in myri10ge_get_txrx()
2240 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) in myri10ge_get_txrx()
2246 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2251 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) in myri10ge_get_txrx()
2254 ss->tx.send_go = (__iomem __be32 *) in myri10ge_get_txrx()
2256 ss->tx.send_stop = (__iomem __be32 *) in myri10ge_get_txrx()
2265 struct myri10ge_slice_state *ss; in myri10ge_set_stats() local
2268 ss = &mgp->ss[slice]; in myri10ge_set_stats()
2269 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2270 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); in myri10ge_set_stats()
2274 dma_addr_t bus = ss->fw_stats_bus; in myri10ge_set_stats()
2293 struct myri10ge_slice_state *ss; in myri10ge_open() local
2387 ss = &mgp->ss[slice]; in myri10ge_open()
2394 status = myri10ge_allocate_rings(ss); in myri10ge_open()
2409 napi_enable(&(ss)->napi); in myri10ge_open()
2457 napi_disable(&mgp->ss[slice].napi); in myri10ge_open()
2460 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_open()
2479 if (mgp->ss[0].tx.req_bytes == NULL) in myri10ge_close()
2485 napi_disable(&mgp->ss[i].napi); in myri10ge_close()
2506 myri10ge_free_rings(&mgp->ss[i]); in myri10ge_close()
2622 struct myri10ge_slice_state *ss; in myri10ge_xmit() local
2637 ss = &mgp->ss[queue]; in myri10ge_xmit()
2639 tx = &ss->tx; in myri10ge_xmit()
2716 ss->stats.tx_dropped += 1; in myri10ge_xmit()
2881 ss->stats.tx_dropped += 1; in myri10ge_xmit()
2891 struct myri10ge_slice_state *ss; in myri10ge_sw_tso() local
2916 ss = &mgp->ss[skb_get_queue_mapping(skb)]; in myri10ge_sw_tso()
2918 ss->stats.tx_dropped += 1; in myri10ge_sw_tso()
2930 slice_stats = &mgp->ss[i].stats; in myri10ge_get_stats()
3341 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, in myri10ge_check_slice() argument
3344 struct myri10ge_priv *mgp = ss->mgp; in myri10ge_check_slice()
3345 int slice = ss - mgp->ss; in myri10ge_check_slice()
3347 if (ss->tx.req != ss->tx.done && in myri10ge_check_slice()
3348 ss->tx.done == ss->watchdog_tx_done && in myri10ge_check_slice()
3349 ss->watchdog_tx_req != ss->watchdog_tx_done) { in myri10ge_check_slice()
3358 slice, ss->tx.queue_active, ss->tx.req, in myri10ge_check_slice()
3359 ss->tx.done, ss->tx.pkt_start, in myri10ge_check_slice()
3360 ss->tx.pkt_done, in myri10ge_check_slice()
3361 (int)ntohl(mgp->ss[slice].fw_stats-> in myri10ge_check_slice()
3364 ss->stuck = 1; in myri10ge_check_slice()
3367 if (ss->watchdog_tx_done != ss->tx.done || in myri10ge_check_slice()
3368 ss->watchdog_rx_done != ss->rx_done.cnt) { in myri10ge_check_slice()
3371 ss->watchdog_tx_done = ss->tx.done; in myri10ge_check_slice()
3372 ss->watchdog_tx_req = ss->tx.req; in myri10ge_check_slice()
3373 ss->watchdog_rx_done = ss->rx_done.cnt; in myri10ge_check_slice()
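
A hedged, simplified sketch of the stuck-queue test in myri10ge_check_slice(); the real code also exempts stalls explained by incoming pause frames and decides between logging and requesting a reset:

/* Sketch: a slice is suspected stuck when TX requests are outstanding
 * but neither the completion count nor the request count advanced
 * since the last watchdog pass. */
if (ss->tx.req != ss->tx.done &&
    ss->tx.done == ss->watchdog_tx_done &&
    ss->watchdog_tx_req != ss->watchdog_tx_done) {
        ss->stuck = 1;          /* picked up later by the watchdog work */
}

/* snapshot the counters for the next interval */
ss->watchdog_tx_done = ss->tx.done;
ss->watchdog_tx_req = ss->tx.req;
ss->watchdog_rx_done = ss->rx_done.cnt;
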
3384 struct myri10ge_slice_state *ss; in myri10ge_watchdog() local
3435 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog()
3437 ss = mgp->ss; in myri10ge_watchdog()
3438 if (ss->stuck) { in myri10ge_watchdog()
3439 myri10ge_check_slice(ss, &reset_needed, in myri10ge_watchdog()
3442 ss->stuck = 0; in myri10ge_watchdog()
3475 struct myri10ge_slice_state *ss; in myri10ge_watchdog_timer() local
3482 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); in myri10ge_watchdog_timer()
3487 ss = &mgp->ss[i]; in myri10ge_watchdog_timer()
3488 if (ss->rx_small.watchdog_needed) { in myri10ge_watchdog_timer()
3489 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, in myri10ge_watchdog_timer()
3492 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= in myri10ge_watchdog_timer()
3494 ss->rx_small.watchdog_needed = 0; in myri10ge_watchdog_timer()
3496 if (ss->rx_big.watchdog_needed) { in myri10ge_watchdog_timer()
3497 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, in myri10ge_watchdog_timer()
3499 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= in myri10ge_watchdog_timer()
3501 ss->rx_big.watchdog_needed = 0; in myri10ge_watchdog_timer()
3503 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, in myri10ge_watchdog_timer()
3528 struct myri10ge_slice_state *ss; in myri10ge_free_slices() local
3533 if (mgp->ss == NULL) in myri10ge_free_slices()
3537 ss = &mgp->ss[i]; in myri10ge_free_slices()
3538 if (ss->rx_done.entry != NULL) { in myri10ge_free_slices()
3540 sizeof(*ss->rx_done.entry); in myri10ge_free_slices()
3542 ss->rx_done.entry, ss->rx_done.bus); in myri10ge_free_slices()
3543 ss->rx_done.entry = NULL; in myri10ge_free_slices()
3545 if (ss->fw_stats != NULL) { in myri10ge_free_slices()
3546 bytes = sizeof(*ss->fw_stats); in myri10ge_free_slices()
3548 ss->fw_stats, ss->fw_stats_bus); in myri10ge_free_slices()
3549 ss->fw_stats = NULL; in myri10ge_free_slices()
3551 __netif_napi_del(&ss->napi); in myri10ge_free_slices()
3553 /* Wait till napi structs are no longer used, and then free ss. */ in myri10ge_free_slices()
3555 kfree(mgp->ss); in myri10ge_free_slices()
3556 mgp->ss = NULL; in myri10ge_free_slices()
3561 struct myri10ge_slice_state *ss; in myri10ge_alloc_slices() local
3566 bytes = sizeof(*mgp->ss) * mgp->num_slices; in myri10ge_alloc_slices()
3567 mgp->ss = kzalloc(bytes, GFP_KERNEL); in myri10ge_alloc_slices()
3568 if (mgp->ss == NULL) { in myri10ge_alloc_slices()
3573 ss = &mgp->ss[i]; in myri10ge_alloc_slices()
3574 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); in myri10ge_alloc_slices()
3575 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3576 &ss->rx_done.bus, in myri10ge_alloc_slices()
3578 if (ss->rx_done.entry == NULL) in myri10ge_alloc_slices()
3580 bytes = sizeof(*ss->fw_stats); in myri10ge_alloc_slices()
3581 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, in myri10ge_alloc_slices()
3582 &ss->fw_stats_bus, in myri10ge_alloc_slices()
3584 if (ss->fw_stats == NULL) in myri10ge_alloc_slices()
3586 ss->mgp = mgp; in myri10ge_alloc_slices()
3587 ss->dev = mgp->dev; in myri10ge_alloc_slices()
3588 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, in myri10ge_alloc_slices()
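
Finally, a hedged sketch of the per-slice allocation behind the myri10ge_alloc_slices() matches; the abort unwind and the trailing netif_napi_add() weight argument (dropped in newer kernels) are assumptions:

struct myri10ge_slice_state *ss;
struct pci_dev *pdev = mgp->pdev;
size_t bytes;
int i;

bytes = sizeof(*mgp->ss) * mgp->num_slices;
mgp->ss = kzalloc(bytes, GFP_KERNEL);
if (mgp->ss == NULL)
        return -ENOMEM;

for (i = 0; i < mgp->num_slices; i++) {
        ss = &mgp->ss[i];

        /* completion ring the NIC DMAs rx/irq events into */
        bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
        ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
                                               &ss->rx_done.bus,
                                               GFP_KERNEL);
        if (ss->rx_done.entry == NULL)
                goto abort;

        /* per-slice firmware statistics block */
        bytes = sizeof(*ss->fw_stats);
        ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
                                          &ss->fw_stats_bus,
                                          GFP_KERNEL);
        if (ss->fw_stats == NULL)
                goto abort;

        ss->mgp = mgp;
        ss->dev = mgp->dev;
        netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
                       myri10ge_napi_weight);
}
return 0;

abort:
        myri10ge_free_slices(mgp);      /* frees whatever was allocated */
        return -ENOMEM;
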