Lines Matching refs:tx_pool (drivers/net/ethernet/ibm/ibmvnic.c)

563 struct ibmvnic_tx_pool *tx_pool) in reset_one_tx_pool() argument
567 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); in reset_one_tx_pool()
571 memset(tx_pool->tx_buff, 0, in reset_one_tx_pool()
572 tx_pool->num_buffers * in reset_one_tx_pool()
575 for (i = 0; i < tx_pool->num_buffers; i++) in reset_one_tx_pool()
576 tx_pool->free_map[i] = i; in reset_one_tx_pool()
578 tx_pool->consumer_index = 0; in reset_one_tx_pool()
579 tx_pool->producer_index = 0; in reset_one_tx_pool()
594 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); in reset_tx_pools()
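
The reset path above rewinds a pool without reallocating it: the free_map goes back to the identity mapping and both ring indices return to slot zero. Below is a minimal userspace sketch of that pattern, assuming a hypothetical struct demo_tx_pool and function demo_reset_tx_pool that model only the fields these lines touch; it is not the driver's code.

/* Hypothetical, simplified stand-in for ibmvnic_tx_pool: only the
 * fields the reset path above touches are modeled here. */
struct demo_tx_pool {
	int *free_map;		/* ring of free buffer indices */
	int num_buffers;	/* number of slots in the pool */
	int consumer_index;	/* xmit side takes indices from here */
	int producer_index;	/* completion side returns indices here */
};

/* Reset pattern from the listing: every slot becomes free again and
 * the free_map is refilled with the identity mapping 0..num_buffers-1. */
static void demo_reset_tx_pool(struct demo_tx_pool *pool)
{
	int i;

	for (i = 0; i < pool->num_buffers; i++)
		pool->free_map[i] = i;

	pool->consumer_index = 0;
	pool->producer_index = 0;
}
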
614 struct ibmvnic_tx_pool *tx_pool) in release_one_tx_pool() argument
616 kfree(tx_pool->tx_buff); in release_one_tx_pool()
617 kfree(tx_pool->free_map); in release_one_tx_pool()
618 free_long_term_buff(adapter, &tx_pool->long_term_buff); in release_one_tx_pool()
625 if (!adapter->tx_pool) in release_tx_pools()
629 release_one_tx_pool(adapter, &adapter->tx_pool[i]); in release_tx_pools()
633 kfree(adapter->tx_pool); in release_tx_pools()
634 adapter->tx_pool = NULL; in release_tx_pools()
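
Release tears the pools down completely: each pool's arrays are freed, then the array of pools itself, and the pointer is cleared so a second release is harmless. A hedged sketch of that combined pattern, with the hypothetical demo_tx_pool/demo_release_tx_pools names, free() standing in for kfree(), and the long term buffer omitted:

#include <stdlib.h>

/* Hypothetical pool with only the members the release path frees. */
struct demo_tx_pool {
	void *tx_buff;
	int *free_map;
};

/* Free the per-pool arrays, then the array of pools itself, and NULL
 * the caller's pointer so a repeated release is a no-op (free(NULL)
 * is safe, as is kfree(NULL) in the driver). */
static void demo_release_tx_pools(struct demo_tx_pool **pools, int count)
{
	int i;

	if (!*pools)
		return;

	for (i = 0; i < count; i++) {
		free((*pools)[i].tx_buff);
		free((*pools)[i].free_map);
	}

	free(*pools);
	*pools = NULL;
}
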
641 struct ibmvnic_tx_pool *tx_pool, in init_one_tx_pool() argument
647 tx_pool->tx_buff = kcalloc(num_entries, in init_one_tx_pool()
650 if (!tx_pool->tx_buff) in init_one_tx_pool()
653 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, in init_one_tx_pool()
657 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL); in init_one_tx_pool()
658 if (!tx_pool->free_map) in init_one_tx_pool()
662 tx_pool->free_map[i] = i; in init_one_tx_pool()
664 tx_pool->consumer_index = 0; in init_one_tx_pool()
665 tx_pool->producer_index = 0; in init_one_tx_pool()
666 tx_pool->num_buffers = num_entries; in init_one_tx_pool()
667 tx_pool->buf_size = buf_size; in init_one_tx_pool()
679 adapter->tx_pool = kcalloc(tx_subcrqs, in init_tx_pools()
681 if (!adapter->tx_pool) in init_tx_pools()
692 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], in init_tx_pools()
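
Initialization is the mirror image: init_tx_pools() allocates one pool per TX sub-CRQ, and init_one_tx_pool() sizes each pool's descriptor array and free_map, seeds the identity mapping, and zeroes both ring indices. A minimal sketch of the per-pool step follows, assuming the hypothetical demo_tx_pool/demo_init_one_tx_pool names, with calloc() in place of kcalloc() and the long term DMA buffer left out.

#include <stdlib.h>

/* Hypothetical, userspace-only model of one pool. */
struct demo_tx_pool {
	struct demo_tx_buff { void *skb; } *tx_buff;
	int *free_map;
	int num_buffers;
	int buf_size;
	int consumer_index;
	int producer_index;
};

/* Same shape as the init path in the listing: allocate the buffer
 * descriptors and the free_map, seed the free_map with the identity
 * mapping, and start both ring indices at slot 0. */
static int demo_init_one_tx_pool(struct demo_tx_pool *pool,
				 int num_entries, int buf_size)
{
	int i;

	pool->tx_buff = calloc(num_entries, sizeof(*pool->tx_buff));
	if (!pool->tx_buff)
		return -1;	/* -ENOMEM in the driver */

	pool->free_map = calloc(num_entries, sizeof(int));
	if (!pool->free_map) {
		free(pool->tx_buff);
		pool->tx_buff = NULL;
		return -1;
	}

	for (i = 0; i < num_entries; i++)
		pool->free_map[i] = i;

	pool->consumer_index = 0;
	pool->producer_index = 0;
	pool->num_buffers = num_entries;
	pool->buf_size = buf_size;
	return 0;
}
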
1140 struct ibmvnic_tx_pool *tx_pool) in clean_one_tx_pool() argument
1146 if (!tx_pool || !tx_pool->tx_buff) in clean_one_tx_pool()
1149 tx_entries = tx_pool->num_buffers; in clean_one_tx_pool()
1152 tx_buff = &tx_pool->tx_buff[i]; in clean_one_tx_pool()
1165 if (!adapter->tx_pool || !adapter->tso_pool) in clean_tx_pools()
1173 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); in clean_tx_pools()
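
Cleaning differs from releasing: the pools stay allocated, but any packet still attached to a descriptor is dropped. A small sketch of the same walk, under hypothetical demo_* names, with a plain pointer standing in for the skb and free() for the driver's skb-freeing call:

#include <stdlib.h>

/* Hypothetical buffer descriptor: a plain pointer plays the role of
 * the skb that the real clean path releases. */
struct demo_tx_buff {
	void *skb;
};

struct demo_tx_pool {
	struct demo_tx_buff *tx_buff;
	int num_buffers;
};

/* Walk every descriptor and drop any packet still attached, leaving
 * the pool itself allocated but empty. */
static void demo_clean_one_tx_pool(struct demo_tx_pool *pool)
{
	int i;

	if (!pool || !pool->tx_buff)
		return;

	for (i = 0; i < pool->num_buffers; i++) {
		free(pool->tx_buff[i].skb);
		pool->tx_buff[i].skb = NULL;
	}
}
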
1413 struct ibmvnic_tx_pool *tx_pool; in ibmvnic_xmit() local
1449 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_xmit()
1451 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_xmit()
1458 index = tx_pool->free_map[tx_pool->consumer_index]; in ibmvnic_xmit()
1468 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; in ibmvnic_xmit()
1470 offset = index * tx_pool->buf_size; in ibmvnic_xmit()
1471 dst = tx_pool->long_term_buff.buff + offset; in ibmvnic_xmit()
1472 memset(dst, 0, tx_pool->buf_size); in ibmvnic_xmit()
1473 data_dma_addr = tx_pool->long_term_buff.addr + offset; in ibmvnic_xmit()
1495 tx_pool->consumer_index = in ibmvnic_xmit()
1496 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; in ibmvnic_xmit()
1498 tx_buff = &tx_pool->tx_buff[index]; in ibmvnic_xmit()
1518 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); in ibmvnic_xmit()
1614 if (tx_pool->consumer_index == 0) in ibmvnic_xmit()
1615 tx_pool->consumer_index = in ibmvnic_xmit()
1616 tx_pool->num_buffers - 1; in ibmvnic_xmit()
1618 tx_pool->consumer_index--; in ibmvnic_xmit()
1619 tx_pool->free_map[tx_pool->consumer_index] = index; in ibmvnic_xmit()
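
On transmit, the lines above pop a buffer index from free_map[consumer_index], poison that slot, and advance the index modulo num_buffers; the tail of the excerpt (source lines 1614-1619) is the error unwind that steps the index back and restores the entry. A hedged sketch of both halves, using the hypothetical demo_tx_pool, demo_take_tx_index and demo_untake_tx_index names, with DEMO_INVALID_MAP as a made-up stand-in for IBMVNIC_INVALID_MAP:

/* Made-up sentinel playing the role of IBMVNIC_INVALID_MAP. */
#define DEMO_INVALID_MAP -1

struct demo_tx_pool {
	int *free_map;
	int num_buffers;
	int consumer_index;
	int producer_index;
};

/* Transmit side: claim the buffer index sitting at consumer_index,
 * poison that free_map slot, and advance the index modulo the ring
 * size, as the ibmvnic_xmit() lines above do. */
static int demo_take_tx_index(struct demo_tx_pool *pool)
{
	int index = pool->free_map[pool->consumer_index];

	if (index == DEMO_INVALID_MAP)
		return -1;	/* ring exhausted, caller must back off */

	pool->free_map[pool->consumer_index] = DEMO_INVALID_MAP;
	pool->consumer_index = (pool->consumer_index + 1) % pool->num_buffers;
	return index;
}

/* Error unwind mirroring the end of the ibmvnic_xmit() excerpt: step
 * consumer_index back one slot (wrapping at zero) and put the index
 * back into the free_map. */
static void demo_untake_tx_index(struct demo_tx_pool *pool, int index)
{
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->num_buffers - 1;
	else
		pool->consumer_index--;
	pool->free_map[pool->consumer_index] = index;
}
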
2904 struct ibmvnic_tx_pool *tx_pool; in ibmvnic_complete_tx() local
2924 tx_pool = &adapter->tso_pool[pool]; in ibmvnic_complete_tx()
2927 tx_pool = &adapter->tx_pool[pool]; in ibmvnic_complete_tx()
2930 txbuff = &tx_pool->tx_buff[index]; in ibmvnic_complete_tx()
2946 tx_pool->free_map[tx_pool->producer_index] = index; in ibmvnic_complete_tx()
2947 tx_pool->producer_index = in ibmvnic_complete_tx()
2948 (tx_pool->producer_index + 1) % in ibmvnic_complete_tx()
2949 tx_pool->num_buffers; in ibmvnic_complete_tx()
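
Completion is the producer side of the same ring: the finished index goes back into free_map[producer_index], which then advances modulo num_buffers, so the transmit and completion paths chase each other around the map. A minimal sketch under the same hypothetical struct, with demo_return_tx_index as an invented name:

struct demo_tx_pool {
	int *free_map;
	int num_buffers;
	int producer_index;
};

/* Completion side: hand the finished buffer index back through the
 * slot at producer_index and advance it modulo the ring size, as the
 * ibmvnic_complete_tx() lines above do. */
static void demo_return_tx_index(struct demo_tx_pool *pool, int index)
{
	pool->free_map[pool->producer_index] = index;
	pool->producer_index = (pool->producer_index + 1) % pool->num_buffers;
}
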