Lines Matching refs:rx_pool

239 adapter->rx_pool[i].active = 0; in deactivate_rx_pools()
349 if (adapter->rx_pool[i].active) in replenish_pools()
350 replenish_rx_pool(adapter, &adapter->rx_pool[i]); in replenish_pools()
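
The first two groups of matches form the activation gate: deactivate_rx_pools() clears each pool's active flag, and replenish_pools() refills only pools that still have it set. A minimal sketch of both loops, assuming the pool count lives in num_active_rx_pools (only the flag write, the flag check and the replenish_rx_pool() call appear in the matches):

    static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
    {
    	int i;

    	/* Inactive pools are skipped by replenish_pools() below. */
    	for (i = 0; i < adapter->num_active_rx_pools; i++)
    		adapter->rx_pool[i].active = 0;
    }

    static void replenish_pools(struct ibmvnic_adapter *adapter)
    {
    	int i;

    	for (i = 0; i < adapter->num_active_rx_pools; i++) {
    		if (adapter->rx_pool[i].active)
    			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
    	}
    }
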
414 struct ibmvnic_rx_pool *rx_pool; in reset_rx_pools() local
424 rx_pool = &adapter->rx_pool[i]; in reset_rx_pools()
428 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) { in reset_rx_pools()
429 free_long_term_buff(adapter, &rx_pool->long_term_buff); in reset_rx_pools()
430 rx_pool->buff_size = be64_to_cpu(size_array[i]); in reset_rx_pools()
432 &rx_pool->long_term_buff, in reset_rx_pools()
433 rx_pool->size * in reset_rx_pools()
434 rx_pool->buff_size); in reset_rx_pools()
437 &rx_pool->long_term_buff); in reset_rx_pools()
443 for (j = 0; j < rx_pool->size; j++) in reset_rx_pools()
444 rx_pool->free_map[j] = j; in reset_rx_pools()
446 memset(rx_pool->rx_buff, 0, in reset_rx_pools()
447 rx_pool->size * sizeof(struct ibmvnic_rx_buff)); in reset_rx_pools()
449 atomic_set(&rx_pool->available, 0); in reset_rx_pools()
450 rx_pool->next_alloc = 0; in reset_rx_pools()
451 rx_pool->next_free = 0; in reset_rx_pools()
452 rx_pool->active = 1; in reset_rx_pools()
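
Taken together, the reset_rx_pools() matches re-size the pool's long-term buffer only when the firmware-reported buffer size changed, then rewind the pool's bookkeeping. A sketch of the per-pool body under those matches; the reset_long_term_buff() call on the else branch, the origin of size_array (the login response), and the rc handling are assumptions not visible in the listing:

    	rx_pool = &adapter->rx_pool[i];

    	if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
    		/* Buffer size changed: rebuild the long-term buffer at the new size. */
    		free_long_term_buff(adapter, &rx_pool->long_term_buff);
    		rx_pool->buff_size = be64_to_cpu(size_array[i]);
    		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
    					  rx_pool->size * rx_pool->buff_size);
    	} else {
    		/* Same size: assumed to reuse the existing mapping. */
    		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
    	}
    	if (rc)
    		return rc;

    	/* Every slot becomes free again... */
    	for (j = 0; j < rx_pool->size; j++)
    		rx_pool->free_map[j] = j;

    	/* ...and the per-buffer state is wiped before the pool is re-armed. */
    	memset(rx_pool->rx_buff, 0,
    	       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

    	atomic_set(&rx_pool->available, 0);
    	rx_pool->next_alloc = 0;
    	rx_pool->next_free = 0;
    	rx_pool->active = 1;
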
460 struct ibmvnic_rx_pool *rx_pool; in release_rx_pools() local
463 if (!adapter->rx_pool) in release_rx_pools()
467 rx_pool = &adapter->rx_pool[i]; in release_rx_pools()
471 kfree(rx_pool->free_map); in release_rx_pools()
472 free_long_term_buff(adapter, &rx_pool->long_term_buff); in release_rx_pools()
474 if (!rx_pool->rx_buff) in release_rx_pools()
477 for (j = 0; j < rx_pool->size; j++) { in release_rx_pools()
478 if (rx_pool->rx_buff[j].skb) { in release_rx_pools()
479 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); in release_rx_pools()
480 rx_pool->rx_buff[j].skb = NULL; in release_rx_pools()
484 kfree(rx_pool->rx_buff); in release_rx_pools()
487 kfree(adapter->rx_pool); in release_rx_pools()
488 adapter->rx_pool = NULL; in release_rx_pools()
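
The release_rx_pools() matches show the full teardown order: per pool, free the free_map and the long-term buffer, drop any skbs still parked in rx_buff, free the rx_buff array, and finally free the pool array and NULL the pointer so later calls bail out early. A sketch of the routine, assuming num_active_rx_pools bounds the loop:

    static void release_rx_pools(struct ibmvnic_adapter *adapter)
    {
    	struct ibmvnic_rx_pool *rx_pool;
    	int i, j;

    	if (!adapter->rx_pool)
    		return;

    	for (i = 0; i < adapter->num_active_rx_pools; i++) {
    		rx_pool = &adapter->rx_pool[i];

    		kfree(rx_pool->free_map);
    		free_long_term_buff(adapter, &rx_pool->long_term_buff);

    		if (!rx_pool->rx_buff)
    			continue;

    		/* A buffer still holding an skb was never handed to the stack. */
    		for (j = 0; j < rx_pool->size; j++) {
    			if (rx_pool->rx_buff[j].skb) {
    				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
    				rx_pool->rx_buff[j].skb = NULL;
    			}
    		}

    		kfree(rx_pool->rx_buff);
    	}

    	kfree(adapter->rx_pool);
    	adapter->rx_pool = NULL;
    }
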
496 struct ibmvnic_rx_pool *rx_pool; in init_rx_pools() local
506 adapter->rx_pool = kcalloc(rxadd_subcrqs, in init_rx_pools()
509 if (!adapter->rx_pool) { in init_rx_pools()
517 rx_pool = &adapter->rx_pool[i]; in init_rx_pools()
524 rx_pool->size = adapter->req_rx_add_entries_per_subcrq; in init_rx_pools()
525 rx_pool->index = i; in init_rx_pools()
526 rx_pool->buff_size = be64_to_cpu(size_array[i]); in init_rx_pools()
527 rx_pool->active = 1; in init_rx_pools()
529 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), in init_rx_pools()
531 if (!rx_pool->free_map) { in init_rx_pools()
536 rx_pool->rx_buff = kcalloc(rx_pool->size, in init_rx_pools()
539 if (!rx_pool->rx_buff) { in init_rx_pools()
545 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, in init_rx_pools()
546 rx_pool->size * rx_pool->buff_size)) { in init_rx_pools()
551 for (j = 0; j < rx_pool->size; ++j) in init_rx_pools()
552 rx_pool->free_map[j] = j; in init_rx_pools()
554 atomic_set(&rx_pool->available, 0); in init_rx_pools()
555 rx_pool->next_alloc = 0; in init_rx_pools()
556 rx_pool->next_free = 0; in init_rx_pools()
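
The init_rx_pools() matches allocate in three layers: the array of pool structs, then each pool's free_map and rx_buff arrays, then one DMA-able long-term buffer of size * buff_size bytes. A sketch of that setup; rxadd_subcrqs, size_array, and the error path through release_rx_pools() are assumptions about context not shown in the matches:

    	adapter->rx_pool = kcalloc(rxadd_subcrqs,
    				   sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
    	if (!adapter->rx_pool)
    		return -1;

    	for (i = 0; i < rxadd_subcrqs; i++) {
    		rx_pool = &adapter->rx_pool[i];

    		/* Geometry comes from the negotiated login response. */
    		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
    		rx_pool->index = i;
    		rx_pool->buff_size = be64_to_cpu(size_array[i]);
    		rx_pool->active = 1;

    		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
    					    GFP_KERNEL);
    		if (!rx_pool->free_map) {
    			release_rx_pools(adapter);	/* assumed unwind path */
    			return -1;
    		}

    		rx_pool->rx_buff = kcalloc(rx_pool->size,
    					   sizeof(struct ibmvnic_rx_buff),
    					   GFP_KERNEL);
    		if (!rx_pool->rx_buff) {
    			release_rx_pools(adapter);
    			return -1;
    		}

    		/* One contiguous, firmware-mapped buffer backs the whole pool. */
    		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
    					 rx_pool->size * rx_pool->buff_size)) {
    			release_rx_pools(adapter);
    			return -1;
    		}

    		/* Start with every slot free and nothing posted. */
    		for (j = 0; j < rx_pool->size; ++j)
    			rx_pool->free_map[j] = j;

    		atomic_set(&rx_pool->available, 0);
    		rx_pool->next_alloc = 0;
    		rx_pool->next_free = 0;
    	}
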
1110 struct ibmvnic_rx_pool *rx_pool; in clean_rx_pools() local
1116 if (!adapter->rx_pool) in clean_rx_pools()
1124 rx_pool = &adapter->rx_pool[i]; in clean_rx_pools()
1125 if (!rx_pool || !rx_pool->rx_buff) in clean_rx_pools()
1130 rx_buff = &rx_pool->rx_buff[j]; in clean_rx_pools()
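
clean_rx_pools() is the lighter-weight counterpart to release_rx_pools(): it only frees skbs left in the buffers and keeps every allocation in place, so the pools can later be reset and reused. A sketch of its loops, assuming the bounds come from num_active_rx_pools and req_rx_add_entries_per_subcrq:

    	if (!adapter->rx_pool)
    		return;

    	for (i = 0; i < adapter->num_active_rx_pools; i++) {
    		rx_pool = &adapter->rx_pool[i];
    		if (!rx_pool || !rx_pool->rx_buff)
    			continue;

    		/* Drop leftover skbs; free_map, rx_buff and the long-term
    		 * buffer stay allocated for a later reset_rx_pools().
    		 */
    		for (j = 0; j < adapter->req_rx_add_entries_per_subcrq; j++) {
    			rx_buff = &rx_pool->rx_buff[j];
    			if (rx_buff && rx_buff->skb) {
    				dev_kfree_skb_any(rx_buff->skb);
    				rx_buff->skb = NULL;
    			}
    		}
    	}
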
2202 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; in remove_buff_from_pool()
2295 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); in ibmvnic_poll()
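
On the receive path, ibmvnic_poll() hands a completed buffer's skb to the stack, returns the slot to its pool via remove_buff_from_pool(), and tops the pool back up with replenish_rx_pool(). A sketch of the slot-return step; the free_map/next_alloc ring bookkeeping is an assumption consistent with the fields seen in the matches above:

    static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
    				  struct ibmvnic_rx_buff *rx_buff)
    {
    	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

    	/* The skb now belongs to the network stack. */
    	rx_buff->skb = NULL;

    	/* Return this buffer's index to the free ring... */
    	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
    	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

    	/* ...and note that one fewer buffer is posted to firmware. */
    	atomic_dec(&pool->available);
    }
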
5111 ret += adapter->rx_pool[i].size * in ibmvnic_get_desired_dma()
5112 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); in ibmvnic_get_desired_dma()
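
The final matches are the DMA-window estimate: every pool contributes size buffers, each rounded up to an IOMMU page. A sketch of that accumulation, assuming num_active_rx_pools bounds the loop, tbl is the device's IOMMU table, and ret already holds the non-RX terms:

    	for (i = 0; i < adapter->num_active_rx_pools; i++)
    		ret += adapter->rx_pool[i].size *
    		       IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
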