Lines matching refs:pble_rsrc (Linux kernel, drivers/infiniband/hw/i40iw/i40iw_pble.c)
52 struct i40iw_hmc_pble_rsrc *pble_rsrc);
59 void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc) in i40iw_destroy_pble_pool() argument
64 struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo; in i40iw_destroy_pble_pool()
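The destroy path above walks pinfo->clist and releases each chunk before tearing down the pool. A minimal kernel-style sketch of that pattern, assuming a simplified chunk type (the driver's struct i40iw_chunk also tracks how its backing memory was obtained):

#include <linux/genalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_chunk {		/* illustrative stand-in for struct i40iw_chunk */
	struct list_head list;
	void *vaddr;
	size_t size;
};

static void demo_destroy_pool(struct gen_pool *pool, struct list_head *clist)
{
	struct demo_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, clist, list) {
		list_del(&chunk->list);
		kfree(chunk);	/* the driver also frees the chunk's backing pages */
	}
	gen_pool_destroy(pool);	/* every range must have been freed by now */
}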
83 struct i40iw_hmc_pble_rsrc *pble_rsrc) in i40iw_hmc_init_pble() argument
89 pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base; in i40iw_hmc_init_pble()
91 if (pble_rsrc->fpm_base_addr & 0xfff) in i40iw_hmc_init_pble()
92 fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3; in i40iw_hmc_init_pble()
94 pble_rsrc->unallocated_pble = in i40iw_hmc_init_pble()
96 pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3); in i40iw_hmc_init_pble()
98 pble_rsrc->pinfo.pool_shift = POOL_SHIFT; in i40iw_hmc_init_pble()
99 pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1); in i40iw_hmc_init_pble()
100 INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); in i40iw_hmc_init_pble()
101 if (!pble_rsrc->pinfo.pool) in i40iw_hmc_init_pble()
104 if (add_pble_pool(dev, pble_rsrc)) in i40iw_hmc_init_pble()
109 error:i40iw_destroy_pble_pool(dev, pble_rsrc); in i40iw_hmc_init_pble()
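The init sequence above rounds the fpm base up to a 4 KB boundary before carving out PBLEs. Since each PBLE is 8 bytes, byte counts convert to PBLE counts with >> 3 and back with << 3. A standalone restatement of that arithmetic (lines 89-96), using made-up sample values:

#include <inttypes.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	uint64_t fpm_base_addr = 0x100a08;	/* hypothetical, unaligned */
	uint32_t pble_cnt = 4096;		/* hypothetical HMC object count */
	uint32_t fpm_idx = 0;

	/* Skip to the next 4 KB boundary, counting the skipped PBLEs. */
	if (fpm_base_addr & 0xfff)
		fpm_idx = (DEMO_PAGE_SIZE - (fpm_base_addr & 0xfff)) >> 3;

	printf("skip %u pbles, next_fpm_addr=0x%" PRIx64 ", unallocated=%u\n",
	       fpm_idx,					/* 191 */
	       fpm_base_addr + ((uint64_t)fpm_idx << 3),	/* 0x101000 */
	       pble_cnt - fpm_idx);			/* 3905 */
	return 0;
}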
118 static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, in get_sd_pd_idx() argument
121 idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE; in get_sd_pd_idx()
122 idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE; in get_sd_pd_idx()
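get_sd_pd_idx() locates the running fpm address within the HMC layout by plain division: one segment descriptor (SD) covers I40IW_HMC_DIRECT_BP_SIZE (2 MB) and one page descriptor (PD) covers I40IW_HMC_PAGED_BP_SIZE (4 KB). A standalone sketch of lines 121-122 with an illustrative address; the two size macros below mirror the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define DIRECT_BP_SIZE 0x200000u	/* 2 MB per SD */
#define PAGED_BP_SIZE  0x1000u		/* 4 KB per PD */

int main(void)
{
	uint64_t next_fpm_addr = 0x403000;	/* hypothetical */
	uint32_t sd_idx = (uint32_t)next_fpm_addr / DIRECT_BP_SIZE;
	uint32_t pd_idx = (uint32_t)next_fpm_addr / PAGED_BP_SIZE;

	/* 0x403000 / 2 MB = SD 2; 0x403000 / 4 KB = PD 1027 */
	printf("sd_idx=%u pd_idx=%u\n", sd_idx, pd_idx);
	return 0;
}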
133 struct i40iw_hmc_pble_rsrc *pble_rsrc, in add_sd_direct() argument
157 chunk->fpm_addr = pble_rsrc->next_fpm_addr; in add_sd_direct()
239 static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr) in fpm_to_idx() argument
241 return (addr - (pble_rsrc->fpm_base_addr)) >> 3; in fpm_to_idx()
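fpm_to_idx() is the inverse of the << 3 scaling above: a byte offset from fpm_base_addr becomes a PBLE index. add_bp_pages() (lines 276-277) uses it to find the starting index of the chunk being added. A standalone restatement with a worked value:

#include <assert.h>
#include <stdint.h>

static inline uint32_t fpm_to_idx(uint64_t fpm_base_addr, uint64_t addr)
{
	return (uint32_t)((addr - fpm_base_addr) >> 3);
}

int main(void)
{
	/* 0x80 bytes past the base = 16 eight-byte PBLEs */
	assert(fpm_to_idx(0x100000, 0x100080) == 16);
	return 0;
}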
251 struct i40iw_hmc_pble_rsrc *pble_rsrc, in add_bp_pages() argument
276 fpm_to_idx(pble_rsrc, in add_bp_pages()
277 pble_rsrc->next_fpm_addr), in add_bp_pages()
312 chunk->fpm_addr = pble_rsrc->next_fpm_addr; in add_bp_pages()
325 struct i40iw_hmc_pble_rsrc *pble_rsrc) in add_pble_pool() argument
337 if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) in add_pble_pool()
339 if (pble_rsrc->next_fpm_addr & 0xfff) { in add_pble_pool()
340 i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr); in add_pble_pool()
347 chunk->fpm_addr = pble_rsrc->next_fpm_addr; in add_pble_pool()
348 get_sd_pd_idx(pble_rsrc, idx); in add_pble_pool()
352 pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT); in add_pble_pool()
366 pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr); in add_pble_pool()
371 ret_code = add_sd_direct(dev, pble_rsrc, &info); in add_pble_pool()
375 pble_rsrc->stats_direct_sds++; in add_pble_pool()
378 ret_code = add_bp_pages(dev, pble_rsrc, &info); in add_pble_pool()
382 pble_rsrc->stats_paged_sds++; in add_pble_pool()
385 if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr, in add_pble_pool()
391 pble_rsrc->next_fpm_addr += chunk->size; in add_pble_pool()
393 pble_rsrc->next_fpm_addr, chunk->size, chunk->size); in add_pble_pool()
394 pble_rsrc->unallocated_pble -= (chunk->size >> 3); in add_pble_pool()
395 list_add(&chunk->list, &pble_rsrc->pinfo.clist); in add_pble_pool()
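add_pble_pool() stamps the new chunk with the current fpm cursor (line 347, just as add_sd_direct() and add_bp_pages() do at lines 157 and 312), then registers the chunk's virtual range with the genpool. Note the trick implied by lines 385 and 490/526: the chunk's fpm address rides in the phys slot of gen_pool_add_virt(), so gen_pool_virt_to_phys() later returns fpm addresses rather than real physical ones. A kernel-style sketch of the registration and bookkeeping, with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/types.h>

static int demo_register_chunk(struct gen_pool *pool, void *vaddr,
			       u64 fpm_addr, size_t size,
			       u64 *next_fpm_addr, u32 *unallocated_pble)
{
	/* fpm_addr in the phys slot: gen_pool_virt_to_phys() will
	 * return it (plus offset) for addresses allocated from here. */
	if (gen_pool_add_virt(pool, (unsigned long)vaddr,
			      (phys_addr_t)fpm_addr, size, -1))
		return -ENOMEM;

	*next_fpm_addr += size;		/* advance the fpm cursor */
	*unallocated_pble -= size >> 3;	/* 8 bytes per PBLE */
	return 0;
}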
422 static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc, in free_lvl2() argument
431 pool = pble_rsrc->pinfo.pool; in free_lvl2()
453 static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, in get_lvl2_pble() argument
481 root->idx = fpm_to_idx(pble_rsrc, in get_lvl2_pble()
490 leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr)); in get_lvl2_pble()
497 pble_rsrc->stats_lvl2++; in get_lvl2_pble()
500 free_lvl2(pble_rsrc, palloc); in get_lvl2_pble()
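get_lvl2_pble() builds a two-level structure: a root page of 8-byte entries plus one or more leaf pages, each root entry holding the fpm index of its leaf (lines 481 and 490 show both indices derived through fpm_to_idx()). A kernel-style sketch of that layout, assuming full 512-entry leaves and a caller-provided array to remember leaf addresses for the unwind; demo_* names are illustrative:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/types.h>

#define DEMO_PBLE_PER_PAGE 512	/* 4096-byte page / 8-byte PBLE */

static int demo_build_lvl2(struct gen_pool *pool, u64 fpm_base_addr,
			   unsigned long *leaves, u32 leaf_cnt, u64 **rootp)
{
	u64 *root;
	u32 i;

	/* one root page of 8-byte entries; assumes leaf_cnt <= 512 */
	root = (u64 *)gen_pool_alloc(pool, PAGE_SIZE);
	if (!root)
		return -ENOMEM;

	for (i = 0; i < leaf_cnt; i++) {
		leaves[i] = gen_pool_alloc(pool, DEMO_PBLE_PER_PAGE << 3);
		if (!leaves[i]) {
			/* unwind as free_lvl2() would: leaves, then root */
			while (i--)
				gen_pool_free(pool, leaves[i],
					      DEMO_PBLE_PER_PAGE << 3);
			gen_pool_free(pool, (unsigned long)root, PAGE_SIZE);
			return -ENOMEM;
		}
		/* each root entry records the fpm index of its leaf */
		root[i] = (gen_pool_virt_to_phys(pool, leaves[i]) -
			   fpm_base_addr) >> 3;
	}
	*rootp = root;
	return 0;
}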
511 struct i40iw_hmc_pble_rsrc *pble_rsrc, in get_lvl1_pble() argument
518 pool = pble_rsrc->pinfo.pool; in get_lvl1_pble()
526 lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, in get_lvl1_pble()
529 pble_rsrc->stats_lvl1++; in get_lvl1_pble()
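get_lvl1_pble() is the simple case: one contiguous range from the pool, with its starting PBLE index recovered through gen_pool_virt_to_phys() (which returns fpm addresses, per the registration sketch above) and the >> 3 scaling. A sketch of lines 518-526 with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/types.h>

static int demo_get_lvl1(struct gen_pool *pool, u64 fpm_base_addr,
			 u32 cnt, u64 **addrp, u32 *idxp)
{
	unsigned long addr = gen_pool_alloc(pool, cnt << 3); /* 8 B/PBLE */

	if (!addr)
		return -ENOMEM;

	*addrp = (u64 *)addr;
	*idxp = (u32)((gen_pool_virt_to_phys(pool, addr) -
		       fpm_base_addr) >> 3);
	return 0;
}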
541 struct i40iw_hmc_pble_rsrc *pble_rsrc, in get_lvl1_lvl2_pble() argument
547 status = get_lvl1_pble(dev, pble_rsrc, palloc); in get_lvl1_lvl2_pble()
549 status = get_lvl2_pble(pble_rsrc, palloc, pool); in get_lvl1_lvl2_pble()
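get_lvl1_lvl2_pble() is a plain two-step fallback (lines 547-549): the contiguous level-1 path is tried first, and the two-level path only when it fails. Restated generically with illustrative function-pointer stand-ins for the two allocators:

#include <stdbool.h>

/* lvl1/lvl2 stand in for get_lvl1_pble()/get_lvl2_pble() */
static bool alloc_with_fallback(bool (*lvl1)(void *ctx),
				bool (*lvl2)(void *ctx), void *ctx)
{
	return lvl1(ctx) || lvl2(ctx);	/* level 2 runs only if level 1 fails */
}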
561 struct i40iw_hmc_pble_rsrc *pble_rsrc, in i40iw_get_pble() argument
570 pool = pble_rsrc->pinfo.pool; in i40iw_get_pble()
574 status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); in i40iw_get_pble()
579 status = add_pble_pool(dev, pble_rsrc); in i40iw_get_pble()
582 status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); in i40iw_get_pble()
588 pble_rsrc->stats_alloc_ok++; in i40iw_get_pble()
590 pble_rsrc->stats_alloc_fail++; in i40iw_get_pble()
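i40iw_get_pble() wraps the fallback in a grow-and-retry policy: if the existing pool cannot satisfy the request (line 574), add_pble_pool() appends a chunk (line 579) and the allocation is retried (line 582), with outcomes counted in stats_alloc_ok/stats_alloc_fail. A standalone sketch of the policy; the toy pool, its capacity, and the retry bound are illustrative stand-ins for the genpool and the driver's bounded retry:

#include <stdbool.h>
#include <stdio.h>

/* Toy pool: allocation succeeds once at least one chunk exists. */
struct toy_pool { int chunks, capacity; };

static bool try_alloc(struct toy_pool *p) { return p->chunks > 0; }

static bool grow_pool(struct toy_pool *p)
{
	if (p->chunks >= p->capacity)
		return false;		/* no more chunks can be added */
	p->chunks++;
	return true;
}

static int demo_get_pble(struct toy_pool *p, int max_tries)
{
	while (max_tries--) {
		if (try_alloc(p))
			return 0;	/* driver: stats_alloc_ok++ */
		if (!grow_pool(p))
			break;		/* driver: add_pble_pool() failed */
	}
	return -1;			/* driver: stats_alloc_fail++ */
}

int main(void)
{
	struct toy_pool p = { .chunks = 0, .capacity = 1 };

	printf("first alloc %s\n", demo_get_pble(&p, 2) ? "fails" : "succeeds");
	return 0;
}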
600 void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, in i40iw_free_pble() argument
605 pool = pble_rsrc->pinfo.pool; in i40iw_free_pble()
607 free_lvl2(pble_rsrc, palloc); in i40iw_free_pble()
611 pble_rsrc->stats_alloc_freed++; in i40iw_free_pble()
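The release path mirrors allocation: every range taken with gen_pool_alloc() goes back through gen_pool_free() with the same size, and a level-2 allocation returns its leaf pages and then the root page (free_lvl2, lines 422-431). A kernel-style sketch with illustrative demo_* names:

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/types.h>

struct demo_leaf {		/* stand-in for struct i40iw_pble_info */
	unsigned long addr;
	u32 cnt;		/* PBLEs in this leaf */
};

static void demo_free_lvl2(struct gen_pool *pool, unsigned long root,
			   struct demo_leaf *leaf, u32 leaf_cnt)
{
	u32 i;

	for (i = 0; i < leaf_cnt; i++)
		gen_pool_free(pool, leaf[i].addr, leaf[i].cnt << 3);
	gen_pool_free(pool, root, PAGE_SIZE);	/* root page of indices */
}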