Searched refs:num_pg (Results 1 – 10 of 10) sorted by relevance
301   static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)  in tcm_1d_limit() argument
303           if (__tcm_sizeof(a) < num_pg)  in tcm_1d_limit()
305           if (!num_pg)  in tcm_1d_limit()
308           a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;  in tcm_1d_limit()
309           a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);  in tcm_1d_limit()
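The hits above show tcm_1d_limit() clamping a 1D allocation to num_pg pages by turning a linear page count into the (x, y) end point of a row-major span. A minimal standalone sketch of that arithmetic, using a hypothetical point type in place of the real struct tcm_area:

/*
 * Sketch of the row-major end-point arithmetic from tcm_1d_limit() above.
 * The struct names and the demo values are illustrative only.
 */
#include <stdio.h>

struct pt { unsigned int x, y; };

/* End point of a span of num_pg pages starting at p0 in a grid `width` columns wide. */
static struct pt span_end(struct pt p0, unsigned int num_pg, unsigned int width)
{
	struct pt p1;

	p1.x = (p0.x + num_pg - 1) % width;          /* column of the last page */
	p1.y = p0.y + (p0.x + num_pg - 1) / width;   /* rows the span reaches into */
	return p1;
}

int main(void)
{
	struct pt start = { .x = 3, .y = 0 };
	struct pt end = span_end(start, 10, 8);      /* 10 pages, 8 pages per row */

	printf("end = (%u, %u)\n", end.x, end.y);    /* prints end = (4, 1) */
	return 0;
}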
620    for (i = 0; i < hdev->tm_info.num_pg; i++) {  in hclge_tm_pg_info_init()
676    for (i = 0; i < hdev->tm_info.num_pg; i++) {  in hclge_tm_pg_to_pri_map()
699    for (i = 0; i < hdev->tm_info.num_pg; i++) {  in hclge_tm_pg_shaper_cfg()
740    for (i = 0; i < hdev->tm_info.num_pg; i++) {  in hclge_tm_pg_dwrr_cfg()
1124   for (i = 0; i < hdev->tm_info.num_pg; i++) {  in hclge_tm_lvl2_schd_mode_cfg()
1434   hdev->tm_info.num_pg != 1)  in hclge_tm_schd_init()
352 u8 num_pg; /* It must be 1 if vNET-Base schd */ member
1349 hdev->tm_info.num_pg = 1; in hclge_configure()
117    __le16 num_pg;  member
317    pg_per_blk = le16_to_cpu(src->num_pg);  in nvme_nvm_setup_12()
367    geo->num_pg = le16_to_cpu(src->num_pg);  in nvme_nvm_setup_12()
1063   return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);  in nvm_dev_attr_show_12()
395    u16 num_pg;  member
612    if (pg == geo->num_pg) {  in nvm_next_ppa_in_chk()
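The nvm_next_ppa_in_chk() hit compares a running page index against geo->num_pg to detect the end of the current chunk. A minimal sketch of that wrap-around pattern, with illustrative types instead of the real lightnvm structures:

/*
 * Sketch of the page-index wrap-around visible in nvm_next_ppa_in_chk():
 * advance the page, and once it reaches num_pg, reset it and tell the
 * caller to move on to the next chunk. Names and values are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool next_pg(uint16_t *pg, uint16_t num_pg)
{
	(*pg)++;
	if (*pg == num_pg) {	/* last page of the chunk consumed */
		*pg = 0;
		return true;	/* caller should advance to the next chunk */
	}
	return false;
}

int main(void)
{
	uint16_t pg = 510;
	const uint16_t num_pg = 512;

	printf("%d\n", next_pg(&pg, num_pg));	/* 0: pg -> 511 */
	printf("%d\n", next_pg(&pg, num_pg));	/* 1: wrapped, pg -> 0 */
	return 0;
}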
479 unsigned int num_pg; member
1057   unsigned long max_pg, num_pg, new_pg, old_pg;  in mm_account_pinned_pages() local
1063   num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */  in mm_account_pinned_pages()
1069   new_pg = old_pg + num_pg;  in mm_account_pinned_pages()
1077   mmp->num_pg = num_pg;  in mm_account_pinned_pages()
1079   mmp->num_pg += num_pg;  in mm_account_pinned_pages()
1089   atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);  in mm_unaccount_pinned_pages()
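In mm_account_pinned_pages(), num_pg is a worst-case bound of (size >> PAGE_SHIFT) + 2, since an unaligned buffer of that size can touch one extra page at each end, and the new total is checked against the locked-memory limit. A self-contained sketch of that arithmetic, with an illustrative limit parameter in place of the real rlimit and atomic accounting:

/*
 * Sketch of the worst-case pinned-page accounting shown above. PAGE_SHIFT,
 * the limit handling, and the demo values are illustrative; the kernel code
 * uses RLIMIT_MEMLOCK and atomic updates of user->locked_vm.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static bool account_pinned(size_t size, unsigned long already_pinned_pg,
			   unsigned long memlock_limit_bytes)
{
	/* Worst case: an unaligned buffer touches one extra page at each end. */
	unsigned long num_pg = (size >> PAGE_SHIFT) + 2;
	unsigned long max_pg = memlock_limit_bytes >> PAGE_SHIFT;
	unsigned long new_pg = already_pinned_pg + num_pg;

	return new_pg <= max_pg;	/* allowed only if the limit still holds */
}

int main(void)
{
	/* 64 KiB buffer against an 8 MiB limit, nothing pinned yet: prints 1. */
	printf("allowed: %d\n", account_pinned(64 * 1024, 0, 8UL << 20));
	return 0;
}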
897    ppa.g.pg = geo->num_pg - 1;  in nvm_bb_chunk_scan()
926    for (pg = 0; pg < geo->num_pg; pg++) {  in nvm_bb_chunk_scan()
1246 ppa->g.pg < geo->num_pg && in pblk_boundary_ppa_checks()
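The last results check a physical page address against the device geometry, including the ppa->g.pg < geo->num_pg test in pblk_boundary_ppa_checks(). A minimal sketch of such a bounds check, using hypothetical struct layouts that only mirror the fields visible in these snippets:

/*
 * Sketch of a geometry bounds check in the spirit of
 * pblk_boundary_ppa_checks(). The struct definitions and demo values here
 * are illustrative, not the real lightnvm nvm_geo/ppa_addr layouts.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct geo  { uint16_t num_ch, num_lun, num_blk, num_pg; };
struct addr { uint16_t ch, lun, blk, pg; };

static bool ppa_in_bounds(const struct addr *a, const struct geo *g)
{
	return a->ch  < g->num_ch  &&
	       a->lun < g->num_lun &&
	       a->blk < g->num_blk &&
	       a->pg  < g->num_pg;	/* the num_pg comparison from the hits above */
}

int main(void)
{
	struct geo g = { .num_ch = 8, .num_lun = 4, .num_blk = 1024, .num_pg = 512 };
	struct addr a = { .ch = 1, .lun = 0, .blk = 10, .pg = 511 };

	printf("in bounds: %d\n", ppa_in_bounds(&a, &g));	/* prints 1 */
	return 0;
}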