
Searched refs:cq_table (Results 1 – 16 of 16) sorted by relevance

/Linux-v6.1/drivers/net/ethernet/mellanox/mlx4/
cq.c  109  cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion()
128 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; in mlx4_cq_event() local
132 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_event()
220 struct mlx4_cq_table *cq_table = &priv->cq_table; in __mlx4_cq_alloc_icm() local
223 *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); in __mlx4_cq_alloc_icm()
227 err = mlx4_table_get(dev, &cq_table->table, *cqn); in __mlx4_cq_alloc_icm()
231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); in __mlx4_cq_alloc_icm()
237 mlx4_table_put(dev, &cq_table->table, *cqn); in __mlx4_cq_alloc_icm()
240 mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); in __mlx4_cq_alloc_icm()
267 struct mlx4_cq_table *cq_table = &priv->cq_table; in __mlx4_cq_free_icm() local
[all …]
main.c  1609  err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, in mlx4_init_cmpt_table()
1630 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_cmpt_table()
1764 err = mlx4_init_icm_table(dev, &priv->cq_table.table, in mlx4_init_icm()
1808 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_init_icm()
1833 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_icm()
1852 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_free_icms()
1861 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_free_icms()
mlx4.h  896  struct mlx4_cq_table cq_table; member
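
The mlx4 hits above show the driver keeping its completion queues in a radix tree keyed by CQN: the completion and event paths look the CQ up with the CQN masked by (num_cqs - 1), while __mlx4_cq_alloc_icm() pairs a bitmap-allocated CQN with mlx4_table_get() on the CQ context table and the cmpt_table. The sketch below models only the masked-lookup half of that in plain user-space C; the fixed power-of-two size, the array-backed table and all names are assumptions for illustration, not the driver's structures.

    #include <stdio.h>

    #define NUM_CQS 256                      /* assumed power-of-two CQ count */

    struct cq {                              /* stand-in for the driver's CQ object */
        int cqn;
        void (*comp)(struct cq *cq);         /* completion callback */
    };

    struct cq_table {                        /* stand-in for mlx4's radix-tree table */
        struct cq *slots[NUM_CQS];
    };

    /* Look up a CQ by CQN, masking into the table as the mlx4 handlers do. */
    static struct cq *cq_lookup(struct cq_table *t, unsigned int cqn)
    {
        return t->slots[cqn & (NUM_CQS - 1)];
    }

    static void cq_completion(struct cq_table *t, unsigned int cqn)
    {
        struct cq *cq = cq_lookup(t, cqn);

        if (!cq) {                           /* unknown CQN: nothing to dispatch */
            fprintf(stderr, "completion for unknown CQN 0x%x\n", cqn);
            return;
        }
        cq->comp(cq);
    }

The masking works because the CQ count is a power of two, so the low bits of the CQN index the table directly; the real driver stores CQs sparsely in a radix tree rather than a dense array.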
/Linux-v6.1/drivers/infiniband/hw/hns/
hns_roce_cq.c  60  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; in alloc_cqn() local
65 mutex_lock(&cq_table->bank_mutex); in alloc_cqn()
66 bankid = get_least_load_bankid_for_cq(cq_table->bank); in alloc_cqn()
67 bank = &cq_table->bank[bankid]; in alloc_cqn()
71 mutex_unlock(&cq_table->bank_mutex); in alloc_cqn()
78 mutex_unlock(&cq_table->bank_mutex); in alloc_cqn()
91 struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; in free_cqn() local
94 bank = &cq_table->bank[get_cq_bankid(cqn)]; in free_cqn()
98 mutex_lock(&cq_table->bank_mutex); in free_cqn()
100 mutex_unlock(&cq_table->bank_mutex); in free_cqn()
[all …]
hns_roce_main.c  700  ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, in hns_roce_init_hem()
789 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_init_hem()
hns_roce_device.h  939  struct hns_roce_cq_table cq_table; member
hns_roce_hem.c  942  hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_cleanup_hem()
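
The hns_roce hits show CQN allocation split across banks: alloc_cqn() takes bank_mutex, picks the bank with the fewest CQs in use, and allocates an ID from it, while free_cqn() recovers the bank from the CQN and returns the ID under the same mutex. A minimal user-space model of that least-loaded-bank scheme follows; the bank count, the ID packing and the pthread mutex are assumptions, not the driver's layout.

    #include <pthread.h>

    #define NUM_BANKS 4                      /* assumed bank count */
    #define IDS_PER_BANK 64                  /* assumed IDs per bank */

    struct cq_bank {
        unsigned int inuse;                  /* CQs currently allocated from this bank */
        unsigned char used[IDS_PER_BANK];    /* simple in-use map */
    };

    struct cq_table {
        struct cq_bank bank[NUM_BANKS];
        pthread_mutex_t bank_mutex;          /* initialise with pthread_mutex_init() */
    };

    /* Pick the bank with the fewest allocated CQs, as alloc_cqn() does. */
    static unsigned int least_loaded_bank(struct cq_table *t)
    {
        unsigned int i, best = 0;

        for (i = 1; i < NUM_BANKS; i++)
            if (t->bank[i].inuse < t->bank[best].inuse)
                best = i;
        return best;
    }

    /* Allocate a CQN; returns -1 when the chosen bank is full. */
    static int alloc_cqn(struct cq_table *t)
    {
        int cqn = -1;
        unsigned int id, bankid;

        pthread_mutex_lock(&t->bank_mutex);
        bankid = least_loaded_bank(t);
        for (id = 0; id < IDS_PER_BANK; id++) {
            if (!t->bank[bankid].used[id]) {
                t->bank[bankid].used[id] = 1;
                t->bank[bankid].inuse++;
                cqn = id * NUM_BANKS + bankid;   /* assumed packing: bank in the low bits */
                break;
            }
        }
        pthread_mutex_unlock(&t->bank_mutex);
        return cqn;
    }

    /* Return a CQN to the bank encoded in its low bits, as free_cqn() does. */
    static void free_cqn(struct cq_table *t, int cqn)
    {
        unsigned int bankid = cqn % NUM_BANKS;

        pthread_mutex_lock(&t->bank_mutex);
        t->bank[bankid].used[cqn / NUM_BANKS] = 0;
        t->bank[bankid].inuse--;
        pthread_mutex_unlock(&t->bank_mutex);
    }

Spreading CQNs across banks keeps per-bank load roughly even; the exact way the real driver maps a CQN back to its bank is not shown in these hits, so the low-bit packing here is illustrative only.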
/Linux-v6.1/drivers/infiniband/hw/mthca/
mthca_cq.c  221  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
239 spin_lock(&dev->cq_table.lock); in mthca_cq_event()
241 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_event()
245 spin_unlock(&dev->cq_table.lock); in mthca_cq_event()
258 spin_lock(&dev->cq_table.lock); in mthca_cq_event()
261 spin_unlock(&dev->cq_table.lock); in mthca_cq_event()
779 cq->cqn = mthca_alloc(&dev->cq_table.alloc); in mthca_init_cq()
784 err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); in mthca_init_cq()
850 spin_lock_irq(&dev->cq_table.lock); in mthca_init_cq()
851 err = mthca_array_set(&dev->cq_table.cq, in mthca_init_cq()
[all …]
mthca_main.c  476  mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, in mthca_init_icm()
481 if (!mdev->cq_table.table) { in mthca_init_icm()
527 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_init_icm()
562 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_free_icms()
mthca_dev.h  342  struct mthca_cq_table cq_table; member
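
mthca takes a flat-array approach: dev->cq_table.cq is an mthca_array guarded by cq_table.lock, the completion and event handlers mask the CQN by (num_cqs - 1) before mthca_array_get(), and mthca_init_cq() allocates a CQN, takes a reference on the ICM entry with mthca_table_get(), and then publishes the CQ into the array under the lock. The sketch below shows only the publish/lookup-under-lock step in user-space C, with a pthread mutex standing in for the driver's spinlock; the names and sizes are illustrative.

    #include <pthread.h>

    #define NUM_CQS 256                      /* assumed power-of-two CQ count */

    struct cq {
        int cqn;
    };

    struct cq_table {
        struct cq *cq[NUM_CQS];              /* stand-in for mthca_array */
        pthread_mutex_t lock;                /* stands in for the driver's spinlock */
    };

    /* Publish a newly initialised CQ so the event path can find it. */
    static int cq_table_set(struct cq_table *t, struct cq *cq)
    {
        int err = 0;

        pthread_mutex_lock(&t->lock);
        if (t->cq[cq->cqn & (NUM_CQS - 1)])
            err = -1;                        /* slot already occupied */
        else
            t->cq[cq->cqn & (NUM_CQS - 1)] = cq;
        pthread_mutex_unlock(&t->lock);
        return err;
    }

    /* Event-path lookup, mirroring the masked access in mthca_cq_event(). */
    static struct cq *cq_table_get(struct cq_table *t, unsigned int cqn)
    {
        struct cq *cq;

        pthread_mutex_lock(&t->lock);
        cq = t->cq[cqn & (NUM_CQS - 1)];
        pthread_mutex_unlock(&t->lock);
        return cq;
    }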
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/
eq.c  93  struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_cq_get()
266 struct mlx5_cq_table *cq_table = &eq->cq_table; in create_map_eq() local
279 memset(cq_table, 0, sizeof(*cq_table)); in create_map_eq()
280 spin_lock_init(&cq_table->lock); in create_map_eq()
281 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); in create_map_eq()
406 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_add_cq()
418 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_del_cq()
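
In mlx5 the table is per event queue: create_map_eq() zeroes eq->cq_table, initialises its spinlock and its radix tree (the tree with GFP_ATOMIC), and mlx5_eq_add_cq()/mlx5_eq_del_cq()/mlx5_eq_cq_get() then operate on that per-EQ tree. A rough user-space model of "one CQ map per EQ" follows; the array-backed map and the eq/cq types are stand-ins for the radix tree and the driver structures, chosen only to show the ownership relationship.

    #include <pthread.h>

    #define MAX_CQS_PER_EQ 128               /* assumed per-EQ capacity */

    struct cq {
        unsigned int cqn;
    };

    struct cq_table {                        /* per-EQ map, stand-in for the radix tree */
        struct cq *map[MAX_CQS_PER_EQ];
        pthread_mutex_t lock;
    };

    struct eq {
        unsigned int eqn;
        struct cq_table cq_table;            /* each EQ owns its own table */
    };

    /* Register a CQ with the EQ that will deliver its completions. */
    static int eq_add_cq(struct eq *eq, struct cq *cq)
    {
        unsigned int idx = cq->cqn % MAX_CQS_PER_EQ;
        int err = 0;

        pthread_mutex_lock(&eq->cq_table.lock);
        if (eq->cq_table.map[idx])
            err = -1;                        /* collision in this simplified map */
        else
            eq->cq_table.map[idx] = cq;
        pthread_mutex_unlock(&eq->cq_table.lock);
        return err;
    }

    /* Interrupt-path lookup, in the spirit of mlx5_eq_cq_get(). */
    static struct cq *eq_cq_get(struct eq *eq, unsigned int cqn)
    {
        struct cq *cq;

        pthread_mutex_lock(&eq->cq_table.lock);
        cq = eq->cq_table.map[cqn % MAX_CQS_PER_EQ];
        pthread_mutex_unlock(&eq->cq_table.lock);
        return cq;
    }

Scoping the table to the EQ avoids a single device-wide lookup structure: each interrupt handler only ever searches the CQs attached to its own EQ.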
/Linux-v6.1/drivers/net/ethernet/microsoft/mana/
hw_channel.c  630  gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *)); in mana_hwc_establish_channel()
631 if (!gc->cq_table) in mana_hwc_establish_channel()
634 gc->cq_table[cq->id] = cq; in mana_hwc_establish_channel()
777 vfree(gc->cq_table); in mana_hwc_destroy_channel()
778 gc->cq_table = NULL; in mana_hwc_destroy_channel()
gdma_main.c  337  cq = gc->cq_table[cq_id]; in mana_gd_process_eqe()
621 if (!gc->cq_table[id]) in mana_gd_destroy_cq()
624 gc->cq_table[id] = NULL; in mana_gd_destroy_cq()
gdma.h  344  struct gdma_queue **cq_table; member
mana_en.c  1502  gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
1733 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
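
mana uses the simplest scheme of the lot: gc->cq_table is a vzalloc()'d array of struct gdma_queue pointers sized by max_num_cqs, each created CQ is dropped into the slot for its hardware id, mana_gd_process_eqe() indexes the array directly with the cq_id from the EQE, mana_gd_destroy_cq() clears the slot, and mana_hwc_destroy_channel() vfree()s the whole array. A plain-C equivalent of that flat table is sketched below; calloc/free replace vzalloc/vfree and the *_stub names are hypothetical.

    #include <stdlib.h>

    struct gdma_queue_stub {                 /* stand-in for struct gdma_queue */
        unsigned int id;
    };

    struct gdma_context_stub {
        unsigned int max_num_cqs;
        struct gdma_queue_stub **cq_table;   /* flat array indexed by CQ id */
    };

    /* Allocate the CQ table once, sized for every possible CQ id. */
    static int cq_table_create(struct gdma_context_stub *gc)
    {
        gc->cq_table = calloc(gc->max_num_cqs, sizeof(*gc->cq_table));
        return gc->cq_table ? 0 : -1;
    }

    /* Register and look up CQs by their hardware id, as the EQE handler does. */
    static void cq_table_set(struct gdma_context_stub *gc, struct gdma_queue_stub *cq)
    {
        gc->cq_table[cq->id] = cq;
    }

    static struct gdma_queue_stub *cq_table_get(struct gdma_context_stub *gc,
                                                unsigned int cq_id)
    {
        return cq_id < gc->max_num_cqs ? gc->cq_table[cq_id] : NULL;
    }

    /* Drop a single CQ, then free the whole table at channel teardown. */
    static void cq_table_clear(struct gdma_context_stub *gc, unsigned int cq_id)
    {
        gc->cq_table[cq_id] = NULL;
    }

    static void cq_table_destroy(struct gdma_context_stub *gc)
    {
        free(gc->cq_table);
        gc->cq_table = NULL;
    }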
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/lib/
eq.h  28  struct mlx5_cq_table cq_table; member