/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/

cq.c
  109  cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,   in mlx4_cq_completion()
  128  struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;   in mlx4_cq_event()   local
  132  cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));   in mlx4_cq_event()
  220  struct mlx4_cq_table *cq_table = &priv->cq_table;   in __mlx4_cq_alloc_icm()   local
  223  *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);   in __mlx4_cq_alloc_icm()
  227  err = mlx4_table_get(dev, &cq_table->table, *cqn);   in __mlx4_cq_alloc_icm()
  231  err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);   in __mlx4_cq_alloc_icm()
  237  mlx4_table_put(dev, &cq_table->table, *cqn);   in __mlx4_cq_alloc_icm()
  240  mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);   in __mlx4_cq_alloc_icm()
  267  struct mlx4_cq_table *cq_table = &priv->cq_table;   in __mlx4_cq_free_icm()   local
  [all …]
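The cq.c hits above are the two halves of mlx4's device-wide CQ bookkeeping: __mlx4_cq_alloc_icm() reserves a CQN from a bitmap and pins its ICM context, while the interrupt paths resolve a CQN back to the software CQ through a radix tree, masking the CQN down to the table size. A minimal kernel-style sketch of that lookup pattern, with illustrative names (my_cq, my_cq_table) standing in for the real mlx4 structures:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cq {
        int cqn;
        void (*comp)(struct my_cq *cq);         /* completion callback */
};

struct my_cq_table {
        spinlock_t lock;                        /* serializes insert/delete */
        struct radix_tree_root tree;            /* CQN -> struct my_cq * */
};

static void my_cq_table_init(struct my_cq_table *t)
{
        spin_lock_init(&t->lock);
        INIT_RADIX_TREE(&t->tree, GFP_ATOMIC);
}

/* Publish a freshly created CQ so EQ interrupt handlers can find it. */
static int my_cq_table_add(struct my_cq_table *t, struct my_cq *cq, int num_cqs)
{
        int err;

        spin_lock_irq(&t->lock);
        err = radix_tree_insert(&t->tree, cq->cqn & (num_cqs - 1), cq);
        spin_unlock_irq(&t->lock);

        return err;
}

/* EQ interrupt path: map the CQN carried in the event entry to its CQ. */
static void my_cq_completion(struct my_cq_table *t, u32 cqn, int num_cqs)
{
        struct my_cq *cq;

        cq = radix_tree_lookup(&t->tree, cqn & (num_cqs - 1));
        if (cq)
                cq->comp(cq);
}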

main.c
  1608  err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,   in mlx4_init_cmpt_table()
  1629  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);   in mlx4_init_cmpt_table()
  1763  err = mlx4_init_icm_table(dev, &priv->cq_table.table,   in mlx4_init_icm()
  1807  mlx4_cleanup_icm_table(dev, &priv->cq_table.table);   in mlx4_init_icm()
  1832  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);   in mlx4_init_icm()
  1851  mlx4_cleanup_icm_table(dev, &priv->cq_table.table);   in mlx4_free_icms()
  1860  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);   in mlx4_free_icms()

mlx4.h
  895  struct mlx4_cq_table  cq_table;   member

/Linux-v5.4/drivers/infiniband/hw/hns/

hns_roce_cq.c
  90   struct hns_roce_cq_table *cq_table;   in hns_roce_cq_alloc()   local
  96   cq_table = &hr_dev->cq_table;   in hns_roce_cq_alloc()
  117  ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);   in hns_roce_cq_alloc()
  124  ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);   in hns_roce_cq_alloc()
  130  ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));   in hns_roce_cq_alloc()
  163  xa_erase(&cq_table->array, hr_cq->cqn);   in hns_roce_cq_alloc()
  166  hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);   in hns_roce_cq_alloc()
  169  hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);   in hns_roce_cq_alloc()
  184  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;   in hns_roce_free_cq()   local
  193  xa_erase(&cq_table->array, hr_cq->cqn);   in hns_roce_free_cq()
  [all …]
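In the hns_roce hits, the same bookkeeping is built from a bitmap for CQN allocation, a HEM table for the hardware context, and an xarray (rather than a radix tree) mapping CQN to the software CQ; note how hns_roce_cq_alloc() unwinds the three steps in reverse order on failure. A sketch of that allocate/publish/teardown shape under assumed names (my_cqn_alloc, my_hw_ctx_get and friends are placeholders, not hns_roce APIs):

#include <linux/xarray.h>
#include <linux/errno.h>

struct my_cq {
        unsigned long cqn;
};

struct my_cq_table {
        struct xarray array;            /* CQN -> struct my_cq *, xa_init()'d at probe */
};

/* Hypothetical helpers standing in for bitmap and context-memory management. */
int my_cqn_alloc(unsigned long *cqn);
void my_cqn_free(unsigned long cqn);
int my_hw_ctx_get(unsigned long cqn);
void my_hw_ctx_put(unsigned long cqn);

static int my_cq_alloc(struct my_cq_table *t, struct my_cq *cq)
{
        int ret;

        ret = my_cqn_alloc(&cq->cqn);           /* reserve a CQ number */
        if (ret)
                return ret;

        ret = my_hw_ctx_get(cq->cqn);           /* pin hardware context memory */
        if (ret)
                goto err_cqn;

        /* Publish the CQ so the event path can look it up by CQN. */
        ret = xa_err(xa_store(&t->array, cq->cqn, cq, GFP_KERNEL));
        if (ret)
                goto err_ctx;

        return 0;

err_ctx:
        my_hw_ctx_put(cq->cqn);
err_cqn:
        my_cqn_free(cq->cqn);
        return ret;
}

static void my_cq_free(struct my_cq_table *t, struct my_cq *cq)
{
        xa_erase(&t->array, cq->cqn);           /* hide from the event path first */
        my_hw_ctx_put(cq->cqn);
        my_cqn_free(cq->cqn);
}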

hns_roce_main.c
  646  ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,   in hns_roce_init_hem()
  758  hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);   in hns_roce_init_hem()

hns_roce_device.h
  1030  struct hns_roce_cq_table  cq_table;   member

hns_roce_hem.c
  1096  hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);   in hns_roce_cleanup_hem()

/Linux-v5.4/drivers/infiniband/hw/mthca/

mthca_cq.c
  221  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_completion()
  239  spin_lock(&dev->cq_table.lock);   in mthca_cq_event()
  241  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_event()
  245  spin_unlock(&dev->cq_table.lock);   in mthca_cq_event()
  258  spin_lock(&dev->cq_table.lock);   in mthca_cq_event()
  261  spin_unlock(&dev->cq_table.lock);   in mthca_cq_event()
  779  cq->cqn = mthca_alloc(&dev->cq_table.alloc);   in mthca_init_cq()
  784  err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);   in mthca_init_cq()
  848  spin_lock_irq(&dev->cq_table.lock);   in mthca_init_cq()
  849  if (mthca_array_set(&dev->cq_table.cq,   in mthca_init_cq()
  [all …]
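mthca predates both of those structures: its cq_table is a flat array indexed by the CQN masked to the table size and guarded by a spinlock, and the event path takes a reference under that lock before handling the event. A simplified sketch of the pattern (names are illustrative; the real driver additionally waits for outstanding references before a CQ is freed):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cq {
        int cqn;
        int refcount;                   /* protected by the table lock */
};

struct my_cq_table {
        spinlock_t lock;
        struct my_cq **cq;              /* flat array of num_cqs entries */
        int num_cqs;                    /* power of two */
};

/* Async event path: look up the CQ and hold a reference while handling it. */
static struct my_cq *my_cq_event_get(struct my_cq_table *t, u32 cqn)
{
        struct my_cq *cq;

        spin_lock(&t->lock);
        cq = t->cq[cqn & (t->num_cqs - 1)];
        if (cq)
                ++cq->refcount;
        spin_unlock(&t->lock);

        return cq;
}

static void my_cq_event_put(struct my_cq_table *t, struct my_cq *cq)
{
        spin_lock(&t->lock);
        --cq->refcount;
        spin_unlock(&t->lock);
}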

mthca_main.c
  476  mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,   in mthca_init_icm()
  481  if (!mdev->cq_table.table) {   in mthca_init_icm()
  527  mthca_free_icm_table(mdev, mdev->cq_table.table);   in mthca_init_icm()
  562  mthca_free_icm_table(mdev, mdev->cq_table.table);   in mthca_free_icms()

mthca_dev.h
  343  struct mthca_cq_table  cq_table;   member

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/

eq.c
  116  struct mlx5_cq_table *table = &eq->cq_table;   in mlx5_eq_cq_get()
  246  struct mlx5_cq_table *cq_table = &eq->cq_table;   in create_map_eq()   local
  258  memset(cq_table, 0, sizeof(*cq_table));   in create_map_eq()
  259  spin_lock_init(&cq_table->lock);   in create_map_eq()
  260  INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);   in create_map_eq()
  385  struct mlx5_cq_table *table = &eq->cq_table;   in mlx5_eq_add_cq()
  397  struct mlx5_cq_table *table = &eq->cq_table;   in mlx5_eq_del_cq()
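mlx5 differs from the older drivers mainly in scope: its cq_table lives inside each event queue rather than in the device, create_map_eq() zeroes and initializes it (spinlock plus GFP_ATOMIC radix tree) whenever an EQ is created, and mlx5_eq_add_cq()/mlx5_eq_del_cq() register CQs against the EQ that will deliver their events. A schematic sketch of that per-EQ layout, using placeholder names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct my_cq_table {
        spinlock_t lock;                /* protects the radix tree */
        struct radix_tree_root tree;    /* CQN -> CQ for this EQ only */
};

struct my_eq {
        int eqn;
        struct my_cq_table cq_table;    /* embedded per EQ, not per device */
};

/* Called from the EQ creation path, before the EQ can raise interrupts. */
static void my_eq_cq_table_init(struct my_eq *eq)
{
        struct my_cq_table *t = &eq->cq_table;

        memset(t, 0, sizeof(*t));
        spin_lock_init(&t->lock);
        INIT_RADIX_TREE(&t->tree, GFP_ATOMIC);
}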

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/lib/

eq.h
  26  struct mlx5_cq_table  cq_table;   member