/Linux-v5.10/drivers/infiniband/hw/mthca/

mthca_eq.c

    397  if (dev->eq_table.clr_mask)  in mthca_tavor_interrupt()
    398      writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);  in mthca_tavor_interrupt()
    408      if (ecr & dev->eq_table.eq[i].eqn_mask) {  in mthca_tavor_interrupt()
    409          if (mthca_eq_int(dev, &dev->eq_table.eq[i]))  in mthca_tavor_interrupt()
    410              tavor_set_eq_ci(dev, &dev->eq_table.eq[i],  in mthca_tavor_interrupt()
    411                              dev->eq_table.eq[i].cons_index);  in mthca_tavor_interrupt()
    412          tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);  in mthca_tavor_interrupt()
    437  if (dev->eq_table.clr_mask)  in mthca_arbel_interrupt()
    438      writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);  in mthca_arbel_interrupt()
    441      if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {  in mthca_arbel_interrupt()
    [all …]

mthca_main.c

    682  mdev->eq_table.inta_pin = adapter.inta_pin;  in mthca_init_hca()
    762      dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);  in mthca_setup_hca()
    859  mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector =  in mthca_enable_msi_x()
    861  mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector =  in mthca_enable_msi_x()
    863  mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector =  in mthca_enable_msi_x()

mthca_cq.c

    833  cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);  in mthca_init_cq()
    834  cq_context->comp_eqn  = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);  in mthca_init_cq()
    934  synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);  in mthca_free_cq()

mthca_dev.h

    342  struct mthca_eq_table  eq_table;  member
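The mthca_tavor_interrupt() hits above show a common EQ-table dispatch shape: clear the interrupt, test each event queue's bit in the event cause register, let the EQ handler consume events, publish the consumer index, and rearm the EQ. Below is a minimal userspace sketch of that pattern under my own assumptions; the types, read_ecr(), and the handler names are illustrative stand-ins, not the mthca driver's actual code.

```c
/* Simplified model of the Tavor-style EQ dispatch seen in mthca_tavor_interrupt().
 * All types and helpers here are illustrative stand-ins, not the mthca API. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_EQS 3

struct fake_eq {
	uint32_t eqn_mask;    /* bit this EQ owns in the event cause register */
	uint32_t cons_index;  /* consumer index advanced as events are handled */
};

struct fake_dev {
	struct fake_eq eq[NUM_EQS];
};

/* Pretend events arrived on EQ 0 and EQ 2. */
static uint32_t read_ecr(void) { return (1u << 0) | (1u << 2); }

/* Returns true if any event was consumed (mirrors mthca_eq_int()'s contract). */
static bool handle_eq(struct fake_eq *eq)
{
	eq->cons_index++;          /* consume one event for the demo */
	return true;
}

static void set_eq_ci(struct fake_eq *eq) { printf("EQ ci -> %u\n", (unsigned)eq->cons_index); }
static void rearm_eq(int eqn)             { printf("rearm EQ %d\n", eqn); }

int main(void)
{
	struct fake_dev dev = { .eq = {
		{ .eqn_mask = 1u << 0 }, { .eqn_mask = 1u << 1 }, { .eqn_mask = 1u << 2 } } };
	uint32_t ecr = read_ecr();

	for (int i = 0; i < NUM_EQS; ++i) {
		if (!(ecr & dev.eq[i].eqn_mask))
			continue;                  /* no events pending on this EQ */
		if (handle_eq(&dev.eq[i]))
			set_eq_ci(&dev.eq[i]);     /* publish new consumer index */
		rearm_eq(i);                       /* request the next interrupt */
	}
	return 0;
}
```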
/Linux-v5.10/drivers/net/ethernet/mellanox/mlx5/core/

eq.c

    228  eqt = dev->priv.eq_table;  in mlx5_eq_async_int()
    259  struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;  in mlx5_cmd_eq_recover()
    374  struct mlx5_eq_table *eq_table = dev->priv.eq_table;  in mlx5_eq_enable()  local
    377  err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);  in mlx5_eq_enable()
    396  struct mlx5_eq_table *eq_table = dev->priv.eq_table;  in mlx5_eq_disable()  local
    398  mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);  in mlx5_eq_disable()
    453  struct mlx5_eq_table *eq_table;  in mlx5_eq_table_init()  local
    456  eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);  in mlx5_eq_table_init()
    457  if (!eq_table)  in mlx5_eq_table_init()
    460  dev->priv.eq_table = eq_table;  in mlx5_eq_table_init()
    [all …]
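The mlx5_eq_table_init() lines show the usual setup shape: allocate a zeroed table, bail out on allocation failure, and park the pointer in the device's private state so later paths (enable/disable, async handlers) can reach it. A minimal userspace sketch of that pattern follows, with calloc() standing in for kvzalloc() and demo_* types that are not mlx5's real definitions.

```c
/* Minimal model of the mlx5_eq_table_init() allocation pattern.
 * Types are illustrative; calloc() stands in for kvzalloc(..., GFP_KERNEL). */
#include <stdlib.h>
#include <errno.h>

struct demo_eq_table { int num_async_eqs; };
struct demo_dev      { struct { struct demo_eq_table *eq_table; } priv; };

static int demo_eq_table_init(struct demo_dev *dev)
{
	struct demo_eq_table *eq_table;

	eq_table = calloc(1, sizeof(*eq_table));
	if (!eq_table)
		return -ENOMEM;            /* propagate allocation failure */

	dev->priv.eq_table = eq_table;     /* stash for later enable/disable paths */
	return 0;
}

static void demo_eq_table_cleanup(struct demo_dev *dev)
{
	free(dev->priv.eq_table);
	dev->priv.eq_table = NULL;
}

int main(void)
{
	struct demo_dev dev = { 0 };

	if (demo_eq_table_init(&dev))
		return 1;
	demo_eq_table_cleanup(&dev);
	return 0;
}
```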
/Linux-v5.10/drivers/net/ethernet/mellanox/mlx4/

eq.c

    241  struct mlx4_eq *eq = &priv->eq_table.eq[vec];  in mlx4_set_eq_affinity_hint()
    855  writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);  in mlx4_interrupt()
    858      work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);  in mlx4_interrupt()
    941  if (!priv->eq_table.uar_map[index]) {  in mlx4_get_eq_uar()
    942      priv->eq_table.uar_map[index] =  in mlx4_get_eq_uar()
    947      if (!priv->eq_table.uar_map[index]) {  in mlx4_get_eq_uar()
    954  return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);  in mlx4_get_eq_uar()
    963      if (priv->eq_table.uar_map[i]) {  in mlx4_unmap_uar()
    964          iounmap(priv->eq_table.uar_map[i]);  in mlx4_unmap_uar()
    965          priv->eq_table.uar_map[i] = NULL;  in mlx4_unmap_uar()
    [all …]

cq.c

    383  cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;  in mlx4_cq_alloc()
    416      &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;  in mlx4_cq_alloc()
    420  cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;  in mlx4_cq_alloc()
    449  synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);  in mlx4_cq_free()
    450  if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=  in mlx4_cq_free()
    451      priv->eq_table.eq[MLX4_EQ_ASYNC].irq)  in mlx4_cq_free()
    452      synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_cq_free()

main.c

    1618  err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,  in mlx4_init_cmpt_table()
    1680  err = mlx4_init_icm_table(dev, &priv->eq_table.table,  in mlx4_init_icm()
    1828  mlx4_cleanup_icm_table(dev, &priv->eq_table.table);  in mlx4_init_icm()
    1831  mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);  in mlx4_init_icm()
    1858  mlx4_cleanup_icm_table(dev, &priv->eq_table.table);  in mlx4_free_icms()
    1859  mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);  in mlx4_free_icms()
    2468  priv->eq_table.inta_pin = adapter.inta_pin;  in mlx4_init_hca()
    2770      priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_setup_hca()
    2774      priv->eq_table.eq[MLX4_EQ_ASYNC].irq);  in mlx4_setup_hca()
    2919  eq = &priv->eq_table.eq[eqn];  in mlx4_init_affinity_hint()
    [all …]

mlx4.h

    896  struct mlx4_eq_table  eq_table;  member
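mlx4_get_eq_uar() and mlx4_unmap_uar() above show a lazy-mapping cache: a UAR page is mapped on first use, cached in eq_table.uar_map[], the per-EQ doorbell address is computed from the EQ number, and teardown unmaps whatever got cached. A rough userspace model of that bookkeeping follows; map_page()/unmap_page() stand in for ioremap()/iounmap(), the index calculation is a guess, and only the 0x800 + 8 * (eqn % 4) offset is taken from the snippet above.

```c
/* Rough model of the lazy UAR-map cache in mlx4_get_eq_uar()/mlx4_unmap_uar().
 * map_page()/unmap_page() stand in for ioremap()/iounmap(); the layout below is
 * illustrative, not the mlx4 driver's real one. */
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

#define UAR_SLOTS  8
#define UAR_PAGE   4096

static uint8_t *uar_map[UAR_SLOTS];

static uint8_t *map_page(void)         { return calloc(1, UAR_PAGE); }
static void     unmap_page(uint8_t *p) { free(p); }

/* The eqn % 4 in the snippet suggests four EQ doorbells share a UAR page. */
static uint8_t *get_eq_doorbell(int eqn)
{
	int index = (eqn / 4) % UAR_SLOTS;

	if (!uar_map[index]) {
		uar_map[index] = map_page();   /* map on first use and cache it */
		if (!uar_map[index])
			return NULL;
	}
	return uar_map[index] + 0x800 + 8 * (eqn % 4);
}

static void unmap_all(void)
{
	for (int i = 0; i < UAR_SLOTS; ++i) {
		if (uar_map[i]) {
			unmap_page(uar_map[i]);
			uar_map[i] = NULL;
		}
	}
}

int main(void)
{
	uint8_t *db = get_eq_doorbell(5);

	if (db)
		printf("EQ 5 doorbell at page offset 0x%lx\n",
		       (unsigned long)(db - uar_map[1]));
	unmap_all();
	return 0;
}
```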
/Linux-v5.10/drivers/infiniband/hw/hns/

hns_roce_hw_v1.c

    4104  void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];  in hns_roce_v1_enable_eq()
    4129  void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];  in hns_roce_v1_create_eq()
    4213  struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;  in hns_roce_v1_init_eq_table()  local
    4224  eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);  in hns_roce_v1_init_eq_table()
    4225  if (!eq_table->eq)  in hns_roce_v1_init_eq_table()
    4228  eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),  in hns_roce_v1_init_eq_table()
    4230  if (!eq_table->eqc_base) {  in hns_roce_v1_init_eq_table()
    4236      eq = &eq_table->eq[i];  in hns_roce_v1_init_eq_table()
    4244      eq_table->eqc_base[i] = hr_dev->reg_base +  in hns_roce_v1_init_eq_table()
    4256      eq_table->eqc_base[i] = hr_dev->reg_base +  in hns_roce_v1_init_eq_table()
    [all …]

hns_roce_hw_v2.c

    5934  struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;  in __hns_roce_request_irq()  local
    5967      ret = request_irq(eq_table->eq[j - other_num].irq,  in __hns_roce_request_irq()
    5970                        &eq_table->eq[j - other_num]);  in __hns_roce_request_irq()
    5972      ret = request_irq(eq_table->eq[j - other_num].irq,  in __hns_roce_request_irq()
    5975                        &eq_table->eq[j - other_num]);  in __hns_roce_request_irq()
    5989      free_irq(eq_table->eq[j - other_num].irq,  in __hns_roce_request_irq()
    5990               &eq_table->eq[j - other_num]);  in __hns_roce_request_irq()
    6012  free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);  in __hns_roce_free_irq()
    6020  struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;  in hns_roce_v2_init_eq_table()  local
    6039  eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);  in hns_roce_v2_init_eq_table()
    [all …]

hns_roce_cq.c

    134  synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);  in free_cqc()

hns_roce_device.h

    999  struct hns_roce_eq_table  eq_table;  member
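The __hns_roce_request_irq() hits illustrate the usual register-then-rollback shape: request one IRQ per EQ in a loop, and if any request fails, free every IRQ already acquired before returning the error. A small userspace model of that unwind logic follows; demo_request()/demo_free() are stand-ins for request_irq()/free_irq(), and the failure point is contrived.

```c
/* Model of the unwind pattern in __hns_roce_request_irq(): acquire one resource
 * per EQ, and on failure release everything acquired so far before bailing out.
 * demo_request()/demo_free() stand in for request_irq()/free_irq(). */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

#define EQ_NUM 4

static bool acquired[EQ_NUM];

static int demo_request(int i)
{
	if (i == 2)                 /* pretend the third request fails */
		return -EBUSY;
	acquired[i] = true;
	printf("requested irq for EQ %d\n", i);
	return 0;
}

static void demo_free(int i)
{
	acquired[i] = false;
	printf("freed irq for EQ %d\n", i);
}

static int request_all(void)
{
	int i, ret;

	for (i = 0; i < EQ_NUM; ++i) {
		ret = demo_request(i);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (--i >= 0)            /* roll back in reverse acquisition order */
		demo_free(i);
	return ret;
}

int main(void)
{
	return request_all() ? 1 : 0;
}
```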
/Linux-v5.10/drivers/infiniband/hw/mlx4/

main.c

    2425  ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,  in mlx4_ib_alloc_eqs()
    2426                            sizeof(ibdev->eq_table[0]), GFP_KERNEL);  in mlx4_ib_alloc_eqs()
    2427  if (!ibdev->eq_table)  in mlx4_ib_alloc_eqs()
    2435      ibdev->eq_table[eq] = total_eqs;  in mlx4_ib_alloc_eqs()
    2437          &ibdev->eq_table[eq]))  in mlx4_ib_alloc_eqs()
    2440          ibdev->eq_table[eq] = -1;  in mlx4_ib_alloc_eqs()
    2445      ibdev->eq_table[i++] = -1)  in mlx4_ib_alloc_eqs()
    2458  if (!ibdev->eq_table)  in mlx4_ib_free_eqs()
    2465      mlx4_release_eq(dev, ibdev->eq_table[i]);  in mlx4_ib_free_eqs()
    2467  kfree(ibdev->eq_table);  in mlx4_ib_free_eqs()
    [all …]

cq.c

    244  if (dev->eq_table)  in mlx4_ib_create_cq()
    245      vector = dev->eq_table[vector % ibdev->num_comp_vectors];  in mlx4_ib_create_cq()

mlx4_ib.h

    629  int  *eq_table;  member
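In mlx4_ib, eq_table is a plain int array mapping completion-vector slots to EQ numbers: mlx4_ib_create_cq() wraps the requested vector with vector % num_comp_vectors, and slots whose EQ assignment failed hold -1. A tiny sketch of that lookup follows; pick_eq() and the sample values are illustrative, not the driver's code.

```c
/* Sketch of the mlx4_ib-style vector lookup: an int array maps completion-vector
 * slots to EQ numbers, and -1 marks a slot whose EQ assignment failed. */
#include <stdio.h>

static int pick_eq(const int *eq_table, int num_comp_vectors, int requested)
{
	if (!eq_table || num_comp_vectors <= 0)
		return -1;                               /* no table: caller falls back */

	return eq_table[requested % num_comp_vectors];   /* may still be -1 */
}

int main(void)
{
	int eq_table[4] = { 8, 9, -1, 11 };

	printf("vector 6 -> EQ %d\n", pick_eq(eq_table, 4, 6));   /* 6 % 4 = 2 -> -1 */
	printf("vector 5 -> EQ %d\n", pick_eq(eq_table, 4, 5));   /* 5 % 4 = 1 -> 9  */
	return 0;
}
```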
/Linux-v5.10/include/linux/mlx5/

driver.h

    540  struct mlx5_eq_table  *eq_table;  member