1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2018 Mellanox Technologies */
3
4 #ifndef __LIB_MLX5_EQ_H__
5 #define __LIB_MLX5_EQ_H__
6 #include <linux/mlx5/driver.h>
7 #include <linux/mlx5/eq.h>
8 #include <linux/mlx5/cq.h>
9
10 #define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
11
/* Per-EQ tasklet context used to run CQ completion callbacks outside
 * hard-IRQ context (see mlx5_cq_tasklet_cb() declared below).
 */
struct mlx5_eq_tasklet {
	struct list_head list;		/* presumably CQs queued for processing — confirm in eq.c */
	struct list_head process_list;	/* presumably the snapshot the tasklet drains — confirm in eq.c */
	struct tasklet_struct task;
	spinlock_t lock; /* lock completion tasklet list */
};
18
/* Lookup table mapping CQ numbers to CQ objects for one EQ
 * (populated/queried via mlx5_eq_add_cq()/mlx5_eq_del_cq() below).
 */
struct mlx5_cq_table {
	spinlock_t lock; /* protect radix tree */
	struct radix_tree_root tree;
};
23
/* Core event queue state, embedded in both the async and the completion
 * EQ wrappers below.
 */
struct mlx5_eq {
	struct mlx5_core_dev *dev;
	struct mlx5_cq_table cq_table;
	__be32 __iomem *doorbell;	/* CI/arm doorbell, written by eq_update_ci() */
	u32 cons_index;			/* software consumer index; low 24 bits go to the doorbell */
	struct mlx5_frag_buf buf;	/* EQE ring buffer, indexed by get_eqe() */
	int size;
	unsigned int vecidx;
	unsigned int irqn;		/* presumably the Linux IRQ number — confirm at creation site */
	u8 eqn;				/* hardware EQ number (encoded into doorbell bits 31:24) */
	int nent;			/* ring entries; masking in next_eqe_sw() assumes a power of two */
	struct mlx5_rsc_debug *dbg;
};
37
/* Async (non-completion) event queue: core EQ plus its IRQ notifier. */
struct mlx5_eq_async {
	struct mlx5_eq core;
	struct notifier_block irq_nb;	/* NOTE(review): presumably registered on the IRQ notifier chain — confirm in eq.c */
};
42
/* Completion event queue: core EQ, IRQ notifier and the tasklet context
 * used to defer CQ completion processing.
 */
struct mlx5_eq_comp {
	struct mlx5_eq core;
	struct notifier_block irq_nb;	/* NOTE(review): presumably registered on the IRQ notifier chain — confirm in eq.c */
	struct mlx5_eq_tasklet tasklet_ctx;
	struct list_head list;		/* presumably links this EQ into a device-wide list — confirm in eq.c */
};
49
/* Return a pointer to EQE slot @entry within the EQ ring buffer. */
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	size_t offset = entry * MLX5_EQE_SIZE;

	return mlx5_buf_offset(&eq->buf, offset);
}
54
next_eqe_sw(struct mlx5_eq * eq)55 static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
56 {
57 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
58
59 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
60 }
61
eq_update_ci(struct mlx5_eq * eq,int arm)62 static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
63 {
64 __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
65 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
66
67 __raw_writel((__force u32)cpu_to_be32(val), addr);
68 /* We still want ordering, just not swabbing, so add a barrier */
69 mb();
70 }
71
/* EQ table lifecycle. */
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

/* CQ <-> EQ mapping and EQ lookup. */
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(unsigned long data);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);

/* IRQ polling/synchronization helpers. */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

/* debugfs support. */
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
99
100 #endif
101