/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
#include <linux/mlx5/cq.h>

#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"

extern uint mlx5_core_debug_mask;

#define mlx5_core_dbg(__dev, format, ...)				\
	dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		__func__, __LINE__, current->pid,			\
		##__VA_ARGS__)

#define mlx5_core_dbg_once(__dev, format, ...)				\
	dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		     __func__, __LINE__, current->pid,			\
		     ##__VA_ARGS__)

#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
do {									\
	if ((mask) & mlx5_core_debug_mask)				\
		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
} while (0)

#define mlx5_core_err(__dev, format, ...)				\
	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		__func__, __LINE__, current->pid,			\
		##__VA_ARGS__)

#define mlx5_core_err_rl(__dev, format, ...)				\
	dev_err_ratelimited(&(__dev)->pdev->dev,			\
			    "%s:%d:(pid %d): " format,			\
			    __func__, __LINE__, current->pid,		\
			    ##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...)				\
	dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		 __func__, __LINE__, current->pid,			\
		 ##__VA_ARGS__)

#define mlx5_core_info(__dev, format, ...)				\
	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)

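/*
 * Command-interface debug classes. Judging from mlx5_core_dbg_mask() above,
 * these are intended as bit positions in mlx5_core_debug_mask; an
 * illustrative (not authoritative) use would be:
 *
 *	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
 *			   "dumping command payload\n");
 */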
enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};

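/*
 * Driver-generated status/syndrome values (note the 0xbadd00de marker),
 * presumably reported when the driver completes or aborts commands on its
 * own (e.g. during error handling or teardown) rather than values returned
 * by firmware.
 */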
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 element_id,
				       u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);

int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);

int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
			u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
			u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
			u8 feature_group, u8 access_reg_group);

void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);

void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
bool mlx5_device_registered(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
		     int header_type,
		     size_t size,
		     void *encap_header,
		     u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);

int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
			     u8 namespace, u8 num_actions,
			     void *modify_actions, u32 *modify_header_id);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);

bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);

int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);

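/* PPS is usable only when the device reports both the general PPS
 * capabilities and the MCAM feature bits for the MTPPS/MTPPSE access
 * registers; presumably checked before exposing PPS/PTP pin support.
 */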
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))

int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);

void mlx5e_init(void);
void mlx5e_cleanup(void);

static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
	/* LACP owner conditions:
	 * 1) Function is physical.
	 * 2) LAG is supported by FW.
	 * 3) LAG is managed by driver (currently the only option).
	 */
	return MLX5_CAP_GEN(dev, vport_group_manager) &&
	       (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
	       MLX5_CAP_GEN(dev, lag_master);
}

int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);

void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
#endif /* __MLX5_CORE_H__ */