/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);

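/*
 * Issue QUERY_VPORT_STATE for @vport. For any vport other than our own
 * (vport 0) the other_vport bit is set, which is presumably only honored
 * for callers allowed to manage other vports (e.g. an E-Switch manager).
 */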
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

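/*
 * Return the state reported by QUERY_VPORT_STATE. The command status is
 * not propagated: on failure @out stays zeroed and a state of 0 is
 * returned.
 */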
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

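/*
 * Read the NIC vport context of @vport into @out; @outlen must be at
 * least MLX5_ST_SZ_BYTES(query_nic_vport_context_out).
 */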
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

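/*
 * Write back a NIC vport context. The caller prepares @in, including the
 * field_select bits for the fields being changed; only the opcode is
 * filled in here.
 */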
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

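/*
 * Resolve the effective minimal WQE inline mode: it is either fixed by
 * the port capability (L2, or none when not required) or, in the
 * VPORT_CONTEXT case, read from our own (vport 0) NIC vport context.
 */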
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

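/*
 * Set the minimal WQE inline mode of another vport. other_vport is set
 * unconditionally here, so this appears intended for an E-Switch manager
 * acting on its VFs.
 */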
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

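/*
 * Query the permanent MAC of @vport. permanent_address is an 8-byte
 * field holding the 6-byte MAC in its low bytes, hence the +2 offset.
 * E.g. (hypothetical caller):
 *
 *	u8 mac[ETH_ALEN];
 *	int err = mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 */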
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

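/*
 * Query the current UC or MC MAC list of @vport. On input *list_size is
 * the capacity of @addr_list (clamped to the device maximum with a
 * warning); on output it is the number of entries actually returned.
 */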
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

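/*
 * Replace the allowed UC or MC MAC list. Unlike the query, an oversized
 * list is rejected with -ENOSPC rather than clamped.
 */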
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

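/*
 * Query the allowed VLAN list. VLAN entries occupy the same
 * current_uc_mac_address[] slots as MACs do, reinterpreted through the
 * vlan_layout struct.
 */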
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

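/*
 * Note: the underlying query's return value is not checked; if the
 * command fails the GUID is read from the zeroed buffer and the
 * function still returns 0.
 */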
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

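/*
 * Set the node GUID of another vport (VF). Rejects vport 0 and callers
 * lacking the vport_group_manager capability.
 */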
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

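/*
 * Query the GID table of an HCA vport. A @gid_index of 0xffff asks for
 * the whole table, although only the first returned entry is copied
 * into @gid here.
 */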
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

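/*
 * Query the P_Key table of an HCA vport. @pkey_index == 0xffff reads the
 * whole table, in which case @pkey must have room for
 * mlx5_to_sw_pkey_sz(pkey_table_size) entries.
 */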
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

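/*
 * Query the full HCA vport context and unpack it into @rep. Querying
 * another vport requires the vport_group_manager capability (-EPERM
 * otherwise).
 */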
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

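/*
 * Enable or disable local loopback for both UC and MC traffic. Each
 * direction is guarded by its disable_local_lb_{uc,mc} capability; a
 * device supporting neither returns success without doing anything.
 */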
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

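/*
 * RoCE enablement is reference counted under mlx5_roce_en_lock; the
 * device state only changes on the 0 <-> 1 transitions. A typical
 * (hypothetical) caller pairs the two:
 *
 *	err = mlx5_nic_vport_enable_roce(mdev);
 *	...
 *	mlx5_nic_vport_disable_roce(mdev);
 */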
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

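/*
 * Query the per-vport traffic counters. For another vport the VF number
 * is translated to a vport number by adding 1 (vport 0 being the PF).
 */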
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

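/*
 * Read the vnic_env counters of packets discarded, in both directions,
 * while the vport was down.
 */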
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vnic_env_in, in, other_vport, 1);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}

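/*
 * Push a full HCA vport context built from @req; @req->field_select
 * tells the firmware which fields to apply. As with the query, another
 * vport can only be modified by the vport group manager.
 */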
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);

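/*
 * Affiliate @port_mdev to @master_mdev for dual-port RoCE: RoCE is
 * enabled on the slave port and its NIC vport context is pointed at the
 * master's vhca_id. On failure the RoCE enablement is rolled back.
 */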
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id,
		 MLX5_CAP_GEN(master_mdev, vhca_id));
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));

	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
	if (err)
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);

int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id, 0);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria, 0);

	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
	if (!err)
		mlx5_nic_vport_disable_roce(port_mdev);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
