1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 #include "vf.h"
5 #include "ixgbevf.h"
6
7 /* On Hyper-V, to reset, we need to read from this offset
8 * from the PCI config space. This is the mechanism used on
9 * Hyper-V to support PF/VF communication.
10 */
11 #define IXGBE_HV_RESET_OFFSET 0x201
12
ixgbevf_write_msg_read_ack(struct ixgbe_hw * hw,u32 * msg,u32 * retmsg,u16 size)13 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14 u32 *retmsg, u16 size)
15 {
16 struct ixgbe_mbx_info *mbx = &hw->mbx;
17 s32 retval = mbx->ops.write_posted(hw, msg, size);
18
19 if (retval)
20 return retval;
21
22 return mbx->ops.read_posted(hw, retmsg, size);
23 }
24
25 /**
26 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
27 * @hw: pointer to hardware structure
28 *
29 * Starts the hardware by filling the bus info structure and media type, clears
30 * all on chip counters, initializes receive address registers, multicast
31 * table, VLAN filter table, calls routine to set up link and flow control
32 * settings, and leaves transmit and receive units disabled and uninitialized
33 **/
ixgbevf_start_hw_vf(struct ixgbe_hw * hw)34 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
35 {
36 /* Clear adapter stopped flag */
37 hw->adapter_stopped = false;
38
39 return 0;
40 }
41
42 /**
43 * ixgbevf_init_hw_vf - virtual function hardware initialization
44 * @hw: pointer to hardware structure
45 *
46 * Initialize the hardware by resetting the hardware and then starting
47 * the hardware
48 **/
ixgbevf_init_hw_vf(struct ixgbe_hw * hw)49 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
50 {
51 s32 status = hw->mac.ops.start_hw(hw);
52
53 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
54
55 return status;
56 }
57
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts. On success the permanent MAC address reported by
 * the PF is cached in hw->mac.perm_addr and mc_filter_type is recorded.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	/* request a VF function-level reset */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	/* tell the PF we are resetting; the reply carries our MAC address */
	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	/* give the PF time to process the reset request */
	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* only trust the supplied address if the PF ACKed the reset */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
122
123 /**
124 * Hyper-V variant; the VF/PF communication is through the PCI
125 * config space.
126 * @hw: pointer to private hardware struct
127 */
ixgbevf_hv_reset_hw_vf(struct ixgbe_hw * hw)128 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
129 {
130 #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
131 struct ixgbevf_adapter *adapter = hw->back;
132 int i;
133
134 for (i = 0; i < 6; i++)
135 pci_read_config_byte(adapter->pdev,
136 (i + IXGBE_HV_RESET_OFFSET),
137 &hw->mac.perm_addr[i]);
138 return 0;
139 #else
140 pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
141 return -EOPNOTSUPP;
142 #endif
143 }
144
145 /**
146 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
147 * @hw: pointer to hardware structure
148 *
149 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
150 * disables transmit and receive units. The adapter_stopped flag is used by
151 * the shared code and drivers to determine if the adapter is in a stopped
152 * state and should not touch the hardware.
153 **/
ixgbevf_stop_hw_vf(struct ixgbe_hw * hw)154 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
155 {
156 u32 number_of_queues;
157 u32 reg_val;
158 u16 i;
159
160 /* Set the adapter_stopped flag so other driver functions stop touching
161 * the hardware
162 */
163 hw->adapter_stopped = true;
164
165 /* Disable the receive unit by stopped each queue */
166 number_of_queues = hw->mac.max_rx_queues;
167 for (i = 0; i < number_of_queues; i++) {
168 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
169 if (reg_val & IXGBE_RXDCTL_ENABLE) {
170 reg_val &= ~IXGBE_RXDCTL_ENABLE;
171 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
172 }
173 }
174
175 IXGBE_WRITE_FLUSH(hw);
176
177 /* Clear interrupt mask to stop from interrupts being generated */
178 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
179
180 /* Clear any pending interrupts */
181 IXGBE_READ_REG(hw, IXGBE_VTEICR);
182
183 /* Disable the transmit unit. Each queue must be disabled. */
184 number_of_queues = hw->mac.max_tx_queues;
185 for (i = 0; i < number_of_queues; i++) {
186 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
187 if (reg_val & IXGBE_TXDCTL_ENABLE) {
188 reg_val &= ~IXGBE_TXDCTL_ENABLE;
189 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
190 }
191 }
192
193 return 0;
194 }
195
196 /**
197 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
198 * @hw: pointer to hardware structure
199 * @mc_addr: the multicast address
200 *
201 * Extracts the 12 bits, from a multicast address, to determine which
202 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
203 * incoming Rx multicast addresses, to determine the bit-vector to check in
204 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
205 * by the MO field of the MCSTCTRL. The MO field is set during initialization
206 * to mc_filter_type.
207 **/
ixgbevf_mta_vector(struct ixgbe_hw * hw,u8 * mc_addr)208 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
209 {
210 u32 vector = 0;
211
212 switch (hw->mac.mc_filter_type) {
213 case 0: /* use bits [47:36] of the address */
214 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
215 break;
216 case 1: /* use bits [46:35] of the address */
217 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
218 break;
219 case 2: /* use bits [45:34] of the address */
220 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
221 break;
222 case 3: /* use bits [43:32] of the address */
223 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
224 break;
225 default: /* Invalid mc_filter_type */
226 break;
227 }
228
229 /* vector can only be 12-bits or boundary will be exceeded */
230 vector &= 0xFFF;
231 return vector;
232 }
233
234 /**
235 * ixgbevf_get_mac_addr_vf - Read device MAC address
236 * @hw: pointer to the HW structure
237 * @mac_addr: pointer to storage for retrieved MAC address
238 **/
ixgbevf_get_mac_addr_vf(struct ixgbe_hw * hw,u8 * mac_addr)239 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
240 {
241 ether_addr_copy(mac_addr, hw->mac.perm_addr);
242
243 return 0;
244 }
245
ixgbevf_set_uc_addr_vf(struct ixgbe_hw * hw,u32 index,u8 * addr)246 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
247 {
248 u32 msgbuf[3], msgbuf_chk;
249 u8 *msg_addr = (u8 *)(&msgbuf[1]);
250 s32 ret_val;
251
252 memset(msgbuf, 0, sizeof(msgbuf));
253 /* If index is one then this is the start of a new list and needs
254 * indication to the PF so it can do it's own list management.
255 * If it is zero then that tells the PF to just clear all of
256 * this VF's macvlans and there is no new list.
257 */
258 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
259 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
260 msgbuf_chk = msgbuf[0];
261
262 if (addr)
263 ether_addr_copy(msg_addr, addr);
264
265 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
266 ARRAY_SIZE(msgbuf));
267 if (!ret_val) {
268 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
269
270 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
271 return -ENOMEM;
272 }
273
274 return ret_val;
275 }
276
/**
 * ixgbevf_hv_set_uc_addr_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * There is no PF mailbox on Hyper-V, so macvlan filters cannot be set.
 */
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
281
282 /**
283 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
284 * @hw: pointer to hardware structure
285 * @reta: buffer to fill with RETA contents.
286 * @num_rx_queues: Number of Rx queues configured for this port
287 *
288 * The "reta" buffer should be big enough to contain 32 registers.
289 *
290 * Returns: 0 on success.
291 * if API doesn't support this operation - (-EOPNOTSUPP).
292 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the opcode word plus one DWORD per 16 RETA entries */
	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	/* strip the CTS bit so the opcode can be compared directly */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack the 16 two-bit entries packed into each DWORD */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
356
357 /**
358 * ixgbevf_get_rss_key_locked - get the RSS Random Key
359 * @hw: pointer to the HW structure
360 * @rss_key: buffer to fill with RSS Hash Key contents.
361 *
362 * The "rss_key" buffer should be big enough to contain 10 registers.
363 *
364 * Returns: 0 on success.
365 * if API doesn't support this operation - (-EOPNOTSUPP).
366 */
ixgbevf_get_rss_key_locked(struct ixgbe_hw * hw,u8 * rss_key)367 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
368 {
369 int err;
370 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
371
372 /* We currently support the RSS Random Key retrieval for 82599 and x540
373 * devices only.
374 *
375 * Thus return an error if API doesn't support RSS Random Key retrieval
376 * or if the operation is not supported for this device type.
377 */
378 switch (hw->api_version) {
379 case ixgbe_mbox_api_13:
380 case ixgbe_mbox_api_12:
381 if (hw->mac.type < ixgbe_mac_X550_vf)
382 break;
383 /* fall through */
384 default:
385 return -EOPNOTSUPP;
386 }
387
388 msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
389 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
390
391 if (err)
392 return err;
393
394 err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
395
396 if (err)
397 return err;
398
399 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
400
401 /* If the operation has been refused by a PF return -EPERM */
402 if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
403 return -EPERM;
404
405 /* If we didn't get an ACK there must have been
406 * some sort of mailbox error so we should treat it
407 * as such.
408 */
409 if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
410 return IXGBE_ERR_MBX;
411
412 memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
413
414 return 0;
415 }
416
417 /**
418 * ixgbevf_set_rar_vf - set device MAC address
419 * @hw: pointer to hardware structure
420 * @index: Receive address register to write
421 * @addr: Address to put into receive address register
422 * @vmdq: Unused in this implementation
423 **/
ixgbevf_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)424 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
425 u32 vmdq)
426 {
427 u32 msgbuf[3];
428 u8 *msg_addr = (u8 *)(&msgbuf[1]);
429 s32 ret_val;
430
431 memset(msgbuf, 0, sizeof(msgbuf));
432 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
433 ether_addr_copy(msg_addr, addr);
434
435 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
436 ARRAY_SIZE(msgbuf));
437 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
438
439 /* if nacked the address was rejected, use "perm_addr" */
440 if (!ret_val &&
441 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
442 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
443 return IXGBE_ERR_MBX;
444 }
445
446 return ret_val;
447 }
448
449 /**
450 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
451 * @hw: pointer to hardware structure
452 * @index: Receive address register to write
453 * @addr: Address to put into receive address register
454 * @vmdq: Unused in this implementation
455 *
456 * We don't really allow setting the device MAC address. However,
457 * if the address being set is the permanent MAC address we will
458 * permit that.
459 **/
ixgbevf_hv_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)460 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
461 u32 vmdq)
462 {
463 if (ether_addr_equal(addr, hw->mac.perm_addr))
464 return 0;
465
466 return -EOPNOTSUPP;
467 }
468
469 /**
470 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
471 * @hw: pointer to the HW structure
472 * @netdev: pointer to net device structure
473 *
474 * Updates the Multicast Table Array.
475 **/
ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw * hw,struct net_device * netdev)476 static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
477 struct net_device *netdev)
478 {
479 struct netdev_hw_addr *ha;
480 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
481 u16 *vector_list = (u16 *)&msgbuf[1];
482 u32 cnt, i;
483
484 /* Each entry in the list uses 1 16 bit word. We have 30
485 * 16 bit words available in our HW msg buffer (minus 1 for the
486 * msg type). That's 30 hash values if we pack 'em right. If
487 * there are more than 30 MC addresses to add then punt the
488 * extras for now and then add code to handle more than 30 later.
489 * It would be unusual for a server to request that many multi-cast
490 * addresses except for in large enterprise network environments.
491 */
492
493 cnt = netdev_mc_count(netdev);
494 if (cnt > 30)
495 cnt = 30;
496 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
497 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
498
499 i = 0;
500 netdev_for_each_mc_addr(ha, netdev) {
501 if (i == cnt)
502 break;
503 if (is_link_local_ether_addr(ha->addr))
504 continue;
505
506 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
507 }
508
509 ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
510
511 return 0;
512 }
513
/**
 * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @netdev: unused
 *
 * There is no PF mailbox on Hyper-V, so the MTA cannot be updated.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}
524
/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* strip CTS before checking for a NACK from the PF */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
563
/**
 * ixgbevf_hv_update_xcast_mode - Hyper-V variant - just a stub
 * @hw: unused
 * @xcast_mode: unused
 *
 * There is no PF mailbox on Hyper-V, so the multicast mode is fixed.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}
573
574 /**
575 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
576 * @hw: pointer to the HW structure
577 * @vlan: 12 bit VLAN ID
578 * @vind: unused by VF drivers
579 * @vlan_on: if true then set bit, else clear bit
580 **/
ixgbevf_set_vfta_vf(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on)581 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
582 bool vlan_on)
583 {
584 u32 msgbuf[2];
585 s32 err;
586
587 msgbuf[0] = IXGBE_VF_SET_VLAN;
588 msgbuf[1] = vlan;
589 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
590 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
591
592 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
593 ARRAY_SIZE(msgbuf));
594 if (err)
595 goto mbx_err;
596
597 /* remove extra bits from the message */
598 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
599 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
600
601 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
602 err = IXGBE_ERR_INVALID_ARGUMENT;
603
604 mbx_err:
605 return err;
606 }
607
/**
 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * VLAN filters cannot be programmed without a PF mailbox.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}
620
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success. VF drivers are not allowed to change
 * global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	/* intentionally a no-op: link configuration is owned by the PF */
	return 0;
}
637
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* map the register's speed bits; *speed is left untouched if the
	 * bits hold an unexpected value
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
725
/**
 * ixgbevf_hv_check_mac_link_vf - Get link/speed status; Hyper-V variant
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * There is no mailbox communication on Hyper-V, so only the link
 * registers are consulted and this function always returns 0.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* map the register's speed bits; *speed is left untouched if the
	 * bits hold an unexpected value
	 */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
790
791 /**
792 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
793 * @hw: pointer to the HW structure
794 * @max_size: value to assign to max frame size
795 **/
ixgbevf_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)796 static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
797 {
798 u32 msgbuf[2];
799 s32 ret_val;
800
801 msgbuf[0] = IXGBE_VF_SET_LPE;
802 msgbuf[1] = max_size;
803
804 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
805 ARRAY_SIZE(msgbuf));
806 if (ret_val)
807 return ret_val;
808 if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
809 (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
810 return IXGBE_ERR_MBX;
811
812 return 0;
813 }
814
815 /**
816 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
817 * @hw: pointer to the HW structure
818 * @max_size: value to assign to max frame size
819 * Hyper-V variant.
820 **/
ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)821 static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
822 {
823 u32 reg;
824
825 /* If we are on Hyper-V, we implement this functionality
826 * differently.
827 */
828 reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
829 /* CRC == 4 */
830 reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
831 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
832
833 return 0;
834 }
835
836 /**
837 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
838 * @hw: pointer to the HW structure
839 * @api: integer containing requested API version
840 **/
ixgbevf_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)841 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
842 {
843 int err;
844 u32 msg[3];
845
846 /* Negotiate the mailbox API version */
847 msg[0] = IXGBE_VF_API_NEGOTIATE;
848 msg[1] = api;
849 msg[2] = 0;
850
851 err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
852 if (!err) {
853 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
854
855 /* Store value and return 0 on success */
856 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
857 hw->api_version = api;
858 return 0;
859 }
860
861 err = IXGBE_ERR_INVALID_ARGUMENT;
862 }
863
864 return err;
865 }
866
867 /**
868 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
869 * @hw: pointer to the HW structure
870 * @api: integer containing requested API version
871 * Hyper-V version - only ixgbe_mbox_api_10 supported.
872 **/
ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)873 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
874 {
875 /* Hyper-V only supports api version ixgbe_mbox_api_10 */
876 if (api != ixgbe_mbox_api_10)
877 return IXGBE_ERR_INVALID_ARGUMENT;
878
879 return 0;
880 }
881
/**
 * ixgbevf_get_queues - Fetch the VF's queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes (forced to 1 if the
 *	     reported value is out of range)
 * @default_tc: filled with the default traffic class / queue
 *
 * Returns 0 (and leaves hw->mac queue limits untouched) when the mailbox
 * API predates the IXGBE_VF_GET_QUEUE message.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
937
/* MAC operations for VFs that talk to the PF over the hardware mailbox */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_update_xcast_mode,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
	.set_rlpml = ixgbevf_set_rlpml_vf,
};
954
/* MAC operations for VFs running on Hyper-V; mailbox-based ops are stubbed */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_hv_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
	.set_rar = ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
	.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
	.set_vfta = ixgbevf_hv_set_vfta_vf,
	.set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
971
/* Per-MAC-type device info; each type has a mailbox and a Hyper-V variant */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
1016