/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>

#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");
static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};

static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event
};

/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}

/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

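	/* enable the interrupt, clear any pending event (PBA) and select ITR index 3 (no throttling) */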
	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		(3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}

/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hw cqp should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	if (iwdev->reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}

/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

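	/* after a reset the hardware cannot service the destroy request; skip it and just free the memory */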
	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}

/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};

/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory cache info for the objects
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}

/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}

/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}

/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}

/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

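	/* mask + 1 is the required alignment; round the next-free VA up and apply the same offset to the PA */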
	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}

/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}

/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}

/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @msix_vec: interrupt vector information
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;

	return 0;
}

/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;

	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);

exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}

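/**
 * i40iw_request_reset - ask the lan driver to reset this function
 * @iwdev: iwarp device
 */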
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}

/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
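	/* with a shared vector, msix entry 0 services both the aeq and ceq 0; otherwise ceqs start at entry 1 */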
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = true;
		return 0;
	}
}

/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}

/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

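	/* size the aeq for two async events per qp plus one per cq */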
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}

/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}

/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}

/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->vsi.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
	info.tx_buf_cnt = 4096;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}

/**
 * i40iw_reinitialize_ieq - destroy and re-create ieq
 * @dev: hardware control device structure
 */
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
	if (i40iw_initialize_ieq(iwdev)) {
		iwdev->reset = true;
		i40iw_request_reset(iwdev);
	}
}

/**
 * i40iw_hmc_setup - create hmc objects for the device
 * @iwdev: iwarp device
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;

	iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
	status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
	if (status)
		goto exit;
	status = i40iw_create_hmc_objs(iwdev, true);
	if (status)
		goto exit;
	iwdev->init_state = HMC_OBJS_CREATED;
exit:
	return status;
}

/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}

/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
}

/**
 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
 * @iwdev: iwarp device
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
							 u8 *mac_addr,
							 u8 idx)
{
	struct i40iw_local_mac_ipaddr_entry_info *info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	cqp_info = &cqp_request->info;

	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
	return status;
}

/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}

/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	bool got_lock = true;
	u32 ip_addr;

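	/* the rtnl lock may already be held (e.g. by a notifier caller), so proceed without it rather than deadlock */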
	if (!rtnl_trylock())
		got_lock = false;

	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		    (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					     rdma_vlan_dev_vlan_id(dev), dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
	if (got_lock)
		rtnl_unlock();
}

/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}

/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}

/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
				(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	l2params.mtu =
		(ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
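	/* if the lan reports different qs handles across user priorities, DCB is in effect */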
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);

	if (status)
		goto error;
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	vsi_info.exception_lan_queue = 1;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		if (!stats_info.pestat) {
			status = I40IW_ERR_NO_MEMORY;
			goto error;
		}
		stats_info.stats_initialize = true;
		if (stats_info.pestat)
			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
}

/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	if (!ldev->msix_count) {
		i40iw_pr_err("No MSI-X vectors\n");
		return I40IW_ERR_CONFIG;
	}

	iwdev->msix_count = ldev->msix_count;

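	/* struct i40e_qvlist_info already includes one qv_info entry, so append msix_count - 1 more */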
	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);

	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;
	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}

/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

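	/* unwind in reverse order of creation; each case deliberately falls through to the next */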
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
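	/* resources are allocated contiguously from bit 0, so the first clear bit gives the count in use */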
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_netdev(ldev->netdev);
	if (hdl)
		return 0;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	i40iw_setup_cm_core(iwdev);

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);

1652 	do {
1653 		status = i40iw_setup_init_state(hdl, ldev, client);
1654 		if (status)
1655 			break;
1656 		iwdev->init_state = INITIAL_STATE;
1657 		if (dev->is_pf)
1658 			i40iw_wait_pe_ready(dev->hw);
1659 		status = i40iw_create_cqp(iwdev);
1660 		if (status)
1661 			break;
1662 		iwdev->init_state = CQP_CREATED;
1663 		status = i40iw_hmc_setup(iwdev);
1664 		if (status)
1665 			break;
1666 		status = i40iw_create_ccq(iwdev);
1667 		if (status)
1668 			break;
1669 		iwdev->init_state = CCQ_CREATED;
1670 		status = i40iw_initialize_ilq(iwdev);
1671 		if (status)
1672 			break;
1673 		iwdev->init_state = ILQ_CREATED;
1674 		status = i40iw_initialize_ieq(iwdev);
1675 		if (status)
1676 			break;
1677 		iwdev->init_state = IEQ_CREATED;
1678 		status = i40iw_setup_aeq(iwdev);
1679 		if (status)
1680 			break;
1681 		iwdev->init_state = AEQ_CREATED;
1682 		status = i40iw_setup_ceqs(iwdev, ldev);
1683 		if (status)
1684 			break;
1685 		iwdev->init_state = CEQ_CREATED;
1686 		status = i40iw_initialize_hw_resources(iwdev);
1687 		if (status)
1688 			break;
1689 		i40iw_get_used_rsrc(iwdev);
1690 		dev->ccq_ops->ccq_arm(dev->ccq);
1691 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
1692 		if (status)
1693 			break;
1694 		iwdev->init_state = PBLE_CHUNK_MEM;
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		if (!iwdev->virtchnl_wq)
			break;
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}

		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (!iwdev->param_wq)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev);
	return -ERESTART;
}
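
/*
 * Note on the unwind pattern in i40iw_open() (illustrative): init_state
 * records the last stage that completed, and i40iw_deinit_device()
 * walks a switch whose cases fall through in reverse creation order, so
 * a failure at any stage tears down exactly what was built.  A
 * hypothetical new stage would touch both places:
 *
 *	status = i40iw_setup_foo(iwdev);        <- hypothetical stage
 *	if (status)
 *		break;
 *	iwdev->init_state = FOO_CREATED;        <- hypothetical enum value
 *
 * plus a matching "case FOO_CREATED:" added above the stage it follows
 * in the i40iw_deinit_device() switch.
 */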

/**
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
	    container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(dwork);
}
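
/*
 * The container_of() step above is the usual deferred-work idiom: the
 * workqueue hands back only the embedded work_struct, and
 * container_of() recovers the enclosing allocation.  A minimal sketch
 * of the layout this relies on (field order assumed from its use here):
 *
 *	struct l2params_work {
 *		struct work_struct work;
 *		struct i40iw_device *iwdev;
 *		struct i40iw_l2params l2params;
 *	};
 *
 * Freeing dwork rather than the raw work pointer stays correct even if
 * the work member is ever moved away from offset zero.
 */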

/**
 * i40iw_l2param_change - handle qs_handle changes for qos and mtu change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}
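
/*
 * Design note: params_busy is a single-slot throttle, not a lock.
 * While one update is queued or in flight, further L2 notifications
 * are dropped rather than coalesced.  The read-then-inc above can in
 * principle race two concurrent callers past the gate; a race-free
 * variant of the same gate would be (a sketch, not the driver's code):
 *
 *	if (atomic_cmpxchg(&iwdev->params_busy, 0, 1) != 0)
 *		return;
 *
 * with the kzalloc failure path resetting the counter and the worker
 * still doing atomic_dec() (back to 0) when done.
 */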

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
					sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
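
/*
 * Locking note (an assumption from the code above): only the
 * dev->vf_dev[] slot swap is done under the pestat lock, presumably
 * because the statistics path walks vf_dev[] under that same lock.
 * The heavier teardown (HMC object deletion, the CQP command and the
 * final free) runs on the local tmp_vfdev copy outside the lock, so
 * the spinlock is never held across operations that may sleep.
 */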

/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}
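
/*
 * The clamp above could equally be written with the kernel's min_t()
 * helper (a sketch; behavior unchanged):
 *
 *	hdl->device.max_enabled_vfs =
 *		min_t(u32, num_vfs, I40IW_MAX_PE_ENABLED_VF_COUNT);
 */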

/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}

/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;

	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}

/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
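
/*
 * Counterpart sketch (illustrative; the exact producer is an
 * assumption): the waiter above blocks until vchnl_msgs drains, so
 * whichever path consumes the last in-flight message is expected to
 * drop the counter and wake this queue, along the lines of:
 *
 *	if (atomic_dec_and_test(&iwdev->vchnl_msgs))
 *		wake_up(&dev->vf_reqs);
 *
 * Note the VF receive path in this file decrements vchnl_msgs but
 * wakes iwdev->vchnl_waitq instead; dev->vf_reqs is presumably woken
 * from the virtual channel completion handling elsewhere in the driver.
 */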

/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;

	if (!dev || !dev->back_dev)
		return I40IW_ERR_BAD_PTR;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
	return I40IW_ERR_BAD_PTR;
}

/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};

/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
	ret = i40e_register_client(&i40iw_client);
	if (!ret)
		i40iw_register_notifiers();

	return ret;
}
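
/*
 * Ordering note: when an __init function returns an error the module
 * is unloaded without its __exit handler ever running, so any side
 * effects must be undone before returning.  The success check above
 * keeps the notifier registration tied to a successful client
 * registration; the equivalent explicit form would be (a sketch):
 *
 *	ret = i40e_register_client(&i40iw_client);
 *	if (ret)
 *		return ret;
 *	i40iw_register_notifiers();
 *	return 0;
 */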

/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);