/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>

#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

static int push_mode;
module_param(push_mode, int, 0644);
MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 (default 32)");
static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (1 or 2)");
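/*
 * Illustrative usage (parameter values are examples, not recommendations):
 *   modprobe i40iw resource_profile=1 max_rdma_vfs=8 mpa_version=2
 */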

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};

static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event
};

/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}

/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

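	/* enable the vector, clear its pending-bit-array entry, and select ITR index 3 (the no-throttle ITR setting) */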
	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		(3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}

/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hardware cqp should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	if (iwdev->reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}

/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

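	/* when MSI-X is shared, vector 0 services both the AEQ and CEQ 0, so its irq was requested with iwdev as dev_id */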
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}

/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};

/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: hmc info holding the object counts
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}

/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}

/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}

/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}

/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
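	/* mask is alignment - 1, so mask + 1 is the required power-of-two boundary */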
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}

/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}

/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}

/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @msix_vec: interrupt vector information
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;

	return 0;
}

/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;

	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);

exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}

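/**
 * i40iw_request_reset - request a reset from the lan driver
 * @iwdev: iwarp device
 */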
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}

/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
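	/* vector 0 is reserved for the AEQ unless it is shared with CEQ 0 */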
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = true;
		return 0;
	}
}

/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}

/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}

/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}

/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}

/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->vsi.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
	info.tx_buf_cnt = 4096;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}

/**
 * i40iw_reinitialize_ieq - destroy and re-create ieq
 * @dev: iwarp device
 */
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

	i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
	if (i40iw_initialize_ieq(iwdev)) {
		iwdev->reset = true;
		i40iw_request_reset(iwdev);
	}
}

/**
 * i40iw_hmc_setup - create hmc objects for the device
 * @iwdev: iwarp device
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;

	iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
	status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
	if (status)
		goto exit;
	status = i40iw_create_hmc_objs(iwdev, true);
	if (status)
		goto exit;
	iwdev->init_state = HMC_OBJS_CREATED;
exit:
	return status;
}

/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}

/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail\n");
}

/**
 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
 * @iwdev: iwarp device
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
							 u8 *mac_addr,
							 u8 idx)
{
	struct i40iw_local_mac_ipaddr_entry_info *info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	cqp_info = &cqp_request->info;

	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Add MAC Ip entry fail\n");
	return status;
}

/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail\n");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}

/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	bool got_lock = true;
	u32 ip_addr;

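	/* the caller may already hold rtnl (e.g. from a notifier), so fall back to walking without the lock if trylock fails */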
	if (!rtnl_trylock())
		got_lock = false;

	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		    (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			const struct in_ifaddr *ifa;

			idev = in_dev_get(dev);
			in_dev_for_each_ifa_rtnl(ifa, idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					     rdma_vlan_dev_vlan_id(dev), dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}

			in_dev_put(idev);
		}
	}
	if (got_lock)
		rtnl_unlock();
}

/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}

/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

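	/* poll for up to 14 seconds until all three PE CPUs report ready (0x80) */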
	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}

/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
				(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	l2params.mtu =
		(ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);

	if (status)
		goto error;
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	vsi_info.exception_lan_queue = 1;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		if (!stats_info.pestat) {
			status = I40IW_ERR_NO_MEMORY;
			goto error;
		}
		stats_info.stats_initialize = true;
		if (stats_info.pestat)
			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
}

/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	if (!ldev->msix_count) {
		i40iw_pr_err("No MSI-X vectors\n");
		return I40IW_ERR_CONFIG;
	}

	iwdev->msix_count = ldev->msix_count;

	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);

	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;
	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
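	/* share vector 0 between the AEQ and CEQ 0 when every vector can be pinned to a distinct online CPU */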
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}

/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

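	/* cases fall through on purpose: unwind in reverse order of creation, starting from the last completed init state */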
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;
	iwdev->push_mode = push_mode;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
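	/* resource bitmaps are allocated from index 0 up, so the first clear bit gives the count consumed internally */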
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_netdev(ldev->netdev);
	if (hdl)
		return 0;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	if (i40iw_setup_cm_core(iwdev)) {
		kfree(iwdev->hdl);
		return -ENOMEM;
	}

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);
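	/*
	 * Single-pass do/while(0): each stage records its progress in
	 * init_state so a failure can break out and i40iw_deinit_device()
	 * unwinds only what was actually created.
	 */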
	do {
		status = i40iw_setup_init_state(hdl, ldev, client);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev, ldev);
		if (status)
			break;
		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		i40iw_get_used_rsrc(iwdev);
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->init_state = PBLE_CHUNK_MEM;
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		if (!iwdev->virtchnl_wq) {
			status = I40IW_ERR_NO_MEMORY;
			break;
		}
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}

		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (!iwdev->param_wq)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev);
	return -ERESTART;
}
1724 
/**
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
	    container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(dwork);
}

/**
 * i40iw_l2param_change - handle qs_handles for qos and mtu change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

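	/* drop this event if a previous l2 param change is still being worked */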
	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}
1781 
/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if close is due to a reset of the device
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

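	/* flag the device as closing so other paths stop issuing new work */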
	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
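		/* clear the slot under the stats lock so concurrent stats
		 * gathering does not dereference a vanishing VF entry
		 */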
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
					sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}
1860 
/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}
1884 
/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

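	/* an empty slot means another VF can still be enabled; a matching
	 * vf_id means this VF is already RDMA-enabled
	 */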
	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}
1915 
/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;

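	/* on a VF the receive completes an outstanding request, so drop
	 * the pending-message count and wake any waiting sender
	 */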
	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}
1957 
/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

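	/* fast path: no one is queued on vf_reqs and no messages are pending */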
	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}
1990 
/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;

	if (!dev || !dev->back_dev)
		return I40IW_ERR_BAD_PTR;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
	return I40IW_ERR_BAD_PTR;
}
2019 
/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};
2030 
/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
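	/* registering with i40e makes the lan driver call i40iw_open()
	 * for each interface already bound to it
	 */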
	ret = i40e_register_client(&i40iw_client);
	i40iw_register_notifiers();

	return ret;
}
2054 
/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);