1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                 Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/prefetch.h>
16 
17 #include "vxge-traffic.h"
18 #include "vxge-config.h"
19 #include "vxge-main.h"
20 
21 /*
22  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
23  * @vp: Virtual Path handle.
24  *
25  * Enable vpath interrupts. The function is to be executed the last in
26  * vpath initialization sequence.
27  *
28  * See also: vxge_hw_vpath_intr_disable()
29  */
30 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
31 {
32 	u64 val64;
33 
34 	struct __vxge_hw_virtualpath *vpath;
35 	struct vxge_hw_vpath_reg __iomem *vp_reg;
36 	enum vxge_hw_status status = VXGE_HW_OK;
37 	if (vp == NULL) {
38 		status = VXGE_HW_ERR_INVALID_HANDLE;
39 		goto exit;
40 	}
41 
42 	vpath = vp->vpath;
43 
44 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
45 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
46 		goto exit;
47 	}
48 
49 	vp_reg = vpath->vp_reg;
50 
51 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52 
53 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
54 			&vp_reg->general_errors_reg);
55 
56 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
57 			&vp_reg->pci_config_errors_reg);
58 
59 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 			&vp_reg->mrpcim_to_vpath_alarm_reg);
61 
62 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 			&vp_reg->srpcim_to_vpath_alarm_reg);
64 
65 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 			&vp_reg->vpath_ppif_int_status);
67 
68 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 			&vp_reg->srpcim_msg_to_vpath_reg);
70 
71 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 			&vp_reg->vpath_pcipif_int_status);
73 
74 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 			&vp_reg->prc_alarm_reg);
76 
77 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 			&vp_reg->wrdma_alarm_status);
79 
80 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 			&vp_reg->asic_ntwk_vp_err_reg);
82 
83 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 			&vp_reg->xgmac_vp_int_status);
85 
86 	val64 = readq(&vp_reg->vpath_general_int_status);
87 
88 	/* Mask unwanted interrupts */
89 
90 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
91 			&vp_reg->vpath_pcipif_int_mask);
92 
93 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
94 			&vp_reg->srpcim_msg_to_vpath_mask);
95 
96 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 			&vp_reg->srpcim_to_vpath_alarm_mask);
98 
99 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 			&vp_reg->mrpcim_to_vpath_alarm_mask);
101 
102 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 			&vp_reg->pci_config_errors_mask);
104 
105 	/* Unmask the individual interrupts */
106 
107 	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
108 		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
109 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
110 		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
111 		&vp_reg->general_errors_mask);
112 
113 	__vxge_hw_pio_mem_write32_upper(
114 		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
115 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
116 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
117 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
118 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
119 		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
120 		&vp_reg->kdfcctl_errors_mask);
121 
122 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123 
124 	__vxge_hw_pio_mem_write32_upper(
125 		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
126 		&vp_reg->prc_alarm_mask);
127 
128 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
129 	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130 
131 	if (vpath->hldev->first_vp_id != vpath->vp_id)
132 		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
133 			&vp_reg->asic_ntwk_vp_err_mask);
134 	else
135 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
136 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
137 		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
138 		&vp_reg->asic_ntwk_vp_err_mask);
139 
140 	__vxge_hw_pio_mem_write32_upper(0,
141 		&vp_reg->vpath_general_int_mask);
142 exit:
143 	return status;
144 
145 }
146 
147 /*
148  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
149  * @vp: Virtual Path handle.
150  *
151  * Disable vpath interrupts. The function is to be executed the first in
152  * the vpath teardown sequence.
153  *
154  * See also: vxge_hw_vpath_intr_enable()
155  */
156 enum vxge_hw_status vxge_hw_vpath_intr_disable(
157 			struct __vxge_hw_vpath_handle *vp)
158 {
159 	u64 val64;
160 
161 	struct __vxge_hw_virtualpath *vpath;
162 	enum vxge_hw_status status = VXGE_HW_OK;
163 	struct vxge_hw_vpath_reg __iomem *vp_reg;
164 	if (vp == NULL) {
165 		status = VXGE_HW_ERR_INVALID_HANDLE;
166 		goto exit;
167 	}
168 
169 	vpath = vp->vpath;
170 
171 	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
172 		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
173 		goto exit;
174 	}
175 	vp_reg = vpath->vp_reg;
176 
177 	__vxge_hw_pio_mem_write32_upper(
178 		(u32)VXGE_HW_INTR_MASK_ALL,
179 		&vp_reg->vpath_general_int_mask);
180 
181 	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182 
183 	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184 
185 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
186 			&vp_reg->general_errors_mask);
187 
188 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
189 			&vp_reg->pci_config_errors_mask);
190 
191 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 			&vp_reg->mrpcim_to_vpath_alarm_mask);
193 
194 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 			&vp_reg->srpcim_to_vpath_alarm_mask);
196 
197 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 			&vp_reg->vpath_ppif_int_mask);
199 
200 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 			&vp_reg->srpcim_msg_to_vpath_mask);
202 
203 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 			&vp_reg->vpath_pcipif_int_mask);
205 
206 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 			&vp_reg->wrdma_alarm_mask);
208 
209 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 			&vp_reg->prc_alarm_mask);
211 
212 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 			&vp_reg->xgmac_vp_int_mask);
214 
215 	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 			&vp_reg->asic_ntwk_vp_err_mask);
217 
218 exit:
219 	return status;
220 }
221 
222 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
223 {
224 	struct vxge_hw_vpath_reg __iomem *vp_reg;
225 	struct vxge_hw_vp_config *config;
226 	u64 val64;
227 
228 	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
229 		return;
230 
231 	vp_reg = fifo->vp_reg;
232 	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
233 
234 	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
235 		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
236 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
237 		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
238 		fifo->tim_tti_cfg1_saved = val64;
239 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
240 	}
241 }
242 
243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
244 {
245 	u64 val64 = ring->tim_rti_cfg1_saved;
246 
247 	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
248 	ring->tim_rti_cfg1_saved = val64;
249 	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
250 }
251 
252 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
253 {
254 	u64 val64 = fifo->tim_tti_cfg3_saved;
255 	u64 timer = (fifo->rtimer * 1000) / 272;
256 
257 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
258 	if (timer)
259 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
260 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
261 
262 	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
263 	/* tti_cfg3_saved is not updated again because it is
264 	 * initialized at one place only - init time.
265 	 */
266 }
267 
268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
269 {
270 	u64 val64 = ring->tim_rti_cfg3_saved;
271 	u64 timer = (ring->rtimer * 1000) / 272;
272 
273 	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
274 	if (timer)
275 		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
276 			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
277 
278 	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
279 	/* rti_cfg3_saved is not updated again because it is
280 	 * initialized at one place only - init time.
281 	 */
282 }
283 
284 /**
285  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
286  * @channel: Channel (Rx or Tx) handle
287  * @msix_id:  MSIX ID
288  *
289  * The function masks the msix interrupt for the given msix_id
290  *
291  * Returns: 0
292  */
293 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
294 {
295 
296 	__vxge_hw_pio_mem_write32_upper(
297 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
298 		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
299 }
300 
301 /**
302  * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
303  * @channel: Channel (Rx or Tx) handle
304  * @msix_id:  MSIX ID
305  *
306  * The function unmasks the msix interrupt for the given msix_id
307  *
308  * Returns: 0
309  */
310 void
311 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
312 {
313 
314 	__vxge_hw_pio_mem_write32_upper(
315 		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
316 		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
317 }
318 
319 /**
320  * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
321  * @channel: Channel for rx or tx handle
322  * @msix_id:  MSIX ID
323  *
324  * The function unmasks the msix interrupt for the given msix_id
325  * if configured in MSIX oneshot mode
326  *
327  * Returns: 0
328  */
329 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
330 {
331 	__vxge_hw_pio_mem_write32_upper(
332 		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
333 		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
334 }
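
/*
 * Illustrative usage sketch (the calling context is an assumption, not a
 * code path taken from this file): with per-channel MSI-X vectors, a handler
 * typically masks its vector while the channel is serviced and unmasks it
 * (or, in one-shot mode, clears it) when done:
 *
 *	vxge_hw_channel_msix_mask(channel, msix_id);
 *	... schedule or run polling for this channel ...
 *	vxge_hw_channel_msix_unmask(channel, msix_id);
 */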
335 
336 /**
337  * vxge_hw_device_set_intr_type - Updates the configuration
338  *		with new interrupt type.
339  * @hldev: HW device handle.
340  * @intr_mode: New interrupt type
341  */
342 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
343 {
344 
345 	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
346 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
347 	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
348 	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
349 		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
350 
351 	hldev->config.intr_mode = intr_mode;
352 	return intr_mode;
353 }
354 
355 /**
356  * vxge_hw_device_intr_enable - Enable interrupts.
357  * @hldev: HW device handle.
358  * @op: One of the enum vxge_hw_device_intr enumerated values specifying
359  *      the type(s) of interrupts to enable.
360  *
361  * Enable Titan interrupts. The function is to be executed the last in
362  * Titan initialization sequence.
363  *
364  * See also: vxge_hw_device_intr_disable()
365  */
366 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
367 {
368 	u32 i;
369 	u64 val64;
370 	u32 val32;
371 
372 	vxge_hw_device_mask_all(hldev);
373 
374 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
375 
376 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
377 			continue;
378 
379 		vxge_hw_vpath_intr_enable(
380 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
381 	}
382 
383 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
384 		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
385 			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
386 
387 		if (val64 != 0) {
388 			writeq(val64, &hldev->common_reg->tim_int_status0);
389 
390 			writeq(~val64, &hldev->common_reg->tim_int_mask0);
391 		}
392 
393 		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
394 			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
395 
396 		if (val32 != 0) {
397 			__vxge_hw_pio_mem_write32_upper(val32,
398 					&hldev->common_reg->tim_int_status1);
399 
400 			__vxge_hw_pio_mem_write32_upper(~val32,
401 					&hldev->common_reg->tim_int_mask1);
402 		}
403 	}
404 
405 	val64 = readq(&hldev->common_reg->titan_general_int_status);
406 
407 	vxge_hw_device_unmask_all(hldev);
408 }
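
/*
 * Illustrative bring-up sketch (the probe-time context is assumed, not code
 * from this file): the interrupt mode is selected before device interrupts
 * are enabled, and vxge_hw_device_intr_enable() runs last in the init
 * sequence. Unknown modes fall back to VXGE_HW_INTR_MODE_IRQLINE.
 *
 *	u32 mode;
 *
 *	mode = vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	vxge_hw_device_intr_enable(hldev);
 */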
409 
410 /**
411  * vxge_hw_device_intr_disable - Disable Titan interrupts.
412  * @hldev: HW device handle.
413  * @op: One of the enum vxge_hw_device_intr enumerated values specifying
414  *      the type(s) of interrupts to disable.
415  *
416  * Disable Titan interrupts.
417  *
418  * See also: vxge_hw_device_intr_enable()
419  */
420 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
421 {
422 	u32 i;
423 
424 	vxge_hw_device_mask_all(hldev);
425 
426 	/* mask all the tim interrupts */
427 	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
428 	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
429 		&hldev->common_reg->tim_int_mask1);
430 
431 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
432 
433 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
434 			continue;
435 
436 		vxge_hw_vpath_intr_disable(
437 			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
438 	}
439 }
440 
441 /**
442  * vxge_hw_device_mask_all - Mask all device interrupts.
443  * @hldev: HW device handle.
444  *
445  * Mask	all device interrupts.
446  *
447  * See also: vxge_hw_device_unmask_all()
448  */
449 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
450 {
451 	u64 val64;
452 
453 	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
454 		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
455 
456 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
457 				&hldev->common_reg->titan_mask_all_int);
458 }
459 
460 /**
461  * vxge_hw_device_unmask_all - Unmask all device interrupts.
462  * @hldev: HW device handle.
463  *
464  * Unmask all device interrupts.
465  *
466  * See also: vxge_hw_device_mask_all()
467  */
468 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
469 {
470 	u64 val64 = 0;
471 
472 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
473 		val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
474 
475 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
476 			&hldev->common_reg->titan_mask_all_int);
477 }
478 
479 /**
480  * vxge_hw_device_flush_io - Flush io writes.
481  * @hldev: HW device handle.
482  *
483  * The function	performs a read operation to flush io writes.
484  *
485  * Returns: void
486  */
487 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
488 {
489 	u32 val32;
490 
491 	val32 = readl(&hldev->common_reg->titan_general_int_status);
492 }
493 
494 /**
495  * __vxge_hw_device_handle_error - Handle error
496  * @hldev: HW device
497  * @vp_id: Vpath Id
498  * @type: Error type. Please see enum vxge_hw_event{}
499  *
500  * Handle error.
501  */
502 static enum vxge_hw_status
503 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
504 			      enum vxge_hw_event type)
505 {
506 	switch (type) {
507 	case VXGE_HW_EVENT_UNKNOWN:
508 		break;
509 	case VXGE_HW_EVENT_RESET_START:
510 	case VXGE_HW_EVENT_RESET_COMPLETE:
511 	case VXGE_HW_EVENT_LINK_DOWN:
512 	case VXGE_HW_EVENT_LINK_UP:
513 		goto out;
514 	case VXGE_HW_EVENT_ALARM_CLEARED:
515 		goto out;
516 	case VXGE_HW_EVENT_ECCERR:
517 	case VXGE_HW_EVENT_MRPCIM_ECCERR:
518 		goto out;
519 	case VXGE_HW_EVENT_FIFO_ERR:
520 	case VXGE_HW_EVENT_VPATH_ERR:
521 	case VXGE_HW_EVENT_CRITICAL_ERR:
522 	case VXGE_HW_EVENT_SERR:
523 		break;
524 	case VXGE_HW_EVENT_SRPCIM_SERR:
525 	case VXGE_HW_EVENT_MRPCIM_SERR:
526 		goto out;
527 	case VXGE_HW_EVENT_SLOT_FREEZE:
528 		break;
529 	default:
530 		vxge_assert(0);
531 		goto out;
532 	}
533 
534 	/* notify driver */
535 	if (hldev->uld_callbacks->crit_err)
536 		hldev->uld_callbacks->crit_err(hldev,
537 			type, vp_id);
538 out:
539 
540 	return VXGE_HW_OK;
541 }
542 
543 /*
544  * __vxge_hw_device_handle_link_down_ind
545  * @hldev: HW device handle.
546  *
547  * Link down indication handler. The function is invoked by HW when
548  * Titan indicates that the link is down.
549  */
550 static enum vxge_hw_status
551 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
552 {
553 	/*
554 	 * If the link state is already down, there is nothing to do; return.
555 	 */
556 	if (hldev->link_state == VXGE_HW_LINK_DOWN)
557 		goto exit;
558 
559 	hldev->link_state = VXGE_HW_LINK_DOWN;
560 
561 	/* notify driver */
562 	if (hldev->uld_callbacks->link_down)
563 		hldev->uld_callbacks->link_down(hldev);
564 exit:
565 	return VXGE_HW_OK;
566 }
567 
568 /*
569  * __vxge_hw_device_handle_link_up_ind
570  * @hldev: HW device handle.
571  *
572  * Link up indication handler. The function is invoked by HW when
573  * Titan indicates that the link is up for a programmable amount of time.
574  */
575 static enum vxge_hw_status
576 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
577 {
578 	/*
579 	 * If the link state is already up, there is nothing to do; return.
580 	 */
581 	if (hldev->link_state == VXGE_HW_LINK_UP)
582 		goto exit;
583 
584 	hldev->link_state = VXGE_HW_LINK_UP;
585 
586 	/* notify driver */
587 	if (hldev->uld_callbacks->link_up)
588 		hldev->uld_callbacks->link_up(hldev);
589 exit:
590 	return VXGE_HW_OK;
591 }
592 
593 /*
594  * __vxge_hw_vpath_alarm_process - Process Alarms.
595  * @vpath: Virtual Path.
596  * @skip_alarms: Do not clear the alarms
597  *
598  * Process vpath alarms.
599  *
600  */
601 static enum vxge_hw_status
602 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
603 			      u32 skip_alarms)
604 {
605 	u64 val64;
606 	u64 alarm_status;
607 	u64 pic_status;
608 	struct __vxge_hw_device *hldev = NULL;
609 	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
610 	u64 mask64;
611 	struct vxge_hw_vpath_stats_sw_info *sw_stats;
612 	struct vxge_hw_vpath_reg __iomem *vp_reg;
613 
614 	if (vpath == NULL) {
615 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
616 			alarm_event);
617 		goto out2;
618 	}
619 
620 	hldev = vpath->hldev;
621 	vp_reg = vpath->vp_reg;
622 	alarm_status = readq(&vp_reg->vpath_general_int_status);
623 
624 	if (alarm_status == VXGE_HW_ALL_FOXES) {
625 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
626 			alarm_event);
627 		goto out;
628 	}
629 
630 	sw_stats = vpath->sw_stats;
631 
632 	if (alarm_status & ~(
633 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
634 		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
635 		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
636 		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
637 		sw_stats->error_stats.unknown_alarms++;
638 
639 		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
640 			alarm_event);
641 		goto out;
642 	}
643 
644 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
645 
646 		val64 = readq(&vp_reg->xgmac_vp_int_status);
647 
648 		if (val64 &
649 		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
650 
651 			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
652 
653 			if (((val64 &
654 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
655 			     (!(val64 &
656 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
657 			    ((val64 &
658 			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
659 			     (!(val64 &
660 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
661 				     ))) {
662 				sw_stats->error_stats.network_sustained_fault++;
663 
664 				writeq(
665 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
666 					&vp_reg->asic_ntwk_vp_err_mask);
667 
668 				__vxge_hw_device_handle_link_down_ind(hldev);
669 				alarm_event = VXGE_HW_SET_LEVEL(
670 					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
671 			}
672 
673 			if (((val64 &
674 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
675 			     (!(val64 &
676 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
677 			    ((val64 &
678 			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
679 			     (!(val64 &
680 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
681 				     ))) {
682 
683 				sw_stats->error_stats.network_sustained_ok++;
684 
685 				writeq(
686 				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
687 					&vp_reg->asic_ntwk_vp_err_mask);
688 
689 				__vxge_hw_device_handle_link_up_ind(hldev);
690 				alarm_event = VXGE_HW_SET_LEVEL(
691 					VXGE_HW_EVENT_LINK_UP, alarm_event);
692 			}
693 
694 			writeq(VXGE_HW_INTR_MASK_ALL,
695 				&vp_reg->asic_ntwk_vp_err_reg);
696 
697 			alarm_event = VXGE_HW_SET_LEVEL(
698 				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
699 
700 			if (skip_alarms)
701 				return VXGE_HW_OK;
702 		}
703 	}
704 
705 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
706 
707 		pic_status = readq(&vp_reg->vpath_ppif_int_status);
708 
709 		if (pic_status &
710 		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
711 
712 			val64 = readq(&vp_reg->general_errors_reg);
713 			mask64 = readq(&vp_reg->general_errors_mask);
714 
715 			if ((val64 &
716 				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
717 				~mask64) {
718 				sw_stats->error_stats.ini_serr_det++;
719 
720 				alarm_event = VXGE_HW_SET_LEVEL(
721 					VXGE_HW_EVENT_SERR, alarm_event);
722 			}
723 
724 			if ((val64 &
725 			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
726 				~mask64) {
727 				sw_stats->error_stats.dblgen_fifo0_overflow++;
728 
729 				alarm_event = VXGE_HW_SET_LEVEL(
730 					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
731 			}
732 
733 			if ((val64 &
734 			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
735 				~mask64)
736 				sw_stats->error_stats.statsb_pif_chain_error++;
737 
738 			if ((val64 &
739 			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
740 				~mask64)
741 				sw_stats->error_stats.statsb_drop_timeout++;
742 
743 			if ((val64 &
744 				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
745 				~mask64)
746 				sw_stats->error_stats.target_illegal_access++;
747 
748 			if (!skip_alarms) {
749 				writeq(VXGE_HW_INTR_MASK_ALL,
750 					&vp_reg->general_errors_reg);
751 				alarm_event = VXGE_HW_SET_LEVEL(
752 					VXGE_HW_EVENT_ALARM_CLEARED,
753 					alarm_event);
754 			}
755 		}
756 
757 		if (pic_status &
758 		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
759 
760 			val64 = readq(&vp_reg->kdfcctl_errors_reg);
761 			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
762 
763 			if ((val64 &
764 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
765 				~mask64) {
766 				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
767 
768 				alarm_event = VXGE_HW_SET_LEVEL(
769 					VXGE_HW_EVENT_FIFO_ERR,
770 					alarm_event);
771 			}
772 
773 			if ((val64 &
774 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
775 				~mask64) {
776 				sw_stats->error_stats.kdfcctl_fifo0_poison++;
777 
778 				alarm_event = VXGE_HW_SET_LEVEL(
779 					VXGE_HW_EVENT_FIFO_ERR,
780 					alarm_event);
781 			}
782 
783 			if ((val64 &
784 			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
785 				~mask64) {
786 				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
787 
788 				alarm_event = VXGE_HW_SET_LEVEL(
789 					VXGE_HW_EVENT_FIFO_ERR,
790 					alarm_event);
791 			}
792 
793 			if (!skip_alarms) {
794 				writeq(VXGE_HW_INTR_MASK_ALL,
795 					&vp_reg->kdfcctl_errors_reg);
796 				alarm_event = VXGE_HW_SET_LEVEL(
797 					VXGE_HW_EVENT_ALARM_CLEARED,
798 					alarm_event);
799 			}
800 		}
801 
802 	}
803 
804 	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
805 
806 		val64 = readq(&vp_reg->wrdma_alarm_status);
807 
808 		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
809 
810 			val64 = readq(&vp_reg->prc_alarm_reg);
811 			mask64 = readq(&vp_reg->prc_alarm_mask);
812 
813 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
814 				~mask64)
815 				sw_stats->error_stats.prc_ring_bumps++;
816 
817 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
818 				~mask64) {
819 				sw_stats->error_stats.prc_rxdcm_sc_err++;
820 
821 				alarm_event = VXGE_HW_SET_LEVEL(
822 					VXGE_HW_EVENT_VPATH_ERR,
823 					alarm_event);
824 			}
825 
826 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
827 				& ~mask64) {
828 				sw_stats->error_stats.prc_rxdcm_sc_abort++;
829 
830 				alarm_event = VXGE_HW_SET_LEVEL(
831 						VXGE_HW_EVENT_VPATH_ERR,
832 						alarm_event);
833 			}
834 
835 			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
836 				 & ~mask64) {
837 				sw_stats->error_stats.prc_quanta_size_err++;
838 
839 				alarm_event = VXGE_HW_SET_LEVEL(
840 					VXGE_HW_EVENT_VPATH_ERR,
841 					alarm_event);
842 			}
843 
844 			if (!skip_alarms) {
845 				writeq(VXGE_HW_INTR_MASK_ALL,
846 					&vp_reg->prc_alarm_reg);
847 				alarm_event = VXGE_HW_SET_LEVEL(
848 						VXGE_HW_EVENT_ALARM_CLEARED,
849 						alarm_event);
850 			}
851 		}
852 	}
853 out:
854 	hldev->stats.sw_dev_err_stats.vpath_alarms++;
855 out2:
856 	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
857 		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
858 		return VXGE_HW_OK;
859 
860 	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
861 
862 	if (alarm_event == VXGE_HW_EVENT_SERR)
863 		return VXGE_HW_ERR_CRITICAL;
864 
865 	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
866 		VXGE_HW_ERR_SLOT_FREEZE :
867 		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
868 		VXGE_HW_ERR_VPATH;
869 }
870 
871 /**
872  * vxge_hw_device_begin_irq - Begin IRQ processing.
873  * @hldev: HW device handle.
874  * @skip_alarms: Do not clear the alarms
875  * @reason: "Reason" for the interrupt, the value of Titan's
876  *	general_int_status register.
877  *
878  * The function performs two actions: it first checks whether the interrupt was
879  * raised by the device (relevant for shared IRQs), and then masks the device interrupts.
880  *
881  * Note:
882  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
883  * bridge. Therefore, two back-to-back interrupts are potentially possible.
884  *
885  * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that in
886  * this case the device remains enabled).
887  * Otherwise, the 64-bit general adapter status is returned through @reason and
888  * the function returns VXGE_HW_OK, or an error status if a critical alarm was processed.
889  */
890 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
891 					     u32 skip_alarms, u64 *reason)
892 {
893 	u32 i;
894 	u64 val64;
895 	u64 adapter_status;
896 	u64 vpath_mask;
897 	enum vxge_hw_status ret = VXGE_HW_OK;
898 
899 	val64 = readq(&hldev->common_reg->titan_general_int_status);
900 
901 	if (unlikely(!val64)) {
902 		/* not Titan interrupt	*/
903 		*reason	= 0;
904 		ret = VXGE_HW_ERR_WRONG_IRQ;
905 		goto exit;
906 	}
907 
908 	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
909 
910 		adapter_status = readq(&hldev->common_reg->adapter_status);
911 
912 		if (adapter_status == VXGE_HW_ALL_FOXES) {
913 
914 			__vxge_hw_device_handle_error(hldev,
915 				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
916 			*reason	= 0;
917 			ret = VXGE_HW_ERR_SLOT_FREEZE;
918 			goto exit;
919 		}
920 	}
921 
922 	hldev->stats.sw_dev_info_stats.total_intr_cnt++;
923 
924 	*reason	= val64;
925 
926 	vpath_mask = hldev->vpaths_deployed >>
927 				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
928 
929 	if (val64 &
930 	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
931 		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
932 
933 		return VXGE_HW_OK;
934 	}
935 
936 	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
937 
938 	if (unlikely(val64 &
939 			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
940 
941 		enum vxge_hw_status error_level = VXGE_HW_OK;
942 
943 		hldev->stats.sw_dev_err_stats.vpath_alarms++;
944 
945 		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
946 
947 			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
948 				continue;
949 
950 			ret = __vxge_hw_vpath_alarm_process(
951 				&hldev->virtual_paths[i], skip_alarms);
952 
953 			error_level = VXGE_HW_SET_LEVEL(ret, error_level);
954 
955 			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
956 				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
957 				break;
958 		}
959 
960 		ret = error_level;
961 	}
962 exit:
963 	return ret;
964 }
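
/*
 * Illustrative ISR sketch (the handler name and surrounding context are
 * assumptions, not code from this file): a legacy-interrupt handler claims
 * the interrupt via vxge_hw_device_begin_irq(), services traffic, then
 * clears the Tx/Rx condition and unmasks the device.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		enum vxge_hw_status status;
 *		u64 reason;
 *
 *		status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *		if (status == VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;
 *
 *		... poll rings and fifos for completions here ...
 *
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		vxge_hw_device_unmask_all(hldev);
 *		return IRQ_HANDLED;
 *	}
 */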
965 
966 /**
967  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
968  * condition that has caused the Tx and Rx interrupt.
969  * @hldev: HW device.
970  *
971  * Acknowledge (that is, clear) the condition that has caused
972  * the Tx and Rx interrupt.
973  * See also: vxge_hw_device_begin_irq(),
974  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
975  */
976 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
977 {
978 
979 	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
980 	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
981 		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
982 				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
983 				&hldev->common_reg->tim_int_status0);
984 	}
985 
986 	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
987 	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
988 		__vxge_hw_pio_mem_write32_upper(
989 				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
990 				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
991 				&hldev->common_reg->tim_int_status1);
992 	}
993 }
994 
995 /*
996  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
997  * @channel: Channel
998  * @dtrh: Buffer to return the DTR pointer
999  *
1000  * Allocates a dtr from the reserve array. If the reserve array is empty,
1001  * it swaps the reserve and free arrays.
1002  *
1003  */
1004 static enum vxge_hw_status
1005 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1006 {
1007 	if (channel->reserve_ptr - channel->reserve_top > 0) {
1008 _alloc_after_swap:
1009 		*dtrh =	channel->reserve_arr[--channel->reserve_ptr];
1010 
1011 		return VXGE_HW_OK;
1012 	}
1013 
1014 	/* switch between empty	and full arrays	*/
1015 
1016 	/* The idea behind such a design is that by keeping the free and reserve
1017 	 * arrays separate we effectively separate the irq and non-irq parts,
1018 	 * i.e. no additional locking is needed when we free a resource. */
1019 
1020 	if (channel->length - channel->free_ptr > 0) {
1021 		swap(channel->reserve_arr, channel->free_arr);
1022 		channel->reserve_ptr = channel->length;
1023 		channel->reserve_top = channel->free_ptr;
1024 		channel->free_ptr = channel->length;
1025 
1026 		channel->stats->reserve_free_swaps_cnt++;
1027 
1028 		goto _alloc_after_swap;
1029 	}
1030 
1031 	channel->stats->full_cnt++;
1032 
1033 	*dtrh =	NULL;
1034 	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1035 }
1036 
1037 /*
1038  * vxge_hw_channel_dtr_post - Post a dtr to the channel
1039  * @channelh: Channel
1040  * @dtrh: DTR pointer
1041  *
1042  * Posts a dtr to work array.
1043  *
1044  */
1045 static void
1046 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1047 {
1048 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
1049 
1050 	channel->work_arr[channel->post_index++] = dtrh;
1051 
1052 	/* wrap-around */
1053 	if (channel->post_index	== channel->length)
1054 		channel->post_index = 0;
1055 }
1056 
1057 /*
1058  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1059  * @channel: Channel
1060  * @dtr: Buffer to return the next completed DTR pointer
1061  *
1062  * Returns the next completed dtr without removing it from the work array
1063  *
1064  */
1065 void
1066 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1067 {
1068 	vxge_assert(channel->compl_index < channel->length);
1069 
1070 	*dtrh =	channel->work_arr[channel->compl_index];
1071 	prefetch(*dtrh);
1072 }
1073 
1074 /*
1075  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1076  * @channel: Channel handle
1077  *
1078  * Removes the next completed dtr from work array
1079  *
1080  */
1081 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1082 {
1083 	channel->work_arr[channel->compl_index]	= NULL;
1084 
1085 	/* wrap-around */
1086 	if (++channel->compl_index == channel->length)
1087 		channel->compl_index = 0;
1088 
1089 	channel->stats->total_compl_cnt++;
1090 }
1091 
1092 /*
1093  * vxge_hw_channel_dtr_free - Frees a dtr
1094  * @channel: Channel handle
1095  * @dtr:  DTR pointer
1096  *
1097  * Returns the dtr to free array
1098  *
1099  */
1100 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1101 {
1102 	channel->free_arr[--channel->free_ptr] = dtrh;
1103 }
1104 
1105 /*
1106  * vxge_hw_channel_dtr_count
1107  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1108  *
1109  * Retrieve the number of DTRs available. This function cannot be called
1110  * from the data path. ring_initial_replenish() is the only user.
1111  */
1112 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1113 {
1114 	return (channel->reserve_ptr - channel->reserve_top) +
1115 		(channel->length - channel->free_ptr);
1116 }
1117 
1118 /**
1119  * vxge_hw_ring_rxd_reserve	- Reserve ring descriptor.
1120  * @ring: Handle to the ring object used for receive
1121  * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1122  * with a valid handle.
1123  *
1124  * Reserve an Rx descriptor for subsequent filling-in by the driver
1125  * and posting on the corresponding channel (@channelh)
1126  * via vxge_hw_ring_rxd_post().
1127  *
1128  * Returns: VXGE_HW_OK - success.
1129  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1130  *
1131  */
1132 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1133 	void **rxdh)
1134 {
1135 	enum vxge_hw_status status;
1136 	struct __vxge_hw_channel *channel;
1137 
1138 	channel = &ring->channel;
1139 
1140 	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1141 
1142 	if (status == VXGE_HW_OK) {
1143 		struct vxge_hw_ring_rxd_1 *rxdp =
1144 			(struct vxge_hw_ring_rxd_1 *)*rxdh;
1145 
1146 		rxdp->control_0	= rxdp->control_1 = 0;
1147 	}
1148 
1149 	return status;
1150 }
1151 
1152 /**
1153  * vxge_hw_ring_rxd_free - Free descriptor.
1154  * @ring: Handle to the ring object used for receive
1155  * @rxdh: Descriptor handle.
1156  *
1157  * Free	the reserved descriptor. This operation is "symmetrical" to
1158  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1159  * lifecycle.
1160  *
1161  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1162  * be:
1163  *
1164  * - reserved (vxge_hw_ring_rxd_reserve);
1165  *
1166  * - posted	(vxge_hw_ring_rxd_post);
1167  *
1168  * - completed (vxge_hw_ring_rxd_next_completed);
1169  *
1170  * - and recycled again	(vxge_hw_ring_rxd_free).
1171  *
1172  * For alternative state transitions and more details please refer to
1173  * the design doc.
1174  *
1175  */
1176 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1177 {
1178 	struct __vxge_hw_channel *channel;
1179 
1180 	channel = &ring->channel;
1181 
1182 	vxge_hw_channel_dtr_free(channel, rxdh);
1183 
1184 }
1185 
1186 /**
1187  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1188  * @ring: Handle to the ring object used for receive
1189  * @rxdh: Descriptor handle.
1190  *
1191  * This routine prepares a rxd and posts
1192  */
1193 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1194 {
1195 	struct __vxge_hw_channel *channel;
1196 
1197 	channel = &ring->channel;
1198 
1199 	vxge_hw_channel_dtr_post(channel, rxdh);
1200 }
1201 
1202 /**
1203  * vxge_hw_ring_rxd_post_post - Process rxd after post.
1204  * @ring: Handle to the ring object used for receive
1205  * @rxdh: Descriptor handle.
1206  *
1207  * Processes rxd after post
1208  */
1209 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1210 {
1211 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1212 
1213 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1214 
1215 	if (ring->stats->common_stats.usage_cnt > 0)
1216 		ring->stats->common_stats.usage_cnt--;
1217 }
1218 
1219 /**
1220  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1221  * @ring: Handle to the ring object used for receive
1222  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1223  *
1224  * Post	descriptor on the ring.
1225  * Prior to posting the	descriptor should be filled in accordance with
1226  * Host/Titan interface specification for a given service (LL, etc.).
1227  *
1228  */
1229 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1230 {
1231 	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1232 	struct __vxge_hw_channel *channel;
1233 
1234 	channel = &ring->channel;
1235 
1236 	wmb();
1237 	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1238 
1239 	vxge_hw_channel_dtr_post(channel, rxdh);
1240 
1241 	if (ring->stats->common_stats.usage_cnt > 0)
1242 		ring->stats->common_stats.usage_cnt--;
1243 }
1244 
1245 /**
1246  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1247  * @ring: Handle to the ring object used for receive
1248  * @rxdh: Descriptor handle.
1249  *
1250  * Processes rxd after post with memory barrier.
1251  */
1252 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1253 {
1254 	wmb();
1255 	vxge_hw_ring_rxd_post_post(ring, rxdh);
1256 }
1257 
1258 /**
1259  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1260  * @ring: Handle to the ring object used for receive
1261  * @rxdh: Descriptor handle. Returned by HW.
1262  * @t_code:	Transfer code, as per Titan User Guide,
1263  *	 Receive Descriptor Format. Returned by HW.
1264  *
1265  * Retrieve the	_next_ completed descriptor.
1266  * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
1267  * driver of new completed descriptors. After that
1268  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the remaining
1269  * completions (the very first completion is passed by HW via
1270  * vxge_hw_ring_callback_f).
1271  *
1272  * Implementation-wise, the driver is free to call
1273  * vxge_hw_ring_rxd_next_completed either immediately from inside the
1274  * ring callback, or in a deferred fashion and separate (from HW)
1275  * context.
1276  *
1277  * Non-zero @t_code means failure to fill-in receive buffer(s)
1278  * of the descriptor.
1279  * For instance, parity	error detected during the data transfer.
1280  * In this case	Titan will complete the descriptor and indicate
1281  * for the host	that the received data is not to be used.
1282  * For details please refer to Titan User Guide.
1283  *
1284  * Returns: VXGE_HW_OK - success.
1285  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1286  * are currently available for processing.
1287  *
1288  * See also: vxge_hw_ring_callback_f{},
1289  * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1290  */
1291 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1292 	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1293 {
1294 	struct __vxge_hw_channel *channel;
1295 	struct vxge_hw_ring_rxd_1 *rxdp;
1296 	enum vxge_hw_status status = VXGE_HW_OK;
1297 	u64 control_0, own;
1298 
1299 	channel = &ring->channel;
1300 
1301 	vxge_hw_channel_dtr_try_complete(channel, rxdh);
1302 
1303 	rxdp = *rxdh;
1304 	if (rxdp == NULL) {
1305 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1306 		goto exit;
1307 	}
1308 
1309 	control_0 = rxdp->control_0;
1310 	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1311 	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1312 
1313 	/* check whether it is not the end */
1314 	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1315 
1316 		vxge_assert((rxdp)->host_control !=
1317 				0);
1318 
1319 		++ring->cmpl_cnt;
1320 		vxge_hw_channel_dtr_complete(channel);
1321 
1322 		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1323 
1324 		ring->stats->common_stats.usage_cnt++;
1325 		if (ring->stats->common_stats.usage_max <
1326 				ring->stats->common_stats.usage_cnt)
1327 			ring->stats->common_stats.usage_max =
1328 				ring->stats->common_stats.usage_cnt;
1329 
1330 		status = VXGE_HW_OK;
1331 		goto exit;
1332 	}
1333 
1334 	/* reset it. since we don't want to return
1335 	 * garbage to the driver */
1336 	*rxdh =	NULL;
1337 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1338 exit:
1339 	return status;
1340 }
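
/*
 * Illustrative Rx completion loop (an assumed caller-side sketch, not code
 * from this file): drain the completed descriptors, validate the transfer
 * code, and recycle or re-post each RxD.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *								VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *								VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		... hand the received buffer to the stack, then attach a
 *		    fresh buffer and re-post via vxge_hw_ring_rxd_post() ...
 *	}
 */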
1341 
1342 /**
1343  * vxge_hw_ring_handle_tcode - Handle transfer code.
1344  * @ring: Handle to the ring object used for receive
1345  * @rxdh: Descriptor handle.
1346  * @t_code: One of the enumerated (and documented in the Titan user guide)
1347  * "transfer codes".
1348  *
1349  * Handle descriptor's transfer code. The latter comes with each completed
1350  * descriptor.
1351  *
1352  * Returns: one of the enum vxge_hw_status{} enumerated types.
1353  * VXGE_HW_OK			- for success.
1354  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1355  */
1356 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1357 	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1358 {
1359 	enum vxge_hw_status status = VXGE_HW_OK;
1360 
1361 	/* If the t_code is not supported and the t_code is other
1362 	 * than 0x5 (an unparseable packet, such as one with an
1363 	 * unknown IPv6 header), drop it.
1364 	 */
1365 
1366 	if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1367 		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1368 		status = VXGE_HW_OK;
1369 		goto exit;
1370 	}
1371 
1372 	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1373 		status = VXGE_HW_ERR_INVALID_TCODE;
1374 		goto exit;
1375 	}
1376 
1377 	ring->stats->rxd_t_code_err_cnt[t_code]++;
1378 exit:
1379 	return status;
1380 }
1381 
1382 /**
1383  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1384  *
1385  * @fifo: fifohandle
1386  * @txdl_ptr: The starting location of the TxDL in host memory
1387  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1388  * @no_snoop: No snoop flags
1389  *
1390  * This function posts a non-offload doorbell to doorbell FIFO
1391  *
1392  */
1393 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1394 	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1395 {
1396 	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1397 		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1398 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1399 		&fifo->nofl_db->control_0);
1400 
1401 	mmiowb();
1402 
1403 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1404 
1405 	mmiowb();
1406 }
1407 
1408 /**
1409  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1410  * the fifo
1411  * @fifoh: Handle to the fifo object used for non offload send
1412  */
1413 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1414 {
1415 	return vxge_hw_channel_dtr_count(&fifoh->channel);
1416 }
1417 
1418 /**
1419  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1420  * @fifoh: Handle to the fifo object used for non offload send
1421  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1422  *        with a valid handle.
1423  * @txdl_priv: Buffer to return the pointer to per txdl space
1424  *
1425  * Reserve a single TxDL (that is, fifo descriptor)
1426  * for subsequent filling-in by the driver
1427  * and posting on the corresponding channel (@channelh)
1428  * via vxge_hw_fifo_txdl_post().
1429  *
1430  * Note: it is the responsibility of the driver to reserve multiple descriptors
1431  * for lengthy (e.g., LSO) transmit operations. A single fifo descriptor
1432  * carries up to the configured number (fifo.max_frags) of contiguous buffers.
1433  *
1434  * Returns: VXGE_HW_OK - success;
1435  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1436  *
1437  */
1438 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1439 	struct __vxge_hw_fifo *fifo,
1440 	void **txdlh, void **txdl_priv)
1441 {
1442 	struct __vxge_hw_channel *channel;
1443 	enum vxge_hw_status status;
1444 	int i;
1445 
1446 	channel = &fifo->channel;
1447 
1448 	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1449 
1450 	if (status == VXGE_HW_OK) {
1451 		struct vxge_hw_fifo_txd *txdp =
1452 			(struct vxge_hw_fifo_txd *)*txdlh;
1453 		struct __vxge_hw_fifo_txdl_priv *priv;
1454 
1455 		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1456 
1457 		/* reset the TxDL's private */
1458 		priv->align_dma_offset = 0;
1459 		priv->align_vaddr_start = priv->align_vaddr;
1460 		priv->align_used_frags = 0;
1461 		priv->frags = 0;
1462 		priv->alloc_frags = fifo->config->max_frags;
1463 		priv->next_txdl_priv = NULL;
1464 
1465 		*txdl_priv = (void *)(size_t)txdp->host_control;
1466 
1467 		for (i = 0; i < fifo->config->max_frags; i++) {
1468 			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1469 			txdp->control_0 = txdp->control_1 = 0;
1470 		}
1471 	}
1472 
1473 	return status;
1474 }
1475 
1476 /**
1477  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1478  * descriptor.
1479  * @fifo: Handle to the fifo object used for non offload send
1480  * @txdlh: Descriptor handle.
1481  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1482  *            (of buffers).
1483  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1484  * @size: Size of the data buffer (in bytes).
1485  *
1486  * This API is part of the preparation of the transmit descriptor for posting
1487  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1488  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1489  * All three APIs fill in the fields of the fifo descriptor,
1490  * in accordance with the Titan specification.
1491  *
1492  */
1493 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1494 				  void *txdlh, u32 frag_idx,
1495 				  dma_addr_t dma_pointer, u32 size)
1496 {
1497 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1498 	struct vxge_hw_fifo_txd *txdp, *txdp_last;
1499 
1500 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1501 	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1502 
1503 	if (frag_idx != 0)
1504 		txdp->control_0 = txdp->control_1 = 0;
1505 	else {
1506 		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1507 			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1508 		txdp->control_1 |= fifo->interrupt_type;
1509 		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1510 			fifo->tx_intr_num);
1511 		if (txdl_priv->frags) {
1512 			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1513 			(txdl_priv->frags - 1);
1514 			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1515 				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1516 		}
1517 	}
1518 
1519 	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1520 
1521 	txdp->buffer_pointer = (u64)dma_pointer;
1522 	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1523 	fifo->stats->total_buffers++;
1524 	txdl_priv->frags++;
1525 }
1526 
1527 /**
1528  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1529  * @fifo: Handle to the fifo object used for non offload send
1530  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1531  * @frags: Number of contiguous buffers that are part of a single
1532  *         transmit operation.
1533  *
1534  * Post descriptor on the 'fifo' type channel for transmission.
1535  * Prior to posting the descriptor should be filled in accordance with
1536  * Host/Titan interface specification for a given service (LL, etc.).
1537  *
1538  */
1539 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1540 {
1541 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1542 	struct vxge_hw_fifo_txd *txdp_last;
1543 	struct vxge_hw_fifo_txd *txdp_first;
1544 
1545 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1546 	txdp_first = txdlh;
1547 
1548 	txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1549 	txdp_last->control_0 |=
1550 	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1551 	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1552 
1553 	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1554 
1555 	__vxge_hw_non_offload_db_post(fifo,
1556 		(u64)txdl_priv->dma_addr,
1557 		txdl_priv->frags - 1,
1558 		fifo->no_snoop_bits);
1559 
1560 	fifo->stats->total_posts++;
1561 	fifo->stats->common_stats.usage_cnt++;
1562 	if (fifo->stats->common_stats.usage_max <
1563 		fifo->stats->common_stats.usage_cnt)
1564 		fifo->stats->common_stats.usage_max =
1565 			fifo->stats->common_stats.usage_cnt;
1566 }
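
/*
 * Illustrative Tx post sequence (the dma_addr/len variables and error
 * handling are assumptions, not code from this file): reserve a TxDL, attach
 * the DMA-mapped fragments, then post it to the fifo.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return;	(no descriptors currently available)
 *
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *	... call vxge_hw_fifo_txdl_buffer_set() once per additional fragment ...
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */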
1567 
1568 /**
1569  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1570  * @fifo: Handle to the fifo object used for non offload send
1571  * @txdlh: Descriptor handle. Returned by HW.
1572  * @t_code: Transfer code, as per Titan User Guide,
1573  *          Transmit Descriptor Format.
1574  *          Returned by HW.
1575  *
1576  * Retrieve the _next_ completed descriptor.
1577  * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1578  * driver of new completed descriptors. After that
1579  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the remaining
1580  * completions (the very first completion is passed by HW via
1581  * vxge_hw_channel_callback_f).
1582  *
1583  * Implementation-wise, the driver is free to call
1584  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1585  * channel callback, or in a deferred fashion and separate (from HW)
1586  * context.
1587  *
1588  * Non-zero @t_code means failure to process the descriptor.
1589  * The failure could happen, for instance, when the link is
1590  * down, in which case Titan completes the descriptor because it
1591  * is not able to send the data out.
1592  *
1593  * For details please refer to Titan User Guide.
1594  *
1595  * Returns: VXGE_HW_OK - success.
1596  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1597  * are currently available for processing.
1598  *
1599  */
1600 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1601 	struct __vxge_hw_fifo *fifo, void **txdlh,
1602 	enum vxge_hw_fifo_tcode *t_code)
1603 {
1604 	struct __vxge_hw_channel *channel;
1605 	struct vxge_hw_fifo_txd *txdp;
1606 	enum vxge_hw_status status = VXGE_HW_OK;
1607 
1608 	channel = &fifo->channel;
1609 
1610 	vxge_hw_channel_dtr_try_complete(channel, txdlh);
1611 
1612 	txdp = *txdlh;
1613 	if (txdp == NULL) {
1614 		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1615 		goto exit;
1616 	}
1617 
1618 	/* check whether host owns it */
1619 	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1620 
1621 		vxge_assert(txdp->host_control != 0);
1622 
1623 		vxge_hw_channel_dtr_complete(channel);
1624 
1625 		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1626 
1627 		if (fifo->stats->common_stats.usage_cnt > 0)
1628 			fifo->stats->common_stats.usage_cnt--;
1629 
1630 		status = VXGE_HW_OK;
1631 		goto exit;
1632 	}
1633 
1634 	/* no more completions */
1635 	*txdlh = NULL;
1636 	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1637 exit:
1638 	return status;
1639 }
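
/*
 * Illustrative Tx completion loop (an assumed caller-side sketch, not code
 * from this file): reap completed TxDLs, account for any error transfer
 * code, and return each descriptor to the free pool.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *								VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		... unmap the buffers attached to this TxDL ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */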
1640 
1641 /**
1642  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1643  * @fifo: Handle to the fifo object used for non offload send
1644  * @txdlh: Descriptor handle.
1645  * @t_code: One of the enumerated (and documented in the Titan user guide)
1646  *          "transfer codes".
1647  *
1648  * Handle descriptor's transfer code. The latter comes with each completed
1649  * descriptor.
1650  *
1651  * Returns: one of the enum vxge_hw_status{} enumerated types.
1652  * VXGE_HW_OK - for success.
1653  * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1654  */
1655 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1656 					      void *txdlh,
1657 					      enum vxge_hw_fifo_tcode t_code)
1658 {
1659 	enum vxge_hw_status status = VXGE_HW_OK;
1660 
1661 	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1662 		status = VXGE_HW_ERR_INVALID_TCODE;
1663 		goto exit;
1664 	}
1665 
1666 	fifo->stats->txd_t_code_err_cnt[t_code]++;
1667 exit:
1668 	return status;
1669 }
1670 
1671 /**
1672  * vxge_hw_fifo_txdl_free - Free descriptor.
1673  * @fifo: Handle to the fifo object used for non offload send
1674  * @txdlh: Descriptor handle.
1675  *
1676  * Free the reserved descriptor. This operation is "symmetrical" to
1677  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1678  * lifecycle.
1679  *
1680  * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1681  * be:
1682  *
1683  * - reserved (vxge_hw_fifo_txdl_reserve);
1684  *
1685  * - posted (vxge_hw_fifo_txdl_post);
1686  *
1687  * - completed (vxge_hw_fifo_txdl_next_completed);
1688  *
1689  * - and recycled again (vxge_hw_fifo_txdl_free).
1690  *
1691  * For alternative state transitions and more details please refer to
1692  * the design doc.
1693  *
1694  */
1695 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1696 {
1697 	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1698 	u32 max_frags;
1699 	struct __vxge_hw_channel *channel;
1700 
1701 	channel = &fifo->channel;
1702 
1703 	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1704 			(struct vxge_hw_fifo_txd *)txdlh);
1705 
1706 	max_frags = fifo->config->max_frags;
1707 
1708 	vxge_hw_channel_dtr_free(channel, txdlh);
1709 }
1710 
1711 /**
1712  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1713  *               to MAC address table.
1714  * @vp: Vpath handle.
1715  * @macaddr: MAC address to be added for this vpath into the list
1716  * @macaddr_mask: MAC address mask for macaddr
1717  * @duplicate_mode: Duplicate MAC address add mode. Please see
1718  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1719  *
1720  * Adds the given mac address and mac address mask into the list for this
1721  * vpath.
1722  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1723  * vxge_hw_vpath_mac_addr_get_next
1724  *
1725  */
1726 enum vxge_hw_status
1727 vxge_hw_vpath_mac_addr_add(
1728 	struct __vxge_hw_vpath_handle *vp,
1729 	u8 (macaddr)[ETH_ALEN],
1730 	u8 (macaddr_mask)[ETH_ALEN],
1731 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1732 {
1733 	u32 i;
1734 	u64 data1 = 0ULL;
1735 	u64 data2 = 0ULL;
1736 	enum vxge_hw_status status = VXGE_HW_OK;
1737 
1738 	if (vp == NULL) {
1739 		status = VXGE_HW_ERR_INVALID_HANDLE;
1740 		goto exit;
1741 	}
1742 
1743 	for (i = 0; i < ETH_ALEN; i++) {
1744 		data1 <<= 8;
1745 		data1 |= (u8)macaddr[i];
1746 
1747 		data2 <<= 8;
1748 		data2 |= (u8)macaddr_mask[i];
1749 	}
1750 
1751 	switch (duplicate_mode) {
1752 	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1753 		i = 0;
1754 		break;
1755 	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1756 		i = 1;
1757 		break;
1758 	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1759 		i = 2;
1760 		break;
1761 	default:
1762 		i = 0;
1763 		break;
1764 	}
1765 
1766 	status = __vxge_hw_vpath_rts_table_set(vp,
1767 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1768 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1769 			0,
1770 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1771 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1772 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1773 exit:
1774 	return status;
1775 }
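/*
 * Usage sketch (illustrative, not part of the driver): programming one
 * unicast address into the vpath DA table. The example address, the
 * all-0xFF mask and the "add duplicate" mode are assumptions made for the
 * illustration; the actual DA-mask semantics are described in the Titan
 * User Guide. "vp" is a valid vpath handle owned by the caller.
 *
 *	u8 addr[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	memset(mask, 0xFF, sizeof(mask));
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */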
1776 
1777 /**
1778  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1779  *               from MAC address table.
1780  * @vp: Vpath handle.
1781  * @macaddr: First MAC address entry for this vpath in the list
1782  * @macaddr_mask: MAC address mask for macaddr
1783  *
1784  * Returns the first mac address and mac address mask in the list for this
1785  * vpath.
1786  * see also: vxge_hw_vpath_mac_addr_get_next
1787  *
1788  */
1789 enum vxge_hw_status
1790 vxge_hw_vpath_mac_addr_get(
1791 	struct __vxge_hw_vpath_handle *vp,
1792 	u8 (macaddr)[ETH_ALEN],
1793 	u8 (macaddr_mask)[ETH_ALEN])
1794 {
1795 	u32 i;
1796 	u64 data1 = 0ULL;
1797 	u64 data2 = 0ULL;
1798 	enum vxge_hw_status status = VXGE_HW_OK;
1799 
1800 	if (vp == NULL) {
1801 		status = VXGE_HW_ERR_INVALID_HANDLE;
1802 		goto exit;
1803 	}
1804 
1805 	status = __vxge_hw_vpath_rts_table_get(vp,
1806 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1807 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1808 			0, &data1, &data2);
1809 
1810 	if (status != VXGE_HW_OK)
1811 		goto exit;
1812 
1813 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1814 
1815 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1816 
1817 	for (i = ETH_ALEN; i > 0; i--) {
1818 		macaddr[i-1] = (u8)(data1 & 0xFF);
1819 		data1 >>= 8;
1820 
1821 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1822 		data2 >>= 8;
1823 	}
1824 exit:
1825 	return status;
1826 }
1827 
1828 /**
1829  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for
1830  *               this vpath from the MAC address table.
1832  * @vp: Vpath handle.
1833  * @macaddr: Next MAC address entry for this vpath in the list
1834  * @macaddr_mask: MAC address mask for macaddr
1835  *
1836  * Returns the next mac address and mac address mask in the list for this
1837  * vpath.
1838  * see also: vxge_hw_vpath_mac_addr_get
1839  *
1840  */
1841 enum vxge_hw_status
1842 vxge_hw_vpath_mac_addr_get_next(
1843 	struct __vxge_hw_vpath_handle *vp,
1844 	u8 (macaddr)[ETH_ALEN],
1845 	u8 (macaddr_mask)[ETH_ALEN])
1846 {
1847 	u32 i;
1848 	u64 data1 = 0ULL;
1849 	u64 data2 = 0ULL;
1850 	enum vxge_hw_status status = VXGE_HW_OK;
1851 
1852 	if (vp == NULL) {
1853 		status = VXGE_HW_ERR_INVALID_HANDLE;
1854 		goto exit;
1855 	}
1856 
1857 	status = __vxge_hw_vpath_rts_table_get(vp,
1858 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1859 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1860 			0, &data1, &data2);
1861 
1862 	if (status != VXGE_HW_OK)
1863 		goto exit;
1864 
1865 	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1866 
1867 	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1868 
1869 	for (i = ETH_ALEN; i > 0; i--) {
1870 		macaddr[i-1] = (u8)(data1 & 0xFF);
1871 		data1 >>= 8;
1872 
1873 		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1874 		data2 >>= 8;
1875 	}
1876 
1877 exit:
1878 	return status;
1879 }
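/*
 * Usage sketch (illustrative, not part of the driver): walking the vpath
 * MAC address table with the get/get_next pair referenced above. "vp" is
 * a valid vpath handle; the walk ends on the first status other than
 * VXGE_HW_OK.
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		... consume addr/mask here ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
 *	}
 */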
1880 
1881 /**
1882  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1883  *               from the MAC address table.
1884  * @vp: Vpath handle.
1885  * @macaddr: MAC address to be deleted from the list for this vpath
1886  * @macaddr_mask: MAC address mask for macaddr
1887  *
1888  * Deletes the given mac address and mac address mask from the list for this
1889  * vpath.
1890  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1891  * vxge_hw_vpath_mac_addr_get_next
1892  *
1893  */
1894 enum vxge_hw_status
1895 vxge_hw_vpath_mac_addr_delete(
1896 	struct __vxge_hw_vpath_handle *vp,
1897 	u8 (macaddr)[ETH_ALEN],
1898 	u8 (macaddr_mask)[ETH_ALEN])
1899 {
1900 	u32 i;
1901 	u64 data1 = 0ULL;
1902 	u64 data2 = 0ULL;
1903 	enum vxge_hw_status status = VXGE_HW_OK;
1904 
1905 	if (vp == NULL) {
1906 		status = VXGE_HW_ERR_INVALID_HANDLE;
1907 		goto exit;
1908 	}
1909 
1910 	for (i = 0; i < ETH_ALEN; i++) {
1911 		data1 <<= 8;
1912 		data1 |= (u8)macaddr[i];
1913 
1914 		data2 <<= 8;
1915 		data2 |= (u8)macaddr_mask[i];
1916 	}
1917 
1918 	status = __vxge_hw_vpath_rts_table_set(vp,
1919 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1920 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1921 			0,
1922 			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1923 			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1924 exit:
1925 	return status;
1926 }
1927 
1928 /**
1929  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1930  *               to vlan id table.
1931  * @vp: Vpath handle.
1932  * @vid: vlan id to be added for this vpath into the list
1933  *
1934  * Adds the given vlan id into the list for this vpath.
1935  * see also: vxge_hw_vpath_vid_delete
1936  *
1937  */
1938 enum vxge_hw_status
1939 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1940 {
1941 	enum vxge_hw_status status = VXGE_HW_OK;
1942 
1943 	if (vp == NULL) {
1944 		status = VXGE_HW_ERR_INVALID_HANDLE;
1945 		goto exit;
1946 	}
1947 
1948 	status = __vxge_hw_vpath_rts_table_set(vp,
1949 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1950 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1951 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1952 exit:
1953 	return status;
1954 }
1955 
1956 /**
1957  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1958  *               from the vlan id table.
1959  * @vp: Vpath handle.
1960  * @vid: vlan id to be deleted from the list for this vpath
1961  *
1962  * Deletes the given vlan id from the list for this vpath.
1963  * see also: vxge_hw_vpath_vid_add
1964  *
1965  */
1966 enum vxge_hw_status
1967 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1968 {
1969 	enum vxge_hw_status status = VXGE_HW_OK;
1970 
1971 	if (vp == NULL) {
1972 		status = VXGE_HW_ERR_INVALID_HANDLE;
1973 		goto exit;
1974 	}
1975 
1976 	status = __vxge_hw_vpath_rts_table_set(vp,
1977 			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1978 			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1979 			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1980 exit:
1981 	return status;
1982 }
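/*
 * Usage sketch (illustrative, not part of the driver): mirroring an 8021q
 * filter update into the vpath vlan id table. "vp", "vid" and the "adding"
 * flag are hypothetical caller-owned variables; how the caller decides
 * between add and delete is outside this API.
 *
 *	if (adding)
 *		status = vxge_hw_vpath_vid_add(vp, vid);
 *	else
 *		status = vxge_hw_vpath_vid_delete(vp, vid);
 */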
1983 
1984 /**
1985  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1986  * @vp: Vpath handle.
1987  *
1988  * Enable promiscuous mode of Titan-e operation.
1989  *
1990  * See also: vxge_hw_vpath_promisc_disable().
1991  */
1992 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1993 			struct __vxge_hw_vpath_handle *vp)
1994 {
1995 	u64 val64;
1996 	struct __vxge_hw_virtualpath *vpath;
1997 	enum vxge_hw_status status = VXGE_HW_OK;
1998 
1999 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2000 		status = VXGE_HW_ERR_INVALID_HANDLE;
2001 		goto exit;
2002 	}
2003 
2004 	vpath = vp->vpath;
2005 
2006 	/* Enable promiscuous mode for function 0 only */
2007 	if (!(vpath->hldev->access_rights &
2008 		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2009 		return VXGE_HW_OK;
2010 
2011 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2012 
2013 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2014 
2015 		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2016 			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2017 			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2018 			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2019 
2020 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2021 	}
2022 exit:
2023 	return status;
2024 }
2025 
2026 /**
2027  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2028  * @vp: Vpath handle.
2029  *
2030  * Disable promiscuous mode of Titan-e operation.
2031  *
2032  * See also: vxge_hw_vpath_promisc_enable().
2033  */
2034 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2035 			struct __vxge_hw_vpath_handle *vp)
2036 {
2037 	u64 val64;
2038 	struct __vxge_hw_virtualpath *vpath;
2039 	enum vxge_hw_status status = VXGE_HW_OK;
2040 
2041 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2042 		status = VXGE_HW_ERR_INVALID_HANDLE;
2043 		goto exit;
2044 	}
2045 
2046 	vpath = vp->vpath;
2047 
2048 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2049 
2050 	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2051 
2052 		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2053 			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2054 			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2055 
2056 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2057 	}
2058 exit:
2059 	return status;
2060 }
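/*
 * Usage sketch (illustrative, not part of the driver): following the
 * netdevice IFF_PROMISC flag. "dev" and "vp" are hypothetical caller-owned
 * variables; note that the enable path above is effective only for a
 * function with MRPCIM access rights.
 *
 *	if (dev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */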
2061 
2062 /*
2063  * vxge_hw_vpath_bcast_enable - Enable broadcast
2064  * @vp: Vpath handle.
2065  *
2066  * Enable receiving broadcasts.
2067  */
2068 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2069 			struct __vxge_hw_vpath_handle *vp)
2070 {
2071 	u64 val64;
2072 	struct __vxge_hw_virtualpath *vpath;
2073 	enum vxge_hw_status status = VXGE_HW_OK;
2074 
2075 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2076 		status = VXGE_HW_ERR_INVALID_HANDLE;
2077 		goto exit;
2078 	}
2079 
2080 	vpath = vp->vpath;
2081 
2082 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2083 
2084 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2085 		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2086 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2087 	}
2088 exit:
2089 	return status;
2090 }
2091 
2092 /**
2093  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2094  * @vp: Vpath handle.
2095  *
2096  * Enable Titan-e multicast addresses.
2097  * Returns: VXGE_HW_OK on success.
2098  *
2099  */
2100 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2101 			struct __vxge_hw_vpath_handle *vp)
2102 {
2103 	u64 val64;
2104 	struct __vxge_hw_virtualpath *vpath;
2105 	enum vxge_hw_status status = VXGE_HW_OK;
2106 
2107 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2108 		status = VXGE_HW_ERR_INVALID_HANDLE;
2109 		goto exit;
2110 	}
2111 
2112 	vpath = vp->vpath;
2113 
2114 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2115 
2116 	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2117 		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2118 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2119 	}
2120 exit:
2121 	return status;
2122 }
2123 
2124 /**
2125  * vxge_hw_vpath_mcast_disable - Disable  multicast addresses.
2126  * @vp: Vpath handle.
2127  *
2128  * Disable Titan-e multicast addresses.
2129  * Returns: VXGE_HW_OK - success.
2130  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2131  *
2132  */
2133 enum vxge_hw_status
2134 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2135 {
2136 	u64 val64;
2137 	struct __vxge_hw_virtualpath *vpath;
2138 	enum vxge_hw_status status = VXGE_HW_OK;
2139 
2140 	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2141 		status = VXGE_HW_ERR_INVALID_HANDLE;
2142 		goto exit;
2143 	}
2144 
2145 	vpath = vp->vpath;
2146 
2147 	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2148 
2149 	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2150 		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2151 		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2152 	}
2153 exit:
2154 	return status;
2155 }
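/*
 * Usage sketch (illustrative, not part of the driver): a set_rx_mode style
 * update that keeps broadcast reception enabled and follows the standard
 * IFF_ALLMULTI netdevice flag for the all-multicast bit. "dev" and "vp"
 * are hypothetical caller-owned variables.
 *
 *	vxge_hw_vpath_bcast_enable(vp);
 *	if (dev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */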
2156 
2157 /*
2158  * vxge_hw_vpath_alarm_process - Process Alarms.
2159  * @vpath: Virtual Path.
2160  * @skip_alarms: Do not clear the alarms
2161  *
2162  * Process vpath alarms.
2163  *
2164  */
2165 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2166 			struct __vxge_hw_vpath_handle *vp,
2167 			u32 skip_alarms)
2168 {
2169 	enum vxge_hw_status status = VXGE_HW_OK;
2170 
2171 	if (vp == NULL) {
2172 		status = VXGE_HW_ERR_INVALID_HANDLE;
2173 		goto exit;
2174 	}
2175 
2176 	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2177 exit:
2178 	return status;
2179 }
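/*
 * Usage sketch (illustrative, not part of the driver): running alarm
 * processing from an alarm interrupt handler. Per the @skip_alarms
 * description above, passing 0 lets the helper clear the alarms it
 * handles; "vp" is a valid vpath handle owned by the caller.
 *
 *	if (vxge_hw_vpath_alarm_process(vp, 0) != VXGE_HW_OK)
 *		... escalate, e.g. schedule a device reset ...
 */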
2180 
2181 /**
2182  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2183  *                            alarms
2184  * @vp: Virtual Path handle.
2185  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2186  *             interrupts (can be repeated). If the fifo or ring is not enabled,
2187  *             the MSIX vector for it should be set to 0.
2188  * @alarm_msix_id: MSIX vector for alarm.
2189  *
2190  * This API associates the given MSIX vector numbers with the four TIM
2191  * interrupts and the alarm interrupt.
2192  */
2193 void
2194 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2195 		       int alarm_msix_id)
2196 {
2197 	u64 val64;
2198 	struct __vxge_hw_virtualpath *vpath = vp->vpath;
2199 	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2200 	u32 vp_id = vp->vpath->vp_id;
2201 
2202 	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2203 		  (vp_id * 4) + tim_msix_id[0]) |
2204 		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2205 		  (vp_id * 4) + tim_msix_id[1]);
2206 
2207 	writeq(val64, &vp_reg->interrupt_cfg0);
2208 
2209 	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2210 			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2211 			&vp_reg->interrupt_cfg2);
2212 
2213 	if (vpath->hldev->config.intr_mode ==
2214 					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2215 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2216 				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2217 				0, 32), &vp_reg->one_shot_vect0_en);
2218 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2219 				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2220 				0, 32), &vp_reg->one_shot_vect1_en);
2221 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2222 				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2223 				0, 32), &vp_reg->one_shot_vect2_en);
2224 	}
2225 }
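/*
 * Usage sketch (illustrative, not part of the driver): mapping one vpath's
 * TIM interrupts onto MSIX vectors. The array length comes from the
 * VXGE_HW_MAX_INTR_PER_VP contract above; the particular assignment (fifo
 * on vector 0, ring on vector 1, alarm on vector 2) is only an example.
 * The helper combines these values with the vpath's (vp_id * 4) base, as
 * seen in the implementation above.
 *
 *	int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 */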
2226 
2227 /**
2228  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2229  * @vp: Virtual Path handle.
2230  * @msix_id:  MSIX ID
2231  *
2232  * The function masks the msix interrupt for the given msix_id.
2233  *
2234  * Returns: None.
2238  */
2239 void
2240 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2241 {
2242 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2243 	__vxge_hw_pio_mem_write32_upper(
2244 		(u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2245 		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2246 }
2247 
2248 /**
2249  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2250  * @vp: Virtual Path handle.
2251  * @msix_id:  MSIX ID
2252  *
2253  * The function clears the msix interrupt for the given msix_id.
2254  *
2255  * Returns: None.
2259  */
2260 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2261 {
2262 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2263 
2264 	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2265 		__vxge_hw_pio_mem_write32_upper(
2266 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2267 			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2268 	else
2269 		__vxge_hw_pio_mem_write32_upper(
2270 			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2271 			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2272 }
2273 
2274 /**
2275  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2276  * @vp: Virtual Path handle.
2277  * @msix_id:  MSIX ID
2278  *
2279  * The function unmasks the msix interrupt for the given msix_id.
2280  *
2281  * Returns: None.
2285  */
2286 void
2287 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2288 {
2289 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2290 	__vxge_hw_pio_mem_write32_upper(
2291 			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2292 			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2293 }
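/*
 * Usage sketch (illustrative, not part of the driver): fencing an MSIX
 * vector while its handler runs. Masking first and clearing after the
 * work is done is one possible pattern; "vp" and "msix_id" are supplied
 * by the hypothetical caller, and vxge_hw_vpath_msix_clear() above picks
 * the one-shot or plain unmask register based on the interrupt mode.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... process the fifo or ring bound to this vector ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 */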
2294 
2295 /**
2296  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2297  * @vp: Virtual Path handle.
2298  *
2299  * Mask Tx and Rx vpath interrupts.
2300  *
2301  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2302  */
2303 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2304 {
2305 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2306 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2307 	u64	val64;
2308 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2309 
2310 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2311 		tim_int_mask1, vp->vpath->vp_id);
2312 
2313 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2314 
2315 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2316 		(tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2317 		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2318 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2319 			&hldev->common_reg->tim_int_mask0);
2320 	}
2321 
2322 	val64 = readl(&hldev->common_reg->tim_int_mask1);
2323 
2324 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2325 		(tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2326 		__vxge_hw_pio_mem_write32_upper(
2327 			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2328 			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2329 			&hldev->common_reg->tim_int_mask1);
2330 	}
2331 }
2332 
2333 /**
2334  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2335  * @vp: Virtual Path handle.
2336  *
2337  * Unmask Tx and Rx vpath interrupts.
2338  *
2339  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2340  */
2341 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2342 {
2343 	u64	tim_int_mask0[4] = {[0 ...3] = 0};
2344 	u32	tim_int_mask1[4] = {[0 ...3] = 0};
2345 	u64	val64;
2346 	struct __vxge_hw_device *hldev = vp->vpath->hldev;
2347 
2348 	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2349 		tim_int_mask1, vp->vpath->vp_id);
2350 
2351 	val64 = readq(&hldev->common_reg->tim_int_mask0);
2352 
2353 	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2354 	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2355 		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2356 			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2357 			&hldev->common_reg->tim_int_mask0);
2358 	}
2359 
2360 	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2361 	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2362 		__vxge_hw_pio_mem_write32_upper(
2363 			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2364 			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2365 			&hldev->common_reg->tim_int_mask1);
2366 	}
2367 }
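/*
 * Usage sketch (illustrative, not part of the driver): bracketing a
 * polling pass with the INTA mask/unmask pair when the device runs with
 * line interrupts. "vp" is a hypothetical caller-owned vpath handle.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	... drain Rx and Tx completions for this vpath ...
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */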
2368 
2369 /**
2370  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2371  * descriptors and process the same.
2372  * @ring: Handle to the ring object used for receive
2373  *
2374  * The function polls the Rx for the completed descriptors and calls
2375  * the driver via the supplied completion callback.
2376  *
2377  * Returns: VXGE_HW_OK, if the polling completed successfully.
2378  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2379  * descriptors available which are yet to be processed.
2380  *
2381  * See also: vxge_hw_vpath_poll_tx()
2382  */
2383 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2384 {
2385 	u8 t_code;
2386 	enum vxge_hw_status status = VXGE_HW_OK;
2387 	void *first_rxdh;
2388 	u64 val64 = 0;
2389 	int new_count = 0;
2390 
2391 	ring->cmpl_cnt = 0;
2392 
2393 	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2394 	if (status == VXGE_HW_OK)
2395 		ring->callback(ring, first_rxdh,
2396 			t_code, ring->channel.userdata);
2397 
2398 	if (ring->cmpl_cnt != 0) {
2399 		ring->doorbell_cnt += ring->cmpl_cnt;
2400 		if (ring->doorbell_cnt >= ring->rxds_limit) {
2401 			/*
2402 			 * Each RxD is of 4 qwords, update the number of
2403 			 * qwords replenished
2404 			 */
2405 			new_count = (ring->doorbell_cnt * 4);
2406 
2407 			/* For each block add 4 more qwords */
2408 			ring->total_db_cnt += ring->doorbell_cnt;
2409 			if (ring->total_db_cnt >= ring->rxds_per_block) {
2410 				new_count += 4;
2411 				/* Reset total count */
2412 				ring->total_db_cnt %= ring->rxds_per_block;
2413 			}
2414 			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2415 				&ring->vp_reg->prc_rxd_doorbell);
2416 			val64 =
2417 			  readl(&ring->common_reg->titan_general_int_status);
2418 			ring->doorbell_cnt = 0;
2419 		}
2420 	}
2421 
2422 	return status;
2423 }
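/*
 * Usage sketch (illustrative, not part of the driver): driving Rx
 * completion processing from a NAPI poll routine. The budget accounting
 * lives in the ring callback the caller registered; the napi_complete()
 * step shown here is a common pattern, not a requirement of this API.
 * "ring", "napi", "pkts_processed" and "budget" are hypothetical
 * caller-owned variables.
 *
 *	vxge_hw_vpath_poll_rx(ring);
 *	if (pkts_processed < budget) {
 *		napi_complete(napi);
 *		... re-enable the vpath interrupt ...
 *	}
 */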
2424 
2425 /**
2426  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2427  * the same.
2428  * @fifo: Handle to the fifo object used for non offload send
2429  *
2430  * The function polls the Tx for the completed descriptors and calls
2431  * the driver via the supplied completion callback.
2432  *
2433  * Returns: VXGE_HW_OK, if the polling completed successfully.
2434  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2435  * descriptors available which are yet to be processed.
2436  */
2437 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2438 					struct sk_buff ***skb_ptr, int nr_skb,
2439 					int *more)
2440 {
2441 	enum vxge_hw_fifo_tcode t_code;
2442 	void *first_txdlh;
2443 	enum vxge_hw_status status = VXGE_HW_OK;
2444 	struct __vxge_hw_channel *channel;
2445 
2446 	channel = &fifo->channel;
2447 
2448 	status = vxge_hw_fifo_txdl_next_completed(fifo,
2449 				&first_txdlh, &t_code);
2450 	if (status == VXGE_HW_OK)
2451 		if (fifo->callback(fifo, first_txdlh, t_code,
2452 			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2453 			status = VXGE_HW_COMPLETIONS_REMAIN;
2454 
2455 	return status;
2456 }
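/*
 * Usage sketch (illustrative, not part of the driver): collecting
 * completed Tx skbs through the skb_ptr array and freeing them after the
 * call. The array size of 16 and the loop shape are assumptions for the
 * example; the fifo callback is expected to append completed skbs at
 * *skb_ptr and advance it, and to set *more when more work remains.
 *
 *	struct sk_buff *completed[16];
 *	struct sk_buff **skb_ptr, **done;
 *	int more;
 *
 *	do {
 *		more = 0;
 *		skb_ptr = done = completed;
 *		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed),
 *				      &more);
 *		while (done < skb_ptr)
 *			dev_kfree_skb_irq(*done++);
 *	} while (more);
 */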
2457