1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
9 *
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
15 #include <linux/io-64-nonatomic-lo-hi.h>
16 #include <linux/prefetch.h>
17
18 #include "vxge-traffic.h"
19 #include "vxge-config.h"
20 #include "vxge-main.h"
21
22 /*
23 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
24 * @vp: Virtual Path handle.
25 *
26 * Enable vpath interrupts. The function is to be executed last in the
27 * vpath initialization sequence.
28 *
29 * See also: vxge_hw_vpath_intr_disable()
30 */
31 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
32 {
33 u64 val64;
34
35 struct __vxge_hw_virtualpath *vpath;
36 struct vxge_hw_vpath_reg __iomem *vp_reg;
37 enum vxge_hw_status status = VXGE_HW_OK;
38 if (vp == NULL) {
39 status = VXGE_HW_ERR_INVALID_HANDLE;
40 goto exit;
41 }
42
43 vpath = vp->vpath;
44
45 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
46 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
47 goto exit;
48 }
49
50 vp_reg = vpath->vp_reg;
51
52 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
53
54 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
55 &vp_reg->general_errors_reg);
56
57 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
58 &vp_reg->pci_config_errors_reg);
59
60 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
61 &vp_reg->mrpcim_to_vpath_alarm_reg);
62
63 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
64 &vp_reg->srpcim_to_vpath_alarm_reg);
65
66 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
67 &vp_reg->vpath_ppif_int_status);
68
69 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
70 &vp_reg->srpcim_msg_to_vpath_reg);
71
72 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
73 &vp_reg->vpath_pcipif_int_status);
74
75 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
76 &vp_reg->prc_alarm_reg);
77
78 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
79 &vp_reg->wrdma_alarm_status);
80
81 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
82 &vp_reg->asic_ntwk_vp_err_reg);
83
84 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
85 &vp_reg->xgmac_vp_int_status);
86
87 val64 = readq(&vp_reg->vpath_general_int_status);
88
89 /* Mask unwanted interrupts */
90
91 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
92 &vp_reg->vpath_pcipif_int_mask);
93
94 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
95 &vp_reg->srpcim_msg_to_vpath_mask);
96
97 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
98 &vp_reg->srpcim_to_vpath_alarm_mask);
99
100 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
101 &vp_reg->mrpcim_to_vpath_alarm_mask);
102
103 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
104 &vp_reg->pci_config_errors_mask);
105
106 /* Unmask the individual interrupts */
107
108 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
111 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
112 &vp_reg->general_errors_mask);
113
114 __vxge_hw_pio_mem_write32_upper(
115 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
120 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
121 &vp_reg->kdfcctl_errors_mask);
122
123 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
124
125 __vxge_hw_pio_mem_write32_upper(
126 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
127 &vp_reg->prc_alarm_mask);
128
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
130 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
131
132 if (vpath->hldev->first_vp_id != vpath->vp_id)
133 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
134 &vp_reg->asic_ntwk_vp_err_mask);
135 else
136 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
138 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
139 &vp_reg->asic_ntwk_vp_err_mask);
140
141 __vxge_hw_pio_mem_write32_upper(0,
142 &vp_reg->vpath_general_int_mask);
143 exit:
144 return status;
145
146 }
147
148 /*
149 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
150 * @vp: Virtual Path handle.
151 *
152 * Disable vpath interrupts. The function is to be executed last in the
153 * vpath teardown sequence.
154 *
155 * See also: vxge_hw_vpath_intr_enable()
156 */
157 enum vxge_hw_status vxge_hw_vpath_intr_disable(
158 struct __vxge_hw_vpath_handle *vp)
159 {
160 u64 val64;
161
162 struct __vxge_hw_virtualpath *vpath;
163 enum vxge_hw_status status = VXGE_HW_OK;
164 struct vxge_hw_vpath_reg __iomem *vp_reg;
165 if (vp == NULL) {
166 status = VXGE_HW_ERR_INVALID_HANDLE;
167 goto exit;
168 }
169
170 vpath = vp->vpath;
171
172 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
173 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
174 goto exit;
175 }
176 vp_reg = vpath->vp_reg;
177
178 __vxge_hw_pio_mem_write32_upper(
179 (u32)VXGE_HW_INTR_MASK_ALL,
180 &vp_reg->vpath_general_int_mask);
181
182 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
183
184 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
185
186 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
187 &vp_reg->general_errors_mask);
188
189 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
190 &vp_reg->pci_config_errors_mask);
191
192 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
193 &vp_reg->mrpcim_to_vpath_alarm_mask);
194
195 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
196 &vp_reg->srpcim_to_vpath_alarm_mask);
197
198 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
199 &vp_reg->vpath_ppif_int_mask);
200
201 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
202 &vp_reg->srpcim_msg_to_vpath_mask);
203
204 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
205 &vp_reg->vpath_pcipif_int_mask);
206
207 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
208 &vp_reg->wrdma_alarm_mask);
209
210 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
211 &vp_reg->prc_alarm_mask);
212
213 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
214 &vp_reg->xgmac_vp_int_mask);
215
216 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
217 &vp_reg->asic_ntwk_vp_err_mask);
218
219 exit:
220 return status;
221 }
222
223 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
224 {
225 struct vxge_hw_vpath_reg __iomem *vp_reg;
226 struct vxge_hw_vp_config *config;
227 u64 val64;
228
229 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
230 return;
231
232 vp_reg = fifo->vp_reg;
233 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
234
235 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
236 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
237 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
238 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
239 fifo->tim_tti_cfg1_saved = val64;
240 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
241 }
242 }
243
244 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
245 {
246 u64 val64 = ring->tim_rti_cfg1_saved;
247
248 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
249 ring->tim_rti_cfg1_saved = val64;
250 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
251 }
252
253 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
254 {
255 u64 val64 = fifo->tim_tti_cfg3_saved;
256 u64 timer = (fifo->rtimer * 1000) / 272;
257
258 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
259 if (timer)
260 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
261 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
262
263 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
264 /* tti_cfg3_saved is not updated again because it is
265 * initialized at one place only - init time.
266 */
267 }
268
269 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
270 {
271 u64 val64 = ring->tim_rti_cfg3_saved;
272 u64 timer = (ring->rtimer * 1000) / 272;
273
274 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
275 if (timer)
276 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
277 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
278
279 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
280 /* rti_cfg3_saved is not updated again because it is
281 * initialized at one place only - init time.
282 */
283 }
284
285 /**
286 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
287 * @channel: Channel for rx or tx handle
288 * @msix_id: MSIX ID
289 *
290 * The function masks the msix interrupt for the given msix_id
291 *
292 * Returns: 0
293 */
294 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
295 {
296
297 __vxge_hw_pio_mem_write32_upper(
298 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
299 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
300 }
301
302 /**
303 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
304 * @channel: Channel for rx or tx handle
305 * @msix_id: MSIX ID
306 *
307 * The function unmasks the msix interrupt for the given msix_id
308 *
309 * Returns: 0
310 */
311 void
312 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
313 {
314
315 __vxge_hw_pio_mem_write32_upper(
316 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
317 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
318 }
319
320 /**
321 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
322 * @channel: Channel for rx or tx handle
323 * @msix_id: MSIX ID
324 *
325 * The function clears the msix interrupt for the given msix_id
326 * if configured in MSIX oneshot mode
327 *
328 * Returns: 0
329 */
330 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
331 {
332 __vxge_hw_pio_mem_write32_upper(
333 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
334 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
335 }
336
337 /**
338 * vxge_hw_device_set_intr_type - Updates the configuration
339 * with new interrupt type.
340 * @hldev: HW device handle.
341 * @intr_mode: New interrupt type
342 */
343 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
344 {
345
346 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
348 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
349 (intr_mode != VXGE_HW_INTR_MODE_DEF))
350 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
351
352 hldev->config.intr_mode = intr_mode;
353 return intr_mode;
354 }
355
356 /**
357 * vxge_hw_device_intr_enable - Enable interrupts.
358 * @hldev: HW device handle.
361 *
362 * Enable Titan interrupts. The function is to be executed last in the
363 * Titan initialization sequence.
364 *
365 * See also: vxge_hw_device_intr_disable()
366 */
367 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
368 {
369 u32 i;
370 u64 val64;
371 u32 val32;
372
373 vxge_hw_device_mask_all(hldev);
374
375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
376
377 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
378 continue;
379
380 vxge_hw_vpath_intr_enable(
381 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
382 }
383
384 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
385 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
386 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
387
388 if (val64 != 0) {
389 writeq(val64, &hldev->common_reg->tim_int_status0);
390
391 writeq(~val64, &hldev->common_reg->tim_int_mask0);
392 }
393
394 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
395 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
396
397 if (val32 != 0) {
398 __vxge_hw_pio_mem_write32_upper(val32,
399 &hldev->common_reg->tim_int_status1);
400
401 __vxge_hw_pio_mem_write32_upper(~val32,
402 &hldev->common_reg->tim_int_mask1);
403 }
404 }
405
406 val64 = readq(&hldev->common_reg->titan_general_int_status);
407
408 vxge_hw_device_unmask_all(hldev);
409 }
410
411 /**
412 * vxge_hw_device_intr_disable - Disable Titan interrupts.
413 * @hldev: HW device handle.
416 *
417 * Disable Titan interrupts.
418 *
419 * See also: vxge_hw_device_intr_enable()
420 */
421 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
422 {
423 u32 i;
424
425 vxge_hw_device_mask_all(hldev);
426
427 /* mask all the tim interrupts */
428 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
429 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
430 &hldev->common_reg->tim_int_mask1);
431
432 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
433
434 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
435 continue;
436
437 vxge_hw_vpath_intr_disable(
438 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
439 }
440 }
441
442 /**
443 * vxge_hw_device_mask_all - Mask all device interrupts.
444 * @hldev: HW device handle.
445 *
446 * Mask all device interrupts.
447 *
448 * See also: vxge_hw_device_unmask_all()
449 */
450 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
451 {
452 u64 val64;
453
454 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
455 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
456
457 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
458 &hldev->common_reg->titan_mask_all_int);
459 }
460
461 /**
462 * vxge_hw_device_unmask_all - Unmask all device interrupts.
463 * @hldev: HW device handle.
464 *
465 * Unmask all device interrupts.
466 *
467 * See also: vxge_hw_device_mask_all()
468 */
469 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
470 {
471 u64 val64 = 0;
472
473 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
474 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
475
476 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
477 &hldev->common_reg->titan_mask_all_int);
478 }
479
480 /**
481 * vxge_hw_device_flush_io - Flush io writes.
482 * @hldev: HW device handle.
483 *
484 * The function performs a read operation to flush io writes.
485 *
486 * Returns: void
487 */
488 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
489 {
490 u32 val32;
491
492 val32 = readl(&hldev->common_reg->titan_general_int_status);
493 }
494
495 /**
496 * __vxge_hw_device_handle_error - Handle error
497 * @hldev: HW device
498 * @vp_id: Vpath Id
499 * @type: Error type. Please see enum vxge_hw_event{}
500 *
501 * Handle error.
502 */
503 static enum vxge_hw_status
504 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
505 enum vxge_hw_event type)
506 {
507 switch (type) {
508 case VXGE_HW_EVENT_UNKNOWN:
509 break;
510 case VXGE_HW_EVENT_RESET_START:
511 case VXGE_HW_EVENT_RESET_COMPLETE:
512 case VXGE_HW_EVENT_LINK_DOWN:
513 case VXGE_HW_EVENT_LINK_UP:
514 goto out;
515 case VXGE_HW_EVENT_ALARM_CLEARED:
516 goto out;
517 case VXGE_HW_EVENT_ECCERR:
518 case VXGE_HW_EVENT_MRPCIM_ECCERR:
519 goto out;
520 case VXGE_HW_EVENT_FIFO_ERR:
521 case VXGE_HW_EVENT_VPATH_ERR:
522 case VXGE_HW_EVENT_CRITICAL_ERR:
523 case VXGE_HW_EVENT_SERR:
524 break;
525 case VXGE_HW_EVENT_SRPCIM_SERR:
526 case VXGE_HW_EVENT_MRPCIM_SERR:
527 goto out;
528 case VXGE_HW_EVENT_SLOT_FREEZE:
529 break;
530 default:
531 vxge_assert(0);
532 goto out;
533 }
534
535 /* notify driver */
536 if (hldev->uld_callbacks->crit_err)
537 hldev->uld_callbacks->crit_err(hldev,
538 type, vp_id);
539 out:
540
541 return VXGE_HW_OK;
542 }
543
544 /*
545 * __vxge_hw_device_handle_link_down_ind
546 * @hldev: HW device handle.
547 *
548 * Link down indication handler. The function is invoked by HW when
549 * Titan indicates that the link is down.
550 */
551 static enum vxge_hw_status
552 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
553 {
554 /*
555 * If the link state is already down, return.
556 */
557 if (hldev->link_state == VXGE_HW_LINK_DOWN)
558 goto exit;
559
560 hldev->link_state = VXGE_HW_LINK_DOWN;
561
562 /* notify driver */
563 if (hldev->uld_callbacks->link_down)
564 hldev->uld_callbacks->link_down(hldev);
565 exit:
566 return VXGE_HW_OK;
567 }
568
569 /*
570 * __vxge_hw_device_handle_link_up_ind
571 * @hldev: HW device handle.
572 *
573 * Link up indication handler. The function is invoked by HW when
574 * Titan indicates that the link is up for programmable amount of time.
575 */
576 static enum vxge_hw_status
577 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
578 {
579 /*
580 * If the link state is already up, return.
581 */
582 if (hldev->link_state == VXGE_HW_LINK_UP)
583 goto exit;
584
585 hldev->link_state = VXGE_HW_LINK_UP;
586
587 /* notify driver */
588 if (hldev->uld_callbacks->link_up)
589 hldev->uld_callbacks->link_up(hldev);
590 exit:
591 return VXGE_HW_OK;
592 }
593
594 /*
595 * __vxge_hw_vpath_alarm_process - Process Alarms.
596 * @vpath: Virtual Path.
597 * @skip_alarms: Do not clear the alarms
598 *
599 * Process vpath alarms.
600 *
601 */
602 static enum vxge_hw_status
603 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
604 u32 skip_alarms)
605 {
606 u64 val64;
607 u64 alarm_status;
608 u64 pic_status;
609 struct __vxge_hw_device *hldev = NULL;
610 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
611 u64 mask64;
612 struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 struct vxge_hw_vpath_reg __iomem *vp_reg;
614
615 if (vpath == NULL) {
616 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
617 alarm_event);
618 goto out2;
619 }
620
621 hldev = vpath->hldev;
622 vp_reg = vpath->vp_reg;
623 alarm_status = readq(&vp_reg->vpath_general_int_status);
624
625 if (alarm_status == VXGE_HW_ALL_FOXES) {
626 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
627 alarm_event);
628 goto out;
629 }
630
631 sw_stats = vpath->sw_stats;
632
633 if (alarm_status & ~(
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 sw_stats->error_stats.unknown_alarms++;
639
640 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
641 alarm_event);
642 goto out;
643 }
644
645 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646
647 val64 = readq(&vp_reg->xgmac_vp_int_status);
648
649 if (val64 &
650 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651
652 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653
654 if (((val64 &
655 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
656 (!(val64 &
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
658 ((val64 &
659 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
660 (!(val64 &
661 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
662 ))) {
663 sw_stats->error_stats.network_sustained_fault++;
664
665 writeq(
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 &vp_reg->asic_ntwk_vp_err_mask);
668
669 __vxge_hw_device_handle_link_down_ind(hldev);
670 alarm_event = VXGE_HW_SET_LEVEL(
671 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
672 }
673
674 if (((val64 &
675 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
676 (!(val64 &
677 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
678 ((val64 &
679 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
680 (!(val64 &
681 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
682 ))) {
683
684 sw_stats->error_stats.network_sustained_ok++;
685
686 writeq(
687 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 &vp_reg->asic_ntwk_vp_err_mask);
689
690 __vxge_hw_device_handle_link_up_ind(hldev);
691 alarm_event = VXGE_HW_SET_LEVEL(
692 VXGE_HW_EVENT_LINK_UP, alarm_event);
693 }
694
695 writeq(VXGE_HW_INTR_MASK_ALL,
696 &vp_reg->asic_ntwk_vp_err_reg);
697
698 alarm_event = VXGE_HW_SET_LEVEL(
699 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700
701 if (skip_alarms)
702 return VXGE_HW_OK;
703 }
704 }
705
706 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707
708 pic_status = readq(&vp_reg->vpath_ppif_int_status);
709
710 if (pic_status &
711 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712
713 val64 = readq(&vp_reg->general_errors_reg);
714 mask64 = readq(&vp_reg->general_errors_mask);
715
716 if ((val64 &
717 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
718 ~mask64) {
719 sw_stats->error_stats.ini_serr_det++;
720
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_SERR, alarm_event);
723 }
724
725 if ((val64 &
726 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
727 ~mask64) {
728 sw_stats->error_stats.dblgen_fifo0_overflow++;
729
730 alarm_event = VXGE_HW_SET_LEVEL(
731 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
732 }
733
734 if ((val64 &
735 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
736 ~mask64)
737 sw_stats->error_stats.statsb_pif_chain_error++;
738
739 if ((val64 &
740 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
741 ~mask64)
742 sw_stats->error_stats.statsb_drop_timeout++;
743
744 if ((val64 &
745 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
746 ~mask64)
747 sw_stats->error_stats.target_illegal_access++;
748
749 if (!skip_alarms) {
750 writeq(VXGE_HW_INTR_MASK_ALL,
751 &vp_reg->general_errors_reg);
752 alarm_event = VXGE_HW_SET_LEVEL(
753 VXGE_HW_EVENT_ALARM_CLEARED,
754 alarm_event);
755 }
756 }
757
758 if (pic_status &
759 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760
761 val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763
764 if ((val64 &
765 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
766 ~mask64) {
767 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768
769 alarm_event = VXGE_HW_SET_LEVEL(
770 VXGE_HW_EVENT_FIFO_ERR,
771 alarm_event);
772 }
773
774 if ((val64 &
775 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
776 ~mask64) {
777 sw_stats->error_stats.kdfcctl_fifo0_poison++;
778
779 alarm_event = VXGE_HW_SET_LEVEL(
780 VXGE_HW_EVENT_FIFO_ERR,
781 alarm_event);
782 }
783
784 if ((val64 &
785 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
786 ~mask64) {
787 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788
789 alarm_event = VXGE_HW_SET_LEVEL(
790 VXGE_HW_EVENT_FIFO_ERR,
791 alarm_event);
792 }
793
794 if (!skip_alarms) {
795 writeq(VXGE_HW_INTR_MASK_ALL,
796 &vp_reg->kdfcctl_errors_reg);
797 alarm_event = VXGE_HW_SET_LEVEL(
798 VXGE_HW_EVENT_ALARM_CLEARED,
799 alarm_event);
800 }
801 }
802
803 }
804
805 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806
807 val64 = readq(&vp_reg->wrdma_alarm_status);
808
809 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810
811 val64 = readq(&vp_reg->prc_alarm_reg);
812 mask64 = readq(&vp_reg->prc_alarm_mask);
813
814 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
815 ~mask64)
816 sw_stats->error_stats.prc_ring_bumps++;
817
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
819 ~mask64) {
820 sw_stats->error_stats.prc_rxdcm_sc_err++;
821
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
824 alarm_event);
825 }
826
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
828 & ~mask64) {
829 sw_stats->error_stats.prc_rxdcm_sc_abort++;
830
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
833 alarm_event);
834 }
835
836 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
837 & ~mask64) {
838 sw_stats->error_stats.prc_quanta_size_err++;
839
840 alarm_event = VXGE_HW_SET_LEVEL(
841 VXGE_HW_EVENT_VPATH_ERR,
842 alarm_event);
843 }
844
845 if (!skip_alarms) {
846 writeq(VXGE_HW_INTR_MASK_ALL,
847 &vp_reg->prc_alarm_reg);
848 alarm_event = VXGE_HW_SET_LEVEL(
849 VXGE_HW_EVENT_ALARM_CLEARED,
850 alarm_event);
851 }
852 }
853 }
854 out:
855 hldev->stats.sw_dev_err_stats.vpath_alarms++;
856 out2:
857 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
859 return VXGE_HW_OK;
860
861 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862
863 if (alarm_event == VXGE_HW_EVENT_SERR)
864 return VXGE_HW_ERR_CRITICAL;
865
866 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 VXGE_HW_ERR_SLOT_FREEZE :
868 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 VXGE_HW_ERR_VPATH;
870 }
871
872 /**
873 * vxge_hw_device_begin_irq - Begin IRQ processing.
874 * @hldev: HW device handle.
875 * @skip_alarms: Do not clear the alarms
876 * @reason: "Reason" for the interrupt, the value of Titan's
877 * general_int_status register.
878 *
879 * The function performs two actions. It first checks whether (for a shared
880 * IRQ) the interrupt was raised by the device. Next, it masks the device interrupts.
881 *
882 * Note:
883 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
884 * bridge. Therefore, two back-to-back interrupts are potentially possible.
885 *
886 * Returns: 0, if the interrupt is not "ours" (note that in this case the
887 * device remains enabled).
888 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
889 * status.
890 */
891 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 u32 skip_alarms, u64 *reason)
893 {
894 u32 i;
895 u64 val64;
896 u64 adapter_status;
897 u64 vpath_mask;
898 enum vxge_hw_status ret = VXGE_HW_OK;
899
900 val64 = readq(&hldev->common_reg->titan_general_int_status);
901
902 if (unlikely(!val64)) {
903 /* not Titan interrupt */
904 *reason = 0;
905 ret = VXGE_HW_ERR_WRONG_IRQ;
906 goto exit;
907 }
908
909 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910
911 adapter_status = readq(&hldev->common_reg->adapter_status);
912
913 if (adapter_status == VXGE_HW_ALL_FOXES) {
914
915 __vxge_hw_device_handle_error(hldev,
916 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
917 *reason = 0;
918 ret = VXGE_HW_ERR_SLOT_FREEZE;
919 goto exit;
920 }
921 }
922
923 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924
925 *reason = val64;
926
927 vpath_mask = hldev->vpaths_deployed >>
928 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929
930 if (val64 &
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933
934 return VXGE_HW_OK;
935 }
936
937 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938
939 if (unlikely(val64 &
940 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941
942 enum vxge_hw_status error_level = VXGE_HW_OK;
943
944 hldev->stats.sw_dev_err_stats.vpath_alarms++;
945
946 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947
948 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
949 continue;
950
951 ret = __vxge_hw_vpath_alarm_process(
952 &hldev->virtual_paths[i], skip_alarms);
953
954 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955
956 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
958 break;
959 }
960
961 ret = error_level;
962 }
963 exit:
964 return ret;
965 }
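
/*
 * Illustrative sketch (not part of the driver): how a legacy (INTA) interrupt
 * handler built on the APIs in this file might use vxge_hw_device_begin_irq().
 * The deferred-poll step is elided; the real handlers live in vxge-main.c.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		enum vxge_hw_status status;
 *		u64 reason;
 *
 *		status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *		if (status == VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;	// shared IRQ raised by another device
 *
 *		vxge_hw_device_mask_all(hldev);
 *		vxge_hw_device_clear_tx_rx(hldev);	// ack the Tx/Rx condition
 *		// ... schedule the driver's poll context here ...
 *		vxge_hw_device_unmask_all(hldev);
 *
 *		return IRQ_HANDLED;
 *	}
 */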
966
967 /**
968 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
969 * condition that has caused the Tx and RX interrupt.
970 * @hldev: HW device.
971 *
972 * Acknowledge (that is, clear) the condition that has caused
973 * the Tx and Rx interrupt.
974 * See also: vxge_hw_device_begin_irq(),
975 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
976 */
977 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
978 {
979
980 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 &hldev->common_reg->tim_int_status0);
985 }
986
987 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 __vxge_hw_pio_mem_write32_upper(
990 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 &hldev->common_reg->tim_int_status1);
993 }
994 }
995
996 /*
997 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
998 * @channel: Channel
999 * @dtrh: Buffer to return the DTR pointer
1000 *
1001 * Allocates a dtr from the reserve array. If the reserve array is empty,
1002 * it swaps the reserve and free arrays.
1003 *
1004 */
1005 static enum vxge_hw_status
1006 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007 {
1008 if (channel->reserve_ptr - channel->reserve_top > 0) {
1009 _alloc_after_swap:
1010 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1011
1012 return VXGE_HW_OK;
1013 }
1014
1015 /* switch between empty and full arrays */
1016
1017 /* the idea behind such a design is that by keeping the free and reserve
1018 * arrays separate we basically separate the irq and non-irq parts,
1019 * i.e. no additional locking is needed when we free a resource */
1020
1021 if (channel->length - channel->free_ptr > 0) {
1022 swap(channel->reserve_arr, channel->free_arr);
1023 channel->reserve_ptr = channel->length;
1024 channel->reserve_top = channel->free_ptr;
1025 channel->free_ptr = channel->length;
1026
1027 channel->stats->reserve_free_swaps_cnt++;
1028
1029 goto _alloc_after_swap;
1030 }
1031
1032 channel->stats->full_cnt++;
1033
1034 *dtrh = NULL;
1035 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1036 }
1037
1038 /*
1039 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1040 * @channelh: Channel
1041 * @dtrh: DTR pointer
1042 *
1043 * Posts a dtr to work array.
1044 *
1045 */
1046 static void
1047 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1048 {
1049 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1050
1051 channel->work_arr[channel->post_index++] = dtrh;
1052
1053 /* wrap-around */
1054 if (channel->post_index == channel->length)
1055 channel->post_index = 0;
1056 }
1057
1058 /*
1059 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1060 * @channel: Channel
1061 * @dtr: Buffer to return the next completed DTR pointer
1062 *
1063 * Returns the next completed dtr without removing it from the work array
1064 *
1065 */
1066 void
1067 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1068 {
1069 vxge_assert(channel->compl_index < channel->length);
1070
1071 *dtrh = channel->work_arr[channel->compl_index];
1072 prefetch(*dtrh);
1073 }
1074
1075 /*
1076 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1077 * @channel: Channel handle
1078 *
1079 * Removes the next completed dtr from work array
1080 *
1081 */
1082 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1083 {
1084 channel->work_arr[channel->compl_index] = NULL;
1085
1086 /* wrap-around */
1087 if (++channel->compl_index == channel->length)
1088 channel->compl_index = 0;
1089
1090 channel->stats->total_compl_cnt++;
1091 }
1092
1093 /*
1094 * vxge_hw_channel_dtr_free - Frees a dtr
1095 * @channel: Channel handle
1096 * @dtr: DTR pointer
1097 *
1098 * Returns the dtr to free array
1099 *
1100 */
1101 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1102 {
1103 channel->free_arr[--channel->free_ptr] = dtrh;
1104 }
1105
1106 /*
1107 * vxge_hw_channel_dtr_count
1108 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1109 *
1110 * Retrieve number of DTRs available. This function cannot be called
1111 * from the data path. ring_initial_replenish() is the only user.
1112 */
1113 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1114 {
1115 return (channel->reserve_ptr - channel->reserve_top) +
1116 (channel->length - channel->free_ptr);
1117 }
1118
1119 /**
1120 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
1121 * @ring: Handle to the ring object used for receive
1122 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1123 * with a valid handle.
1124 *
1125 * Reserve Rx descriptor for subsequent filling-in by the driver
1126 * and posting on the corresponding ring (@ring)
1127 * via vxge_hw_ring_rxd_post().
1128 *
1129 * Returns: VXGE_HW_OK - success.
1130 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1131 *
1132 */
1133 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1134 void **rxdh)
1135 {
1136 enum vxge_hw_status status;
1137 struct __vxge_hw_channel *channel;
1138
1139 channel = &ring->channel;
1140
1141 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1142
1143 if (status == VXGE_HW_OK) {
1144 struct vxge_hw_ring_rxd_1 *rxdp =
1145 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1146
1147 rxdp->control_0 = rxdp->control_1 = 0;
1148 }
1149
1150 return status;
1151 }
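
/*
 * Illustrative sketch (not part of the driver): the reserve -> fill -> post
 * sequence described above, used as a single receive-buffer replenish step.
 * The DMA mapping of the buffer is assumed to have been done by the caller;
 * vxge_hw_ring_rxd_1b_set() is the one-buffer fill helper from vxge-traffic.h.
 *
 *	void *rxdh;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// attach the pre-mapped receive buffer to the descriptor
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		// hand the descriptor over to the adapter
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */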
1152
1153 /**
1154 * vxge_hw_ring_rxd_free - Free descriptor.
1155 * @ring: Handle to the ring object used for receive
1156 * @rxdh: Descriptor handle.
1157 *
1158 * Free the reserved descriptor. This operation is "symmetrical" to
1159 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1160 * lifecycle.
1161 *
1162 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1163 * be:
1164 *
1165 * - reserved (vxge_hw_ring_rxd_reserve);
1166 *
1167 * - posted (vxge_hw_ring_rxd_post);
1168 *
1169 * - completed (vxge_hw_ring_rxd_next_completed);
1170 *
1171 * - and recycled again (vxge_hw_ring_rxd_free).
1172 *
1173 * For alternative state transitions and more details please refer to
1174 * the design doc.
1175 *
1176 */
1177 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1178 {
1179 struct __vxge_hw_channel *channel;
1180
1181 channel = &ring->channel;
1182
1183 vxge_hw_channel_dtr_free(channel, rxdh);
1184
1185 }
1186
1187 /**
1188 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1189 * @ring: Handle to the ring object used for receive
1190 * @rxdh: Descriptor handle.
1191 *
1192 * This routine prepares a rxd and posts
1193 */
1194 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1195 {
1196 struct __vxge_hw_channel *channel;
1197
1198 channel = &ring->channel;
1199
1200 vxge_hw_channel_dtr_post(channel, rxdh);
1201 }
1202
1203 /**
1204 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1205 * @ring: Handle to the ring object used for receive
1206 * @rxdh: Descriptor handle.
1207 *
1208 * Processes rxd after post
1209 */
1210 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1211 {
1212 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1213
1214 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1215
1216 if (ring->stats->common_stats.usage_cnt > 0)
1217 ring->stats->common_stats.usage_cnt--;
1218 }
1219
1220 /**
1221 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1222 * @ring: Handle to the ring object used for receive
1223 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1224 *
1225 * Post descriptor on the ring.
1226 * Prior to posting the descriptor should be filled in accordance with
1227 * Host/Titan interface specification for a given service (LL, etc.).
1228 *
1229 */
1230 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1231 {
1232 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1233 struct __vxge_hw_channel *channel;
1234
1235 channel = &ring->channel;
1236
1237 wmb();
1238 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1239
1240 vxge_hw_channel_dtr_post(channel, rxdh);
1241
1242 if (ring->stats->common_stats.usage_cnt > 0)
1243 ring->stats->common_stats.usage_cnt--;
1244 }
1245
1246 /**
1247 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1248 * @ring: Handle to the ring object used for receive
1249 * @rxdh: Descriptor handle.
1250 *
1251 * Processes rxd after post with memory barrier.
1252 */
1253 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1254 {
1255 wmb();
1256 vxge_hw_ring_rxd_post_post(ring, rxdh);
1257 }
1258
1259 /**
1260 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1261 * @ring: Handle to the ring object used for receive
1262 * @rxdh: Descriptor handle. Returned by HW.
1263 * @t_code: Transfer code, as per Titan User Guide,
1264 * Receive Descriptor Format. Returned by HW.
1265 *
1266 * Retrieve the _next_ completed descriptor.
1267 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1268 * driver of new completed descriptors. After that
1269 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
1270 * completions (the very first completion is passed by HW via
1271 * vxge_hw_ring_callback_f).
1272 *
1273 * Implementation-wise, the driver is free to call
1274 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1275 * ring callback, or in a deferred fashion and separate (from HW)
1276 * context.
1277 *
1278 * Non-zero @t_code means failure to fill-in receive buffer(s)
1279 * of the descriptor.
1280 * For instance, parity error detected during the data transfer.
1281 * In this case Titan will complete the descriptor and indicate
1282 * for the host that the received data is not to be used.
1283 * For details please refer to Titan User Guide.
1284 *
1285 * Returns: VXGE_HW_OK - success.
1286 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1287 * are currently available for processing.
1288 *
1289 * See also: vxge_hw_ring_callback_f{},
1290 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1291 */
1292 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1293 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1294 {
1295 struct __vxge_hw_channel *channel;
1296 struct vxge_hw_ring_rxd_1 *rxdp;
1297 enum vxge_hw_status status = VXGE_HW_OK;
1298 u64 control_0, own;
1299
1300 channel = &ring->channel;
1301
1302 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1303
1304 rxdp = *rxdh;
1305 if (rxdp == NULL) {
1306 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1307 goto exit;
1308 }
1309
1310 control_0 = rxdp->control_0;
1311 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1312 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1313
1314 /* check whether it is not the end */
1315 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1316
1317 vxge_assert((rxdp)->host_control !=
1318 0);
1319
1320 ++ring->cmpl_cnt;
1321 vxge_hw_channel_dtr_complete(channel);
1322
1323 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1324
1325 ring->stats->common_stats.usage_cnt++;
1326 if (ring->stats->common_stats.usage_max <
1327 ring->stats->common_stats.usage_cnt)
1328 ring->stats->common_stats.usage_max =
1329 ring->stats->common_stats.usage_cnt;
1330
1331 status = VXGE_HW_OK;
1332 goto exit;
1333 }
1334
1335 /* reset it. since we don't want to return
1336 * garbage to the driver */
1337 *rxdh = NULL;
1338 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1339 exit:
1340 return status;
1341 }
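
/*
 * Illustrative sketch (not part of the driver): draining completions with the
 * API above. Frame delivery and buffer recycling are elided; non-zero transfer
 * codes are delegated to vxge_hw_ring_handle_tcode() defined below.
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *								VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_RING_T_CODE_OK)
 *			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *		// ... pass the received frame up, then recycle the descriptor ...
 *		vxge_hw_ring_rxd_free(ring, rxdh);
 *	}
 */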
1342
1343 /**
1344 * vxge_hw_ring_handle_tcode - Handle transfer code.
1345 * @ring: Handle to the ring object used for receive
1346 * @rxdh: Descriptor handle.
1347 * @t_code: One of the enumerated (and documented in the Titan user guide)
1348 * "transfer codes".
1349 *
1350 * Handle descriptor's transfer code. The latter comes with each completed
1351 * descriptor.
1352 *
1353 * Returns: one of the enum vxge_hw_status{} enumerated types.
1354 * VXGE_HW_OK - for success.
1355 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1356 */
1357 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1358 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1359 {
1360 enum vxge_hw_status status = VXGE_HW_OK;
1361
1362 /* If the t_code is not supported and is other than 0x5
1363 * (unparseable packet, such as an unknown IPv6 header),
1364 * drop it !!!
1365 */
1366
1367 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1368 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1369 status = VXGE_HW_OK;
1370 goto exit;
1371 }
1372
1373 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1374 status = VXGE_HW_ERR_INVALID_TCODE;
1375 goto exit;
1376 }
1377
1378 ring->stats->rxd_t_code_err_cnt[t_code]++;
1379 exit:
1380 return status;
1381 }
1382
1383 /**
1384 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1385 *
1386 * @fifo: fifo handle
1387 * @txdl_ptr: The starting location of the TxDL in host memory
1388 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1389 * @no_snoop: No snoop flags
1390 *
1391 * This function posts a non-offload doorbell to doorbell FIFO
1392 *
1393 */
1394 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1395 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1396 {
1397 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1398 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1399 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1400 &fifo->nofl_db->control_0);
1401
1402 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1403 }
1404
1405 /**
1406 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1407 * the fifo
1408 * @fifoh: Handle to the fifo object used for non offload send
1409 */
1410 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1411 {
1412 return vxge_hw_channel_dtr_count(&fifoh->channel);
1413 }
1414
1415 /**
1416 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1417 * @fifoh: Handle to the fifo object used for non offload send
1418 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1419 * with a valid handle.
1420 * @txdl_priv: Buffer to return the pointer to per txdl space
1421 *
1422 * Reserve a single TxDL (that is, fifo descriptor)
1423 * for subsequent filling-in by the driver
1424 * and posting on the corresponding channel (@fifo)
1425 * via vxge_hw_fifo_txdl_post().
1426 *
1427 * Note: it is the responsibility of driver to reserve multiple descriptors
1428 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1429 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1430 *
1431 * Returns: VXGE_HW_OK - success;
1432 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1433 *
1434 */
1435 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1436 struct __vxge_hw_fifo *fifo,
1437 void **txdlh, void **txdl_priv)
1438 {
1439 struct __vxge_hw_channel *channel;
1440 enum vxge_hw_status status;
1441 int i;
1442
1443 channel = &fifo->channel;
1444
1445 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1446
1447 if (status == VXGE_HW_OK) {
1448 struct vxge_hw_fifo_txd *txdp =
1449 (struct vxge_hw_fifo_txd *)*txdlh;
1450 struct __vxge_hw_fifo_txdl_priv *priv;
1451
1452 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1453
1454 /* reset the TxDL's private */
1455 priv->align_dma_offset = 0;
1456 priv->align_vaddr_start = priv->align_vaddr;
1457 priv->align_used_frags = 0;
1458 priv->frags = 0;
1459 priv->alloc_frags = fifo->config->max_frags;
1460 priv->next_txdl_priv = NULL;
1461
1462 *txdl_priv = (void *)(size_t)txdp->host_control;
1463
1464 for (i = 0; i < fifo->config->max_frags; i++) {
1465 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1466 txdp->control_0 = txdp->control_1 = 0;
1467 }
1468 }
1469
1470 return status;
1471 }
1472
1473 /**
1474 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1475 * descriptor.
1476 * @fifo: Handle to the fifo object used for non offload send
1477 * @txdlh: Descriptor handle.
1478 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1479 * (of buffers).
1480 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1481 * @size: Size of the data buffer (in bytes).
1482 *
1483 * This API is part of the preparation of the transmit descriptor for posting
1484 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1485 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1486 * All three APIs fill in the fields of the fifo descriptor,
1487 * in accordance with the Titan specification.
1488 *
1489 */
1490 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1491 void *txdlh, u32 frag_idx,
1492 dma_addr_t dma_pointer, u32 size)
1493 {
1494 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1495 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1496
1497 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1498 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1499
1500 if (frag_idx != 0)
1501 txdp->control_0 = txdp->control_1 = 0;
1502 else {
1503 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1504 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1505 txdp->control_1 |= fifo->interrupt_type;
1506 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1507 fifo->tx_intr_num);
1508 if (txdl_priv->frags) {
1509 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1510 (txdl_priv->frags - 1);
1511 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1512 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1513 }
1514 }
1515
1516 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1517
1518 txdp->buffer_pointer = (u64)dma_pointer;
1519 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1520 fifo->stats->total_buffers++;
1521 txdl_priv->frags++;
1522 }
1523
1524 /**
1525 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1526 * @fifo: Handle to the fifo object used for non offload send
1527 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1528 * @frags: Number of contiguous buffers that are part of a single
1529 * transmit operation.
1530 *
1531 * Post descriptor on the 'fifo' type channel for transmission.
1532 * Prior to posting, the descriptor should be filled in accordance with
1533 * Host/Titan interface specification for a given service (LL, etc.).
1534 *
1535 */
1536 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1537 {
1538 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1539 struct vxge_hw_fifo_txd *txdp_last;
1540 struct vxge_hw_fifo_txd *txdp_first;
1541
1542 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1543 txdp_first = txdlh;
1544
1545 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1546 txdp_last->control_0 |=
1547 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1548 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1549
1550 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1551
1552 __vxge_hw_non_offload_db_post(fifo,
1553 (u64)txdl_priv->dma_addr,
1554 txdl_priv->frags - 1,
1555 fifo->no_snoop_bits);
1556
1557 fifo->stats->total_posts++;
1558 fifo->stats->common_stats.usage_cnt++;
1559 if (fifo->stats->common_stats.usage_max <
1560 fifo->stats->common_stats.usage_cnt)
1561 fifo->stats->common_stats.usage_max =
1562 fifo->stats->common_stats.usage_cnt;
1563 }
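
/*
 * Illustrative sketch (not part of the driver): the transmit-side
 * reserve -> fill -> post sequence built from the three APIs above. The
 * per-fragment DMA addresses are assumed to be mapped already, and nr_frags
 * must not exceed fifo->config->max_frags.
 *
 *	void *txdlh, *txdl_priv;
 *	u32 i;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return;	// out of descriptors; the caller should stop the queue
 *
 *	for (i = 0; i < nr_frags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *					     dma_addr[i], frag_len[i]);
 *
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);	// rings the doorbell
 */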
1564
1565 /**
1566 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1567 * @fifo: Handle to the fifo object used for non offload send
1568 * @txdlh: Descriptor handle. Returned by HW.
1569 * @t_code: Transfer code, as per Titan User Guide,
1570 * Transmit Descriptor Format.
1571 * Returned by HW.
1572 *
1573 * Retrieve the _next_ completed descriptor.
1574 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1575 * driver of new completed descriptors. After that
1576 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1577 * completions (the very first completion is passed by HW via
1578 * vxge_hw_channel_callback_f).
1579 *
1580 * Implementation-wise, the driver is free to call
1581 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1582 * channel callback, or in a deferred fashion and separate (from HW)
1583 * context.
1584 *
1585 * Non-zero @t_code means failure to process the descriptor.
1586 * The failure could happen, for instance, when the link is
1587 * down, in which case Titan completes the descriptor because it
1588 * is not able to send the data out.
1589 *
1590 * For details please refer to Titan User Guide.
1591 *
1592 * Returns: VXGE_HW_OK - success.
1593 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1594 * are currently available for processing.
1595 *
1596 */
1597 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1598 struct __vxge_hw_fifo *fifo, void **txdlh,
1599 enum vxge_hw_fifo_tcode *t_code)
1600 {
1601 struct __vxge_hw_channel *channel;
1602 struct vxge_hw_fifo_txd *txdp;
1603 enum vxge_hw_status status = VXGE_HW_OK;
1604
1605 channel = &fifo->channel;
1606
1607 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1608
1609 txdp = *txdlh;
1610 if (txdp == NULL) {
1611 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1612 goto exit;
1613 }
1614
1615 /* check whether host owns it */
1616 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1617
1618 vxge_assert(txdp->host_control != 0);
1619
1620 vxge_hw_channel_dtr_complete(channel);
1621
1622 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1623
1624 if (fifo->stats->common_stats.usage_cnt > 0)
1625 fifo->stats->common_stats.usage_cnt--;
1626
1627 status = VXGE_HW_OK;
1628 goto exit;
1629 }
1630
1631 /* no more completions */
1632 *txdlh = NULL;
1633 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1634 exit:
1635 return status;
1636 }
1637
1638 /**
1639 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1640 * @fifo: Handle to the fifo object used for non offload send
1641 * @txdlh: Descriptor handle.
1642 * @t_code: One of the enumerated (and documented in the Titan user guide)
1643 * "transfer codes".
1644 *
1645 * Handle descriptor's transfer code. The latter comes with each completed
1646 * descriptor.
1647 *
1648 * Returns: one of the enum vxge_hw_status{} enumerated types.
1649 * VXGE_HW_OK - for success.
1650 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1651 */
1652 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1653 void *txdlh,
1654 enum vxge_hw_fifo_tcode t_code)
1655 {
1656 enum vxge_hw_status status = VXGE_HW_OK;
1657
1658 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1659 status = VXGE_HW_ERR_INVALID_TCODE;
1660 goto exit;
1661 }
1662
1663 fifo->stats->txd_t_code_err_cnt[t_code]++;
1664 exit:
1665 return status;
1666 }
1667
1668 /**
1669 * vxge_hw_fifo_txdl_free - Free descriptor.
1670 * @fifo: Handle to the fifo object used for non offload send
1671 * @txdlh: Descriptor handle.
1672 *
1673 * Free the reserved descriptor. This operation is "symmetrical" to
1674 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1675 * lifecycle.
1676 *
1677 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1678 * be:
1679 *
1680 * - reserved (vxge_hw_fifo_txdl_reserve);
1681 *
1682 * - posted (vxge_hw_fifo_txdl_post);
1683 *
1684 * - completed (vxge_hw_fifo_txdl_next_completed);
1685 *
1686 * - and recycled again (vxge_hw_fifo_txdl_free).
1687 *
1688 * For alternative state transitions and more details please refer to
1689 * the design doc.
1690 *
1691 */
1692 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1693 {
1694 struct __vxge_hw_channel *channel;
1695
1696 channel = &fifo->channel;
1697
1698 vxge_hw_channel_dtr_free(channel, txdlh);
1699 }
1700
1701 /**
1702 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1703 * to MAC address table.
1704 * @vp: Vpath handle.
1705 * @macaddr: MAC address to be added for this vpath into the list
1706 * @macaddr_mask: MAC address mask for macaddr
1707 * @duplicate_mode: Duplicate MAC address add mode. Please see
1708 * enum vxge_hw_vpath_mac_addr_add_mode{}
1709 *
1710 * Adds the given mac address and mac address mask into the list for this
1711 * vpath.
1712 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1713 * vxge_hw_vpath_mac_addr_get_next
1714 *
1715 */
1716 enum vxge_hw_status
1717 vxge_hw_vpath_mac_addr_add(
1718 struct __vxge_hw_vpath_handle *vp,
1719 u8 (macaddr)[ETH_ALEN],
1720 u8 (macaddr_mask)[ETH_ALEN],
1721 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1722 {
1723 u32 i;
1724 u64 data1 = 0ULL;
1725 u64 data2 = 0ULL;
1726 enum vxge_hw_status status = VXGE_HW_OK;
1727
1728 if (vp == NULL) {
1729 status = VXGE_HW_ERR_INVALID_HANDLE;
1730 goto exit;
1731 }
1732
1733 for (i = 0; i < ETH_ALEN; i++) {
1734 data1 <<= 8;
1735 data1 |= (u8)macaddr[i];
1736
1737 data2 <<= 8;
1738 data2 |= (u8)macaddr_mask[i];
1739 }
1740
1741 switch (duplicate_mode) {
1742 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1743 i = 0;
1744 break;
1745 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1746 i = 1;
1747 break;
1748 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1749 i = 2;
1750 break;
1751 default:
1752 i = 0;
1753 break;
1754 }
1755
1756 status = __vxge_hw_vpath_rts_table_set(vp,
1757 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1758 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1759 0,
1760 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1761 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1762 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1763 exit:
1764 return status;
1765 }
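
/*
 * Illustrative sketch: programming one unicast address into the vpath DA
 * table.  The all-ones mask and the duplicate mode chosen here are
 * assumptions for the example, not a statement of what the driver does.
 *
 *	u8 mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */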
1766
1767 /**
1768 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1769 * from MAC address table.
1770 * @vp: Vpath handle.
1771 * @macaddr: First MAC address entry for this vpath in the list
1772 * @macaddr_mask: MAC address mask for macaddr
1773 *
1774 * Returns the first mac address and mac address mask in the list for this
1775 * vpath.
1776 * see also: vxge_hw_vpath_mac_addr_get_next
1777 *
1778 */
1779 enum vxge_hw_status
1780 vxge_hw_vpath_mac_addr_get(
1781 struct __vxge_hw_vpath_handle *vp,
1782 u8 (macaddr)[ETH_ALEN],
1783 u8 (macaddr_mask)[ETH_ALEN])
1784 {
1785 u32 i;
1786 u64 data1 = 0ULL;
1787 u64 data2 = 0ULL;
1788 enum vxge_hw_status status = VXGE_HW_OK;
1789
1790 if (vp == NULL) {
1791 status = VXGE_HW_ERR_INVALID_HANDLE;
1792 goto exit;
1793 }
1794
1795 status = __vxge_hw_vpath_rts_table_get(vp,
1796 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1797 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1798 0, &data1, &data2);
1799
1800 if (status != VXGE_HW_OK)
1801 goto exit;
1802
1803 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1804
1805 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1806
1807 for (i = ETH_ALEN; i > 0; i--) {
1808 macaddr[i-1] = (u8)(data1 & 0xFF);
1809 data1 >>= 8;
1810
1811 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1812 data2 >>= 8;
1813 }
1814 exit:
1815 return status;
1816 }
1817
1818 /**
1819 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1820 * vpath
1821 * from MAC address table.
1822 * @vp: Vpath handle.
1823 * @macaddr: Next MAC address entry for this vpath in the list
1824 * @macaddr_mask: MAC address mask for macaddr
1825 *
1826 * Returns the next mac address and mac address mask in the list for this
1827 * vpath.
1828 * see also: vxge_hw_vpath_mac_addr_get
1829 *
1830 */
1831 enum vxge_hw_status
1832 vxge_hw_vpath_mac_addr_get_next(
1833 struct __vxge_hw_vpath_handle *vp,
1834 u8 (macaddr)[ETH_ALEN],
1835 u8 (macaddr_mask)[ETH_ALEN])
1836 {
1837 u32 i;
1838 u64 data1 = 0ULL;
1839 u64 data2 = 0ULL;
1840 enum vxge_hw_status status = VXGE_HW_OK;
1841
1842 if (vp == NULL) {
1843 status = VXGE_HW_ERR_INVALID_HANDLE;
1844 goto exit;
1845 }
1846
1847 status = __vxge_hw_vpath_rts_table_get(vp,
1848 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1849 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1850 0, &data1, &data2);
1851
1852 if (status != VXGE_HW_OK)
1853 goto exit;
1854
1855 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1856
1857 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1858
1859 for (i = ETH_ALEN; i > 0; i--) {
1860 macaddr[i-1] = (u8)(data1 & 0xFF);
1861 data1 >>= 8;
1862
1863 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1864 data2 >>= 8;
1865 }
1866
1867 exit:
1868 return status;
1869 }
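
/*
 * Illustrative sketch: walking the whole DA table for a vpath by pairing
 * vxge_hw_vpath_mac_addr_get() with vxge_hw_vpath_mac_addr_get_next()
 * until no further entry is returned.
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		... use mac/mask for the current entry ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */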
1870
1871 /**
1872 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1873 * from the MAC address table.
1874 * @vp: Vpath handle.
1875 * @macaddr: MAC address to be deleted for this vpath from the list
1876 * @macaddr_mask: MAC address mask for macaddr
1877 *
1878 * Deletes the given mac address and mac address mask from the list for this
1879 * vpath.
1880 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1881 * vxge_hw_vpath_mac_addr_get_next
1882 *
1883 */
1884 enum vxge_hw_status
1885 vxge_hw_vpath_mac_addr_delete(
1886 struct __vxge_hw_vpath_handle *vp,
1887 u8 (macaddr)[ETH_ALEN],
1888 u8 (macaddr_mask)[ETH_ALEN])
1889 {
1890 u32 i;
1891 u64 data1 = 0ULL;
1892 u64 data2 = 0ULL;
1893 enum vxge_hw_status status = VXGE_HW_OK;
1894
1895 if (vp == NULL) {
1896 status = VXGE_HW_ERR_INVALID_HANDLE;
1897 goto exit;
1898 }
1899
1900 for (i = 0; i < ETH_ALEN; i++) {
1901 data1 <<= 8;
1902 data1 |= (u8)macaddr[i];
1903
1904 data2 <<= 8;
1905 data2 |= (u8)macaddr_mask[i];
1906 }
1907
1908 status = __vxge_hw_vpath_rts_table_set(vp,
1909 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1910 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1911 0,
1912 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1913 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1914 exit:
1915 return status;
1916 }
1917
1918 /**
1919 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1920 * to vlan id table.
1921 * @vp: Vpath handle.
1922 * @vid: vlan id to be added for this vpath into the list
1923 *
1924 * Adds the given vlan id into the list for this vpath.
1925 * see also: vxge_hw_vpath_vid_delete
1926 *
1927 */
1928 enum vxge_hw_status
1929 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1930 {
1931 enum vxge_hw_status status = VXGE_HW_OK;
1932
1933 if (vp == NULL) {
1934 status = VXGE_HW_ERR_INVALID_HANDLE;
1935 goto exit;
1936 }
1937
1938 status = __vxge_hw_vpath_rts_table_set(vp,
1939 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1940 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1941 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1942 exit:
1943 return status;
1944 }
1945
1946 /**
1947 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1948 * from the vlan id table.
1949 * @vp: Vpath handle.
1950 * @vid: vlan id to be deleted for this vpath from the list
1951 *
1952 * Deletes the given vlan id from the list for this vpath.
1953 * see also: vxge_hw_vpath_vid_add
1954 *
1955 */
1956 enum vxge_hw_status
1957 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1958 {
1959 enum vxge_hw_status status = VXGE_HW_OK;
1960
1961 if (vp == NULL) {
1962 status = VXGE_HW_ERR_INVALID_HANDLE;
1963 goto exit;
1964 }
1965
1966 status = __vxge_hw_vpath_rts_table_set(vp,
1967 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1968 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1969 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1970 exit:
1971 return status;
1972 }
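
/*
 * Illustrative sketch: mirroring the stack's VLAN filter into the vpath VID
 * table.  The surrounding context is assumed; vid comes from the network
 * stack as a 12-bit VLAN id.
 *
 *	when the stack registers a VLAN id:
 *		status = vxge_hw_vpath_vid_add(vp, vid);
 *
 *	when the stack drops it again:
 *		status = vxge_hw_vpath_vid_delete(vp, vid);
 */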
1973
1974 /**
1975 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1976 * @vp: Vpath handle.
1977 *
1978 * Enable promiscuous mode of Titan-e operation.
1979 *
1980 * See also: vxge_hw_vpath_promisc_disable().
1981 */
1982 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1983 struct __vxge_hw_vpath_handle *vp)
1984 {
1985 u64 val64;
1986 struct __vxge_hw_virtualpath *vpath;
1987 enum vxge_hw_status status = VXGE_HW_OK;
1988
1989 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1990 status = VXGE_HW_ERR_INVALID_HANDLE;
1991 goto exit;
1992 }
1993
1994 vpath = vp->vpath;
1995
1996 /* Enable promiscuous mode for function 0 only */
1997 if (!(vpath->hldev->access_rights &
1998 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1999 return VXGE_HW_OK;
2000
2001 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2002
2003 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2004
2005 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2006 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2007 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2008 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2009
2010 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2011 }
2012 exit:
2013 return status;
2014 }
2015
2016 /**
2017 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2018 * @vp: Vpath handle.
2019 *
2020 * Disable promiscuous mode of Titan-e operation.
2021 *
2022 * See also: vxge_hw_vpath_promisc_enable().
2023 */
2024 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2025 struct __vxge_hw_vpath_handle *vp)
2026 {
2027 u64 val64;
2028 struct __vxge_hw_virtualpath *vpath;
2029 enum vxge_hw_status status = VXGE_HW_OK;
2030
2031 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2032 status = VXGE_HW_ERR_INVALID_HANDLE;
2033 goto exit;
2034 }
2035
2036 vpath = vp->vpath;
2037
2038 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2039
2040 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2041
2042 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2043 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2044 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2045
2046 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2047 }
2048 exit:
2049 return status;
2050 }
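
/*
 * Illustrative sketch: following the netdev IFF_PROMISC flag from a
 * set_rx_mode-style handler (the netdev variable is assumed).  Note that
 * vxge_hw_vpath_promisc_enable() above only takes effect for a function
 * with mrpcim access rights.
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */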
2051
2052 /*
2053 * vxge_hw_vpath_bcast_enable - Enable broadcast
2054 * @vp: Vpath handle.
2055 *
2056 * Enable receiving broadcasts.
2057 */
2058 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2059 struct __vxge_hw_vpath_handle *vp)
2060 {
2061 u64 val64;
2062 struct __vxge_hw_virtualpath *vpath;
2063 enum vxge_hw_status status = VXGE_HW_OK;
2064
2065 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2066 status = VXGE_HW_ERR_INVALID_HANDLE;
2067 goto exit;
2068 }
2069
2070 vpath = vp->vpath;
2071
2072 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2073
2074 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2075 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2076 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2077 }
2078 exit:
2079 return status;
2080 }
2081
2082 /**
2083 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2084 * @vp: Vpath handle.
2085 *
2086 * Enable Titan-e multicast addresses.
2087 * Returns: VXGE_HW_OK on success.
2088 *
2089 */
2090 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2091 struct __vxge_hw_vpath_handle *vp)
2092 {
2093 u64 val64;
2094 struct __vxge_hw_virtualpath *vpath;
2095 enum vxge_hw_status status = VXGE_HW_OK;
2096
2097 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2098 status = VXGE_HW_ERR_INVALID_HANDLE;
2099 goto exit;
2100 }
2101
2102 vpath = vp->vpath;
2103
2104 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2105
2106 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2107 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2108 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2109 }
2110 exit:
2111 return status;
2112 }
2113
2114 /**
2115 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
2116 * @vp: Vpath handle.
2117 *
2118 * Disable Titan-e multicast addresses.
2119 * Returns: VXGE_HW_OK - success.
2120 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2121 *
2122 */
2123 enum vxge_hw_status
2124 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2125 {
2126 u64 val64;
2127 struct __vxge_hw_virtualpath *vpath;
2128 enum vxge_hw_status status = VXGE_HW_OK;
2129
2130 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2131 status = VXGE_HW_ERR_INVALID_HANDLE;
2132 goto exit;
2133 }
2134
2135 vpath = vp->vpath;
2136
2137 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2138
2139 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2140 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2141 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2142 }
2143 exit:
2144 return status;
2145 }
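
/*
 * Illustrative sketch: honouring IFF_ALLMULTI by switching the "accept all
 * multicast" bit per vpath (the netdev variable is assumed).
 *
 *	if (netdev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */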
2146
2147 /*
2148 * vxge_hw_vpath_alarm_process - Process Alarms.
2149 * @vpath: Virtual Path.
2150 * @skip_alarms: Do not clear the alarms
2151 *
2152 * Process vpath alarms.
2153 *
2154 */
2155 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2156 struct __vxge_hw_vpath_handle *vp,
2157 u32 skip_alarms)
2158 {
2159 enum vxge_hw_status status = VXGE_HW_OK;
2160
2161 if (vp == NULL) {
2162 status = VXGE_HW_ERR_INVALID_HANDLE;
2163 goto exit;
2164 }
2165
2166 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2167 exit:
2168 return status;
2169 }
2170
2171 /**
2172 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2173 * alarms
2174 * @vp: Virtual Path handle.
2175 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2176 * interrupts (can be repeated). If the fifo or ring is not enabled,
2177 * the corresponding MSIX vector should be set to 0.
2178 * @alarm_msix_id: MSIX vector for alarm.
2179 *
2180 * This API associates the given MSIX vector numbers with the four TIM
2181 * interrupts and the alarm interrupt.
2182 */
2183 void
2184 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2185 int alarm_msix_id)
2186 {
2187 u64 val64;
2188 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2189 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2190 u32 vp_id = vp->vpath->vp_id;
2191
2192 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2193 (vp_id * 4) + tim_msix_id[0]) |
2194 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2195 (vp_id * 4) + tim_msix_id[1]);
2196
2197 writeq(val64, &vp_reg->interrupt_cfg0);
2198
2199 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2200 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2201 &vp_reg->interrupt_cfg2);
2202
2203 if (vpath->hldev->config.intr_mode ==
2204 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2205 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2206 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2207 0, 32), &vp_reg->one_shot_vect0_en);
2208 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2209 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2210 0, 32), &vp_reg->one_shot_vect1_en);
2211 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2212 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2213 0, 32), &vp_reg->one_shot_vect2_en);
2214 }
2215 }
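
/*
 * Illustrative sketch: assigning per-vpath MSIX vectors before enabling
 * interrupts.  The array length of 4 and the particular numbering (vector 0
 * for the Tx TIM, 1 for the Rx TIM, alarm on vector 2) are assumptions for
 * the example.
 *
 *	int tim_msix_id[4] = { 0, 1, 0, 0 };
 *	int alarm_msix_id = 2;
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
 */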
2216
2217 /**
2218 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2219 * @vp: Virtual Path handle.
2220 * @msix_id: MSIX ID
2221 *
2222 * The function masks the msix interrupt for the given msix_id
2223 *
2224 * Returns: nothing.
2225 *
2226 * See also: vxge_hw_vpath_msix_unmask()
2228 */
2229 void
2230 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2231 {
2232 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2233 __vxge_hw_pio_mem_write32_upper(
2234 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2235 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2236 }
2237
2238 /**
2239 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2240 * @vp: Virtual Path handle.
2241 * @msix_id: MSIX ID
2242 *
2243 * The function clears the msix interrupt for the given msix_id
2244 *
2245 * Returns: nothing.
2246 *
2247 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
2249 */
2250 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2251 {
2252 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2253
2254 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2255 __vxge_hw_pio_mem_write32_upper(
2256 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2257 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2258 else
2259 __vxge_hw_pio_mem_write32_upper(
2260 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2261 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2262 }
2263
2264 /**
2265 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2266 * @vp: Virtual Path handle.
2267 * @msix_id: MSIX ID
2268 *
2269 * The function unmasks the msix interrupt for the given msix_id
2270 *
2271 * Returns: nothing.
2272 *
2273 * See also: vxge_hw_vpath_msix_mask()
2275 */
2276 void
2277 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2278 {
2279 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2280 __vxge_hw_pio_mem_write32_upper(
2281 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2282 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2283 }
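
/*
 * Illustrative sketch of how the three MSIX helpers above fit together in a
 * one-shot style vector handler; the surrounding handler structure is an
 * assumption.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	... process the completions for this vector ...
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */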
2284
2285 /**
2286 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2287 * @vp: Virtual Path handle.
2288 *
2289 * Mask Tx and Rx vpath interrupts.
2290 *
2291 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2292 */
2293 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2294 {
2295 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2296 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2297 u64 val64;
2298 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2299
2300 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2301 tim_int_mask1, vp->vpath->vp_id);
2302
2303 val64 = readq(&hldev->common_reg->tim_int_mask0);
2304
2305 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2306 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2307 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2308 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2309 &hldev->common_reg->tim_int_mask0);
2310 }
2311
2312 val64 = readl(&hldev->common_reg->tim_int_mask1);
2313
2314 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2315 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2316 __vxge_hw_pio_mem_write32_upper(
2317 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2318 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2319 &hldev->common_reg->tim_int_mask1);
2320 }
2321 }
2322
2323 /**
2324 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2325 * @vp: Virtual Path handle.
2326 *
2327 * Unmask Tx and Rx vpath interrupts.
2328 *
2329 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2330 */
2331 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2332 {
2333 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2334 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2335 u64 val64;
2336 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2337
2338 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2339 tim_int_mask1, vp->vpath->vp_id);
2340
2341 val64 = readq(&hldev->common_reg->tim_int_mask0);
2342
2343 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2344 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2345 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2346 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2347 &hldev->common_reg->tim_int_mask0);
2348 }
2349
2350 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2351 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2352 __vxge_hw_pio_mem_write32_upper(
2353 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2354 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2355 &hldev->common_reg->tim_int_mask1);
2356 }
2357 }
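
/*
 * Illustrative sketch: in INTA mode an ISR would mask the vpath Tx/Rx TIM
 * interrupts before handing work to a poll routine, which unmasks them once
 * processing is done.  The split between ISR and poll context is assumed.
 *
 *	in the interrupt handler:
 *		vxge_hw_vpath_inta_mask_tx_rx(vp);
 *
 *	when the deferred Tx/Rx processing has finished:
 *		vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */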
2358
2359 /**
2360 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2361 * descriptors and process the same.
2362 * @ring: Handle to the ring object used for receive
2363 *
2364 * The function polls the Rx for the completed descriptors and calls
2365 * the driver via supplied completion callback.
2366 *
2367 * Returns: VXGE_HW_OK, if the polling completed successfully.
2368 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2369 * descriptors available which are yet to be processed.
2370 *
2371 * See also: vxge_hw_vpath_poll_tx()
2372 */
2373 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2374 {
2375 u8 t_code;
2376 enum vxge_hw_status status = VXGE_HW_OK;
2377 void *first_rxdh;
2378 u64 val64 = 0;
2379 int new_count = 0;
2380
2381 ring->cmpl_cnt = 0;
2382
2383 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2384 if (status == VXGE_HW_OK)
2385 ring->callback(ring, first_rxdh,
2386 t_code, ring->channel.userdata);
2387
2388 if (ring->cmpl_cnt != 0) {
2389 ring->doorbell_cnt += ring->cmpl_cnt;
2390 if (ring->doorbell_cnt >= ring->rxds_limit) {
2391 /*
2392 * Each RxD is of 4 qwords, update the number of
2393 * qwords replenished
2394 */
2395 new_count = (ring->doorbell_cnt * 4);
2396
2397 /* For each block add 4 more qwords */
2398 ring->total_db_cnt += ring->doorbell_cnt;
2399 if (ring->total_db_cnt >= ring->rxds_per_block) {
2400 new_count += 4;
2401 /* Reset total count */
2402 ring->total_db_cnt %= ring->rxds_per_block;
2403 }
2404 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2405 &ring->vp_reg->prc_rxd_doorbell);
2406 val64 =
2407 readl(&ring->common_reg->titan_general_int_status);
2408 ring->doorbell_cnt = 0;
2409 }
2410 }
2411
2412 return status;
2413 }
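
/*
 * Illustrative sketch: driving the Rx poll from a NAPI-style context.  The
 * Rx completion callback is invoked from inside vxge_hw_vpath_poll_rx() via
 * ring->callback; the caller only has to look at the return value.  The
 * poll_again flag is hypothetical.
 *
 *	bool poll_again = false;
 *
 *	if (vxge_hw_vpath_poll_rx(ring) == VXGE_HW_COMPLETIONS_REMAIN)
 *		poll_again = true;
 */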
2414
2415 /**
2416 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2417 * the same.
2418 * @fifo: Handle to the fifo object used for non offload send
2419 *
2420 * The function polls the Tx for the completed descriptors and calls
2421 * the driver via supplied completion callback.
2422 *
2423 * Returns: VXGE_HW_OK, if the polling completed successfully.
2424 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2425 * descriptors available which are yet to be processed.
2426 */
2427 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2428 struct sk_buff ***skb_ptr, int nr_skb,
2429 int *more)
2430 {
2431 enum vxge_hw_fifo_tcode t_code;
2432 void *first_txdlh;
2433 enum vxge_hw_status status = VXGE_HW_OK;
2434 struct __vxge_hw_channel *channel;
2435
2436 channel = &fifo->channel;
2437
2438 status = vxge_hw_fifo_txdl_next_completed(fifo,
2439 &first_txdlh, &t_code);
2440 if (status == VXGE_HW_OK)
2441 if (fifo->callback(fifo, first_txdlh, t_code,
2442 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2443 status = VXGE_HW_COMPLETIONS_REMAIN;
2444
2445 return status;
2446 }
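
/*
 * Illustrative sketch: collecting completed skbs from a Tx poll and freeing
 * them afterwards.  The batch size, and the assumption that the completion
 * callback advances *skb_ptr past the entries it filled in, are both
 * hypothetical details of the example.
 *
 *	struct sk_buff *completed[16];
 *	struct sk_buff **skb_ptr = completed;
 *	struct sk_buff **cur;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed), &more);
 *	for (cur = completed; cur != skb_ptr; cur++)
 *		dev_kfree_skb_irq(*cur);
 */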
2447