cascardo/linux.git: drivers/net/vxge/vxge-traffic.c
1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11  *                 Virtualized Server Adapter.
12  * Copyright(c) 2002-2009 Neterion Inc.
13  ******************************************************************************/
14 #include <linux/etherdevice.h>
15
16 #include "vxge-traffic.h"
17 #include "vxge-config.h"
18 #include "vxge-main.h"
19
20 /*
21  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22  * @vp: Virtual Path handle.
23  *
24  * Enable vpath interrupts. The function is to be executed last in the
25  * vpath initialization sequence.
26  *
27  * See also: vxge_hw_vpath_intr_disable()
28  */
29 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
30 {
31         u64 val64;
32
33         struct __vxge_hw_virtualpath *vpath;
34         struct vxge_hw_vpath_reg __iomem *vp_reg;
35         enum vxge_hw_status status = VXGE_HW_OK;
36         if (vp == NULL) {
37                 status = VXGE_HW_ERR_INVALID_HANDLE;
38                 goto exit;
39         }
40
41         vpath = vp->vpath;
42
43         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
45                 goto exit;
46         }
47
48         vp_reg = vpath->vp_reg;
49
50         writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
51
52         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53                         &vp_reg->general_errors_reg);
54
55         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56                         &vp_reg->pci_config_errors_reg);
57
58         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59                         &vp_reg->mrpcim_to_vpath_alarm_reg);
60
61         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62                         &vp_reg->srpcim_to_vpath_alarm_reg);
63
64         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65                         &vp_reg->vpath_ppif_int_status);
66
67         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68                         &vp_reg->srpcim_msg_to_vpath_reg);
69
70         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71                         &vp_reg->vpath_pcipif_int_status);
72
73         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74                         &vp_reg->prc_alarm_reg);
75
76         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77                         &vp_reg->wrdma_alarm_status);
78
79         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80                         &vp_reg->asic_ntwk_vp_err_reg);
81
82         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83                         &vp_reg->xgmac_vp_int_status);
84
85         val64 = readq(&vp_reg->vpath_general_int_status);
86
87         /* Mask unwanted interrupts */
88
89         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90                         &vp_reg->vpath_pcipif_int_mask);
91
92         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93                         &vp_reg->srpcim_msg_to_vpath_mask);
94
95         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96                         &vp_reg->srpcim_to_vpath_alarm_mask);
97
98         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99                         &vp_reg->mrpcim_to_vpath_alarm_mask);
100
101         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102                         &vp_reg->pci_config_errors_mask);
103
104         /* Unmask the individual interrupts */
105
106         writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107                 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108                 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109                 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110                 &vp_reg->general_errors_mask);
111
112         __vxge_hw_pio_mem_write32_upper(
113                 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118                 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119                 &vp_reg->kdfcctl_errors_mask);
120
121         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
122
123         __vxge_hw_pio_mem_write32_upper(
124                 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125                 &vp_reg->prc_alarm_mask);
126
127         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128         __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
129
130         if (vpath->hldev->first_vp_id != vpath->vp_id)
131                 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132                         &vp_reg->asic_ntwk_vp_err_mask);
133         else
134                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135                 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136                 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137                 &vp_reg->asic_ntwk_vp_err_mask);
138
139         __vxge_hw_pio_mem_write32_upper(0,
140                 &vp_reg->vpath_general_int_mask);
141 exit:
142         return status;
143
144 }
145
146 /*
147  * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148  * @vp: Virtual Path handle.
149  *
150  * Disable vpath interrupts. The function is to be executed first in the
151  * vpath teardown sequence.
152  *
153  * See also: vxge_hw_vpath_intr_enable()
154  */
155 enum vxge_hw_status vxge_hw_vpath_intr_disable(
156                         struct __vxge_hw_vpath_handle *vp)
157 {
158         u64 val64;
159
160         struct __vxge_hw_virtualpath *vpath;
161         enum vxge_hw_status status = VXGE_HW_OK;
162         struct vxge_hw_vpath_reg __iomem *vp_reg;
163         if (vp == NULL) {
164                 status = VXGE_HW_ERR_INVALID_HANDLE;
165                 goto exit;
166         }
167
168         vpath = vp->vpath;
169
170         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
172                 goto exit;
173         }
174         vp_reg = vpath->vp_reg;
175
176         __vxge_hw_pio_mem_write32_upper(
177                 (u32)VXGE_HW_INTR_MASK_ALL,
178                 &vp_reg->vpath_general_int_mask);
179
180         val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
181
182         writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
183
184         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185                         &vp_reg->general_errors_mask);
186
187         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188                         &vp_reg->pci_config_errors_mask);
189
190         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191                         &vp_reg->mrpcim_to_vpath_alarm_mask);
192
193         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194                         &vp_reg->srpcim_to_vpath_alarm_mask);
195
196         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197                         &vp_reg->vpath_ppif_int_mask);
198
199         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200                         &vp_reg->srpcim_msg_to_vpath_mask);
201
202         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203                         &vp_reg->vpath_pcipif_int_mask);
204
205         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206                         &vp_reg->wrdma_alarm_mask);
207
208         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209                         &vp_reg->prc_alarm_mask);
210
211         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212                         &vp_reg->xgmac_vp_int_mask);
213
214         __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215                         &vp_reg->asic_ntwk_vp_err_mask);
216
217 exit:
218         return status;
219 }
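
/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * A caller typically enables vpath interrupts as the last step of bringing
 * a vpath up and disables them before tearing it down; the handle name
 * "vp" below is hypothetical:
 *
 *	if (vxge_hw_vpath_intr_enable(vp) != VXGE_HW_OK)
 *		goto err_out;			// vpath open fails
 *	// ... vpath is live ...
 *	vxge_hw_vpath_intr_disable(vp);		// before closing the vpath
 */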
220
221 /**
222  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223  * @channel: Channel for rx or tx handle
224  * @msix_id:  MSIX ID
225  *
226  * The function masks the msix interrupt for the given msix_id
227  *
228  * Returns: void
229  */
230 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231 {
232
233         __vxge_hw_pio_mem_write32_upper(
234                 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
235                 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
236
237         return;
238 }
239
240 /**
241  * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
242  * @channel: Channel for rx or tx handle
243  * @msix_id:  MSIX ID
244  *
245  * The function unmasks the msix interrupt for the given msix_id
246  *
247  * Returns: void
248  */
249 void
250 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
251 {
252
253         __vxge_hw_pio_mem_write32_upper(
254                 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
255                 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
256
257         return;
258 }
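
/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * A per-vector MSI-X handler would typically mask its vector on entry and
 * unmask it when done; "channel" and "msix_id" are assumed to come from
 * the caller's context:
 *
 *	vxge_hw_channel_msix_mask(channel, msix_id);
 *	// ... process completions for this channel ...
 *	vxge_hw_channel_msix_unmask(channel, msix_id);
 */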
259
260 /**
261  * vxge_hw_device_set_intr_type - Updates the configuration
262  *              with new interrupt type.
263  * @hldev: HW device handle.
264  * @intr_mode: New interrupt type
265  */
266 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
267 {
268
269         if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
270            (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
271            (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
272            (intr_mode != VXGE_HW_INTR_MODE_DEF))
273                 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
274
275         hldev->config.intr_mode = intr_mode;
276         return intr_mode;
277 }
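
/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The interrupt mode is normally selected before interrupts are enabled;
 * any unsupported value silently falls back to INTA (IRQLINE):
 *
 *	vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	vxge_hw_device_intr_enable(hldev);
 */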
278
279 /**
280  * vxge_hw_device_intr_enable - Enable interrupts.
281  * @hldev: HW device handle.
284  *
285  * Enable Titan interrupts. The function is to be executed last in the
286  * Titan initialization sequence.
287  *
288  * See also: vxge_hw_device_intr_disable()
289  */
290 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
291 {
292         u32 i;
293         u64 val64;
294         u32 val32;
295
296         vxge_hw_device_mask_all(hldev);
297
298         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
299
300                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
301                         continue;
302
303                 vxge_hw_vpath_intr_enable(
304                         VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
305         }
306
307         if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
308                 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
309                         hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
310
311                 if (val64 != 0) {
312                         writeq(val64, &hldev->common_reg->tim_int_status0);
313
314                         writeq(~val64, &hldev->common_reg->tim_int_mask0);
315                 }
316
317                 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
318                         hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
319
320                 if (val32 != 0) {
321                         __vxge_hw_pio_mem_write32_upper(val32,
322                                         &hldev->common_reg->tim_int_status1);
323
324                         __vxge_hw_pio_mem_write32_upper(~val32,
325                                         &hldev->common_reg->tim_int_mask1);
326                 }
327         }
328
329         val64 = readq(&hldev->common_reg->titan_general_int_status);
330
331         vxge_hw_device_unmask_all(hldev);
332
333         return;
334 }
335
336 /**
337  * vxge_hw_device_intr_disable - Disable Titan interrupts.
338  * @hldev: HW device handle.
341  *
342  * Disable Titan interrupts.
343  *
344  * See also: vxge_hw_device_intr_enable()
345  */
346 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
347 {
348         u32 i;
349
350         vxge_hw_device_mask_all(hldev);
351
352         /* mask all the tim interrupts */
353         writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
354         __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
355                 &hldev->common_reg->tim_int_mask1);
356
357         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
358
359                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
360                         continue;
361
362                 vxge_hw_vpath_intr_disable(
363                         VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
364         }
365
366         return;
367 }
368
369 /**
370  * vxge_hw_device_mask_all - Mask all device interrupts.
371  * @hldev: HW device handle.
372  *
373  * Mask all device interrupts.
374  *
375  * See also: vxge_hw_device_unmask_all()
376  */
377 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
378 {
379         u64 val64;
380
381         val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
382                 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
383
384         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
385                                 &hldev->common_reg->titan_mask_all_int);
386
387         return;
388 }
389
390 /**
391  * vxge_hw_device_unmask_all - Unmask all device interrupts.
392  * @hldev: HW device handle.
393  *
394  * Unmask all device interrupts.
395  *
396  * See also: vxge_hw_device_mask_all()
397  */
398 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
399 {
400         u64 val64 = 0;
401
402         if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
403                 val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
404
405         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
406                         &hldev->common_reg->titan_mask_all_int);
407
408         return;
409 }
410
411 /**
412  * vxge_hw_device_flush_io - Flush io writes.
413  * @hldev: HW device handle.
414  *
415  * The function performs a read operation to flush io writes.
416  *
417  * Returns: void
418  */
419 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
420 {
421         u32 val32;
422
423         val32 = readl(&hldev->common_reg->titan_general_int_status);
424 }
425
426 /**
427  * vxge_hw_device_begin_irq - Begin IRQ processing.
428  * @hldev: HW device handle.
429  * @skip_alarms: Do not clear the alarms
430  * @reason: "Reason" for the interrupt, the value of Titan's
431  *      general_int_status register.
432  *
433  * The function performs two actions: it checks whether the (possibly shared)
434  * IRQ was raised by the device, and then masks the device interrupts.
435  *
436  * Note:
437  * vxge_hw_device_begin_irq() does not flush MMIO writes through the
438  * bridge. Therefore, two back-to-back interrupts are potentially possible.
439  *
440  * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
441  * in this case the device remains enabled and @reason is set to 0).
442  * Otherwise, vxge_hw_device_begin_irq() returns VXGE_HW_OK (or an alarm
443  * status) and stores the 64bit general adapter status in @reason.
444  */
445 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
446                                              u32 skip_alarms, u64 *reason)
447 {
448         u32 i;
449         u64 val64;
450         u64 adapter_status;
451         u64 vpath_mask;
452         enum vxge_hw_status ret = VXGE_HW_OK;
453
454         val64 = readq(&hldev->common_reg->titan_general_int_status);
455
456         if (unlikely(!val64)) {
457                 /* not Titan interrupt  */
458                 *reason = 0;
459                 ret = VXGE_HW_ERR_WRONG_IRQ;
460                 goto exit;
461         }
462
463         if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
464
465                 adapter_status = readq(&hldev->common_reg->adapter_status);
466
467                 if (adapter_status == VXGE_HW_ALL_FOXES) {
468
469                         __vxge_hw_device_handle_error(hldev,
470                                 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
471                         *reason = 0;
472                         ret = VXGE_HW_ERR_SLOT_FREEZE;
473                         goto exit;
474                 }
475         }
476
477         hldev->stats.sw_dev_info_stats.total_intr_cnt++;
478
479         *reason = val64;
480
481         vpath_mask = hldev->vpaths_deployed >>
482                                 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
483
484         if (val64 &
485             VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
486                 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
487
488                 return VXGE_HW_OK;
489         }
490
491         hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
492
493         if (unlikely(val64 &
494                         VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
495
496                 enum vxge_hw_status error_level = VXGE_HW_OK;
497
498                 hldev->stats.sw_dev_err_stats.vpath_alarms++;
499
500                 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
501
502                         if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
503                                 continue;
504
505                         ret = __vxge_hw_vpath_alarm_process(
506                                 &hldev->virtual_paths[i], skip_alarms);
507
508                         error_level = VXGE_HW_SET_LEVEL(ret, error_level);
509
510                         if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
511                                 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
512                                 break;
513                 }
514
515                 ret = error_level;
516         }
517 exit:
518         return ret;
519 }
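
/*
 * Editor's note -- illustrative INTA handler sketch, not part of the
 * original driver; the function name "vxge_isr" and the use of dev_id as
 * the device handle are hypothetical:
 *
 *	static irqreturn_t vxge_isr(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *
 *		if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
 *		    VXGE_HW_ERR_WRONG_IRQ)
 *			return IRQ_NONE;	// shared line, not ours
 *
 *		// acknowledge Tx/Rx and defer the real work (e.g. to NAPI)
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		return IRQ_HANDLED;
 *	}
 */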
520
521 /*
522  * __vxge_hw_device_handle_link_up_ind
523  * @hldev: HW device handle.
524  *
525  * Link up indication handler. The function is invoked by HW when
526  * Titan indicates that the link is up for a programmable amount of time.
527  */
528 enum vxge_hw_status
529 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
530 {
531         /*
532          * If the link is already marked up, return.
533          */
534         if (hldev->link_state == VXGE_HW_LINK_UP)
535                 goto exit;
536
537         hldev->link_state = VXGE_HW_LINK_UP;
538
539         /* notify driver */
540         if (hldev->uld_callbacks.link_up)
541                 hldev->uld_callbacks.link_up(hldev);
542 exit:
543         return VXGE_HW_OK;
544 }
545
546 /*
547  * __vxge_hw_device_handle_link_down_ind
548  * @hldev: HW device handle.
549  *
550  * Link down indication handler. The function is invoked by HW when
551  * Titan indicates that the link is down.
552  */
553 enum vxge_hw_status
554 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
555 {
556         /*
557          * If the link is already marked down, return.
558          */
559         if (hldev->link_state == VXGE_HW_LINK_DOWN)
560                 goto exit;
561
562         hldev->link_state = VXGE_HW_LINK_DOWN;
563
564         /* notify driver */
565         if (hldev->uld_callbacks.link_down)
566                 hldev->uld_callbacks.link_down(hldev);
567 exit:
568         return VXGE_HW_OK;
569 }
570
571 /**
572  * __vxge_hw_device_handle_error - Handle error
573  * @hldev: HW device
574  * @vp_id: Vpath Id
575  * @type: Error type. Please see enum vxge_hw_event{}
576  *
577  * Handle error.
578  */
579 enum vxge_hw_status
580 __vxge_hw_device_handle_error(
581                 struct __vxge_hw_device *hldev,
582                 u32 vp_id,
583                 enum vxge_hw_event type)
584 {
585         switch (type) {
586         case VXGE_HW_EVENT_UNKNOWN:
587                 break;
588         case VXGE_HW_EVENT_RESET_START:
589         case VXGE_HW_EVENT_RESET_COMPLETE:
590         case VXGE_HW_EVENT_LINK_DOWN:
591         case VXGE_HW_EVENT_LINK_UP:
592                 goto out;
593         case VXGE_HW_EVENT_ALARM_CLEARED:
594                 goto out;
595         case VXGE_HW_EVENT_ECCERR:
596         case VXGE_HW_EVENT_MRPCIM_ECCERR:
597                 goto out;
598         case VXGE_HW_EVENT_FIFO_ERR:
599         case VXGE_HW_EVENT_VPATH_ERR:
600         case VXGE_HW_EVENT_CRITICAL_ERR:
601         case VXGE_HW_EVENT_SERR:
602                 break;
603         case VXGE_HW_EVENT_SRPCIM_SERR:
604         case VXGE_HW_EVENT_MRPCIM_SERR:
605                 goto out;
606         case VXGE_HW_EVENT_SLOT_FREEZE:
607                 break;
608         default:
609                 vxge_assert(0);
610                 goto out;
611         }
612
613         /* notify driver */
614         if (hldev->uld_callbacks.crit_err)
615                 hldev->uld_callbacks.crit_err(
616                         (struct __vxge_hw_device *)hldev,
617                         type, vp_id);
618 out:
619
620         return VXGE_HW_OK;
621 }
622
623 /**
624  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
625  * condition that has caused the Tx and RX interrupt.
626  * @hldev: HW device.
627  *
628  * Acknowledge (that is, clear) the condition that has caused
629  * the Tx and Rx interrupt.
630  * See also: vxge_hw_device_begin_irq(),
631  * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
632  */
633 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
634 {
635
636         if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
637            (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
638                 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
639                                  hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
640                                 &hldev->common_reg->tim_int_status0);
641         }
642
643         if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
644            (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
645                 __vxge_hw_pio_mem_write32_upper(
646                                 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
647                                  hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
648                                 &hldev->common_reg->tim_int_status1);
649         }
650
651         return;
652 }
653
654 /*
655  * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
656  * @channel: Channel
657  * @dtrh: Buffer to return the DTR pointer
658  *
659  * Allocates a dtr from the reserve array. If the reserve array is empty,
660  * it swaps the reserve and free arrays.
661  *
662  */
663 enum vxge_hw_status
664 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
665 {
666         void **tmp_arr;
667
668         if (channel->reserve_ptr - channel->reserve_top > 0) {
669 _alloc_after_swap:
670                 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
671
672                 return VXGE_HW_OK;
673         }
674
675         /* switch between empty and full arrays */
676
677         /* The idea behind this design is that keeping the free and reserve
678          * arrays separate also separates the irq and non-irq parts, i.e. no
679          * additional locking is needed when we free a resource. */
680
681         if (channel->length - channel->free_ptr > 0) {
682
683                 tmp_arr = channel->reserve_arr;
684                 channel->reserve_arr = channel->free_arr;
685                 channel->free_arr = tmp_arr;
686                 channel->reserve_ptr = channel->length;
687                 channel->reserve_top = channel->free_ptr;
688                 channel->free_ptr = channel->length;
689
690                 channel->stats->reserve_free_swaps_cnt++;
691
692                 goto _alloc_after_swap;
693         }
694
695         channel->stats->full_cnt++;
696
697         *dtrh = NULL;
698         return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
699 }
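
/*
 * Editor's note -- illustrative sketch, not part of the original driver,
 * showing the descriptor lifecycle behind the reserve/free array split:
 * allocate from the reserve array, post to the work array, and return the
 * descriptor to the free array from the completion path without extra
 * locking:
 *
 *	void *dtrh;
 *
 *	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) == VXGE_HW_OK) {
 *		// ... fill in the descriptor ...
 *		vxge_hw_channel_dtr_post(channel, dtrh);
 *	}
 *	// later, on completion:
 *	vxge_hw_channel_dtr_free(channel, dtrh);
 */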
700
701 /*
702  * vxge_hw_channel_dtr_post - Post a dtr to the channel
703  * @channelh: Channel
704  * @dtrh: DTR pointer
705  *
706  * Posts a dtr to work array.
707  *
708  */
709 void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
710 {
711         vxge_assert(channel->work_arr[channel->post_index] == NULL);
712
713         channel->work_arr[channel->post_index++] = dtrh;
714
715         /* wrap-around */
716         if (channel->post_index == channel->length)
717                 channel->post_index = 0;
718 }
719
720 /*
721  * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
722  * @channel: Channel
723  * @dtr: Buffer to return the next completed DTR pointer
724  *
725  * Returns the next completed dtr without removing it from the work array
726  *
727  */
728 void
729 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
730 {
731         vxge_assert(channel->compl_index < channel->length);
732
733         *dtrh = channel->work_arr[channel->compl_index];
734         prefetch(*dtrh);
735 }
736
737 /*
738  * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
739  * @channel: Channel handle
740  *
741  * Removes the next completed dtr from the work array
742  *
743  */
744 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
745 {
746         channel->work_arr[channel->compl_index] = NULL;
747
748         /* wrap-around */
749         if (++channel->compl_index == channel->length)
750                 channel->compl_index = 0;
751
752         channel->stats->total_compl_cnt++;
753 }
754
755 /*
756  * vxge_hw_channel_dtr_free - Frees a dtr
757  * @channel: Channel handle
758  * @dtr:  DTR pointer
759  *
760  * Returns the dtr to the free array
761  *
762  */
763 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
764 {
765         channel->free_arr[--channel->free_ptr] = dtrh;
766 }
767
768 /*
769  * vxge_hw_channel_dtr_count
770  * @channel: Channel handle. Obtained via vxge_hw_channel_open().
771  *
772  * Retrieve the number of DTRs available. This function cannot be called
773  * from the data path; ring_initial_replenishi() is the only user.
774  */
775 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
776 {
777         return (channel->reserve_ptr - channel->reserve_top) +
778                 (channel->length - channel->free_ptr);
779 }
780
781 /**
782  * vxge_hw_ring_rxd_reserve     - Reserve ring descriptor.
783  * @ring: Handle to the ring object used for receive
784  * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
785  * with a valid handle.
786  *
787  * Reserve an Rx descriptor for subsequent filling-in by the driver
788  * and posting on the corresponding channel
789  * via vxge_hw_ring_rxd_post().
790  *
791  * Returns: VXGE_HW_OK - success.
792  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
793  *
794  */
795 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
796         void **rxdh)
797 {
798         enum vxge_hw_status status;
799         struct __vxge_hw_channel *channel;
800
801         channel = &ring->channel;
802
803         status = vxge_hw_channel_dtr_alloc(channel, rxdh);
804
805         if (status == VXGE_HW_OK) {
806                 struct vxge_hw_ring_rxd_1 *rxdp =
807                         (struct vxge_hw_ring_rxd_1 *)*rxdh;
808
809                 rxdp->control_0 = rxdp->control_1 = 0;
810         }
811
812         return status;
813 }
814
815 /**
816  * vxge_hw_ring_rxd_free - Free descriptor.
817  * @ring: Handle to the ring object used for receive
818  * @rxdh: Descriptor handle.
819  *
820  * Free the reserved descriptor. This operation is "symmetrical" to
821  * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
822  * lifecycle.
823  *
824  * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
825  * be:
826  *
827  * - reserved (vxge_hw_ring_rxd_reserve);
828  *
829  * - posted     (vxge_hw_ring_rxd_post);
830  *
831  * - completed (vxge_hw_ring_rxd_next_completed);
832  *
833  * - and recycled again (vxge_hw_ring_rxd_free).
834  *
835  * For alternative state transitions and more details please refer to
836  * the design doc.
837  *
838  */
839 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
840 {
841         struct __vxge_hw_channel *channel;
842
843         channel = &ring->channel;
844
845         vxge_hw_channel_dtr_free(channel, rxdh);
846
847 }
848
849 /**
850  * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
851  * @ring: Handle to the ring object used for receive
852  * @rxdh: Descriptor handle.
853  *
854  * This routine prepares a rxd and posts
855  */
856 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
857 {
858         struct __vxge_hw_channel *channel;
859
860         channel = &ring->channel;
861
862         vxge_hw_channel_dtr_post(channel, rxdh);
863 }
864
865 /**
866  * vxge_hw_ring_rxd_post_post - Process rxd after post.
867  * @ring: Handle to the ring object used for receive
868  * @rxdh: Descriptor handle.
869  *
870  * Processes rxd after post
871  */
872 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
873 {
874         struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
875         struct __vxge_hw_channel *channel;
876
877         channel = &ring->channel;
878
879         rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
880
881         if (ring->stats->common_stats.usage_cnt > 0)
882                 ring->stats->common_stats.usage_cnt--;
883 }
884
885 /**
886  * vxge_hw_ring_rxd_post - Post descriptor on the ring.
887  * @ring: Handle to the ring object used for receive
888  * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
889  *
890  * Post descriptor on the ring.
891  * Prior to posting the descriptor should be filled in accordance with
892  * Host/Titan interface specification for a given service (LL, etc.).
893  *
894  */
895 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
896 {
897         struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
898         struct __vxge_hw_channel *channel;
899
900         channel = &ring->channel;
901
902         wmb();
903         rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
904
905         vxge_hw_channel_dtr_post(channel, rxdh);
906
907         if (ring->stats->common_stats.usage_cnt > 0)
908                 ring->stats->common_stats.usage_cnt--;
909 }
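
/*
 * Editor's note -- illustrative Rx replenish sketch, not part of the
 * original driver. Each descriptor is reserved, given a DMA-mapped receive
 * buffer by the caller (step only hinted at here), and posted to the ring:
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// ... attach a DMA-mapped receive buffer to rxdh ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */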
910
911 /**
912  * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
913  * @ring: Handle to the ring object used for receive
914  * @rxdh: Descriptor handle.
915  *
916  * Processes rxd after post with memory barrier.
917  */
918 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
919 {
920         struct __vxge_hw_channel *channel;
921
922         channel = &ring->channel;
923
924         wmb();
925         vxge_hw_ring_rxd_post_post(ring, rxdh);
926 }
927
928 /**
929  * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
930  * @ring: Handle to the ring object used for receive
931  * @rxdh: Descriptor handle. Returned by HW.
932  * @t_code:     Transfer code, as per Titan User Guide,
933  *       Receive Descriptor Format. Returned by HW.
934  *
935  * Retrieve the _next_ completed descriptor.
936  * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
937  * driver of new completed descriptors. After that
938  * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
939  * completions (the very first completion is passed by HW via
940  * vxge_hw_ring_callback_f).
941  *
942  * Implementation-wise, the driver is free to call
943  * vxge_hw_ring_rxd_next_completed either immediately from inside the
944  * ring callback, or in a deferred fashion and separate (from HW)
945  * context.
946  *
947  * Non-zero @t_code means failure to fill-in receive buffer(s)
948  * of the descriptor.
949  * For instance, parity error detected during the data transfer.
950  * In this case Titan will complete the descriptor and indicate
951  * to the host that the received data is not to be used.
952  * For details please refer to Titan User Guide.
953  *
954  * Returns: VXGE_HW_OK - success.
955  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
956  * are currently available for processing.
957  *
958  * See also: vxge_hw_ring_callback_f{},
959  * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
960  */
961 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
962         struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
963 {
964         struct __vxge_hw_channel *channel;
965         struct vxge_hw_ring_rxd_1 *rxdp;
966         enum vxge_hw_status status = VXGE_HW_OK;
967         u64 control_0, own;
968
969         channel = &ring->channel;
970
971         vxge_hw_channel_dtr_try_complete(channel, rxdh);
972
973         rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
974         if (rxdp == NULL) {
975                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
976                 goto exit;
977         }
978
979         control_0 = rxdp->control_0;
980         own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
981         *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
982
983         /* check whether it is not the end */
984         if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
985
986                 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
987                                 0);
988
989                 ++ring->cmpl_cnt;
990                 vxge_hw_channel_dtr_complete(channel);
991
992                 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
993
994                 ring->stats->common_stats.usage_cnt++;
995                 if (ring->stats->common_stats.usage_max <
996                                 ring->stats->common_stats.usage_cnt)
997                         ring->stats->common_stats.usage_max =
998                                 ring->stats->common_stats.usage_cnt;
999
1000                 status = VXGE_HW_OK;
1001                 goto exit;
1002         }
1003
1004         /* reset it. since we don't want to return
1005          * garbage to the driver */
1006         *rxdh = NULL;
1007         status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1008 exit:
1009         return status;
1010 }
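
/*
 * Editor's note -- illustrative Rx completion loop, not part of the
 * original driver, combining vxge_hw_ring_rxd_next_completed(),
 * vxge_hw_ring_handle_tcode() and vxge_hw_ring_rxd_free():
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) == VXGE_HW_OK) {
 *			// ... hand the received buffer to the stack ...
 *		}
 *		vxge_hw_ring_rxd_free(ring, rxdh);
 *	}
 */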
1011
1012 /**
1013  * vxge_hw_ring_handle_tcode - Handle transfer code.
1014  * @ring: Handle to the ring object used for receive
1015  * @rxdh: Descriptor handle.
1016  * @t_code: One of the enumerated (and documented in the Titan user guide)
1017  * "transfer codes".
1018  *
1019  * Handle descriptor's transfer code. The latter comes with each completed
1020  * descriptor.
1021  *
1022  * Returns: one of the enum vxge_hw_status{} enumerated types.
1023  * VXGE_HW_OK                   - for success.
1024  * VXGE_HW_ERR_CRITICAL         - when encounters critical error.
1025  */
1026 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1027         struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1028 {
1029         struct __vxge_hw_channel *channel;
1030         enum vxge_hw_status status = VXGE_HW_OK;
1031
1032         channel = &ring->channel;
1033
1034         /* If the t_code is not supported and is not 0x5 (an
1035          * unparseable packet, e.g. one with an unknown IPv6
1036          * header), drop it.
1037          */
1038
1039         if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1040                 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1041                 status = VXGE_HW_OK;
1042                 goto exit;
1043         }
1044
1045         if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1046                 status = VXGE_HW_ERR_INVALID_TCODE;
1047                 goto exit;
1048         }
1049
1050         ring->stats->rxd_t_code_err_cnt[t_code]++;
1051 exit:
1052         return status;
1053 }
1054
1055 /**
1056  * __vxge_hw_non_offload_db_post - Post non offload doorbell
1057  *
1058  * @fifo: fifohandle
1059  * @txdl_ptr: The starting location of the TxDL in host memory
1060  * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1061  * @no_snoop: No snoop flags
1062  *
1063  * This function posts a non-offload doorbell to doorbell FIFO
1064  *
1065  */
1066 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1067         u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1068 {
1069         struct __vxge_hw_channel *channel;
1070
1071         channel = &fifo->channel;
1072
1073         writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1074                 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1075                 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1076                 &fifo->nofl_db->control_0);
1077
1078         mmiowb();
1079
1080         writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1081
1082         mmiowb();
1083 }
1084
1085 /**
1086  * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1087  * the fifo
1088  * @fifoh: Handle to the fifo object used for non offload send
1089  */
1090 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1091 {
1092         return vxge_hw_channel_dtr_count(&fifoh->channel);
1093 }
1094
1095 /**
1096  * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1097  * @fifoh: Handle to the fifo object used for non offload send
1098  * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1099  *        with a valid handle.
1100  * @txdl_priv: Buffer to return the pointer to per txdl space
1101  *
1102  * Reserve a single TxDL (that is, fifo descriptor)
1103  * for subsequent filling-in by the driver
1104  * and posting on the corresponding channel
1105  * via vxge_hw_fifo_txdl_post().
1106  *
1107  * Note: it is the responsibility of driver to reserve multiple descriptors
1108  * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1109  * carries up to configured number (fifo.max_frags) of contiguous buffers.
1110  *
1111  * Returns: VXGE_HW_OK - success;
1112  * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1113  *
1114  */
1115 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1116         struct __vxge_hw_fifo *fifo,
1117         void **txdlh, void **txdl_priv)
1118 {
1119         struct __vxge_hw_channel *channel;
1120         enum vxge_hw_status status;
1121         int i;
1122
1123         channel = &fifo->channel;
1124
1125         status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1126
1127         if (status == VXGE_HW_OK) {
1128                 struct vxge_hw_fifo_txd *txdp =
1129                         (struct vxge_hw_fifo_txd *)*txdlh;
1130                 struct __vxge_hw_fifo_txdl_priv *priv;
1131
1132                 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1133
1134                 /* reset the TxDL's private */
1135                 priv->align_dma_offset = 0;
1136                 priv->align_vaddr_start = priv->align_vaddr;
1137                 priv->align_used_frags = 0;
1138                 priv->frags = 0;
1139                 priv->alloc_frags = fifo->config->max_frags;
1140                 priv->next_txdl_priv = NULL;
1141
1142                 *txdl_priv = (void *)(size_t)txdp->host_control;
1143
1144                 for (i = 0; i < fifo->config->max_frags; i++) {
1145                         txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1146                         txdp->control_0 = txdp->control_1 = 0;
1147                 }
1148         }
1149
1150         return status;
1151 }
1152
1153 /**
1154  * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1155  * descriptor.
1156  * @fifo: Handle to the fifo object used for non offload send
1157  * @txdlh: Descriptor handle.
1158  * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1159  *            (of buffers).
1160  * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1161  * @size: Size of the data buffer (in bytes).
1162  *
1163  * This API is part of the preparation of the transmit descriptor for posting
1164  * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1165  * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1166  * All three APIs fill in the fields of the fifo descriptor,
1167  * in accordance with the Titan specification.
1168  *
1169  */
1170 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1171                                   void *txdlh, u32 frag_idx,
1172                                   dma_addr_t dma_pointer, u32 size)
1173 {
1174         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1175         struct vxge_hw_fifo_txd *txdp, *txdp_last;
1176         struct __vxge_hw_channel *channel;
1177
1178         channel = &fifo->channel;
1179
1180         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1181         txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1182
1183         if (frag_idx != 0)
1184                 txdp->control_0 = txdp->control_1 = 0;
1185         else {
1186                 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1187                         VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1188                 txdp->control_1 |= fifo->interrupt_type;
1189                 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1190                         fifo->tx_intr_num);
1191                 if (txdl_priv->frags) {
1192                         txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1193                         (txdl_priv->frags - 1);
1194                         txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1195                                 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1196                 }
1197         }
1198
1199         vxge_assert(frag_idx < txdl_priv->alloc_frags);
1200
1201         txdp->buffer_pointer = (u64)dma_pointer;
1202         txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1203         fifo->stats->total_buffers++;
1204         txdl_priv->frags++;
1205 }
1206
1207 /**
1208  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1209  * @fifo: Handle to the fifo object used for non offload send
1210  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1211  * @frags: Number of contiguous buffers that are part of a single
1212  *         transmit operation.
1213  *
1214  * Post descriptor on the 'fifo' type channel for transmission.
1215  * Prior to posting the descriptor should be filled in accordance with
1216  * Host/Titan interface specification for a given service (LL, etc.).
1217  *
1218  */
1219 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1220 {
1221         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1222         struct vxge_hw_fifo_txd *txdp_last;
1223         struct vxge_hw_fifo_txd *txdp_first;
1224         struct __vxge_hw_channel *channel;
1225
1226         channel = &fifo->channel;
1227
1228         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1229         txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1230
1231         txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1232         txdp_last->control_0 |=
1233               VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1234         txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1235
1236         vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1237
1238         __vxge_hw_non_offload_db_post(fifo,
1239                 (u64)txdl_priv->dma_addr,
1240                 txdl_priv->frags - 1,
1241                 fifo->no_snoop_bits);
1242
1243         fifo->stats->total_posts++;
1244         fifo->stats->common_stats.usage_cnt++;
1245         if (fifo->stats->common_stats.usage_max <
1246                 fifo->stats->common_stats.usage_cnt)
1247                 fifo->stats->common_stats.usage_max =
1248                         fifo->stats->common_stats.usage_cnt;
1249 }
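
/*
 * Editor's note -- illustrative Tx posting sketch, not part of the
 * original driver; it assumes the buffers are already DMA-mapped and the
 * names "dma_addr", "len" and "nfrags" are hypothetical:
 *
 *	void *txdlh, *txdl_priv;
 *	u32 i;
 *
 *	if (vxge_hw_fifo_free_txdl_count_get(fifo) == 0)
 *		return;		// caller should have stopped the queue
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return;
 *
 *	for (i = 0; i < nfrags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *					     dma_addr[i], len[i]);
 *
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */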
1250
1251 /**
1252  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1253  * @fifo: Handle to the fifo object used for non offload send
1254  * @txdlh: Descriptor handle. Returned by HW.
1255  * @t_code: Transfer code, as per Titan User Guide,
1256  *          Transmit Descriptor Format.
1257  *          Returned by HW.
1258  *
1259  * Retrieve the _next_ completed descriptor.
1260  * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1261  * driver of new completed descriptors. After that
1262  * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1263  * completions (the very first completion is passed by HW via
1264  * vxge_hw_channel_callback_f).
1265  *
1266  * Implementation-wise, the driver is free to call
1267  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1268  * channel callback, or in a deferred fashion and separate (from HW)
1269  * context.
1270  *
1271  * Non-zero @t_code means failure to process the descriptor.
1272  * The failure could happen, for instance, when the link is
1273  * down, in which case Titan completes the descriptor because it
1274  * is not able to send the data out.
1275  *
1276  * For details please refer to Titan User Guide.
1277  *
1278  * Returns: VXGE_HW_OK - success.
1279  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1280  * are currently available for processing.
1281  *
1282  */
1283 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1284         struct __vxge_hw_fifo *fifo, void **txdlh,
1285         enum vxge_hw_fifo_tcode *t_code)
1286 {
1287         struct __vxge_hw_channel *channel;
1288         struct vxge_hw_fifo_txd *txdp;
1289         enum vxge_hw_status status = VXGE_HW_OK;
1290
1291         channel = &fifo->channel;
1292
1293         vxge_hw_channel_dtr_try_complete(channel, txdlh);
1294
1295         txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1296         if (txdp == NULL) {
1297                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1298                 goto exit;
1299         }
1300
1301         /* check whether host owns it */
1302         if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1303
1304                 vxge_assert(txdp->host_control != 0);
1305
1306                 vxge_hw_channel_dtr_complete(channel);
1307
1308                 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1309
1310                 if (fifo->stats->common_stats.usage_cnt > 0)
1311                         fifo->stats->common_stats.usage_cnt--;
1312
1313                 status = VXGE_HW_OK;
1314                 goto exit;
1315         }
1316
1317         /* no more completions */
1318         *txdlh = NULL;
1319         status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1320 exit:
1321         return status;
1322 }
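
/*
 * Editor's note -- illustrative Tx completion loop, not part of the
 * original driver, pairing vxge_hw_fifo_txdl_next_completed() with
 * vxge_hw_fifo_handle_tcode() and vxge_hw_fifo_txdl_free():
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		// ... unmap and free the buffers tied to this TxDL ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */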
1323
1324 /**
1325  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1326  * @fifo: Handle to the fifo object used for non offload send
1327  * @txdlh: Descriptor handle.
1328  * @t_code: One of the enumerated (and documented in the Titan user guide)
1329  *          "transfer codes".
1330  *
1331  * Handle descriptor's transfer code. The latter comes with each completed
1332  * descriptor.
1333  *
1334  * Returns: one of the enum vxge_hw_status{} enumerated types.
1335  * VXGE_HW_OK - for success.
1336  * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1337  */
1338 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1339                                               void *txdlh,
1340                                               enum vxge_hw_fifo_tcode t_code)
1341 {
1342         struct __vxge_hw_channel *channel;
1343
1344         enum vxge_hw_status status = VXGE_HW_OK;
1345         channel = &fifo->channel;
1346
1347         if ((t_code & 0x7) > 0x4) {
1348                 status = VXGE_HW_ERR_INVALID_TCODE;
1349                 goto exit;
1350         }
1351
1352         fifo->stats->txd_t_code_err_cnt[t_code]++;
1353 exit:
1354         return status;
1355 }
1356
1357 /**
1358  * vxge_hw_fifo_txdl_free - Free descriptor.
1359  * @fifo: Handle to the fifo object used for non offload send
1360  * @txdlh: Descriptor handle.
1361  *
1362  * Free the reserved descriptor. This operation is "symmetrical" to
1363  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1364  * lifecycle.
1365  *
1366  * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1367  * be:
1368  *
1369  * - reserved (vxge_hw_fifo_txdl_reserve);
1370  *
1371  * - posted (vxge_hw_fifo_txdl_post);
1372  *
1373  * - completed (vxge_hw_fifo_txdl_next_completed);
1374  *
1375  * - and recycled again (vxge_hw_fifo_txdl_free).
1376  *
1377  * For alternative state transitions and more details please refer to
1378  * the design doc.
1379  *
1380  */
1381 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1382 {
1383         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1384         u32 max_frags;
1385         struct __vxge_hw_channel *channel;
1386
1387         channel = &fifo->channel;
1388
1389         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1390                         (struct vxge_hw_fifo_txd *)txdlh);
1391
1392         max_frags = fifo->config->max_frags;
1393
1394         vxge_hw_channel_dtr_free(channel, txdlh);
1395 }
1396
1397 /**
1398  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1399  *               to MAC address table.
1400  * @vp: Vpath handle.
1401  * @macaddr: MAC address to be added for this vpath into the list
1402  * @macaddr_mask: MAC address mask for macaddr
1403  * @duplicate_mode: Duplicate MAC address add mode. Please see
1404  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1405  *
1406  * Adds the given mac address and mac address mask into the list for this
1407  * vpath.
1408  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1409  * vxge_hw_vpath_mac_addr_get_next
1410  *
1411  */
1412 enum vxge_hw_status
1413 vxge_hw_vpath_mac_addr_add(
1414         struct __vxge_hw_vpath_handle *vp,
1415         u8 (macaddr)[ETH_ALEN],
1416         u8 (macaddr_mask)[ETH_ALEN],
1417         enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1418 {
1419         u32 i;
1420         u64 data1 = 0ULL;
1421         u64 data2 = 0ULL;
1422         enum vxge_hw_status status = VXGE_HW_OK;
1423
1424         if (vp == NULL) {
1425                 status = VXGE_HW_ERR_INVALID_HANDLE;
1426                 goto exit;
1427         }
1428
1429         for (i = 0; i < ETH_ALEN; i++) {
1430                 data1 <<= 8;
1431                 data1 |= (u8)macaddr[i];
1432
1433                 data2 <<= 8;
1434                 data2 |= (u8)macaddr_mask[i];
1435         }
1436
1437         switch (duplicate_mode) {
1438         case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1439                 i = 0;
1440                 break;
1441         case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1442                 i = 1;
1443                 break;
1444         case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1445                 i = 2;
1446                 break;
1447         default:
1448                 i = 0;
1449                 break;
1450         }
1451
1452         status = __vxge_hw_vpath_rts_table_set(vp,
1453                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1454                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1455                         0,
1456                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1457                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1458                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1459 exit:
1460         return status;
1461 }
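
/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * Programming a unicast address for a vpath; the all-ones mask is an
 * assumption meaning "match every byte of the address":
 *
 *	u8 mac[ETH_ALEN]  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */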
1462
1463 /**
1464  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1465  *               from MAC address table.
1466  * @vp: Vpath handle.
1467  * @macaddr: First MAC address entry for this vpath in the list
1468  * @macaddr_mask: MAC address mask for macaddr
1469  *
1470  * Returns the first mac address and mac address mask in the list for this
1471  * vpath.
1472  * see also: vxge_hw_vpath_mac_addr_get_next
1473  *
1474  */
1475 enum vxge_hw_status
1476 vxge_hw_vpath_mac_addr_get(
1477         struct __vxge_hw_vpath_handle *vp,
1478         u8 (macaddr)[ETH_ALEN],
1479         u8 (macaddr_mask)[ETH_ALEN])
1480 {
1481         u32 i;
1482         u64 data1 = 0ULL;
1483         u64 data2 = 0ULL;
1484         enum vxge_hw_status status = VXGE_HW_OK;
1485
1486         if (vp == NULL) {
1487                 status = VXGE_HW_ERR_INVALID_HANDLE;
1488                 goto exit;
1489         }
1490
1491         status = __vxge_hw_vpath_rts_table_get(vp,
1492                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1493                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1494                         0, &data1, &data2);
1495
1496         if (status != VXGE_HW_OK)
1497                 goto exit;
1498
1499         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1500
1501         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1502
1503         for (i = ETH_ALEN; i > 0; i--) {
1504                 macaddr[i-1] = (u8)(data1 & 0xFF);
1505                 data1 >>= 8;
1506
1507                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1508                 data2 >>= 8;
1509         }
1510 exit:
1511         return status;
1512 }
1513
1514 /**
1515  * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for
1516  *               this vpath
1517  *               from MAC address table.
1518  * @vp: Vpath handle.
1519  * @macaddr: Next MAC address entry for this vpath in the list
1520  * @macaddr_mask: MAC address mask for macaddr
1521  *
1522  * Returns the next mac address and mac address mask in the list for this
1523  * vpath.
1524  * see also: vxge_hw_vpath_mac_addr_get
1525  *
1526  */
1527 enum vxge_hw_status
1528 vxge_hw_vpath_mac_addr_get_next(
1529         struct __vxge_hw_vpath_handle *vp,
1530         u8 (macaddr)[ETH_ALEN],
1531         u8 (macaddr_mask)[ETH_ALEN])
1532 {
1533         u32 i;
1534         u64 data1 = 0ULL;
1535         u64 data2 = 0ULL;
1536         enum vxge_hw_status status = VXGE_HW_OK;
1537
1538         if (vp == NULL) {
1539                 status = VXGE_HW_ERR_INVALID_HANDLE;
1540                 goto exit;
1541         }
1542
1543         status = __vxge_hw_vpath_rts_table_get(vp,
1544                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1545                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1546                         0, &data1, &data2);
1547
1548         if (status != VXGE_HW_OK)
1549                 goto exit;
1550
1551         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1552
1553         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1554
1555         for (i = ETH_ALEN; i > 0; i--) {
1556                 macaddr[i-1] = (u8)(data1 & 0xFF);
1557                 data1 >>= 8;
1558
1559                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1560                 data2 >>= 8;
1561         }
1562
1563 exit:
1564         return status;
1565 }
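
/*
 * Editor's note -- illustrative sketch, not part of the original driver,
 * for walking a vpath's MAC address table with _get()/_get_next():
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, addr, mask);
 *	while (status == VXGE_HW_OK) {
 *		// ... use addr/mask ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, addr, mask);
 *	}
 */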
1566
1567 /**
1568  * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1569  *               from the MAC address table.
1570  * @vp: Vpath handle.
1571  * @macaddr: MAC address to be added for this vpath into the list
1572  * @macaddr_mask: MAC address mask for macaddr
1573  *
1574  * Delete the given mac address and mac address mask into the list for this
1575  * vpath.
1576  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1577  * vxge_hw_vpath_mac_addr_get_next
1578  *
1579  */
1580 enum vxge_hw_status
1581 vxge_hw_vpath_mac_addr_delete(
1582         struct __vxge_hw_vpath_handle *vp,
1583         u8 (macaddr)[ETH_ALEN],
1584         u8 (macaddr_mask)[ETH_ALEN])
1585 {
1586         u32 i;
1587         u64 data1 = 0ULL;
1588         u64 data2 = 0ULL;
1589         enum vxge_hw_status status = VXGE_HW_OK;
1590
1591         if (vp == NULL) {
1592                 status = VXGE_HW_ERR_INVALID_HANDLE;
1593                 goto exit;
1594         }
1595
1596         for (i = 0; i < ETH_ALEN; i++) {
1597                 data1 <<= 8;
1598                 data1 |= (u8)macaddr[i];
1599
1600                 data2 <<= 8;
1601                 data2 |= (u8)macaddr_mask[i];
1602         }
1603
1604         status = __vxge_hw_vpath_rts_table_set(vp,
1605                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1606                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1607                         0,
1608                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1609                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1610 exit:
1611         return status;
1612 }
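/*
 * Example (illustrative sketch): removing one previously added unicast
 * address from the vpath's DA table.  The all-ones address mask assumes the
 * entry was added as an exact match; both vp and that mask convention are
 * assumptions of this sketch, not requirements of the API.
 *
 *	u8 mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 *
 *	if (vxge_hw_vpath_mac_addr_delete(vp, mac, mask) != VXGE_HW_OK)
 *		pr_warn("vxge: could not delete %pM from the DA table\n", mac);
 */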
1613
1614 /**
1615  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1616  *               to vlan id table.
1617  * @vp: Vpath handle.
1618  * @vid: vlan id to be added for this vpath into the list
1619  *
1620  * Adds the given vlan id into the list for this vpath.
1621  * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1622  * vxge_hw_vpath_vid_get_next
1623  *
1624  */
1625 enum vxge_hw_status
1626 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1627 {
1628         enum vxge_hw_status status = VXGE_HW_OK;
1629
1630         if (vp == NULL) {
1631                 status = VXGE_HW_ERR_INVALID_HANDLE;
1632                 goto exit;
1633         }
1634
1635         status = __vxge_hw_vpath_rts_table_set(vp,
1636                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1637                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1638                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1639 exit:
1640         return status;
1641 }
1642
1643 /**
1644  * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1645  *               from vlan id table.
1646  * @vp: Vpath handle.
1647  * @vid: Buffer to return vlan id
1648  *
1649  * Returns the first vlan id in the list for this vpath.
1650  * see also: vxge_hw_vpath_vid_get_next
1651  *
1652  */
1653 enum vxge_hw_status
1654 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1655 {
1656         u64 data;
1657         enum vxge_hw_status status = VXGE_HW_OK;
1658
1659         if (vp == NULL) {
1660                 status = VXGE_HW_ERR_INVALID_HANDLE;
1661                 goto exit;
1662         }
1663
1664         status = __vxge_hw_vpath_rts_table_get(vp,
1665                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1666                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1667                         0, vid, &data);
1668
1669         *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1670 exit:
1671         return status;
1672 }
1673
1674 /**
1675  * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1676  *               from vlan id table.
1677  * @vp: Vpath handle.
1678  * @vid: Buffer to return vlan id
1679  *
1680  * Returns the next vlan id in the list for this vpath.
1681  * see also: vxge_hw_vpath_vid_get
1682  *
1683  */
1684 enum vxge_hw_status
1685 vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1686 {
1687         u64 data;
1688         enum vxge_hw_status status = VXGE_HW_OK;
1689
1690         if (vp == NULL) {
1691                 status = VXGE_HW_ERR_INVALID_HANDLE;
1692                 goto exit;
1693         }
1694
1695         status = __vxge_hw_vpath_rts_table_get(vp,
1696                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1697                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1698                         0, vid, &data);
1699
1700         *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1701 exit:
1702         return status;
1703 }
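/*
 * Example (illustrative sketch): enumerating the VLAN id table of a vpath
 * with vxge_hw_vpath_vid_get() and vxge_hw_vpath_vid_get_next().  As in the
 * MAC table sketch above, vp is an assumed open vpath handle and a
 * non-VXGE_HW_OK status is treated as the end of the list.
 *
 *	u64 vid;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_vid_get(vp, &vid);
 *	while (status == VXGE_HW_OK) {
 *		pr_info("vpath vlan id %llu\n", (unsigned long long)vid);
 *		status = vxge_hw_vpath_vid_get_next(vp, &vid);
 *	}
 */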
1704
1705 /**
1706  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1707  *               from vlan id table.
1708  * @vp: Vpath handle.
1709  * @vid: vlan id to be deleted from the list for this vpath
1710  *
1711  * Deletes the given vlan id from the list for this vpath.
1712  * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1713  * vxge_hw_vpath_vid_get_next
1714  *
1715  */
1716 enum vxge_hw_status
1717 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1718 {
1719         enum vxge_hw_status status = VXGE_HW_OK;
1720
1721         if (vp == NULL) {
1722                 status = VXGE_HW_ERR_INVALID_HANDLE;
1723                 goto exit;
1724         }
1725
1726         status = __vxge_hw_vpath_rts_table_set(vp,
1727                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1728                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1729                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1730 exit:
1731         return status;
1732 }
1733
1734 /**
1735  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1736  * @vp: Vpath handle.
1737  *
1738  * Enable promiscuous mode of Titan-e operation.
1739  *
1740  * See also: vxge_hw_vpath_promisc_disable().
1741  */
1742 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1743                         struct __vxge_hw_vpath_handle *vp)
1744 {
1745         u64 val64;
1746         struct __vxge_hw_virtualpath *vpath;
1747         enum vxge_hw_status status = VXGE_HW_OK;
1748
1749         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1750                 status = VXGE_HW_ERR_INVALID_HANDLE;
1751                 goto exit;
1752         }
1753
1754         vpath = vp->vpath;
1755
1756                 /* Enable promiscuous mode for function 0 only */
1757         if (!(vpath->hldev->access_rights &
1758                 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1759                 return VXGE_HW_OK;
1760
1761         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1762
1763         if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1764
1765                 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1766                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1767                          VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1768                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1769
1770                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1771         }
1772 exit:
1773         return status;
1774 }
1775
1776 /**
1777  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1778  * @vp: Vpath handle.
1779  *
1780  * Disable promiscuous mode of Titan-e operation.
1781  *
1782  * See also: vxge_hw_vpath_promisc_enable().
1783  */
1784 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1785                         struct __vxge_hw_vpath_handle *vp)
1786 {
1787         u64 val64;
1788         struct __vxge_hw_virtualpath *vpath;
1789         enum vxge_hw_status status = VXGE_HW_OK;
1790
1791         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1792                 status = VXGE_HW_ERR_INVALID_HANDLE;
1793                 goto exit;
1794         }
1795
1796         vpath = vp->vpath;
1797
1798         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1799
1800         if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1801
1802                 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1803                            VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1804                            VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1805
1806                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1807         }
1808 exit:
1809         return status;
1810 }
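/*
 * Example (illustrative sketch): toggling promiscuous mode from an
 * rx-mode/set-multicast style path.  As the code above shows,
 * vxge_hw_vpath_promisc_enable() silently returns VXGE_HW_OK for functions
 * without MRPCIM access rights, so only the privileged function actually
 * touches rxmac_vcfg0.  netdev and vp are assumed caller context.
 *
 *	enum vxge_hw_status status;
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		status = vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		status = vxge_hw_vpath_promisc_disable(vp);
 */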
1811
1812 /*
1813  * vxge_hw_vpath_bcast_enable - Enable broadcast
1814  * @vp: Vpath handle.
1815  *
1816  * Enable receiving broadcasts.
1817  */
1818 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1819                         struct __vxge_hw_vpath_handle *vp)
1820 {
1821         u64 val64;
1822         struct __vxge_hw_virtualpath *vpath;
1823         enum vxge_hw_status status = VXGE_HW_OK;
1824
1825         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1826                 status = VXGE_HW_ERR_INVALID_HANDLE;
1827                 goto exit;
1828         }
1829
1830         vpath = vp->vpath;
1831
1832         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1833
1834         if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1835                 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1836                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1837         }
1838 exit:
1839         return status;
1840 }
1841
1842 /**
1843  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1844  * @vp: Vpath handle.
1845  *
1846  * Enable Titan-e multicast addresses.
1847  * Returns: VXGE_HW_OK on success.
1848  *
1849  */
1850 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1851                         struct __vxge_hw_vpath_handle *vp)
1852 {
1853         u64 val64;
1854         struct __vxge_hw_virtualpath *vpath;
1855         enum vxge_hw_status status = VXGE_HW_OK;
1856
1857         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1858                 status = VXGE_HW_ERR_INVALID_HANDLE;
1859                 goto exit;
1860         }
1861
1862         vpath = vp->vpath;
1863
1864         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1865
1866         if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1867                 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1868                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1869         }
1870 exit:
1871         return status;
1872 }
1873
1874 /**
1875  * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1876  * @vp: Vpath handle.
1877  *
1878  * Disable Titan-e multicast addresses.
1879  * Returns: VXGE_HW_OK - success.
1880  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1881  *
1882  */
1883 enum vxge_hw_status
1884 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1885 {
1886         u64 val64;
1887         struct __vxge_hw_virtualpath *vpath;
1888         enum vxge_hw_status status = VXGE_HW_OK;
1889
1890         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1891                 status = VXGE_HW_ERR_INVALID_HANDLE;
1892                 goto exit;
1893         }
1894
1895         vpath = vp->vpath;
1896
1897         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1898
1899         if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1900                 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1901                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1902         }
1903 exit:
1904         return status;
1905 }
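/*
 * Example (illustrative sketch): pairing the broadcast and multicast helpers
 * the way an rx-mode handler might, keeping broadcast reception on and
 * switching all-multicast with IFF_ALLMULTI.  netdev and vp are assumed
 * caller context, not part of this file.
 *
 *	enum vxge_hw_status status;
 *
 *	vxge_hw_vpath_bcast_enable(vp);
 *	if (netdev->flags & IFF_ALLMULTI)
 *		status = vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		status = vxge_hw_vpath_mcast_disable(vp);
 */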
1906
1907 /*
1908  * __vxge_hw_vpath_alarm_process - Process Alarms.
1909  * @vpath: Virtual Path.
1910  * @skip_alarms: Do not clear the alarms
1911  *
1912  * Process vpath alarms.
1913  *
1914  */
1915 enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1916                         struct __vxge_hw_virtualpath *vpath,
1917                         u32 skip_alarms)
1918 {
1919         u64 val64;
1920         u64 alarm_status;
1921         u64 pic_status;
1922         struct __vxge_hw_device *hldev = NULL;
1923         enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1924         u64 mask64;
1925         struct vxge_hw_vpath_stats_sw_info *sw_stats;
1926         struct vxge_hw_vpath_reg __iomem *vp_reg;
1927
1928         if (vpath == NULL) {
1929                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1930                         alarm_event);
1931                 goto out2;
1932         }
1933
1934         hldev = vpath->hldev;
1935         vp_reg = vpath->vp_reg;
1936         alarm_status = readq(&vp_reg->vpath_general_int_status);
1937
1938         if (alarm_status == VXGE_HW_ALL_FOXES) {
1939                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1940                         alarm_event);
1941                 goto out;
1942         }
1943
1944         sw_stats = vpath->sw_stats;
1945
1946         if (alarm_status & ~(
1947                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1948                 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1949                 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1950                 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1951                 sw_stats->error_stats.unknown_alarms++;
1952
1953                 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1954                         alarm_event);
1955                 goto out;
1956         }
1957
1958         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1959
1960                 val64 = readq(&vp_reg->xgmac_vp_int_status);
1961
1962                 if (val64 &
1963                 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1964
1965                         val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1966
1967                         if (((val64 &
1968                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1969                              (!(val64 &
1970                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1971                             ((val64 &
1972                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
1973                              (!(val64 &
1974                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1975                                      ))) {
1976                                 sw_stats->error_stats.network_sustained_fault++;
1977
1978                                 writeq(
1979                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1980                                         &vp_reg->asic_ntwk_vp_err_mask);
1981
1982                                 __vxge_hw_device_handle_link_down_ind(hldev);
1983                                 alarm_event = VXGE_HW_SET_LEVEL(
1984                                         VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1985                         }
1986
1987                         if (((val64 &
1988                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1989                              (!(val64 &
1990                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1991                             ((val64 &
1992                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
1993                              (!(val64 &
1994                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1995                                      ))) {
1996
1997                                 sw_stats->error_stats.network_sustained_ok++;
1998
1999                                 writeq(
2000                                 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
2001                                         &vp_reg->asic_ntwk_vp_err_mask);
2002
2003                                 __vxge_hw_device_handle_link_up_ind(hldev);
2004                                 alarm_event = VXGE_HW_SET_LEVEL(
2005                                         VXGE_HW_EVENT_LINK_UP, alarm_event);
2006                         }
2007
2008                         writeq(VXGE_HW_INTR_MASK_ALL,
2009                                 &vp_reg->asic_ntwk_vp_err_reg);
2010
2011                         alarm_event = VXGE_HW_SET_LEVEL(
2012                                 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2013
2014                         if (skip_alarms)
2015                                 return VXGE_HW_OK;
2016                 }
2017         }
2018
2019         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2020
2021                 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2022
2023                 if (pic_status &
2024                     VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2025
2026                         val64 = readq(&vp_reg->general_errors_reg);
2027                         mask64 = readq(&vp_reg->general_errors_mask);
2028
2029                         if ((val64 &
2030                                 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2031                                 ~mask64) {
2032                                 sw_stats->error_stats.ini_serr_det++;
2033
2034                                 alarm_event = VXGE_HW_SET_LEVEL(
2035                                         VXGE_HW_EVENT_SERR, alarm_event);
2036                         }
2037
2038                         if ((val64 &
2039                             VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2040                                 ~mask64) {
2041                                 sw_stats->error_stats.dblgen_fifo0_overflow++;
2042
2043                                 alarm_event = VXGE_HW_SET_LEVEL(
2044                                         VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2045                         }
2046
2047                         if ((val64 &
2048                             VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2049                                 ~mask64)
2050                                 sw_stats->error_stats.statsb_pif_chain_error++;
2051
2052                         if ((val64 &
2053                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2054                                 ~mask64)
2055                                 sw_stats->error_stats.statsb_drop_timeout++;
2056
2057                         if ((val64 &
2058                                 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2059                                 ~mask64)
2060                                 sw_stats->error_stats.target_illegal_access++;
2061
2062                         if (!skip_alarms) {
2063                                 writeq(VXGE_HW_INTR_MASK_ALL,
2064                                         &vp_reg->general_errors_reg);
2065                                 alarm_event = VXGE_HW_SET_LEVEL(
2066                                         VXGE_HW_EVENT_ALARM_CLEARED,
2067                                         alarm_event);
2068                         }
2069                 }
2070
2071                 if (pic_status &
2072                     VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2073
2074                         val64 = readq(&vp_reg->kdfcctl_errors_reg);
2075                         mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2076
2077                         if ((val64 &
2078                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2079                                 ~mask64) {
2080                                 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2081
2082                                 alarm_event = VXGE_HW_SET_LEVEL(
2083                                         VXGE_HW_EVENT_FIFO_ERR,
2084                                         alarm_event);
2085                         }
2086
2087                         if ((val64 &
2088                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2089                                 ~mask64) {
2090                                 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2091
2092                                 alarm_event = VXGE_HW_SET_LEVEL(
2093                                         VXGE_HW_EVENT_FIFO_ERR,
2094                                         alarm_event);
2095                         }
2096
2097                         if ((val64 &
2098                             VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2099                                 ~mask64) {
2100                                 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2101
2102                                 alarm_event = VXGE_HW_SET_LEVEL(
2103                                         VXGE_HW_EVENT_FIFO_ERR,
2104                                         alarm_event);
2105                         }
2106
2107                         if (!skip_alarms) {
2108                                 writeq(VXGE_HW_INTR_MASK_ALL,
2109                                         &vp_reg->kdfcctl_errors_reg);
2110                                 alarm_event = VXGE_HW_SET_LEVEL(
2111                                         VXGE_HW_EVENT_ALARM_CLEARED,
2112                                         alarm_event);
2113                         }
2114                 }
2115
2116         }
2117
2118         if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2119
2120                 val64 = readq(&vp_reg->wrdma_alarm_status);
2121
2122                 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2123
2124                         val64 = readq(&vp_reg->prc_alarm_reg);
2125                         mask64 = readq(&vp_reg->prc_alarm_mask);
2126
2127                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
2128                                 ~mask64)
2129                                 sw_stats->error_stats.prc_ring_bumps++;
2130
2131                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2132                                 ~mask64) {
2133                                 sw_stats->error_stats.prc_rxdcm_sc_err++;
2134
2135                                 alarm_event = VXGE_HW_SET_LEVEL(
2136                                         VXGE_HW_EVENT_VPATH_ERR,
2137                                         alarm_event);
2138                         }
2139
2140                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2141                                 & ~mask64) {
2142                                 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2143
2144                                 alarm_event = VXGE_HW_SET_LEVEL(
2145                                                 VXGE_HW_EVENT_VPATH_ERR,
2146                                                 alarm_event);
2147                         }
2148
2149                         if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2150                                  & ~mask64) {
2151                                 sw_stats->error_stats.prc_quanta_size_err++;
2152
2153                                 alarm_event = VXGE_HW_SET_LEVEL(
2154                                         VXGE_HW_EVENT_VPATH_ERR,
2155                                         alarm_event);
2156                         }
2157
2158                         if (!skip_alarms) {
2159                                 writeq(VXGE_HW_INTR_MASK_ALL,
2160                                         &vp_reg->prc_alarm_reg);
2161                                 alarm_event = VXGE_HW_SET_LEVEL(
2162                                                 VXGE_HW_EVENT_ALARM_CLEARED,
2163                                                 alarm_event);
2164                         }
2165                 }
2166         }
2167 out:
2168         hldev->stats.sw_dev_err_stats.vpath_alarms++;
2169 out2:
2170         if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2171                 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2172                 return VXGE_HW_OK;
2173
2174         __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2175
2176         if (alarm_event == VXGE_HW_EVENT_SERR)
2177                 return VXGE_HW_ERR_CRITICAL;
2178
2179         return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2180                 VXGE_HW_ERR_SLOT_FREEZE :
2181                 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2182                 VXGE_HW_ERR_VPATH;
2183 }
2184
2185 /*
2186  * vxge_hw_vpath_alarm_process - Process Alarms.
2187  * @vpath: Virtual Path.
2188  * @vp: Virtual Path handle.
2189  *
2190  * Process vpath alarms.
2191  *
2192  */
2193 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2194                         struct __vxge_hw_vpath_handle *vp,
2195                         u32 skip_alarms)
2196 {
2197         enum vxge_hw_status status = VXGE_HW_OK;
2198
2199         if (vp == NULL) {
2200                 status = VXGE_HW_ERR_INVALID_HANDLE;
2201                 goto exit;
2202         }
2203
2204         status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2205 exit:
2206         return status;
2207 }
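/*
 * Example (illustrative sketch): invoking alarm processing from an alarm
 * interrupt handler.  Passing skip_alarms == 0 lets the helper clear the
 * alarm registers it services; handle_critical_error() is a hypothetical
 * stand-in for whatever recovery (typically a device reset) the caller
 * performs when VXGE_HW_ERR_CRITICAL is returned.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_alarm_process(vp, 0);
 *	if (status == VXGE_HW_ERR_CRITICAL)
 *		handle_critical_error();
 */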
2208
2209 /**
2210  * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2211  *                            alarms
2212  * @vp: Virtual Path handle.
2213  * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2214  *             interrupts (can be repeated). If the fifo or ring is not enabled
2215  *             the MSIX vector for it should be set to 0
2216  * @alarm_msix_id: MSIX vector for alarm.
2217  *
2218  * This API associates the given MSIX vector numbers with the four TIM
2219  * interrupts and the alarm interrupt.
2220  */
2221 void
2222 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2223                        int alarm_msix_id)
2224 {
2225         u64 val64;
2226         struct __vxge_hw_virtualpath *vpath = vp->vpath;
2227         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2228         u32 vp_id = vp->vpath->vp_id;
2229
2230         val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2231                   (vp_id * 4) + tim_msix_id[0]) |
2232                  VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2233                   (vp_id * 4) + tim_msix_id[1]);
2234
2235         writeq(val64, &vp_reg->interrupt_cfg0);
2236
2237         writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2238                         (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2239                         &vp_reg->interrupt_cfg2);
2240
2241         if (vpath->hldev->config.intr_mode ==
2242                                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2243                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2244                                 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2245                                 0, 32), &vp_reg->one_shot_vect1_en);
2246         }
2247
2248         if (vpath->hldev->config.intr_mode ==
2249                 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2250                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2251                                 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2252                                 0, 32), &vp_reg->one_shot_vect2_en);
2253
2254                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2255                                 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2256                                 0, 32), &vp_reg->one_shot_vect3_en);
2257         }
2258
2259         return;
2260 }
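/*
 * Example (illustrative sketch): distributing MSI-X vectors for one vpath
 * before enabling interrupts.  The vector numbers below are placeholders; a
 * real caller derives them from its own pci_enable_msix() layout, and the
 * unused fifo/ring slots are left at 0 as required by the comment above.
 * The matching vxge_hw_vpath_msix_unmask() calls are omitted because the ids
 * they take depend on that overall layout.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *	int alarm_msix_id = 2;
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
 */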
2261
2262 /**
2263  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2264  * @vp: Virtual Path handle.
2265  * @msix_id:  MSIX ID
2266  *
2267  * The function masks the msix interrupt for the given msix_id
2268  *
2269  * The function does not return a value; the msix index is not range
2270  * checked, so VXGE_HW_ERR_WRONG_IRQ is never reported.
2271  *
2272  * See also: vxge_hw_vpath_msix_unmask()
2273  */
2274 void
2275 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2276 {
2277         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2278         __vxge_hw_pio_mem_write32_upper(
2279                 (u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2280                 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2281
2282         return;
2283 }
2284
2285 /**
2286  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2287  * @vp: Virtual Path handle.
2288  * @msix_id:  MSIX ID
2289  *
2290  * The function clears the msix interrupt for the given msix_id
2291  *
2292  * The function does not return a value; the msix index is not range
2293  * checked, so VXGE_HW_ERR_WRONG_IRQ is never reported.
2294  *
2295  * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
2296  */
2297 void
2298 vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2299 {
2300         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301         if (hldev->config.intr_mode ==
2302                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2303                 __vxge_hw_pio_mem_write32_upper(
2304                         (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2305                                 &hldev->common_reg->
2306                                         clr_msix_one_shot_vec[msix_id%4]);
2307         } else {
2308                 __vxge_hw_pio_mem_write32_upper(
2309                         (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2310                                 &hldev->common_reg->
2311                                         clear_msix_mask_vect[msix_id%4]);
2312         }
2313
2314         return;
2315 }
2316
2317 /**
2318  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2319  * @vp: Virtual Path handle.
2320  * @msix_id:  MSIX ID
2321  *
2322  * The function unmasks the msix interrupt for the given msix_id
2323  *
2324  * The function does not return a value; the msix index is not range
2325  * checked, so VXGE_HW_ERR_WRONG_IRQ is never reported.
2326  *
2327  * See also: vxge_hw_vpath_msix_mask()
2328  */
2329 void
2330 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2331 {
2332         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2333         __vxge_hw_pio_mem_write32_upper(
2334                         (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2335                         &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2336
2337         return;
2338 }
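/*
 * Example (illustrative sketch): a typical mask/clear pattern for an MSI-X
 * ring vector.  struct my_ring, its vp/msix_id/napi members and the handler
 * name are hypothetical caller-side constructs; the point is only the order
 * of the calls: mask in the hard irq handler, defer the work, and clear (or
 * unmask, in non one-shot mode) once the deferred work has drained the ring.
 *
 *	static irqreturn_t my_ring_msix_handle(int irq, void *dev_id)
 *	{
 *		struct my_ring *ring = dev_id;
 *
 *		vxge_hw_vpath_msix_mask(ring->vp, ring->msix_id);
 *		napi_schedule(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	and, once the poll routine has finished:
 *
 *	vxge_hw_vpath_msix_clear(ring->vp, ring->msix_id);
 */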
2339
2340 /**
2341  * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2342  * @vp: Virtual Path handle.
2343  *
2344  * The function masks all msix interrupts for the given vpath
2345  *
2346  */
2347 void
2348 vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2349 {
2350
2351         __vxge_hw_pio_mem_write32_upper(
2352                 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2353                 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2354
2355         return;
2356 }
2357
2358 /**
2359  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2360  * @vp: Virtual Path handle.
2361  *
2362  * Mask Tx and Rx vpath interrupts.
2363  *
2364  * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2365  */
2366 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2367 {
2368         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2369         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2370         u64     val64;
2371         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2372
2373         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2374                 tim_int_mask1, vp->vpath->vp_id);
2375
2376         val64 = readq(&hldev->common_reg->tim_int_mask0);
2377
2378         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2379                 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2380                 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2381                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2382                         &hldev->common_reg->tim_int_mask0);
2383         }
2384
2385         val64 = readl(&hldev->common_reg->tim_int_mask1);
2386
2387         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2388                 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2389                 __vxge_hw_pio_mem_write32_upper(
2390                         (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2391                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2392                         &hldev->common_reg->tim_int_mask1);
2393         }
2394
2395         return;
2396 }
2397
2398 /**
2399  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2400  * @vp: Virtual Path handle.
2401  *
2402  * Unmask Tx and Rx vpath interrupts.
2403  *
2404  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2405  */
2406 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2407 {
2408         u64     tim_int_mask0[4] = {[0 ...3] = 0};
2409         u32     tim_int_mask1[4] = {[0 ...3] = 0};
2410         u64     val64;
2411         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2412
2413         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2414                 tim_int_mask1, vp->vpath->vp_id);
2415
2416         val64 = readq(&hldev->common_reg->tim_int_mask0);
2417
2418         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2419            (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2420                 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2421                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2422                         &hldev->common_reg->tim_int_mask0);
2423         }
2424
2425         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2426            (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2427                 __vxge_hw_pio_mem_write32_upper(
2428                         (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2429                           tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2430                         &hldev->common_reg->tim_int_mask1);
2431         }
2432
2433         return;
2434 }
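/*
 * Example (illustrative sketch): the INTA variants are used as a pair around
 * deferred processing, for example masking in the ISR and unmasking once the
 * rx/tx completion work is done.  The ring, fifo and Tx completion arguments
 * in the middle are assumed caller context and only stand in for the
 * caller's poll logic.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	vxge_hw_vpath_poll_rx(ring);
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, nr_skb, &more);
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */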
2435
2436 /**
2437  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2438  * descriptors and process the same.
2439  * @ring: Handle to the ring object used for receive
2440  *
2441  * The function polls the Rx for the completed descriptors and calls
2442  * the driver via the supplied completion callback.
2443  *
2444  * Returns: VXGE_HW_OK, if the polling is completed successfully.
2445  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2446  * descriptors available which are yet to be processed.
2447  *
2448  * See also: vxge_hw_vpath_poll_tx()
2449  */
2450 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2451 {
2452         u8 t_code;
2453         enum vxge_hw_status status = VXGE_HW_OK;
2454         void *first_rxdh;
2455         u64 val64 = 0;
2456         int new_count = 0;
2457
2458         ring->cmpl_cnt = 0;
2459
2460         status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2461         if (status == VXGE_HW_OK)
2462                 ring->callback(ring, first_rxdh,
2463                         t_code, ring->channel.userdata);
2464
2465         if (ring->cmpl_cnt != 0) {
2466                 ring->doorbell_cnt += ring->cmpl_cnt;
2467                 if (ring->doorbell_cnt >= ring->rxds_limit) {
2468                         /*
2469                          * Each RxD is of 4 qwords, update the number of
2470                          * qwords replenished
2471                          */
2472                         new_count = (ring->doorbell_cnt * 4);
2473
2474                         /* For each block add 4 more qwords */
2475                         ring->total_db_cnt += ring->doorbell_cnt;
2476                         if (ring->total_db_cnt >= ring->rxds_per_block) {
2477                                 new_count += 4;
2478                                 /* Reset total count */
2479                                 ring->total_db_cnt %= ring->rxds_per_block;
2480                         }
2481                         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2482                                 &ring->vp_reg->prc_rxd_doorbell);
2483                         val64 =
2484                           readl(&ring->common_reg->titan_general_int_status);
2485                         ring->doorbell_cnt = 0;
2486                 }
2487         }
2488
2489         return status;
2490 }
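/*
 * Example (illustrative sketch): driving vxge_hw_vpath_poll_rx() from a NAPI
 * poll routine.  The ring handle is the one associated with the vpath at
 * open time and the per-descriptor work happens in the completion callback
 * registered then; napi is a hypothetical struct napi_struct owned by the
 * caller.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_poll_rx(ring);
 *	if (status == VXGE_HW_OK)
 *		napi_complete(napi);
 */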
2491
2492 /**
2493  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2494  * the same.
2495  * @fifo: Handle to the fifo object used for non offload send
2496  * @skb_ptr: Array in which completed skbs are returned for the caller to free
2497  * @nr_skb: Number of entries available in @skb_ptr
2498  * @more: Set by the completion callback when further completions remain
2499  *
2500  * The function polls the Tx for the completed descriptors and calls the
2501  * driver via the supplied completion callback.
2502  * Returns: VXGE_HW_OK, if the polling is completed successfully.
2503  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed descriptors
2504  * yet to be processed.  See also: vxge_hw_vpath_poll_rx()
2505  */
2506 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2507                                         struct sk_buff ***skb_ptr, int nr_skb,
2508                                         int *more)
2509 {
2510         enum vxge_hw_fifo_tcode t_code;
2511         void *first_txdlh;
2512         enum vxge_hw_status status = VXGE_HW_OK;
2513         struct __vxge_hw_channel *channel;
2514
2515         channel = &fifo->channel;
2516
2517         status = vxge_hw_fifo_txdl_next_completed(fifo,
2518                                 &first_txdlh, &t_code);
2519         if (status == VXGE_HW_OK)
2520                 if (fifo->callback(fifo, first_txdlh, t_code,
2521                         channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2522                         status = VXGE_HW_COMPLETIONS_REMAIN;
2523
2524         return status;
2525 }
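/*
 * Example (illustrative sketch): draining Tx completions.  The completion
 * callback is expected to advance *skb_ptr past each skb it hands back, so
 * everything between the start of the array and the returned pointer can be
 * freed, and to set more while further completions remain.  The array size
 * and the use of dev_kfree_skb_irq() are assumptions of this sketch.
 *
 *	struct sk_buff *completed[128];
 *	struct sk_buff **skb_ptr, **tmp;
 *	int more;
 *
 *	do {
 *		more = 0;
 *		skb_ptr = completed;
 *		vxge_hw_vpath_poll_tx(fifo, &skb_ptr, 128, &more);
 *		for (tmp = completed; tmp != skb_ptr; tmp++)
 *			dev_kfree_skb_irq(*tmp);
 *	} while (more);
 */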