[ethernet] Move Ethernet MAC address checking routines to ethernet.h
[people/meteger/gpxe.git] / src / drivers / net / vxge / vxge_config.c
1 /*
2  * vxge-config.c: gPXE driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
3  *              Virtualized Server Adapter.
4  *
5  * Copyright(c) 2002-2010 Neterion Inc.
6  *
7  * This software may be used and distributed according to the terms of
8  * the GNU General Public License (GPL), incorporated herein by
9  * reference.  Drivers based on or derived from this code fall under
10  * the GPL and must retain the authorship, copyright and license
11  * notice.
12  *
13  */
14
15 FILE_LICENCE(GPL2_ONLY);
16
17 #include <stdlib.h>
18 #include <stdio.h>
19 #include <gpxe/malloc.h>
20 #include <gpxe/iobuf.h>
21 #include <gpxe/ethernet.h>
22 #include <byteswap.h>
23
24 #include "vxge_traffic.h"
25 #include "vxge_config.h"
26 #include "vxge_main.h"
27
28 void
29 vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev)
30 {
31         u64 val64;
32         struct __vxge_hw_virtualpath *vpath;
33         struct vxge_hw_vpath_reg __iomem *vp_reg;
34
35         vpath = &hldev->virtual_path;
36         vp_reg = vpath->vp_reg;
37
38         val64 = readq(&vp_reg->rxmac_vcfg0);
39         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40         writeq(val64, &vp_reg->rxmac_vcfg0);
41         val64 = readq(&vp_reg->rxmac_vcfg0);
42         return;
43 }
44
45 enum vxge_hw_status
46 vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
47                 u64 vp_id,
48                 u32 action,
49                 u32 offset,
50                 u64 data0,
51                 u64 data1)
52 {
53         enum vxge_hw_status status = VXGE_HW_OK;
54         u64 val64;
55         u32 fw_memo = VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO;
56
57         struct vxge_hw_vpath_reg __iomem *vp_reg;
58
59         vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
60
61         writeq(data0, &vp_reg->rts_access_steer_data0);
62         writeq(data1, &vp_reg->rts_access_steer_data1);
63
64         wmb();
65
66         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
67                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
68                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
69                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE;
70
71         writeq(val64, &vp_reg->rts_access_steer_ctrl);
72
73         wmb();
74
75         status = __vxge_hw_device_register_poll(
76                         &vp_reg->rts_access_steer_ctrl,
77                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
78                         WAIT_FACTOR *
79                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
80
81         if (status != VXGE_HW_OK)
82                 return VXGE_HW_FAIL;
83
84         val64 = readq(&vp_reg->rts_access_steer_ctrl);
85
86         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
87                 status = VXGE_HW_OK;
88         else
89                 status = VXGE_HW_FAIL;
90
91         return status;
92 }
93
94 /* Get function mode */
95 enum vxge_hw_status
96 vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode)
97 {
98         enum vxge_hw_status status = VXGE_HW_OK;
99         struct vxge_hw_vpath_reg __iomem *vp_reg;
100         u64 val64;
101         int vp_id;
102
103         /* get the first vpath number assigned to this function */
104         vp_id = hldev->first_vp_id;
105
106         vp_reg = (struct vxge_hw_vpath_reg __iomem *)hldev->vpath_reg[vp_id];
107
108         status = vxge_hw_set_fw_api(hldev, vp_id,
109                                 VXGE_HW_FW_API_GET_FUNC_MODE, 0, 0, 0);
110
111         if (status == VXGE_HW_OK) {
112                 val64 = readq(&vp_reg->rts_access_steer_data0);
113                 *func_mode = VXGE_HW_GET_FUNC_MODE_VAL(val64);
114         }
115
116         return status;
117 }
118
/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
	u16 cmd = 0;
	struct pci_device *pdev = hldev->pdev;

	vxge_trace();

	/* Set the PErr Response bit and SERR in the PCI command register.
	 * 0x140 = parity-error response (0x040) | SERR# enable (0x100)
	 * per the PCI command register layout. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd |= 0x140;
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	return;
}
139
140 /*
141  * __vxge_hw_device_register_poll
142  * Will poll certain register for specified amount of time.
143  * Will poll until masked bit is not cleared.
144  */
145 enum vxge_hw_status
146 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
147 {
148         u64 val64;
149         u32 i = 0;
150         enum vxge_hw_status ret = VXGE_HW_FAIL;
151
152         udelay(10);
153
154         do {
155                 val64 = readq(reg);
156                 if (!(val64 & mask))
157                         return VXGE_HW_OK;
158                 udelay(100);
159         } while (++i <= 9);
160
161         i = 0;
162         do {
163                 val64 = readq(reg);
164                 if (!(val64 & mask))
165                         return VXGE_HW_OK;
166                 udelay(1000);
167         } while (++i <= max_millis);
168
169         return ret;
170 }
171
172  /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
173  * in progress
174  * This routine checks the vpath reset in progress register is turned zero
175  */
176 enum vxge_hw_status
177 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
178 {
179         enum vxge_hw_status status;
180
181         vxge_trace();
182
183         status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
184                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
185                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
186         return status;
187 }
188
189 /*
190  * __vxge_hw_device_toc_get
191  * This routine sets the swapper and reads the toc pointer and returns the
192  * memory mapped address of the toc
193  */
194 struct vxge_hw_toc_reg __iomem *
195 __vxge_hw_device_toc_get(void __iomem *bar0)
196 {
197         u64 val64;
198         struct vxge_hw_toc_reg __iomem *toc = NULL;
199         enum vxge_hw_status status;
200
201         struct vxge_hw_legacy_reg __iomem *legacy_reg =
202                 (struct vxge_hw_legacy_reg __iomem *)bar0;
203
204         status = __vxge_hw_legacy_swapper_set(legacy_reg);
205         if (status != VXGE_HW_OK)
206                 goto exit;
207
208         val64 = readq(&legacy_reg->toc_first_pointer);
209         toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
210 exit:
211         return toc;
212 }
213
/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 i;
	enum vxge_hw_status status = VXGE_HW_OK;

	/* The legacy register space sits at the very start of BAR0 */
	hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

	/* Programs the swapper and locates the table of contents */
	hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
	if (hldev->toc_reg  == NULL) {
		status = VXGE_HW_FAIL;
		goto exit;
	}

	/* Each ToC entry holds a byte offset from the start of BAR0 */
	val64 = readq(&hldev->toc_reg->toc_common_pointer);
	hldev->common_reg =
	(struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

	val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
	hldev->mrpcim_reg =
		(struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

	for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
		hldev->srpcim_reg[i] =
			(struct vxge_hw_srpcim_reg __iomem *)
				(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
		hldev->vpmgmt_reg[i] =
		(struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
	}

	for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
		val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
		hldev->vpath_reg[i] =
			(struct vxge_hw_vpath_reg __iomem *)
				(hldev->bar0 + val64);
	}

	/* KDFC (doorbell FIFO) location depends on its initial BIR field;
	 * only BIR 0 (BAR0-relative) is handled here */
	val64 = readq(&hldev->toc_reg->toc_kdfc);

	switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
	case 0:
		hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
			VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
		break;
	default:
		break;
	}

	/* Wait for any in-progress vpath resets to finish */
	status = __vxge_hw_device_vpath_reset_in_prog_check(
			(u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
	return status;
}
279
280 /*
281  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
282  * This routine returns the Access Rights of the driver
283  */
284 static u32
285 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
286 {
287         u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
288
289         switch (host_type) {
290         case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
291                 if (func_id == 0) {
292                         access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
293                                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
294                 }
295                 break;
296         case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
297                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
298                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
299                 break;
300         case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
301                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
302                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
303                 break;
304         case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
305         case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
306         case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
307                 break;
308         case VXGE_HW_SR_VH_FUNCTION0:
309         case VXGE_HW_VH_NORMAL_FUNCTION:
310                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
311                 break;
312         }
313
314         return access_rights;
315 }
316
317 /*
318  * __vxge_hw_device_host_info_get
319  * This routine returns the host type assignments
320  */
321 void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
322 {
323         u64 val64;
324         u32 i;
325
326         val64 = readq(&hldev->common_reg->host_type_assignments);
327
328         hldev->host_type =
329            (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
330
331         hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
332
333         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
334
335                 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
336                         continue;
337
338                 hldev->func_id =
339                         __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
340
341                 hldev->access_rights = __vxge_hw_device_access_rights_get(
342                         hldev->host_type, hldev->func_id);
343
344                 hldev->first_vp_id = i;
345                 break;
346         }
347
348         return;
349 }
350
/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status
vxge_hw_device_hw_info_get(void __iomem *bar0,
				struct vxge_hw_device_hw_info *hw_info)
{
	u32 i;
	u64 val64;
	struct vxge_hw_toc_reg __iomem *toc;
	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
	struct vxge_hw_common_reg __iomem *common_reg;
	struct vxge_hw_vpath_reg __iomem *vpath_reg;
	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
	enum vxge_hw_status status;

	vxge_trace();

	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

	/* Locate the table of contents (programs the swapper too) */
	toc = __vxge_hw_device_toc_get(bar0);
	if (toc == NULL) {
		status = VXGE_HW_ERR_CRITICAL;
		goto exit;
	}

	val64 = readq(&toc->toc_common_pointer);
	common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

	/* Wait until any in-progress vpath resets complete */
	status = __vxge_hw_device_vpath_reset_in_prog_check(
		(u64 __iomem *)&common_reg->vpath_rst_in_prog);
	if (status != VXGE_HW_OK)
		goto exit;

	hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

	val64 = readq(&common_reg->host_type_assignments);

	hw_info->host_type =
	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	/* First pass: query function-wide information (function id, FW
	 * version, card info) via the first assigned vpath only — hence
	 * the break at the end of the loop body */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
				(bar0 + val64);

		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
			hw_info->func_id) &
			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

			/* Privileged function: clear the FW memo mask
			 * before issuing firmware queries */
			val64 = readq(&toc->toc_mrpcim_pointer);

			mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
					(bar0 + val64);

			writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
			wmb();
		}

		val64 = readq(&toc->toc_vpath_pointer[i]);

		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status = __vxge_hw_vpath_fw_ver_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		status = __vxge_hw_vpath_card_info_get(vpath_reg, hw_info);
		if (status != VXGE_HW_OK)
			goto exit;

		break;
	}

	/* Second pass: read the first MAC address (and mask) of every
	 * assigned vpath */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
			continue;

		val64 = readq(&toc->toc_vpath_pointer[i]);
		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

		status =  __vxge_hw_vpath_addr_get(vpath_reg,
				hw_info->mac_addrs[i],
				hw_info->mac_addr_masks[i]);
		if (status != VXGE_HW_OK)
			goto exit;
	}
exit:
	return status;
}
451
452 /*
453  * vxge_hw_device_initialize - Initialize Titan device.
454  * Initialize Titan device. Note that all the arguments of this public API
455  * are 'IN', including @hldev. Driver cooperates with
456  * OS to find new Titan device, locate its PCI and memory spaces.
457  *
458  * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
459  * to enable the latter to perform Titan hardware initialization.
460  */
461 enum vxge_hw_status
462 vxge_hw_device_initialize(
463         struct __vxge_hw_device **devh,
464         void *bar0,
465         struct pci_device *pdev,
466         u8 titan1)
467 {
468         struct __vxge_hw_device *hldev = NULL;
469         enum vxge_hw_status status = VXGE_HW_OK;
470
471         vxge_trace();
472
473         hldev = (struct __vxge_hw_device *)
474                         zalloc(sizeof(struct __vxge_hw_device));
475         if (hldev == NULL) {
476                 vxge_debug(VXGE_ERR, "hldev allocation failed\n");
477                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
478                 goto exit;
479         }
480
481         hldev->magic = VXGE_HW_DEVICE_MAGIC;
482
483         hldev->bar0 = bar0;
484         hldev->pdev = pdev;
485         hldev->titan1 = titan1;
486
487         __vxge_hw_device_pci_e_init(hldev);
488
489         status = __vxge_hw_device_reg_addr_get(hldev);
490         if (status != VXGE_HW_OK) {
491                 vxge_debug(VXGE_ERR, "%s:%d __vxge_hw_device_reg_addr_get "
492                         "failed\n", __func__, __LINE__);
493                 vxge_hw_device_terminate(hldev);
494                 goto exit;
495         }
496
497         __vxge_hw_device_host_info_get(hldev);
498
499         *devh = hldev;
500 exit:
501         return status;
502 }
503
/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device and release the device object allocated by
 * vxge_hw_device_initialize().
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
	vxge_trace();

	/* Catch attempts to terminate a device that was never
	 * successfully initialized (or was already terminated) */
	assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

	/* Poison the magic so a dangling pointer is detectable */
	hldev->magic = VXGE_HW_DEVICE_DEAD;
	free(hldev);
}
518
/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from reserve array to work array.
 * Returns VXGE_HW_OK, or VXGE_HW_ERR_OUT_OF_MEMORY if the I/O buffers
 * could not all be allocated (any partial allocation is rolled back).
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
	struct __vxge_hw_device *hldev;
	struct vxge_hw_ring_rxd_1 *rxd;
	enum vxge_hw_status status = VXGE_HW_OK;
	u8 offset = 0;
	struct __vxge_hw_ring_block *block;
	u8 i, iob_off;

	vxge_trace();

	hldev = ring->vpathh->hldev;
	/*
	 * We allocate all the dma buffers first and then share the
	 * these buffers among the all rx descriptors in the block.
	 */
	for (i = 0; i < ARRAY_SIZE(ring->iobuf); i++) {
		ring->iobuf[i] = alloc_iob(VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
		if (!ring->iobuf[i]) {
			/* Roll back the buffers allocated so far */
			while (i) {
				free_iob(ring->iobuf[--i]);
				ring->iobuf[i] = NULL;
			}
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto iobuf_err;
		}
	}

	/* Attach a buffer to every descriptor in the (single) RxD block
	 * and post it to the hardware */
	for (offset = 0; offset < VXGE_HW_MAX_RXDS_PER_BLOCK_1; offset++) {

		rxd = &ring->rxdl->rxd[offset];
		/* NOTE(review): the last descriptor gets the dedicated
		 * buffer at index VXGE_HW_RING_BUF_PER_BLOCK — this assumes
		 * ring->iobuf holds VXGE_HW_RING_BUF_PER_BLOCK + 1 entries;
		 * confirm against the array declaration in the header */
		if (offset == (VXGE_HW_MAX_RXDS_PER_BLOCK_1 - 1))
			iob_off = VXGE_HW_RING_BUF_PER_BLOCK;
		else
			iob_off = offset % ring->buf_per_block;

		rxd->control_0 = rxd->control_1 = 0;
		vxge_hw_ring_rxd_1b_set(rxd, ring->iobuf[iob_off],
				VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));

		vxge_hw_ring_rxd_post(ring, rxd);
	}
	/* linking the block to itself as we use only one rx block */
	block = ring->rxdl;
	block->reserved_2_pNext_RxD_block = (unsigned long) block;
	block->pNext_RxD_Blk_physical = (u64)virt_to_bus(block);

	ring->rxd_offset = 0;
iobuf_err:
	return status;
}
575
576 /*
577  * __vxge_hw_ring_create - Create a Ring
578  * This function creates Ring and initializes it.
579  *
580  */
581 enum vxge_hw_status
582 __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
583                       struct __vxge_hw_ring *ring)
584 {
585         enum vxge_hw_status status = VXGE_HW_OK;
586         struct __vxge_hw_device *hldev;
587         u32 vp_id;
588
589         vxge_trace();
590
591         hldev = vpath->hldev;
592         vp_id = vpath->vp_id;
593
594         ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
595                         sizeof(struct __vxge_hw_ring_block));
596         if (!ring->rxdl) {
597                 vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
598                                 __func__, __LINE__);
599                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
600                 goto exit;
601         }
602         ring->rxd_offset = 0;
603         ring->vpathh = vpath;
604         ring->buf_per_block = VXGE_HW_RING_BUF_PER_BLOCK;
605         ring->rx_poll_weight = VXGE_HW_RING_RX_POLL_WEIGHT;
606         ring->vp_id = vp_id;
607         ring->vp_reg = vpath->vp_reg;
608         ring->common_reg = hldev->common_reg;
609
610         ring->rxd_qword_limit = VXGE_HW_RING_RXD_QWORD_LIMIT;
611
612         status = vxge_hw_ring_replenish(ring);
613         if (status != VXGE_HW_OK) {
614                 __vxge_hw_ring_delete(ring);
615                 goto exit;
616         }
617 exit:
618         return status;
619 }
620
621 /*
622  * __vxge_hw_ring_delete - Removes the ring
623  * This function freeup the memory pool and removes the ring
624  */
625 enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
626 {
627         u8 i;
628
629         vxge_trace();
630
631         for (i = 0; (i < ARRAY_SIZE(ring->iobuf)) && ring->iobuf[i]; i++) {
632                 free_iob(ring->iobuf[i]);
633                 ring->iobuf[i] = NULL;
634         }
635
636         if (ring->rxdl) {
637                 free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
638                 ring->rxdl = NULL;
639         }
640         ring->rxd_offset = 0;
641
642         return VXGE_HW_OK;
643 }
644
/*
 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 * Set the swapper bits appropriately for the legacy section.
 */
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	/* The fixed-pattern feedback register reveals how the host
	 * currently sees the device's byte ordering */
	val64 = readq(&legacy_reg->toc_swapper_fb);

	wmb();

	switch (val64) {

	case VXGE_HW_SWAPPER_INITIAL_VALUE:
		/* Already reads back correctly: nothing to program */
		return status;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
		/* Enable both byte swap and bit flip, reads and writes */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;

	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
		/* Byte swapping only */
		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_rd_swap_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
			&legacy_reg->pifm_wr_swap_en);
		break;

	case VXGE_HW_SWAPPER_BIT_FLIPPED:
		/* Bit flipping only */
		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_rd_flip_en);
		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
			&legacy_reg->pifm_wr_flip_en);
		break;
	}

	wmb();

	/* Verify: the feedback register must now read back its
	 * documented initial value */
	val64 = readq(&legacy_reg->toc_swapper_fb);
	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
		status = VXGE_HW_ERR_SWAPPER_CTRL;

	return status;
}
700
/*
 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
 * Set the swapper bits appropriately for the vpath.
 */
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	vxge_trace();

	/* Only little-endian hosts need the vpath byte swapper enabled;
	 * on big-endian builds this function is a no-op */
#if (__BYTE_ORDER != __BIG_ENDIAN)
	u64 val64;

	val64 = readq(&vpath_reg->vpath_general_cfg1);
	wmb();
	val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
	writeq(val64, &vpath_reg->vpath_general_cfg1);
	wmb();
#endif
	return VXGE_HW_OK;
}
721
722 /*
723  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
724  * Set the swapper bits appropriately for the vpath.
725  */
726 enum vxge_hw_status
727 __vxge_hw_kdfc_swapper_set(
728         struct vxge_hw_legacy_reg __iomem *legacy_reg,
729         struct vxge_hw_vpath_reg __iomem *vpath_reg)
730 {
731         u64 val64;
732
733         vxge_trace();
734
735         val64 = readq(&legacy_reg->pifm_wr_swap_en);
736
737         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
738                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
739                 wmb();
740
741                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
742                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
743                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
744
745                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
746                 wmb();
747         }
748
749         return VXGE_HW_OK;
750 }
751
752 /*
753  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
754  */
755 enum vxge_hw_status
756 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
757 {
758         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
759         enum vxge_hw_status status = VXGE_HW_OK;
760         int i = 0, j = 0;
761
762         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
763                 if (!((vpath_mask) & vxge_mBIT(i)))
764                         continue;
765                 vpmgmt_reg = hldev->vpmgmt_reg[i];
766                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
767                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
768                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
769                                 return VXGE_HW_FAIL;
770                 }
771         }
772         return status;
773 }
774
775 /*
776  * __vxge_hw_fifo_create - Create a FIFO
777  * This function creates FIFO and initializes it.
778  */
779 enum vxge_hw_status
780 __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
781                         struct __vxge_hw_fifo *fifo)
782 {
783         enum vxge_hw_status status = VXGE_HW_OK;
784
785         vxge_trace();
786
787         fifo->vpathh = vpath;
788         fifo->depth = VXGE_HW_FIFO_TXD_DEPTH;
789         fifo->hw_offset = fifo->sw_offset = 0;
790         fifo->nofl_db = vpath->nofl_db;
791         fifo->vp_id = vpath->vp_id;
792         fifo->vp_reg = vpath->vp_reg;
793         fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
794                                 + VXGE_HW_VPATH_INTR_TX;
795
796         fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
797                                 * fifo->depth, fifo->depth);
798         if (!fifo->txdl) {
799                 vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
800                                 __func__, __LINE__);
801                 return VXGE_HW_ERR_OUT_OF_MEMORY;
802         }
803         memset(fifo->txdl, 0, sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
804         return status;
805 }
806
807 /*
808  * __vxge_hw_fifo_delete - Removes the FIFO
809  * This function freeup the memory pool and removes the FIFO
810  */
811 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
812 {
813         vxge_trace();
814
815         if (fifo->txdl)
816                 free_dma(fifo->txdl,
817                         sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
818
819         fifo->txdl = NULL;
820         fifo->hw_offset = fifo->sw_offset = 0;
821
822         return VXGE_HW_OK;
823 }
824
/*
 * __vxge_hw_vpath_pci_read - Read the content of given address
 *                          in pci config space.
 * Read from the vpath pci config space.  On failure *val is zeroed.
 */
enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
			 u32 phy_func_0, u32 offset, u32 *val)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;

	/* Program the config-space offset to access; optionally redirect
	 * the access to physical function 0 */
	val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);

	if (phy_func_0)
		val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;

	writeq(val64, &vp_reg->pci_config_access_cfg1);
	wmb();
	/* Kick off the access request */
	writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
			&vp_reg->pci_config_access_cfg2);
	wmb();

	/* Wait for the request bit to clear */
	status = __vxge_hw_device_register_poll(
			&vp_reg->pci_config_access_cfg2,
			VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->pci_config_access_status);

	if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
		status = VXGE_HW_FAIL;
		*val = 0;
	} else
		/* Extract the 32-bit result field from the status word */
		*val = (u32)vxge_bVALn(val64, 32, 32);
exit:
	return status;
}
866
867 /*
868  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
869  * Returns the function number of the vpath.
870  */
871 u32
872 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
873 {
874         u64 val64;
875
876         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
877
878         return
879          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
880 }
881
/*
 * __vxge_hw_read_rts_ds - Program RTS steering criteria
 */
static inline void
__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
				u64 dta_struct_sel)
{
	/* Clear any previous command, then load the data-structure
	 * selector into data0 (data1 zeroed) ready for a subsequent
	 * steering command */
	writeq(0, &vpath_reg->rts_access_steer_ctrl);
	wmb();
	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
	writeq(0, &vpath_reg->rts_access_steer_data1);
	wmb();
	return;
}
896
897 /*
898  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
899  * part number and product description.
900  */
901 enum vxge_hw_status
902 __vxge_hw_vpath_card_info_get(
903         struct vxge_hw_vpath_reg __iomem *vpath_reg,
904         struct vxge_hw_device_hw_info *hw_info)
905 {
906         u32 i, j;
907         u64 val64;
908         u64 data1 = 0ULL;
909         u64 data2 = 0ULL;
910         enum vxge_hw_status status = VXGE_HW_OK;
911         u8 *serial_number = hw_info->serial_number;
912         u8 *part_number = hw_info->part_number;
913         u8 *product_desc = hw_info->product_desc;
914
915         __vxge_hw_read_rts_ds(vpath_reg,
916                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
917
918         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
919                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
920                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
921                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
922                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
923                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
924
925         status = __vxge_hw_pio_mem_write64(val64,
926                                 &vpath_reg->rts_access_steer_ctrl,
927                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
928                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
929
930         if (status != VXGE_HW_OK)
931                 return status;
932
933         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
934
935         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
936                 data1 = readq(&vpath_reg->rts_access_steer_data0);
937                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
938
939                 data2 = readq(&vpath_reg->rts_access_steer_data1);
940                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
941                 status = VXGE_HW_OK;
942         } else
943                 *serial_number = 0;
944
945         __vxge_hw_read_rts_ds(vpath_reg,
946                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
947
948         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
949                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
950                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
951                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
952                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
953                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
954
955         status = __vxge_hw_pio_mem_write64(val64,
956                                 &vpath_reg->rts_access_steer_ctrl,
957                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
958                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
959
960         if (status != VXGE_HW_OK)
961                 return status;
962
963         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
964
965         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
966
967                 data1 = readq(&vpath_reg->rts_access_steer_data0);
968                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
969
970                 data2 = readq(&vpath_reg->rts_access_steer_data1);
971                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
972
973                 status = VXGE_HW_OK;
974
975         } else
976                 *part_number = 0;
977
978         j = 0;
979
980         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
981              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
982
983                 __vxge_hw_read_rts_ds(vpath_reg, i);
984
985                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
986                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
987                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
988                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
989                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
990                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
991
992                 status = __vxge_hw_pio_mem_write64(val64,
993                                 &vpath_reg->rts_access_steer_ctrl,
994                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
995                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
996
997                 if (status != VXGE_HW_OK)
998                         return status;
999
1000                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
1001
1002                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
1003
1004                         data1 = readq(&vpath_reg->rts_access_steer_data0);
1005                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
1006
1007                         data2 = readq(&vpath_reg->rts_access_steer_data1);
1008                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
1009
1010                         status = VXGE_HW_OK;
1011                 } else
1012                         *product_desc = 0;
1013         }
1014
1015         return status;
1016 }
1017
1018 /*
1019  * __vxge_hw_vpath_fw_ver_get - Get the fw version
1020  * Returns FW Version
1021  */
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
	struct vxge_hw_vpath_reg __iomem *vpath_reg,
	struct vxge_hw_device_hw_info *hw_info)
{
	u64 val64;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
	enum vxge_hw_status status = VXGE_HW_OK;

	/* Build a READ_ENTRY command against the FW_MEMO structure; the
	 * firmware returns version info in the two data registers. */
	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	/* Kick the command and poll for the strobe bit to clear. */
	status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);

	/* RMACJ_STATUS set means the command completed successfully. */
	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {

		/* data0 carries firmware version/date fields, data1 the
		 * flash version/date fields. */
		data1 = readq(&vpath_reg->rts_access_steer_data0);
		data2 = readq(&vpath_reg->rts_access_steer_data1);

		/* Unpack the firmware build date... */
		fw_date->day =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
						data1);
		fw_date->month =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
						data1);
		fw_date->year =
			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
						data1);

		/* ...and render it as "month/day/year". */
		snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
			fw_date->month, fw_date->day, fw_date->year);

		fw_version->major =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
		fw_version->minor =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
		fw_version->build =
		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);

		snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
		    fw_version->major, fw_version->minor, fw_version->build);

		/* Same unpacking for the flash image. */
		flash_date->day =
		  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
		flash_date->month =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
		flash_date->year =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);

		snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%d/%d/%d",
			flash_date->month, flash_date->day, flash_date->year);

		flash_version->major =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
		flash_version->minor =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
		flash_version->build =
		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);

		snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
			flash_version->major, flash_version->minor,
			flash_version->build);

		status = VXGE_HW_OK;

	} else
		status = VXGE_HW_FAIL;
exit:
	return status;
}
1109
1110 /*
1111  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
1112  *               from MAC address table.
1113  */
1114 enum vxge_hw_status
1115 __vxge_hw_vpath_addr_get(
1116         struct vxge_hw_vpath_reg *vpath_reg,
1117         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
1118 {
1119         u32 i;
1120         u64 val64;
1121         u64 data1 = 0ULL;
1122         u64 data2 = 0ULL;
1123         u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY;
1124         enum vxge_hw_status status = VXGE_HW_OK;
1125
1126         while (1) {
1127                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
1128                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
1129                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
1130                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
1131                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
1132
1133                 status = __vxge_hw_pio_mem_write64(val64,
1134                                         &vpath_reg->rts_access_steer_ctrl,
1135                                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
1136                                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
1137
1138                 if (status != VXGE_HW_OK)
1139                         break;
1140
1141                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
1142
1143                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
1144
1145                         data1 = readq(&vpath_reg->rts_access_steer_data0);
1146                         data2 = readq(&vpath_reg->rts_access_steer_data1);
1147
1148                         data1 =
1149                          VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1150                         data2 =
1151                          VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
1152                                                                 data2);
1153
1154                         for (i = ETH_ALEN; i > 0; i--) {
1155                                 macaddr[i-1] = (u8)(data1 & 0xFF);
1156                                 data1 >>= 8;
1157
1158                                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1159                                 data2 >>= 8;
1160                         }
1161                         if (is_valid_ether_addr(macaddr)) {
1162                                 status = VXGE_HW_OK;
1163                                 break;
1164                         }
1165                         action =
1166                           VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
1167                 } else
1168                         status = VXGE_HW_FAIL;
1169         }
1170
1171         return status;
1172 }
1173
1174 /*
1175  * __vxge_hw_vpath_mgmt_read
1176  * This routine reads the vpath_mgmt registers
1177  */
1178 static enum vxge_hw_status
1179 __vxge_hw_vpath_mgmt_read(
1180         struct __vxge_hw_virtualpath *vpath)
1181 {
1182         u32 i, mtu = 0, max_pyld = 0;
1183         u64 val64;
1184         enum vxge_hw_status status = VXGE_HW_OK;
1185
1186         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1187
1188                 val64 = readq(&vpath->vpmgmt_reg->
1189                                 rxmac_cfg0_port_vpmgmt_clone[i]);
1190                 max_pyld =
1191                         (u32)
1192                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
1193                         (val64);
1194                 if (mtu < max_pyld)
1195                         mtu = max_pyld;
1196         }
1197
1198         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
1199
1200         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
1201
1202         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
1203                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
1204         else
1205                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
1206
1207         return status;
1208 }
1209
1210 /*
1211  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
1212  * This routine checks the vpath_rst_in_prog register to see if
1213  * adapter completed the reset process for the vpath
1214  */
1215 enum vxge_hw_status
1216 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
1217 {
1218         enum vxge_hw_status status;
1219
1220         vxge_trace();
1221
1222         status = __vxge_hw_device_register_poll(
1223                         &vpath->hldev->common_reg->vpath_rst_in_prog,
1224                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
1225                                 1 << (16 - vpath->vp_id)),
1226                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
1227
1228         return status;
1229 }
1230
1231 /*
1232  * __vxge_hw_vpath_reset
1233  * This routine resets the vpath on the device
1234  */
1235 enum vxge_hw_status
1236 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
1237 {
1238         u64 val64;
1239         enum vxge_hw_status status = VXGE_HW_OK;
1240
1241         vxge_trace();
1242
1243         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
1244
1245         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
1246                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
1247
1248         return status;
1249 }
1250
1251 /*
1252  * __vxge_hw_vpath_prc_configure
1253  * This routine configures the prc registers of virtual path using the config
1254  * passed
1255  */
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	/* Disable RTI timer interrupts for this ring. */
	val64 = readq(&vp_reg->prc_cfg1);
	val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
	writeq(val64, &vp_reg->prc_cfg1);

	/* Enable doorbell mode and reprogram the RxD CRXDT/SPAT
	 * thresholds (clear the old 9-bit fields first). */
	val64 = readq(&vpath->vp_reg->prc_cfg6);
	val64 &= ~VXGE_HW_PRC_CFG6_RXD_CRXDT(0x1ff);
	val64 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
	val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
	val64 |= VXGE_HW_PRC_CFG6_RXD_CRXDT(0x3);
	val64 |= VXGE_HW_PRC_CFG6_RXD_SPAT(0xf);
	writeq(val64, &vpath->vp_reg->prc_cfg6);

	/* Point the PRC at the ring's RxD list: bus address in 8-byte
	 * units (hence the >> 3). */
	writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
			(u64)virt_to_bus(vpath->ringh.rxdl) >> 3),
			&vp_reg->prc_cfg5);

	/* One-buffer ring mode, RTH disabled, ring placed in service. */
	val64 = readq(&vp_reg->prc_cfg4);
	val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
	val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
	val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
			VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
	val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;

	writeq(val64, &vp_reg->prc_cfg4);
	return;
}
1294
1295 /*
1296  * __vxge_hw_vpath_kdfc_configure
1297  * This routine configures the kdfc registers of virtual path using the
1298  * config passed
1299  */
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u64 vpath_stride;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;
	/* Program the KDFC byte swapper before touching other KDFC regs. */
	status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->kdfc_drbl_triplet_total);

	/* Half of the doorbell triplet space is available to us. */
	vpath->max_kdfc_db =
		(u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
			val64+1)/2;

	/* All of it is assigned to the non-offload doorbell FIFO. */
	vpath->max_nofl_db = vpath->max_kdfc_db;

	val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
				(vpath->max_nofl_db*2)-1);

	writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);

	writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
		&vp_reg->kdfc_fifo_trpl_ctrl);

	/* FIFO 0: non-offload-only mode; byte swap is enabled on
	 * little-endian hosts only. */
	val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);

	val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
		   VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));

	val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
#if (__BYTE_ORDER != __BIG_ENDIAN)
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
#endif
		 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);

	writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
	writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
	wmb();
	vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);

	/* Locate this vpath's doorbell window within the KDFC area. */
	vpath->nofl_db =
		(struct __vxge_hw_non_offload_db_wrapper __iomem *)
		(hldev->kdfc + (vp_id *
		VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
					vpath_stride)));
exit:
	return status;
}
1359
1360 /*
1361  * __vxge_hw_vpath_mac_configure
1362  * This routine configures the mac of virtual path using the config passed
1363  */
1364 enum vxge_hw_status
1365 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev)
1366 {
1367         u64 val64;
1368         enum vxge_hw_status status = VXGE_HW_OK;
1369         struct __vxge_hw_virtualpath *vpath;
1370         struct vxge_hw_vpath_reg __iomem *vp_reg;
1371
1372         vxge_trace();
1373
1374         vpath = &hldev->virtual_path;
1375         vp_reg = vpath->vp_reg;
1376
1377         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
1378                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
1379
1380         val64 = readq(&vp_reg->rxmac_vcfg1);
1381
1382         val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
1383                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
1384
1385         writeq(val64, &vp_reg->rxmac_vcfg1);
1386         return status;
1387 }
1388
1389 /*
1390  * __vxge_hw_vpath_tim_configure
1391  * This routine configures the tim registers of virtual path using the config
1392  * passed
1393  */
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;
	vp_reg = vpath->vp_reg;

	/* Clear all TIM mapping/destination state first. */
	writeq((u64)0, &vp_reg->tim_dest_addr);
	writeq((u64)0, &vp_reg->tim_vpath_map);
	writeq((u64)0, &vp_reg->tim_bitmap);
	writeq((u64)0, &vp_reg->tim_remap);

	/* Associate the ring with this vpath's RX interrupt number. */
	writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
		(vp_id * VXGE_HW_MAX_INTR_PER_VP) +
		VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);

	val64 = readq(&vp_reg->tim_pci_cfg);
	val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
	writeq(val64, &vp_reg->tim_pci_cfg);

	/* TX configuration */
	/* Timer values are scaled by * 1000 / 272 before programming
	 * -- NOTE(review): scale factor taken as-is from the code;
	 * confirm against the Titan TIM documentation. */
	val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
			(VXGE_TTI_BTIMER_VAL * 1000) / 272);
	val64 |= (VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC |
			VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI |
			VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN);
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(TTI_TX_URANGE_A) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(TTI_TX_URANGE_B) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(TTI_TX_URANGE_C);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);

	/* TX utilisation-based event counts. */
	val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(TTI_TX_UFC_A) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(TTI_TX_UFC_B) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(TTI_TX_UFC_C) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(TTI_TX_UFC_D);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);

	val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL);
	val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
			(VXGE_TTI_LTIMER_VAL * 1000) / 272);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);

	/* RX configuration */
	val64 = VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
			(VXGE_RTI_BTIMER_VAL * 1000) / 272);
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(RTI_RX_URANGE_A) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(RTI_RX_URANGE_B) |
			VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(RTI_RX_URANGE_C);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);

	/* RX utilisation-based event counts. */
	val64 = VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(RTI_RX_UFC_A) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(RTI_RX_UFC_B) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(RTI_RX_UFC_C) |
			VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(RTI_RX_UFC_D);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);

	val64 = VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
			VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL);
	val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
			(VXGE_RTI_LTIMER_VAL * 1000) / 272);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);

	/* EINTA and BMAP interrupt timers are not used: zero them. */
	val64 = 0;
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
	writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);

	return status;
}
1474
1475 /*
1476  * __vxge_hw_vpath_initialize
1477  * This routine is the final phase of init which initializes the
1478  * registers of the vpath using the configuration passed.
1479  */
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
	u64 val64;
	u32 val32;
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg *vp_reg;

	vxge_trace();

	vpath = &hldev->virtual_path;

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}
	vp_reg = vpath->vp_reg;
	/* Program the legacy and per-vpath byte swappers before any
	 * other register programming. */
	status = __vxge_hw_legacy_swapper_set(hldev->legacy_reg);
	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);

	if (status != VXGE_HW_OK)
		goto exit;
	/* Record the (highest-numbered) virtual port available to this
	 * vpath. */
	val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (val64 & vxge_mBIT(i))
			vpath->vsport_number = i;
	}

	status = __vxge_hw_vpath_mac_configure(hldev);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);

	if (status != VXGE_HW_OK)
		goto exit;

	val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);

	/* Get MRRS value from device control */
	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);

	if (status == VXGE_HW_OK) {
		/* Mirror the PCIe max read request size into the
		 * read-DMA fill-threshold field. */
		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
		val64 &=
		    ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
		val64 |=
		    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);

		val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
	}

	/* Set the address boundary to 512 bytes and enable it
	 * unconditionally (even if the PCI read above failed). */
	val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
	val64 |=
	    VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
		    VXGE_HW_MAX_PAYLOAD_SIZE_512);

	val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
	writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);

exit:
	return status;
}
1555
1556 /*
1557  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
1558  * This routine is the initial phase of init which resets the vpath and
1559  * initializes the software support structures.
1560  */
enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
			struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	vxge_trace();

	if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
		status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
		goto exit;
	}

	/* Populate the software vpath structure. */
	vpath->vp_id = vp_id;
	vpath->vp_open = VXGE_HW_VP_OPEN;
	vpath->hldev = hldev;
	vpath->vp_reg = hldev->vpath_reg[vp_id];
	vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];

	/* Reset the hardware vpath and wait for completion. */
	__vxge_hw_vpath_reset(hldev, vp_id);

	status = __vxge_hw_vpath_reset_check(vpath);
	if (status != VXGE_HW_OK) {
		/* Reset never completed: wipe the partially-filled
		 * software state. */
		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
		goto exit;
	}

	/* Unmask timer interrupts for this vpath. */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
		hldev->tim_int_mask1, vp_id);

	status = __vxge_hw_vpath_initialize(hldev, vp_id);

	if (status != VXGE_HW_OK) {
		__vxge_hw_vp_terminate(hldev, vpath);
		goto exit;
	}

	/* Pick up max_mtu and link state from the vpmgmt registers. */
	status = __vxge_hw_vpath_mgmt_read(vpath);
exit:
	return status;
}
1602
1603 /*
1604  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
1605  * This routine closes all channels it opened and freeup memory
1606  */
1607 void
1608 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
1609                         struct __vxge_hw_virtualpath *vpath)
1610 {
1611         vxge_trace();
1612
1613         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
1614                 return;
1615
1616         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
1617                 hldev->tim_int_mask1, vpath->vp_id);
1618
1619         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
1620 }
1621
1622 /*
1623  * vxge_hw_vpath_mtu_set - Set MTU.
1624  * Set new MTU value. Example, to use jumbo frames:
1625  * vxge_hw_vpath_mtu_set(my_device, 9600);
1626  */
1627 enum vxge_hw_status
1628 vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu)
1629 {
1630         u64 val64;
1631         enum vxge_hw_status status = VXGE_HW_OK;
1632
1633         vxge_trace();
1634
1635         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
1636
1637         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
1638                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
1639
1640         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1641
1642         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
1643         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
1644
1645         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1646
1647         return status;
1648 }
1649
1650 /*
1651  * vxge_hw_vpath_open - Open a virtual path on a given adapter
1652  * This function is used to open access to virtual path of an
1653  * adapter for offload, GRO operations. This function returns
1654  * synchronously.
1655  */
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath)
{
	struct __vxge_hw_virtualpath *vpathh;
	enum vxge_hw_status status;

	vxge_trace();

	vpathh = &hldev->virtual_path;

	if (vpath->vp_open == VXGE_HW_VP_OPEN) {
		status = VXGE_HW_ERR_INVALID_STATE;
		goto vpath_open_exit1;
	}

	/* Reset and initialize the hardware vpath. */
	status = __vxge_hw_vp_initialize(hldev, hldev->first_vp_id, vpathh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit1;

	/* Create the TX fifo, then the RX ring. */
	status = __vxge_hw_fifo_create(vpathh, &vpathh->fifoh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit2;

	status = __vxge_hw_ring_create(vpathh, &vpathh->ringh);
	if (status != VXGE_HW_OK)
		goto vpath_open_exit3;

	/* Finally point the PRC at the new ring. */
	__vxge_hw_vpath_prc_configure(hldev);

	return VXGE_HW_OK;

	/* Unwind in reverse order of acquisition. */
vpath_open_exit3:
	__vxge_hw_fifo_delete(&vpathh->fifoh);
vpath_open_exit2:
	__vxge_hw_vp_terminate(hldev, vpathh);
vpath_open_exit1:
	return status;
}
1694
1695 /*
1696  * vxge_hw_vpath_rx_doorbell_init -  Post the count of the refreshed region
1697  * of RxD list
1698  * @vp: vpath handle
1699  *
1700  * This function decides on the Rxd replenish count depending on the
1701  * descriptor memory that has been allocated to this VPath.
1702  */
1703 void
1704 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath)
1705 {
1706         u64 new_count, val64;
1707
1708         vxge_trace();
1709
1710         if (vpath->hldev->titan1) {
1711                 new_count = readq(&vpath->vp_reg->rxdmem_size);
1712                 new_count &= 0x1fff;
1713         } else
1714                 new_count = VXGE_HW_RING_RXD_QWORDS_MODE_1 * 4;
1715
1716         val64 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
1717
1718         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val64),
1719                 &vpath->vp_reg->prc_rxd_doorbell);
1720 }
1721
1722 /*
1723  * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
1724  * This function is used to close access to virtual path opened
1725  * earlier.
1726  */
1727 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath)
1728 {
1729         struct __vxge_hw_device *devh = NULL;
1730         u32 vp_id = vpath->vp_id;
1731         enum vxge_hw_status status = VXGE_HW_OK;
1732
1733         vxge_trace();
1734
1735         devh = vpath->hldev;
1736
1737         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1738                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1739                 goto vpath_close_exit;
1740         }
1741
1742         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
1743
1744         __vxge_hw_ring_delete(&vpath->ringh);
1745
1746         __vxge_hw_fifo_delete(&vpath->fifoh);
1747
1748         __vxge_hw_vp_terminate(devh, vpath);
1749
1750         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
1751
1752 vpath_close_exit:
1753         return status;
1754 }
1755
1756 /*
1757  * vxge_hw_vpath_reset - Resets vpath
1758  * This function is used to request a reset of vpath
1759  */
1760 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath)
1761 {
1762         enum vxge_hw_status status;
1763         u32 vp_id;
1764
1765         vxge_trace();
1766
1767         vp_id = vpath->vp_id;
1768
1769         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1770                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1771                 goto exit;
1772         }
1773
1774         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
1775 exit:
1776         return status;
1777 }
1778
1779 /*
1780  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
1781  * This function poll's for the vpath reset completion and re initializes
1782  * the vpath.
1783  */
1784 enum vxge_hw_status
1785 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath)
1786 {
1787         enum vxge_hw_status status;
1788         struct __vxge_hw_device *hldev;
1789         u32 vp_id;
1790
1791         vxge_trace();
1792
1793         vp_id = vpath->vp_id;
1794         hldev = vpath->hldev;
1795
1796         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1797                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1798                 goto exit;
1799         }
1800
1801         status = __vxge_hw_vpath_reset_check(vpath);
1802         if (status != VXGE_HW_OK)
1803                 goto exit;
1804
1805         status = __vxge_hw_vpath_initialize(hldev, vp_id);
1806         if (status != VXGE_HW_OK)
1807                 goto exit;
1808
1809         __vxge_hw_vpath_prc_configure(hldev);
1810
1811 exit:
1812         return status;
1813 }
1814
1815 /*
1816  * vxge_hw_vpath_enable - Enable vpath.
1817  * This routine clears the vpath reset thereby enabling a vpath
1818  * to start forwarding frames and generating interrupts.
1819  */
1820 void
1821 vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath)
1822 {
1823         struct __vxge_hw_device *hldev;
1824         u64 val64;
1825
1826         vxge_trace();
1827
1828         hldev = vpath->hldev;
1829
1830         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
1831                 1 << (16 - vpath->vp_id));
1832
1833         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
1834                 &hldev->common_reg->cmn_rsthdlr_cfg1);
1835 }