vxge: code cleanup and reorganization
author     Jon Mason <jon.mason@exar.com>
           Fri, 10 Dec 2010 14:02:56 +0000 (14:02 +0000)
committer  David S. Miller <davem@davemloft.net>
           Sat, 11 Dec 2010 00:08:21 +0000 (16:08 -0800)
Move functions so that internal forward declarations are no longer needed,
along with other miscellaneous cleanups.
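
To illustrate the pattern (a minimal hedged sketch with made-up names, not
code from this patch): when a static function is defined after its first
caller, C requires a forward declaration; moving the definition above the
caller lets the declaration be deleted.

    /* Assumes <linux/io.h> and <linux/errno.h>; all names are hypothetical. */

    /* Before: the definition follows its caller, so a forward
     * declaration is needed. */
    static int example_busy_poll(void __iomem *reg);

    static int example_init(void __iomem *reg)
    {
            return example_busy_poll(reg);
    }

    static int example_busy_poll(void __iomem *reg)
    {
            return readl(reg) ? -EBUSY : 0;
    }

    /* After: the definition is moved above its caller, and the
     * forward declaration is simply dropped. */
    static int example_busy_poll(void __iomem *reg)
    {
            return readl(reg) ? -EBUSY : 0;
    }

    static int example_init(void __iomem *reg)
    {
            return example_busy_poll(reg);
    }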

Signed-off-by: Jon Mason <jon.mason@exar.com>
Signed-off-by: Arpit Patel <arpit.patel@exar.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-traffic.c
drivers/net/vxge/vxge-traffic.h

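The patch also introduces the VXGE_HW_VPATH_STATS_PIO_READ statement macro
(see the first hunk below), which reads one statistic and returns early from
the enclosing function on failure; it deliberately relies on the caller
providing vpath, status, and val64 in scope. A minimal standalone sketch of
the same idiom, with hypothetical names:

    /* Hypothetical helper standing in for __vxge_hw_vpath_stats_access. */
    struct stat_dev;
    int read_stat(struct stat_dev *dev, unsigned int offset,
                  unsigned long long *out);

    /* Expands to a block using the caller's local 'status'; a failed
     * read returns from the function that invoked the macro. */
    #define READ_STAT_OR_FAIL(dev, offset, out)     \
    {                                               \
            status = read_stat(dev, offset, out);   \
            if (status != 0)                        \
                    return status;                  \
    }

    static int collect_two_stats(struct stat_dev *dev,
                                 unsigned long long *a,
                                 unsigned long long *b)
    {
            int status;     /* required by the macro expansions below */

            READ_STAT_OR_FAIL(dev, 0, a);
            READ_STAT_OR_FAIL(dev, 1, b);
            return 0;
    }

The more common kernel idiom wraps such macros in do { } while (0); the
bare-block form used here behaves the same as long as each expansion stands
alone as a statement.
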
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a0241fe72d8b79f0cbeabbe31955becd6eaa7906..1169aa387caba0281cdbb4866683f272e2b43b1e 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
 #include "vxge-config.h"
 #include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(
-       struct __vxge_hw_vpath_handle *vpath_handle);
-
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
-                       u32 size);
-
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool_entry *entry);
-
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
-                                       void *block_addr,
-                                       u32 length,
-                                       struct pci_dev *dma_h,
-                                       struct pci_dev *acc_handle);
-
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-                       struct __vxge_hw_blockpool  *blockpool,
-                       u32 pool_size,
-                       u32 pool_max);
-
-static void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
-
-static void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-static void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
-                       void *memblock,
-                       u32 size,
-                       struct vxge_hw_mempool_dma *dma_object);
-
-static void
-__vxge_hw_channel_free(
-       struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
-
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(
-       void __iomem    *reg,
-       u64 mask, u32 max_millis);
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
-                         u64 mask, u32 max_millis)
-{
-       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
-       wmb();
-
-       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
-       wmb();
-
-       return  __vxge_hw_device_register_poll(addr, mask, max_millis);
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {                         \
+       status = __vxge_hw_vpath_stats_access(vpath,                    \
+                                             VXGE_HW_STATS_OP_READ,    \
+                                             offset,                   \
+                                             &val64);                  \
+       if (status != VXGE_HW_OK)                                       \
+               return status;                                          \
 }
 
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
-                       u32 item_size, u32 private_size, u32 items_initial,
-                       u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
-                       void *userdata);
-
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
-                         struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
-
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
 static void
 vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
 {
@@ -124,8 +39,6 @@ vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        writeq(val64, &vp_reg->rxmac_vcfg0);
        val64 = readq(&vp_reg->rxmac_vcfg0);
-
-       return;
 }
 
 /*
@@ -197,6 +110,50 @@ void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
        }
 }
 
+/*
+ * __vxge_hw_device_register_poll
+ * Will poll a certain register for a specified amount of time,
+ * until the masked bits are cleared or the poll times out.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
+{
+       u64 val64;
+       u32 i = 0;
+       enum vxge_hw_status ret = VXGE_HW_FAIL;
+
+       udelay(10);
+
+       do {
+               val64 = readq(reg);
+               if (!(val64 & mask))
+                       return VXGE_HW_OK;
+               udelay(100);
+       } while (++i <= 9);
+
+       i = 0;
+       do {
+               val64 = readq(reg);
+               if (!(val64 & mask))
+                       return VXGE_HW_OK;
+               mdelay(1);
+       } while (++i <= max_millis);
+
+       return ret;
+}
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+                         u64 mask, u32 max_millis)
+{
+       __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+       wmb();
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+       wmb();
+
+       return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
 static enum vxge_hw_status
 vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
                     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
@@ -445,77 +402,6 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
        return status;
 }
 
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-static struct __vxge_hw_channel *
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-                          enum __vxge_hw_channel_type type,
-       u32 length, u32 per_dtr_space, void *userdata)
-{
-       struct __vxge_hw_channel *channel;
-       struct __vxge_hw_device *hldev;
-       int size = 0;
-       u32 vp_id;
-
-       hldev = vph->vpath->hldev;
-       vp_id = vph->vpath->vp_id;
-
-       switch (type) {
-       case VXGE_HW_CHANNEL_TYPE_FIFO:
-               size = sizeof(struct __vxge_hw_fifo);
-               break;
-       case VXGE_HW_CHANNEL_TYPE_RING:
-               size = sizeof(struct __vxge_hw_ring);
-               break;
-       default:
-               break;
-       }
-
-       channel = kzalloc(size, GFP_KERNEL);
-       if (channel == NULL)
-               goto exit0;
-       INIT_LIST_HEAD(&channel->item);
-
-       channel->common_reg = hldev->common_reg;
-       channel->first_vp_id = hldev->first_vp_id;
-       channel->type = type;
-       channel->devh = hldev;
-       channel->vph = vph;
-       channel->userdata = userdata;
-       channel->per_dtr_space = per_dtr_space;
-       channel->length = length;
-       channel->vp_id = vp_id;
-
-       channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-       if (channel->work_arr == NULL)
-               goto exit1;
-
-       channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-       if (channel->free_arr == NULL)
-               goto exit1;
-       channel->free_ptr = length;
-
-       channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-       if (channel->reserve_arr == NULL)
-               goto exit1;
-       channel->reserve_ptr = length;
-       channel->reserve_top = 0;
-
-       channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-       if (channel->orig_arr == NULL)
-               goto exit1;
-
-       return channel;
-exit1:
-       __vxge_hw_channel_free(channel);
-
-exit0:
-       return NULL;
-}
-
 /*
  * __vxge_hw_channel_free - Free memory allocated for channel
  * This function deallocates memory from the channel and various arrays
@@ -609,38 +495,6 @@ static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
        pci_save_state(hldev->pdev);
 }
 
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
-       u64 val64;
-       u32 i = 0;
-       enum vxge_hw_status ret = VXGE_HW_FAIL;
-
-       udelay(10);
-
-       do {
-               val64 = readq(reg);
-               if (!(val64 & mask))
-                       return VXGE_HW_OK;
-               udelay(100);
-       } while (++i <= 9);
-
-       i = 0;
-       do {
-               val64 = readq(reg);
-               if (!(val64 & mask))
-                       return VXGE_HW_OK;
-               mdelay(1);
-       } while (++i <= max_millis);
-
-       return ret;
-}
-
 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
@@ -655,6 +509,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
        return status;
 }
 
+/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
+ */
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+       u64 val64;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       val64 = readq(&legacy_reg->toc_swapper_fb);
+
+       wmb();
+
+       switch (val64) {
+       case VXGE_HW_SWAPPER_INITIAL_VALUE:
+               return status;
+
+       case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+               writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+                       &legacy_reg->pifm_rd_swap_en);
+               writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+                       &legacy_reg->pifm_rd_flip_en);
+               writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+                       &legacy_reg->pifm_wr_swap_en);
+               writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+                       &legacy_reg->pifm_wr_flip_en);
+               break;
+
+       case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+               writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+                       &legacy_reg->pifm_rd_swap_en);
+               writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+                       &legacy_reg->pifm_wr_swap_en);
+               break;
+
+       case VXGE_HW_SWAPPER_BIT_FLIPPED:
+               writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+                       &legacy_reg->pifm_rd_flip_en);
+               writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+                       &legacy_reg->pifm_wr_flip_en);
+               break;
+       }
+
+       wmb();
+
+       val64 = readq(&legacy_reg->toc_swapper_fb);
+
+       if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+               status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+       return status;
+}
+
 /*
  * __vxge_hw_device_toc_get
  * This routine sets the swapper and reads the toc pointer and returns the
@@ -1132,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;
 
@@ -1196,9 +1103,221 @@ exit:
 }
 
 /*
- * vxge_hw_device_initialize - Initialize Titan device.
- * Initialize Titan device. Note that all the arguments of this public API
- * are 'IN', including @hldev. Driver cooperates with
+ * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ */
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+{
+       struct __vxge_hw_device *hldev;
+       struct list_head *p, *n;
+       u16 ret;
+
+       if (blockpool == NULL) {
+               ret = 1;
+               goto exit;
+       }
+
+       hldev = blockpool->hldev;
+
+       list_for_each_safe(p, n, &blockpool->free_block_list) {
+               pci_unmap_single(hldev->pdev,
+                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                       ((struct __vxge_hw_blockpool_entry *)p)->length,
+                       PCI_DMA_BIDIRECTIONAL);
+
+               vxge_os_dma_free(hldev->pdev,
+                       ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+                       &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+               list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+               kfree(p);
+               blockpool->pool_size--;
+       }
+
+       list_for_each_safe(p, n, &blockpool->free_entry_list) {
+               list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+               kfree((void *)p);
+       }
+       ret = 0;
+exit:
+       return;
+}
+
+/*
+ * __vxge_hw_blockpool_create - Create block pool
+ */
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+                          struct __vxge_hw_blockpool *blockpool,
+                          u32 pool_size,
+                          u32 pool_max)
+{
+       u32 i;
+       struct __vxge_hw_blockpool_entry *entry = NULL;
+       void *memblock;
+       dma_addr_t dma_addr;
+       struct pci_dev *dma_handle;
+       struct pci_dev *acc_handle;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (blockpool == NULL) {
+               status = VXGE_HW_FAIL;
+               goto blockpool_create_exit;
+       }
+
+       blockpool->hldev = hldev;
+       blockpool->block_size = VXGE_HW_BLOCK_SIZE;
+       blockpool->pool_size = 0;
+       blockpool->pool_max = pool_max;
+       blockpool->req_out = 0;
+
+       INIT_LIST_HEAD(&blockpool->free_block_list);
+       INIT_LIST_HEAD(&blockpool->free_entry_list);
+
+       for (i = 0; i < pool_size + pool_max; i++) {
+               entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+                               GFP_KERNEL);
+               if (entry == NULL) {
+                       __vxge_hw_blockpool_destroy(blockpool);
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto blockpool_create_exit;
+               }
+               list_add(&entry->item, &blockpool->free_entry_list);
+       }
+
+       for (i = 0; i < pool_size; i++) {
+               memblock = vxge_os_dma_malloc(
+                               hldev->pdev,
+                               VXGE_HW_BLOCK_SIZE,
+                               &dma_handle,
+                               &acc_handle);
+               if (memblock == NULL) {
+                       __vxge_hw_blockpool_destroy(blockpool);
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto blockpool_create_exit;
+               }
+
+               dma_addr = pci_map_single(hldev->pdev, memblock,
+                               VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+               if (unlikely(pci_dma_mapping_error(hldev->pdev,
+                               dma_addr))) {
+                       vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
+                       __vxge_hw_blockpool_destroy(blockpool);
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto blockpool_create_exit;
+               }
+
+               if (!list_empty(&blockpool->free_entry_list))
+                       entry = (struct __vxge_hw_blockpool_entry *)
+                               list_first_entry(&blockpool->free_entry_list,
+                                       struct __vxge_hw_blockpool_entry,
+                                       item);
+
+               if (entry == NULL)
+                       entry =
+                           kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+                                       GFP_KERNEL);
+               if (entry != NULL) {
+                       list_del(&entry->item);
+                       entry->length = VXGE_HW_BLOCK_SIZE;
+                       entry->memblock = memblock;
+                       entry->dma_addr = dma_addr;
+                       entry->acc_handle = acc_handle;
+                       entry->dma_handle = dma_handle;
+                       list_add(&entry->item,
+                                         &blockpool->free_block_list);
+                       blockpool->pool_size++;
+               } else {
+                       __vxge_hw_blockpool_destroy(blockpool);
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto blockpool_create_exit;
+               }
+       }
+
+blockpool_create_exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_device_fifo_config_check - Check fifo configuration.
+ * Check the fifo configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+{
+       if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
+           (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
+               return VXGE_HW_BADCFG_FIFO_BLOCKS;
+
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_vpath_config_check - Check vpath configuration.
+ * Check the vpath configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+{
+       enum vxge_hw_status status;
+
+       if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
+           (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
+               return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+
+       status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
+               ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
+               (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
+               return VXGE_HW_BADCFG_VPATH_MTU;
+
+       if ((vp_config->rpa_strip_vlan_tag !=
+               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
+               (vp_config->rpa_strip_vlan_tag !=
+               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
+               (vp_config->rpa_strip_vlan_tag !=
+               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
+               return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_config_check - Check device configuration.
+ * Check the device configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+{
+       u32 i;
+       enum vxge_hw_status status;
+
+       if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+           (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+           (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+           (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
+               return VXGE_HW_BADCFG_INTR_MODE;
+
+       if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
+           (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
+               return VXGE_HW_BADCFG_RTS_MAC_EN;
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+               status = __vxge_hw_device_vpath_config_check(
+                               &new_config->vp_config[i]);
+               if (status != VXGE_HW_OK)
+                       return status;
+       }
+
+       return VXGE_HW_OK;
+}
+
+/*
+ * vxge_hw_device_initialize - Initialize Titan device.
+ * Initialize Titan device. Note that all the arguments of this public API
+ * are 'IN', including @hldev. Driver cooperates with
  * OS to find new Titan device, locate its PCI and memory spaces.
  *
  * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
@@ -1303,40 +1422,276 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
 }
 
 /*
- * vxge_hw_device_stats_get - Get the device hw statistics.
- * Returns the vpath h/w stats for the device.
+ * __vxge_hw_vpath_stats_access - Get the statistics from the given location
+ *                           and offset and perform an operation
  */
-enum vxge_hw_status
-vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
-                       struct vxge_hw_device_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+                            u32 operation, u32 offset, u64 *stat)
 {
-       u32 i;
+       u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
 
-       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-               if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
-                       (hldev->virtual_paths[i].vp_open ==
-                               VXGE_HW_VP_NOT_OPEN))
-                       continue;
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto vpath_stats_access_exit;
+       }
 
-               memcpy(hldev->virtual_paths[i].hw_stats_sav,
-                               hldev->virtual_paths[i].hw_stats,
-                               sizeof(struct vxge_hw_vpath_stats_hw_info));
+       vp_reg = vpath->vp_reg;
 
-               status = __vxge_hw_vpath_stats_get(
-                       &hldev->virtual_paths[i],
-                       hldev->virtual_paths[i].hw_stats);
-       }
+       val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
+                VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
+                VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
 
-       memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
-                       sizeof(struct vxge_hw_device_stats_hw_info));
+       status = __vxge_hw_pio_mem_write64(val64,
+                               &vp_reg->xmac_stats_access_cmd,
+                               VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
+                               vpath->hldev->config.device_poll_millis);
+       if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+               *stat = readq(&vp_reg->xmac_stats_access_data);
+       else
+               *stat = 0;
 
+vpath_stats_access_exit:
        return status;
 }
 
 /*
- * vxge_hw_driver_stats_get - Get the device sw statistics.
- * Returns the vpath s/w stats for the device.
+ * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                       struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
+{
+       u64 *val64;
+       int i;
+       u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       val64 = (u64 *)vpath_tx_stats;
+
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
+
+       for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+               status = __vxge_hw_vpath_stats_access(vpath,
+                                       VXGE_HW_STATS_OP_READ,
+                                       offset, val64);
+               if (status != VXGE_HW_OK)
+                       goto exit;
+               offset++;
+               val64++;
+       }
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                       struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+       u64 *val64;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       int i;
+       u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+       val64 = (u64 *) vpath_rx_stats;
+
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
+       for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+               status = __vxge_hw_vpath_stats_access(vpath,
+                                       VXGE_HW_STATS_OP_READ,
+                                       offset >> 3, val64);
+               if (status != VXGE_HW_OK)
+                       goto exit;
+
+               offset += 8;
+               val64++;
+       }
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+       u64 val64;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
+       vp_reg = vpath->vp_reg;
+
+       val64 = readq(&vp_reg->vpath_debug_stats0);
+       hw_stats->ini_num_mwr_sent =
+               (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats1);
+       hw_stats->ini_num_mrd_sent =
+               (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats2);
+       hw_stats->ini_num_cpl_rcvd =
+               (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats3);
+       hw_stats->ini_num_mwr_byte_sent =
+               VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats4);
+       hw_stats->ini_num_cpl_byte_rcvd =
+               VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats5);
+       hw_stats->wrcrdtarb_xoff =
+               (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+       val64 = readq(&vp_reg->vpath_debug_stats6);
+       hw_stats->rdcrdtarb_xoff =
+               (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count01);
+       hw_stats->vpath_genstats_count0 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+               val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count01);
+       hw_stats->vpath_genstats_count1 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+               val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count23);
+       hw_stats->vpath_genstats_count2 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+               val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count01);
+       hw_stats->vpath_genstats_count3 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+               val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count4);
+       hw_stats->vpath_genstats_count4 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+               val64);
+
+       val64 = readq(&vp_reg->vpath_genstats_count5);
+       hw_stats->vpath_genstats_count5 =
+       (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
+               val64);
+
+       status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       VXGE_HW_VPATH_STATS_PIO_READ(
+               VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+
+       hw_stats->prog_event_vnum0 =
+                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+
+       hw_stats->prog_event_vnum1 =
+                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+
+       VXGE_HW_VPATH_STATS_PIO_READ(
+               VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+
+       hw_stats->prog_event_vnum2 =
+                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+
+       hw_stats->prog_event_vnum3 =
+                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+
+       val64 = readq(&vp_reg->rx_multi_cast_stats);
+       hw_stats->rx_multi_cast_frame_discard =
+               (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+
+       val64 = readq(&vp_reg->rx_frm_transferred);
+       hw_stats->rx_frm_transferred =
+               (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+
+       val64 = readq(&vp_reg->rxd_returned);
+       hw_stats->rxd_returned =
+               (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+
+       val64 = readq(&vp_reg->dbg_stats_rx_mpa);
+       hw_stats->rx_mpa_len_fail_frms =
+               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
+       hw_stats->rx_mpa_mrk_fail_frms =
+               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
+       hw_stats->rx_mpa_crc_fail_frms =
+               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+
+       val64 = readq(&vp_reg->dbg_stats_rx_fau);
+       hw_stats->rx_permitted_frms =
+               (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
+       hw_stats->rx_vp_reset_discarded_frms =
+       (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
+       hw_stats->rx_wol_frms =
+               (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+
+       val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
+       hw_stats->tx_vp_reset_discarded_frms =
+       (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
+               val64);
+exit:
+       return status;
+}
+
+/*
+ * vxge_hw_device_stats_get - Get the device hw statistics.
+ * Returns the vpath h/w stats for the device.
+ */
+enum vxge_hw_status
+vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
+                       struct vxge_hw_device_stats_hw_info *hw_stats)
+{
+       u32 i;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+               if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
+                       (hldev->virtual_paths[i].vp_open ==
+                               VXGE_HW_VP_NOT_OPEN))
+                       continue;
+
+               memcpy(hldev->virtual_paths[i].hw_stats_sav,
+                               hldev->virtual_paths[i].hw_stats,
+                               sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+               status = __vxge_hw_vpath_stats_get(
+                       &hldev->virtual_paths[i],
+                       hldev->virtual_paths[i].hw_stats);
+       }
+
+       memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
+                       sizeof(struct vxge_hw_device_stats_hw_info));
+
+       return status;
+}
+
+/*
+ * vxge_hw_driver_stats_get - Get the device sw statistics.
+ * Returns the vpath s/w stats for the device.
  */
 enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
@@ -1468,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
 
        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);
-
        if (status != VXGE_HW_OK)
                goto exit;
 
@@ -1843,206 +2197,376 @@ exit:
 }
 
 /*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates Ring and initializes it.
+ * __vxge_hw_channel_allocate - Allocate memory for channel
+ * This function allocates required memory for the channel and various arrays
+ * in the channel
  */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
-                     struct vxge_hw_ring_attr *attr)
+static struct __vxge_hw_channel *
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+                          enum __vxge_hw_channel_type type,
+                          u32 length, u32 per_dtr_space,
+                          void *userdata)
 {
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_ring *ring;
-       u32 ring_length;
-       struct vxge_hw_ring_config *config;
+       struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
+       int size = 0;
        u32 vp_id;
-       struct vxge_hw_mempool_cbs ring_mp_callback;
 
-       if ((vp == NULL) || (attr == NULL)) {
-               status = VXGE_HW_FAIL;
-               goto exit;
-       }
+       hldev = vph->vpath->hldev;
+       vp_id = vph->vpath->vp_id;
 
-       hldev = vp->vpath->hldev;
-       vp_id = vp->vpath->vp_id;
-
-       config = &hldev->config.vp_config[vp_id].ring;
-
-       ring_length = config->ring_blocks *
-                       vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
-
-       ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
-                                               VXGE_HW_CHANNEL_TYPE_RING,
-                                               ring_length,
-                                               attr->per_rxd_space,
-                                               attr->userdata);
-
-       if (ring == NULL) {
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-               goto exit;
+       switch (type) {
+       case VXGE_HW_CHANNEL_TYPE_FIFO:
+               size = sizeof(struct __vxge_hw_fifo);
+               break;
+       case VXGE_HW_CHANNEL_TYPE_RING:
+               size = sizeof(struct __vxge_hw_ring);
+               break;
+       default:
+               break;
        }
 
-       vp->vpath->ringh = ring;
-       ring->vp_id = vp_id;
-       ring->vp_reg = vp->vpath->vp_reg;
-       ring->common_reg = hldev->common_reg;
-       ring->stats = &vp->vpath->sw_stats->ring_stats;
-       ring->config = config;
-       ring->callback = attr->callback;
-       ring->rxd_init = attr->rxd_init;
-       ring->rxd_term = attr->rxd_term;
-       ring->buffer_mode = config->buffer_mode;
-       ring->rxds_limit = config->rxds_limit;
-
-       ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
-       ring->rxd_priv_size =
-               sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
-       ring->per_rxd_space = attr->per_rxd_space;
+       channel = kzalloc(size, GFP_KERNEL);
+       if (channel == NULL)
+               goto exit0;
+       INIT_LIST_HEAD(&channel->item);
 
-       ring->rxd_priv_size =
-               ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
-               VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+       channel->common_reg = hldev->common_reg;
+       channel->first_vp_id = hldev->first_vp_id;
+       channel->type = type;
+       channel->devh = hldev;
+       channel->vph = vph;
+       channel->userdata = userdata;
+       channel->per_dtr_space = per_dtr_space;
+       channel->length = length;
+       channel->vp_id = vp_id;
 
-       /* how many RxDs can fit into one block. Depends on configured
-        * buffer_mode. */
-       ring->rxds_per_block =
-               vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+       channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       if (channel->work_arr == NULL)
+               goto exit1;
 
-       /* calculate actual RxD block private size */
-       ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
-       ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
-       ring->mempool = __vxge_hw_mempool_create(hldev,
-                               VXGE_HW_BLOCK_SIZE,
-                               VXGE_HW_BLOCK_SIZE,
-                               ring->rxdblock_priv_size,
-                               ring->config->ring_blocks,
-                               ring->config->ring_blocks,
-                               &ring_mp_callback,
-                               ring);
+       channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       if (channel->free_arr == NULL)
+               goto exit1;
+       channel->free_ptr = length;
 
-       if (ring->mempool == NULL) {
-               __vxge_hw_ring_delete(vp);
-               return VXGE_HW_ERR_OUT_OF_MEMORY;
-       }
+       channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       if (channel->reserve_arr == NULL)
+               goto exit1;
+       channel->reserve_ptr = length;
+       channel->reserve_top = 0;
 
-       status = __vxge_hw_channel_initialize(&ring->channel);
-       if (status != VXGE_HW_OK) {
-               __vxge_hw_ring_delete(vp);
-               goto exit;
-       }
+       channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+       if (channel->orig_arr == NULL)
+               goto exit1;
 
-       /* Note:
-        * Specifying rxd_init callback means two things:
-        * 1) rxds need to be initialized by driver at channel-open time;
-        * 2) rxds need to be posted at channel-open time
-        *    (that's what the initial_replenish() below does)
-        * Currently we don't have a case when the 1) is done without the 2).
-        */
-       if (ring->rxd_init) {
-               status = vxge_hw_ring_replenish(ring);
-               if (status != VXGE_HW_OK) {
-                       __vxge_hw_ring_delete(vp);
-                       goto exit;
-               }
-       }
+       return channel;
+exit1:
+       __vxge_hw_channel_free(channel);
 
-       /* initial replenish will increment the counter in its post() routine,
-        * we have to reset it */
-       ring->stats->common_stats.usage_cnt = 0;
-exit:
-       return status;
+exit0:
+       return NULL;
 }
 
 /*
- * __vxge_hw_ring_abort - Returns the RxD
- * This function terminates the RxDs of ring
+ * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
+ * Adds a block to the block pool
  */
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+                                       void *block_addr,
+                                       u32 length,
+                                       struct pci_dev *dma_h,
+                                       struct pci_dev *acc_handle)
 {
-       void *rxdh;
-       struct __vxge_hw_channel *channel;
+       struct __vxge_hw_blockpool *blockpool;
+       struct __vxge_hw_blockpool_entry *entry = NULL;
+       dma_addr_t dma_addr;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       u32 req_out;
 
-       channel = &ring->channel;
+       blockpool = &devh->block_pool;
 
-       for (;;) {
-               vxge_hw_channel_dtr_try_complete(channel, &rxdh);
+       if (block_addr == NULL) {
+               blockpool->req_out--;
+               status = VXGE_HW_FAIL;
+               goto exit;
+       }
 
-               if (rxdh == NULL)
-                       break;
+       dma_addr = pci_map_single(devh->pdev, block_addr, length,
+                               PCI_DMA_BIDIRECTIONAL);
 
-               vxge_hw_channel_dtr_complete(channel);
+       if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+               vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
+               blockpool->req_out--;
+               status = VXGE_HW_FAIL;
+               goto exit;
+       }
 
-               if (ring->rxd_term)
-                       ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
-                               channel->userdata);
+       if (!list_empty(&blockpool->free_entry_list))
+               entry = (struct __vxge_hw_blockpool_entry *)
+                       list_first_entry(&blockpool->free_entry_list,
+                               struct __vxge_hw_blockpool_entry,
+                               item);
 
-               vxge_hw_channel_dtr_free(channel, rxdh);
-       }
+       if (entry == NULL)
+               entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+       else
+               list_del(&entry->item);
 
-       return VXGE_HW_OK;
-}
+       if (entry != NULL) {
+               entry->length = length;
+               entry->memblock = block_addr;
+               entry->dma_addr = dma_addr;
+               entry->acc_handle = acc_handle;
+               entry->dma_handle = dma_h;
+               list_add(&entry->item, &blockpool->free_block_list);
+               blockpool->pool_size++;
+               status = VXGE_HW_OK;
+       } else
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
 
-/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_channel *channel;
+       blockpool->req_out--;
 
-       channel = &ring->channel;
+       req_out = blockpool->req_out;
+exit:
+       return;
+}
 
-       __vxge_hw_ring_abort(ring);
+static inline void
+vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
+{
+       gfp_t flags;
+       void *vaddr;
 
-       status = __vxge_hw_channel_reset(channel);
+       if (in_interrupt())
+               flags = GFP_ATOMIC | GFP_DMA;
+       else
+               flags = GFP_KERNEL | GFP_DMA;
 
-       if (status != VXGE_HW_OK)
-               goto exit;
+       vaddr = kmalloc((size), flags);
 
-       if (ring->rxd_init) {
-               status = vxge_hw_ring_replenish(ring);
-               if (status != VXGE_HW_OK)
-                       goto exit;
-       }
-exit:
-       return status;
+       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
 }
 
 /*
- * __vxge_hw_ring_delete - Removes the ring
- * This function freeup the memory pool and removes the ring
+ * __vxge_hw_blockpool_blocks_add - Request additional blocks
  */
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static
+void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
 {
-       struct __vxge_hw_ring *ring = vp->vpath->ringh;
-
-       __vxge_hw_ring_abort(ring);
-
-       if (ring->mempool)
-               __vxge_hw_mempool_destroy(ring->mempool);
+       u32 nreq = 0, i;
 
-       vp->vpath->ringh = NULL;
-       __vxge_hw_channel_free(&ring->channel);
+       if ((blockpool->pool_size  +  blockpool->req_out) <
+               VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
+               nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
+               blockpool->req_out += nreq;
+       }
 
-       return VXGE_HW_OK;
+       for (i = 0; i < nreq; i++)
+               vxge_os_dma_malloc_async(
+                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+                       blockpool->hldev, VXGE_HW_BLOCK_SIZE);
 }
 
 /*
- * __vxge_hw_mempool_grow
- * Will resize mempool up to %num_allocate value.
+ * __vxge_hw_blockpool_malloc - Allocate a memory block from the pool
+ * Allocates a block of memory of the given size, either from the block
+ * pool or by calling vxge_os_dma_malloc()
  */
-static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
-                      u32 *num_allocated)
+static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
+                                       struct vxge_hw_mempool_dma *dma_object)
 {
-       u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
-       u32 n_items = mempool->items_per_memblock;
-       u32 start_block_idx = mempool->memblocks_allocated;
-       u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
+       struct __vxge_hw_blockpool_entry *entry = NULL;
+       struct __vxge_hw_blockpool  *blockpool;
+       void *memblock = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       *num_allocated = 0;
+       blockpool = &devh->block_pool;
+
+       if (size != blockpool->block_size) {
+
+               memblock = vxge_os_dma_malloc(devh->pdev, size,
+                                               &dma_object->handle,
+                                               &dma_object->acc_handle);
+
+               if (memblock == NULL) {
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto exit;
+               }
+
+               dma_object->addr = pci_map_single(devh->pdev, memblock, size,
+                                       PCI_DMA_BIDIRECTIONAL);
+
+               if (unlikely(pci_dma_mapping_error(devh->pdev,
+                               dma_object->addr))) {
+                       vxge_os_dma_free(devh->pdev, memblock,
+                               &dma_object->acc_handle);
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+                       goto exit;
+               }
+
+       } else {
+
+               if (!list_empty(&blockpool->free_block_list))
+                       entry = (struct __vxge_hw_blockpool_entry *)
+                               list_first_entry(&blockpool->free_block_list,
+                                       struct __vxge_hw_blockpool_entry,
+                                       item);
+
+               if (entry != NULL) {
+                       list_del(&entry->item);
+                       dma_object->addr = entry->dma_addr;
+                       dma_object->handle = entry->dma_handle;
+                       dma_object->acc_handle = entry->acc_handle;
+                       memblock = entry->memblock;
+
+                       list_add(&entry->item,
+                               &blockpool->free_entry_list);
+                       blockpool->pool_size--;
+               }
+
+               if (memblock != NULL)
+                       __vxge_hw_blockpool_blocks_add(blockpool);
+       }
+exit:
+       return memblock;
+}
+
+/*
+ * __vxge_hw_blockpool_blocks_remove - Free additional blocks
+ */
+static void
+__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &blockpool->free_block_list) {
+
+               if (blockpool->pool_size < blockpool->pool_max)
+                       break;
+
+               pci_unmap_single(
+                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                       ((struct __vxge_hw_blockpool_entry *)p)->length,
+                       PCI_DMA_BIDIRECTIONAL);
+
+               vxge_os_dma_free(
+                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+                       ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+                       &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+               list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+
+               list_add(p, &blockpool->free_entry_list);
+
+               blockpool->pool_size--;
+
+       }
+}
+
+/*
+ * __vxge_hw_blockpool_free - Frees the memory allocated with
+ *                             __vxge_hw_blockpool_malloc
+ */
+static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
+                                    void *memblock, u32 size,
+                                    struct vxge_hw_mempool_dma *dma_object)
+{
+       struct __vxge_hw_blockpool_entry *entry = NULL;
+       struct __vxge_hw_blockpool  *blockpool;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       blockpool = &devh->block_pool;
+
+       if (size != blockpool->block_size) {
+               pci_unmap_single(devh->pdev, dma_object->addr, size,
+                       PCI_DMA_BIDIRECTIONAL);
+               vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
+       } else {
+
+               if (!list_empty(&blockpool->free_entry_list))
+                       entry = (struct __vxge_hw_blockpool_entry *)
+                               list_first_entry(&blockpool->free_entry_list,
+                                       struct __vxge_hw_blockpool_entry,
+                                       item);
+
+               if (entry == NULL)
+                       entry = vmalloc(sizeof(
+                                       struct __vxge_hw_blockpool_entry));
+               else
+                       list_del(&entry->item);
+
+               if (entry != NULL) {
+                       entry->length = size;
+                       entry->memblock = memblock;
+                       entry->dma_addr = dma_object->addr;
+                       entry->acc_handle = dma_object->acc_handle;
+                       entry->dma_handle = dma_object->handle;
+                       list_add(&entry->item,
+                                       &blockpool->free_block_list);
+                       blockpool->pool_size++;
+                       status = VXGE_HW_OK;
+               } else
+                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+               if (status == VXGE_HW_OK)
+                       __vxge_hw_blockpool_blocks_remove(blockpool);
+       }
+}
+
+/*
+ * __vxge_hw_mempool_destroy
+ */
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+{
+       u32 i, j;
+       struct __vxge_hw_device *devh = mempool->devh;
+
+       for (i = 0; i < mempool->memblocks_allocated; i++) {
+               struct vxge_hw_mempool_dma *dma_object;
+
+               vxge_assert(mempool->memblocks_arr[i]);
+               vxge_assert(mempool->memblocks_dma_arr + i);
+
+               dma_object = mempool->memblocks_dma_arr + i;
+
+               for (j = 0; j < mempool->items_per_memblock; j++) {
+                       u32 index = i * mempool->items_per_memblock + j;
+
+                       /* to skip last partially filled(if any) memblock */
+                       if (index >= mempool->items_current)
+                               break;
+               }
+
+               vfree(mempool->memblocks_priv_arr[i]);
+
+               __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
+                               mempool->memblock_size, dma_object);
+       }
+
+       vfree(mempool->items_arr);
+       vfree(mempool->memblocks_dma_arr);
+       vfree(mempool->memblocks_priv_arr);
+       vfree(mempool->memblocks_arr);
+       vfree(mempool);
+}
+
+/*
+ * __vxge_hw_mempool_grow
+ * Will resize the mempool up to the %num_allocate value.
+ */
+static enum vxge_hw_status
+__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
+                      u32 *num_allocated)
+{
+       u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
+       u32 n_items = mempool->items_per_memblock;
+       u32 start_block_idx = mempool->memblocks_allocated;
+       u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       *num_allocated = 0;
 
        if (end_block_idx > mempool->memblocks_max) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2118,16 +2642,15 @@ exit:
  * with size enough to hold %items_initial number of items. Memory is
  * DMA-able but client must map/unmap before interoperating with the device.
  */
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
-       struct __vxge_hw_device *devh,
-       u32 memblock_size,
-       u32 item_size,
-       u32 items_priv_size,
-       u32 items_initial,
-       u32 items_max,
-       struct vxge_hw_mempool_cbs *mp_callback,
-       void *userdata)
+static struct vxge_hw_mempool *
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
+                        u32 memblock_size,
+                        u32 item_size,
+                        u32 items_priv_size,
+                        u32 items_initial,
+                        u32 items_max,
+                        struct vxge_hw_mempool_cbs *mp_callback,
+                        void *userdata)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 memblocks_to_allocate;
@@ -2185,7 +2708,6 @@ __vxge_hw_mempool_create(
        mempool->memblocks_dma_arr =
                vzalloc(sizeof(struct vxge_hw_mempool_dma) *
                        mempool->memblocks_max);
-
        if (mempool->memblocks_dma_arr == NULL) {
                __vxge_hw_mempool_destroy(mempool);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2222,122 +2744,188 @@ exit:
 }
 
 /*
- * vxge_hw_mempool_destroy
+ * __vxge_hw_ring_abort - Returns the RxDs
+ * This function terminates the RxDs of the ring
  */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
 {
-       u32 i, j;
-       struct __vxge_hw_device *devh = mempool->devh;
-
-       for (i = 0; i < mempool->memblocks_allocated; i++) {
-               struct vxge_hw_mempool_dma *dma_object;
+       void *rxdh;
+       struct __vxge_hw_channel *channel;
 
-               vxge_assert(mempool->memblocks_arr[i]);
-               vxge_assert(mempool->memblocks_dma_arr + i);
+       channel = &ring->channel;
 
-               dma_object = mempool->memblocks_dma_arr + i;
+       for (;;) {
+               vxge_hw_channel_dtr_try_complete(channel, &rxdh);
 
-               for (j = 0; j < mempool->items_per_memblock; j++) {
-                       u32 index = i * mempool->items_per_memblock + j;
+               if (rxdh == NULL)
+                       break;
 
-                       /* to skip last partially filled(if any) memblock */
-                       if (index >= mempool->items_current)
-                               break;
-               }
+               vxge_hw_channel_dtr_complete(channel);
 
-               vfree(mempool->memblocks_priv_arr[i]);
+               if (ring->rxd_term)
+                       ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
+                               channel->userdata);
 
-               __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
-                               mempool->memblock_size, dma_object);
+               vxge_hw_channel_dtr_free(channel, rxdh);
        }
 
-       vfree(mempool->items_arr);
+       return VXGE_HW_OK;
+}
 
-       vfree(mempool->memblocks_dma_arr);
+/*
+ * __vxge_hw_ring_reset - Resets the ring
+ * This function resets the ring during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_channel *channel;
 
-       vfree(mempool->memblocks_priv_arr);
+       channel = &ring->channel;
 
-       vfree(mempool->memblocks_arr);
+       __vxge_hw_ring_abort(ring);
 
-       vfree(mempool);
+       status = __vxge_hw_channel_reset(channel);
+
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       if (ring->rxd_init) {
+               status = vxge_hw_ring_replenish(ring);
+               if (status != VXGE_HW_OK)
+                       goto exit;
+       }
+exit:
+       return status;
 }
 
 /*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
+ * __vxge_hw_ring_delete - Removes the ring
+ * This function frees up the memory pool and removes the ring
  */
 static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
 {
-       if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
-            (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
-               return VXGE_HW_BADCFG_FIFO_BLOCKS;
+       struct __vxge_hw_ring *ring = vp->vpath->ringh;
 
-       return VXGE_HW_OK;
-}
+       __vxge_hw_ring_abort(ring);
 
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
-       enum vxge_hw_status status;
-
-       if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
-               (vp_config->min_bandwidth >
-                                       VXGE_HW_VPATH_BANDWIDTH_MAX))
-               return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
-       status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
-       if (status != VXGE_HW_OK)
-               return status;
-
-       if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
-               ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
-               (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
-               return VXGE_HW_BADCFG_VPATH_MTU;
+       if (ring->mempool)
+               __vxge_hw_mempool_destroy(ring->mempool);
 
-       if ((vp_config->rpa_strip_vlan_tag !=
-               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
-               (vp_config->rpa_strip_vlan_tag !=
-               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
-               (vp_config->rpa_strip_vlan_tag !=
-               VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
-               return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+       vp->vpath->ringh = NULL;
+       __vxge_hw_channel_free(&ring->channel);
 
        return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
+ * __vxge_hw_ring_create - Create a Ring
+ * This function creates the ring and initializes it.
  */
 static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+                     struct vxge_hw_ring_attr *attr)
 {
-       u32 i;
-       enum vxge_hw_status status;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_ring *ring;
+       u32 ring_length;
+       struct vxge_hw_ring_config *config;
+       struct __vxge_hw_device *hldev;
+       u32 vp_id;
+       struct vxge_hw_mempool_cbs ring_mp_callback;
 
-       if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
-          (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
-          (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
-          (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
-               return VXGE_HW_BADCFG_INTR_MODE;
+       if ((vp == NULL) || (attr == NULL)) {
+               status = VXGE_HW_FAIL;
+               goto exit;
+       }
 
-       if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
-          (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
-               return VXGE_HW_BADCFG_RTS_MAC_EN;
+       hldev = vp->vpath->hldev;
+       vp_id = vp->vpath->vp_id;
 
-       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-               status = __vxge_hw_device_vpath_config_check(
-                               &new_config->vp_config[i]);
-               if (status != VXGE_HW_OK)
-                       return status;
+       config = &hldev->config.vp_config[vp_id].ring;
+
+       ring_length = config->ring_blocks *
+                       vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+       ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
+                                               VXGE_HW_CHANNEL_TYPE_RING,
+                                               ring_length,
+                                               attr->per_rxd_space,
+                                               attr->userdata);
+       if (ring == NULL) {
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
+               goto exit;
        }
 
-       return VXGE_HW_OK;
+       vp->vpath->ringh = ring;
+       ring->vp_id = vp_id;
+       ring->vp_reg = vp->vpath->vp_reg;
+       ring->common_reg = hldev->common_reg;
+       ring->stats = &vp->vpath->sw_stats->ring_stats;
+       ring->config = config;
+       ring->callback = attr->callback;
+       ring->rxd_init = attr->rxd_init;
+       ring->rxd_term = attr->rxd_term;
+       ring->buffer_mode = config->buffer_mode;
+       ring->rxds_limit = config->rxds_limit;
+
+       ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
+       ring->rxd_priv_size =
+               sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
+       ring->per_rxd_space = attr->per_rxd_space;
+
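+       /* round the per-RxD private size up to a cache-line multiple
+        * ((x + sz - 1) / sz * sz is the usual round-up idiom), presumably
+        * so that adjacent private areas do not share a cache line */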
+       ring->rxd_priv_size =
+               ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+               VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+
+       /* how many RxDs can fit into one block; depends on the configured
+        * buffer_mode */
+       ring->rxds_per_block =
+               vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+       /* calculate actual RxD block private size */
+       ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+       ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
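+       /* initial and maximum item counts are both ring_blocks, so the
+        * pool is created fully grown: one VXGE_HW_BLOCK_SIZE block per
+        * configured ring block */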
+       ring->mempool = __vxge_hw_mempool_create(hldev,
+                               VXGE_HW_BLOCK_SIZE,
+                               VXGE_HW_BLOCK_SIZE,
+                               ring->rxdblock_priv_size,
+                               ring->config->ring_blocks,
+                               ring->config->ring_blocks,
+                               &ring_mp_callback,
+                               ring);
+       if (ring->mempool == NULL) {
+               __vxge_hw_ring_delete(vp);
+               return VXGE_HW_ERR_OUT_OF_MEMORY;
+       }
+
+       status = __vxge_hw_channel_initialize(&ring->channel);
+       if (status != VXGE_HW_OK) {
+               __vxge_hw_ring_delete(vp);
+               goto exit;
+       }
+
+       /* Note:
+        * Specifying rxd_init callback means two things:
+        * 1) rxds need to be initialized by driver at channel-open time;
+        * 2) rxds need to be posted at channel-open time
+        *    (that's what the vxge_hw_ring_replenish() call below does)
+        * Currently we don't have a case where 1) is done without 2).
+        */
+       if (ring->rxd_init) {
+               status = vxge_hw_ring_replenish(ring);
+               if (status != VXGE_HW_OK) {
+                       __vxge_hw_ring_delete(vp);
+                       goto exit;
+               }
+       }
+
+       /* the initial replenish increments the counter in its post() routine,
+        * so we have to reset it here */
+       ring->stats->common_stats.usage_cnt = 0;
+exit:
+       return status;
 }
 
 /*
@@ -2359,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
        device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                device_config->vp_config[i].vp_id = i;
 
                device_config->vp_config[i].min_bandwidth =
@@ -2498,61 +3085,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
        return VXGE_HW_OK;
 }
 
-/*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       val64 = readq(&legacy_reg->toc_swapper_fb);
-
-       wmb();
-
-       switch (val64) {
-
-       case VXGE_HW_SWAPPER_INITIAL_VALUE:
-               return status;
-
-       case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
-               writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
-                       &legacy_reg->pifm_rd_swap_en);
-               writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
-                       &legacy_reg->pifm_rd_flip_en);
-               writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
-                       &legacy_reg->pifm_wr_swap_en);
-               writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
-                       &legacy_reg->pifm_wr_flip_en);
-               break;
-
-       case VXGE_HW_SWAPPER_BYTE_SWAPPED:
-               writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
-                       &legacy_reg->pifm_rd_swap_en);
-               writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
-                       &legacy_reg->pifm_wr_swap_en);
-               break;
-
-       case VXGE_HW_SWAPPER_BIT_FLIPPED:
-               writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
-                       &legacy_reg->pifm_rd_flip_en);
-               writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
-                       &legacy_reg->pifm_wr_flip_en);
-               break;
-       }
-
-       wmb();
-
-       val64 = readq(&legacy_reg->toc_swapper_fb);
-
-       if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
-               status = VXGE_HW_ERR_SWAPPER_CTRL;
-
-       return status;
-}
-
 /*
  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
  * Set the swapper bits appropriately for the vpath.
@@ -2577,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
  * Set the swapper bits appropriately for the vpath.
  */
 static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(
-       struct vxge_hw_legacy_reg __iomem *legacy_reg,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg)
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+                          struct vxge_hw_vpath_reg __iomem *vpath_reg)
 {
        u64 val64;
 
@@ -2829,57 +3360,120 @@ exit:
 }
 
 /*
- * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
- * list callback
- * This function is callback passed to __vxge_hw_mempool_create to create memory
- * pool for TxD list
+ * __vxge_hw_fifo_abort - Returns the TxDs
+ * This function terminates the outstanding TxDs of the fifo
  */
-static void
-__vxge_hw_fifo_mempool_item_alloc(
-       struct vxge_hw_mempool *mempoolh,
-       u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
-       u32 index, u32 is_last)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
 {
-       u32 memblock_item_idx;
-       struct __vxge_hw_fifo_txdl_priv *txdl_priv;
-       struct vxge_hw_fifo_txd *txdp =
-               (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
-       struct __vxge_hw_fifo *fifo =
-                       (struct __vxge_hw_fifo *)mempoolh->userdata;
-       void *memblock = mempoolh->memblocks_arr[memblock_index];
+       void *txdlh;
 
-       vxge_assert(txdp);
+       for (;;) {
+               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
 
-       txdp->host_control = (u64) (size_t)
-       __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
-                                       &memblock_item_idx);
+               if (txdlh == NULL)
+                       break;
 
-       txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+               vxge_hw_channel_dtr_complete(&fifo->channel);
 
-       vxge_assert(txdl_priv);
+               if (fifo->txdl_term) {
+                       fifo->txdl_term(txdlh,
+                       VXGE_HW_TXDL_STATE_POSTED,
+                       fifo->channel.userdata);
+               }
 
-       fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
+               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+       }
 
-       /* pre-format HW's TxDL's private */
-       txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
-       txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
-       txdl_priv->dma_handle = dma_object->handle;
-       txdl_priv->memblock   = memblock;
-       txdl_priv->first_txdp = txdp;
-       txdl_priv->next_txdl_priv = NULL;
-       txdl_priv->alloc_frags = 0;
+       return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_fifo_create - Create a FIFO
- * This function creates FIFO and initializes it.
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during the vpath reset operation
  */
-static enum vxge_hw_status
-__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
-                     struct vxge_hw_fifo_attr *attr)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_fifo *fifo;
+
+       __vxge_hw_fifo_abort(fifo);
+       status = __vxge_hw_channel_reset(&fifo->channel);
+
+       return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+       __vxge_hw_fifo_abort(fifo);
+
+       if (fifo->mempool)
+               __vxge_hw_mempool_destroy(fifo->mempool);
+
+       vp->vpath->fifoh = NULL;
+
+       __vxge_hw_channel_free(&fifo->channel);
+
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_mempool_item_alloc - TxD list block allocation callback
+ * This function is a callback passed to __vxge_hw_mempool_create to create
+ * the memory pool for the TxD list
+ */
+static void
+__vxge_hw_fifo_mempool_item_alloc(
+       struct vxge_hw_mempool *mempoolh,
+       u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
+       u32 index, u32 is_last)
+{
+       u32 memblock_item_idx;
+       struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+       struct vxge_hw_fifo_txd *txdp =
+               (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
+       struct __vxge_hw_fifo *fifo =
+                       (struct __vxge_hw_fifo *)mempoolh->userdata;
+       void *memblock = mempoolh->memblocks_arr[memblock_index];
+
+       vxge_assert(txdp);
+
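+       /* stash the pointer to this TxDL's private area in host_control;
+        * __vxge_hw_fifo_txdl_priv() recovers it from the descriptor
+        * later on */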
+       txdp->host_control = (u64) (size_t)
+       __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
+                                       &memblock_item_idx);
+
+       txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+       vxge_assert(txdl_priv);
+
+       fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
+
+       /* pre-format HW's TxDL's private */
+       txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
+       txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
+       txdl_priv->dma_handle = dma_object->handle;
+       txdl_priv->memblock   = memblock;
+       txdl_priv->first_txdp = txdp;
+       txdl_priv->next_txdl_priv = NULL;
+       txdl_priv->alloc_frags = 0;
+}
+
+/*
+ * __vxge_hw_fifo_create - Create a FIFO
+ * This function creates FIFO and initializes it.
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+                     struct vxge_hw_fifo_attr *attr)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_fifo *fifo;
        struct vxge_hw_fifo_config *config;
        u32 txdl_size, txdl_per_memblock;
        struct vxge_hw_mempool_cbs fifo_mp_callback;
@@ -2992,69 +3586,6 @@ exit:
        return status;
 }
 
-/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
-       void *txdlh;
-
-       for (;;) {
-               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
-               if (txdlh == NULL)
-                       break;
-
-               vxge_hw_channel_dtr_complete(&fifo->channel);
-
-               if (fifo->txdl_term) {
-                       fifo->txdl_term(txdlh,
-                       VXGE_HW_TXDL_STATE_POSTED,
-                       fifo->channel.userdata);
-               }
-
-               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
-       }
-
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       __vxge_hw_fifo_abort(fifo);
-       status = __vxge_hw_channel_reset(&fifo->channel);
-
-       return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
-       __vxge_hw_fifo_abort(fifo);
-
-       if (fifo->mempool)
-               __vxge_hw_mempool_destroy(fifo->mempool);
-
-       vp->vpath->fifoh = NULL;
-
-       __vxge_hw_channel_free(&fifo->channel);
-
-       return VXGE_HW_OK;
-}
-
 /*
  * __vxge_hw_vpath_pci_read - Read the content of given address
  *                          in pci config space.
@@ -3786,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
        vp_reg = vpath->vp_reg;
        config = vpath->vp_config;
 
-       writeq((u64)0, &vp_reg->tim_dest_addr);
-       writeq((u64)0, &vp_reg->tim_vpath_map);
-       writeq((u64)0, &vp_reg->tim_bitmap);
-       writeq((u64)0, &vp_reg->tim_remap);
+       writeq(0, &vp_reg->tim_dest_addr);
+       writeq(0, &vp_reg->tim_vpath_map);
+       writeq(0, &vp_reg->tim_bitmap);
+       writeq(0, &vp_reg->tim_remap);
 
        if (config->ring.enable == VXGE_HW_RING_ENABLE)
                writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3902,1355 +4433,699 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
                                        config->tti.ltimer_val);
                }
 
-               writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
-       }
-
-       if (config->ring.enable == VXGE_HW_RING_ENABLE) {
-
-               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-
-               if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
-                                       0x3ffffff);
-                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
-                                       config->rti.btimer_val);
-               }
-
-               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
-
-               if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
-                       if (config->rti.timer_ac_en)
-                               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
-                       else
-                               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
-               }
-
-               if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
-                       if (config->rti.timer_ci_en)
-                               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-                       else
-                               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-               }
-
-               if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
-                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
-                                       config->rti.urange_a);
-               }
-
-               if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
-                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
-                                       config->rti.urange_b);
-               }
-
-               if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
-                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
-                                       config->rti.urange_c);
-               }
-
-               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
-               val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-
-               if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
-                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
-                                               config->rti.uec_a);
-               }
-
-               if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
-                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
-                                               config->rti.uec_b);
-               }
-
-               if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
-                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
-                                               config->rti.uec_c);
-               }
-
-               if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
-                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
-                                               config->rti.uec_d);
-               }
-
-               writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
-               val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-
-               if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
-                       if (config->rti.timer_ri_en)
-                               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
-                       else
-                               val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
-               }
-
-               if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
-                                       0x3ffffff);
-                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
-                                       config->rti.rtimer_val);
-               }
-
-               if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
-                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
-                                       config->rti.util_sel);
-               }
-
-               if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
-                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
-                                       0x3ffffff);
-                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
-                                       config->rti.ltimer_val);
-               }
-
-               writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
-       }
-
-       val64 = 0;
-       writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
-       writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
-       writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
-       writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-       writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-       writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
-
-       return status;
-}
-
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-       struct vxge_hw_vp_config *config;
-       u64 val64;
-
-       vpath = &hldev->virtual_paths[vp_id];
-       vp_reg = vpath->vp_reg;
-       config = vpath->vp_config;
-
-       if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
-               if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-                       config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-                       writeq(val64,
-                       &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-               }
-       }
-}
-/*
- * __vxge_hw_vpath_initialize
- * This routine is the final phase of init which initializes the
- * registers of the vpath using the configuration passed.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-       u64 val64;
-       u32 val32;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-       vpath = &hldev->virtual_paths[vp_id];
-
-       if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
-               status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
-               goto exit;
-       }
-       vp_reg = vpath->vp_reg;
-
-       status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
-
-       /* Get MRRS value from device control */
-       status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
-
-       if (status == VXGE_HW_OK) {
-               val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
-               val64 &=
-                   ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
-               val64 |=
-                   VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
-
-               val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
-       }
-
-       val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
-       val64 |=
-           VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
-                   VXGE_HW_MAX_PAYLOAD_SIZE_512);
-
-       val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
-       writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
-
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vp_initialize - Initialize Virtual Path structure
- * This routine is the initial phase of init which resets the vpath and
- * initializes the software support structures.
- */
-static enum vxge_hw_status
-__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
-                       struct vxge_hw_vp_config *config)
-{
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
-               status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
-               goto exit;
-       }
-
-       vpath = &hldev->virtual_paths[vp_id];
-
-       spin_lock_init(&hldev->virtual_paths[vp_id].lock);
-       vpath->vp_id = vp_id;
-       vpath->vp_open = VXGE_HW_VP_OPEN;
-       vpath->hldev = hldev;
-       vpath->vp_config = config;
-       vpath->vp_reg = hldev->vpath_reg[vp_id];
-       vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
-
-       __vxge_hw_vpath_reset(hldev, vp_id);
-
-       status = __vxge_hw_vpath_reset_check(vpath);
-       if (status != VXGE_HW_OK) {
-               memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-       if (status != VXGE_HW_OK) {
-               memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-               goto exit;
-       }
-
-       INIT_LIST_HEAD(&vpath->vpath_handles);
-
-       vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
-
-       VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
-               hldev->tim_int_mask1, vp_id);
-
-       status = __vxge_hw_vpath_initialize(hldev, vp_id);
-       if (status != VXGE_HW_OK)
-               __vxge_hw_vp_terminate(hldev, vp_id);
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and freeup memory
- */
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-       struct __vxge_hw_virtualpath *vpath;
-
-       vpath = &hldev->virtual_paths[vp_id];
-
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
-               goto exit;
-
-       VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
-               vpath->hldev->tim_int_mask1, vpath->vp_id);
-       hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
-       memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-exit:
-       return;
-}
-
-/*
- * vxge_hw_vpath_mtu_set - Set MTU.
- * Set new MTU value. Example, to use jumbo frames:
- * vxge_hw_vpath_mtu_set(my_device, 9600);
- */
-enum vxge_hw_status
-vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
-{
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_virtualpath *vpath;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-       vpath = vp->vpath;
-
-       new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-       if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
-               status = VXGE_HW_ERR_INVALID_MTU_SIZE;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
-       val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
-       val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
-
-       writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-
-       vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
-
-exit:
-       return status;
-}
-
-/*
- * vxge_hw_vpath_open - Open a virtual path on a given adapter
- * This function is used to open access to virtual path of an
- * adapter for offload, GRO operations. This function returns
- * synchronously.
- */
-enum vxge_hw_status
-vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
-                  struct vxge_hw_vpath_attr *attr,
-                  struct __vxge_hw_vpath_handle **vpath_handle)
-{
-       struct __vxge_hw_virtualpath *vpath;
-       struct __vxge_hw_vpath_handle *vp;
-       enum vxge_hw_status status;
-
-       vpath = &hldev->virtual_paths[attr->vp_id];
-
-       if (vpath->vp_open == VXGE_HW_VP_OPEN) {
-               status = VXGE_HW_ERR_INVALID_STATE;
-               goto vpath_open_exit1;
-       }
-
-       status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
-                       &hldev->config.vp_config[attr->vp_id]);
-
-       if (status != VXGE_HW_OK)
-               goto vpath_open_exit1;
-
-       vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-               goto vpath_open_exit2;
-       }
-
-       vp->vpath = vpath;
-
-       if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
-               status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
-               if (status != VXGE_HW_OK)
-                       goto vpath_open_exit6;
-       }
-
-       if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
-               status = __vxge_hw_ring_create(vp, &attr->ring_attr);
-               if (status != VXGE_HW_OK)
-                       goto vpath_open_exit7;
-
-               __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
-       }
-
-       vpath->fifoh->tx_intr_num =
-               (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
-                       VXGE_HW_VPATH_INTR_TX;
-
-       vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
-                               VXGE_HW_BLOCK_SIZE);
-
-       if (vpath->stats_block == NULL) {
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-               goto vpath_open_exit8;
-       }
-
-       vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
-                       stats_block->memblock;
-       memset(vpath->hw_stats, 0,
-               sizeof(struct vxge_hw_vpath_stats_hw_info));
-
-       hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
-                                               vpath->hw_stats;
-
-       vpath->hw_stats_sav =
-               &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
-       memset(vpath->hw_stats_sav, 0,
-                       sizeof(struct vxge_hw_vpath_stats_hw_info));
-
-       writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
-
-       status = vxge_hw_vpath_stats_enable(vp);
-       if (status != VXGE_HW_OK)
-               goto vpath_open_exit8;
-
-       list_add(&vp->item, &vpath->vpath_handles);
-
-       hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
-
-       *vpath_handle = vp;
-
-       attr->fifo_attr.userdata = vpath->fifoh;
-       attr->ring_attr.userdata = vpath->ringh;
-
-       return VXGE_HW_OK;
-
-vpath_open_exit8:
-       if (vpath->ringh != NULL)
-               __vxge_hw_ring_delete(vp);
-vpath_open_exit7:
-       if (vpath->fifoh != NULL)
-               __vxge_hw_fifo_delete(vp);
-vpath_open_exit6:
-       vfree(vp);
-vpath_open_exit2:
-       __vxge_hw_vp_terminate(hldev, attr->vp_id);
-vpath_open_exit1:
-
-       return status;
-}
-
-/**
- * vxge_hw_vpath_rx_doorbell_post - Close the handle got from previous vpath
- * (vpath) open
- * @vp: Handle got from previous vpath open
- *
- * This function is used to close access to virtual path opened
- * earlier.
- */
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_virtualpath *vpath = vp->vpath;
-       struct __vxge_hw_ring *ring = vpath->ringh;
-       struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
-       u64 new_count, val64, val164;
-
-       if (vdev->titan1) {
-               new_count = readq(&vpath->vp_reg->rxdmem_size);
-               new_count &= 0x1fff;
-       } else
-               new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
-
-       val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
-
-       writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
-               &vpath->vp_reg->prc_rxd_doorbell);
-       readl(&vpath->vp_reg->prc_rxd_doorbell);
-
-       val164 /= 2;
-       val64 = readq(&vpath->vp_reg->prc_cfg6);
-       val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
-       val64 &= 0x1ff;
-
-       /*
-        * Each RxD is of 4 qwords
-        */
-       new_count -= (val64 + 1);
-       val64 = min(val164, new_count) / 4;
-
-       ring->rxds_limit = min(ring->rxds_limit, val64);
-       if (ring->rxds_limit < 4)
-               ring->rxds_limit = 4;
-}
-
-/*
- * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
- * This function is used to close access to virtual path opened
- * earlier.
- */
-enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_virtualpath *vpath = NULL;
-       struct __vxge_hw_device *devh = NULL;
-       u32 vp_id = vp->vpath->vp_id;
-       u32 is_empty = TRUE;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       vpath = vp->vpath;
-       devh = vpath->hldev;
-
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto vpath_close_exit;
-       }
-
-       list_del(&vp->item);
-
-       if (!list_empty(&vpath->vpath_handles)) {
-               list_add(&vp->item, &vpath->vpath_handles);
-               is_empty = FALSE;
-       }
-
-       if (!is_empty) {
-               status = VXGE_HW_FAIL;
-               goto vpath_close_exit;
-       }
-
-       devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
-
-       if (vpath->ringh != NULL)
-               __vxge_hw_ring_delete(vp);
-
-       if (vpath->fifoh != NULL)
-               __vxge_hw_fifo_delete(vp);
-
-       if (vpath->stats_block != NULL)
-               __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
-
-       vfree(vp);
-
-       __vxge_hw_vp_terminate(devh, vp_id);
-
-       spin_lock(&vpath->lock);
-       vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
-       spin_unlock(&vpath->lock);
-
-vpath_close_exit:
-       return status;
-}
-
-/*
- * vxge_hw_vpath_reset - Resets vpath
- * This function is used to request a reset of vpath
- */
-enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
-{
-       enum vxge_hw_status status;
-       u32 vp_id;
-       struct __vxge_hw_virtualpath *vpath = vp->vpath;
-
-       vp_id = vpath->vp_id;
-
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
-       if (status == VXGE_HW_OK)
-               vpath->sw_stats->soft_reset_cnt++;
-exit:
-       return status;
-}
-
-/*
- * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
- * This function poll's for the vpath reset completion and re initializes
- * the vpath.
- */
-enum vxge_hw_status
-vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_virtualpath *vpath = NULL;
-       enum vxge_hw_status status;
-       struct __vxge_hw_device *hldev;
-       u32 vp_id;
-
-       vp_id = vp->vpath->vp_id;
-       vpath = vp->vpath;
-       hldev = vpath->hldev;
-
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_reset_check(vpath);
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       status = __vxge_hw_vpath_initialize(hldev, vp_id);
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       if (vpath->ringh != NULL)
-               __vxge_hw_vpath_prc_configure(hldev, vp_id);
-
-       memset(vpath->hw_stats, 0,
-               sizeof(struct vxge_hw_vpath_stats_hw_info));
-
-       memset(vpath->hw_stats_sav, 0,
-               sizeof(struct vxge_hw_vpath_stats_hw_info));
-
-       writeq(vpath->stats_block->dma_addr,
-               &vpath->vp_reg->stats_cfg);
-
-       status = vxge_hw_vpath_stats_enable(vp);
+               writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+       }
 
-exit:
-       return status;
-}
+       if (config->ring.enable == VXGE_HW_RING_ENABLE) {
 
-/*
- * vxge_hw_vpath_enable - Enable vpath.
- * This routine clears the vpath reset thereby enabling a vpath
- * to start forwarding frames and generating interrupts.
- */
-void
-vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_device *hldev;
-       u64 val64;
+               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
 
-       hldev = vp->vpath->hldev;
+               if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+                                       0x3ffffff);
+                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
+                                       config->rti.btimer_val);
+               }
 
-       val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
-               1 << (16 - vp->vpath->vp_id));
+               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
 
-       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
-               &hldev->common_reg->cmn_rsthdlr_cfg1);
-}
+               if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
+                       if (config->rti.timer_ac_en)
+                               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+                       else
+                               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
+               }
 
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
- * Enable the DMA vpath statistics. The function is to be called to re-enable
- * the adapter to update stats into the host memory
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct __vxge_hw_virtualpath *vpath;
+               if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
+                       if (config->rti.timer_ci_en)
+                               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+                       else
+                               val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+               }
 
-       vpath = vp->vpath;
+               if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
+                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
+                                       config->rti.urange_a);
+               }
 
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto exit;
-       }
+               if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
+                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
+                                       config->rti.urange_b);
+               }
 
-       memcpy(vpath->hw_stats_sav, vpath->hw_stats,
-                       sizeof(struct vxge_hw_vpath_stats_hw_info));
+               if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
+                       val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
+                                       config->rti.urange_c);
+               }
 
-       status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
-       return status;
-}
+               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+               val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- *                           and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-                            u32 operation, u32 offset, u64 *stat)
-{
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
+               if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
+                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
+                                               config->rti.uec_a);
+               }
 
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto vpath_stats_access_exit;
-       }
+               if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
+                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
+                                               config->rti.uec_b);
+               }
 
-       vp_reg = vpath->vp_reg;
+               if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
+                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
+                                               config->rti.uec_c);
+               }
 
-       val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
-                VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
-                VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
+               if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
+                       val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
+                                               config->rti.uec_d);
+               }
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->xmac_stats_access_cmd,
-                               VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
-                               vpath->hldev->config.device_poll_millis);
+               writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
+               val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
 
-       if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
-               *stat = readq(&vp_reg->xmac_stats_access_data);
-       else
-               *stat = 0;
+               if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
+                       if (config->rti.timer_ri_en)
+                               val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+                       else
+                               val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
+               }
 
-vpath_stats_access_exit:
-       return status;
-}
+               if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+                                       0x3ffffff);
+                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
+                                       config->rti.rtimer_val);
+               }
 
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
-       struct __vxge_hw_virtualpath *vpath,
-       struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
-       u64 *val64;
-       int i;
-       u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
-       enum vxge_hw_status status = VXGE_HW_OK;
+               if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
+                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
+                                       config->rti.util_sel);
+               }
 
-       val64 = (u64 *) vpath_tx_stats;
+               if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
+                       val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+                                       0x3ffffff);
+                       val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
+                                       config->rti.ltimer_val);
+               }
 
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto exit;
+               writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
        }
 
-       for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
-               status = __vxge_hw_vpath_stats_access(vpath,
-                                       VXGE_HW_STATS_OP_READ,
-                                       offset, val64);
-               if (status != VXGE_HW_OK)
-                       goto exit;
-               offset++;
-               val64++;
-       }
-exit:
+       val64 = 0;
+       writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+       writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+       writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
+       writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+       writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+       writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+
        return status;
 }
 
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
 {
-       u64 *val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       int i;
-       u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
-       val64 = (u64 *) vpath_rx_stats;
+       struct __vxge_hw_virtualpath *vpath;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct vxge_hw_vp_config *config;
+       u64 val64;
 
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-               goto exit;
-       }
-       for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
-               status = __vxge_hw_vpath_stats_access(vpath,
-                                       VXGE_HW_STATS_OP_READ,
-                                       offset >> 3, val64);
-               if (status != VXGE_HW_OK)
-                       goto exit;
+       vpath = &hldev->virtual_paths[vp_id];
+       vp_reg = vpath->vp_reg;
+       config = vpath->vp_config;
 
-               offset += 8;
-               val64++;
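+       /* set TIMER_CI (continuous interrupt) on the TX timer config if
+        * it is not already enabled */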
+       if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
+           config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+               config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+               val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+               val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+               writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        }
-exit:
-       return status;
 }
 
 /*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ * __vxge_hw_vpath_initialize
+ * This routine is the final phase of init which initializes the
+ * registers of the vpath using the configuration passed.
  */
 static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
-                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
+__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
 {
        u64 val64;
+       u32 val32;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
 
-       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+       vpath = &hldev->virtual_paths[vp_id];
+
+       if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
+               status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
                goto exit;
        }
        vp_reg = vpath->vp_reg;
 
-       val64 = readq(&vp_reg->vpath_debug_stats0);
-       hw_stats->ini_num_mwr_sent =
-               (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
-       val64 = readq(&vp_reg->vpath_debug_stats1);
-       hw_stats->ini_num_mrd_sent =
-               (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+       status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       val64 = readq(&vp_reg->vpath_debug_stats2);
-       hw_stats->ini_num_cpl_rcvd =
-               (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+       status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       val64 = readq(&vp_reg->vpath_debug_stats3);
-       hw_stats->ini_num_mwr_byte_sent =
-               VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+       status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       val64 = readq(&vp_reg->vpath_debug_stats4);
-       hw_stats->ini_num_cpl_byte_rcvd =
-               VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+       status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       val64 = readq(&vp_reg->vpath_debug_stats5);
-       hw_stats->wrcrdtarb_xoff =
-               (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+       val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
 
-       val64 = readq(&vp_reg->vpath_debug_stats6);
-       hw_stats->rdcrdtarb_xoff =
-               (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+       /* Get MRRS value from device control */
+       status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
+       if (status == VXGE_HW_OK) {
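+               /* Max_Read_Request_Size is bits 14:12 of the PCIe Device
+                * Control register, hence the mask and the shift by 12 */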
+               val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
+               val64 &=
+                   ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
+               val64 |=
+                   VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
 
-       val64 = readq(&vp_reg->vpath_genstats_count01);
-       hw_stats->vpath_genstats_count0 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
-               val64);
+               val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
+       }
 
-       val64 = readq(&vp_reg->vpath_genstats_count01);
-       hw_stats->vpath_genstats_count1 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
-               val64);
+       val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
+       val64 |=
+           VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
+                   VXGE_HW_MAX_PAYLOAD_SIZE_512);
 
-       val64 = readq(&vp_reg->vpath_genstats_count23);
-       hw_stats->vpath_genstats_count2 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
-               val64);
+       val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
+       writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
 
-       val64 = readq(&vp_reg->vpath_genstats_count01);
-       hw_stats->vpath_genstats_count3 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
-               val64);
+exit:
+       return status;
+}
 
-       val64 = readq(&vp_reg->vpath_genstats_count4);
-       hw_stats->vpath_genstats_count4 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
-               val64);
+/*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up memory
+ */
+static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+       struct __vxge_hw_virtualpath *vpath;
 
-       val64 = readq(&vp_reg->vpath_genstats_count5);
-       hw_stats->vpath_genstats_count5 =
-       (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
-               val64);
+       vpath = &hldev->virtual_paths[vp_id];
 
-       status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
-       if (status != VXGE_HW_OK)
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
                goto exit;
 
-       status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
-       if (status != VXGE_HW_OK)
-               goto exit;
+       VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+               vpath->hldev->tim_int_mask1, vpath->vp_id);
+       hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
 
-       VXGE_HW_VPATH_STATS_PIO_READ(
-               VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+       memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+exit:
+       return;
+}
 
-       hw_stats->prog_event_vnum0 =
-                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+/*
+ * __vxge_hw_vp_initialize - Initialize Virtual Path structure
+ * This routine is the initial phase of init which resets the vpath and
+ * initializes the software support structures.
+ */
+static enum vxge_hw_status
+__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
+                       struct vxge_hw_vp_config *config)
+{
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-       hw_stats->prog_event_vnum1 =
-                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+       if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
+               status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
+               goto exit;
+       }
 
-       VXGE_HW_VPATH_STATS_PIO_READ(
-               VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+       vpath = &hldev->virtual_paths[vp_id];
 
-       hw_stats->prog_event_vnum2 =
-                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+       spin_lock_init(&hldev->virtual_paths[vp_id].lock);
+       vpath->vp_id = vp_id;
+       vpath->vp_open = VXGE_HW_VP_OPEN;
+       vpath->hldev = hldev;
+       vpath->vp_config = config;
+       vpath->vp_reg = hldev->vpath_reg[vp_id];
+       vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
 
-       hw_stats->prog_event_vnum3 =
-                       (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+       __vxge_hw_vpath_reset(hldev, vp_id);
 
-       val64 = readq(&vp_reg->rx_multi_cast_stats);
-       hw_stats->rx_multi_cast_frame_discard =
-               (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+       status = __vxge_hw_vpath_reset_check(vpath);
+       if (status != VXGE_HW_OK) {
+               memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+               goto exit;
+       }
 
-       val64 = readq(&vp_reg->rx_frm_transferred);
-       hw_stats->rx_frm_transferred =
-               (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+       status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
+       if (status != VXGE_HW_OK) {
+               memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+               goto exit;
+       }
 
-       val64 = readq(&vp_reg->rxd_returned);
-       hw_stats->rxd_returned =
-               (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+       INIT_LIST_HEAD(&vpath->vpath_handles);
 
-       val64 = readq(&vp_reg->dbg_stats_rx_mpa);
-       hw_stats->rx_mpa_len_fail_frms =
-               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
-       hw_stats->rx_mpa_mrk_fail_frms =
-               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
-       hw_stats->rx_mpa_crc_fail_frms =
-               (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+       vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
 
-       val64 = readq(&vp_reg->dbg_stats_rx_fau);
-       hw_stats->rx_permitted_frms =
-               (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
-       hw_stats->rx_vp_reset_discarded_frms =
-       (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
-       hw_stats->rx_wol_frms =
-               (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+       VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
+               hldev->tim_int_mask1, vp_id);
 
-       val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
-       hw_stats->tx_vp_reset_discarded_frms =
-       (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
-               val64);
+       status = __vxge_hw_vpath_initialize(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               __vxge_hw_vp_terminate(hldev, vp_id);
 exit:
        return status;
 }
 
-
-static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
-                                       unsigned long size)
+/*
+ * vxge_hw_vpath_mtu_set - Set MTU.
+ * Set a new MTU value. For example, to enable jumbo frames:
+ * vxge_hw_vpath_mtu_set(my_device, 9600);
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
 {
-       gfp_t flags;
-       void *vaddr;
+       u64 val64;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_virtualpath *vpath;
 
-       if (in_interrupt())
-               flags = GFP_ATOMIC | GFP_DMA;
-       else
-               flags = GFP_KERNEL | GFP_DMA;
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+       vpath = vp->vpath;
 
-       vaddr = kmalloc((size), flags);
+       new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
 
-       vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
+       if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
+               status = VXGE_HW_ERR_INVALID_MTU_SIZE;
+               goto exit;
+       }
 
-static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
-                            struct pci_dev **p_dma_acch)
-{
-       unsigned long misaligned = *(unsigned long *)p_dma_acch;
-       u8 *tmp = (u8 *)vaddr;
-       tmp -= misaligned;
-       kfree((void *)tmp);
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+       val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
+
+       writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+
+       vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
+
+exit:
+       return status;
 }
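+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): new_mtu
+ * is the payload MTU; the MAC header size is added internally before
+ * the range check against the vpath's max_mtu:
+ *
+ *     if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
+ *             netdev_err(ndev, "jumbo MTU rejected\n");
+ */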
 
 /*
- * __vxge_hw_blockpool_create - Create block pool
+ * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
+ * Enable the DMA vpath statistics. Call this function to re-enable the
+ * adapter to update stats into the host memory.
  */
-
 static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-                          struct __vxge_hw_blockpool *blockpool,
-                          u32 pool_size,
-                          u32 pool_max)
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
 {
-       u32 i;
-       struct __vxge_hw_blockpool_entry *entry = NULL;
-       void *memblock;
-       dma_addr_t dma_addr;
-       struct pci_dev *dma_handle;
-       struct pci_dev *acc_handle;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_virtualpath *vpath;
 
-       if (blockpool == NULL) {
-               status = VXGE_HW_FAIL;
-               goto blockpool_create_exit;
-       }
-
-       blockpool->hldev = hldev;
-       blockpool->block_size = VXGE_HW_BLOCK_SIZE;
-       blockpool->pool_size = 0;
-       blockpool->pool_max = pool_max;
-       blockpool->req_out = 0;
-
-       INIT_LIST_HEAD(&blockpool->free_block_list);
-       INIT_LIST_HEAD(&blockpool->free_entry_list);
+       vpath = vp->vpath;
 
-       for (i = 0; i < pool_size + pool_max; i++) {
-               entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
-                               GFP_KERNEL);
-               if (entry == NULL) {
-                       __vxge_hw_blockpool_destroy(blockpool);
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto blockpool_create_exit;
-               }
-               list_add(&entry->item, &blockpool->free_entry_list);
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
        }
 
-       for (i = 0; i < pool_size; i++) {
-
-               memblock = vxge_os_dma_malloc(
-                               hldev->pdev,
-                               VXGE_HW_BLOCK_SIZE,
-                               &dma_handle,
-                               &acc_handle);
+       memcpy(vpath->hw_stats_sav, vpath->hw_stats,
+                       sizeof(struct vxge_hw_vpath_stats_hw_info));
 
-               if (memblock == NULL) {
-                       __vxge_hw_blockpool_destroy(blockpool);
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto blockpool_create_exit;
-               }
+       status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
+exit:
+       return status;
+}
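+
+/*
+ * Sketch (assumption, not part of this patch): because the previous
+ * snapshot is saved in hw_stats_sav before the fresh hardware read, a
+ * caller can derive per-interval deltas:
+ *
+ *     u64 delta = vpath->hw_stats->rx_frm_transferred -
+ *                 vpath->hw_stats_sav->rx_frm_transferred;
+ */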
 
-               dma_addr = pci_map_single(hldev->pdev, memblock,
-                               VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+/*
+ * __vxge_hw_blockpool_block_allocate - Allocates a block from the block pool
+ * This function allocates a block from the block pool or from the system.
+ */
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+{
+       struct __vxge_hw_blockpool_entry *entry = NULL;
+       struct __vxge_hw_blockpool  *blockpool;
 
-               if (unlikely(pci_dma_mapping_error(hldev->pdev,
-                               dma_addr))) {
+       blockpool = &devh->block_pool;
 
-                       vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
-                       __vxge_hw_blockpool_destroy(blockpool);
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto blockpool_create_exit;
-               }
+       if (size == blockpool->block_size) {
 
-               if (!list_empty(&blockpool->free_entry_list))
+               if (!list_empty(&blockpool->free_block_list))
                        entry = (struct __vxge_hw_blockpool_entry *)
-                               list_first_entry(&blockpool->free_entry_list,
+                               list_first_entry(&blockpool->free_block_list,
                                        struct __vxge_hw_blockpool_entry,
                                        item);
 
-               if (entry == NULL)
-                       entry =
-                           kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
-                                       GFP_KERNEL);
                if (entry != NULL) {
                        list_del(&entry->item);
-                       entry->length = VXGE_HW_BLOCK_SIZE;
-                       entry->memblock = memblock;
-                       entry->dma_addr = dma_addr;
-                       entry->acc_handle = acc_handle;
-                       entry->dma_handle = dma_handle;
-                       list_add(&entry->item,
-                                         &blockpool->free_block_list);
-                       blockpool->pool_size++;
-               } else {
-                       __vxge_hw_blockpool_destroy(blockpool);
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto blockpool_create_exit;
+                       blockpool->pool_size--;
                }
        }
 
-blockpool_create_exit:
-       return status;
+       if (entry != NULL)
+               __vxge_hw_blockpool_blocks_add(blockpool);
+
+       return entry;
 }
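+
+/*
+ * Note: entries handed out here are returned through
+ * __vxge_hw_blockpool_block_free(); the __vxge_hw_blockpool_blocks_add
+ * call above requests replacement blocks so the pool stays near its
+ * minimum size.
+ */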
 
 /*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ * vxge_hw_vpath_open - Open a virtual path on a given adapter
+ * This function is used to open access to a virtual path of an
+ * adapter for offload and GRO operations. It returns synchronously.
  */
-
-static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+enum vxge_hw_status
+vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
+                  struct vxge_hw_vpath_attr *attr,
+                  struct __vxge_hw_vpath_handle **vpath_handle)
 {
+       struct __vxge_hw_virtualpath *vpath;
+       struct __vxge_hw_vpath_handle *vp;
+       enum vxge_hw_status status;
 
-       struct __vxge_hw_device *hldev;
-       struct list_head *p, *n;
-       u16 ret;
+       vpath = &hldev->virtual_paths[attr->vp_id];
 
-       if (blockpool == NULL) {
-               ret = 1;
-               goto exit;
+       if (vpath->vp_open == VXGE_HW_VP_OPEN) {
+               status = VXGE_HW_ERR_INVALID_STATE;
+               goto vpath_open_exit1;
        }
 
-       hldev = blockpool->hldev;
-
-       list_for_each_safe(p, n, &blockpool->free_block_list) {
+       status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
+                       &hldev->config.vp_config[attr->vp_id]);
+       if (status != VXGE_HW_OK)
+               goto vpath_open_exit1;
 
-               pci_unmap_single(hldev->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+       vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
+               goto vpath_open_exit2;
+       }
 
-               vxge_os_dma_free(hldev->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->memblock,
-                       &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
+       vp->vpath = vpath;
 
-               list_del(
-                       &((struct __vxge_hw_blockpool_entry *)p)->item);
-               kfree(p);
-               blockpool->pool_size--;
+       if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+               status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
+               if (status != VXGE_HW_OK)
+                       goto vpath_open_exit6;
        }
 
-       list_for_each_safe(p, n, &blockpool->free_entry_list) {
-               list_del(
-                       &((struct __vxge_hw_blockpool_entry *)p)->item);
-               kfree((void *)p);
+       if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
+               status = __vxge_hw_ring_create(vp, &attr->ring_attr);
+               if (status != VXGE_HW_OK)
+                       goto vpath_open_exit7;
+
+               __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
        }
-       ret = 0;
-exit:
-       return;
-}
 
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
-       u32 nreq = 0, i;
+       vpath->fifoh->tx_intr_num =
+               (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
+                       VXGE_HW_VPATH_INTR_TX;
 
-       if ((blockpool->pool_size  +  blockpool->req_out) <
-               VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
-               nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
-               blockpool->req_out += nreq;
+       vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
+                               VXGE_HW_BLOCK_SIZE);
+       if (vpath->stats_block == NULL) {
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
+               goto vpath_open_exit8;
        }
 
-       for (i = 0; i < nreq; i++)
-               vxge_os_dma_malloc_async(
-                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-                       blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
+       vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
+                       stats_block->memblock;
+       memset(vpath->hw_stats, 0,
+               sizeof(struct vxge_hw_vpath_stats_hw_info));
 
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
-       struct list_head *p, *n;
+       hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
+                                               vpath->hw_stats;
 
-       list_for_each_safe(p, n, &blockpool->free_block_list) {
+       vpath->hw_stats_sav =
+               &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
+       memset(vpath->hw_stats_sav, 0,
+                       sizeof(struct vxge_hw_vpath_stats_hw_info));
 
-               if (blockpool->pool_size < blockpool->pool_max)
-                       break;
+       writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
 
-               pci_unmap_single(
-                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+       status = vxge_hw_vpath_stats_enable(vp);
+       if (status != VXGE_HW_OK)
+               goto vpath_open_exit8;
 
-               vxge_os_dma_free(
-                       ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->memblock,
-                       &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+       list_add(&vp->item, &vpath->vpath_handles);
 
-               list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+       hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
 
-               list_add(p, &blockpool->free_entry_list);
+       *vpath_handle = vp;
 
-               blockpool->pool_size--;
+       attr->fifo_attr.userdata = vpath->fifoh;
+       attr->ring_attr.userdata = vpath->ringh;
 
-       }
+       return VXGE_HW_OK;
+
+vpath_open_exit8:
+       if (vpath->ringh != NULL)
+               __vxge_hw_ring_delete(vp);
+vpath_open_exit7:
+       if (vpath->fifoh != NULL)
+               __vxge_hw_fifo_delete(vp);
+vpath_open_exit6:
+       vfree(vp);
+vpath_open_exit2:
+       __vxge_hw_vp_terminate(hldev, attr->vp_id);
+vpath_open_exit1:
+
+       return status;
 }
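+
+/*
+ * Usage sketch (hypothetical caller; attribute setup abbreviated):
+ * fill in vp_id plus the fifo/ring attributes, then open and, when
+ * done, close the handle:
+ *
+ *     struct vxge_hw_vpath_attr attr = { .vp_id = 0 };
+ *     struct __vxge_hw_vpath_handle *vp;
+ *
+ *     if (vxge_hw_vpath_open(hldev, &attr, &vp) == VXGE_HW_OK)
+ *             vxge_hw_vpath_close(vp);
+ */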
 
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
+/**
+ * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell for a vpath
+ * @vp: Handle got from previous vpath open
+ *
+ * This function posts the initial count of receive descriptors to the
+ * vpath's PRC doorbell and derives the ring's RxD replenish limit.
  */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
-                                       void *block_addr,
-                                       u32 length,
-                                       struct pci_dev *dma_h,
-                                       struct pci_dev *acc_handle)
+void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_blockpool  *blockpool;
-       struct __vxge_hw_blockpool_entry  *entry = NULL;
-       dma_addr_t dma_addr;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       u32 req_out;
+       struct __vxge_hw_virtualpath *vpath = vp->vpath;
+       struct __vxge_hw_ring *ring = vpath->ringh;
+       struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
+       u64 new_count, val64, val164;
 
-       blockpool = &devh->block_pool;
+       if (vdev->titan1) {
+               new_count = readq(&vpath->vp_reg->rxdmem_size);
+               new_count &= 0x1fff;
+       } else
+               new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
 
-       if (block_addr == NULL) {
-               blockpool->req_out--;
-               status = VXGE_HW_FAIL;
-               goto exit;
-       }
+       val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
 
-       dma_addr = pci_map_single(devh->pdev, block_addr, length,
-                               PCI_DMA_BIDIRECTIONAL);
+       writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
+               &vpath->vp_reg->prc_rxd_doorbell);
+       readl(&vpath->vp_reg->prc_rxd_doorbell);
 
-       if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+       val164 /= 2;
+       val64 = readq(&vpath->vp_reg->prc_cfg6);
+       val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
+       val64 &= 0x1ff;
 
-               vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
-               blockpool->req_out--;
-               status = VXGE_HW_FAIL;
-               goto exit;
-       }
+       /*
+        * Each RxD occupies 4 qwords
+        */
+       new_count -= (val64 + 1);
+       val64 = min(val164, new_count) / 4;
 
+       ring->rxds_limit = min(ring->rxds_limit, val64);
+       if (ring->rxds_limit < 4)
+               ring->rxds_limit = 4;
+}
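+
+/*
+ * Note on the arithmetic above: the doorbell is posted in qwords and
+ * each RxD occupies 4 qwords, so the replenish limit is the usable
+ * qword count (less the RXD_SPAT threshold) divided by 4, floored at
+ * 4 RxDs.
+ */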
 
-       if (!list_empty(&blockpool->free_entry_list))
-               entry = (struct __vxge_hw_blockpool_entry *)
-                       list_first_entry(&blockpool->free_entry_list,
-                               struct __vxge_hw_blockpool_entry,
-                               item);
+/*
+ * __vxge_hw_blockpool_block_free - Frees a block back to the block pool
+ * @devh: HAL device
+ * @entry: Entry of block to be freed
+ *
+ * This function returns a block to the block pool.
+ */
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
+                              struct __vxge_hw_blockpool_entry *entry)
+{
+       struct __vxge_hw_blockpool  *blockpool;
 
-       if (entry == NULL)
-               entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
-       else
-               list_del(&entry->item);
+       blockpool = &devh->block_pool;
 
-       if (entry != NULL) {
-               entry->length = length;
-               entry->memblock = block_addr;
-               entry->dma_addr = dma_addr;
-               entry->acc_handle = acc_handle;
-               entry->dma_handle = dma_h;
+       if (entry->length == blockpool->block_size) {
                list_add(&entry->item, &blockpool->free_block_list);
                blockpool->pool_size++;
-               status = VXGE_HW_OK;
-       } else
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
-       blockpool->req_out--;
+       }
 
-       req_out = blockpool->req_out;
-exit:
-       return;
+       __vxge_hw_blockpool_blocks_remove(blockpool);
 }
 
 /*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
+ * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
+ * This function is used to close access to a virtual path opened
+ * earlier.
  */
-static void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
-                               struct vxge_hw_mempool_dma *dma_object)
+enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_blockpool_entry *entry = NULL;
-       struct __vxge_hw_blockpool  *blockpool;
-       void *memblock = NULL;
+       struct __vxge_hw_virtualpath *vpath = NULL;
+       struct __vxge_hw_device *devh = NULL;
+       u32 vp_id = vp->vpath->vp_id;
+       u32 is_empty = TRUE;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       blockpool = &devh->block_pool;
+       vpath = vp->vpath;
+       devh = vpath->hldev;
 
-       if (size != blockpool->block_size) {
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto vpath_close_exit;
+       }
 
-               memblock = vxge_os_dma_malloc(devh->pdev, size,
-                                               &dma_object->handle,
-                                               &dma_object->acc_handle);
+       list_del(&vp->item);
 
-               if (memblock == NULL) {
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto exit;
-               }
+       if (!list_empty(&vpath->vpath_handles)) {
+               list_add(&vp->item, &vpath->vpath_handles);
+               is_empty = FALSE;
+       }
 
-               dma_object->addr = pci_map_single(devh->pdev, memblock, size,
-                                       PCI_DMA_BIDIRECTIONAL);
+       if (!is_empty) {
+               status = VXGE_HW_FAIL;
+               goto vpath_close_exit;
+       }
 
-               if (unlikely(pci_dma_mapping_error(devh->pdev,
-                               dma_object->addr))) {
-                       vxge_os_dma_free(devh->pdev, memblock,
-                               &dma_object->acc_handle);
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
-                       goto exit;
-               }
+       devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
 
-       } else {
+       if (vpath->ringh != NULL)
+               __vxge_hw_ring_delete(vp);
 
-               if (!list_empty(&blockpool->free_block_list))
-                       entry = (struct __vxge_hw_blockpool_entry *)
-                               list_first_entry(&blockpool->free_block_list,
-                                       struct __vxge_hw_blockpool_entry,
-                                       item);
+       if (vpath->fifoh != NULL)
+               __vxge_hw_fifo_delete(vp);
 
-               if (entry != NULL) {
-                       list_del(&entry->item);
-                       dma_object->addr = entry->dma_addr;
-                       dma_object->handle = entry->dma_handle;
-                       dma_object->acc_handle = entry->acc_handle;
-                       memblock = entry->memblock;
+       if (vpath->stats_block != NULL)
+               __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
 
-                       list_add(&entry->item,
-                               &blockpool->free_entry_list);
-                       blockpool->pool_size--;
-               }
+       vfree(vp);
 
-               if (memblock != NULL)
-                       __vxge_hw_blockpool_blocks_add(blockpool);
-       }
-exit:
-       return memblock;
+       __vxge_hw_vp_terminate(devh, vp_id);
+
+       spin_lock(&vpath->lock);
+       vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+       spin_unlock(&vpath->lock);
+
+vpath_close_exit:
+       return status;
 }
 
 /*
- * __vxge_hw_blockpool_free - Frees the memory allcoated with
-                               __vxge_hw_blockpool_malloc
+ * vxge_hw_vpath_reset - Resets vpath
+ * This function is used to request a reset of the vpath.
  */
-static void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
-                       void *memblock, u32 size,
-                       struct vxge_hw_mempool_dma *dma_object)
+enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_blockpool_entry *entry = NULL;
-       struct __vxge_hw_blockpool  *blockpool;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       blockpool = &devh->block_pool;
-
-       if (size != blockpool->block_size) {
-               pci_unmap_single(devh->pdev, dma_object->addr, size,
-                       PCI_DMA_BIDIRECTIONAL);
-               vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
-       } else {
-
-               if (!list_empty(&blockpool->free_entry_list))
-                       entry = (struct __vxge_hw_blockpool_entry *)
-                               list_first_entry(&blockpool->free_entry_list,
-                                       struct __vxge_hw_blockpool_entry,
-                                       item);
-
-               if (entry == NULL)
-                       entry = vmalloc(sizeof(
-                                       struct __vxge_hw_blockpool_entry));
-               else
-                       list_del(&entry->item);
+       enum vxge_hw_status status;
+       u32 vp_id;
+       struct __vxge_hw_virtualpath *vpath = vp->vpath;
 
-               if (entry != NULL) {
-                       entry->length = size;
-                       entry->memblock = memblock;
-                       entry->dma_addr = dma_object->addr;
-                       entry->acc_handle = dma_object->acc_handle;
-                       entry->dma_handle = dma_object->handle;
-                       list_add(&entry->item,
-                                       &blockpool->free_block_list);
-                       blockpool->pool_size++;
-                       status = VXGE_HW_OK;
-               } else
-                       status = VXGE_HW_ERR_OUT_OF_MEMORY;
+       vp_id = vpath->vp_id;
 
-               if (status == VXGE_HW_OK)
-                       __vxge_hw_blockpool_blocks_remove(blockpool);
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
        }
+
+       status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
+       if (status == VXGE_HW_OK)
+               vpath->sw_stats->soft_reset_cnt++;
+exit:
+       return status;
 }
 
 /*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
+ * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
+ * This function polls for the vpath reset completion and re-initializes
+ * the vpath.
  */
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+enum vxge_hw_status
+vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_blockpool_entry *entry = NULL;
-       struct __vxge_hw_blockpool  *blockpool;
+       struct __vxge_hw_virtualpath *vpath = NULL;
+       enum vxge_hw_status status;
+       struct __vxge_hw_device *hldev;
+       u32 vp_id;
 
-       blockpool = &devh->block_pool;
+       vp_id = vp->vpath->vp_id;
+       vpath = vp->vpath;
+       hldev = vpath->hldev;
 
-       if (size == blockpool->block_size) {
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
 
-               if (!list_empty(&blockpool->free_block_list))
-                       entry = (struct __vxge_hw_blockpool_entry *)
-                               list_first_entry(&blockpool->free_block_list,
-                                       struct __vxge_hw_blockpool_entry,
-                                       item);
+       status = __vxge_hw_vpath_reset_check(vpath);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-               if (entry != NULL) {
-                       list_del(&entry->item);
-                       blockpool->pool_size--;
-               }
-       }
+       status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       if (entry != NULL)
-               __vxge_hw_blockpool_blocks_add(blockpool);
+       status = __vxge_hw_vpath_initialize(hldev, vp_id);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
-       return entry;
+       if (vpath->ringh != NULL)
+               __vxge_hw_vpath_prc_configure(hldev, vp_id);
+
+       memset(vpath->hw_stats, 0,
+               sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+       memset(vpath->hw_stats_sav, 0,
+               sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+       writeq(vpath->stats_block->dma_addr,
+               &vpath->vp_reg->stats_cfg);
+
+       status = vxge_hw_vpath_stats_enable(vp);
+
+exit:
+       return status;
 }
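+
+/*
+ * Typical recovery sequence (sketch, mirroring the driver's reset
+ * path): request the reset, poll for completion, then re-enable:
+ *
+ *     vxge_hw_vpath_reset(vp);
+ *     ...
+ *     vxge_hw_vpath_recover_from_reset(vp);
+ *     vxge_hw_vpath_enable(vp);
+ */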
 
 /*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
+ * vxge_hw_vpath_enable - Enable vpath.
+ * This routine clears the vpath reset, thereby enabling the vpath
+ * to start forwarding frames and generating interrupts.
  */
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
-                       struct __vxge_hw_blockpool_entry *entry)
+void
+vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_blockpool  *blockpool;
+       struct __vxge_hw_device *hldev;
+       u64 val64;
 
-       blockpool = &devh->block_pool;
+       hldev = vp->vpath->hldev;
 
-       if (entry->length == blockpool->block_size) {
-               list_add(&entry->item, &blockpool->free_block_list);
-               blockpool->pool_size++;
-       }
+       val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
+               1 << (16 - vp->vpath->vp_id));
 
-       __vxge_hw_blockpool_blocks_remove(blockpool);
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+               &hldev->common_reg->cmn_rsthdlr_cfg1);
 }
index 5b2c8313426d3c7d7d051f4f51f020746d2af936..e249e288d160af59634a0c60c7b55319ab8b7469 100644 (file)
@@ -314,9 +314,9 @@ struct vxge_hw_ring_config {
 #define VXGE_HW_RING_DEFAULT                                   1
 
        u32                             ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS                                1
-#define VXGE_HW_MAX_RING_BLOCKS                                128
-#define VXGE_HW_DEF_RING_BLOCKS                                2
+#define VXGE_HW_MIN_RING_BLOCKS                                        1
+#define VXGE_HW_MAX_RING_BLOCKS                                        128
+#define VXGE_HW_DEF_RING_BLOCKS                                        2
 
        u32                             buffer_mode;
 #define VXGE_HW_RING_RXD_BUFFER_MODE_1                         1
@@ -700,7 +700,7 @@ struct __vxge_hw_virtualpath {
  *
  * This structure is used to store the callback information.
  */
-struct __vxge_hw_vpath_handle{
+struct __vxge_hw_vpath_handle {
        struct list_head        item;
        struct __vxge_hw_virtualpath    *vpath;
 };
@@ -815,8 +815,8 @@ struct vxge_hw_device_hw_info {
        u8              serial_number[VXGE_HW_INFO_LEN];
        u8              part_number[VXGE_HW_INFO_LEN];
        u8              product_desc[VXGE_HW_INFO_LEN];
-       u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-       u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+       u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+       u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
 };
 
 /**
@@ -863,20 +863,10 @@ struct vxge_hw_device_attr {
                                loc, \
                                offset, \
                                &val64);                        \
-                                                               \
        if (status != VXGE_HW_OK)                               \
                return status;                                          \
 }
 
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {                         \
-       status = __vxge_hw_vpath_stats_access(vpath, \
-                       VXGE_HW_STATS_OP_READ, \
-                       offset, \
-                       &val64);                                        \
-       if (status != VXGE_HW_OK)                                       \
-               return status;                                          \
-}
-
 /*
  * struct __vxge_hw_ring - Ring channel.
  * @channel: Channel "base" of this ring, the common part of all HW
@@ -1148,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper {
  *             lookup to determine the transmit port.
  *             01: Send on physical Port1.
  *             10: Send on physical Port0.
- *              11: Send on both ports.
+ *             11: Send on both ports.
  *             Bits 18 to 21 - Reserved
  *             Bits 22 to 23 - Gather_Code. This field is set by the host and
  *             is used to describe how individual buffers comprise a frame.
@@ -1927,6 +1917,15 @@ out:
        return vaddr;
 }
 
+static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+                       struct pci_dev **p_dma_acch)
+{
+       unsigned long misaligned = *(unsigned long *)p_dma_acch;
+       u8 *tmp = (u8 *)vaddr;
+       tmp -= misaligned;
+       kfree((void *)tmp);
+}
+
 /*
  * __vxge_hw_mempool_item_priv - will return pointer on per item private space
  */
@@ -1996,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
 
-
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
index 4877b3b8a29e9f8ba92a374266bd9d8519449c0c..70c327910f0907bc7f8e93725e572779e41c0e13 100644 (file)
@@ -84,15 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
 
 static struct vxge_drv_config *driver_config;
 
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-                                            struct macInfo *mac);
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-                                            struct macInfo *mac);
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -149,8 +140,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
  * This function is called during interrupt context to notify link up state
  * change.
  */
-static void
-vxge_callback_link_up(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
        struct vxgedev *vdev = netdev_priv(dev);
@@ -173,8 +163,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
  * This function is called during interrupt context to notify link down state
  * change.
  */
-static void
-vxge_callback_link_down(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
        struct vxgedev *vdev = netdev_priv(dev);
@@ -196,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
  *
  * Allocate SKB.
  */
-static struct sk_buff*
+static struct sk_buff *
 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
 {
        struct net_device    *dev;
@@ -414,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 
                prefetch((char *)skb + L1_CACHE_BYTES);
                if (unlikely(t_code)) {
-
                        if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
                                VXGE_HW_OK) {
 
@@ -437,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                }
 
                if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
-
                        if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
-
                                if (!vxge_rx_map(dtr, ring)) {
                                        skb_put(skb, pkt_length);
 
@@ -678,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
        return FALSE;
 }
 
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+       struct vxge_mac_addrs *new_mac_entry;
+       u8 *mac_address = NULL;
+
+       if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+               return TRUE;
+
+       new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+       if (!new_mac_entry) {
+               vxge_debug_mem(VXGE_ERR,
+                       "%s: memory allocation failed",
+                       VXGE_DRIVER_NAME);
+               return FALSE;
+       }
+
+       list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+       /* Copy the new mac address to the list */
+       mac_address = (u8 *)&new_mac_entry->macaddr;
+       memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+       new_mac_entry->state = mac->state;
+       vpath->mac_addr_cnt++;
+
+       /* Is this a multicast address */
+       if (0x01 & mac->macaddr[0])
+               vpath->mcast_addr_cnt++;
+
+       return TRUE;
+}
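+
+/*
+ * Note: the low-bit test above matches the kernel's
+ * is_multicast_ether_addr() helper from <linux/etherdevice.h>.
+ */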
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_vpath *vpath;
+       enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+       if (0x01 & mac->macaddr[0]) /* multicast address */
+               duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+       else
+               duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+       vpath = &vdev->vpaths[mac->vpath_no];
+       status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+                                               mac->macmask, duplicate_mode);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                       "DA config add entry failed for vpath:%d",
+                       vpath->device_id);
+       } else
+               if (FALSE == vxge_mac_list_add(vpath, mac))
+                       status = -EPERM;
+
+       return status;
+}
+
 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
 {
        struct macInfo mac_info;
@@ -1023,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
                "%s:%d  Exiting...", __func__, __LINE__);
 }
 
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+       struct list_head *entry, *next;
+       u64 del_mac = 0;
+       u8 *mac_address = (u8 *) (&del_mac);
+
+       /* Copy the mac address to delete from the list */
+       memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+       list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+               if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
+                       list_del(entry);
+                       kfree((struct vxge_mac_addrs *)entry);
+                       vpath->mac_addr_cnt--;
+
+                       /* Is this a multicast address */
+                       if (0x01 & mac->macaddr[0])
+                               vpath->mcast_addr_cnt--;
+                       return TRUE;
+               }
+       }
+
+       return FALSE;
+}
+
+/* delete a mac address from DA table */
+static enum vxge_hw_status
+vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_vpath *vpath;
+
+       vpath = &vdev->vpaths[mac->vpath_no];
+       status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
+                                               mac->macmask);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                       "DA config delete entry failed for vpath:%d",
+                       vpath->device_id);
+       } else
+               vxge_mac_list_del(vpath, mac);
+       return status;
+}
+
 /**
  * vxge_set_multicast
  * @dev: pointer to the device structure
@@ -1333,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
        }
 }
 
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       unsigned char macmask[ETH_ALEN];
+       unsigned char macaddr[ETH_ALEN];
+
+       status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+                               macaddr, macmask);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                       "DA config list entry failed for vpath:%d",
+                       vpath->device_id);
+               return status;
+       }
+
+       while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+               status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+                               macaddr, macmask);
+               if (status != VXGE_HW_OK)
+                       break;
+       }
+
+       return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct macInfo mac_info;
+       u8 *mac_address = NULL;
+       struct list_head *entry, *next;
+
+       memset(&mac_info, 0, sizeof(struct macInfo));
+
+       if (vpath->is_open) {
+               list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+                       mac_address =
+                               (u8 *)&
+                               ((struct vxge_mac_addrs *)entry)->macaddr;
+                       memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+                       ((struct vxge_mac_addrs *)entry)->state =
+                               VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+                       /* does this mac address already exist in da table? */
+                       status = vxge_search_mac_addr_in_da_table(vpath,
+                               &mac_info);
+                       if (status != VXGE_HW_OK) {
+                               /* Add this mac address to the DA table */
+                               status = vxge_hw_vpath_mac_addr_add(
+                                       vpath->handle, mac_info.macaddr,
+                                       mac_info.macmask,
+                                   VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+                               if (status != VXGE_HW_OK) {
+                                       vxge_debug_init(VXGE_ERR,
+                                           "DA add entry failed for vpath:%d",
+                                           vpath->device_id);
+                                       ((struct vxge_mac_addrs *)entry)->state
+                                               = VXGE_LL_MAC_ADDR_IN_LIST;
+                               }
+                       }
+               }
+       }
+
+       return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxgedev *vdev = vpath->vdev;
+       u16 vid;
+
+       if (vdev->vlgrp && vpath->is_open) {
+
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
+                       if (!vlan_group_get_device(vdev->vlgrp, vid))
+                               continue;
+                       /* Add these vlan to the vid table */
+                       status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+               }
+       }
+
+       return status;
+}
+
 /*
  * vxge_reset_vpath
  * @vdev: pointer to vdev
@@ -1745,7 +1923,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                                vdev->config.rth_algorithm,
                                &hash_types,
                                vdev->config.rth_bkt_sz);
-
                 if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR,
                                "RTH configuration failed for vpath:%d",
@@ -1757,199 +1934,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
        return status;
 }
 
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-       struct vxge_mac_addrs *new_mac_entry;
-       u8 *mac_address = NULL;
-
-       if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
-               return TRUE;
-
-       new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
-       if (!new_mac_entry) {
-               vxge_debug_mem(VXGE_ERR,
-                       "%s: memory allocation failed",
-                       VXGE_DRIVER_NAME);
-               return FALSE;
-       }
-
-       list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
-       /* Copy the new mac address to the list */
-       mac_address = (u8 *)&new_mac_entry->macaddr;
-       memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-       new_mac_entry->state = mac->state;
-       vpath->mac_addr_cnt++;
-
-       /* Is this a multicast address */
-       if (0x01 & mac->macaddr[0])
-               vpath->mcast_addr_cnt++;
-
-       return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-                                            struct macInfo *mac)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_vpath *vpath;
-       enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
-       if (0x01 & mac->macaddr[0]) /* multicast address */
-               duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
-       else
-               duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
-       vpath = &vdev->vpaths[mac->vpath_no];
-       status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
-                                               mac->macmask, duplicate_mode);
-       if (status != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "DA config add entry failed for vpath:%d",
-                       vpath->device_id);
-       } else
-               if (FALSE == vxge_mac_list_add(vpath, mac))
-                       status = -EPERM;
-
-       return status;
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-       struct list_head *entry, *next;
-       u64 del_mac = 0;
-       u8 *mac_address = (u8 *)(&del_mac);
-
-       /* Copy the mac address to delete from the list */
-       memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-       list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-               if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
-                       list_del(entry);
-                       kfree((struct vxge_mac_addrs *)entry);
-                       vpath->mac_addr_cnt--;
-
-                       /* Is this a multicast address */
-                       if (0x01 & mac->macaddr[0])
-                               vpath->mcast_addr_cnt--;
-                       return TRUE;
-               }
-       }
-
-       return FALSE;
-}
-/* delete a mac address from DA table */
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-                                            struct macInfo *mac)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_vpath *vpath;
-
-       vpath = &vdev->vpaths[mac->vpath_no];
-       status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
-                                               mac->macmask);
-       if (status != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "DA config delete entry failed for vpath:%d",
-                       vpath->device_id);
-       } else
-               vxge_mac_list_del(vpath, mac);
-       return status;
-}
-
-/* list all mac addresses from DA table */
-enum vxge_hw_status
-static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
-                                       struct macInfo *mac)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       unsigned char macmask[ETH_ALEN];
-       unsigned char macaddr[ETH_ALEN];
-
-       status = vxge_hw_vpath_mac_addr_get(vpath->handle,
-                               macaddr, macmask);
-       if (status != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "DA config list entry failed for vpath:%d",
-                       vpath->device_id);
-               return status;
-       }
-
-       while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
-
-               status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
-                               macaddr, macmask);
-               if (status != VXGE_HW_OK)
-                       break;
-       }
-
-       return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxgedev *vdev = vpath->vdev;
-       u16 vid;
-
-       if (vdev->vlgrp && vpath->is_open) {
-
-               for (vid = 0; vid < VLAN_N_VID; vid++) {
-                       if (!vlan_group_get_device(vdev->vlgrp, vid))
-                               continue;
-                       /* Add these vlan to the vid table */
-                       status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-               }
-       }
-
-       return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct macInfo mac_info;
-       u8 *mac_address = NULL;
-       struct list_head *entry, *next;
-
-       memset(&mac_info, 0, sizeof(struct macInfo));
-
-       if (vpath->is_open) {
-
-               list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-                       mac_address =
-                               (u8 *)&
-                               ((struct vxge_mac_addrs *)entry)->macaddr;
-                       memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-                       ((struct vxge_mac_addrs *)entry)->state =
-                               VXGE_LL_MAC_ADDR_IN_DA_TABLE;
-                       /* does this mac address already exist in da table? */
-                       status = vxge_search_mac_addr_in_da_table(vpath,
-                               &mac_info);
-                       if (status != VXGE_HW_OK) {
-                               /* Add this mac address to the DA table */
-                               status = vxge_hw_vpath_mac_addr_add(
-                                       vpath->handle, mac_info.macaddr,
-                                       mac_info.macmask,
-                                   VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
-                               if (status != VXGE_HW_OK) {
-                                       vxge_debug_init(VXGE_ERR,
-                                           "DA add entry failed for vpath:%d",
-                                           vpath->device_id);
-                                       ((struct vxge_mac_addrs *)entry)->state
-                                               = VXGE_LL_MAC_ADDR_IN_LIST;
-                               }
-                       }
-               }
-       }
-
-       return status;
-}
-
 /* reset vpaths */
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
@@ -2042,6 +2026,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 
                vpath->ring.ndev = vdev->ndev;
                vpath->ring.pdev = vdev->pdev;
+
                status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
                if (status == VXGE_HW_OK) {
                        vpath->fifo.handle =
@@ -2070,11 +2055,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                        vdev->stats.vpaths_open++;
                } else {
                        vdev->stats.vpath_open_fail++;
-                       vxge_debug_init(VXGE_ERR,
-                               "%s: vpath: %d failed to open "
-                               "with status: %d",
-                           vdev->ndev->name, vpath->device_id,
-                               status);
+                       vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+                                       "open with status: %d",
+                                       vdev->ndev->name, vpath->device_id,
+                                       status);
                        vxge_close_vpaths(vdev, 0);
                        return -EPERM;
                }
@@ -2082,6 +2066,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                vp_id = vpath->handle->vpath->vp_id;
                vdev->vpaths_deployed |= vxge_mBIT(vp_id);
        }
+
        return VXGE_HW_OK;
 }
 
@@ -2114,8 +2099,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        if (unlikely(!is_vxge_card_up(vdev)))
                return IRQ_HANDLED;
 
-       status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
-                       &reason);
+       status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
        if (status == VXGE_HW_OK) {
                vxge_hw_device_mask_all(hldev);
 
@@ -2568,8 +2552,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-static int
-vxge_open(struct net_device *dev)
+static int vxge_open(struct net_device *dev)
 {
        enum vxge_hw_status status;
        struct vxgedev *vdev;
@@ -2578,6 +2561,7 @@ vxge_open(struct net_device *dev)
        int ret = 0;
        int i;
        u64 val64, function_mode;
+
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", dev->name, __func__, __LINE__);
 
@@ -2830,7 +2814,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
                                        struct vxge_hw_mrpcim_reg,
                                        rts_mgr_cbasin_cfg),
                                &val64);
-
                if (status == VXGE_HW_OK) {
                        val64 &= ~vpath_vector;
                        status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2914,8 +2897,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-static int
-vxge_close(struct net_device *dev)
+static int vxge_close(struct net_device *dev)
 {
        do_vxge_close(dev, 1);
        return 0;
@@ -2989,9 +2971,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
                net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
                net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
                net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
-               net_stats->rx_dropped +=
-                       vdev->vpaths[k].ring.stats.rx_dropped;
-
+               net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
                net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
                net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
                net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -3264,15 +3244,12 @@ static const struct net_device_ops vxge_netdev_ops = {
        .ndo_start_xmit         = vxge_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = vxge_set_multicast,
-
        .ndo_do_ioctl           = vxge_ioctl,
-
        .ndo_set_mac_address    = vxge_set_mac_addr,
        .ndo_change_mtu         = vxge_change_mtu,
        .ndo_vlan_rx_register   = vxge_vlan_rx_register,
        .ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
        .ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
-
        .ndo_tx_timeout         = vxge_tx_watchdog,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = vxge_netpoll,
@@ -3698,9 +3675,9 @@ static int __devinit vxge_config_vpaths(
                device_config->vp_config[i].tti.timer_ac_en =
                                VXGE_HW_TIM_TIMER_AC_ENABLE;
 
-               /* For msi-x with napi (each vector
-               has a handler of its own) -
-               Set CI to OFF for all vpaths */
+               /* For msi-x with napi (each vector has a handler of its own) -
+                * Set CI to OFF for all vpaths
+                */
                device_config->vp_config[i].tti.timer_ci_en =
                        VXGE_HW_TIM_TIMER_CI_DISABLE;
 
@@ -3730,10 +3707,13 @@ static int __devinit vxge_config_vpaths(
 
                device_config->vp_config[i].ring.ring_blocks  =
                                                VXGE_HW_DEF_RING_BLOCKS;
+
                device_config->vp_config[i].ring.buffer_mode =
                        VXGE_HW_RING_RXD_BUFFER_MODE_1;
+
                device_config->vp_config[i].ring.rxds_limit  =
                                VXGE_HW_DEF_RING_RXDS_LIMIT;
+
                device_config->vp_config[i].ring.scatter_mode =
                                        VXGE_HW_RING_SCATTER_MODE_A;
 
@@ -3813,6 +3793,7 @@ static void __devinit vxge_device_config_init(
                device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
                break;
        }
+
        /* Timer period between device poll */
        device_config->device_poll_millis = VXGE_TIMER_DELAY;
 
@@ -3824,16 +3805,10 @@ static void __devinit vxge_device_config_init(
 
        vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
                        __func__);
-       vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
-                       device_config->dma_blockpool_initial);
-       vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
-                       device_config->dma_blockpool_max);
        vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
                        device_config->intr_mode);
        vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
                        device_config->device_poll_millis);
-       vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
-                       device_config->rts_mac_en);
        vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
                        device_config->rth_en);
        vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -4013,7 +3988,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
        }
 
        pci_set_master(pdev);
-       vxge_reset(vdev);
+       do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 
        return PCI_ERS_RESULT_RECOVERED;
 }
@@ -4244,9 +4219,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        attr.pdev = pdev;
 
        /* In SRIOV-17 mode, functions of the same adapter
-        * can be deployed on different buses */
-       if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
-               (device != PCI_SLOT(pdev->devfn))))
+        * can be deployed on different buses
+        */
+       if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
+           !pdev->is_virtfn)
                new_device = 1;
 
        bus = pdev->bus->number;
@@ -4264,6 +4240,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                driver_config->config_dev_cnt = 0;
                driver_config->total_dev_cnt = 0;
        }
+
        /* Now making the CPU based no of vpath calculation
         * applicable for individual functions as well.
         */
@@ -4286,11 +4263,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit0;
        }
 
-       ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+       ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
        if (!ll_config) {
                ret = -ENOMEM;
                vxge_debug_init(VXGE_ERR,
-                       "ll_config : malloc failed %s %d",
+                       "device_config : malloc failed %s %d",
                        __FILE__, __LINE__);
                goto _exit0;
        }
@@ -4746,6 +4723,10 @@ vxge_starter(void)
                return -ENOMEM;
 
        ret = pci_register_driver(&vxge_driver);
+       if (ret) {
+               kfree(driver_config);
+               goto err;
+       }
 
        if (driver_config->config_dev_cnt &&
           (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4753,10 +4734,7 @@ vxge_starter(void)
                        "%s: Configured %d of %d devices",
                        VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
                        driver_config->total_dev_cnt);
-
-       if (ret)
-               kfree(driver_config);
-
+err:
        return ret;
 }
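
A note on the hunk above: the old flow printed the success-only "Configured
N of M devices" message even when pci_register_driver() failed, and only
freed driver_config afterwards; the new flow releases the config and bails
out immediately. A minimal user-space sketch of the same
allocate/register/free-on-failure shape (all names here are hypothetical,
not the driver's):

	#include <stdlib.h>

	struct cfg { int config_dev_cnt, total_dev_cnt; };

	/* stand-in for pci_register_driver(): 0 on success, negative on failure */
	static int register_driver(void) { return 0; }

	static int starter(void)
	{
		struct cfg *c = calloc(1, sizeof(*c));
		int ret;

		if (!c)
			return -1;	/* would be -ENOMEM in the driver */

		ret = register_driver();
		if (ret) {
			free(c);	/* undo the allocation before bailing out */
			return ret;
		}
		/* success-only reporting happens here */
		return 0;
	}
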
 
index 953cb0ded3e1897058b0e99724f9026e45d161da..256d5b406a671203b3f3b2a3092e9278b242dde1 100644 (file)
@@ -305,8 +305,8 @@ struct vxge_vpath {
        int is_configured;
        int is_open;
        struct vxgedev *vdev;
-       u8 (macaddr)[ETH_ALEN];
-       u8 (macmask)[ETH_ALEN];
+       u8 macaddr[ETH_ALEN];
+       u8 macmask[ETH_ALEN];
 
 #define VXGE_MAX_LEARN_MAC_ADDR_CNT    2048
        /* mac addresses currently programmed into NIC */
@@ -420,10 +420,8 @@ struct vxge_tx_priv {
                mod_timer(&timer, (jiffies + exp)); \
        } while (0);
 
-extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
-
+void vxge_initialize_ethtool_ops(struct net_device *ndev);
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
 
 /**
index 4bdb611a6842dd6c5bd6a0f7a2a1893c254869f5..42cc29843ac798c00ece16da7bcee905f45a916a 100644 (file)
 #include "vxge-config.h"
 #include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
-                             u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-                             u32 skip_alarms);
-
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -418,151 +411,6 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
        val32 = readl(&hldev->common_reg->titan_general_int_status);
 }
 
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- *     general_int_status register.
- *
- * The function        performs two actions, It first checks whether (shared IRQ) the
- * interrupt was raised        by the device. Next, it masks the device interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are potentially possible.
- *
- * Returns: 0, if the interrupt        is not "ours" (note that in this case the
- * device remain enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
- * status.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
-                                            u32 skip_alarms, u64 *reason)
-{
-       u32 i;
-       u64 val64;
-       u64 adapter_status;
-       u64 vpath_mask;
-       enum vxge_hw_status ret = VXGE_HW_OK;
-
-       val64 = readq(&hldev->common_reg->titan_general_int_status);
-
-       if (unlikely(!val64)) {
-               /* not Titan interrupt  */
-               *reason = 0;
-               ret = VXGE_HW_ERR_WRONG_IRQ;
-               goto exit;
-       }
-
-       if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
-               adapter_status = readq(&hldev->common_reg->adapter_status);
-
-               if (adapter_status == VXGE_HW_ALL_FOXES) {
-
-                       __vxge_hw_device_handle_error(hldev,
-                               NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
-                       *reason = 0;
-                       ret = VXGE_HW_ERR_SLOT_FREEZE;
-                       goto exit;
-               }
-       }
-
-       hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
-       *reason = val64;
-
-       vpath_mask = hldev->vpaths_deployed >>
-                               (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
-       if (val64 &
-           VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
-               hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
-               return VXGE_HW_OK;
-       }
-
-       hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
-       if (unlikely(val64 &
-                       VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
-               enum vxge_hw_status error_level = VXGE_HW_OK;
-
-               hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
-               for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-                       if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
-                               continue;
-
-                       ret = __vxge_hw_vpath_alarm_process(
-                               &hldev->virtual_paths[i], skip_alarms);
-
-                       error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
-                       if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
-                               (ret == VXGE_HW_ERR_SLOT_FREEZE)))
-                               break;
-               }
-
-               ret = error_level;
-       }
-exit:
-       return ret;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-       /*
-        * If the previous link state is not down, return.
-        */
-       if (hldev->link_state == VXGE_HW_LINK_UP)
-               goto exit;
-
-       hldev->link_state = VXGE_HW_LINK_UP;
-
-       /* notify driver */
-       if (hldev->uld_callbacks.link_up)
-               hldev->uld_callbacks.link_up(hldev);
-exit:
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-       /*
-        * If the previous link state is not down, return.
-        */
-       if (hldev->link_state == VXGE_HW_LINK_DOWN)
-               goto exit;
-
-       hldev->link_state = VXGE_HW_LINK_DOWN;
-
-       /* notify driver */
-       if (hldev->uld_callbacks.link_down)
-               hldev->uld_callbacks.link_down(hldev);
-exit:
-       return VXGE_HW_OK;
-}
-
 /**
  * __vxge_hw_device_handle_error - Handle error
  * @hldev: HW device
@@ -572,10 +420,8 @@ exit:
  * Handle error.
  */
 static enum vxge_hw_status
-__vxge_hw_device_handle_error(
-               struct __vxge_hw_device *hldev,
-               u32 vp_id,
-               enum vxge_hw_event type)
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+                             enum vxge_hw_event type)
 {
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
@@ -615,95 +461,518 @@ out:
        return VXGE_HW_OK;
 }
 
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and RX interrupt.
- * @hldev: HW device.
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
  *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
  */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 {
+       /*
+        * If the previous link state is already down, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_DOWN)
+               goto exit;
 
-       if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
-          (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
-               writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
-                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
-                               &hldev->common_reg->tim_int_status0);
-       }
+       hldev->link_state = VXGE_HW_LINK_DOWN;
 
-       if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
-          (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
-               __vxge_hw_pio_mem_write32_upper(
-                               (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
-                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
-                               &hldev->common_reg->tim_int_status1);
-       }
+       /* notify driver */
+       if (hldev->uld_callbacks.link_down)
+               hldev->uld_callbacks.link_down(hldev);
+exit:
+       return VXGE_HW_OK;
 }
 
 /*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
  *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for a programmable amount of time.
  */
 static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 {
-       void **tmp_arr;
-
-       if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
-               *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
-               return VXGE_HW_OK;
-       }
-
-       /* switch between empty and full arrays */
-
-       /* the idea behind such a design is that by having free and reserved
-        * arrays separated we basically separated irq and non-irq parts.
-        * i.e. no additional lock need to be done when we free a resource */
-
-       if (channel->length - channel->free_ptr > 0) {
-
-               tmp_arr = channel->reserve_arr;
-               channel->reserve_arr = channel->free_arr;
-               channel->free_arr = tmp_arr;
-               channel->reserve_ptr = channel->length;
-               channel->reserve_top = channel->free_ptr;
-               channel->free_ptr = channel->length;
-
-               channel->stats->reserve_free_swaps_cnt++;
-
-               goto _alloc_after_swap;
-       }
+       /*
+        * If the previous link state is already up, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_UP)
+               goto exit;
 
-       channel->stats->full_cnt++;
+       hldev->link_state = VXGE_HW_LINK_UP;
 
-       *dtrh = NULL;
-       return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+       /* notify driver */
+       if (hldev->uld_callbacks.link_up)
+               hldev->uld_callbacks.link_up(hldev);
+exit:
+       return VXGE_HW_OK;
 }
 
 /*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channelh: Channel
- * @dtrh: DTR pointer
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
  *
- * Posts a dtr to work array.
+ * Process vpath alarms.
  *
  */
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
-                                    void *dtrh)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms)
 {
-       vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
+       u64 val64;
+       u64 alarm_status;
+       u64 pic_status;
+       struct __vxge_hw_device *hldev = NULL;
+       enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+       u64 mask64;
+       struct vxge_hw_vpath_stats_sw_info *sw_stats;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+       if (vpath == NULL) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out2;
+       }
+
+       hldev = vpath->hldev;
+       vp_reg = vpath->vp_reg;
+       alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+       if (alarm_status == VXGE_HW_ALL_FOXES) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+                       alarm_event);
+               goto out;
+       }
+
+       sw_stats = vpath->sw_stats;
+
+       if (alarm_status & ~(
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+               sw_stats->error_stats.unknown_alarms++;
+
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out;
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+               val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+               if (val64 &
+               VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+                       val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+                       if (((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+                           ((val64 &
+                            VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+                                    ))) {
+                               sw_stats->error_stats.network_sustained_fault++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_down_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+                       }
+
+                       if (((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+                           ((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+                                    ))) {
+
+                               sw_stats->error_stats.network_sustained_ok++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_up_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_UP, alarm_event);
+                       }
+
+                       writeq(VXGE_HW_INTR_MASK_ALL,
+                               &vp_reg->asic_ntwk_vp_err_reg);
+
+                       alarm_event = VXGE_HW_SET_LEVEL(
+                               VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+                       if (skip_alarms)
+                               return VXGE_HW_OK;
+               }
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+               pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+                       val64 = readq(&vp_reg->general_errors_reg);
+                       mask64 = readq(&vp_reg->general_errors_mask);
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+                               ~mask64) {
+                               sw_stats->error_stats.ini_serr_det++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_SERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+                               ~mask64) {
+                               sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_pif_chain_error++;
+
+                       if ((val64 &
+                          VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_drop_timeout++;
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+                               ~mask64)
+                               sw_stats->error_stats.target_illegal_access++;
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->general_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+                       val64 = readq(&vp_reg->kdfcctl_errors_reg);
+                       mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->kdfcctl_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+               val64 = readq(&vp_reg->wrdma_alarm_status);
+
+               if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+                       val64 = readq(&vp_reg->prc_alarm_reg);
+                       mask64 = readq(&vp_reg->prc_alarm_mask);
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+                               ~mask64)
+                               sw_stats->error_stats.prc_ring_bumps++;
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+                               & ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_VPATH_ERR,
+                                               alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+                                & ~mask64) {
+                               sw_stats->error_stats.prc_quanta_size_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->prc_alarm_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_ALARM_CLEARED,
+                                               alarm_event);
+                       }
+               }
+       }
+out:
+       hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+       if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+               (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+               return VXGE_HW_OK;
+
+       __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+       if (alarm_event == VXGE_HW_EVENT_SERR)
+               return VXGE_HW_ERR_CRITICAL;
+
+       return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+               VXGE_HW_ERR_SLOT_FREEZE :
+               (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+               VXGE_HW_ERR_VPATH;
+}
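
For readability, the out/out2 tail above collapses the accumulated
alarm_event into a return status. The same final mapping written as a
switch, as a sketch only (it omits the __vxge_hw_device_handle_error()
call that precedes the non-OK returns):

	switch (alarm_event) {
	case VXGE_HW_EVENT_ALARM_CLEARED:
	case VXGE_HW_EVENT_UNKNOWN:
		return VXGE_HW_OK;
	case VXGE_HW_EVENT_SERR:
		return VXGE_HW_ERR_CRITICAL;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		return VXGE_HW_ERR_SLOT_FREEZE;
	case VXGE_HW_EVENT_FIFO_ERR:
		return VXGE_HW_ERR_FIFO;
	default:
		return VXGE_HW_ERR_VPATH;
	}
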
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ * @skip_alarms: Do not clear the alarms
+ * @reason: "Reason" for the interrupt, the value of Titan's
+ *     general_int_status register.
+ *
+ * The function performs two actions. It first checks whether the interrupt
+ * was raised by the device (relevant when the IRQ line is shared). Next, it
+ * masks the device interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are possible.
+ *
+ * Returns: 0, if the interrupt is not "ours" (note that in this case the
+ * device remains enabled).
+ * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
+ * status.
+ */
+enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
+                                            u32 skip_alarms, u64 *reason)
+{
+       u32 i;
+       u64 val64;
+       u64 adapter_status;
+       u64 vpath_mask;
+       enum vxge_hw_status ret = VXGE_HW_OK;
+
+       val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+       if (unlikely(!val64)) {
+               /* not Titan interrupt  */
+               *reason = 0;
+               ret = VXGE_HW_ERR_WRONG_IRQ;
+               goto exit;
+       }
+
+       if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
+
+               adapter_status = readq(&hldev->common_reg->adapter_status);
+
+               if (adapter_status == VXGE_HW_ALL_FOXES) {
+
+                       __vxge_hw_device_handle_error(hldev,
+                               NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
+                       *reason = 0;
+                       ret = VXGE_HW_ERR_SLOT_FREEZE;
+                       goto exit;
+               }
+       }
+
+       hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+       *reason = val64;
+
+       vpath_mask = hldev->vpaths_deployed >>
+                               (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+
+       if (val64 &
+           VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
+               hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
+
+               return VXGE_HW_OK;
+       }
+
+       hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+       if (unlikely(val64 &
+                       VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
+
+               enum vxge_hw_status error_level = VXGE_HW_OK;
+
+               hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+               for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+                       if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                               continue;
+
+                       ret = __vxge_hw_vpath_alarm_process(
+                               &hldev->virtual_paths[i], skip_alarms);
+
+                       error_level = VXGE_HW_SET_LEVEL(ret, error_level);
+
+                       if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
+                               (ret == VXGE_HW_ERR_SLOT_FREEZE)))
+                               break;
+               }
+
+               ret = error_level;
+       }
+exit:
+       return ret;
+}
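
As a usage sketch, modeled on vxge_isr_napi() earlier in this patch
(my_isr is a hypothetical name; error paths and NAPI scheduling are
elided): a shared-IRQ handler calls vxge_hw_device_begin_irq() first to
learn whether the line fired for this device at all, then masks before
doing any work:

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct __vxge_hw_device *hldev = dev_id;
		u64 reason;

		/* VXGE_HW_ERR_WRONG_IRQ means another device raised the line */
		if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
			return IRQ_NONE;

		vxge_hw_device_mask_all(hldev);
		/* ... dispatch traffic/alarm handling based on 'reason' ... */
		return IRQ_HANDLED;
	}
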
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and RX interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+
+       if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status0);
+       }
+
+       if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               __vxge_hw_pio_mem_write32_upper(
+                               (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status1);
+       }
+}
+
+/*
+ * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
+ * @channel: Channel
+ * @dtrh: Buffer to return the DTR pointer
+ *
+ * Allocates a dtr from the reserve array. If the reserve array is empty,
+ * it swaps the reserve and free arrays.
+ *
+ */
+static enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+       void **tmp_arr;
+
+       if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+               *dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+               return VXGE_HW_OK;
+       }
+
+       /* switch between empty and full arrays */
+
+       /* the idea behind such a design is that by keeping the free and
+        * reserve arrays separate we effectively separate the irq and non-irq
+        * parts, i.e. no additional locking is needed when we free a resource */
+
+       if (channel->length - channel->free_ptr > 0) {
+
+               tmp_arr = channel->reserve_arr;
+               channel->reserve_arr = channel->free_arr;
+               channel->free_arr = tmp_arr;
+               channel->reserve_ptr = channel->length;
+               channel->reserve_top = channel->free_ptr;
+               channel->free_ptr = channel->length;
+
+               channel->stats->reserve_free_swaps_cnt++;
+
+               goto _alloc_after_swap;
+       }
+
+       channel->stats->full_cnt++;
+
+       *dtrh = NULL;
+       return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
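
The comment above captures the design: the allocator consumes reserve_arr
while completions accumulate in free_arr, and only when the reserve side
runs dry are the two arrays swapped wholesale, so neither side takes a
lock per entry. A self-contained sketch of the same two-array idea
(simplified bookkeeping, not the driver's exact fields):

	#include <stddef.h>

	struct pool {
		void **reserve_arr, **free_arr;
		int reserve_ptr, free_ptr;
	};

	static void *pool_alloc(struct pool *p)
	{
		if (p->reserve_ptr == 0) {
			void **tmp;

			if (p->free_ptr == 0)
				return NULL;	/* truly out of descriptors */

			tmp = p->reserve_arr;	/* O(1) bulk refill by swapping */
			p->reserve_arr = p->free_arr;
			p->free_arr = tmp;
			p->reserve_ptr = p->free_ptr;
			p->free_ptr = 0;
		}
		return p->reserve_arr[--p->reserve_ptr];
	}
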
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channelh: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to work array.
+ *
+ */
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+       vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
        channel->work_arr[channel->post_index++] = dtrh;
 
        /* wrap-around */
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-       struct __vxge_hw_channel *channel;
-
-       channel = &ring->channel;
-
        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
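
The hunk above drops a write-only channel local; what remains is the
producer-side publish pattern: wmb() orders the caller's descriptor writes
before vxge_hw_ring_rxd_post_post() hands the RxD to the adapter. The same
ordering expressed in portable C11 atomics, as an analogy only (struct rxd
and OWNER_NIC are illustrative, not driver types):

	#include <stdatomic.h>
	#include <stdint.h>

	struct rxd { uint64_t addr; uint32_t len; _Atomic uint32_t owner; };
	#define OWNER_NIC 1u

	static void rxd_publish(struct rxd *d, uint64_t addr, uint32_t len)
	{
		d->addr = addr;		/* populate the descriptor... */
		d->len = len;
		/* ...and make those stores visible before releasing ownership;
		 * the release store plays the role of wmb() plus the post */
		atomic_store_explicit(&d->owner, OWNER_NIC, memory_order_release);
	}
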
@@ -1542,607 +1807,329 @@ vxge_hw_vpath_mac_addr_get_next(
        if (status != VXGE_HW_OK)
                goto exit;
 
-       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
-       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
-       for (i = ETH_ALEN; i > 0; i--) {
-               macaddr[i-1] = (u8)(data1 & 0xFF);
-               data1 >>= 8;
-
-               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
-               data2 >>= 8;
-       }
-
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
- *               to MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Delete the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
-       struct __vxge_hw_vpath_handle *vp,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN])
-{
-       u32 i;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               data1 <<= 8;
-               data1 |= (u8)macaddr[i];
-
-               data2 <<= 8;
-               data2 |= (u8)macaddr_mask[i];
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
-                       0,
-                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
-                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
- *               to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this  vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-       u64 data;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_get(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, vid, &data);
-
-       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- *               to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this  vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
-                       struct __vxge_hw_vpath_handle *vp)
-{
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       vpath = vp->vpath;
-
-       /* Enable promiscous mode for function 0 only */
-       if (!(vpath->hldev->access_rights &
-               VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
-               return VXGE_HW_OK;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
-               val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
-                        VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
-                        VXGE_HW_RXMAC_VCFG0_BCAST_EN |
-                        VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
-                       struct __vxge_hw_vpath_handle *vp)
-{
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
 
-       if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
+       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
 
-               val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
-                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
-                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
+       for (i = ETH_ALEN; i > 0; i--) {
+               macaddr[i-1] = (u8)(data1 & 0xFF);
+               data1 >>= 8;
 
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+               data2 >>= 8;
        }
+
 exit:
        return status;
 }
 
-/*
- * vxge_hw_vpath_bcast_enable - Enable broadcast
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ *               from the MAC address table.
  * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted for this vpath from the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Delete the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
  *
- * Enable receiving broadcasts.
  */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
-                       struct __vxge_hw_vpath_handle *vp)
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN])
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       for (i = 0; i < ETH_ALEN; i++) {
+               data1 <<= 8;
+               data1 |= (u8)macaddr[i];
 
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
-               val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+               data2 <<= 8;
+               data2 |= (u8)macaddr_mask[i];
        }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0,
+                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
 exit:
        return status;
 }
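
Both the add and delete paths fold the six MAC-address bytes, most
significant first, into a u64 before handing it to the RTS steering table,
and vxge_hw_vpath_mac_addr_get_next() above unfolds in the opposite
direction. The two directions in isolation, as a standalone sketch:

	#include <stdint.h>

	#define ETH_ALEN 6

	static uint64_t mac_to_u64(const uint8_t mac[ETH_ALEN])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			v = (v << 8) | mac[i];	/* byte 0 lands highest */
		return v;
	}

	static void u64_to_mac(uint64_t v, uint8_t mac[ETH_ALEN])
	{
		int i;

		for (i = ETH_ALEN; i > 0; i--) {
			mac[i - 1] = (uint8_t)(v & 0xFF);
			v >>= 8;
		}
	}
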
 
 /**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ *               to the vlan id table.
  * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
  *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
+ * Adds the given vlan id into the list for this vpath.
+ * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
  *
  */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
-                       struct __vxge_hw_vpath_handle *vp)
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
-               val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
 exit:
        return status;
 }
 
 /**
- * vxge_hw_vpath_mcast_disable - Disable  multicast addresses.
+ * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
+ *               from vlan id table.
  * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
  *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ * Returns the first vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get_next
  *
  */
 enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
+       u64 data;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, vid, &data);
 
-       if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
-               val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
+       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
 exit:
        return status;
 }
 
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ *               to vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
  *
- * Process vpath alarms.
+ * Adds the given vlan id into the list for this  vpath.
+ * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
  *
  */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-                             u32 skip_alarms)
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
 {
-       u64 val64;
-       u64 alarm_status;
-       u64 pic_status;
-       struct __vxge_hw_device *hldev = NULL;
-       enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-       u64 mask64;
-       struct vxge_hw_vpath_stats_sw_info *sw_stats;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-       if (vpath == NULL) {
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-                       alarm_event);
-               goto out2;
-       }
-
-       hldev = vpath->hldev;
-       vp_reg = vpath->vp_reg;
-       alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-       if (alarm_status == VXGE_HW_ALL_FOXES) {
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-                       alarm_event);
-               goto out;
-       }
-
-       sw_stats = vpath->sw_stats;
-
-       if (alarm_status & ~(
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-               sw_stats->error_stats.unknown_alarms++;
-
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-                       alarm_event);
-               goto out;
-       }
-
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-               val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-               if (val64 &
-               VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-                       val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-                       if (((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-                           ((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-                                    ))) {
-                               sw_stats->error_stats.network_sustained_fault++;
-
-                               writeq(
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-                                       &vp_reg->asic_ntwk_vp_err_mask);
-
-                               __vxge_hw_device_handle_link_down_ind(hldev);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-                       }
-
-                       if (((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-                           ((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-                                    ))) {
-
-                               sw_stats->error_stats.network_sustained_ok++;
-
-                               writeq(
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-                                       &vp_reg->asic_ntwk_vp_err_mask);
-
-                               __vxge_hw_device_handle_link_up_ind(hldev);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_LINK_UP, alarm_event);
-                       }
-
-                       writeq(VXGE_HW_INTR_MASK_ALL,
-                               &vp_reg->asic_ntwk_vp_err_reg);
-
-                       alarm_event = VXGE_HW_SET_LEVEL(
-                               VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if (skip_alarms)
-                               return VXGE_HW_OK;
-               }
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
        }
 
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-               pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-               if (pic_status &
-                   VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-                       val64 = readq(&vp_reg->general_errors_reg);
-                       mask64 = readq(&vp_reg->general_errors_mask);
-
-                       if ((val64 &
-                               VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-                               ~mask64) {
-                               sw_stats->error_stats.ini_serr_det++;
-
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_SERR, alarm_event);
-                       }
-
-                       if ((val64 &
-                           VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-                               ~mask64) {
-                               sw_stats->error_stats.dblgen_fifo0_overflow++;
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+       return status;
+}
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Enable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_disable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if ((val64 &
-                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-                               ~mask64)
-                               sw_stats->error_stats.statsb_pif_chain_error++;
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                       if ((val64 &
-                          VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-                               ~mask64)
-                               sw_stats->error_stats.statsb_drop_timeout++;
+       vpath = vp->vpath;
 
-                       if ((val64 &
-                               VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-                               ~mask64)
-                               sw_stats->error_stats.target_illegal_access++;
+       /* Enable promiscuous mode for function 0 only */
+       if (!(vpath->hldev->access_rights &
+               VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
+               return VXGE_HW_OK;
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->general_errors_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_ALARM_CLEARED,
-                                       alarm_event);
-                       }
-               }
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-               if (pic_status &
-                   VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
 
-                       val64 = readq(&vp_reg->kdfcctl_errors_reg);
-                       mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+               val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_BCAST_EN |
+                        VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Disable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_enable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_poison++;
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+       vpath = vp->vpath;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+       if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->kdfcctl_errors_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_ALARM_CLEARED,
-                                       alarm_event);
-                       }
-               }
+               val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
 
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
+exit:
+       return status;
+}
 
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+/**
+ * vxge_hw_vpath_bcast_enable - Enable broadcast
+ * @vp: Vpath handle.
+ *
+ * Enable receiving broadcasts.
+ */
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-               val64 = readq(&vp_reg->wrdma_alarm_status);
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-               if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+       vpath = vp->vpath;
 
-                       val64 = readq(&vp_reg->prc_alarm_reg);
-                       mask64 = readq(&vp_reg->prc_alarm_mask);
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
-                               ~mask64)
-                               sw_stats->error_stats.prc_ring_bumps++;
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
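All of the rxmac_vcfg0 setters in this hunk share one idiom: read the
register, test the bit(s) of interest, and write back only when the value
actually changes, avoiding a redundant PIO write. An illustrative helper
(not part of the driver) capturing that pattern:

	/* Illustrative only: the shared read-modify-write idiom behind
	 * the promisc/bcast/mcast toggles above. Writes back only when
	 * the masked bits change.
	 */
	static void example_rxmac_vcfg0_set(struct __vxge_hw_virtualpath *vpath,
					    u64 mask, bool enable)
	{
		u64 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

		if (enable && !(val64 & mask))
			writeq(val64 | mask, &vpath->vp_reg->rxmac_vcfg0);
		else if (!enable && (val64 & mask))
			writeq(val64 & ~mask, &vpath->vp_reg->rxmac_vcfg0);
	}
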
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-                               ~mask64) {
-                               sw_stats->error_stats.prc_rxdcm_sc_err++;
+/**
+ * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Enable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success.
+ */
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_VPATH_ERR,
-                                       alarm_event);
-                       }
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-                               & ~mask64) {
-                               sw_stats->error_stats.prc_rxdcm_sc_abort++;
+       vpath = vp->vpath;
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                               VXGE_HW_EVENT_VPATH_ERR,
-                                               alarm_event);
-                       }
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-                                & ~mask64) {
-                               sw_stats->error_stats.prc_quanta_size_err++;
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_VPATH_ERR,
-                                       alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Disable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success,
+ * VXGE_HW_ERR_INVALID_HANDLE if the vpath handle is invalid.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->prc_alarm_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                               VXGE_HW_EVENT_ALARM_CLEARED,
-                                               alarm_event);
-                       }
-               }
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
        }
-out:
-       hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-       if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-               (alarm_event == VXGE_HW_EVENT_UNKNOWN))
-               return VXGE_HW_OK;
 
-       __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+       vpath = vp->vpath;
 
-       if (alarm_event == VXGE_HW_EVENT_SERR)
-               return VXGE_HW_ERR_CRITICAL;
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-       return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-               VXGE_HW_ERR_SLOT_FREEZE :
-               (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-               VXGE_HW_ERR_VPATH;
+       if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
+               val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
 }
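A common consumer of the multicast pair is the same rx-mode update, where
IFF_ALLMULTI selects between the two calls for each vpath. A fragment-level
sketch, with the surrounding loop and the vdev naming assumed as in the
earlier example:

	/* Inside a per-vpath loop of a hypothetical rx-mode handler */
	if (dev->flags & IFF_ALLMULTI)
		status = vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
	else
		status = vxge_hw_vpath_mcast_disable(vdev->vpaths[i].handle);
	if (status != VXGE_HW_OK)
		break;
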
 
 /*
index 1fceee87622878f5e8e2b10d28a9901942bb83be..8c3103fb644250108e14c80d55c857974bca491f 100644 (file)
@@ -2081,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };
 
-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
 struct vxge_hw_mempool_cbs {
        void (*item_func_alloc)(
                        struct vxge_hw_mempool *mempoolh,
@@ -2158,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vpath_handle,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN],
+       u8 *macaddr,
+       u8 *macaddr_mask,
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vpath_handle,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN]);
+       u8 *macaddr,
+       u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get_next(
        struct __vxge_hw_vpath_handle *vpath_handle,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN]);
+       u8 *macaddr,
+       u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_delete(
        struct __vxge_hw_vpath_handle *vpath_handle,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN]);
+       u8 *macaddr,
+       u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_vid_add(
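The prototype change in this hunk is purely cosmetic at the call site: a
parenthesized array parameter such as u8 (macaddr)[ETH_ALEN] already decays
to u8 * when passed, so the new declarations simply spell out the type the
compiler was using all along. A hedged call-site sketch (the mask value and
the duplicate-mode enumerator are illustrative, assuming an already-opened
vpath handle "vp"):

	u8 mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 mask[ETH_ALEN] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
	enum vxge_hw_status status;

	/* Identical source either way; only the declared parameter
	 * type changed from u8 (x)[ETH_ALEN] to u8 *x.
	 */
	status = vxge_hw_vpath_mac_addr_add(vp, mac, mask,
			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
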
@@ -2285,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
 
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);