return 0;
}
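+/* RQ buffer allocation for A1 revision hardware: when posted_index is
+ * about to be written, post a reserved dummy descriptor instead of a
+ * real buffer; otherwise fall back to enic_rq_alloc_buf().
+ */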
+static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+
+	if (vnic_rq_posting_soon(rq)) {
+
+		/* SW workaround for A0 HW erratum: if we're just about
+		 * to write posted_index, insert a dummy desc
+		 * of type resvd
+		 */
+
+		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
+		vnic_rq_post(rq, 0, 0, 0, 0);
+	} else {
+		return enic_rq_alloc_buf(rq);
+	}
+
+	return 0;
+}
+
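+/* Select the RQ buffer allocation routine for this adapter: A1
+ * hardware needs the dummy-descriptor workaround above, while A2 and
+ * unrecognized versions use the standard allocator.
+ */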
+static int enic_set_rq_alloc_buf(struct enic *enic)
+{
+	enum vnic_dev_hw_version hw_ver;
+	int err;
+
+	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
+	if (err)
+		return err;
+
+	switch (hw_ver) {
+	case VNIC_DEV_HW_VER_A1:
+		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
+		break;
+	case VNIC_DEV_HW_VER_A2:
+	case VNIC_DEV_HW_VER_UNKNOWN:
+		enic->rq_alloc_buf = enic_rq_alloc_buf;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *priv)
{
/* Replenish RQ
*/
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
} else {
/* Replenish RQ
*/
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
if (err) {
printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n",
return err;
}
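+/* Map the hw_version string from the firmware info to a
+ * vnic_dev_hw_version value; anything other than "A1" or "A2" is
+ * reported as VNIC_DEV_HW_VER_UNKNOWN.
+ */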
+int vnic_dev_hw_version(struct vnic_dev *vdev,
+	enum vnic_dev_hw_version *hw_ver)
+{
+	struct vnic_devcmd_fw_info *fw_info;
+	int err;
+
+	err = vnic_dev_fw_info(vdev, &fw_info);
+	if (err)
+		return err;
+
+	if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
+		*hw_ver = VNIC_DEV_HW_VER_A1;
+	else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
+		*hw_ver = VNIC_DEV_HW_VER_A2;
+	else
+		*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
+
+	return 0;
+}
+
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
}
#endif
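+/* Hardware revisions, as reported by the firmware hw_version string */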
+enum vnic_dev_hw_version {
+	VNIC_DEV_HW_VER_UNKNOWN,
+	VNIC_DEV_HW_VER_A1,
+	VNIC_DEV_HW_VER_A2,
+};
+
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_hw_version(struct vnic_dev *vdev,
+	enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
}
}
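+/* Nonzero when the next descriptor to post lies on a
+ * VNIC_RQ_RETURN_RATE boundary, i.e. posted_index is about to be
+ * written to the hardware.
+ */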
+static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
+{
+	return ((rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0);
+}
+
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
{
int err;
- while (vnic_rq_desc_avail(rq) > 1) {
+ while (vnic_rq_desc_avail(rq) > 0) {
err = (*buf_fill)(rq);
if (err)