val = PIDX_T5_V(q->pend_cred / 8) |
DBTYPE_F;
val |= DBPRIO_F;
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old
 * doorbell mechanism; otherwise use the new BAR2 mechanism.
 */
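The new comments above document a standard producer/doorbell ordering requirement. A minimal sketch of the pattern, with hypothetical names (my_queue, write_descriptors) rather than the driver's real API:

struct my_queue {
	void __iomem *doorbell;		/* MMIO doorbell register */
	unsigned int pidx;		/* producer index */
};

static void publish_and_ring(struct my_queue *q)
{
	write_descriptors(q);		/* 1. fill descriptors in host memory */
	wmb();				/* 2. commit them before ringing */
	writel(q->pidx, q->doorbell);	/* 3. then tell the hardware */
}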
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
- wmb(); /* write descriptors before telling HW */
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
*/
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
}
if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
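To make the converted macro spelling concrete, here is a worked composition of the checksum control word for a plain TCP/IPv4 frame; the values are illustrative, not taken from the patch:

/* Illustrative ctrl1 for TCP over IPv4: 20-byte IP header, no extra
 * Ethernet headers (TXPKT_ETHHDR_LEN counts bytes beyond ETH_HLEN).
 */
u64 cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
	    TXPKT_IPHDR_LEN_V(20) |	/* skb_network_header_len(skb) */
	    TXPKT_ETHHDR_LEN_V(0);	/* skb_network_offset(skb) - ETH_HLEN */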
return -ENOTSUPP;
/* FC CRC offload */
- *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
- TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
- TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
- TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
- TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+ *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+ TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+ TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+ TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+ TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE | LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- LSO_IPHDR_LEN(l3hdr_len / 4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
lso->c.len = htonl(skb->len);
else
- lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+ lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len);
+ cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len) |
+ TXPKT_ETHHDR_LEN_V(eth_xtra_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
if (skb->protocol == htons(ETH_P_FCOE))
- cntrl |= TXPKT_VLAN(
+ cntrl |= TXPKT_VLAN_V(
((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->fn));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
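The LSO header-length fields above are in 32-bit words, which is why the driver divides eth_xtra_len and l3hdr_len by 4 while tcp_hdr(skb)->doff passes through unscaled (doff is already in 4-byte words). A worked example with illustrative values:

/* Plain Ethernet + IPv4 + TCP:
 *   eth_xtra_len = skb_network_offset(skb) - ETH_HLEN = 0 bytes
 *   l3hdr_len    = 20 bytes  ->  20 / 4 = 5 words
 *   doff         = 5         (20-byte TCP header, in 4-byte words)
 */
u32 lso_ctrl = LSO_OPCODE_V(CPL_TX_PKT_LSO) |
	       LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
	       LSO_IPV6_V(0) |
	       LSO_ETHHDR_LEN_V(0) |
	       LSO_IPHDR_LEN_V(5) |
	       LSO_TCPHDR_LEN_V(5);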
static inline bool is_new_response(const struct rsp_ctrl *r,
const struct sge_rspq *q)
{
- return RSPD_GEN(r->type_gen) == q->gen;
+ return (r->type_gen >> RSPD_GEN_S) == q->gen;
}
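is_new_response() implements the usual ring generation-bit protocol: the queue tracks an expected gen value that flips on every ring wrap, so a response whose gen bit matches is new. A consumer-loop sketch with hypothetical helpers (process_response, next_rsp_ctrl):

while ((rc->type_gen >> RSPD_GEN_S) == q->gen) {
	process_response(q, rc);
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* expect the flipped bit after a wrap */
	}
	rc = next_rsp_ctrl(q);
}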
/**
break;
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
if (likely(q->offset > 0)) {
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
si.tot_len = len;
q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
if (unlikely(ret)) {
/* couldn't process descriptor, back off for recovery */
- q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
break;
}
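The pldbuflen_qid word that the RSPD_* getters above decode is overloaded: Free List responses carry a payload length plus a new-buffer flag in the top bit, while interrupt responses carry an ingress queue ID in the same low bits. A sketch of the layout as these accessors use it (exact field widths are my reading, worth checking against t4_msg.h):

/* pldbuflen_qid after byte-swapping:
 *   bit  31    : RSPD_NEWBUF_F - SGE started a new Free List buffer
 *   bits 30..0 : RSPD_LEN_G()  - payload length (FLBUF responses)
 *                RSPD_QID_G()  - ingress queue ID (INTR responses)
 */
u32 v = ntohl(rc->pldbuflen_qid);
if (v & RSPD_NEWBUF_F)
	v = RSPD_LEN_G(v);	/* strip the flag, keep the length */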
return LL_FLUSH_BUSY;
work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+ params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
q->next_intr_params = params;
val = CIDXINC_V(work_done) | SEINTARM_V(params);
int timer_index;
napi_complete(napi);
- timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+ timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
if (work_done > max(timer_pkt_quota[timer_index],
timer_index = timer_index - 1;
timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
- q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
- V_QINTR_CNT_EN;
+ q->next_intr_params =
+ QINTR_TIMER_IDX_V(timer_index) |
+ QINTR_CNT_EN_V(0);
params = q->next_intr_params;
} else {
params = q->next_intr_params;
q->next_intr_params = q->intr_params;
}
} else
- params = QINTR_TIMER_IDX(7);
+ params = QINTR_TIMER_IDX_V(7);
val = CIDXINC_V(work_done) | SEINTARM_V(params);
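Note the conversion convention visible here: a single-bit field that is always set becomes the _F form (LSO_FIRST_SLICE_F above), but a data-dependent bit such as QINTR_CNT_EN is written as _V(1) or _V(0) so both states are explicit. Illustratively:

/* 'counting' is a hypothetical condition, shown only for the pattern. */
params = QINTR_TIMER_IDX_V(timer_index) |
	 QINTR_CNT_EN_V(counting ? 1 : 0);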
break;
dma_rmb();
- if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
qid -= adap->sge.ingr_start;
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
- FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+ FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
FW_IQ_CMD_FL0CONGCIF_F |
FW_IQ_CMD_FL0CONGEN_F);
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
- FW_IQ_CMD_FL0FBMAX_V(3));
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
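The named constants replacing the bare 2/3/5 values are register encodings, not byte counts. If I read the SGE encodings correctly (an assumption worth verifying against t4_values.h), they work out as below:

/* Assumed encodings behind the new constants:
 *   fetch burst min   = 16B << x  ->  FETCHBURSTMIN_64B_X  == 2 (64B)
 *   fetch burst max   = 64B << x  ->  FETCHBURSTMAX_512B_X == 3 (512B)
 *   CIDX flush thresh =  1 << x   ->  CIDXFLUSHTHRESH_32_X == 5 (32)
 */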
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
- FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO_V(1) |
- FW_EQ_ETH_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
- FW_EQ_ETH_CMD_FBMAX_V(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
- FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO_F |
- FW_EQ_CTRL_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
- FW_EQ_CTRL_CMD_FBMAX_V(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
FW_EQ_OFLD_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
- FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO_F |
- FW_EQ_OFLD_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
- FW_EQ_OFLD_CMD_FBMAX_V(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
* Packing Boundary. T5 introduced the ability to specify these
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
- * specifications.
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary, but
+ * you could set the chip up that way and, in fact, legacy T4 code
+ * would end up doing this because it would initialize the Padding
+ * Boundary and leave the Packing Boundary initialized to 0 (16 bytes).)
*/
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
INGPADBOUNDARY_SHIFT_X);
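Per the comment above, on T5+ the effective Free List alignment is the maximum of the Padding and Packing Boundaries. A condensed sketch of that computation (the ingpackboundary derivation is assumed, since this hunk does not show it):

ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
		       INGPADBOUNDARY_SHIFT_X);
if (is_t4(adap->params.chip))
	s->fl_align = ingpadboundary;	/* T4: padding only */
else
	s->fl_align = max(ingpadboundary, ingpackboundary);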
t4_idma_monitor_init(adap, &s->idma_monitor);
+ /* Set up timers used for recurring callbacks to process RX and TX
+ * administrative tasks.
+ */
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
struct cpl_tx_pkt_core {
__be32 ctrl0;
-#define TXPKT_VF(x) ((x) << 0)
-#define TXPKT_PF(x) ((x) << 8)
-#define TXPKT_VF_VLD (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x) ((x) << 16)
-#define TXPKT_INS_OVLAN (1 << 21)
-#define TXPKT_OPCODE(x) ((x) << 24)
__be16 pack;
__be16 len;
__be64 ctrl1;
-#define TXPKT_CSUM_END(x) ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
-#define TXPKT_VLAN(x) ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD (1ULL << 60)
-#define TXPKT_IPCSUM_DIS (1ULL << 62)
-#define TXPKT_L4CSUM_DIS (1ULL << 63)
};
struct cpl_tx_pkt {
#define cpl_tx_pkt_xt cpl_tx_pkt
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S 0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S 8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S 11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S 12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S 16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S 21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S 24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S 12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S 20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S 20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S 30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S 34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S 40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S 44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S 60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S 62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S 63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F TXPKT_L4CSUM_DIS_V(1ULL)
+
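The blocks above follow the tree-wide cxgb4 macro scheme: NAME_S is the bit shift, NAME_V(x) places a value, and NAME_F is the one-bit convenience form. Fields that are read back (like RSPD_TYPE_G used elsewhere in this patch) additionally get a NAME_M mask and a NAME_G(x) getter; a generic sketch of that fourth form (EXAMPLE_FIELD is illustrative, since the TXPKT fields here are write-only):

#define EXAMPLE_FIELD_S    24
#define EXAMPLE_FIELD_M    0xffU
#define EXAMPLE_FIELD_V(x) ((x) << EXAMPLE_FIELD_S)
#define EXAMPLE_FIELD_G(x) (((x) >> EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M)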
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x) ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x) ((x) << 20)
-#define LSO_LAST_SLICE (1 << 22)
-#define LSO_FIRST_SLICE (1 << 23)
-#define LSO_OPCODE(x) ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
}
if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
+ TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
* Fill in the LSO CPL message.
*/
lso->lso_ctrl =
- cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE |
- LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len/4) |
- LSO_IPHDR_LEN(l3hdr_len/4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0);
if (is_t4(adapter->params.chip))
lso->len = cpu_to_be32(skb->len);
else
- lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
/*
* Set up TX Packet CPL pointer, control word and perform
* accounting.
*/
cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len));
+ cntrl = (TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len) |
+ TXPKT_ETHHDR_LEN_V(eth_xtra_len));
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
/*
*/
if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
/*
* Fill in the TX Packet CPL message header.
*/
- cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->port_id) |
- TXPKT_PF(0));
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
static inline bool is_new_response(const struct rsp_ctrl *rc,
const struct sge_rspq *rspq)
{
- return RSPD_GEN(rc->type_gen) == rspq->gen;
+ return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
}
/**
* SGE.
*/
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl gl;
const struct rx_sw_desc *sdesc;
* If we get a "new buffer" message from the SGE we
* need to move on to the next Free List buffer.
*/
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
/*
* We get one "new buffer" message when we
* first start up a queue so we need to ignore
1);
rspq->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
gl.tot_len = len;
rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = rspq->handler(rspq, rspq->cur_desc, NULL);
} else {
- WARN_ON(rsp_type > RSP_TYPE_CPL);
+ WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
ret = 0;
}
*/
const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
rspq->next_intr_params =
- QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
break;
}
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
- intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+ intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
* never happen ...
*/
dma_rmb();
- if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
dev_err(adapter->pdev_dev,
"Unexpected INTRQ response type %d\n",
- RSPD_TYPE(rc->type_gen));
+ RSPD_TYPE_G(rc->type_gen));
continue;
}
* want to either make them fatal and/or conditionalized under
* DEBUG.
*/
- qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
iq_idx = IQ_IDX(s, qid);
if (unlikely(iq_idx >= MAX_INGQ)) {
dev_err(adapter->pdev_dev,