struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca;
- u16 seqnum;
+ __be16 seqnum;
u8 __reserved[4];
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
} __packed;
struct qm_mcc_fq {
u8 _ncw_verb;
u8 __reserved1[3];
- u32 fqid; /* 24-bit */
+ __be32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
DPAA_ASSERT(eqcr->busy);
- DPAA_ASSERT(!(eqcr->cursor->fqid & ~QM_FQID_MASK));
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);
}
break;
case QM_MR_VERB_FQPN:
/* Parked */
- fq = tag_to_fq(msg->fq.context_b);
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg);
}
} else {
/* It's a software ERN */
- fq = tag_to_fq(msg->ern.tag);
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg);
}
num++;
clear_vdqcr(p, fq);
} else {
/* SDQCR: context_b points to the FQ */
- fq = tag_to_fq(dq->context_b);
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL;
#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL;
}
/* Issue an INITFQ_[PARKED|SCHED] management command */
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
- mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/*
* and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings.
*/
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4;
}
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
goto out;
}
if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
}
- if (opts->we_mask & QM_INITFQ_WE_CGID)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
qm_fqid_set(&msg.fq, fq->fqid);
- msg.fq.context_b = fq_to_tag(fq);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg);
}
} else if (res == QM_MCR_RESULT_PENDING) {
goto out;
qm_fqid_set(eq, fq->fqid);
- eq->tag = fq_to_tag(fq);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
if (qman_ip_rev >= QMAN_REV30)
- cgr->cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT | pi;
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
else
- cgr->cscn_targ = val | QM_CGR_TARG_PORTAL(pi);
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}
static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
if (qman_ip_rev >= QMAN_REV30)
- cgr->cscn_targ_upd_ctrl = pi;
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
else
- cgr->cscn_targ = val & ~QM_CGR_TARG_PORTAL(pi);
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}
static u8 qman_cgr_cpus[CGR_NUM];
goto out;
qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
- cgr_state.cgr.cscn_targ);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
/* send init if flags indicate so */
if (flags & QMAN_CGR_FLAG_USE_INIT)
goto release_lock;
}
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
- cgr_state.cgr.cscn_targ);
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret)
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
return err;
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ if ((be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE) &&
fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid);
struct qm_dqrr_entry {
u8 verb;
u8 stat;
- u16 seqnum; /* 15-bit */
+ __be16 seqnum; /* 15-bit */
u8 tok;
u8 __reserved2[3];
- u32 fqid; /* 24-bit */
- u32 context_b;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
} __packed;
/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK GENMASK(23, 0)
-#define qm_fqid_set(p, v) ((p)->fqid = ((v) & QM_FQID_MASK))
-#define qm_fqid_get(p) ((p)->fqid & QM_FQID_MASK)
+#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
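A minimal usage sketch (not part of the patch, example FQID assumed) of the
updated accessors, showing that callers keep working in host byte order:

	struct qm_eqcr_entry eq = { };
	u32 fqid = 0x123456;			/* 24-bit FQID, host order */

	qm_fqid_set(&eq, fqid);			/* stores cpu_to_be32(0x00123456) */
	WARN_ON(qm_fqid_get(&eq) != fqid);	/* reads back in host order */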
/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct {
u8 verb;
u8 dca;
- u16 seqnum;
+ __be16 seqnum;
u8 rc; /* Rej Code: 8-bit */
u8 __reserved[3];
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
} __packed ern;
u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
- u32 fqid; /* 24-bit */
- u32 context_b;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
u8 __reserved2[48];
} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
};
static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
- fqd->context_a.context_hi = upper_32_bits(addr);
- fqd->context_a.context_lo = lower_32_bits(addr);
+ fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}
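A small sketch (example address assumed, not from the patch) of how the helper
above splits a 40-bit stashing address into the two big-endian fields:

	struct qm_fqd fqd = { };

	/* upper_32_bits(0xAB12345678) == 0xAB       -> context_hi (16-bit, BE)
	 * lower_32_bits(0xAB12345678) == 0x12345678 -> context_lo (32-bit, BE)
	 */
	qm_fqd_stashing_set64(&fqd, 0xAB12345678ULL);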
static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
*/
struct qm_cgr_wr_parm {
/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
- u32 word;
+ __be32 word;
};
/*
* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
*/
struct qm_cgr_cs_thres {
/* _res[13-15], TA[5-12], Tn[0-4] */
- u16 word;
+ __be16 word;
};
/*
* This identical structure of CGR fields is present in the "Init/Modify CGR"
u8 cscn_en; /* boolean, use QM_CGR_EN */
union {
struct {
- u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
- u16 cscn_targ_dcp_low;
+ __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+ __be16 cscn_targ_dcp_low;
};
- u32 cscn_targ; /* use QM_CGR_TARG_* */
+ __be32 cscn_targ; /* use QM_CGR_TARG_* */
};
u8 cstd_en; /* boolean, use QM_CGR_EN */
u8 cs; /* boolean, only used in query response */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
- return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+ int thres = be16_to_cpu(th->word);
+
+ return ((thres >> 5) & 0xff) << (thres & 0x1f);
}
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
if (roundup && oddbit)
val++;
}
- th->word = ((val & 0xff) << 5) | (e & 0x1f);
+ th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
return 0;
}
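A worked example (values assumed for illustration) of the TA/Tn encoding the
two helpers above implement, i.e. threshold = TA * 2^Tn:

	struct qm_cgr_cs_thres th;

	/* 4096 = 0x80 << 5, so the encoder picks TA = 0x80, Tn = 5 and
	 * stores cpu_to_be16((0x80 << 5) | 5) == cpu_to_be16(0x1005).
	 */
	qm_cgr_cs_thres_set64(&th, 4096, 0);
	/* the decoder recovers ((0x1005 >> 5) & 0xff) << (0x1005 & 0x1f) == 4096 */
	WARN_ON(qm_cgr_cs_thres_get64(&th) != 4096);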
/* "Initialize FQ" */
struct qm_mcc_initfq {
u8 __reserved1[2];
- u16 we_mask; /* Write Enable Mask */
- u32 fqid; /* 24-bit */
- u16 count; /* Initialises 'count+1' FQDs */
+ __be16 we_mask; /* Write Enable Mask */
+ __be32 fqid; /* 24-bit */
+ __be16 count; /* Initialises 'count+1' FQDs */
struct qm_fqd fqd; /* the FQD fields go here */
u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
u8 __reserve1[2];
- u16 we_mask; /* Write Enable Mask */
+ __be16 we_mask; /* Write Enable Mask */
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[2];
u8 cgid;