#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
/* Last packet in the request */
-#define USER_SDMA_TXREQ_FLAGS_LAST_PKT (1 << 0)
+#define TXREQ_FLAGS_REQ_LAST_PKT (1 << 0)
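+/*
+ * Last packet that uses a particular io vector. Kept in the
+ * per-vector flags field, so it can share bit 0 with the
+ * request-level flag above.
+ */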
+#define TXREQ_FLAGS_IOVEC_LAST_PKT (1 << 0)
#define SDMA_REQ_IN_USE 0
#define SDMA_REQ_FOR_THREAD 1
unsigned long flags;
};
+/*
+ * A single txreq could span up to 3 physical pages when the MTU
+ * is sufficiently large (> 4K): an 8K payload that does not
+ * start on a page boundary straddles three 4K pages. Each of
+ * the IOV pointers also needs its own set of flags so the
+ * vectors can be handled independently of each other.
+ */
struct user_sdma_txreq {
/* Packet header for the txreq */
struct hfi1_pkt_header hdr;
struct sdma_txreq txreq;
struct user_sdma_request *req;
- struct user_sdma_iovec *iovec1;
- struct user_sdma_iovec *iovec2;
+ struct {
+ struct user_sdma_iovec *vec;
+ u8 flags;
+ } iovecs[3];
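+ /* Index of the last used iovecs[] entry; -1 means none assigned */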
+ int idx;
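+ /* Request-wide flags (TXREQ_FLAGS_REQ_*) */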
u16 flags;
unsigned busycount;
u64 seqnum;
unsigned seq);
static void activate_packet_queue(struct iowait *, int);
-static inline int iovec_may_free(struct user_sdma_iovec *iovec,
- void (*free)(struct user_sdma_iovec *))
-{
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
- free(iovec);
- return 1;
- }
- return 0;
-}
-
-static inline void iovec_set_complete(struct user_sdma_iovec *iovec)
-{
- iovec->offset = iovec->iov.iov_len;
-}
-
static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait *wait,
tx->flags = 0;
tx->req = req;
tx->busycount = 0;
- tx->iovec1 = NULL;
- tx->iovec2 = NULL;
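+ /* No io vectors have been assigned to this txreq yet */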
+ tx->idx = -1;
+ memset(tx->iovecs, 0, sizeof(tx->iovecs));
if (req->seqnum == req->info.npkts - 1)
- tx->flags |= USER_SDMA_TXREQ_FLAGS_LAST_PKT;
+ tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
/*
* Calculate the payload size - this is min of the fragment
goto free_tx;
}
- tx->iovec1 = iovec;
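+ /* Claim the first iovecs[] slot for the current io vector */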
+ tx->iovecs[++tx->idx].vec = iovec;
datalen = compute_data_length(req, tx);
if (!datalen) {
SDMA_DBG(req,
iovec->pages[pageidx],
offset, len);
if (ret) {
+ int i;
+
dd_dev_err(pq->dd,
"SDMA txreq add page failed %d\n",
ret);
- iovec_set_complete(iovec);
+ /*
+ * Mark all assigned vectors as complete so they
+ * are unpinned in the callback.
+ */
+ for (i = tx->idx; i >= 0; i--) {
+ tx->iovecs[i].flags |=
+ TXREQ_FLAGS_IOVEC_LAST_PKT;
+ }
goto free_txreq;
}
iov_offset += len;
data_sent += len;
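+ /*
+ * The packet still needs data but the current io vector is
+ * exhausted; close it out and, if a slot remains, continue
+ * filling from the next vector.
+ */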
if (unlikely(queued < datalen &&
pageidx == iovec->npages &&
- req->iov_idx < req->data_iovs - 1)) {
+ req->iov_idx < req->data_iovs - 1 &&
+ tx->idx < ARRAY_SIZE(tx->iovecs) - 1)) {
iovec->offset += iov_offset;
+ tx->iovecs[tx->idx].flags |=
+ TXREQ_FLAGS_IOVEC_LAST_PKT;
iovec = &req->iovs[++req->iov_idx];
if (!iovec->pages) {
ret = pin_vector_pages(req, iovec);
goto free_txreq;
}
iov_offset = 0;
- tx->iovec2 = iovec;
-
+ tx->iovecs[++tx->idx].vec = iovec;
}
}
/*
req->tidoffset += datalen;
req->sent += data_sent;
if (req->data_len) {
- if (tx->iovec1 && !tx->iovec2)
- tx->iovec1->offset += iov_offset;
- else if (tx->iovec2)
- tx->iovec2->offset += iov_offset;
+ tx->iovecs[tx->idx].vec->offset += iov_offset;
+ /*
+ * If we've reached the end of the io vector, mark it
+ * so the callback can unpin the pages and free it.
+ */
+ if (tx->iovecs[tx->idx].vec->offset ==
+ tx->iovecs[tx->idx].vec->iov.iov_len)
+ tx->iovecs[tx->idx].flags |=
+ TXREQ_FLAGS_IOVEC_LAST_PKT;
}
+
/*
* It is important to increment this here as it is used to
* generate the BTH.PSN and, therefore, can't be bulk-updated
req->seqnum));
/* Set ACK request on last packet */
- if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
hdr->bth[2] |= cpu_to_be32(1UL<<31);
/* Set the new offset */
KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
EXP_TID_GET(tidval, IDX));
/* Clear KDETH.SH only on the last packet */
- if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
/*
* Set the KDETH.OFFSET and KDETH.OM based on size of
/* BTH.PSN and BTH.A */
val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
- if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT))
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
val32 |= 1UL << 31;
AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
(EXP_TID_GET(tidval, IDX) & 0x3ff));
/* Clear KDETH.SH on last packet */
- if (unlikely(tx->flags & USER_SDMA_TXREQ_FLAGS_LAST_PKT)) {
+ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
INTR) >> 16);
val &= cpu_to_le16(~(1U << 13));
if (unlikely(!req || !pq))
return;
- if (tx->iovec1)
- iovec_may_free(tx->iovec1, unpin_vector_pages);
- if (tx->iovec2)
- iovec_may_free(tx->iovec2, unpin_vector_pages);
+ /*
+ * If any io vectors are associated with this txreq,
+ * check whether they need to be unpinned and freed.
+ */
+ if (tx->idx != -1) {
+ int i;
+
+ for (i = tx->idx; i >= 0; i--) {
+ if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
+ unpin_vector_pages(tx->iovecs[i].vec);
+ }
+ }
tx_seqnum = tx->seqnum;
kmem_cache_free(pq->txreq_cache, tx);