u32 j = 0;
int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
int nr_kpages = kpages_per_hwpage;
- /* loop over desired chunk entries */
- chunk = pginfo->u.usr.next_chunk;
- prev_chunk = pginfo->u.usr.next_chunk;
- list_for_each_entry_continue(
- chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
- for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
- if (nr_kpages == kpages_per_hwpage) {
- pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
- << PAGE_SHIFT );
- *kpage = pgaddr;
- if ( !(*kpage) ) {
- ehca_gen_err("pgaddr=%llx i=%x",
- pgaddr, i);
+ struct scatterlist **sg = &pginfo->u.usr.next_sg;
+
+ while (*sg != NULL) {
+
+ if (nr_kpages == kpages_per_hwpage) {
+ pgaddr = (page_to_pfn(sg_page(*sg))
+ << PAGE_SHIFT);
+ *kpage = pgaddr;
+ if (!(*kpage)) {
+ ehca_gen_err("pgaddr=%llx entry=%llx",
+ pgaddr, pginfo->u.usr.next_nmap);
+ ret = -EFAULT;
+ return ret;
+ }
+ /*
+ * The first page in a hwpage must be aligned;
+ * the first MR page is exempt from this rule.
+ */
+ if (pgaddr & (pginfo->hwpage_size - 1)) {
+ if (pginfo->hwpage_cnt) {
+ ehca_gen_err(
+ "invalid alignment "
+ "pgaddr=%llx entry=%llx "
+ "mr_pgsize=%llx",
+ pgaddr, pginfo->u.usr.next_nmap,
+ pginfo->hwpage_size);
ret = -EFAULT;
return ret;
}
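
The new alignment check above relies on hwpage_size being a power of two: pgaddr & (pginfo->hwpage_size - 1) is the offset of pgaddr within a hardware page and is non-zero exactly when the page is misaligned, and only the very first MR page (hwpage_cnt == 0) is exempt. A minimal userspace sketch of that rule, assuming power-of-two hardware pages (hwpage_alignment_ok and the sample values are illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the driver's check: a hardware page address must be
 * aligned to hwpage_size (assumed to be a power of two), except for
 * the very first MR page (hwpage_cnt == 0), which may start mid-hwpage. */
static int hwpage_alignment_ok(uint64_t pgaddr, uint64_t hwpage_size,
			       uint64_t hwpage_cnt)
{
	if (pgaddr & (hwpage_size - 1))
		return hwpage_cnt == 0; /* only the first MR page may be unaligned */
	return 1;
}

int main(void)
{
	uint64_t hwpage_size = 0x10000; /* 64 KiB hardware pages, illustrative */

	printf("%d\n", hwpage_alignment_ok(0x20000, hwpage_size, 5)); /* 1: aligned */
	printf("%d\n", hwpage_alignment_ok(0x23000, hwpage_size, 5)); /* 0: misaligned, not first */
	printf("%d\n", hwpage_alignment_ok(0x23000, hwpage_size, 0)); /* 1: first MR page exempt */
	return 0;
}
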
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_eq *eq = NULL;
struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
int pdid = 0;
+ u32 irq, indx;

- status = ocrdma_mbx_destroy_cq(dev, cq);
+ dev->cq_tbl[cq->id] = NULL;
+ indx = ocrdma_get_eq_table_index(dev, cq->eqn);
+ if (indx == -EINVAL)
+ BUG();
+
+ eq = &dev->eq_tbl[indx];
+ irq = ocrdma_get_irq(dev, eq);
+ synchronize_irq(irq);
+ ocrdma_flush_cq(cq);
+
+ status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
pdid = cq->ucontext->cntxt_pd->id;
ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
}
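
The reordered teardown above unpublishes the CQ from dev->cq_tbl, synchronizes the EQ's IRQ, and flushes the CQ before the mailbox destroy command is sent, presumably so the interrupt path can no longer observe a half-destroyed CQ. A minimal sketch of that ordering, with hypothetical stand-in stubs (unpublish_cq, quiesce_irq, flush_cq, mbx_destroy_cq are placeholders, not the ocrdma API):

#include <stdio.h>

/* Hypothetical stand-ins for the driver calls; they only print the step. */
static void unpublish_cq(int cq_id)   { printf("1. cq_tbl[%d] = NULL\n", cq_id); }
static void quiesce_irq(int eq_idx)   { printf("2. synchronize IRQ of eq %d\n", eq_idx); }
static void flush_cq(int cq_id)       { printf("3. flush pending CQEs of cq %d\n", cq_id); }
static int  mbx_destroy_cq(int cq_id) { printf("4. mailbox destroy of cq %d\n", cq_id); return 0; }

int main(void)
{
	int cq_id = 7, eq_idx = 2; /* illustrative ids */

	/* Same ordering as the patch: unpublish, quiesce, flush, then destroy. */
	unpublish_cq(cq_id);
	quiesce_irq(eq_idx);
	flush_cq(cq_id);
	return mbx_destroy_cq(cq_id);
}
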
stop_cqe:
cq->getp = cur_getp;
- if (polled_hw_cqes || expand || stop) {
- ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
+ if (cq->deferred_arm) {
+ ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
polled_hw_cqes);
+ cq->deferred_arm = false;
+ cq->deferred_sol = false;
+ } else {
+ /* We need to pop the CQE. No need to arm */
+ ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
+ polled_hw_cqes);
+ cq->deferred_sol = false;
}
+
return i;
}
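
The stop_cqe change above makes the doorbell write depend on whether an arm was deferred while CQEs were being polled: a deferred arm re-arms the CQ with the deferred solicited setting, otherwise only the polled CQEs are popped without arming, and the deferred flags are cleared either way. A small sketch of that decision, with a hypothetical ring_cq_db() stub standing in for the driver's doorbell helper (how deferred_arm gets set is outside this hunk and assumed here):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical doorbell stub: 'arm' re-enables CQ notification,
 * 'sol' limits it to solicited completions, 'popped' credits back CQEs. */
static void ring_cq_db(int cq_id, bool arm, bool sol, unsigned int popped)
{
	printf("cq %d: arm=%d sol=%d popped=%u\n", cq_id, arm, sol, popped);
}

struct cq_state {
	int id;
	bool deferred_arm; /* assumed: arm requested elsewhere, deferred until poll finishes */
	bool deferred_sol;
};

/* Mirrors the patched stop_cqe logic: arm only if an arm was deferred. */
static void finish_poll(struct cq_state *cq, unsigned int polled)
{
	if (cq->deferred_arm) {
		ring_cq_db(cq->id, true, cq->deferred_sol, polled);
		cq->deferred_arm = false;
	} else {
		/* just pop the polled CQEs, no need to arm */
		ring_cq_db(cq->id, false, cq->deferred_sol, polled);
	}
	cq->deferred_sol = false;
}

int main(void)
{
	struct cq_state cq = { .id = 7, .deferred_arm = true, .deferred_sol = false };

	finish_poll(&cq, 3); /* re-arms: an arm was deferred during the poll */
	finish_poll(&cq, 2); /* only pops: nothing deferred this time */
	return 0;
}
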