int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
-extern spinlock_t ehca_qp_idr_lock;
-extern spinlock_t ehca_cq_idr_lock;
+extern rwlock_t ehca_qp_idr_lock;
+extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
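The hunks above and below convert the two global idr locks from plain spinlocks to rwlocks: the qp/cq idr trees are read on every completion and async event but written only on create/destroy, so lookups can proceed in parallel under a read lock while the rare updates take the write lock. Writers must still disable interrupts (write_lock_irqsave) because lookups run in interrupt context; a reader interrupting another reader on the same CPU is harmless, but a reader spinning on a write lock held on the same CPU would deadlock. A minimal sketch of the resulting pattern, using hypothetical helper names rather than the driver's actual functions:

        /* lookup side: interrupt context, shared lock, irq state untouched */
        static struct ehca_cq *cq_lookup(u32 token)
        {
                struct ehca_cq *cq;

                read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, token);
                read_unlock(&ehca_cq_idr_lock);
                return cq;
        }

        /* update side: process context, exclusive lock, irqs off so an
         * interrupt-context reader cannot spin against us on this CPU */
        static void cq_unregister(struct ehca_cq *cq)
        {
                unsigned long flags;

                write_lock_irqsave(&ehca_cq_idr_lock, flags);
                idr_remove(&ehca_cq_idr, cq->token);
                write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
        }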
goto create_cq_exit1;
}
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
} while (ret == -EAGAIN);
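The do/while above retries idr_get_new() as long as it returns -EAGAIN, which in this kernel's idr API means the memory preallocated by idr_pre_get() was consumed by a racing allocation. The opening of the loop is not part of this hunk; it presumably looks roughly like the following sketch (the error handling is assumed, not taken from the driver):

        do {
                if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
                        ret = -ENOMEM;  /* preallocation failed */
                        break;
                }
                write_lock_irqsave(&ehca_cq_idr_lock, flags);
                ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
                write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
        } while (ret == -EAGAIN);

idr_pre_get() may sleep with GFP_KERNEL, so it has to stay outside the irqs-off write lock; only the non-sleeping idr_get_new() runs under the lock.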
"cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
create_cq_exit2:
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
create_cq_exit1:
kmem_cache_free(cq_cache, my_cq);
* remove the CQ from the idr first to make sure
* no more interrupt tasklets will touch this CQ
*/
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
/* now wait until all pending events have completed */
wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
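The destroy order matters here: the CQ is first unpublished from the idr under the write lock, so no new event handler can look it up, and only then does destroy wait for nr_events to drain. The matching decrement is outside this excerpt; it would pair with the wait roughly like this sketch:

        /* event side, once processing of the cq is finished: */
        if (atomic_dec_and_test(&cq->nr_events))
                wake_up(&cq->wait_completion);

After wait_event() returns, no tasklet can still hold a reference obtained through the idr, so tearing the CQ down is safe.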
{
struct ib_event event;
struct ehca_qp *qp;
- unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ read_unlock(&ehca_qp_idr_lock);
if (!qp)
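The readers also lose their `unsigned long flags` local: a shared read lock taken in interrupt or tasklet context does not need to save and restore the interrupt state, since the only lock users that must keep interrupts off are the process-context writers. Side by side as a sketch:

        /* before: exclusive lock, interrupts saved and disabled */
        spin_lock_irqsave(&ehca_qp_idr_lock, flags);
        qp = idr_find(&ehca_qp_idr, token);
        spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

        /* after: shared lock, interrupt state left alone */
        read_lock(&ehca_qp_idr_lock);
        qp = idr_find(&ehca_qp_idr, token);
        read_unlock(&ehca_qp_idr_lock);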
u64 eqe)
{
struct ehca_cq *cq;
- unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
if (cq)
atomic_inc(&cq->nr_events);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ read_unlock(&ehca_cq_idr_lock);
if (!cq)
return;
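Note that the reference is taken while the read lock is still held: between idr_find() and atomic_inc(), destroy cannot slip in and run idr_remove(), because idr_remove() happens only under the write lock. An annotated copy of the pattern:

        read_lock(&ehca_cq_idr_lock);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq)
                atomic_inc(&cq->nr_events);     /* pin against concurrent destroy */
        read_unlock(&ehca_cq_idr_lock);

A CQ found here therefore has nr_events raised before destroy's wait_event() can observe zero, so the CQ stays alive until this event is processed.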
{
u64 eqe_value;
u32 token;
- unsigned long flags;
struct ehca_cq *cq;
eqe_value = eqe->entry;
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
ehca_dbg(&shca->ib_device, "Got completion event");
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
if (cq)
atomic_inc(&cq->nr_events);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ read_unlock(&ehca_cq_idr_lock);
if (cq == NULL) {
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq token=%x",
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- spin_lock(&ehca_cq_idr_lock);
+ read_lock(&ehca_cq_idr_lock);
eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
if (eqe_cache[eqe_cnt].cq)
atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
- spin_unlock(&ehca_cq_idr_lock);
+ read_unlock(&ehca_cq_idr_lock);
if (!eqe_cache[eqe_cnt].cq) {
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq "
MODULE_PARM_DESC(scaling_code,
"set scaling code (0: disabled/default, 1: enabled)");
-DEFINE_SPINLOCK(ehca_qp_idr_lock);
-DEFINE_SPINLOCK(ehca_cq_idr_lock);
+DEFINE_RWLOCK(ehca_qp_idr_lock);
+DEFINE_RWLOCK(ehca_cq_idr_lock);
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
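DEFINE_RWLOCK() is the static-initializer counterpart of the spinlock definitions it replaces; these two definitions back the extern declarations in the header hunk at the top of this patch. A lock embedded in a dynamically allocated structure would instead be set up at runtime, for example (hypothetical lock, not from this driver):

        rwlock_t my_idr_lock;           /* embedded in some allocated struct */
        rwlock_init(&my_idr_lock);      /* runtime equivalent of DEFINE_RWLOCK */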
goto create_qp_exit0;
}
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
} while (ret == -EAGAIN);
hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
create_qp_exit1:
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
create_qp_exit0:
kmem_cache_free(qp_cache, my_qp);
}
}
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
u32 cur_pid = current->tgid;
u32 ret;
- unsigned long flags;
struct ehca_cq *cq;
struct ehca_qp *qp;
struct ehca_pd *pd;
switch (q_type) {
case 1: /* CQ */
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ read_unlock(&ehca_cq_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!cq)
break;
case 2: /* QP */
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ read_unlock(&ehca_qp_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!qp)
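Both mmap lookups run in plain process context, yet they can still use the lightweight read_lock(): even if an event interrupt fires on the same CPU while the lock is read-held, the interrupt paths above are also only readers and can share the lock. The ownership check the comments refer to presumably compares a creator pid stored in the object against cur_pid; a sketch with the field name and return values assumed, not taken from this excerpt:

        read_lock(&ehca_qp_idr_lock);
        qp = idr_find(&ehca_qp_idr, idr_handle);
        read_unlock(&ehca_qp_idr_lock);

        if (!qp)
                return -EINVAL;         /* no such QP token */
        if (qp->ownpid != cur_pid)      /* hypothetical field */
                return -ENOMEM;         /* caller did not create this QP */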