int ret = 0;
switch (req) {
- case RDMA_GET_PARAMS:{
- struct rdma_info *req = data;
+ case RDMA_GET_PARAMS: {
+ struct rdma_info *rdma = data;
struct pci_dev *pdev = adapter->pdev;
- req->udbell_physbase = pci_resource_start(pdev, 2);
- req->udbell_len = pci_resource_len(pdev, 2);
- req->tpt_base =
+ rdma->udbell_physbase = pci_resource_start(pdev, 2);
+ rdma->udbell_len = pci_resource_len(pdev, 2);
+ rdma->tpt_base =
t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
- req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
- req->pbl_base =
+ rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
+ rdma->pbl_base =
t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
- req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
- req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
- req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
- req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
- req->pdev = pdev;
+ rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
+ rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
+ rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
+ rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
+ rdma->pdev = pdev;
break;
}
case RDMA_CQ_OP:{
unsigned long flags;
- struct rdma_cq_op *req = data;
+ struct rdma_cq_op *rdma = data;
/* may be called in any context */
spin_lock_irqsave(&adapter->sge.reg_lock, flags);
- ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
- req->credits);
+ ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
+ rdma->credits);
spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
break;
}
case RDMA_CQ_SETUP:{
- struct rdma_cq_setup *req = data;
+ struct rdma_cq_setup *rdma = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret =
- t3_sge_init_cqcntxt(adapter, req->id,
- req->base_addr, req->size,
+ t3_sge_init_cqcntxt(adapter, rdma->id,
+ rdma->base_addr, rdma->size,
ASYNC_NOTIF_RSPQ,
- req->ovfl_mode, req->credits,
- req->credit_thres);
+ rdma->ovfl_mode, rdma->credits,
+ rdma->credit_thres);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
}
case RDMA_CTRL_QP_SETUP:{
- struct rdma_ctrlqp_setup *req = data;
+ struct rdma_ctrlqp_setup *rdma = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
SGE_CNTXT_RDMA,
ASYNC_NOTIF_RSPQ,
- req->base_addr, req->size,
+ rdma->base_addr, rdma->size,
FW_RI_TID_START, 1, 0);
spin_unlock_irq(&adapter->sge.reg_lock);
break;
}
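
This switch is reached through the t3cdev ctl hook that the RDMA upper-layer
driver (iw_cxgb3) holds. A minimal sketch of a caller, assuming the same hook
shape as iw_cxgb3 uses; the query_rdma_params() wrapper name is hypothetical:

/* Fetch the parameters published by the RDMA_GET_PARAMS case above;
 * tdev->ctl() dispatches into this switch. Illustrative only. */
static int query_rdma_params(struct t3cdev *tdev, struct rdma_info *info)
{
	return tdev->ctl(tdev, RDMA_GET_PARAMS, info);
}
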
* as HW contexts, packet buffers, and descriptor rings. Traffic to the
* queue set must be quiesced prior to calling this.
*/
-void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
+static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
int i;
struct pci_dev *pdev = adapter->pdev;
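
Making t3_free_qset() static is safe because its only callers live in sge.c;
roughly, teardown walks every queue set, as in this sketch of the sole call
site (assuming the usual SGE_QSETS bound used elsewhere in the driver):

/* free every queue set owned by the adapter */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}
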
* The MSI-X interrupt handler for an SGE response queue for the NAPI case
* (i.e., response queue serviced by NAPI polling).
*/
-irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
+static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
struct sge_rspq *q = &qs->rspq;
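
Now that the handler is static, it is reached only via the request_irq() call
made at MSI-X setup time. A hedged sketch of that binding (the msix_info
bookkeeping names follow cxgb3_main.c, but treat them as illustrative):

/* bind one MSI-X vector per queue set to the NAPI handler; the qset
 * pointer passed as the cookie is what the handler casts back above */
err = request_irq(adap->msix_info[qidx + 1].vec,
		  t3_sge_intr_msix_napi, 0,
		  adap->msix_info[qidx + 1].desc,
		  &adap->sge.qs[qidx]);
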
* one SGE response queue per port in this mode and protect all response
* queues with queue 0's lock.
*/
-irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
+static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
{
int new_packets;
struct adapter *adap = cookie;
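
The "queue 0's lock" discipline the comment describes looks roughly like this
in the body that follows (a sketch assuming the mainline shape, where
rspq_check_napi() schedules NAPI for a queue that has work pending):

/* both response queues are checked under qs[0]'s rspq lock */
struct sge_rspq *q = &adap->sge.qs[0].rspq;

spin_lock(&q->lock);
new_packets = rspq_check_napi(&adap->sge.qs[0]);
if (adap->params.nports == 2)
	new_packets += rspq_check_napi(&adap->sge.qs[1]);
if (!new_packets && t3_slow_intr_handler(adap) == 0)
	q->unhandled_irqs++;	/* no queue had work; count the spurious IRQ */
spin_unlock(&q->lock);
return IRQ_HANDLED;
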
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t3_write_reg(adap, addr_reg, start_idx);
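
The hunk's context window cuts the loop short; for reference, the
address/data indirection completes like this (a sketch, assuming the usual
pattern: write the index, then read the value back through the data register):

while (nregs--) {
	t3_write_reg(adap, addr_reg, start_idx);	/* select register */
	*vals++ = t3_read_reg(adap, data_reg);		/* fetch its value */
	start_idx++;
}
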
* Older PCIe cards lose their config space during reset, PCI-X
* ones don't.
*/
-int t3_reset_adapter(struct adapter *adapter)
+static int t3_reset_adapter(struct adapter *adapter)
{
int i, save_and_restore_pcie =
adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
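
The save/restore the comment alludes to brackets the warm reset; a sketch of
the remainder of the body, assuming it follows mainline cxgb3 (the register
and flag names are the driver's own):

uint16_t devid = 0;

if (save_and_restore_pcie)
	pci_save_state(adapter->pdev);	/* pre-B2 PCIe loses config space */
t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
msleep(50);				/* give the chip time to reset */

/* wait up to ~100ms for the device ID to reappear */
for (i = 0; i < 10 && devid != 0x1425; i++) {
	pci_read_config_word(adapter->pdev, 0x00, &devid);
	msleep(10);
}
if (devid != 0x1425)
	return -1;

if (save_and_restore_pcie)
	pci_restore_state(adapter->pdev);
return 0;
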