return 0;
}
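
+/* Free the DMA buffer used to stage short firmware command requests. */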
+static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ if (bp->hwrm_short_cmd_req_addr) {
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ bp->hwrm_short_cmd_req_addr,
+ bp->hwrm_short_cmd_req_dma_addr);
+ bp->hwrm_short_cmd_req_addr = NULL;
+ }
+}
+
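+/* Allocate a DMA-coherent buffer, BNXT_HWRM_MAX_REQ_LEN bytes long, to
+ * stage the full request when the short command format is in use.
+ */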
+static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_short_cmd_req_addr =
+ dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ &bp->hwrm_short_cmd_req_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_short_cmd_req_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
static void bnxt_free_stats(struct bnxt *bp)
{
u32 size, i;
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+ struct hwrm_short_input short_input = {0};

req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
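
+ /* When firmware mandates the short command format, only a small
+ * hwrm_short_input descriptor goes through BAR0; the full request is
+ * staged in the DMA buffer that the descriptor points to.
+ */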
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+
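+ /* Stage the full request in the DMA buffer and zero the tail so
+ * firmware never reads stale data.
+ */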
+ memcpy(short_cmd_req, req, msg_len);
+ memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
+ msg_len);
+
+ short_input.req_type = req->req_type;
+ short_input.signature =
+ cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
+ short_input.size = cpu_to_le16(msg_len);
+ short_input.req_addr =
+ cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
+
+ data = (u32 *)&short_input;
+ msg_len = sizeof(short_input);
+
+ /* Sync memory write before updating doorbell */
+ wmb();
+
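+ /* Only the short descriptor's length needs zero-padding in BAR0 */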
+ max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
+ }
+
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < max_req_len; i += 4)
writel(0, bp->bar0 + i);

/* currently supports only one outstanding message */
int rc;
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;

bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
!resp->chip_metal)
bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
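
+ /* Use the short command format only when firmware advertises both
+ * support for it and that it is required.
+ */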
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bp->flags |= BNXT_FLAG_SHORT_CMD;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_free_hwrm_short_cmd_req(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
if (rc)
goto init_err_pci_clean;
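
+ /* Firmware that requires the short command format needs the DMA
+ * staging buffer in place before any further HWRM commands are sent.
+ */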
+ if (bp->flags & BNXT_FLAG_SHORT_CMD) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ goto init_err_pci_clean;
+ }
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
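+/* Length of the short descriptor actually written to BAR0 when the short
+ * command format is in use
+ */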
+#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
+ #define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
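+ /* staging buffer for full-length requests in short command mode */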
+ void *hwrm_short_cmd_req_addr;
+ dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
void *hwrm_dbg_resp_addr;