/* global key */
WEP_KEY = 0x20,
+ /* Memory */
+ SHARED_MEM_CFG = 0x25,
+
/* TDLS */
TDLS_CHANNEL_SWITCH_CMD = 0x27,
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
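+/* maximal number of TX and RX FIFOs described by the shared memory config */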
+#define TX_FIFO_MAX_NUM 8
+#define RX_FIFO_MAX_NUM 2
+
+/**
+ * struct iwl_shared_mem_cfg - shared memory configuration from the FW
+ *
+ * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
+ * accessible)
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
+ * 0x0 as accessible only via DBGM RDAT)
+ * @sample_buff_size: internal sample buff size
+ * @txfifo_addr: start addr of TXF0 (excluding the 0.5KB context table; pre
+ * 8000 HW set to 0x0 as not accessible)
+ * @txfifo_size: size of TXF0 ... TXF7
+ * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
+ * when paging is not supported this should be 0
+ * @page_buff_size: size of the area pointed to by @page_buff_addr
+ */
+struct iwl_shared_mem_cfg {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
return ret;
}
+static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+{
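+ /*
+ * The command carries no payload; CMD_WANT_SKB keeps the response
+ * packet around so the layout reported by the FW can be parsed below.
+ */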
+ struct iwl_host_cmd cmd = {
+ .id = SHARED_MEM_CFG,
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_shared_mem_cfg *mem_cfg;
+ u32 i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
+ pkt->hdr.flags);
+ goto exit;
+ }
+
+ mem_cfg = (void *)pkt->data;
+
+ mvm->shared_mem_cfg.shared_mem_addr =
+ le32_to_cpu(mem_cfg->shared_mem_addr);
+ mvm->shared_mem_cfg.shared_mem_size =
+ le32_to_cpu(mem_cfg->shared_mem_size);
+ mvm->shared_mem_cfg.sample_buff_addr =
+ le32_to_cpu(mem_cfg->sample_buff_addr);
+ mvm->shared_mem_cfg.sample_buff_size =
+ le32_to_cpu(mem_cfg->sample_buff_size);
+ mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
+ mvm->shared_mem_cfg.txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+ mvm->shared_mem_cfg.rxfifo_size[i] =
+ le32_to_cpu(mem_cfg->rxfifo_size[i]);
+ mvm->shared_mem_cfg.page_buff_addr =
+ le32_to_cpu(mem_cfg->page_buff_addr);
+ mvm->shared_mem_cfg.page_buff_size =
+ le32_to_cpu(mem_cfg->page_buff_size);
+ IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
+
+exit:
+ iwl_free_resp(&cmd);
+}
+
void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
{
/* stop recording */
goto error;
}
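+ /* Read the FW's shared memory layout; the FIFO dumps rely on these sizes */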
+ iwl_mvm_get_shared_mem_conf(mvm);
+
ret = iwl_mvm_sf_update(mvm, NULL, false);
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
kfree(fw_error_dump);
}
+static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ u32 *fifo_data;
+ u32 fifo_len;
+ unsigned long flags;
+ int i, j;
+
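+ /* The FIFO registers live in the periphery, so NIC access must be held */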
+ if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
+ return;
+
+ /* Pull RXF data from all RXFs */
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
+ /*
+ * The registers of each RXF are offset from those of the previous
+ * RXF by RXF_DIFF_FROM_PREV.
+ */
+ u32 offset_diff = RXF_DIFF_FROM_PREV * i;
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the RXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_D_SPACE +
+ offset_diff));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_WR_PTR +
+ offset_diff));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_RD_PTR +
+ offset_diff));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_RD_FENCE_PTR +
+ offset_diff));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ RXF_SET_FENCE_MODE +
+ offset_diff));
+
+ /* Lock fence */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_SET_FENCE_MODE + offset_diff, 0x1);
+ /* Set the fence pointer to the same place as the WR pointer */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_WR2FENCE + offset_diff, 0x1);
+ /* Set fence offset */
+ iwl_trans_write_prph(mvm->trans,
+ RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
+ 0x0);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
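+ /* each read of the fence-increment register returns the next FIFO word */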
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] = iwl_trans_read_prph(mvm->trans,
+ RXF_FIFO_RD_FENCE_INC +
+ offset_diff);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+
+ /* Pull TXF data from all TXFs */
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
+ /* Select which TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
+
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the TXF */
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+ (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FIFO_ITEM_CNT));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_WR_PTR));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_RD_PTR));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_FENCE_PTR));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_LOCK_FENCE));
+
+ /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+ iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
+ TXF_WR_PTR);
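+ /*
+ * Starting at the WR pointer lets the reads below wrap through the
+ * entire FIFO contents.
+ */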
+
+ /* Dummy-read to advance the read pointer to the head */
+ iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] = iwl_trans_read_prph(mvm->trans,
+ TXF_READ_MODIFY_DATA);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+
+ iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs;
- u32 file_len, rxf_len;
- unsigned long flags;
- int reg_val;
+ u32 file_len, fifo_data_len = 0;
u32 smem_len = mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len;
sram_len = mvm->cfg->dccm_len;
}
- /* reading buffer size */
- reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
- rxf_len = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+ /* reading RXF/TXF sizes */
+ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
+ struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
+ int i;
+
+ fifo_data_len = 0;
+
+ /* Count RXF size */
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
+ if (!mem_cfg->rxfifo_size[i])
+ continue;
+
+ /* Add header info */
+ fifo_data_len += mem_cfg->rxfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+
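+ /* Count TXF sizes the same way */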
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+ if (!mem_cfg->txfifo_size[i])
+ continue;
- /* the register holds the value divided by 128 */
- rxf_len = rxf_len << 7;
+ /* Add header info */
+ fifo_data_len += mem_cfg->txfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
file_len = sizeof(*dump_file) +
- sizeof(*dump_data) * 3 +
+ sizeof(*dump_data) * 2 +
sram_len + sizeof(*dump_mem) +
- rxf_len +
+ fifo_data_len +
sizeof(*dump_info);
/* Make room for the SMEM, if it exists */
sizeof(dump_info->bus_human_readable));
dump_data = iwl_fw_error_next_data(dump_data);
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
- dump_data->len = cpu_to_le32(rxf_len);
-
- if (iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
- u32 *rxf = (void *)dump_data->data;
- int i;
+ /* We only dump the FIFOs if the FW is in an error state */
+ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
+ iwl_mvm_dump_fifos(mvm, &dump_data);
- for (i = 0; i < (rxf_len / sizeof(u32)); i++) {
- iwl_trans_write_prph(mvm->trans,
- RXF_LD_FENCE_OFFSET_ADDR,
- i * sizeof(u32));
- rxf[i] = iwl_trans_read_prph(mvm->trans,
- RXF_FIFO_RD_FENCE_ADDR);
- }
- iwl_trans_release_nic_access(mvm->trans, &flags);
- }
-
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;