void iwl_down(struct iwl_priv *priv);
void iwl_cancel_deferred_work(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb);
bool iwl_check_for_ct_kill(struct iwl_priv *priv);
iwlagn_bt_rx_handler_setup(priv);
}
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
iwl_dvm_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
-
- return 0;
}
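
With the int return and the iwl_device_cmd argument gone, the per-command handlers that iwl_rx_dispatch() fans out to have nothing to hand back either. A minimal sketch of such a handler under the new contract; the handler name is hypothetical and its prototype is assumed to mirror iwl_rx_dispatch(), which these hunks do not show directly:

/* Illustrative handler only; the name and its registration are hypothetical. */
static void iwl_rxon_status_example(struct iwl_priv *priv,
				    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	/* act on pkt->data; failures are logged rather than returned,
	 * since the dispatcher no longer propagates a status */
	IWL_DEBUG_RX(priv, "example handler for 0x%02x\n", pkt->hdr.cmd);
}
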
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
- int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
+ void (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb);
void (*napi_add)(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct net_device *napi_dev,
op_mode->ops->stop(op_mode);
}
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb)
{
- return op_mode->ops->rx(op_mode, rxb, cmd);
+ op_mode->ops->rx(op_mode, rxb);
}
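
Since this hunk only changes the hook's prototype, here is a hedged sketch of the wiring on the op-mode side after the change; the variable name is illustrative and the initializer is trimmed to the one relevant field (the real tables live in each op mode's ops structure):

/* Sketch: the .rx hook now matches the void prototype above. */
static const struct iwl_op_mode_ops example_op_mode_ops = {
	/* other callbacks (start, stop, queue_full, ...) omitted */
	.rx = iwl_rx_dispatch,	/* void (*)(struct iwl_op_mode *,
					   struct iwl_rx_cmd_buffer *) */
};
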
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
* @resp_pkt: response packet, if %CMD_WANT_SKB was set
* @_rx_page_order: (internally used to free response packet)
* @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
struct iwl_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
u32 _rx_page_order;
- int handler_status;
u32 flags;
u16 len[IWL_MAX_CMD_TBS_PER_TFD];
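
With handler_status gone from struct iwl_host_cmd, a synchronous CMD_WANT_SKB caller only has the transport's return code and resp_pkt to look at. A sketch of that calling pattern, using a placeholder command ID; iwl_trans_send_cmd() and iwl_free_resp() are the existing transport helpers:

/* Sketch only; REPLY_EXAMPLE is a placeholder command ID. */
static int example_send_sync_cmd(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* the response lives in cmd.resp_pkt; there is no handler
	 * status to consult any more */
	iwl_free_resp(&cmd);
	return 0;
}
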
}
}
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
iwl_mvm_rx_rx_mpdu(mvm, rxb);
- return 0;
+ return;
}
iwl_mvm_rx_check_trigger(mvm, pkt);
if (!rx_h->async) {
rx_h->fn(mvm, rxb);
- return 0;
+ return;
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
/* we can't do much... */
if (!entry)
- return 0;
+ return;
entry->rxb._page = rxb_steal_page(rxb);
entry->rxb._offset = rxb->_offset;
schedule_work(&mvm->async_handlers_wk);
break;
}
-
- return 0;
}
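
For the async path above, the stolen page travels to the work item, which later runs the handler with the same void signature. A hedged sketch of how that deferred side consumes a queued entry; the entry layout is inferred from the assignments in this hunk, and the real worker in mvm/ops.c walks a list rather than taking a single entry:

/* Sketch of the deferred side, assuming each queued entry carries the
 * handler pointer and the stolen rxb as the hunk suggests. */
static void example_async_handler_run(struct iwl_mvm *mvm,
				      struct iwl_async_handler_entry *entry)
{
	entry->fn(mvm, &entry->rxb);	/* handler also returns void now */
	iwl_free_rxb(&entry->rxb);	/* release the page stolen above */
	kfree(entry);
}
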
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status);
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
struct iwl_rx_packet *pkt;
- struct iwl_device_cmd *cmd;
u16 sequence;
bool reclaim;
- int index, cmd_index, err, len;
+ int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
._rx_page_order = trans_pcie->rx_page_order,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- if (reclaim)
- cmd = txq->entries[cmd_index].cmd;
- else
- cmd = NULL;
-
- err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+ iwl_op_mode_rx(trans->op_mode, &rxcb);
if (reclaim) {
kzfree(txq->entries[cmd_index].free_buf);
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (!rxcb._page_stolen)
- iwl_pcie_hcmd_complete(trans, &rxcb, err);
+ iwl_pcie_hcmd_complete(trans, &rxcb);
else
IWL_WARN(trans, "Claim null rxb?\n");
}
/*
* iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- * (put in setup_rx_handlers)
*
* If an Rx buffer has an async callback associated with it, the callback
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1.
*/
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status)
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
meta->source->resp_pkt = pkt;
meta->source->_rx_page_addr = (unsigned long)page_address(p);
meta->source->_rx_page_order = trans_pcie->rx_page_order;
- meta->source->handler_status = handler_status;
}
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
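
iwl_pcie_hcmd_complete() now only stashes the response page for a CMD_WANT_SKB waiter instead of also recording a handler status. For reference, the waiter's release side then boils down to the existing iwl_free_resp() helper; the sketch below mirrors that helper rather than introducing new API:

/* Sketch of the matching release path (mirrors iwl_free_resp()). */
static void example_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
	cmd->resp_pkt = NULL;
}
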