/**
* Internal message identifier
*
-* @IWL_MVM_RXQ_SYNC: sync RSS queues
+* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
*/
enum iwl_mvm_rxq_notif_type {
- IWL_MVM_RXQ_SYNC,
+ IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
};
/**
* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+* FW is agnostic to the payload, so there are no endianness requirements.
*
* @type: value from &iwl_mvm_rxq_notif_type
+* @sync: ctrl path is waiting for all notifications to be received
* @cookie: internal cookie to identify old notifications
* @data: payload
*/
struct iwl_mvm_internal_rxq_notif {
- u32 type;
+ u16 type;
+ u16 sync;
u32 cookie;
u8 data[];
} __packed;
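/*
 * Illustrative sketch only, not part of this patch: a caller that needs to
 * carry a payload builds the header and the payload in one buffer, since
 * @data is a flexible array member (the kernel/GCC accept embedding such a
 * struct followed by the payload). The helper name and the u32 payload
 * below are hypothetical.
 */
static void iwl_mvm_example_notify_del_ba(struct iwl_mvm *mvm, u32 baid)
{
	struct {
		struct iwl_mvm_internal_rxq_notif hdr;
		u32 baid;	/* hypothetical payload, lands in hdr.data[] on RX */
	} __packed notif = {
		.hdr.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.hdr.sync = 1,	/* block until every RX queue has processed it */
		.baid = baid,
	};

	/* total size is 12 bytes, keeping the DWORD alignment requirement */
	iwl_mvm_sync_rx_queues_internal(mvm, &notif.hdr, sizeof(notif));
}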
-static void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm)
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
{
- struct iwl_mvm_internal_rxq_notif data = {
- .type = IWL_MVM_RXQ_SYNC,
- .cookie = mvm->queue_sync_cookie,
- };
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
if (!iwl_mvm_has_new_rx_api(mvm))
return;
- atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues);
+ notif->cookie = mvm->queue_sync_cookie;
+
+ if (notif->sync)
+ atomic_set(&mvm->queue_sync_counter,
+ mvm->trans->num_rx_queues);
- ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)&data, sizeof(data));
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
goto out;
}
-	ret = wait_event_timeout(notif_waitq,
-				 atomic_read(&mvm->queue_sync_counter) == 0,
-				 HZ);
-	WARN_ON_ONCE(!ret);
+
+	if (notif->sync) {
+		ret = wait_event_timeout(notif_waitq,
+					 atomic_read(&mvm->queue_sync_counter) == 0,
+					 HZ);
+		WARN_ON_ONCE(!ret);
+	}
out:
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_internal_rxq_notif data = {
+ .type = IWL_MVM_RXQ_EMPTY,
+ .sync = 1,
+ };
mutex_lock(&mvm->mutex);
- iwl_mvm_sync_rx_queues_internal(mvm);
+ iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
mutex_unlock(&mvm->mutex);
}
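/*
 * Sketch only, not part of the patch: with the new @sync flag a caller can
 * also send a fire-and-forget notification. iwl_mvm_sync_rx_queues_internal()
 * then skips both the counter setup and the wait, returning as soon as the
 * message has been handed to the FW. The function name below is hypothetical;
 * the caller must still hold mvm->mutex, as in the wrapper above.
 */
static void iwl_mvm_example_async_empty_notif(struct iwl_mvm *mvm)
{
	struct iwl_mvm_internal_rxq_notif data = {
		.type = IWL_MVM_RXQ_EMPTY,
		.sync = 0,	/* do not wait for the RX queues to ack */
	};

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
}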
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
- switch (internal_notif->type) {
- case IWL_MVM_RXQ_SYNC:
- if (mvm->queue_sync_cookie == internal_notif->cookie)
- atomic_dec(&mvm->queue_sync_counter);
- else
+ if (internal_notif->sync) {
+ if (mvm->queue_sync_cookie != internal_notif->cookie) {
WARN_ONCE(1,
"Received expired RX queue sync message\n");
+ return;
+ }
+ atomic_dec(&mvm->queue_sync_counter);
+ }
+
+ switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
- /* TODO */
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);