iwlagn: add rx_free to transport layer
authorEmmanuel Grumbach <emmanuel.grumbach@intel.com>
Tue, 21 Jun 2011 11:25:45 +0000 (14:25 +0300)
committerWey-Yi Guy <wey-yi.w.guy@intel.com>
Fri, 1 Jul 2011 14:57:45 +0000 (07:57 -0700)
The transport layer needs to release all rx resources. This function is an API for it.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
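
For illustration only, not part of the patch: a minimal, self-contained sketch of the ops-table pattern being extended here. The simplified structs and the demo_rx_init()/demo_rx_free() stand-ins are assumptions made so the example builds outside the kernel; in the driver the real hooks are iwl_trans_rx_init() and iwl_trans_rx_free(), reached as priv->trans.ops->rx_free(priv) from iwl_remove().

/*
 * Sketch only: simplified, user-space stand-ins for the kernel structures.
 */
#include <stdio.h>

struct iwl_priv;                                /* defined below */

/* transport specific operations -- the table this patch extends */
struct iwl_trans_ops {
        int (*rx_init)(struct iwl_priv *priv);  /* alloc/init rx memory  */
        void (*rx_free)(struct iwl_priv *priv); /* release all rx memory */
};

struct iwl_trans {
        const struct iwl_trans_ops *ops;
};

struct iwl_priv {
        struct iwl_trans trans;
};

/* toy backends standing in for iwl_trans_rx_init()/iwl_trans_rx_free() */
static int demo_rx_init(struct iwl_priv *priv)
{
        (void)priv;
        printf("rx_init: allocate and init the rx queue\n");
        return 0;
}

static void demo_rx_free(struct iwl_priv *priv)
{
        (void)priv;
        printf("rx_free: release rx buffers, bd ring and rb_stts\n");
}

static const struct iwl_trans_ops demo_ops = {
        .rx_init = demo_rx_init,
        .rx_free = demo_rx_free,
};

int main(void)
{
        struct iwl_priv priv = { .trans = { .ops = &demo_ops } };

        if (priv.trans.ops->rx_init(&priv))
                return 1;

        /* on teardown the core just calls through the ops table,
         * mirroring the priv->trans.ops->rx_free(priv) call in iwl_remove() */
        priv.trans.ops->rx_free(&priv);
        return 0;
}

The point of the indirection is that the core driver no longer needs to know how the transport allocated its rx resources; on teardown it only calls the hook.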
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-trans.c

index 3d971142786e510d4169618577438759b4f5ee39..a2c5c6b6cd3a570f4dacb3218cff78f4e48b5002 100644 (file)
@@ -910,33 +910,6 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv)
        iwlagn_rx_queue_restock(priv);
 }
 
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       int i;
-       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-               if (rxq->pool[i].page != NULL) {
-                       dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               DMA_FROM_DEVICE);
-                       __iwl_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-       }
-
-       dma_free_coherent(priv->bus.dev, 4 * RX_QUEUE_SIZE,
-                         rxq->bd, rxq->bd_dma);
-       dma_free_coherent(priv->bus.dev,
-                         sizeof(struct iwl_rb_status),
-                         rxq->rb_stts, rxq->rb_stts_dma);
-       rxq->bd = NULL;
-       rxq->rb_stts  = NULL;
-}
-
 int iwlagn_rxq_stop(struct iwl_priv *priv)
 {
 
index b06571871580dccec9e6782af38c9b2fbd9051d6..f9127e7f36c77a8890f40643641f9a5cfd06da17 100644 (file)
@@ -3718,8 +3718,7 @@ void __devexit iwl_remove(struct iwl_priv * priv)
 
        iwl_dealloc_ucode(priv);
 
-       if (priv->rxq.bd)
-               iwlagn_rx_queue_free(priv, &priv->rxq);
+       priv->trans.ops->rx_free(priv);
        iwlagn_hw_txq_ctx_free(priv);
 
        iwl_eeprom_free(priv);
index 7273297a6f4085a06709290ed2edb0fe3a3b8bb5..877a6944dec3dd6c0633c3fcab79a0ec6fee2738 100644 (file)
@@ -193,7 +193,6 @@ void iwlagn_rx_queue_restock(struct iwl_priv *priv);
 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
 void iwlagn_rx_replenish(struct iwl_priv *priv);
 void iwlagn_rx_replenish_now(struct iwl_priv *priv);
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 int iwlagn_rxq_stop(struct iwl_priv *priv);
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
index 7d3e55d0bc6267933b55ca696fabfc5f75662b2a..04b19a431397e9c8302b2020aa1d261a4bffc21b 100644 (file)
@@ -1233,9 +1233,11 @@ struct iwl_trans;
  * struct iwl_trans_ops - transport specific operations
 
  * @rx_init: inits the rx memory, allocate it if needed
+ * @rx_free: frees the rx memory
  */
 struct iwl_trans_ops {
        int (*rx_init)(struct iwl_priv *priv);
+       void (*rx_free)(struct iwl_priv *priv);
 };
 
 struct iwl_trans {
index ccf73ff63956241e106d935f7c133fb3fa02617b..1f5834b8a639692cbc02457c52663811f69de674 100644 (file)
@@ -60,7 +60,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
- #include "iwl-dev.h"
+#include "iwl-dev.h"
 #include "iwl-trans.h"
 
 static int iwl_trans_rx_alloc(struct iwl_priv *priv)
@@ -78,12 +78,11 @@ static int iwl_trans_rx_alloc(struct iwl_priv *priv)
                return -EINVAL;
 
        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
-       /*Every descriptor is an __le32, hence its */
-       rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
-                                    GFP_KERNEL);
+       rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+                                    &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;
-       memset(rxq->bd, 0, 4 * RX_QUEUE_SIZE);
+       memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
 
        /*Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
@@ -95,28 +94,18 @@ static int iwl_trans_rx_alloc(struct iwl_priv *priv)
        return 0;
 
 err_rb_stts:
-       dma_free_coherent(dev, 4 * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma);
+       dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+                       rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
 err_bd:
        return -ENOMEM;
 }
 
-static int iwl_trans_rx_init(struct iwl_priv *priv)
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
 {
        struct iwl_rx_queue *rxq = &priv->rxq;
-       int i, err;
-       unsigned long flags;
-
-       if (!rxq->bd) {
-               err = iwl_trans_rx_alloc(priv);
-               if (err)
-                       return err;
-       }
-
-       spin_lock_irqsave(&rxq->lock, flags);
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
+       int i;
 
        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
@@ -131,6 +120,25 @@ static int iwl_trans_rx_init(struct iwl_priv *priv)
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
+}
+
+static int iwl_trans_rx_init(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       int i, err;
+       unsigned long flags;
+
+       if (!rxq->bd) {
+               err = iwl_trans_rx_alloc(priv);
+               if (err)
+                       return err;
+       }
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+
+       iwl_trans_rxq_free_rx_bufs(priv);
 
        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;
@@ -145,8 +153,40 @@ static int iwl_trans_rx_init(struct iwl_priv *priv)
        return 0;
 }
 
+static void iwl_trans_rx_free(struct iwl_priv *priv)
+{
+       struct iwl_rx_queue *rxq = &priv->rxq;
+       unsigned long flags;
+
+       /* if rxq->bd is NULL, it means that nothing has been allocated,
+        * exit now */
+       if (!rxq->bd) {
+               IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
+               return;
+       }
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       iwl_trans_rxq_free_rx_bufs(priv);
+       spin_unlock_irqrestore(&rxq->lock, flags);
+
+       dma_free_coherent(priv->bus.dev, sizeof(__le32) * RX_QUEUE_SIZE,
+                         rxq->bd, rxq->bd_dma);
+       memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+       rxq->bd = NULL;
+
+       if (rxq->rb_stts)
+               dma_free_coherent(priv->bus.dev,
+                                 sizeof(struct iwl_rb_status),
+                                 rxq->rb_stts, rxq->rb_stts_dma);
+       else
+               IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
+       memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+       rxq->rb_stts = NULL;
+}
+
 static const struct iwl_trans_ops trans_ops = {
        .rx_init = iwl_trans_rx_init,
+       .rx_free = iwl_trans_rx_free,
 };
 
 void iwl_trans_register(struct iwl_trans *trans)