ath10k: reuse copy engine 5 (htt rx) descriptors
authorRajkumar Manoharan <rmanohar@qti.qualcomm.com>
Tue, 22 Mar 2016 11:52:18 +0000 (17:22 +0530)
committerKalle Valo <kvalo@qca.qualcomm.com>
Mon, 4 Apr 2016 14:03:21 +0000 (17:03 +0300)
Whenever an htt rx indication, i.e., a target-to-host message, is received
on the rx copy engine (CE 5), the message will be freed after processing
the response. Then CE 5 will be refilled with new descriptors at
post rx processing. These memory alloc and free operations can be avoided
by reusing the same descriptors.

During CE pipe allocation, the full ring is not initialized, i.e., only n-1
entries are filled up. So for CE 5 the full ring should be filled up to reuse
descriptors. Moreover, the CE 5 write index will be updated in a single shot
instead of by incremental access. This could avoid multiple pci_write and
ce_ring accesses. From experiments, it improves CPU usage by ~3% on the
IPQ4019 platform.

Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/pci.c

index d6da404c9fa7d7c4e28558f9ef00a9e59e91498b..7212802eb3274cfd5a4857666ef214efb1146bc7 100644 (file)
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 
        lockdep_assert_held(&ar_pci->ce_lock);
 
-       if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+       if ((pipe->id != 5) &&
+           CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
                return -ENOSPC;
 
        desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
        return 0;
 }
 
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+       struct ath10k *ar = pipe->ar;
+       struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int write_index = dest_ring->write_index;
+       u32 ctrl_addr = pipe->ctrl_addr;
+
+       write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+       ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+       dest_ring->write_index = write_index;
+}
+
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 {
        struct ath10k *ar = pipe->ar;
@@ -478,8 +492,11 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];
 
-       /* sanity */
-       dest_ring->per_transfer_context[sw_index] = NULL;
+       /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+        * So update transfer context all CEs except CE5.
+        */
+       if (ce_state->id != 5)
+               dest_ring->per_transfer_context[sw_index] = NULL;
 
        /* Update sw_index */
        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
index 68717e5b9d898da7d0d746d8a485205f7ad158dd..25cafcfd6b12750fc1f48ee4d36bf9365c622740 100644 (file)
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
 int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -410,6 +411,8 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
        (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+               (((idx) + (num)) & (nentries_mask))
 
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
                                ar->regs->ce_wrap_intr_sum_host_msi_lsb
index 290a61afde1a35dd9cf097a0c0481b47ec37ad0f..0b305efe6c946fd900cae31a8020cc89a292f178 100644 (file)
@@ -809,7 +809,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
        spin_lock_bh(&ar_pci->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
-       while (num--) {
+
+       while (num >= 0) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
@@ -819,6 +820,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
+               num--;
        }
 }
 
@@ -1212,6 +1214,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
        ath10k_pci_rx_post_pipe(pipe_info);
 }
 
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+                                        void (*callback)(struct ath10k *ar,
+                                                         struct sk_buff *skb))
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+       struct sk_buff *skb;
+       struct sk_buff_head list;
+       void *transfer_context;
+       unsigned int nbytes, max_nbytes, nentries;
+       int orig_len;
+
+       /* No need to aquire ce_lock for CE5, since this is the only place CE5
+        * is processed other than init and deinit. Before releasing CE5
+        * buffers, interrupts are disabled. Thus CE5 access is serialized.
+        */
+       __skb_queue_head_init(&list);
+       while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+                                                   &nbytes) == 0) {
+               skb = transfer_context;
+               max_nbytes = skb->len + skb_tailroom(skb);
+
+               if (unlikely(max_nbytes < nbytes)) {
+                       ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+                                   nbytes, max_nbytes);
+                       continue;
+               }
+
+               dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                       max_nbytes, DMA_FROM_DEVICE);
+               skb_put(skb, nbytes);
+               __skb_queue_tail(&list, skb);
+       }
+
+       nentries = skb_queue_len(&list);
+       while ((skb = __skb_dequeue(&list))) {
+               ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+                          ce_state->id, skb->len);
+               ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+                               skb->data, skb->len);
+
+               orig_len = skb->len;
+               callback(ar, skb);
+               skb_push(skb, orig_len - skb->len);
+               skb_reset_tail_pointer(skb);
+               skb_trim(skb, 0);
+
+               /*let device gain the buffer again*/
+               dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+                                          skb->len + skb_tailroom(skb),
+                                          DMA_FROM_DEVICE);
+       }
+       ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
 /* Called by lower (CE) layer when data is received from the Target. */
 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
 {
@@ -1268,7 +1327,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);
 
-       ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+       ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
 }
 
 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,