*
*****************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <net/ip6_checksum.h>
+#include <net/tso.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
return 0;
}
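+/*
+ * iwl_pcie_free_tso_page - free the page used to hold TSO/A-MSDU headers
+ *
+ * iwl_fill_data_tbs_amsdu() stashes a reference to the header page in the
+ * skb's driver_data so it can be released once the frame is reclaimed.
+ */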
+static void iwl_pcie_free_tso_page(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) {
+ struct page *page =
+ info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA];
+
+ __free_page(page);
+ info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL;
+ }
+}
+
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
while (q->write_ptr != q->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, q->read_ptr);
+
+ if (txq_id != trans_pcie->cmd_queue) {
+ struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+
+ if (WARN_ON_ONCE(!skb))
+ continue;
+
+ iwl_pcie_free_tso_page(skb);
+ }
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
for (;
q->read_ptr != tfd_num;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
- if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+ if (WARN_ON_ONCE(!skb))
continue;
- __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+ iwl_pcie_free_tso_page(skb);
+
+ __skb_queue_tail(skbs, skb);
txq->entries[txq->q.read_ptr].skb = NULL;
return 0;
}
+#ifdef CONFIG_INET
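+/*
+ * get_page_hdr - return the per-CPU page used to build A-MSDU subframe headers
+ *
+ * Allocate a fresh page if there is none yet, or if the current one does not
+ * have len bytes left. In-flight frames keep the old page alive through the
+ * extra reference taken with get_page() in iwl_fill_data_tbs_amsdu().
+ */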
+static struct iwl_tso_hdr_page *
+get_page_hdr(struct iwl_trans *trans, size_t len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+
+ if (!p->page)
+ goto alloc;
+
+ /* enough room on this page */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
+ return p;
+
+ /* We don't have enough room on this page, get a new one. */
+ __free_page(p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+ return p;
+}
+
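+/*
+ * Seed tcph->check with the TCP pseudo-header checksum (and refresh the IPv4
+ * header checksum) so the software checksum path can later fold the full
+ * checksum over the subframe's TCP header and payload.
+ */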
+static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
+ bool ipv6, unsigned int len)
+{
+ if (ipv6) {
+ struct ipv6hdr *iphv6 = iph;
+
+ tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
+ len + tcph->doff * 4,
+ IPPROTO_TCP, 0);
+ } else {
+ struct iphdr *iphv4 = iph;
+
+ ip_send_check(iphv4);
+ tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
+ len + tcph->doff * 4,
+ IPPROTO_TCP, 0);
+ }
+}
+
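+/*
+ * Build the TFDs for an A-MSDU using the TSO core: the 802.11 header and IV
+ * are pulled off the skb so tso_start()/tso_build_hdr()/tso_build_data() can
+ * walk the payload. Each MSS-sized chunk becomes one A-MSDU subframe whose
+ * 802.3 header + SNAP/IP/TCP headers are written to a per-CPU page and mapped
+ * as a separate TB, followed by the payload TBs.
+ */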
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_txq *txq, u8 hdr_len,
+ struct iwl_cmd_meta *out_meta,
+ struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ struct iwl_queue *q = &txq->q;
+ u16 length, iv_len, amsdu_pad;
+ u8 *start_hdr;
+ struct iwl_tso_hdr_page *hdr_page;
+ int ret;
+ struct tso_t tso;
+
+ /* if the packet is protected, then it must be CCMP or GCMP */
+ BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
+ iv_len = ieee80211_has_protected(hdr->frame_control) ?
+ IEEE80211_CCMP_HDR_LEN : 0;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ &txq->tfds[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+ NULL, 0);
+
+ ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+
+ /* Our device supports 9 segments at most; all the headers will fit in one page */
+ hdr_page = get_page_hdr(trans, hdr_room);
+ if (!hdr_page)
+ return -ENOMEM;
+
+ get_page(hdr_page->page);
+ start_hdr = hdr_page->pos;
+ info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page;
+ memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
+ hdr_page->pos += iv_len;
+
+ /*
+ * Pull the ieee80211 header + IV to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len + iv_len);
+
+ tso_start(skb, &tso);
+
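+ /*
+ * Build one A-MSDU subframe per iteration: pad the previous subframe to
+ * a 4-byte boundary, write the DA/SA/length fields, let tso_build_hdr()
+ * append the SNAP/IP/TCP headers, map the header block, then map the
+ * payload in tso.size-bounded chunks.
+ */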
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left =
+ min_t(unsigned int, mss, total_len);
+ struct sk_buff *csum_skb = NULL;
+ unsigned int hdr_tb_len;
+ dma_addr_t hdr_tb_phys;
+ struct tcphdr *tcph;
+ u8 *iph;
+
+ total_len -= data_left;
+
+ memset(hdr_page->pos, 0, amsdu_pad);
+ hdr_page->pos += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+ hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+ hdr_page->pos += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+ hdr_page->pos += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well which will be considered
+ * as MAC header.
+ */
+ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+ iph = hdr_page->pos + 8;
+ tcph = (void *)(iph + ip_hdrlen);
+
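+ /*
+ * When the hardware cannot checksum the A-MSDU (sw_csum_tx), collect
+ * the TCP header and payload of this subframe in a temporary skb so
+ * the checksum can be computed in software once the payload has been
+ * copied, and patched into the mapped header further down.
+ */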
+ /* For testing on current hardware only */
+ if (trans_pcie->sw_csum_tx) {
+ csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
+ GFP_ATOMIC);
+ if (!csum_skb) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ iwl_compute_pseudo_hdr_csum(iph, tcph,
+ skb->protocol ==
+ htons(ETH_P_IPV6),
+ data_left);
+
+ memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
+ tcph, tcp_hdrlen(skb));
+ skb_set_transport_header(csum_skb, 0);
+ csum_skb->csum_start =
+ (unsigned char *)tcp_hdr(csum_skb) -
+ csum_skb->head;
+ }
+
+ hdr_page->pos += snap_ip_tcp_hdrlen;
+
+ hdr_tb_len = hdr_page->pos - start_hdr;
+ hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
+ hdr_tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+ iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
+ hdr_tb_len, false);
+ trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
+ hdr_tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = hdr_page->pos;
+
+ /* put the payload */
+ while (data_left) {
+ unsigned int size = min_t(unsigned int, tso.size,
+ data_left);
+ dma_addr_t tb_phys;
+
+ if (trans_pcie->sw_csum_tx)
+ memcpy(skb_put(csum_skb, size), tso.data, size);
+
+ tb_phys = dma_map_single(trans->dev, tso.data,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+ size, false);
+ trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
+ size);
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
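+ /*
+ * Fold the checksum accumulated in csum_skb and patch it into the
+ * TCP header that was DMA-mapped above, syncing the buffer around
+ * the CPU write.
+ */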
+ /* For testing on early hardware only */
+ if (trans_pcie->sw_csum_tx) {
+ __wsum csum;
+
+ csum = skb_checksum(csum_skb,
+ skb_checksum_start_offset(csum_skb),
+ csum_skb->len -
+ skb_checksum_start_offset(csum_skb),
+ 0);
+ dev_kfree_skb(csum_skb);
+ dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
+ hdr_tb_len, DMA_TO_DEVICE);
+ tcph->check = csum_fold(csum);
+ dma_sync_single_for_device(trans->dev, hdr_tb_phys,
+ hdr_tb_len, DMA_TO_DEVICE);
+ }
+ }
+
+ /* re-add the WiFi header and IV */
+ skb_push(skb, hdr_len + iv_len);
+
+ return 0;
+
+out_unmap:
+ iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+ return ret;
+}
+#else /* CONFIG_INET */
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_txq *txq, u8 hdr_len,
+ struct iwl_cmd_meta *out_meta,
+ struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+{
+ /* No A-MSDU without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
- if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
- out_meta, dev_cmd, tb1_len)))
+ if (ieee80211_is_data_qos(fc) &&
+ (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+ if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
+ out_meta, dev_cmd,
+ tb1_len)))
+ goto out_err;
+ } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+ out_meta, dev_cmd, tb1_len))) {
goto out_err;
+ }
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));