3 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
5 ****************************************************************************/
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/rtnetlink.h>
10 #include <net/sch_generic.h>
11 #include <linux/if_ether.h>
12 #include <scsc/scsc_logring.h>
18 #include "scsc_wifi_fcq.h"
21 #include "hip4_sampler.h"
/* Byte offsets (from the start of the IP header) used to extract the
 * TOS (IPv4) / Traffic Class (IPv6) field, and the 2-bit right shift
 * that drops the ECN bits to leave the 6-bit DSCP value.
 */
23 #define IP4_OFFSET_TO_TOS_FIELD 1
24 #define IP6_OFFSET_TO_TC_FIELD_0 0
25 #define IP6_OFFSET_TO_TC_FIELD_1 1
26 #define FIELD_TO_DSCP 2
/* Assured Forwarding DSCP codepoints (values match RFC 2597:
 * AFxy = 8x + 2y, e.g. AF43 = 38 = 0x26).
 */
34 #define DSCP_AF43 0x26
35 #define DSCP_AF42 0x24
36 #define DSCP_AF41 0x22
37 #define DSCP_AF33 0x1E
38 #define DSCP_AF32 0x1C
39 #define DSCP_AF31 0x1A
40 #define DSCP_AF23 0x16
41 #define DSCP_AF22 0x14
42 #define DSCP_AF21 0x12
43 #define DSCP_AF13 0x0E
44 #define DSCP_AF12 0x0C
45 #define DSCP_AF11 0x0A
58 static bool tcp_ack_suppression_disable
;
59 module_param(tcp_ack_suppression_disable
, bool, S_IRUGO
| S_IWUSR
);
60 MODULE_PARM_DESC(tcp_ack_suppression_disable
, "Disable TCP ack suppression feature");
62 static bool tcp_ack_suppression_disable_2g
;
63 module_param(tcp_ack_suppression_disable_2g
, bool, S_IRUGO
| S_IWUSR
);
64 MODULE_PARM_DESC(tcp_ack_suppression_disable_2g
, "Disable TCP ack suppression for only 2.4GHz band");
66 static bool tcp_ack_suppression_monitor
= true;
67 module_param(tcp_ack_suppression_monitor
, bool, S_IRUGO
| S_IWUSR
);
68 MODULE_PARM_DESC(tcp_ack_suppression_monitor
, "TCP ack suppression throughput monitor: Y: enable (default), N: disable");
70 static uint tcp_ack_suppression_monitor_interval
= 500;
71 module_param(tcp_ack_suppression_monitor_interval
, uint
, S_IRUGO
| S_IWUSR
);
72 MODULE_PARM_DESC(tcp_ack_suppression_monitor_interval
, "Sampling interval (in ms) for throughput monitor");
74 static uint tcp_ack_suppression_timeout
= 16;
75 module_param(tcp_ack_suppression_timeout
, uint
, S_IRUGO
| S_IWUSR
);
76 MODULE_PARM_DESC(tcp_ack_suppression_timeout
, "Timeout (in ms) before cached TCP ack is flushed to tx");
78 static uint tcp_ack_suppression_max
= 16;
79 module_param(tcp_ack_suppression_max
, uint
, S_IRUGO
| S_IWUSR
);
80 MODULE_PARM_DESC(tcp_ack_suppression_max
, "Maximum number of TCP acks suppressed before latest flushed to tx");
82 static uint tcp_ack_suppression_rate_very_high
= 100;
83 module_param(tcp_ack_suppression_rate_very_high
, int, S_IRUGO
| S_IWUSR
);
84 MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high
, "Rate (in Mbps) to apply very high degree of suppression");
86 static uint tcp_ack_suppression_rate_very_high_timeout
= 4;
87 module_param(tcp_ack_suppression_rate_very_high_timeout
, int, S_IRUGO
| S_IWUSR
);
88 MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_timeout
, "Timeout (in ms) before cached TCP ack is flushed in very high rate");
90 static uint tcp_ack_suppression_rate_very_high_acks
= 20;
91 module_param(tcp_ack_suppression_rate_very_high_acks
, uint
, S_IRUGO
| S_IWUSR
);
92 MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_acks
, "Maximum number of TCP acks suppressed before latest flushed in very high rate");
94 static uint tcp_ack_suppression_rate_high
= 20;
95 module_param(tcp_ack_suppression_rate_high
, int, S_IRUGO
| S_IWUSR
);
96 MODULE_PARM_DESC(tcp_ack_suppression_rate_high
, "Rate (in Mbps) to apply high degree of suppression");
98 static uint tcp_ack_suppression_rate_high_timeout
= 4;
99 module_param(tcp_ack_suppression_rate_high_timeout
, int, S_IRUGO
| S_IWUSR
);
100 MODULE_PARM_DESC(tcp_ack_suppression_rate_high_timeout
, "Timeout (in ms) before cached TCP ack is flushed in high rate");
102 static uint tcp_ack_suppression_rate_high_acks
= 16;
103 module_param(tcp_ack_suppression_rate_high_acks
, uint
, S_IRUGO
| S_IWUSR
);
104 MODULE_PARM_DESC(tcp_ack_suppression_rate_high_acks
, "Maximum number of TCP acks suppressed before latest flushed in high rate");
106 static uint tcp_ack_suppression_rate_low
= 1;
107 module_param(tcp_ack_suppression_rate_low
, int, S_IRUGO
| S_IWUSR
);
108 MODULE_PARM_DESC(tcp_ack_suppression_rate_low
, "Rate (in Mbps) to apply low degree of suppression");
110 static uint tcp_ack_suppression_rate_low_timeout
= 4;
111 module_param(tcp_ack_suppression_rate_low_timeout
, int, S_IRUGO
| S_IWUSR
);
112 MODULE_PARM_DESC(tcp_ack_suppression_rate_low_timeout
, "Timeout (in ms) before cached TCP ack is flushed in low rate");
114 static uint tcp_ack_suppression_rate_low_acks
= 10;
115 module_param(tcp_ack_suppression_rate_low_acks
, uint
, S_IRUGO
| S_IWUSR
);
116 MODULE_PARM_DESC(tcp_ack_suppression_rate_low_acks
, "Maximum number of TCP acks suppressed before latest flushed in low rate");
118 static uint tcp_ack_suppression_slow_start_acks
= 512;
119 module_param(tcp_ack_suppression_slow_start_acks
, uint
, S_IRUGO
| S_IWUSR
);
120 MODULE_PARM_DESC(tcp_ack_suppression_slow_start_acks
, "Maximum number of Acks sent in slow start");
122 static uint tcp_ack_suppression_rcv_window
= 128;
123 module_param(tcp_ack_suppression_rcv_window
, uint
, S_IRUGO
| S_IWUSR
);
124 MODULE_PARM_DESC(tcp_ack_suppression_rcv_window
, "Receive window size (in unit of Kbytes) that triggers Ack suppression");
126 #if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
127 static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list
*t
);
129 static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data
);
131 static int slsi_netif_tcp_ack_suppression_start(struct net_device
*dev
);
132 static int slsi_netif_tcp_ack_suppression_stop(struct net_device
*dev
);
133 static struct sk_buff
*slsi_netif_tcp_ack_suppression_pkt(struct net_device
*dev
, struct sk_buff
*skb
);
136 #ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
/* Derive randomized, locally-administered MAC addresses for the NAN
 * management interface (NMI) and NAN data interfaces (NDIs) from the
 * chip hardware address: bytes 3..5 are randomized (re-rolled once if
 * they collide with the real hw_addr bytes), the locally-administered
 * bit is set, the NMI flips bit 0x80 of byte 3, and each NDI XORs byte
 * 5 (exor_byte) with an incrementing exor_base.
 * NOTE(review): this function's loop tail and closing lines are not
 * visible in this extraction — verify against the original file before
 * relying on the exor_base wrap-around behavior.
 */
137 void slsi_net_randomize_nmi_ndi(struct slsi_dev
*sdev
)
139 int exor_base
= 1, exor_byte
= 5, i
;
140 u8 random_mac
[ETH_ALEN
];
142 /* Randomize mac address */
143 SLSI_ETHER_COPY(random_mac
, sdev
->hw_addr
);
144 /* If random number is same as actual bytes in hw_address
145 * try random again. hope 2nd random will not be same as
146 * bytes in hw_address
148 slsi_get_random_bytes(&random_mac
[3], 3);
149 if (!memcmp(&random_mac
[3], &sdev
->hw_addr
[3], 3))
150 slsi_get_random_bytes(&random_mac
[3], 3);
151 SLSI_ETHER_COPY(sdev
->netdev_addresses
[SLSI_NET_INDEX_NAN
], random_mac
);
152 /* Set the local bit */
153 sdev
->netdev_addresses
[SLSI_NET_INDEX_NAN
][0] |= 0x02;
154 /* EXOR 4th byte with 0x80 */
155 sdev
->netdev_addresses
[SLSI_NET_INDEX_NAN
][3] ^= 0x80;
156 for (i
= SLSI_NAN_DATA_IFINDEX_START
; i
< CONFIG_SCSC_WLAN_MAX_INTERFACES
+ 1; i
++) {
157 SLSI_ETHER_COPY(sdev
->netdev_addresses
[i
], random_mac
);
158 sdev
->netdev_addresses
[i
][0] |= 0x02;
159 sdev
->netdev_addresses
[i
][exor_byte
] ^= exor_base
;
161 /* currently supports upto 15 mac address for nan
170 /* Net Device callback operations */
/* ndo_open: bring the interface up.
 * Refuses while MLME is blocked; starts the chip via slsi_start() under
 * the wlan wakelock. On the first interface up it reads the hardware MAC
 * and derives the per-interface addresses (WLAN, P2P with local bit,
 * P2PX_SWLAN with local bit and byte-4 XOR 0x80, and NAN addresses when
 * enabled). Assigns dev_addr/perm_addr, handles monitor mode under
 * CONFIG_SCSC_WLAN_DEBUG, marks the vif available, bumps
 * netdev_up_count, re-arms the signal-wait completion, starts TCP ack
 * suppression and the tx queues, and applies RF-test-mode MIB settings.
 * NOTE(review): the early-return error paths (WARN_ON, mlme_blocked,
 * slsi_start failure) are truncated in this extraction — confirm their
 * return codes against the original file.
 */
171 static int slsi_net_open(struct net_device
*dev
)
173 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
174 struct slsi_dev
*sdev
= ndev_vif
->sdev
;
176 unsigned char dev_addr_zero_check
[ETH_ALEN
];
178 if (WARN_ON(ndev_vif
->is_available
))
181 if (sdev
->mlme_blocked
) {
182 SLSI_NET_WARN(dev
, "Fail: called when MLME in blocked state\n");
186 slsi_wakelock(&sdev
->wlan_wl
);
188 /* check if request to rf test mode. */
189 slsi_check_rf_test_mode();
191 err
= slsi_start(sdev
);
193 slsi_wakeunlock(&sdev
->wlan_wl
);
/* First interface up: fetch the hardware MAC and derive all per-index
 * netdev addresses from it.
 */
197 if (!sdev
->netdev_up_count
) {
198 slsi_get_hw_mac_address(sdev
, sdev
->hw_addr
);
199 /* Assign Addresses */
200 SLSI_ETHER_COPY(sdev
->netdev_addresses
[SLSI_NET_INDEX_WLAN
], sdev
->hw_addr
);
202 SLSI_ETHER_COPY(sdev
->netdev_addresses
[SLSI_NET_INDEX_P2P
], sdev
->hw_addr
);
203 /* Set the local bit */
204 sdev
->netdev_addresses
[SLSI_NET_INDEX_P2P
][0] |= 0x02;
206 SLSI_ETHER_COPY(sdev
->netdev_addresses
[SLSI_NET_INDEX_P2PX_SWLAN
], sdev
->hw_addr
);
207 /* Set the local bit */
208 sdev
->netdev_addresses
[SLSI_NET_INDEX_P2PX_SWLAN
][0] |= 0x02;
209 /* EXOR 5th byte with 0x80 */
210 sdev
->netdev_addresses
[SLSI_NET_INDEX_P2PX_SWLAN
][4] ^= 0x80;
211 #if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4 && defined(CONFIG_SCSC_WIFI_NAN_ENABLE)
212 slsi_net_randomize_nmi_ndi(sdev
);
214 sdev
->initial_scan
= true;
/* Only (re)assign dev_addr when it is still all-zero, i.e. not set by
 * user space.
 */
217 memset(dev_addr_zero_check
, 0, ETH_ALEN
);
218 if (!memcmp(dev
->dev_addr
, dev_addr_zero_check
, ETH_ALEN
)) {
219 #ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
220 if (SLSI_IS_VIF_INDEX_MHS(sdev
, ndev_vif
))
221 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[SLSI_NET_INDEX_P2P
]);
223 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[ndev_vif
->ifnum
]);
225 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[ndev_vif
->ifnum
]);
228 SLSI_ETHER_COPY(dev
->perm_addr
, sdev
->netdev_addresses
[ndev_vif
->ifnum
]);
229 SLSI_MUTEX_LOCK(ndev_vif
->vif_mutex
);
230 #ifdef CONFIG_SCSC_WLAN_DEBUG
231 if (ndev_vif
->iftype
== NL80211_IFTYPE_MONITOR
) {
232 err
= slsi_start_monitor_mode(sdev
, dev
);
234 slsi_wakeunlock(&sdev
->wlan_wl
);
235 SLSI_MUTEX_UNLOCK(ndev_vif
->vif_mutex
);
240 SLSI_NET_INFO(dev
, "ifnum:%d r:%d MAC:%pM\n", ndev_vif
->ifnum
, sdev
->recovery_status
, dev
->dev_addr
);
241 ndev_vif
->is_available
= true;
242 sdev
->netdev_up_count
++;
244 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
245 reinit_completion(&ndev_vif
->sig_wait
.completion
);
247 INIT_COMPLETION(ndev_vif
->sig_wait
.completion
);
250 slsi_netif_tcp_ack_suppression_start(dev
);
253 #ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
/* NAN data interfaces have no association step, so carrier goes on
 * immediately.
 */
254 if (ndev_vif
->ifnum
>= SLSI_NAN_DATA_IFINDEX_START
)
255 netif_carrier_on(dev
);
257 SLSI_MUTEX_UNLOCK(ndev_vif
->vif_mutex
);
259 netif_tx_start_all_queues(dev
);
260 slsi_wakeunlock(&sdev
->wlan_wl
);
262 /* The default power mode in host*/
263 /* 2511 measn unifiForceActive and 1 means active */
264 if (slsi_is_rf_test_mode_enabled()) {
265 SLSI_NET_INFO(dev
, "*#rf# rf test mode set is enabled.\n");
266 slsi_set_mib_roam(sdev
, NULL
, SLSI_PSID_UNIFI_ROAMING_ENABLED
, 0);
267 slsi_set_mib_roam(sdev
, NULL
, SLSI_PSID_UNIFI_ROAM_MODE
, 0);
268 slsi_set_mib_roam(sdev
, NULL
, 2511, 1);
269 slsi_set_mib_roam(sdev
, NULL
, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD
, 0);
275 static int slsi_net_stop(struct net_device
*dev
)
277 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
278 struct slsi_dev
*sdev
= ndev_vif
->sdev
;
280 SLSI_NET_INFO(dev
, "ifnum:%d r:%d\n", ndev_vif
->ifnum
, sdev
->recovery_status
);
281 slsi_wakelock(&sdev
->wlan_wl
);
282 netif_tx_stop_all_queues(dev
);
283 sdev
->initial_scan
= false;
285 if (!ndev_vif
->is_available
) {
286 /* May have been taken out by the Chip going down */
287 SLSI_NET_DBG1(dev
, SLSI_NETDEV
, "Not available\n");
288 slsi_wakeunlock(&sdev
->wlan_wl
);
291 #ifndef SLSI_TEST_DEV
292 if (!slsi_is_rf_test_mode_enabled() && !sdev
->recovery_status
) {
293 SLSI_NET_DBG1(dev
, SLSI_NETDEV
, "To user mode\n");
294 slsi_set_mib_roam(sdev
, NULL
, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD
, -55);
298 slsi_netif_tcp_ack_suppression_stop(dev
);
300 slsi_stop_net_dev(sdev
, dev
);
302 sdev
->allow_switch_40_mhz
= true;
303 sdev
->allow_switch_80_mhz
= true;
304 sdev
->acs_channel_switched
= false;
305 slsi_wakeunlock(&sdev
->wlan_wl
);
309 /* This is called after the WE handlers */
310 static int slsi_net_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
312 SLSI_NET_DBG4(dev
, SLSI_NETDEV
, "IOCTL cmd:0x%.4x\n", cmd
);
314 if (cmd
== SIOCDEVPRIVATE
+ 2) { /* 0x89f0 + 2 from wpa_supplicant */
315 return slsi_ioctl(dev
, rq
, cmd
);
321 static struct net_device_stats
*slsi_net_get_stats(struct net_device
*dev
)
323 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
325 SLSI_NET_DBG4(dev
, SLSI_NETDEV
, "\n");
326 return &ndev_vif
->stats
;
329 #ifdef CONFIG_SCSC_USE_WMM_TOS
/* Map the TOS/Traffic Class byte of an IP frame to an 802.11 user
 * priority (0-7). "frame" points at the first byte of the IP header;
 * IPv4 uses the precedence bits (top 3 bits of the TOS byte), non-IP
 * defaults to UP0.
 * NOTE(review): the IPv6 arm extracts bits from the first header byte
 * (version + TC high nibble) as (*frame & 0x0E) >> 1 — verify this mask
 * against the intended Traffic Class layout (TC spans bytes 0-1).
 */
330 static u16
slsi_get_priority_from_tos(u8
*frame
, u16 proto
)
333 return FAPI_PRIORITY_QOS_UP0
;
336 case ETH_P_IP
: /* IPv4 */
337 return (u16
)(((frame
[IP4_OFFSET_TO_TOS_FIELD
]) & 0xE0) >> 5);
339 case ETH_P_IPV6
: /* IPv6 */
340 return (u16
)((*frame
& 0x0E) >> 1);
343 return FAPI_PRIORITY_QOS_UP0
;
/* Map an IP frame's 6-bit DSCP value to an 802.11 user priority.
 * The DSCP is taken from the IPv4 TOS byte or reassembled from the two
 * IPv6 Traffic Class nibbles, then shifted right by FIELD_TO_DSCP to
 * drop the ECN bits. Two lookup tables are compiled in: an RFC 8325
 * based table for Android 10+ builds, and a legacy table otherwise.
 * Non-IP protocols fall back to UP0.
 * NOTE(review): the DSCP "case" labels of both tables are missing from
 * this extraction (only the return values remain) — reconstruct them
 * from the original file, do not infer from the returns alone.
 */
348 static u16
slsi_get_priority_from_tos_dscp(u8
*frame
, u16 proto
)
353 return FAPI_PRIORITY_QOS_UP0
;
356 case ETH_P_IP
: /* IPv4 */
357 dscp
= frame
[IP4_OFFSET_TO_TOS_FIELD
] >> FIELD_TO_DSCP
;
360 case ETH_P_IPV6
: /* IPv6 */
361 /* Get traffic class */
362 dscp
= (((frame
[IP6_OFFSET_TO_TC_FIELD_0
] & 0x0F) << 4) |
363 ((frame
[IP6_OFFSET_TO_TC_FIELD_1
] & 0xF0) >> 4)) >> FIELD_TO_DSCP
;
367 return FAPI_PRIORITY_QOS_UP0
;
369 /* DSCP table based in RFC8325 from Android 10 */
370 #if (defined(ANDROID_VERSION) && ANDROID_VERSION >= 100000)
373 return FAPI_PRIORITY_QOS_UP7
;
377 return FAPI_PRIORITY_QOS_UP6
;
379 return FAPI_PRIORITY_QOS_UP5
;
388 return FAPI_PRIORITY_QOS_UP4
;
392 return FAPI_PRIORITY_QOS_UP3
;
398 return FAPI_PRIORITY_QOS_UP0
;
400 return FAPI_PRIORITY_QOS_UP1
;
402 return FAPI_PRIORITY_QOS_UP0
;
408 return FAPI_PRIORITY_QOS_UP6
;
412 return FAPI_PRIORITY_QOS_UP5
;
422 return FAPI_PRIORITY_QOS_UP0
;
424 return FAPI_PRIORITY_QOS_UP7
;
426 return FAPI_PRIORITY_QOS_UP6
;
428 return FAPI_PRIORITY_QOS_UP5
;
430 return FAPI_PRIORITY_QOS_UP4
;
432 return FAPI_PRIORITY_QOS_UP3
;
434 return FAPI_PRIORITY_QOS_UP2
;
436 return FAPI_PRIORITY_QOS_UP1
;
438 return FAPI_PRIORITY_QOS_UP0
;
440 return FAPI_PRIORITY_QOS_UP0
;
447 static bool slsi_net_downgrade_ac(struct net_device
*dev
, struct sk_buff
*skb
)
449 SLSI_UNUSED_PARAMETER(dev
);
451 switch (skb
->priority
) {
454 skb
->priority
= FAPI_PRIORITY_QOS_UP5
; /* VO -> VI */
458 skb
->priority
= FAPI_PRIORITY_QOS_UP3
; /* VI -> BE */
462 skb
->priority
= FAPI_PRIORITY_QOS_UP2
; /* BE -> BK */
469 static u8
slsi_net_up_to_ac_mapping(u8 priority
)
472 case FAPI_PRIORITY_QOS_UP6
:
473 case FAPI_PRIORITY_QOS_UP7
:
474 return BIT(FAPI_PRIORITY_QOS_UP6
) | BIT(FAPI_PRIORITY_QOS_UP7
);
475 case FAPI_PRIORITY_QOS_UP4
:
476 case FAPI_PRIORITY_QOS_UP5
:
477 return BIT(FAPI_PRIORITY_QOS_UP4
) | BIT(FAPI_PRIORITY_QOS_UP5
);
478 case FAPI_PRIORITY_QOS_UP0
:
479 case FAPI_PRIORITY_QOS_UP3
:
480 return BIT(FAPI_PRIORITY_QOS_UP0
) | BIT(FAPI_PRIORITY_QOS_UP3
);
482 return BIT(FAPI_PRIORITY_QOS_UP1
) | BIT(FAPI_PRIORITY_QOS_UP2
);
486 enum slsi_traffic_q
slsi_frame_priority_to_ac_queue(u16 priority
)
489 case FAPI_PRIORITY_QOS_UP0
:
490 case FAPI_PRIORITY_QOS_UP3
:
491 return SLSI_TRAFFIC_Q_BE
;
492 case FAPI_PRIORITY_QOS_UP1
:
493 case FAPI_PRIORITY_QOS_UP2
:
494 return SLSI_TRAFFIC_Q_BK
;
495 case FAPI_PRIORITY_QOS_UP4
:
496 case FAPI_PRIORITY_QOS_UP5
:
497 return SLSI_TRAFFIC_Q_VI
;
498 case FAPI_PRIORITY_QOS_UP6
:
499 case FAPI_PRIORITY_QOS_UP7
:
500 return SLSI_TRAFFIC_Q_VO
;
502 return SLSI_TRAFFIC_Q_BE
;
506 int slsi_ac_to_tids(enum slsi_traffic_q ac
, int *tids
)
509 case SLSI_TRAFFIC_Q_BE
:
510 tids
[0] = FAPI_PRIORITY_QOS_UP0
;
511 tids
[1] = FAPI_PRIORITY_QOS_UP3
;
514 case SLSI_TRAFFIC_Q_BK
:
515 tids
[0] = FAPI_PRIORITY_QOS_UP1
;
516 tids
[1] = FAPI_PRIORITY_QOS_UP2
;
519 case SLSI_TRAFFIC_Q_VI
:
520 tids
[0] = FAPI_PRIORITY_QOS_UP4
;
521 tids
[1] = FAPI_PRIORITY_QOS_UP5
;
524 case SLSI_TRAFFIC_Q_VO
:
525 tids
[0] = FAPI_PRIORITY_QOS_UP6
;
526 tids
[1] = FAPI_PRIORITY_QOS_UP7
;
536 static void slsi_net_downgrade_pri(struct net_device
*dev
, struct slsi_peer
*peer
,
539 /* in case we are a client downgrade the ac if acm is
540 * set and tspec is not established
542 while (unlikely(peer
->wmm_acm
& BIT(skb
->priority
)) &&
543 !(peer
->tspec_established
& slsi_net_up_to_ac_mapping(skb
->priority
))) {
544 SLSI_NET_DBG3(dev
, SLSI_NETDEV
, "Downgrading from UP:%d\n", skb
->priority
);
545 if (!slsi_net_downgrade_ac(dev
, skb
))
548 SLSI_NET_DBG4(dev
, SLSI_NETDEV
, "To UP:%d\n", skb
->priority
);
/* ndo_select_queue: pick the netdev tx queue for an outgoing frame.
 * Signature varies with kernel version. Frames with zero src/dst MACs
 * are sent to the discard queue; EAP, ARP and DHCP go to the priority
 * queue; AP multicast/broadcast uses the per-AC multicast queues; all
 * other unicast is mapped to the destination peer's per-AC queue after
 * classifying skb->priority (802.11 QoS map, TOS or DSCP) and applying
 * the ACM downgrade. Peer lookup and queue selection run under
 * peer_lock; frames to unknown peers are discarded.
 * NOTE(review): several interior lines (ethertype dispatch, !peer test,
 * non-QoS else-branch, final return) are missing from this extraction —
 * consult the original file before modifying control flow here.
 */
550 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
551 static u16
slsi_net_select_queue(struct net_device
*dev
, struct sk_buff
*skb
, struct net_device
*sb_dev
, select_queue_fallback_t fallback
)
552 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
553 static u16
slsi_net_select_queue(struct net_device
*dev
, struct sk_buff
*skb
, void *accel_priv
, select_queue_fallback_t fallback
)
554 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
555 static u16
slsi_net_select_queue(struct net_device
*dev
, struct sk_buff
*skb
, void *accel_priv
)
557 static u16
slsi_net_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
560 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
561 struct slsi_dev
*sdev
= ndev_vif
->sdev
;
563 struct ethhdr
*ehdr
= (struct ethhdr
*)skb
->data
;
565 struct slsi_peer
*peer
;
566 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
568 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
571 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
574 SLSI_NET_DBG4(dev
, SLSI_NETDEV
, "\n");
576 /* Defensive check for uninitialized mac header */
577 if (!skb_mac_header_was_set(skb
))
578 skb_reset_mac_header(skb
);
580 if (is_zero_ether_addr(ehdr
->h_dest
) || is_zero_ether_addr(ehdr
->h_source
)) {
581 SLSI_NET_WARN(dev
, "invalid Ethernet addresses (dest:%pM,src:%pM)\n", ehdr
->h_dest
, ehdr
->h_source
);
582 SCSC_BIN_TAG_INFO(BINARY
, skb
->data
, skb
->len
> 128 ? 128 : skb
->len
);
583 return SLSI_NETIF_Q_DISCARD
;
586 proto
= be16_to_cpu(eth_hdr(skb
)->h_proto
);
590 /* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
594 SLSI_NET_DBG3(dev
, SLSI_TX
, "EAP packet. Priority Queue Selected\n");
595 return SLSI_NETIF_Q_PRIORITY
;
597 SLSI_NET_DBG3(dev
, SLSI_TX
, "ARP frame. Priority Queue Selected\n");
598 return SLSI_NETIF_Q_PRIORITY
;
600 if (slsi_is_dhcp_packet(skb
->data
) == SLSI_TX_IS_NOT_DHCP
)
602 SLSI_NET_DBG3(dev
, SLSI_TX
, "DHCP packet. Priority Queue Selected\n");
603 return SLSI_NETIF_Q_PRIORITY
;
606 if (ndev_vif
->vif_type
== FAPI_VIFTYPE_AP
)
607 /* MULTICAST/BROADCAST Queue is only used for AP */
608 if (is_multicast_ether_addr(ehdr
->h_dest
)) {
609 SLSI_NET_DBG3(dev
, SLSI_TX
, "Multicast AC queue will be selected\n");
610 #ifdef CONFIG_SCSC_USE_WMM_TOS
611 skb
->priority
= slsi_get_priority_from_tos(skb
->data
+ ETH_HLEN
, proto
);
613 skb
->priority
= slsi_get_priority_from_tos_dscp(skb
->data
+ ETH_HLEN
, proto
);
615 return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb
->priority
));
618 slsi_spinlock_lock(&ndev_vif
->peer_lock
);
619 peer
= slsi_get_peer_from_mac(sdev
, dev
, ehdr
->h_dest
);
621 SLSI_NET_DBG1(dev
, SLSI_TX
, "Discard: Peer %pM NOT found\n", ehdr
->h_dest
);
622 slsi_spinlock_unlock(&ndev_vif
->peer_lock
);
623 return SLSI_NETIF_Q_DISCARD
;
626 if (peer
->qos_enabled
) {
627 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
628 if (peer
->qos_map_set
) { /*802.11 QoS for interworking*/
629 skb
->priority
= cfg80211_classify8021d(skb
, &peer
->qos_map
);
633 #ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
634 if ((proto
== ETH_P_IP
&& slsi_is_dns_packet(skb
->data
)) ||
635 (proto
== ETH_P_IP
&& slsi_is_mdns_packet(skb
->data
)) ||
636 (proto
== ETH_P_IP
&& slsi_is_tcp_sync_packet(dev
, skb
))) {
637 skb
->priority
= FAPI_PRIORITY_QOS_UP7
;
641 #ifdef CONFIG_SCSC_USE_WMM_TOS
642 skb
->priority
= slsi_get_priority_from_tos(skb
->data
+ ETH_HLEN
, proto
);
644 skb
->priority
= slsi_get_priority_from_tos_dscp(skb
->data
+ ETH_HLEN
, proto
);
649 skb
->priority
= FAPI_PRIORITY_QOS_UP0
;
652 /* Downgrade the priority if acm bit is set and tspec is not established */
653 slsi_net_downgrade_pri(dev
, peer
, skb
);
655 netif_q
= slsi_netif_get_peer_queue(peer
->queueset
, slsi_frame_priority_to_ac_queue(skb
->priority
));
656 SLSI_NET_DBG3(dev
, SLSI_TX
, "prio:%d queue:%u\n", skb
->priority
, netif_q
);
657 slsi_spinlock_unlock(&ndev_vif
->peer_lock
);
/* Migrate queued tx packets between the STA netdev queues and a TDLS
 * peer's queues when a TDLS link is set up (connection=true) or torn
 * down (connection=false). Remaps frames cached in the TCP ack
 * suppression records, flips tdls_peer->valid so ndo_select_queue()
 * picks the new queue set, then dequeues/re-enqueues packets between the
 * qdiscs of the affected netdev queues. All of this runs with the tx
 * queues paused via slsi_tx_pause_queues()/slsi_tx_unpause_queues().
 * NOTE(review): several interior lines (locals, connection/teardown
 * branch structure, gso_skb handling, closing braces) are missing from
 * this extraction — verify against the original before editing.
 */
661 void slsi_tdls_move_packets(struct slsi_dev
*sdev
, struct net_device
*dev
,
662 struct slsi_peer
*sta_peer
, struct slsi_peer
*tdls_peer
, bool connection
)
664 struct netdev_vif
*netdev_vif
= netdev_priv(dev
);
665 struct sk_buff
*skb
= NULL
;
666 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
667 struct sk_buff
*skb_to_free
= NULL
;
678 struct slsi_tcp_ack_s
*tcp_ack
;
680 /* Get the netdev queue number from queueset */
681 staq
= slsi_netif_get_peer_queue(sta_peer
->queueset
, 0);
682 tdlsq
= slsi_netif_get_peer_queue(tdls_peer
->queueset
, 0);
684 SLSI_NET_DBG1(dev
, SLSI_TDLS
, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq: %d, tdls_netq: %d\n",
685 connection
, sta_peer
->queueset
, tdls_peer
->queueset
, staq
, tdlsq
);
687 /* Pause the TDLS queues and STA netdev queues */
688 slsi_tx_pause_queues(sdev
);
690 /* walk through frames in TCP Ack suppression queue and change mapping to TDLS queue */
691 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
692 tcp_ack
= &netdev_vif
->ack_suppression
[index
];
/* FIXME(review): "!tcp_ack && !tcp_ack->state" only dereferences
 * tcp_ack when it is NULL; the intended guard is almost certainly
 * "if (!tcp_ack || !tcp_ack->state)". Benign today because tcp_ack
 * points into an embedded array (never NULL), but the condition never
 * skips inactive records either — fix the operator.
 */
693 if (!tcp_ack
&& !tcp_ack
->state
)
695 slsi_spinlock_lock(&tcp_ack
->lock
);
696 skb_queue_walk(&tcp_ack
->list
, skb
) {
697 SLSI_NET_DBG2(dev
, SLSI_TDLS
, "frame in TCP Ack list (peer:%pM)\n", eth_hdr(skb
)->h_dest
);
698 /* is it destined to TDLS peer? */
699 if (compare_ether_addr(tdls_peer
->address
, eth_hdr(skb
)->h_dest
) == 0) {
701 /* TDLS setup: change the queue mapping to TDLS queue */
702 skb
->queue_mapping
+= (tdls_peer
->queueset
* SLSI_NETIF_Q_PER_PEER
);
704 /* TDLS teardown: change the queue to STA queue */
705 skb
->queue_mapping
-= (tdls_peer
->queueset
* SLSI_NETIF_Q_PER_PEER
);
709 slsi_spinlock_unlock(&tcp_ack
->lock
);
713 * For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
714 * For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
717 tdls_peer
->valid
= true;
719 tdls_peer
->valid
= false;
721 /* Move packets from netdev queues */
722 for (i
= 0; i
< SLSI_NETIF_Q_PER_PEER
; i
++) {
723 SLSI_NET_DBG2(dev
, SLSI_TDLS
, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
724 i
, dev
->_tx
[tdlsq
+ i
].qdisc
->q
.qlen
, dev
->_tx
[staq
+ i
].qdisc
->q
.qlen
);
727 /* Check if any packet is already avilable in TDLS queue (most likely from last session) */
728 if (dev
->_tx
[tdlsq
+ i
].qdisc
->q
.qlen
)
729 SLSI_NET_ERR(dev
, "tdls_connection: Packet present in queue %d\n", tdlsq
+ i
);
731 qd
= dev
->_tx
[staq
+ i
].qdisc
;
732 /* Get the total number of packets in STAQ */
733 num_pkts
= qd
->q
.qlen
;
735 /* Check all the pkt in STAQ and move the TDLS pkts to TDSLQ */
736 for (j
= 0; j
< num_pkts
; j
++) {
737 qd
= dev
->_tx
[staq
+ i
].qdisc
;
738 /* Dequeue the pkt form STAQ. This logic is similar to kernel API dequeue_skb() */
739 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
740 skb
= skb_peek(&qd
->gso_skb
);
745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
746 skb
= __skb_dequeue(&qd
->gso_skb
);
752 skb
= qd
->dequeue(qd
);
756 SLSI_NET_ERR(dev
, "tdls_connection: STA NETQ skb is NULL\n");
760 /* Change the queue mapping for the TDLS packets */
761 netq
= skb
->queue_mapping
;
762 ehdr
= (struct ethhdr
*)skb
->data
;
763 if (compare_ether_addr(tdls_peer
->address
, ehdr
->h_dest
) == 0) {
764 netq
+= (tdls_peer
->queueset
* SLSI_NETIF_Q_PER_PEER
);
765 SLSI_NET_DBG3(dev
, SLSI_TDLS
, "NETQ%d: Queue mapping changed from %d to %d\n",
766 i
, skb
->queue_mapping
, netq
);
767 skb_set_queue_mapping(skb
, netq
);
770 qd
= dev
->_tx
[netq
].qdisc
;
771 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
772 qd
->enqueue(skb
, qd
, &skb_to_free
);
774 /* If the netdev queue is already full then enqueue() will drop the skb */
775 qd
->enqueue(skb
, qd
);
779 num_pkts
= dev
->_tx
[tdlsq
+ i
].qdisc
->q
.qlen
;
780 /* Move the packets from TDLS to STA queue */
781 for (j
= 0; j
< num_pkts
; j
++) {
782 /* Dequeue the pkt form TDLS_Q. This logic is similar to kernel API dequeue_skb() */
783 qd
= dev
->_tx
[tdlsq
+ i
].qdisc
;
784 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
785 skb
= skb_peek(&qd
->gso_skb
);
790 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
791 skb
= __skb_dequeue(&qd
->gso_skb
);
797 skb
= qd
->dequeue(qd
);
801 SLSI_NET_ERR(dev
, "tdls_teardown: TDLS NETQ skb is NULL\n");
805 /* Update the queue mapping */
806 skb_set_queue_mapping(skb
, staq
+ i
);
808 /* Enqueue the packet in STA queue */
809 qd
= dev
->_tx
[staq
+ i
].qdisc
;
810 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
811 qd
->enqueue(skb
, qd
, &skb_to_free
);
813 /* If the netdev queue is already full then enqueue() will drop the skb */
814 qd
->enqueue(skb
, qd
);
818 SLSI_NET_DBG2(dev
, SLSI_TDLS
, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
819 i
, dev
->_tx
[tdlsq
+ i
].qdisc
->q
.qlen
, dev
->_tx
[staq
+ i
].qdisc
->q
.qlen
);
821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
822 if (unlikely(skb_to_free
))
823 kfree_skb_list(skb_to_free
);
826 /* Teardown - after teardown there should not be any packet in TDLS queues */
828 for (i
= 0; i
< SLSI_NETIF_Q_PER_PEER
; i
++) {
829 if (dev
->_tx
[tdlsq
+ i
].qdisc
->q
.qlen
)
830 SLSI_NET_ERR(dev
, "tdls_teardown: Packet present in NET queue %d\n", tdlsq
+ i
);
833 /* Resume the STA and TDLS netdev queues */
834 slsi_tx_unpause_queues(sdev
838 * This is the main TX entry point for the driver.
840 * Ownership of the skb is transferred to another function ONLY IF such
841 * function was able to deal with that skb and ended with a SUCCESS ret code.
842 * Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
844 * In the context of this function:
845 * - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
846 * SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
847 * kernel netstack will receive back NETDEV_TX_OK too.
848 * - ownership is KEPT HERE by this function when lower layers fails somehow
849 * to deal with the transmission of the skb. In this case the skb WOULD HAVE
850 * NOT BEEN FREED by lower layers that instead returns a proper ERRCODE.
851 * - intermediate lower layer functions (NOT directly involved in failure or
852 * success) will relay any retcode up to this layer for evaluation.
854 * WHAT HAPPENS THEN, is ERRCODE-dependent, and at the moment:
855 * - ENOSPC: something related to queueing happened...this should be
856 * retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
857 * requeued by the Kernel NetStack itself, using the proper queue.
858 * As a consequence SKB is NOT FREED HERE !.
859 * - ANY OTHER ERR: all other errors are considered at the moment NOT
860 * recoverable and SO skbs are droppped(FREED) HERE...Kernel will receive
861 * the proper ERRCODE and stops dealing with the packet considering it
862 * consumed by lower layer. (same behavior as NETDEV_TX_OK)
865 * As detailed in Documentation/networking/drivers.txt the above behavior
866 * of returning NETDEV_TX_BUSY to trigger requeueinng by the Kernel is
867 * discouraged and should be used ONLY in case of a real HARD error(?);
868 * the advised solution is to actively STOP the queues before finishing
869 * the available space and WAKING them up again when more free buffers
870 * would have arrived.
/* NOTE(review): several interior lines of this function (drop labels,
 * error-branch structure, final return) are missing from this
 * extraction — consult the original file before changing control flow.
 */
872 static netdev_tx_t
slsi_net_hw_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
874 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
875 struct slsi_dev
*sdev
= ndev_vif
->sdev
;
876 int r
= NETDEV_TX_OK
;
877 struct sk_buff
*original_skb
= NULL
;
878 #ifdef CONFIG_SCSC_WLAN_DEBUG
881 /* Keep the packet length. The packet length will be used to increment
882 * stats for the netdev if the packet was successfully transmitted.
883 * The ownership of the SKB is passed to lower layers, so we should
884 * not refer the SKB after this point
886 unsigned int packet_len
= skb
->len
;
887 enum slsi_traffic_q traffic_q
= slsi_frame_priority_to_ac_queue(skb
->priority
);
889 slsi_wakelock(&sdev
->wlan_wl
);
890 slsi_skb_cb_init(skb
);
892 /* Check for misaligned (oddly aligned) data.
893 * The f/w requires 16 bit aligned.
894 * This is a corner case - for example, the kernel can generate BPDU
895 * that are oddly aligned. Therefore it is acceptable to copy these
896 * frames to a 16 bit alignment.
898 if ((uintptr_t)skb
->data
& 0x1) {
899 struct sk_buff
*skb2
= NULL
;
900 /* Received a socket buffer aligned on an odd address.
901 * Re-align by asking for headroom.
903 skb2
= skb_copy_expand(skb
, SLSI_NETIF_SKB_HEADROOM
, skb_tailroom(skb
), GFP_ATOMIC
);
904 if (skb2
&& (!(((uintptr_t)skb2
->data
) & 0x1))) {
905 /* We should account for this duplication */
908 SLSI_NET_DBG3(dev
, SLSI_TX
, "Oddly aligned skb realigned\n");
910 /* Drop the packet if we can't re-align. */
911 SLSI_NET_WARN(dev
, "Oddly aligned skb failed realignment, dropping\n");
913 SLSI_NET_DBG3(dev
, SLSI_TX
, "skb_copy_expand didn't align for us\n");
914 slsi_kfree_skb(skb2
);
916 SLSI_NET_DBG3(dev
, SLSI_TX
, "skb_copy_expand failed when trying to align\n");
922 slsi_dbg_track_skb(skb
, GFP_ATOMIC
);
924 /* Be defensive about the mac_header - some kernels have a bug where a
925 * frame can be delivered to the driver with mac_header initialised
926 * to ~0U and this causes a crash when the pointer is dereferenced to
927 * access part of the Ethernet header.
929 if (!skb_mac_header_was_set(skb
))
930 skb_reset_mac_header(skb
);
932 SLSI_NET_DBG3(dev
, SLSI_TX
, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb
)->h_proto
));
934 #ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
935 if (ndev_vif
->ifnum
< SLSI_NAN_DATA_IFINDEX_START
) {
937 if (!ndev_vif
->is_available
) {
938 SLSI_NET_WARN(dev
, "vif NOT available\n");
942 #ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
945 if (skb
->queue_mapping
== SLSI_NETIF_Q_DISCARD
) {
946 SLSI_NET_WARN(dev
, "Discard Queue :: Packet Dropped\n");
951 #ifdef CONFIG_SCSC_WLAN_DEBUG
952 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
953 known_users
= refcount_read(&skb
->users
);
955 known_users
= atomic_read(&skb
->users
);
960 skb
= slsi_netif_tcp_ack_suppression_pkt(dev
, skb
);
962 slsi_wakeunlock(&sdev
->wlan_wl
);
964 slsi_kfree_skb(original_skb
);
969 /* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
970 r
= slsi_tx_data(sdev
, dev
, skb
);
974 * A copy has been passed down and successfully transmitted
975 * and freed....here we free the original coming from the
976 * upper network layers....if a copy was passed down.
979 slsi_kfree_skb(original_skb
);
980 /* skb freed by lower layers on success...enjoy */
982 ndev_vif
->tx_packets
[traffic_q
]++;
983 ndev_vif
->stats
.tx_packets
++;
984 ndev_vif
->stats
.tx_bytes
+= packet_len
;
989 * - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
990 * NOT discarded by lower layers and NETDEV_TX_BUSY should
991 * be returned to upper layers: this will cause the skb
992 * (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
994 * NOTE THAT it's the original skb that will be retried
996 * THIS CONDITION SHOULD NOT BE REACHED...NEVER...see in
999 * - with any other -ERR instead return the error: this
1000 * anyway let the kernel think that the SKB has
1001 * been consumed, and we drop the frame and free it.
1003 * - a WARN_ON() takes care to ensure the SKB has NOT been
1004 * freed by someone despite this was NOT supposed to happen,
1005 * just before the actual freeing.
1009 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
1010 ndev_vif
->stats
.tx_fifo_errors
++;
1011 /* Free the local copy if any ... */
1013 slsi_kfree_skb(skb
);
1016 #ifdef CONFIG_SCSC_WLAN_DEBUG
1017 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
1018 WARN_ON(known_users
&& refcount_read(&skb
->users
) != known_users
);
1020 WARN_ON(known_users
&& atomic_read(&skb
->users
) != known_users
);
1024 slsi_kfree_skb(original_skb
);
1025 slsi_kfree_skb(skb
);
1026 ndev_vif
->stats
.tx_dropped
++;
1027 /* We return the ORIGINAL Error 'r' anyway
1028 * BUT Kernel treats them as TX complete anyway
1029 * and assumes the SKB has been consumed.
1031 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
1034 /* SKBs are always considered consumed if the driver
1035 * returns NETDEV_TX_OK.
1037 slsi_wakeunlock(&sdev
->wlan_wl
);
1041 static netdev_features_t
slsi_net_fix_features(struct net_device
*dev
, netdev_features_t features
)
1043 SLSI_UNUSED_PARAMETER(dev
);
1045 #ifdef CONFIG_SCSC_WLAN_SG
1046 SLSI_NET_DBG1(dev
, SLSI_RX
, "Scatter-gather and GSO enabled\n");
1047 features
|= NETIF_F_SG
;
1048 features
|= NETIF_F_GSO
;
1051 #ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
1052 SLSI_NET_DBG1(dev
, SLSI_RX
, "NAPI Rx GRO enabled\n");
1053 features
|= NETIF_F_GRO
;
1055 SLSI_NET_DBG1(dev
, SLSI_RX
, "NAPI Rx GRO disabled\n");
1056 features
&= ~NETIF_F_GRO
;
1061 static void slsi_set_multicast_list(struct net_device
*dev
)
1063 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1065 u8 mdns_addr
[ETH_ALEN
] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
1067 #ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1068 u8 mc_addr_prefix
[3] = { 0x01, 0x00, 0x5e };
1070 u8 mdns6_addr
[ETH_ALEN
] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
1071 const u8 solicited_node_addr
[ETH_ALEN
] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
1072 u8 ipv6addr_suffix
[3];
1074 struct netdev_hw_addr
*ha
;
1076 if (ndev_vif
->vif_type
!= FAPI_VIFTYPE_STATION
)
1079 if (!ndev_vif
->is_available
) {
1080 SLSI_NET_DBG1(dev
, SLSI_NETDEV
, "vif NOT available\n");
1084 count
= netdev_mc_count(dev
);
1088 #ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1089 slsi_spinlock_lock(&ndev_vif
->ipv6addr_lock
);
1090 memcpy(ipv6addr_suffix
, &ndev_vif
->ipv6address
.s6_addr
[13], 3);
1091 slsi_spinlock_unlock(&ndev_vif
->ipv6addr_lock
);
1094 netdev_for_each_mc_addr(ha
, dev
) {
1095 #ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1096 if ((!memcmp(ha
->addr
, mdns_addr
, ETH_ALEN
)) || /*mDns is handled separately*/
1097 (memcmp(ha
->addr
, mc_addr_prefix
, 3))) { /*only consider IPv4 multicast addresses*/
1099 if ((!memcmp(ha
->addr
, mdns_addr
, ETH_ALEN
)) ||
1100 (!memcmp(ha
->addr
, mdns6_addr
, ETH_ALEN
)) || /*mDns is handled separately*/
1101 (!memcmp(ha
->addr
, solicited_node_addr
, 3) &&
1102 !memcmp(&ha
->addr
[3], ipv6addr_suffix
, 3))) { /* local multicast addr handled separately*/
1105 SLSI_NET_DBG3(dev
, SLSI_NETDEV
, "Drop MAC %pM\n", ha
->addr
);
1108 if (i
== SLSI_MC_ADDR_ENTRY_MAX
) {
1109 SLSI_NET_WARN(dev
, "MAC list has reached max limit (%d), actual count %d\n", SLSI_MC_ADDR_ENTRY_MAX
, count
);
1113 SLSI_NET_DBG3(dev
, SLSI_NETDEV
, "idx %d MAC %pM\n", i
, ha
->addr
);
1114 SLSI_ETHER_COPY(ndev_vif
->sta
.regd_mc_addr
[i
++], ha
->addr
);
1118 ndev_vif
->sta
.regd_mc_addr_count
= i
;
1121 static int slsi_set_mac_address(struct net_device
*dev
, void *addr
)
1123 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1124 struct slsi_dev
*sdev
= ndev_vif
->sdev
;
1125 struct sockaddr
*sa
= (struct sockaddr
*)addr
;
1127 SLSI_NET_DBG1(dev
, SLSI_NETDEV
, "slsi_set_mac_address %pM\n", sa
->sa_data
);
1128 SLSI_ETHER_COPY(dev
->dev_addr
, sa
->sa_data
);
1130 /* Interface is pulled down before mac address is changed.
1131 * First scan initiated after interface is brought up again, should be treated as initial scan, for faster reconnection.
1133 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif
)) {
1134 sdev
->initial_scan
= true;
1139 static const struct net_device_ops slsi_netdev_ops
= {
1140 .ndo_open
= slsi_net_open
,
1141 .ndo_stop
= slsi_net_stop
,
1142 .ndo_start_xmit
= slsi_net_hw_xmit
,
1143 .ndo_do_ioctl
= slsi_net_ioctl
,
1144 .ndo_get_stats
= slsi_net_get_stats
,
1145 .ndo_select_queue
= slsi_net_select_queue
,
1146 .ndo_fix_features
= slsi_net_fix_features
,
1147 .ndo_set_rx_mode
= slsi_set_multicast_list
,
1148 .ndo_set_mac_address
= slsi_set_mac_address
,
1151 static void slsi_if_setup(struct net_device
*dev
)
1154 dev
->netdev_ops
= &slsi_netdev_ops
;
1155 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
1156 dev
->needs_free_netdev
= true;
1158 dev
->destructor
= free_netdev
;
1162 #ifdef CONFIG_SCSC_WLAN_RX_NAPI
1164 #if defined(CONFIG_SOC_EXYNOS9610) || defined(CONFIG_SOC_EXYNOS9630) || defined(CONFIG_SOC_EXYNOS3830)
1165 #define SCSC_NETIF_RPS_CPUS_MASK "fe"
1167 #define SCSC_NETIF_RPS_CPUS_MASK "0"
1170 static void slsi_netif_rps_map_clear(struct net_device
*dev
)
1172 struct rps_map
*map
;
1174 map
= rcu_dereference_protected(dev
->_rx
->rps_map
, 1);
1176 RCU_INIT_POINTER(dev
->_rx
->rps_map
, NULL
);
1177 kfree_rcu(map
, rcu
);
1178 SLSI_NET_INFO(dev
, "clear rps_cpus map\n");
1182 static int slsi_netif_rps_map_set(struct net_device
*dev
, char *buf
, size_t len
)
1184 struct rps_map
*old_map
, *map
;
1187 static DEFINE_SPINLOCK(rps_map_lock
);
1189 if (!alloc_cpumask_var(&mask
, GFP_KERNEL
))
1192 err
= bitmap_parse(buf
, len
, cpumask_bits(mask
), nr_cpumask_bits
);
1194 free_cpumask_var(mask
);
1195 SLSI_NET_WARN(dev
, "CPU bitmap parse failed\n");
1199 map
= kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask
)), L1_CACHE_BYTES
), GFP_KERNEL
);
1201 free_cpumask_var(mask
);
1202 SLSI_NET_WARN(dev
, "CPU mask alloc failed\n");
1207 for_each_cpu_and(cpu
, mask
, cpu_online_mask
)
1208 map
->cpus
[i
++] = cpu
;
1217 spin_lock(&rps_map_lock
);
1218 old_map
= rcu_dereference_protected(dev
->_rx
->rps_map
, lockdep_is_held(&rps_map_lock
));
1219 rcu_assign_pointer(dev
->_rx
->rps_map
, map
);
1220 spin_unlock(&rps_map_lock
);
1223 static_key_slow_inc(&rps_needed
);
1225 static_key_slow_dec(&rps_needed
);
1228 kfree_rcu(old_map
, rcu
);
1230 free_cpumask_var(mask
);
1231 SLSI_NET_INFO(dev
, "rps_cpus map set(%s)\n", buf
);
1236 int slsi_netif_add_locked(struct slsi_dev
*sdev
, const char *name
, int ifnum
)
1238 struct net_device
*dev
= NULL
;
1239 struct netdev_vif
*ndev_vif
;
1240 struct wireless_dev
*wdev
;
1241 int alloc_size
, txq_count
= 0, ret
;
1243 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev
->netdev_add_remove_mutex
));
1245 if (WARN_ON(!sdev
|| ifnum
> CONFIG_SCSC_WLAN_MAX_INTERFACES
|| sdev
->netdev
[ifnum
]))
1248 alloc_size
= sizeof(struct netdev_vif
);
1250 txq_count
= SLSI_NETIF_Q_PEER_START
+ (SLSI_NETIF_Q_PER_PEER
* (SLSI_ADHOC_PEER_CONNECTIONS_MAX
));
1252 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
1253 dev
= alloc_netdev_mqs(alloc_size
, name
, NET_NAME_PREDICTABLE
, slsi_if_setup
, txq_count
, 1);
1255 dev
= alloc_netdev_mqs(alloc_size
, name
, slsi_if_setup
, txq_count
, 1);
1258 SLSI_ERR(sdev
, "Failed to allocate private data for netdev\n");
1262 /* Reserve space in skb for later use */
1263 dev
->needed_headroom
= SLSI_NETIF_SKB_HEADROOM
;
1264 dev
->needed_tailroom
= SLSI_NETIF_SKB_TAILROOM
;
1266 ret
= dev_alloc_name(dev
, dev
->name
);
1268 goto exit_with_error
;
1270 ndev_vif
= netdev_priv(dev
);
1271 memset(ndev_vif
, 0x00, sizeof(*ndev_vif
));
1272 SLSI_MUTEX_INIT(ndev_vif
->vif_mutex
);
1273 SLSI_MUTEX_INIT(ndev_vif
->scan_mutex
);
1274 SLSI_MUTEX_INIT(ndev_vif
->scan_result_mutex
);
1275 skb_queue_head_init(&ndev_vif
->ba_complete
);
1276 slsi_sig_send_init(&ndev_vif
->sig_wait
);
1277 ndev_vif
->sdev
= sdev
;
1278 ndev_vif
->ifnum
= ifnum
;
1279 ndev_vif
->vif_type
= SLSI_VIFTYPE_UNSPECIFIED
;
1280 #ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1281 slsi_spinlock_create(&ndev_vif
->ipv6addr_lock
);
1283 slsi_spinlock_create(&ndev_vif
->peer_lock
);
1284 atomic_set(&ndev_vif
->ba_flush
, 0);
1286 /* Reserve memory for the peer database - Not required for p2p0/nan interface */
1287 if (!(SLSI_IS_VIF_INDEX_P2P(ndev_vif
) || SLSI_IS_VIF_INDEX_NAN(ndev_vif
))) {
1290 for (queueset
= 0; queueset
< SLSI_ADHOC_PEER_CONNECTIONS_MAX
; queueset
++) {
1291 ndev_vif
->peer_sta_record
[queueset
] = kzalloc(sizeof(*ndev_vif
->peer_sta_record
[queueset
]), GFP_KERNEL
);
1293 if (!ndev_vif
->peer_sta_record
[queueset
]) {
1296 SLSI_NET_ERR(dev
, "Could not allocate memory for peer entry (queueset:%d)\n", queueset
);
1298 /* Free previously allocated peer database memory till current queueset */
1299 for (j
= 0; j
< queueset
; j
++) {
1300 kfree(ndev_vif
->peer_sta_record
[j
]);
1301 ndev_vif
->peer_sta_record
[j
] = NULL
;
1305 goto exit_with_error
;
1310 /* The default power mode in host*/
1311 if (slsi_is_rf_test_mode_enabled()) {
1312 SLSI_NET_ERR(dev
, "*#rf# rf test mode set is enabled.\n");
1313 ndev_vif
->set_power_mode
= FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE
;
1315 ndev_vif
->set_power_mode
= FAPI_POWERMANAGEMENTMODE_POWER_SAVE
;
1318 INIT_LIST_HEAD(&ndev_vif
->sta
.network_map
);
1319 SLSI_DBG1(sdev
, SLSI_NETDEV
, "ifnum=%d\n", ndev_vif
->ifnum
);
1321 /* For HS2 interface */
1322 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif
))
1323 sdev
->wlan_unsync_vif_state
= WLAN_UNSYNC_NO_VIF
;
1325 /* For p2p0 interface */
1326 else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif
)) {
1327 ret
= slsi_p2p_init(sdev
, ndev_vif
);
1329 goto exit_with_error
;
1332 INIT_DELAYED_WORK(&ndev_vif
->scan_timeout_work
, slsi_scan_ind_timeout_handle
);
1334 ret
= slsi_skb_work_init(sdev
, dev
, &ndev_vif
->rx_data
, "slsi_wlan_rx_data", slsi_rx_netdev_data_work
);
1336 goto exit_with_error
;
1338 ret
= slsi_skb_work_init(sdev
, dev
, &ndev_vif
->rx_mlme
, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work
);
1340 slsi_skb_work_deinit(&ndev_vif
->rx_data
);
1341 goto exit_with_error
;
1344 wdev
= &ndev_vif
->wdev
;
1346 dev
->ieee80211_ptr
= wdev
;
1347 wdev
->wiphy
= sdev
->wiphy
;
1349 wdev
->iftype
= NL80211_IFTYPE_STATION
;
1350 SET_NETDEV_DEV(dev
, sdev
->dev
);
1352 /* We are not ready to send data yet. */
1353 netif_carrier_off(dev
);
1355 #ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1356 if (strcmp(name
, CONFIG_SCSC_AP_INTERFACE_NAME
) == 0)
1357 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[SLSI_NET_INDEX_P2P
]);
1359 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[ifnum
]);
1361 SLSI_ETHER_COPY(dev
->dev_addr
, sdev
->netdev_addresses
[ifnum
]);
1363 SLSI_DBG1(sdev
, SLSI_NETDEV
, "Add:%pM\n", dev
->dev_addr
);
1364 rcu_assign_pointer(sdev
->netdev
[ifnum
], dev
);
1365 ndev_vif
->delete_probe_req_ies
= false;
1366 ndev_vif
->probe_req_ies
= NULL
;
1367 ndev_vif
->probe_req_ie_len
= 0;
1368 ndev_vif
->drv_in_p2p_procedure
= false;
1370 #ifdef CONFIG_SCSC_WLAN_RX_NAPI
1371 slsi_netif_rps_map_set(dev
, SCSC_NETIF_RPS_CPUS_MASK
, strlen(SCSC_NETIF_RPS_CPUS_MASK
));
1376 mutex_lock(&sdev
->netdev_remove_mutex
);
1378 mutex_unlock(&sdev
->netdev_remove_mutex
);
1382 int slsi_netif_dynamic_iface_add(struct slsi_dev
*sdev
, const char *name
)
1384 int index
= -EINVAL
;
1387 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1389 #if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1390 if (sdev
->netdev
[SLSI_NET_INDEX_P2PX_SWLAN
] == sdev
->netdev_ap
) {
1391 rcu_assign_pointer(sdev
->netdev
[SLSI_NET_INDEX_P2PX_SWLAN
], NULL
);
1392 err
= slsi_netif_add_locked(sdev
, name
, SLSI_NET_INDEX_P2PX_SWLAN
);
1393 index
= err
? err
: SLSI_NET_INDEX_P2PX_SWLAN
;
1396 err
= slsi_netif_add_locked(sdev
, name
, SLSI_NET_INDEX_P2PX_SWLAN
);
1397 index
= err
? err
: SLSI_NET_INDEX_P2PX_SWLAN
;
1400 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1404 int slsi_netif_init(struct slsi_dev
*sdev
)
1408 SLSI_DBG3(sdev
, SLSI_NETDEV
, "\n");
1410 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1412 /* Initialize all other netdev interfaces to NULL */
1413 for (i
= 1; i
<= CONFIG_SCSC_WLAN_MAX_INTERFACES
; i
++)
1414 RCU_INIT_POINTER(sdev
->netdev
[i
], NULL
);
1416 if (slsi_netif_add_locked(sdev
, "wlan%d", SLSI_NET_INDEX_WLAN
) != 0) {
1417 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1421 if (slsi_netif_add_locked(sdev
, "p2p%d", SLSI_NET_INDEX_P2P
) != 0) {
1423 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_WLAN
]);
1425 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1428 #ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1429 #if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1430 if (slsi_netif_add_locked(sdev
, CONFIG_SCSC_AP_INTERFACE_NAME
, SLSI_NET_INDEX_P2PX_SWLAN
) != 0) {
1432 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_WLAN
]);
1433 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_P2P
]);
1435 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1440 #if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
1441 if (slsi_netif_add_locked(sdev
, "nan%d", SLSI_NET_INDEX_NAN
) != 0) {
1443 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_WLAN
]);
1444 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_P2P
]);
1445 #ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1446 #if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1447 slsi_netif_remove_locked(sdev
, sdev
->netdev
[SLSI_NET_INDEX_P2PX_SWLAN
]);
1451 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1455 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1459 int slsi_netif_register_locked(struct slsi_dev
*sdev
, struct net_device
*dev
)
1461 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1464 WARN_ON(!rtnl_is_locked());
1465 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev
->netdev_add_remove_mutex
));
1466 if (atomic_read(&ndev_vif
->is_registered
)) {
1467 SLSI_NET_ERR(dev
, "Register:%pM Failed: Already registered\n", dev
->dev_addr
);
1471 err
= register_netdevice(dev
);
1473 SLSI_NET_ERR(dev
, "Register:%pM Failed\n", dev
->dev_addr
);
1475 atomic_set(&ndev_vif
->is_registered
, 1);
1479 int slsi_netif_register_rtlnl_locked(struct slsi_dev
*sdev
, struct net_device
*dev
)
1483 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1484 err
= slsi_netif_register_locked(sdev
, dev
);
1485 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1489 int slsi_netif_register(struct slsi_dev
*sdev
, struct net_device
*dev
)
1494 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1495 err
= slsi_netif_register_locked(sdev
, dev
);
1496 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1501 void slsi_netif_remove_locked(struct slsi_dev
*sdev
, struct net_device
*dev
)
1504 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1506 SLSI_NET_DBG1(dev
, SLSI_NETDEV
, "Unregister:%pM\n", dev
->dev_addr
);
1508 WARN_ON(!rtnl_is_locked());
1509 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev
->netdev_add_remove_mutex
));
1511 if (atomic_read(&ndev_vif
->is_registered
)) {
1512 netif_tx_disable(dev
);
1513 netif_carrier_off(dev
);
1515 slsi_stop_net_dev(sdev
, dev
);
1518 rcu_assign_pointer(sdev
->netdev
[ndev_vif
->ifnum
], NULL
);
1521 /* Free memory of the peer database - Not required for p2p0 interface */
1522 if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif
)) {
1525 for (queueset
= 0; queueset
< SLSI_ADHOC_PEER_CONNECTIONS_MAX
; queueset
++) {
1526 kfree(ndev_vif
->peer_sta_record
[queueset
]);
1527 ndev_vif
->peer_sta_record
[queueset
] = NULL
;
1531 if (SLSI_IS_VIF_INDEX_P2P(ndev_vif
)) {
1532 slsi_p2p_deinit(sdev
, ndev_vif
);
1533 } else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif
)) {
1534 sdev
->wlan_unsync_vif_state
= WLAN_UNSYNC_NO_VIF
;
1535 ndev_vif
->vif_type
= SLSI_VIFTYPE_UNSPECIFIED
;
1538 cancel_delayed_work(&ndev_vif
->scan_timeout_work
);
1539 ndev_vif
->scan
[SLSI_SCAN_HW_ID
].requeue_timeout_work
= false;
1541 slsi_skb_work_deinit(&ndev_vif
->rx_data
);
1542 slsi_skb_work_deinit(&ndev_vif
->rx_mlme
);
1544 for (i
= 0; i
< SLSI_SCAN_MAX
; i
++)
1545 slsi_purge_scan_results(ndev_vif
, i
);
1547 slsi_kfree_skb(ndev_vif
->sta
.mlme_scan_ind_skb
);
1548 slsi_roam_channel_cache_prune(dev
, 0);
1549 kfree(ndev_vif
->probe_req_ies
);
1551 #ifdef CONFIG_SCSC_WLAN_RX_NAPI
1552 slsi_netif_rps_map_clear(dev
);
1554 if (atomic_read(&ndev_vif
->is_registered
)) {
1555 atomic_set(&ndev_vif
->is_registered
, 0);
1556 unregister_netdevice(dev
);
1558 mutex_lock(&sdev
->netdev_remove_mutex
);
1560 mutex_unlock(&sdev
->netdev_remove_mutex
);
1564 void slsi_netif_remove_rtlnl_locked(struct slsi_dev
*sdev
, struct net_device
*dev
)
1566 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1567 slsi_netif_remove_locked(sdev
, dev
);
1568 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1571 void slsi_netif_remove(struct slsi_dev
*sdev
, struct net_device
*dev
)
1574 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1575 slsi_netif_remove_locked(sdev
, dev
);
1576 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1580 void slsi_netif_remove_all(struct slsi_dev
*sdev
)
1584 SLSI_DBG1(sdev
, SLSI_NETDEV
, "\n");
1586 SLSI_MUTEX_LOCK(sdev
->netdev_add_remove_mutex
);
1587 for (i
= 1; i
<= CONFIG_SCSC_WLAN_MAX_INTERFACES
; i
++)
1588 if (sdev
->netdev
[i
])
1589 slsi_netif_remove_locked(sdev
, sdev
->netdev
[i
]);
1590 rcu_assign_pointer(sdev
->netdev_ap
, NULL
);
1591 SLSI_MUTEX_UNLOCK(sdev
->netdev_add_remove_mutex
);
1595 void slsi_netif_deinit(struct slsi_dev
*sdev
)
1597 SLSI_DBG1(sdev
, SLSI_NETDEV
, "\n");
1598 slsi_netif_remove_all(sdev
);
1602 static int slsi_netif_tcp_ack_suppression_start(struct net_device
*dev
)
1605 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1606 struct slsi_tcp_ack_s
*tcp_ack
;
1608 ndev_vif
->last_tcp_ack
= NULL
;
1609 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
1610 tcp_ack
= &ndev_vif
->ack_suppression
[index
];
1615 tcp_ack
->ack_seq
= 0;
1619 skb_queue_head_init(&tcp_ack
->list
);
1620 #if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1621 timer_setup(&tcp_ack
->timer
, slsi_netif_tcp_ack_suppression_timeout
, 0);
1623 tcp_ack
->timer
.function
= slsi_netif_tcp_ack_suppression_timeout
;
1624 tcp_ack
->timer
.data
= (unsigned long)tcp_ack
;
1625 init_timer(&tcp_ack
->timer
);
1628 slsi_spinlock_create(&tcp_ack
->lock
);
1631 memset(&ndev_vif
->tcp_ack_stats
, 0, sizeof(struct slsi_tcp_ack_stats
));
1635 static int slsi_netif_tcp_ack_suppression_stop(struct net_device
*dev
)
1638 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1639 struct slsi_tcp_ack_s
*tcp_ack
;
1641 SLSI_MUTEX_LOCK(ndev_vif
->vif_mutex
);
1642 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
1643 tcp_ack
= &ndev_vif
->ack_suppression
[index
];
1644 del_timer_sync(&tcp_ack
->timer
);
1645 slsi_spinlock_lock(&tcp_ack
->lock
);
1647 skb_queue_purge(&tcp_ack
->list
);
1648 slsi_spinlock_unlock(&tcp_ack
->lock
);
1650 ndev_vif
->last_tcp_ack
= NULL
;
1651 SLSI_MUTEX_UNLOCK(ndev_vif
->vif_mutex
);
1655 #if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1656 static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list
*t
)
1658 static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data
)
1661 #if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1662 struct slsi_tcp_ack_s
*tcp_ack
= from_timer(tcp_ack
, t
, timer
);
1664 struct slsi_tcp_ack_s
*tcp_ack
= (struct slsi_tcp_ack_s
*)data
;
1666 struct sk_buff
*skb
;
1667 struct netdev_vif
*ndev_vif
;
1668 struct slsi_dev
*sdev
;
1674 if (!tcp_ack
->state
)
1677 slsi_spinlock_lock(&tcp_ack
->lock
);
1678 while ((skb
= skb_dequeue(&tcp_ack
->list
)) != 0) {
1683 slsi_spinlock_unlock(&tcp_ack
->lock
);
1686 ndev_vif
= netdev_priv(skb
->dev
);
1687 sdev
= ndev_vif
->sdev
;
1688 ndev_vif
->tcp_ack_stats
.tack_timeout
++;
1690 r
= slsi_tx_data(sdev
, skb
->dev
, skb
);
1692 ndev_vif
->tcp_ack_stats
.tack_sent
++;
1693 tcp_ack
->last_sent
= ktime_get();
1694 } else if (r
== -ENOSPC
) {
1695 ndev_vif
->tcp_ack_stats
.tack_dropped
++;
1696 slsi_kfree_skb(skb
);
1698 ndev_vif
->tcp_ack_stats
.tack_dropped
++;
1701 slsi_spinlock_unlock(&tcp_ack
->lock
);
1704 static int slsi_netif_tcp_ack_suppression_option(struct sk_buff
*skb
, u32 option
)
1706 unsigned char *options
;
1707 u32 optlen
= 0, len
= 0;
1709 if (tcp_hdr(skb
)->doff
> 5)
1710 optlen
= (tcp_hdr(skb
)->doff
- 5) * 4;
1712 options
= ((u8
*)tcp_hdr(skb
)) + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET
;
1714 while (optlen
> 0) {
1715 switch (options
[0]) {
1716 case TCP_ACK_SUPPRESSION_OPTION_EOL
:
1718 case TCP_ACK_SUPPRESSION_OPTION_NOP
:
1721 case TCP_ACK_SUPPRESSION_OPTION_MSS
:
1722 if (option
== TCP_ACK_SUPPRESSION_OPTION_MSS
)
1723 return ((options
[2] << 8) | options
[3]);
1726 case TCP_ACK_SUPPRESSION_OPTION_WINDOW
:
1727 if (option
== TCP_ACK_SUPPRESSION_OPTION_WINDOW
)
1731 case TCP_ACK_SUPPRESSION_OPTION_SACK
:
1732 if (option
== TCP_ACK_SUPPRESSION_OPTION_SACK
)
1740 /* if length field in TCP options is 0, or greater than
1741 * total options length, then options are incorrect; return here
1743 if ((len
== 0) || (len
> optlen
)) {
1744 SLSI_DBG_HEX_NODEV(SLSI_TX
, skb
->data
, skb
->len
< 128 ? skb
->len
: 128, "SKB:\n");
1753 static void slsi_netif_tcp_ack_suppression_syn(struct net_device
*dev
, struct sk_buff
*skb
)
1755 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1756 struct slsi_tcp_ack_s
*tcp_ack
;
1759 SLSI_NET_DBG2(dev
, SLSI_TX
, "\n");
1760 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
1761 tcp_ack
= &ndev_vif
->ack_suppression
[index
];
1762 slsi_spinlock_lock(&tcp_ack
->lock
);
1764 if (!tcp_ack
->state
) {
1765 slsi_spinlock_unlock(&tcp_ack
->lock
);
1768 /* Recover old/hung/unused record. */
1769 if (tcp_ack
->daddr
) {
1770 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack
->last_sent
)) >= TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT
* 1000) {
1771 SLSI_NET_DBG2(dev
, SLSI_TX
, "delete at %d (%pI4.%d > %pI4.%d)\n", index
, &tcp_ack
->saddr
, ntohs(tcp_ack
->sport
), &tcp_ack
->daddr
, ntohs(tcp_ack
->dport
));
1772 skb_queue_purge(&tcp_ack
->list
);
1778 tcp_ack
->ack_seq
= 0;
1779 del_timer(&tcp_ack
->timer
);
1783 if (tcp_ack
->daddr
== 0) {
1784 SLSI_NET_DBG2(dev
, SLSI_TX
, "add at %d (%pI4.%d > %pI4.%d)\n", index
, &ip_hdr(skb
)->saddr
, ntohs(tcp_hdr(skb
)->source
), &ip_hdr(skb
)->daddr
, ntohs(tcp_hdr(skb
)->dest
));
1785 tcp_ack
->daddr
= ip_hdr(skb
)->daddr
;
1786 tcp_ack
->saddr
= ip_hdr(skb
)->saddr
;
1787 tcp_ack
->dport
= tcp_hdr(skb
)->dest
;
1788 tcp_ack
->sport
= tcp_hdr(skb
)->source
;
1790 tcp_ack
->ack_seq
= 0;
1791 tcp_ack
->slow_start_count
= 0;
1792 tcp_ack
->tcp_slow_start
= true;
1793 if (tcp_ack_suppression_monitor
) {
1797 tcp_ack
->max
= tcp_ack_suppression_max
;
1798 tcp_ack
->age
= tcp_ack_suppression_timeout
;
1800 tcp_ack
->last_sent
= ktime_get();
1802 if (tcp_ack_suppression_monitor
) {
1803 tcp_ack
->last_sample_time
= ktime_get();
1804 tcp_ack
->last_ack_seq
= 0;
1805 tcp_ack
->last_tcp_rate
= 0;
1806 tcp_ack
->num_bytes
= 0;
1807 tcp_ack
->hysteresis
= 0;
1809 #ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1810 tcp_ack
->stream_id
= index
;
1812 /* read and validate the window scaling multiplier */
1813 tcp_ack
->window_multiplier
= slsi_netif_tcp_ack_suppression_option(skb
, TCP_ACK_SUPPRESSION_OPTION_WINDOW
);
1814 if (tcp_ack
->window_multiplier
> 14)
1815 tcp_ack
->window_multiplier
= 0;
1816 tcp_ack
->mss
= slsi_netif_tcp_ack_suppression_option(skb
, TCP_ACK_SUPPRESSION_OPTION_MSS
);
1817 SLSI_NET_DBG2(dev
, SLSI_TX
, "options: mss:%u, window:%u\n", tcp_ack
->mss
, tcp_ack
->window_multiplier
);
1818 SCSC_HIP4_SAMPLER_TCP_SYN(ndev_vif
->sdev
->minor_prof
, index
, tcp_ack
->mss
);
1819 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif
->sdev
->minor_prof
, index
, be32_to_cpu(tcp_hdr(skb
)->seq
));
1820 slsi_spinlock_unlock(&tcp_ack
->lock
);
1823 slsi_spinlock_unlock(&tcp_ack
->lock
);
1827 static void slsi_netif_tcp_ack_suppression_fin(struct net_device
*dev
, struct sk_buff
*skb
)
1829 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1830 struct slsi_tcp_ack_s
*tcp_ack
;
1833 SLSI_NET_DBG2(dev
, SLSI_TX
, "\n");
1834 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
1835 tcp_ack
= &ndev_vif
->ack_suppression
[index
];
1836 slsi_spinlock_lock(&tcp_ack
->lock
);
1838 if ((tcp_ack
->dport
== tcp_hdr(skb
)->dest
) &&
1839 (tcp_ack
->daddr
== ip_hdr(skb
)->daddr
)) {
1840 SLSI_NET_DBG2(dev
, SLSI_TX
, "delete at %d (%pI4.%d > %pI4.%d)\n", index
, &tcp_ack
->saddr
, ntohs(tcp_ack
->sport
), &tcp_ack
->daddr
, ntohs(tcp_ack
->dport
));
1841 skb_queue_purge(&tcp_ack
->list
);
1847 tcp_ack
->ack_seq
= 0;
1849 if (tcp_ack_suppression_monitor
) {
1850 tcp_ack
->last_ack_seq
= 0;
1851 tcp_ack
->last_tcp_rate
= 0;
1852 tcp_ack
->num_bytes
= 0;
1853 tcp_ack
->hysteresis
= 0;
1856 del_timer(&tcp_ack
->timer
);
1857 #ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1858 tcp_ack
->stream_id
= 0;
1860 SCSC_HIP4_SAMPLER_TCP_FIN(ndev_vif
->sdev
->minor_prof
, index
);
1861 slsi_spinlock_unlock(&tcp_ack
->lock
);
1864 slsi_spinlock_unlock(&tcp_ack
->lock
);
1868 static struct sk_buff
*slsi_netif_tcp_ack_suppression_pkt(struct net_device
*dev
, struct sk_buff
*skb
)
1870 struct netdev_vif
*ndev_vif
= netdev_priv(dev
);
1872 struct slsi_tcp_ack_s
*tcp_ack
;
1873 int forward_now
= 0, flush
= 0;
1874 struct sk_buff
*cskb
= 0;
1875 u32 tcp_recv_window_size
= 0;
1877 if (tcp_ack_suppression_disable
)
1880 if (tcp_ack_suppression_disable_2g
&& !SLSI_IS_VIF_CHANNEL_5G(ndev_vif
))
1883 /* for AP type (AP or P2P Go) check if the packet is local or intra BSS. If intra BSS then
1884 * the IP header and TCP header are not set; so return the SKB
1886 if ((ndev_vif
->vif_type
== FAPI_VIFTYPE_AP
) && (compare_ether_addr(eth_hdr(skb
)->h_source
, dev
->dev_addr
) != 0))
1889 /* Return SKB that doesn't match. */
1890 if (be16_to_cpu(eth_hdr(skb
)->h_proto
) != ETH_P_IP
)
1892 if (ip_hdr(skb
)->protocol
!= IPPROTO_TCP
)
1894 if (!skb_transport_header_was_set(skb
))
1896 if (tcp_hdr(skb
)->syn
) {
1897 slsi_netif_tcp_ack_suppression_syn(dev
, skb
);
1900 if (tcp_hdr(skb
)->fin
) {
1901 slsi_netif_tcp_ack_suppression_fin(dev
, skb
);
1904 if (!tcp_hdr(skb
)->ack
)
1906 if (tcp_hdr(skb
)->rst
)
1908 if (tcp_hdr(skb
)->urg
)
1911 ndev_vif
->tcp_ack_stats
.tack_acks
++;
1912 /* If we find a record, leave the spinlock taken until the end of the function. */
1914 if (ndev_vif
->last_tcp_ack
) {
1915 tcp_ack
= ndev_vif
->last_tcp_ack
;
1916 slsi_spinlock_lock(&tcp_ack
->lock
);
1917 if (!tcp_ack
->state
) {
1918 slsi_spinlock_unlock(&tcp_ack
->lock
);
1919 ndev_vif
->tcp_ack_stats
.tack_sent
++;
1920 SLSI_ERR_NODEV("last_tcp_ack record not enabled\n");
1923 if ((tcp_ack
->dport
== tcp_hdr(skb
)->dest
) &&
1924 (tcp_ack
->sport
== tcp_hdr(skb
)->source
) &&
1925 (tcp_ack
->daddr
== ip_hdr(skb
)->daddr
)) {
1927 ndev_vif
->tcp_ack_stats
.tack_lastrecord
++;
1929 slsi_spinlock_unlock(&tcp_ack
->lock
);
1933 /* Search for an existing record on this connection. */
1934 for (index
= 0; index
< TCP_ACK_SUPPRESSION_RECORDS_MAX
; index
++) {
1935 tcp_ack
= &ndev_vif
->ack_suppression
[index
];
1937 slsi_spinlock_lock(&tcp_ack
->lock
);
1939 if (!tcp_ack
->state
) {
1940 slsi_spinlock_unlock(&tcp_ack
->lock
);
1941 ndev_vif
->tcp_ack_stats
.tack_sent
++;
1942 SLSI_ERR_NODEV("tcp_ack record %d not enabled\n", index
);
1945 if ((tcp_ack
->dport
== tcp_hdr(skb
)->dest
) &&
1946 (tcp_ack
->sport
== tcp_hdr(skb
)->source
) &&
1947 (tcp_ack
->daddr
== ip_hdr(skb
)->daddr
)) {
1949 ndev_vif
->tcp_ack_stats
.tack_searchrecord
++;
1952 slsi_spinlock_unlock(&tcp_ack
->lock
);
1955 /* No record found, so We cannot suppress the ack, return. */
1956 ndev_vif
->tcp_ack_stats
.tack_norecord
++;
1957 ndev_vif
->tcp_ack_stats
.tack_sent
++;
1960 ndev_vif
->last_tcp_ack
= tcp_ack
;
1963 /* If it is a DUP Ack, send straight away without flushing the cache. */
1964 if (be32_to_cpu(tcp_hdr(skb
)->ack_seq
) < tcp_ack
->ack_seq
) {
1965 /* check for wrap-around */
1966 if (((s32
)((u32
)be32_to_cpu(tcp_hdr(skb
)->ack_seq
) - (u32
)tcp_ack
->ack_seq
)) < 0) {
1967 ndev_vif
->tcp_ack_stats
.tack_dacks
++;
1968 ndev_vif
->tcp_ack_stats
.tack_sent
++;
1969 slsi_spinlock_unlock(&tcp_ack
->lock
);
1974 /* Has data, forward straight away. */
1975 if (be16_to_cpu(ip_hdr(skb
)->tot_len
) > ((ip_hdr(skb
)->ihl
* 4) + (tcp_hdr(skb
)->doff
* 4))) {
1976 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif
->sdev
->minor_prof
, tcp_ack
->stream_id
, be32_to_cpu(tcp_hdr(skb
)->seq
));
1977 SCSC_HIP4_SAMPLER_TCP_CWND(ndev_vif
->sdev
->minor_prof
, tcp_ack
->stream_id
, (skb
->sk
) ? tcp_sk(skb
->sk
)->snd_cwnd
: 0);
1978 #if KERNEL_VERSION(4, 14, 0) >= LINUX_VERSION_CODE
1979 SCSC_HIP4_SAMPLER_TCP_SEND_BUG(ndev_vif
->sdev
->minor_prof
, tcp_ack
->stream_id
, sysctl_tcp_wmem
[2]);
1981 SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif
->sdev
->minor_prof
, tcp_ack
->stream_id
, sysctl_tcp_mem
[2]);
1983 ndev_vif
->tcp_ack_stats
.tack_hasdata
++;
1988 /* PSH flag set, forward straight away. */
1989 if (tcp_hdr(skb
)->psh
) {
1990 ndev_vif
->tcp_ack_stats
.tack_psh
++;
1995 /* The ECE flag is set for Explicit Congestion Notification supporting connections when the ECT flag
1996 * is set in the segment packet. We must forward ECE marked acks immediately for ECN to work.
1998 if (tcp_hdr(skb
)->ece
) {
1999 ndev_vif
->tcp_ack_stats
.tack_ece
++;
2004 if (tcp_ack_suppression_monitor
) {
2005 /* Measure the throughput of TCP stream by monitoring the bytes Acked by each Ack over a
2006 * sampling period. Based on throughput apply different degree of Ack suppression
2008 if (tcp_ack
->last_ack_seq
)
2009 tcp_ack
->num_bytes
+= ((u32
)be32_to_cpu(tcp_hdr(skb
)->ack_seq
) - tcp_ack
->last_ack_seq
);
2011 tcp_ack
->last_ack_seq
= be32_to_cpu(tcp_hdr(skb
)->ack_seq
);
2012 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack
->last_sample_time
)) > tcp_ack_suppression_monitor_interval
) {
2014 u32 tcp_rate
= ((tcp_ack
->num_bytes
* 8) / (tcp_ack_suppression_monitor_interval
* 1000));
2016 SLSI_NET_DBG2(dev
, SLSI_TX
, "hysteresis:%u total_bytes:%llu rate:%u Mbps\n",
2017 tcp_ack
->hysteresis
, tcp_ack
->num_bytes
, tcp_rate
);
2019 /* hysterisis - change only if the variation from last value is more than threshold */
2020 if ((abs(tcp_rate
- tcp_ack
->last_tcp_rate
)) > tcp_ack
->hysteresis
) {
2021 if (tcp_rate
>= tcp_ack_suppression_rate_very_high
) {
2022 tcp_ack
->max
= tcp_ack_suppression_rate_very_high_acks
;
2023 tcp_ack
->age
= tcp_ack_suppression_rate_very_high_timeout
;
2024 } else if (tcp_rate
>= tcp_ack_suppression_rate_high
) {
2025 tcp_ack
->max
= tcp_ack_suppression_rate_high_acks
;
2026 tcp_ack
->age
= tcp_ack_suppression_rate_high_timeout
;
2027 } else if (tcp_rate
>= tcp_ack_suppression_rate_low
) {
2028 tcp_ack
->max
= tcp_ack_suppression_rate_low_acks
;
2029 tcp_ack
->age
= tcp_ack_suppression_rate_low_timeout
;
2035 /* Should not be suppressing Acks more than 20% of receiver window size
2036 * doing so can lead to increased RTT and low transmission rate at the
2039 if (tcp_ack
->window_multiplier
)
2040 tcp_recv_window_size
= be16_to_cpu(tcp_hdr(skb
)->window
) * (2 << tcp_ack
->window_multiplier
);
2042 tcp_recv_window_size
= be16_to_cpu(tcp_hdr(skb
)->window
);
2043 SCSC_HIP4_SAMPLER_TCP_RWND(ndev_vif
->sdev
->minor_prof
, tcp_ack
->stream_id
, tcp_recv_window_size
);
2045 acks_max
= (tcp_recv_window_size
/ 5) / (2 * tcp_ack
->mss
);
2046 if (tcp_ack
->max
> acks_max
)
2047 tcp_ack
->max
= acks_max
;
2049 tcp_ack
->hysteresis
= tcp_rate
/ 5; /* 20% hysteresis */
2050 tcp_ack
->last_tcp_rate
= tcp_rate
;
2051 tcp_ack
->num_bytes
= 0;
2052 tcp_ack
->last_sample_time
= ktime_get();
2056 /* Do not suppress Selective Acks. */
2057 if (slsi_netif_tcp_ack_suppression_option(skb
, TCP_ACK_SUPPRESSION_OPTION_SACK
)) {
2058 ndev_vif
->tcp_ack_stats
.tack_sacks
++;
2060 /* A TCP selective Ack suggests TCP segment loss. The TCP sender
2061 * may reduce congestion window and limit the number of segments
2062 * it sends before waiting for Ack.
2063 * It is ideal to switch off TCP ack suppression for certain time
2064 * (being replicated here by tcp_ack_suppression_slow_start_acks
2065 * count) and send as many Acks as possible to allow the cwnd to
2066 * grow at the TCP sender
2068 tcp_ack
->slow_start_count
= 0;
2069 tcp_ack
->tcp_slow_start
= true;
2074 if (be32_to_cpu(tcp_hdr(skb
)->ack_seq
) == tcp_ack
->ack_seq
) {
2075 ndev_vif
->tcp_ack_stats
.tack_dacks
++;
2080 /* When the TCP connection is made, wait until a number of Acks
2081 * are sent before applying the suppression rules. It is to
2082 * allow the cwnd to grow at a normal rate at the TCP sender
2084 if (tcp_ack
->tcp_slow_start
) {
2085 tcp_ack
->slow_start_count
++;
2086 if (tcp_ack
->slow_start_count
>= tcp_ack_suppression_slow_start_acks
) {
2087 tcp_ack
->slow_start_count
= 0;
2088 tcp_ack
->tcp_slow_start
= false;
2094 /* do not suppress if so decided by the TCP monitor */
2095 if (tcp_ack_suppression_monitor
&& (!tcp_ack
->max
|| !tcp_ack
->age
)) {
2100 /* do not suppress delayed Acks that acknowledges for more than 2 TCP
2101 * maximum size segments
2103 if (((u32
)be32_to_cpu(tcp_hdr(skb
)->ack_seq
)) - (tcp_ack
->ack_seq
) > (2 * tcp_ack
->mss
)) {
2104 ndev_vif
->tcp_ack_stats
.tack_delay_acks
++;
2109 /* Do not suppress unless the receive window is large
2111 * With low receive window size the cwnd can't grow much.
2112 * So suppressing Acks has a negative impact on sender
2113 * rate as it increases the Round trip time measured at
2116 if (!tcp_ack_suppression_monitor
) {
2117 if (tcp_ack
->window_multiplier
)
2118 tcp_recv_window_size
= be16_to_cpu(tcp_hdr(skb
)->window
) * (2 << tcp_ack
->window_multiplier
);
2120 tcp_recv_window_size
= be16_to_cpu(tcp_hdr(skb
)->window
);
2121 if (tcp_recv_window_size
< tcp_ack_suppression_rcv_window
* 1024) {
2122 ndev_vif
->tcp_ack_stats
.tack_low_window
++;
2128 if (!tcp_ack_suppression_monitor
&& ktime_to_ms(ktime_sub(ktime_get(), tcp_ack
->last_sent
)) >= tcp_ack
->age
) {
2129 ndev_vif
->tcp_ack_stats
.tack_ktime
++;
2134 /* Test for a new cache */
2135 if (!skb_queue_len(&tcp_ack
->list
)) {
2136 skb_queue_tail(&tcp_ack
->list
, skb
);
2138 tcp_ack
->ack_seq
= be32_to_cpu(tcp_hdr(skb
)->ack_seq
);
2140 mod_timer(&tcp_ack
->timer
, jiffies
+ msecs_to_jiffies(tcp_ack
->age
));
2141 slsi_spinlock_unlock(&tcp_ack
->lock
);
2145 cskb
= skb_dequeue(&tcp_ack
->list
);
2147 if (tcp_ack_suppression_monitor
&& tcp_ack
->age
)
2148 mod_timer(&tcp_ack
->timer
, jiffies
+ msecs_to_jiffies(tcp_ack
->age
));
2149 ndev_vif
->tcp_ack_stats
.tack_suppressed
++;
2150 slsi_kfree_skb(cskb
);
2152 skb_queue_tail(&tcp_ack
->list
, skb
);
2153 tcp_ack
->ack_seq
= be32_to_cpu(tcp_hdr(skb
)->ack_seq
);
2158 if (tcp_ack
->count
>= tcp_ack
->max
) {
2160 ndev_vif
->tcp_ack_stats
.tack_max
++;
2164 slsi_spinlock_unlock(&tcp_ack
->lock
);
2167 /* Flush the cache. */
2168 cskb
= skb_dequeue(&tcp_ack
->list
);
2172 del_timer(&tcp_ack
->timer
);
2174 tcp_ack
->last_sent
= ktime_get();
2176 slsi_spinlock_unlock(&tcp_ack
->lock
);
2177 ndev_vif
->tcp_ack_stats
.tack_sent
++;