1/*
2 *
3 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
4 *
5 ****************************************************************************/
6
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/rtnetlink.h>
10#include <net/sch_generic.h>
11#include <linux/if_ether.h>
12#include <scsc/scsc_logring.h>
13
14#include "debug.h"
15#include "netif.h"
16#include "dev.h"
17#include "mgt.h"
18#include "scsc_wifi_fcq.h"
19#include "ioctl.h"
20#include "mib.h"
21#include "hip4_sampler.h"
22
23#define IP4_OFFSET_TO_TOS_FIELD 1
24#define IP6_OFFSET_TO_TC_FIELD_0 0
25#define IP6_OFFSET_TO_TC_FIELD_1 1
26#define FIELD_TO_DSCP 2
27
28/* DSCP */
29/* (RFC5865) */
30#define DSCP_VA 0x2C
31/* (RFC3246) */
32#define DSCP_EF 0x2E
33/* (RFC2597) */
34#define DSCP_AF43 0x26
35#define DSCP_AF42 0x24
36#define DSCP_AF41 0x22
37#define DSCP_AF33 0x1E
38#define DSCP_AF32 0x1C
39#define DSCP_AF31 0x1A
40#define DSCP_AF23 0x16
41#define DSCP_AF22 0x14
42#define DSCP_AF21 0x12
43#define DSCP_AF13 0x0E
44#define DSCP_AF12 0x0C
45#define DSCP_AF11 0x0A
46/* (RFC2474) */
47#define CS7 0x38
48#define CS6 0x30
49#define CS5 0x28
50#define CS4 0x20
51#define CS3 0x18
52#define CS2 0x10
53#define CS0 0x00
54/* (RFC3662) */
55#define CS1 0x08
56
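/* Illustrative example (not part of the original comments): an IPv4 packet
 * with TOS byte 0xB8 has DSCP 0xB8 >> 2 = 0x2E (DSCP_EF), which
 * slsi_get_priority_from_tos_dscp() below maps to FAPI_PRIORITY_QOS_UP6 (voice).
 */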
57#ifndef CONFIG_ARM
58static bool tcp_ack_suppression_disable;
59module_param(tcp_ack_suppression_disable, bool, S_IRUGO | S_IWUSR);
60MODULE_PARM_DESC(tcp_ack_suppression_disable, "Disable TCP ack suppression feature");
61
62static bool tcp_ack_suppression_disable_2g;
63module_param(tcp_ack_suppression_disable_2g, bool, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(tcp_ack_suppression_disable_2g, "Disable TCP ack suppression only for the 2.4GHz band");
65
66static bool tcp_ack_suppression_monitor = true;
67module_param(tcp_ack_suppression_monitor, bool, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(tcp_ack_suppression_monitor, "TCP ack suppression throughput monitor: Y: enable (default), N: disable");
69
70static uint tcp_ack_suppression_monitor_interval = 500;
71module_param(tcp_ack_suppression_monitor_interval, uint, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(tcp_ack_suppression_monitor_interval, "Sampling interval (in ms) for throughput monitor");
73
74static uint tcp_ack_suppression_timeout = 16;
75module_param(tcp_ack_suppression_timeout, uint, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(tcp_ack_suppression_timeout, "Timeout (in ms) before cached TCP ack is flushed to tx");
77
78static uint tcp_ack_suppression_max = 16;
79module_param(tcp_ack_suppression_max, uint, S_IRUGO | S_IWUSR);
80MODULE_PARM_DESC(tcp_ack_suppression_max, "Maximum number of TCP acks suppressed before latest flushed to tx");
81
82static uint tcp_ack_suppression_rate_very_high = 100;
83module_param(tcp_ack_suppression_rate_very_high, uint, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high, "Rate (in Mbps) to apply very high degree of suppression");
85
86static uint tcp_ack_suppression_rate_very_high_timeout = 4;
87module_param(tcp_ack_suppression_rate_very_high_timeout, uint, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in very high rate");
89
90static uint tcp_ack_suppression_rate_very_high_acks = 20;
91module_param(tcp_ack_suppression_rate_very_high_acks, uint, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_acks, "Maximum number of TCP acks suppressed before latest flushed in very high rate");
93
94static uint tcp_ack_suppression_rate_high = 20;
95module_param(tcp_ack_suppression_rate_high, uint, S_IRUGO | S_IWUSR);
96MODULE_PARM_DESC(tcp_ack_suppression_rate_high, "Rate (in Mbps) to apply high degree of suppression");
97
98static uint tcp_ack_suppression_rate_high_timeout = 4;
99module_param(tcp_ack_suppression_rate_high_timeout, uint, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(tcp_ack_suppression_rate_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in high rate");
101
102static uint tcp_ack_suppression_rate_high_acks = 16;
103module_param(tcp_ack_suppression_rate_high_acks, uint, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(tcp_ack_suppression_rate_high_acks, "Maximum number of TCP acks suppressed before latest flushed in high rate");
105
106static uint tcp_ack_suppression_rate_low = 1;
107module_param(tcp_ack_suppression_rate_low, uint, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(tcp_ack_suppression_rate_low, "Rate (in Mbps) to apply low degree of suppression");
109
110static uint tcp_ack_suppression_rate_low_timeout = 4;
111module_param(tcp_ack_suppression_rate_low_timeout, uint, S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(tcp_ack_suppression_rate_low_timeout, "Timeout (in ms) before cached TCP ack is flushed in low rate");
113
114static uint tcp_ack_suppression_rate_low_acks = 10;
115module_param(tcp_ack_suppression_rate_low_acks, uint, S_IRUGO | S_IWUSR);
116MODULE_PARM_DESC(tcp_ack_suppression_rate_low_acks, "Maximum number of TCP acks suppressed before latest flushed in low rate");
117
118static uint tcp_ack_suppression_slow_start_acks = 512;
119module_param(tcp_ack_suppression_slow_start_acks, uint, S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(tcp_ack_suppression_slow_start_acks, "Maximum number of Acks sent in slow start");
121
122static uint tcp_ack_suppression_rcv_window = 128;
123module_param(tcp_ack_suppression_rcv_window, uint, S_IRUGO | S_IWUSR);
124MODULE_PARM_DESC(tcp_ack_suppression_rcv_window, "Receive window size (in unit of Kbytes) that triggers Ack suppression");
125
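/* Illustrative reading of the defaults above: with the throughput monitor
 * enabled, cached acks are flushed after 4 ms or 20 acks at >= 100 Mbps,
 * after 4 ms or 16 acks at >= 20 Mbps, and after 4 ms or 10 acks at >= 1 Mbps;
 * with the monitor disabled the fixed tcp_ack_suppression_timeout/_max values
 * (16 ms / 16 acks) are used instead.
 */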
126#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
127static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t);
128#else
129static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data);
130#endif
131static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev);
132static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev);
133static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb);
134#endif
135
136/* Net Device callback operations */
137static int slsi_net_open(struct net_device *dev)
138{
139 struct netdev_vif *ndev_vif = netdev_priv(dev);
140 struct slsi_dev *sdev = ndev_vif->sdev;
141 int err;
142 unsigned char dev_addr_zero_check[ETH_ALEN];
143
144 if (WARN_ON(ndev_vif->is_available))
145 return -EINVAL;
146
147 if (sdev->mlme_blocked) {
148 SLSI_NET_WARN(dev, "Fail: called when MLME in blocked state\n");
149 return -EIO;
150 }
151
152 slsi_wakelock(&sdev->wlan_wl);
153
154 /* Check whether RF test mode has been requested. */
155 slsi_check_rf_test_mode();
156
157 err = slsi_start(sdev);
158 if (WARN_ON(err)) {
159 slsi_wakeunlock(&sdev->wlan_wl);
160 return err;
161 }
162
163 if (!sdev->netdev_up_count) {
164 slsi_get_hw_mac_address(sdev, sdev->hw_addr);
165 /* Assign Addresses */
166 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_WLAN], sdev->hw_addr);
167
168 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2P], sdev->hw_addr);
169 sdev->netdev_addresses[SLSI_NET_INDEX_P2P][0] |= 0x02; /* Set the local bit */
170
171 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN], sdev->hw_addr);
172 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][0] |= 0x02; /* Set the local bit */
173 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][4] ^= 0x80; /* EXOR 5th byte with 0x80 */
174#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
175 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_NAN], sdev->hw_addr);
176 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][0] |= 0x02; /* Set the local bit */
177 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][3] ^= 0x80; /* EXOR 4th byte with 0x80 */
178#endif
179 sdev->initial_scan = true;
180 }
181
182 memset(dev_addr_zero_check, 0, ETH_ALEN);
183 if (!memcmp(dev->dev_addr, dev_addr_zero_check, ETH_ALEN)) {
184#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
185 if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
186 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
187 else
188 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
189#else
190 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
191#endif
192 }
193 SLSI_ETHER_COPY(dev->perm_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
194 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
195#ifdef CONFIG_SCSC_WLAN_DEBUG
196 if (ndev_vif->iftype == NL80211_IFTYPE_MONITOR) {
197 err = slsi_start_monitor_mode(sdev, dev);
198 if (WARN_ON(err)) {
199 slsi_wakeunlock(&sdev->wlan_wl);
200 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
201 return err;
202 }
203 }
204#endif
205 SLSI_NET_INFO(dev, "ifnum:%d r:%d MAC:%pM\n", ndev_vif->ifnum, sdev->recovery_status, dev->dev_addr);
206 ndev_vif->is_available = true;
207 sdev->netdev_up_count++;
208
209#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
210 reinit_completion(&ndev_vif->sig_wait.completion);
211#else
212 INIT_COMPLETION(ndev_vif->sig_wait.completion);
213#endif
214#ifndef CONFIG_ARM
215 slsi_netif_tcp_ack_suppression_start(dev);
216#endif
217 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
218
219 netif_tx_start_all_queues(dev);
220 slsi_wakeunlock(&sdev->wlan_wl);
221
222 /* The default power mode in the host */
223 /* 2511 means unifiForceActive and 1 means active */
224 if (slsi_is_rf_test_mode_enabled()) {
225 SLSI_NET_INFO(dev, "*#rf# rf test mode set is enabled.\n");
226 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAMING_ENABLED, 0);
227 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, 0);
228 slsi_set_mib_roam(sdev, NULL, 2511, 1);
229 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, 0);
230 }
231
232 return 0;
233}
234
235static int slsi_net_stop(struct net_device *dev)
236{
237 struct netdev_vif *ndev_vif = netdev_priv(dev);
238 struct slsi_dev *sdev = ndev_vif->sdev;
239
240 SLSI_NET_INFO(dev, "ifnum:%d r:%d\n", ndev_vif->ifnum, sdev->recovery_status);
241 slsi_wakelock(&sdev->wlan_wl);
242 netif_tx_stop_all_queues(dev);
243 sdev->initial_scan = false;
244
245 if (!ndev_vif->is_available) {
246 /* May have been taken out by the Chip going down */
247 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available\n");
248 slsi_wakeunlock(&sdev->wlan_wl);
249 return 0;
250 }
251#ifndef SLSI_TEST_DEV
252 if (!slsi_is_rf_test_mode_enabled() && !sdev->recovery_status) {
253 SLSI_NET_DBG1(dev, SLSI_NETDEV, "To user mode\n");
254 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, -55);
255 }
256#endif
257#ifndef CONFIG_ARM
258 slsi_netif_tcp_ack_suppression_stop(dev);
259#endif
260 slsi_stop_net_dev(sdev, dev);
261
262 sdev->allow_switch_40_mhz = true;
263 sdev->allow_switch_80_mhz = true;
264 sdev->acs_channel_switched = false;
265 slsi_wakeunlock(&sdev->wlan_wl);
266 return 0;
267}
268
269/* This is called after the WE handlers */
270static int slsi_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
271{
272 SLSI_NET_DBG4(dev, SLSI_NETDEV, "IOCTL cmd:0x%.4x\n", cmd);
273
274 if (cmd == SIOCDEVPRIVATE + 2) { /* 0x89f0 + 2 from wpa_supplicant */
275 return slsi_ioctl(dev, rq, cmd);
276 }
277
278 return -EOPNOTSUPP;
279}
280
281static struct net_device_stats *slsi_net_get_stats(struct net_device *dev)
282{
283 struct netdev_vif *ndev_vif = netdev_priv(dev);
284
285 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
286 return &ndev_vif->stats;
287}
288
289#ifdef CONFIG_SCSC_USE_WMM_TOS
290static u16 slsi_get_priority_from_tos(u8 *frame, u16 proto)
291{
292 if (WARN_ON(!frame))
293 return FAPI_PRIORITY_QOS_UP0;
294
295 switch (proto) {
296 case ETH_P_IP: /* IPv4 */
297 return (u16)(((frame[IP4_OFFSET_TO_TOS_FIELD]) & 0xE0) >> 5);
298
299 case ETH_P_IPV6: /* IPv6 */
300 return (u16)((*frame & 0x0E) >> 1);
301
302 default:
303 return FAPI_PRIORITY_QOS_UP0;
304 }
305}
306
307#else
308static u16 slsi_get_priority_from_tos_dscp(u8 *frame, u16 proto)
309{
310 u8 dscp;
311
312 if (WARN_ON(!frame))
313 return FAPI_PRIORITY_QOS_UP0;
314
315 switch (proto) {
316 case ETH_P_IP: /* IPv4 */
317 dscp = frame[IP4_OFFSET_TO_TOS_FIELD] >> FIELD_TO_DSCP;
318 break;
319
320 case ETH_P_IPV6: /* IPv6 */
321 /* Get traffic class */
322 dscp = (((frame[IP6_OFFSET_TO_TC_FIELD_0] & 0x0F) << 4) |
323 ((frame[IP6_OFFSET_TO_TC_FIELD_1] & 0xF0) >> 4)) >> FIELD_TO_DSCP;
324 break;
325
326 default:
327 return FAPI_PRIORITY_QOS_UP0;
328 }
329/* DSCP table based on RFC 8325 (used from Android 10 onwards) */
330#if (defined(ANDROID_VERSION) && ANDROID_VERSION >= 100000)
331 switch (dscp) {
332 case CS7:
333 return FAPI_PRIORITY_QOS_UP7;
334 case CS6:
335 case DSCP_EF:
336 case DSCP_VA:
337 return FAPI_PRIORITY_QOS_UP6;
338 case CS5:
339 return FAPI_PRIORITY_QOS_UP5;
340 case DSCP_AF41:
341 case DSCP_AF42:
342 case DSCP_AF43:
343 case CS4:
344 case DSCP_AF31:
345 case DSCP_AF32:
346 case DSCP_AF33:
347 case CS3:
348 return FAPI_PRIORITY_QOS_UP4;
349 case DSCP_AF21:
350 case DSCP_AF22:
351 case DSCP_AF23:
352 return FAPI_PRIORITY_QOS_UP3;
353 case CS2:
354 case DSCP_AF11:
355 case DSCP_AF12:
356 case DSCP_AF13:
357 case CS0:
358 return FAPI_PRIORITY_QOS_UP0;
359 case CS1:
360 return FAPI_PRIORITY_QOS_UP1;
361 default:
362 return FAPI_PRIORITY_QOS_UP0;
363 }
364#else
365 switch (dscp) {
366 case DSCP_EF:
367 case DSCP_VA:
368 return FAPI_PRIORITY_QOS_UP6;
369 case DSCP_AF43:
370 case DSCP_AF42:
371 case DSCP_AF41:
372 return FAPI_PRIORITY_QOS_UP5;
373 case DSCP_AF33:
374 case DSCP_AF32:
375 case DSCP_AF31:
376 case DSCP_AF23:
377 case DSCP_AF22:
378 case DSCP_AF21:
379 case DSCP_AF13:
380 case DSCP_AF12:
381 case DSCP_AF11:
382 return FAPI_PRIORITY_QOS_UP0;
383 case CS7:
384 return FAPI_PRIORITY_QOS_UP7;
385 case CS6:
386 return FAPI_PRIORITY_QOS_UP6;
387 case CS5:
388 return FAPI_PRIORITY_QOS_UP5;
389 case CS4:
390 return FAPI_PRIORITY_QOS_UP4;
391 case CS3:
392 return FAPI_PRIORITY_QOS_UP3;
393 case CS2:
394 return FAPI_PRIORITY_QOS_UP2;
395 case CS1:
396 return FAPI_PRIORITY_QOS_UP1;
397 case CS0:
398 return FAPI_PRIORITY_QOS_UP0;
399 default:
400 return FAPI_PRIORITY_QOS_UP0;
401 }
402#endif
403}
404
405#endif
406
407static bool slsi_net_downgrade_ac(struct net_device *dev, struct sk_buff *skb)
408{
409 SLSI_UNUSED_PARAMETER(dev);
410
411 switch (skb->priority) {
412 case 6:
413 case 7:
414 skb->priority = FAPI_PRIORITY_QOS_UP5; /* VO -> VI */
415 return true;
416 case 4:
417 case 5:
418 skb->priority = FAPI_PRIORITY_QOS_UP3; /* VI -> BE */
419 return true;
420 case 0:
421 case 3:
422 skb->priority = FAPI_PRIORITY_QOS_UP2; /* BE -> BK */
423 return true;
424 default:
425 return false;
426 }
427}
428
429static u8 slsi_net_up_to_ac_mapping(u8 priority)
430{
431 switch (priority) {
432 case FAPI_PRIORITY_QOS_UP6:
433 case FAPI_PRIORITY_QOS_UP7:
434 return BIT(FAPI_PRIORITY_QOS_UP6) | BIT(FAPI_PRIORITY_QOS_UP7);
435 case FAPI_PRIORITY_QOS_UP4:
436 case FAPI_PRIORITY_QOS_UP5:
437 return BIT(FAPI_PRIORITY_QOS_UP4) | BIT(FAPI_PRIORITY_QOS_UP5);
438 case FAPI_PRIORITY_QOS_UP0:
439 case FAPI_PRIORITY_QOS_UP3:
440 return BIT(FAPI_PRIORITY_QOS_UP0) | BIT(FAPI_PRIORITY_QOS_UP3);
441 default:
442 return BIT(FAPI_PRIORITY_QOS_UP1) | BIT(FAPI_PRIORITY_QOS_UP2);
443 }
444}
445
446enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority)
447{
448 switch (priority) {
449 case FAPI_PRIORITY_QOS_UP0:
450 case FAPI_PRIORITY_QOS_UP3:
451 return SLSI_TRAFFIC_Q_BE;
452 case FAPI_PRIORITY_QOS_UP1:
453 case FAPI_PRIORITY_QOS_UP2:
454 return SLSI_TRAFFIC_Q_BK;
455 case FAPI_PRIORITY_QOS_UP4:
456 case FAPI_PRIORITY_QOS_UP5:
457 return SLSI_TRAFFIC_Q_VI;
458 case FAPI_PRIORITY_QOS_UP6:
459 case FAPI_PRIORITY_QOS_UP7:
460 return SLSI_TRAFFIC_Q_VO;
461 default:
462 return SLSI_TRAFFIC_Q_BE;
463 }
464}
465
466int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids)
467{
468 switch (ac) {
469 case SLSI_TRAFFIC_Q_BE:
470 tids[0] = FAPI_PRIORITY_QOS_UP0;
471 tids[1] = FAPI_PRIORITY_QOS_UP3;
472 break;
473
474 case SLSI_TRAFFIC_Q_BK:
475 tids[0] = FAPI_PRIORITY_QOS_UP1;
476 tids[1] = FAPI_PRIORITY_QOS_UP2;
477 break;
478
479 case SLSI_TRAFFIC_Q_VI:
480 tids[0] = FAPI_PRIORITY_QOS_UP4;
481 tids[1] = FAPI_PRIORITY_QOS_UP5;
482 break;
483
484 case SLSI_TRAFFIC_Q_VO:
485 tids[0] = FAPI_PRIORITY_QOS_UP6;
486 tids[1] = FAPI_PRIORITY_QOS_UP7;
487 break;
488
489 default:
490 return -EINVAL;
491 }
492
493 return 0;
494}
495
496static void slsi_net_downgrade_pri(struct net_device *dev, struct slsi_peer *peer,
497 struct sk_buff *skb)
498{
499 /* If we are a client, downgrade the AC if ACM is set
500 * and a TSPEC has not been established.
501 */
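	/* Illustrative example: a UP6 (VO) frame for a peer whose wmm_acm has ACM
	 * set for VO and VI but not BE, with no TSPEC established, is downgraded
	 * UP6 -> UP5 (VI) -> UP3 (BE) by the loop below and transmitted as BE.
	 */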
502 while (unlikely(peer->wmm_acm & BIT(skb->priority)) &&
503 !(peer->tspec_established & slsi_net_up_to_ac_mapping(skb->priority))) {
504 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Downgrading from UP:%d\n", skb->priority);
505 if (!slsi_net_downgrade_ac(dev, skb))
506 break;
507 }
508 SLSI_NET_DBG4(dev, SLSI_NETDEV, "To UP:%d\n", skb->priority);
509}
510
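/* Summary of the queue selection below: EAPOL/WAI/ARP/DHCP frames are sent on
 * SLSI_NETIF_Q_PRIORITY, multicast frames on an AP vif go to the per-AC
 * multicast queues, and all other frames go to the per-peer AC queue derived
 * from the frame priority; frames without a resolvable peer are mapped to
 * SLSI_NETIF_Q_DISCARD.
 */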
511#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
512static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback)
513#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
514static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv)
515#else
516static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb)
517#endif
518{
519 struct netdev_vif *ndev_vif = netdev_priv(dev);
520 struct slsi_dev *sdev = ndev_vif->sdev;
521 u16 netif_q = 0;
522 struct ethhdr *ehdr = (struct ethhdr *)skb->data;
523 int proto = 0;
524 struct slsi_peer *peer;
525
526#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
527 (void)accel_priv;
528#endif
529#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
530 (void)fallback;
531#endif
532 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
533
534 /* Defensive check for uninitialized mac header */
535 if (!skb_mac_header_was_set(skb))
536 skb_reset_mac_header(skb);
537
538 if (is_zero_ether_addr(ehdr->h_dest) || is_zero_ether_addr(ehdr->h_source)) {
539 SLSI_NET_WARN(dev, "invalid Ethernet addresses (dest:%pM,src:%pM)\n", ehdr->h_dest, ehdr->h_source);
540 SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
541 return SLSI_NETIF_Q_DISCARD;
542 }
543
544 proto = be16_to_cpu(eth_hdr(skb)->h_proto);
545
546 switch (proto) {
547 default:
548 /* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
549 break;
550 case ETH_P_PAE:
551 case ETH_P_WAI:
552 SLSI_NET_DBG3(dev, SLSI_TX, "EAP packet. Priority Queue Selected\n");
553 return SLSI_NETIF_Q_PRIORITY;
554 case ETH_P_ARP:
555 SLSI_NET_DBG3(dev, SLSI_TX, "ARP frame. Priority Queue Selected\n");
556 return SLSI_NETIF_Q_PRIORITY;
557 case ETH_P_IP:
558 if (slsi_is_dhcp_packet(skb->data) == SLSI_TX_IS_NOT_DHCP)
559 break;
560 SLSI_NET_DBG3(dev, SLSI_TX, "DHCP packet. Priority Queue Selected\n");
561 return SLSI_NETIF_Q_PRIORITY;
562 }
563
564 if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
565 /* MULTICAST/BROADCAST Queue is only used for AP */
566 if (is_multicast_ether_addr(ehdr->h_dest)) {
567 SLSI_NET_DBG3(dev, SLSI_TX, "Multicast AC queue will be selected\n");
568#ifdef CONFIG_SCSC_USE_WMM_TOS
569 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
570#else
571 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
572#endif
573 return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb->priority));
574 }
575
576 slsi_spinlock_lock(&ndev_vif->peer_lock);
577 peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
578 if (!peer) {
579 SLSI_NET_DBG1(dev, SLSI_TX, "Discard: Peer %pM NOT found\n", ehdr->h_dest);
580 slsi_spinlock_unlock(&ndev_vif->peer_lock);
581 return SLSI_NETIF_Q_DISCARD;
582 }
583
584 if (peer->qos_enabled) {
585#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
586 if (peer->qos_map_set) { /*802.11 QoS for interworking*/
587 skb->priority = cfg80211_classify8021d(skb, &peer->qos_map);
588 } else
589#endif
590 {
591#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
592 if ((proto == ETH_P_IP && slsi_is_dns_packet(skb->data)) ||
593 (proto == ETH_P_IP && slsi_is_mdns_packet(skb->data)) ||
594 (proto == ETH_P_IP && slsi_is_tcp_sync_packet(dev, skb))) {
595 skb->priority = FAPI_PRIORITY_QOS_UP7;
596 } else
597#endif
598 {
599#ifdef CONFIG_SCSC_USE_WMM_TOS
600 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
601#else
602 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
603#endif
604 }
605 }
606 } else {
607 skb->priority = FAPI_PRIORITY_QOS_UP0;
608 }
609
610 /* Downgrade the priority if acm bit is set and tspec is not established */
611 slsi_net_downgrade_pri(dev, peer, skb);
612
613 netif_q = slsi_netif_get_peer_queue(peer->queueset, slsi_frame_priority_to_ac_queue(skb->priority));
614 SLSI_NET_DBG3(dev, SLSI_TX, "prio:%d queue:%u\n", skb->priority, netif_q);
615 slsi_spinlock_unlock(&ndev_vif->peer_lock);
616 return netif_q;
617}
618
619void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
620 struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection)
621{
622 struct netdev_vif *netdev_vif = netdev_priv(dev);
623 struct sk_buff *skb = NULL;
624#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
625 struct sk_buff *skb_to_free = NULL;
626#endif
627 struct ethhdr *ehdr;
628 struct Qdisc *qd;
629 u32 num_pkts;
630 u16 staq;
631 u16 tdlsq;
632 u16 netq;
633 u16 i;
634 u16 j;
635 int index;
636 struct slsi_tcp_ack_s *tcp_ack;
637
638 /* Get the netdev queue number from queueset */
639 staq = slsi_netif_get_peer_queue(sta_peer->queueset, 0);
640 tdlsq = slsi_netif_get_peer_queue(tdls_peer->queueset, 0);
641
642 SLSI_NET_DBG1(dev, SLSI_TDLS, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq: %d, tdls_netq: %d\n",
643 connection, sta_peer->queueset, tdls_peer->queueset, staq, tdlsq);
644
645 /* Pause the TDLS queues and STA netdev queues */
646 slsi_tx_pause_queues(sdev);
647
648 /* walk through frames in TCP Ack suppression queue and change mapping to TDLS queue */
649 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
650 tcp_ack = &netdev_vif->ack_suppression[index];
651 if (!tcp_ack || !tcp_ack->state)
652 continue;
653 slsi_spinlock_lock(&tcp_ack->lock);
654 skb_queue_walk(&tcp_ack->list, skb) {
655 SLSI_NET_DBG2(dev, SLSI_TDLS, "frame in TCP Ack list (peer:%pM)\n", eth_hdr(skb)->h_dest);
656 /* is it destined to TDLS peer? */
657 if (compare_ether_addr(tdls_peer->address, eth_hdr(skb)->h_dest) == 0) {
658 if (connection) {
659 /* TDLS setup: change the queue mapping to TDLS queue */
660 skb->queue_mapping += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
661 } else {
662 /* TDLS teardown: change the queue to STA queue */
663 skb->queue_mapping -= (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
664 }
665 }
666 }
667 slsi_spinlock_unlock(&tcp_ack->lock);
668 }
669
670 /**
671 * For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
672 * For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
673 */
674 if (connection)
675 tdls_peer->valid = true;
676 else
677 tdls_peer->valid = false;
678
679 /* Move packets from netdev queues */
680 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
681 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
682 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
683
684 if (connection) {
685 /* Check if any packet is already available in the TDLS queue (most likely from the last session) */
686 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
687 SLSI_NET_ERR(dev, "tdls_connection: Packet present in queue %d\n", tdlsq + i);
688
689 qd = dev->_tx[staq + i].qdisc;
690 /* Get the total number of packets in STAQ */
691 num_pkts = qd->q.qlen;
692
693 /* Check all the pkts in STAQ and move the TDLS pkts to TDLSQ */
694 for (j = 0; j < num_pkts; j++) {
695 qd = dev->_tx[staq + i].qdisc;
696 /* Dequeue the pkt from STAQ. This logic is similar to the kernel API dequeue_skb() */
697 skb = qd->gso_skb;
698 if (skb) {
699 qd->gso_skb = NULL;
700 qd->q.qlen--;
701 } else {
702 skb = qd->dequeue(qd);
703 }
704
705 if (!skb) {
706 SLSI_NET_ERR(dev, "tdls_connection: STA NETQ skb is NULL\n");
707 break;
708 }
709
710 /* Change the queue mapping for the TDLS packets */
711 netq = skb->queue_mapping;
712 ehdr = (struct ethhdr *)skb->data;
713 if (compare_ether_addr(tdls_peer->address, ehdr->h_dest) == 0) {
714 netq += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
715 SLSI_NET_DBG3(dev, SLSI_TDLS, "NETQ%d: Queue mapping changed from %d to %d\n",
716 i, skb->queue_mapping, netq);
717 skb_set_queue_mapping(skb, netq);
718 }
719
720 qd = dev->_tx[netq].qdisc;
721#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
722 qd->enqueue(skb, qd, &skb_to_free);
723#else
724 /* If the netdev queue is already full then enqueue() will drop the skb */
725 qd->enqueue(skb, qd);
726#endif
727 }
728 } else {
729 num_pkts = dev->_tx[tdlsq + i].qdisc->q.qlen;
730 /* Move the packets from TDLS to STA queue */
731 for (j = 0; j < num_pkts; j++) {
732 /* Dequeue the pkt from TDLS_Q. This logic is similar to the kernel API dequeue_skb() */
733 qd = dev->_tx[tdlsq + i].qdisc;
734 skb = qd->gso_skb;
735 if (skb) {
736 qd->gso_skb = NULL;
737 qd->q.qlen--;
738 } else {
739 skb = qd->dequeue(qd);
740 }
741
742 if (!skb) {
743 SLSI_NET_ERR(dev, "tdls_teardown: TDLS NETQ skb is NULL\n");
744 break;
745 }
746
747 /* Update the queue mapping */
748 skb_set_queue_mapping(skb, staq + i);
749
750 /* Enqueue the packet in STA queue */
751 qd = dev->_tx[staq + i].qdisc;
752#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
753 qd->enqueue(skb, qd, &skb_to_free);
754#else
755 /* If the netdev queue is already full then enqueue() will drop the skb */
756 qd->enqueue(skb, qd);
757#endif
758 }
759 }
760 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
761 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
762 }
763#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
764 if (unlikely(skb_to_free))
765 kfree_skb_list(skb_to_free);
766#endif
767
768 /* Teardown - after teardown there should not be any packet in TDLS queues */
769 if (!connection)
770 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
771 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
772 SLSI_NET_ERR(dev, "tdls_teardown: Packet present in NET queue %d\n", tdlsq + i);
773 }
774
775 /* Resume the STA and TDLS netdev queues */
776 slsi_tx_unpause_queues(sdev);
777}
778
779/**
780 * This is the main TX entry point for the driver.
781 *
782 * Ownership of the skb is transferred to another function ONLY IF such
783 * function was able to deal with that skb and ended with a SUCCESS ret code.
784 * Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
785 *
786 * In the context of this function:
787 * - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
788 * SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
789 * kernel netstack will receive back NETDEV_TX_OK too.
790 * - ownership is KEPT HERE by this function when the lower layers fail somehow
791 * to deal with the transmission of the skb. In this case the skb WOULD
792 * NOT HAVE BEEN FREED by the lower layers, which instead return a proper ERRCODE.
793 * - intermediate lower layer functions (NOT directly involved in failure or
794 * success) will relay any retcode up to this layer for evaluation.
795 *
796 * WHAT HAPPENS THEN, is ERRCODE-dependent, and at the moment:
797 * - ENOSPC: something related to queueing happened...this should be
798 * retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
799 * requeued by the Kernel NetStack itself, using the proper queue.
800 * As a consequence the SKB is NOT FREED HERE!
801 * - ANY OTHER ERR: all other errors are considered at the moment NOT
802 * recoverable and SO skbs are dropped (FREED) HERE...Kernel will receive
803 * the proper ERRCODE and stop dealing with the packet, considering it
804 * consumed by the lower layer (same behavior as NETDEV_TX_OK).
805 *
806 * BIG NOTE:
807 * As detailed in Documentation/networking/drivers.txt, the above behavior
808 * of returning NETDEV_TX_BUSY to trigger requeuing by the Kernel is
809 * discouraged and should be used ONLY in case of a real HARD error(?);
810 * the advised solution is to actively STOP the queues before running out
811 * of space and WAKE them up again when more free buffers become
812 * available.
813 */
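/* In short, for the function below:
 *   slsi_tx_data() == 0       -> NETDEV_TX_OK, skb consumed by the lower layers
 *   slsi_tx_data() == -ENOSPC -> NETDEV_TX_BUSY, skb kept so the netstack can requeue it
 *   any other error           -> skb freed here, tx_dropped incremented, error returned
 */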
814static netdev_tx_t slsi_net_hw_xmit(struct sk_buff *skb, struct net_device *dev)
815{
816 struct netdev_vif *ndev_vif = netdev_priv(dev);
817 struct slsi_dev *sdev = ndev_vif->sdev;
818 int r = NETDEV_TX_OK;
819 struct sk_buff *original_skb = NULL;
820#ifdef CONFIG_SCSC_WLAN_DEBUG
821 int known_users = 0;
822#endif
823 /* Keep the packet length. The packet length will be used to increment
824 * stats for the netdev if the packet was successfully transmitted.
825 * The ownership of the SKB is passed to lower layers, so we should
826 * not refer to the SKB after this point.
827 */
828 unsigned int packet_len = skb->len;
829 enum slsi_traffic_q traffic_q = slsi_frame_priority_to_ac_queue(skb->priority);
830
831 slsi_wakelock(&sdev->wlan_wl);
832 slsi_skb_cb_init(skb);
833
834 /* Check for misaligned (oddly aligned) data.
835 * The f/w requires 16-bit alignment.
836 * This is a corner case - for example, the kernel can generate BPDU
837 * that are oddly aligned. Therefore it is acceptable to copy these
838 * frames to a 16 bit alignment.
839 */
840 if ((uintptr_t)skb->data & 0x1) {
841 struct sk_buff *skb2 = NULL;
842 /* Received a socket buffer aligned on an odd address.
843 * Re-align by asking for headroom.
844 */
845 skb2 = skb_copy_expand(skb, SLSI_NETIF_SKB_HEADROOM, skb_tailroom(skb), GFP_ATOMIC);
846 if (skb2 && (!(((uintptr_t)skb2->data) & 0x1))) {
847 /* We should account for this duplication */
848 original_skb = skb;
849 skb = skb2;
850 SLSI_NET_DBG3(dev, SLSI_TX, "Oddly aligned skb realigned\n");
851 } else {
852 /* Drop the packet if we can't re-align. */
853 SLSI_NET_WARN(dev, "Oddly aligned skb failed realignment, dropping\n");
854 if (skb2) {
855 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand didn't align for us\n");
856 slsi_kfree_skb(skb2);
857 } else {
858 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand failed when trying to align\n");
859 }
860 r = -EFAULT;
861 goto evaluate;
862 }
863 }
864 slsi_dbg_track_skb(skb, GFP_ATOMIC);
865
866 /* Be defensive about the mac_header - some kernels have a bug where a
867 * frame can be delivered to the driver with mac_header initialised
868 * to ~0U and this causes a crash when the pointer is dereferenced to
869 * access part of the Ethernet header.
870 */
871 if (!skb_mac_header_was_set(skb))
872 skb_reset_mac_header(skb);
873
874 SLSI_NET_DBG3(dev, SLSI_TX, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb)->h_proto));
875
876 if (!ndev_vif->is_available) {
877 SLSI_NET_WARN(dev, "vif NOT available\n");
878 r = -EFAULT;
879 goto evaluate;
880 }
881 if (skb->queue_mapping == SLSI_NETIF_Q_DISCARD) {
882 SLSI_NET_WARN(dev, "Discard Queue :: Packet Dropped\n");
883 r = -EIO;
884 goto evaluate;
885 }
886
887#ifdef CONFIG_SCSC_WLAN_DEBUG
888#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
889 known_users = refcount_read(&skb->users);
890#else
891 known_users = atomic_read(&skb->users);
892#endif
893#endif
894
895#ifndef CONFIG_ARM
896 skb = slsi_netif_tcp_ack_suppression_pkt(dev, skb);
897 if (!skb) {
898 slsi_wakeunlock(&sdev->wlan_wl);
899 if (original_skb)
900 slsi_kfree_skb(original_skb);
901 return NETDEV_TX_OK;
902 }
903#endif
904
905 /* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
906 r = slsi_tx_data(sdev, dev, skb);
907evaluate:
908 if (r == 0) {
909 /**
910 * A copy has been passed down and successfully transmitted
911 * and freed....here we free the original coming from the
912 * upper network layers....if a copy was passed down.
913 */
914 if (original_skb)
915 slsi_kfree_skb(original_skb);
916 /* skb freed by lower layers on success...enjoy */
917
918 ndev_vif->tx_packets[traffic_q]++;
919 ndev_vif->stats.tx_packets++;
920 ndev_vif->stats.tx_bytes += packet_len;
921 r = NETDEV_TX_OK;
922 } else {
923 /**
924 * Failed to send:
925 * - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
926 * NOT discarded by lower layers and NETDEV_TX_BUSY should
927 * be returned to upper layers: this will cause the skb
928 * (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
929 * to be requeued ...
930 * NOTE THAT it's the original skb that will be retried
931 * by upper netstack.
932 * THIS CONDITION SHOULD NOT BE REACHED...NEVER...see the
933 * handling below.
934 *
935 * - with any other -ERR instead return the error: this
936 * anyway lets the kernel think that the SKB has
937 * been consumed, and we drop the frame and free it.
938 *
939 * - a WARN_ON() takes care to ensure the SKB has NOT been
940 * freed by someone, even though this was NOT supposed to happen,
941 * just before the actual freeing.
942 *
943 */
944 if (r == -ENOSPC) {
945 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
946 ndev_vif->stats.tx_fifo_errors++;
947 /* Free the local copy if any ... */
948 if (original_skb)
949 slsi_kfree_skb(skb);
950 r = NETDEV_TX_BUSY;
951 } else {
952#ifdef CONFIG_SCSC_WLAN_DEBUG
953#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
954 WARN_ON(known_users && refcount_read(&skb->users) != known_users);
955#else
956 WARN_ON(known_users && atomic_read(&skb->users) != known_users);
957#endif
958#endif
959 if (original_skb)
960 slsi_kfree_skb(original_skb);
961 slsi_kfree_skb(skb);
962 ndev_vif->stats.tx_dropped++;
963 /* We return the ORIGINAL error 'r' anyway,
964 * BUT the Kernel treats it as TX complete
965 * and assumes the SKB has been consumed.
966 */
967 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
968 }
969 }
970 /* SKBs are always considered consumed if the driver
971 * returns NETDEV_TX_OK.
972 */
973 slsi_wakeunlock(&sdev->wlan_wl);
974 return r;
975}
976
977static netdev_features_t slsi_net_fix_features(struct net_device *dev, netdev_features_t features)
978{
979 SLSI_UNUSED_PARAMETER(dev);
980
981#ifdef CONFIG_SCSC_WLAN_SG
982 SLSI_NET_DBG1(dev, SLSI_RX, "Scatter-gather and GSO enabled\n");
983 features |= NETIF_F_SG;
984 features |= NETIF_F_GSO;
985#endif
986
987#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
988 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO enabled\n");
989 features |= NETIF_F_GRO;
990#else
991 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO disabled\n");
992 features &= ~NETIF_F_GRO;
993#endif
994 return features;
995}
996
997static void slsi_set_multicast_list(struct net_device *dev)
998{
999 struct netdev_vif *ndev_vif = netdev_priv(dev);
1000 u8 count, i = 0;
1001 u8 mdns_addr[ETH_ALEN] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
1002
1003#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1004 u8 mc_addr_prefix[3] = { 0x01, 0x00, 0x5e };
1005#else
1006 u8 mdns6_addr[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
1007 const u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
1008 u8 ipv6addr_suffix[3];
1009#endif
1010 struct netdev_hw_addr *ha;
1011
1012 if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
1013 return;
1014
1015 if (!ndev_vif->is_available) {
1016 SLSI_NET_DBG1(dev, SLSI_NETDEV, "vif NOT available\n");
1017 return;
1018 }
1019
1020 count = netdev_mc_count(dev);
1021 if (!count)
1022 goto exit;
1023
1024#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1025 slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
1026 memcpy(ipv6addr_suffix, &ndev_vif->ipv6address.s6_addr[13], 3);
1027 slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
1028#endif
1029
1030 netdev_for_each_mc_addr(ha, dev) {
1031#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1032 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) || /*mDns is handled separately*/
1033 (memcmp(ha->addr, mc_addr_prefix, 3))) { /*only consider IPv4 multicast addresses*/
1034#else
1035 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) ||
1036 (!memcmp(ha->addr, mdns6_addr, ETH_ALEN)) || /*mDns is handled separately*/
1037 (!memcmp(ha->addr, solicited_node_addr, 3) &&
1038 !memcmp(&ha->addr[3], ipv6addr_suffix, 3))) { /* local multicast addr handled separately*/
1039#endif
1040
1041 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Drop MAC %pM\n", ha->addr);
1042 continue;
1043 }
1044 if (i == SLSI_MC_ADDR_ENTRY_MAX) {
1045 SLSI_NET_WARN(dev, "MAC list has reached max limit (%d), actual count %d\n", SLSI_MC_ADDR_ENTRY_MAX, count);
1046 break;
1047 }
1048
1049 SLSI_NET_DBG3(dev, SLSI_NETDEV, "idx %d MAC %pM\n", i, ha->addr);
1050 SLSI_ETHER_COPY(ndev_vif->sta.regd_mc_addr[i++], ha->addr);
1051 }
1052
1053exit:
1054 ndev_vif->sta.regd_mc_addr_count = i;
1055}
1056
1057static int slsi_set_mac_address(struct net_device *dev, void *addr)
1058{
1059 struct sockaddr *sa = (struct sockaddr *)addr;
1060
1061 SLSI_NET_DBG1(dev, SLSI_NETDEV, "slsi_set_mac_address %pM\n", sa->sa_data);
1062 SLSI_ETHER_COPY(dev->dev_addr, sa->sa_data);
1063 return 0;
1064}
1065
1066static const struct net_device_ops slsi_netdev_ops = {
1067 .ndo_open = slsi_net_open,
1068 .ndo_stop = slsi_net_stop,
1069 .ndo_start_xmit = slsi_net_hw_xmit,
1070 .ndo_do_ioctl = slsi_net_ioctl,
1071 .ndo_get_stats = slsi_net_get_stats,
1072 .ndo_select_queue = slsi_net_select_queue,
1073 .ndo_fix_features = slsi_net_fix_features,
1074 .ndo_set_rx_mode = slsi_set_multicast_list,
1075 .ndo_set_mac_address = slsi_set_mac_address,
1076};
1077
1078static void slsi_if_setup(struct net_device *dev)
1079{
1080 ether_setup(dev);
1081 dev->netdev_ops = &slsi_netdev_ops;
1082#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
1083 dev->needs_free_netdev = true;
1084#else
1085 dev->destructor = free_netdev;
1086#endif
1087}
1088
1089#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1090
1091#if defined(CONFIG_SOC_EXYNOS9610) || defined(CONFIG_SOC_EXYNOS9630) || defined(CONFIG_SOC_EXYNOS3830)
1092#define SCSC_NETIF_RPS_CPUS_MASK "fe"
1093#else
1094#define SCSC_NETIF_RPS_CPUS_MASK "0"
1095#endif
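/* Note: "fe" is a hex CPU bitmap (11111110b), so on the SoCs listed above RX
 * packet steering is spread over CPUs 1-7 and kept off CPU0; "0" leaves RPS
 * disabled on other platforms.
 */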
1096
1097static void slsi_netif_rps_map_clear(struct net_device *dev)
1098{
1099 struct rps_map *map;
1100
1101 map = rcu_dereference_protected(dev->_rx->rps_map, 1);
1102 if (map) {
1103 RCU_INIT_POINTER(dev->_rx->rps_map, NULL);
1104 kfree_rcu(map, rcu);
1105 SLSI_NET_INFO(dev, "clear rps_cpus map\n");
1106 }
1107}
1108
1109static int slsi_netif_rps_map_set(struct net_device *dev, char *buf, size_t len)
1110{
1111 struct rps_map *old_map, *map;
1112 cpumask_var_t mask;
1113 int err, cpu, i;
1114 static DEFINE_SPINLOCK(rps_map_lock);
1115
1116 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1117 return -ENOMEM;
1118
1119 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1120 if (err) {
1121 free_cpumask_var(mask);
1122 SLSI_NET_WARN(dev, "CPU bitmap parse failed\n");
1123 return err;
1124 }
1125
1126 map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL);
1127 if (!map) {
1128 free_cpumask_var(mask);
1129 SLSI_NET_WARN(dev, "CPU mask alloc failed\n");
1130 return -ENOMEM;
1131 }
1132
1133 i = 0;
1134 for_each_cpu_and(cpu, mask, cpu_online_mask)
1135 map->cpus[i++] = cpu;
1136
1137 if (i) {
1138 map->len = i;
1139 } else {
1140 kfree(map);
1141 map = NULL;
1142 }
1143
1144 spin_lock(&rps_map_lock);
1145 old_map = rcu_dereference_protected(dev->_rx->rps_map, lockdep_is_held(&rps_map_lock));
1146 rcu_assign_pointer(dev->_rx->rps_map, map);
1147 spin_unlock(&rps_map_lock);
1148
1149 if (map)
1150 static_key_slow_inc(&rps_needed);
1151 if (old_map)
1152 static_key_slow_dec(&rps_needed);
1153
1154 if (old_map)
1155 kfree_rcu(old_map, rcu);
1156
1157 free_cpumask_var(mask);
1158 SLSI_NET_INFO(dev, "rps_cpus map set(%s)\n", buf);
1159 return len;
1160}
1161#endif
1162
1163int slsi_netif_add_locked(struct slsi_dev *sdev, const char *name, int ifnum)
1164{
1165 struct net_device *dev = NULL;
1166 struct netdev_vif *ndev_vif;
1167 struct wireless_dev *wdev;
1168 int alloc_size, txq_count = 0, ret;
1169
1170 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1171
1172 if (WARN_ON(!sdev || ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES || sdev->netdev[ifnum]))
1173 return -EINVAL;
1174
1175 alloc_size = sizeof(struct netdev_vif);
1176
1177 txq_count = SLSI_NETIF_Q_PEER_START + (SLSI_NETIF_Q_PER_PEER * (SLSI_ADHOC_PEER_CONNECTIONS_MAX));
1178
1179#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
1180 dev = alloc_netdev_mqs(alloc_size, name, NET_NAME_PREDICTABLE, slsi_if_setup, txq_count, 1);
1181#else
1182 dev = alloc_netdev_mqs(alloc_size, name, slsi_if_setup, txq_count, 1);
1183#endif
1184 if (!dev) {
1185 SLSI_ERR(sdev, "Failed to allocate private data for netdev\n");
1186 return -ENOMEM;
1187 }
1188
1189 /* Reserve space in skb for later use */
1190 dev->needed_headroom = SLSI_NETIF_SKB_HEADROOM;
1191 dev->needed_tailroom = SLSI_NETIF_SKB_TAILROOM;
1192
1193 ret = dev_alloc_name(dev, dev->name);
1194 if (ret < 0)
1195 goto exit_with_error;
1196
1197 ndev_vif = netdev_priv(dev);
1198 memset(ndev_vif, 0x00, sizeof(*ndev_vif));
1199 SLSI_MUTEX_INIT(ndev_vif->vif_mutex);
1200 SLSI_MUTEX_INIT(ndev_vif->scan_mutex);
1201 SLSI_MUTEX_INIT(ndev_vif->scan_result_mutex);
1202 skb_queue_head_init(&ndev_vif->ba_complete);
1203 slsi_sig_send_init(&ndev_vif->sig_wait);
1204 ndev_vif->sdev = sdev;
1205 ndev_vif->ifnum = ifnum;
1206 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1207#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1208 slsi_spinlock_create(&ndev_vif->ipv6addr_lock);
1209#endif
1210 slsi_spinlock_create(&ndev_vif->peer_lock);
1211 atomic_set(&ndev_vif->ba_flush, 0);
1212
1213 /* Reserve memory for the peer database - Not required for p2p0/nan interface */
1214 if (!(SLSI_IS_VIF_INDEX_P2P(ndev_vif) || SLSI_IS_VIF_INDEX_NAN(ndev_vif))) {
1215 int queueset;
1216
1217 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1218 ndev_vif->peer_sta_record[queueset] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queueset]), GFP_KERNEL);
1219
1220 if (!ndev_vif->peer_sta_record[queueset]) {
1221 int j;
1222
1223 SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queueset:%d)\n", queueset);
1224
1225 /* Free previously allocated peer database memory till current queueset */
1226 for (j = 0; j < queueset; j++) {
1227 kfree(ndev_vif->peer_sta_record[j]);
1228 ndev_vif->peer_sta_record[j] = NULL;
1229 }
1230
1231 ret = -ENOMEM;
1232 goto exit_with_error;
1233 }
1234 }
1235 }
1236
1237 /* The default power mode in the host */
1238 if (slsi_is_rf_test_mode_enabled()) {
1239 SLSI_NET_ERR(dev, "*#rf# rf test mode set is enabled.\n");
1240 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
1241 } else {
1242 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
1243 }
1244
1245 INIT_LIST_HEAD(&ndev_vif->sta.network_map);
1246 SLSI_DBG1(sdev, SLSI_NETDEV, "ifnum=%d\n", ndev_vif->ifnum);
1247
1248 /* For HS2 interface */
1249 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
1250 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1251
1252 /* For p2p0 interface */
1253 else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1254 ret = slsi_p2p_init(sdev, ndev_vif);
1255 if (ret)
1256 goto exit_with_error;
1257 }
1258
1259 INIT_DELAYED_WORK(&ndev_vif->scan_timeout_work, slsi_scan_ind_timeout_handle);
1260
1261 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_data, "slsi_wlan_rx_data", slsi_rx_netdev_data_work);
1262 if (ret)
1263 goto exit_with_error;
1264
1265 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_mlme, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work);
1266 if (ret) {
1267 slsi_skb_work_deinit(&ndev_vif->rx_data);
1268 goto exit_with_error;
1269 }
1270
1271 wdev = &ndev_vif->wdev;
1272
1273 dev->ieee80211_ptr = wdev;
1274 wdev->wiphy = sdev->wiphy;
1275 wdev->netdev = dev;
1276 wdev->iftype = NL80211_IFTYPE_STATION;
1277 SET_NETDEV_DEV(dev, sdev->dev);
1278
1279 /* We are not ready to send data yet. */
1280 netif_carrier_off(dev);
1281
1282#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1283 if (strcmp(name, CONFIG_SCSC_AP_INTERFACE_NAME) == 0)
1284 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
1285 else
1286 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1287#else
1288 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1289#endif
1290 SLSI_DBG1(sdev, SLSI_NETDEV, "Add:%pM\n", dev->dev_addr);
1291 rcu_assign_pointer(sdev->netdev[ifnum], dev);
1292 ndev_vif->delete_probe_req_ies = false;
1293 ndev_vif->probe_req_ies = NULL;
1294 ndev_vif->probe_req_ie_len = 0;
1295 ndev_vif->drv_in_p2p_procedure = false;
1296
1297#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1298 slsi_netif_rps_map_set(dev, SCSC_NETIF_RPS_CPUS_MASK, strlen(SCSC_NETIF_RPS_CPUS_MASK));
1299#endif
1300 return 0;
1301
1302exit_with_error:
1303 mutex_lock(&sdev->netdev_remove_mutex);
1304 free_netdev(dev);
1305 mutex_unlock(&sdev->netdev_remove_mutex);
1306 return ret;
1307}
1308
1309int slsi_netif_dynamic_iface_add(struct slsi_dev *sdev, const char *name)
1310{
1311 int index = -EINVAL;
1312 int err;
1313
1314 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1315
1316#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1317 if (sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN] == sdev->netdev_ap) {
1318 rcu_assign_pointer(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN], NULL);
1319 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1320 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1321 }
1322#else
1323 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1324 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1325#endif
1326
1327 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1328 return index;
1329}
1330
1331int slsi_netif_init(struct slsi_dev *sdev)
1332{
1333 int i;
1334
1335 SLSI_DBG3(sdev, SLSI_NETDEV, "\n");
1336
1337 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1338
1339 /* Initialize all other netdev interfaces to NULL */
1340 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1341 RCU_INIT_POINTER(sdev->netdev[i], NULL);
1342
1343 if (slsi_netif_add_locked(sdev, "wlan%d", SLSI_NET_INDEX_WLAN) != 0) {
1344 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1345 return -EINVAL;
1346 }
1347
1348 if (slsi_netif_add_locked(sdev, "p2p%d", SLSI_NET_INDEX_P2P) != 0) {
1349 rtnl_lock();
1350 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1351 rtnl_unlock();
1352 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1353 return -EINVAL;
1354 }
1355#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1356#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1357 if (slsi_netif_add_locked(sdev, CONFIG_SCSC_AP_INTERFACE_NAME, SLSI_NET_INDEX_P2PX_SWLAN) != 0) {
1358 rtnl_lock();
1359 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1360 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1361 rtnl_unlock();
1362 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1363 return -EINVAL;
1364 }
1365#endif
1366#endif
1367#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
1368 if (slsi_netif_add_locked(sdev, "nan%d", SLSI_NET_INDEX_NAN) != 0) {
1369 rtnl_lock();
1370 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1371 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1372#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1373#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1374 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
1375#endif
1376#endif
1377 rtnl_unlock();
1378 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1379 return -EINVAL;
1380 }
1381#endif
1382 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1383 return 0;
1384}
1385
1386static int slsi_netif_register_locked(struct slsi_dev *sdev, struct net_device *dev)
1387{
1388 struct netdev_vif *ndev_vif = netdev_priv(dev);
1389 int err;
1390
1391 WARN_ON(!rtnl_is_locked());
1392 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1393 if (atomic_read(&ndev_vif->is_registered)) {
1394 SLSI_NET_ERR(dev, "Register:%pM Failed: Already registered\n", dev->dev_addr);
1395 return 0;
1396 }
1397
1398 err = register_netdevice(dev);
1399 if (err)
1400 SLSI_NET_ERR(dev, "Register:%pM Failed\n", dev->dev_addr);
1401 else
1402 atomic_set(&ndev_vif->is_registered, 1);
1403 return err;
1404}
1405
1406int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1407{
1408 int err;
1409
1410 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1411 err = slsi_netif_register_locked(sdev, dev);
1412 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1413 return err;
1414}
1415
1416int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev)
1417{
1418 int err;
1419
1420 rtnl_lock();
1421 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1422 err = slsi_netif_register_locked(sdev, dev);
1423 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1424 rtnl_unlock();
1425 return err;
1426}
1427
1428void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev)
1429{
1430 int i;
1431 struct netdev_vif *ndev_vif = netdev_priv(dev);
1432
1433 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Unregister:%pM\n", dev->dev_addr);
1434
1435 WARN_ON(!rtnl_is_locked());
1436 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1437
1438 if (atomic_read(&ndev_vif->is_registered)) {
1439 netif_tx_disable(dev);
1440 netif_carrier_off(dev);
1441
1442 slsi_stop_net_dev(sdev, dev);
1443 }
1444
1445 rcu_assign_pointer(sdev->netdev[ndev_vif->ifnum], NULL);
1446 synchronize_rcu();
1447
1448 /* Free memory of the peer database - Not required for p2p0 interface */
1449 if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1450 int queueset;
1451
1452 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1453 kfree(ndev_vif->peer_sta_record[queueset]);
1454 ndev_vif->peer_sta_record[queueset] = NULL;
1455 }
1456 }
1457
1458 if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1459 slsi_p2p_deinit(sdev, ndev_vif);
1460 } else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
1461 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1462 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1463 }
1464
1465 cancel_delayed_work(&ndev_vif->scan_timeout_work);
1466 ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
1467
1468 slsi_skb_work_deinit(&ndev_vif->rx_data);
1469 slsi_skb_work_deinit(&ndev_vif->rx_mlme);
1470
1471 for (i = 0; i < SLSI_SCAN_MAX; i++)
1472 slsi_purge_scan_results(ndev_vif, i);
1473
1474 slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
1475 slsi_roam_channel_cache_prune(dev, 0);
1476 kfree(ndev_vif->probe_req_ies);
1477
1478#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1479 slsi_netif_rps_map_clear(dev);
1480#endif
1481 if (atomic_read(&ndev_vif->is_registered)) {
1482 atomic_set(&ndev_vif->is_registered, 0);
1483 unregister_netdevice(dev);
1484 } else {
1485 mutex_lock(&sdev->netdev_remove_mutex);
1486 free_netdev(dev);
1487 mutex_unlock(&sdev->netdev_remove_mutex);
1488 }
1489}
1490
1491void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1492{
1493 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1494 slsi_netif_remove_locked(sdev, dev);
1495 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1496}
1497
1498void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev)
1499{
1500 rtnl_lock();
1501 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1502 slsi_netif_remove_locked(sdev, dev);
1503 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1504 rtnl_unlock();
1505}
1506
1507void slsi_netif_remove_all(struct slsi_dev *sdev)
1508{
1509 int i;
1510
1511 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1512 rtnl_lock();
1513 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1514 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1515 if (sdev->netdev[i])
1516 slsi_netif_remove_locked(sdev, sdev->netdev[i]);
1517 rcu_assign_pointer(sdev->netdev_ap, NULL);
1518 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1519 rtnl_unlock();
1520}
1521
1522void slsi_netif_deinit(struct slsi_dev *sdev)
1523{
1524 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1525 slsi_netif_remove_all(sdev);
1526}
1527
1528#ifndef CONFIG_ARM
1529static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev)
1530{
1531 int index;
1532 struct netdev_vif *ndev_vif = netdev_priv(dev);
1533 struct slsi_tcp_ack_s *tcp_ack;
1534
1535 ndev_vif->last_tcp_ack = NULL;
1536 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1537 tcp_ack = &ndev_vif->ack_suppression[index];
1538 tcp_ack->dport = 0;
1539 tcp_ack->daddr = 0;
1540 tcp_ack->sport = 0;
1541 tcp_ack->saddr = 0;
1542 tcp_ack->ack_seq = 0;
1543 tcp_ack->count = 0;
1544 tcp_ack->max = 0;
1545 tcp_ack->age = 0;
1546 skb_queue_head_init(&tcp_ack->list);
1547#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1548 timer_setup(&tcp_ack->timer, slsi_netif_tcp_ack_suppression_timeout, 0);
1549#else
1550 tcp_ack->timer.function = slsi_netif_tcp_ack_suppression_timeout;
1551 tcp_ack->timer.data = (unsigned long)tcp_ack;
1552 init_timer(&tcp_ack->timer);
1553#endif
1554 tcp_ack->state = 1;
1555 slsi_spinlock_create(&tcp_ack->lock);
1556 }
1557
1558 memset(&ndev_vif->tcp_ack_stats, 0, sizeof(struct slsi_tcp_ack_stats));
1559 return 0;
1560}
1561
1562static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev)
1563{
1564 int index;
1565 struct netdev_vif *ndev_vif = netdev_priv(dev);
1566 struct slsi_tcp_ack_s *tcp_ack;
1567
1568 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
1569 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1570 tcp_ack = &ndev_vif->ack_suppression[index];
1571 del_timer_sync(&tcp_ack->timer);
1572 slsi_spinlock_lock(&tcp_ack->lock);
1573 tcp_ack->state = 0;
1574 skb_queue_purge(&tcp_ack->list);
1575 slsi_spinlock_unlock(&tcp_ack->lock);
1576 }
1577 ndev_vif->last_tcp_ack = NULL;
1578 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
1579 return 0;
1580}
1581
1582#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1583static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t)
1584#else
1585static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data)
1586#endif
1587{
1588#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1589 struct slsi_tcp_ack_s *tcp_ack = from_timer(tcp_ack, t, timer);
1590#else
1591 struct slsi_tcp_ack_s *tcp_ack = (struct slsi_tcp_ack_s *)data;
1592#endif
1593 struct sk_buff *skb;
1594 struct netdev_vif *ndev_vif;
1595 struct slsi_dev *sdev;
1596 int r;
1597
1598 if (!tcp_ack)
1599 return;
1600
1601 if (!tcp_ack->state)
1602 return;
1603
1604 slsi_spinlock_lock(&tcp_ack->lock);
1605 while ((skb = skb_dequeue(&tcp_ack->list)) != 0) {
1606 tcp_ack->count = 0;
1607
1608 if (!skb->dev) {
1609 kfree_skb(skb);
1610 slsi_spinlock_unlock(&tcp_ack->lock);
1611 return;
1612 }
1613 ndev_vif = netdev_priv(skb->dev);
1614 sdev = ndev_vif->sdev;
1615 ndev_vif->tcp_ack_stats.tack_timeout++;
1616
1617 r = slsi_tx_data(sdev, skb->dev, skb);
1618 if (r == 0) {
1619 ndev_vif->tcp_ack_stats.tack_sent++;
1620 tcp_ack->last_sent = ktime_get();
1621 } else if (r == -ENOSPC) {
1622 ndev_vif->tcp_ack_stats.tack_dropped++;
1623 slsi_kfree_skb(skb);
1624 } else {
1625 ndev_vif->tcp_ack_stats.tack_dropped++;
1626 }
1627 }
1628 slsi_spinlock_unlock(&tcp_ack->lock);
1629}
1630
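/* Illustrative example: for a TCP header whose options begin with the MSS
 * option bytes 0x02 0x04 0x05 0xb4, calling the helper below with
 * TCP_ACK_SUPPRESSION_OPTION_MSS returns (0x05 << 8) | 0xb4 = 1460.
 */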
1631static int slsi_netif_tcp_ack_suppression_option(struct sk_buff *skb, u32 option)
1632{
1633 unsigned char *options;
1634 u32 optlen = 0, len = 0;
1635
1636 if (tcp_hdr(skb)->doff > 5)
1637 optlen = (tcp_hdr(skb)->doff - 5) * 4;
1638
1639 options = ((u8 *)tcp_hdr(skb)) + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET;
1640
1641 while (optlen > 0) {
1642 switch (options[0]) {
1643 case TCP_ACK_SUPPRESSION_OPTION_EOL:
1644 return 0;
1645 case TCP_ACK_SUPPRESSION_OPTION_NOP:
1646 len = 1;
1647 break;
1648 case TCP_ACK_SUPPRESSION_OPTION_MSS:
1649 if (option == TCP_ACK_SUPPRESSION_OPTION_MSS)
1650 return ((options[2] << 8) | options[3]);
1651 len = options[1];
1652 break;
1653 case TCP_ACK_SUPPRESSION_OPTION_WINDOW:
1654 if (option == TCP_ACK_SUPPRESSION_OPTION_WINDOW)
1655 return options[2];
1656 len = 1;
1657 break;
1658 case TCP_ACK_SUPPRESSION_OPTION_SACK:
1659 if (option == TCP_ACK_SUPPRESSION_OPTION_SACK)
1660 return 1;
1661 len = options[1];
1662 break;
1663 default:
1664 len = options[1];
1665 break;
1666 }
1667 /* if length field in TCP options is 0, or greater than
1668 * total options length, then options are incorrect; return here
1669 */
1670 if ((len == 0) || (len > optlen)) {
1671 SLSI_DBG_HEX_NODEV(SLSI_TX, skb->data, skb->len < 128 ? skb->len : 128, "SKB:\n");
1672 return 0;
1673 }
1674 optlen -= len;
1675 options += len;
1676 }
1677 return 0;
1678}
1679
static void slsi_netif_tcp_ack_suppression_syn(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_tcp_ack_s *tcp_ack;
	int index;

	SLSI_NET_DBG2(dev, SLSI_TX, "\n");
	for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
		tcp_ack = &ndev_vif->ack_suppression[index];
		slsi_spinlock_lock(&tcp_ack->lock);

		if (!tcp_ack->state) {
			slsi_spinlock_unlock(&tcp_ack->lock);
			return;
		}
		/* Recover old/hung/unused record. */
		if (tcp_ack->daddr) {
			if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT * 1000) {
				SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
				skb_queue_purge(&tcp_ack->list);
				tcp_ack->dport = 0;
				tcp_ack->sport = 0;
				tcp_ack->daddr = 0;
				tcp_ack->saddr = 0;
				tcp_ack->count = 0;
				tcp_ack->ack_seq = 0;
				del_timer(&tcp_ack->timer);
			}
		}

		if (tcp_ack->daddr == 0) {
			SLSI_NET_DBG2(dev, SLSI_TX, "add at %d (%pI4.%d > %pI4.%d)\n", index, &ip_hdr(skb)->saddr, ntohs(tcp_hdr(skb)->source), &ip_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest));
			tcp_ack->daddr = ip_hdr(skb)->daddr;
			tcp_ack->saddr = ip_hdr(skb)->saddr;
			tcp_ack->dport = tcp_hdr(skb)->dest;
			tcp_ack->sport = tcp_hdr(skb)->source;
			tcp_ack->count = 0;
			tcp_ack->ack_seq = 0;
			tcp_ack->slow_start_count = 0;
			tcp_ack->tcp_slow_start = true;
			if (tcp_ack_suppression_monitor) {
				tcp_ack->max = 0;
				tcp_ack->age = 0;
			} else {
				tcp_ack->max = tcp_ack_suppression_max;
				tcp_ack->age = tcp_ack_suppression_timeout;
			}
			tcp_ack->last_sent = ktime_get();

			if (tcp_ack_suppression_monitor) {
				tcp_ack->last_sample_time = ktime_get();
				tcp_ack->last_ack_seq = 0;
				tcp_ack->last_tcp_rate = 0;
				tcp_ack->num_bytes = 0;
				tcp_ack->hysteresis = 0;
			}
#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
			tcp_ack->stream_id = index;
#endif
			/* read and validate the window scaling multiplier */
			tcp_ack->window_multiplier = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_WINDOW);
			if (tcp_ack->window_multiplier > 14)
				tcp_ack->window_multiplier = 0;
			tcp_ack->mss = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_MSS);
			SLSI_NET_DBG2(dev, SLSI_TX, "options: mss:%u, window:%u\n", tcp_ack->mss, tcp_ack->window_multiplier);
			SCSC_HIP4_SAMPLER_TCP_SYN(ndev_vif->sdev->minor_prof, index, tcp_ack->mss);
			SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, index, be32_to_cpu(tcp_hdr(skb)->seq));
			slsi_spinlock_unlock(&tcp_ack->lock);
			return;
		}
		slsi_spinlock_unlock(&tcp_ack->lock);
	}
}

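/* A TCP FIN tears the connection down: purge the cached acks and release the
 * matching ack_suppression record.
 */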
static void slsi_netif_tcp_ack_suppression_fin(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_tcp_ack_s *tcp_ack;
	int index;

	SLSI_NET_DBG2(dev, SLSI_TX, "\n");
	for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
		tcp_ack = &ndev_vif->ack_suppression[index];
		slsi_spinlock_lock(&tcp_ack->lock);

		if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
		    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
			SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
			skb_queue_purge(&tcp_ack->list);
			tcp_ack->dport = 0;
			tcp_ack->sport = 0;
			tcp_ack->daddr = 0;
			tcp_ack->saddr = 0;
			tcp_ack->count = 0;
			tcp_ack->ack_seq = 0;

			if (tcp_ack_suppression_monitor) {
				tcp_ack->last_ack_seq = 0;
				tcp_ack->last_tcp_rate = 0;
				tcp_ack->num_bytes = 0;
				tcp_ack->hysteresis = 0;
			}

			del_timer(&tcp_ack->timer);
#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
			tcp_ack->stream_id = 0;
#endif
			SCSC_HIP4_SAMPLER_TCP_FIN(ndev_vif->sdev->minor_prof, index);
			slsi_spinlock_unlock(&tcp_ack->lock);
			return;
		}
		slsi_spinlock_unlock(&tcp_ack->lock);
	}
}

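/* Entry point for TCP ack suppression on the transmit path: decide whether a
 * pure TCP ack can be held back on its flow record or must be forwarded.
 * Returns the SKB to transmit now, or NULL when the ack has been cached.
 */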
static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	int index, found;
	struct slsi_tcp_ack_s *tcp_ack;
	int forward_now = 0, flush = 0;
	struct sk_buff *cskb = NULL;
	u32 tcp_recv_window_size = 0;

	if (tcp_ack_suppression_disable)
		return skb;

	if (tcp_ack_suppression_disable_2g && !SLSI_IS_VIF_CHANNEL_5G(ndev_vif))
		return skb;

	/* For AP type (AP or P2P GO), check whether the packet is local or intra-BSS.
	 * For intra-BSS packets the IP and TCP headers are not set, so return the SKB.
	 */
	if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (compare_ether_addr(eth_hdr(skb)->h_source, dev->dev_addr) != 0))
		return skb;

	/* Return any SKB that does not match the suppression criteria. */
	if (be16_to_cpu(eth_hdr(skb)->h_proto) != ETH_P_IP)
		return skb;
	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return skb;
	if (!skb_transport_header_was_set(skb))
		return skb;
	if (tcp_hdr(skb)->syn) {
		slsi_netif_tcp_ack_suppression_syn(dev, skb);
		return skb;
	}
	if (tcp_hdr(skb)->fin) {
		slsi_netif_tcp_ack_suppression_fin(dev, skb);
		return skb;
	}
	if (!tcp_hdr(skb)->ack)
		return skb;
	if (tcp_hdr(skb)->rst)
		return skb;
	if (tcp_hdr(skb)->urg)
		return skb;

	ndev_vif->tcp_ack_stats.tack_acks++;
	/* If we find a record, leave the spinlock taken until the end of the function. */
	found = 0;
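	/* Fast path: the most recently matched record is cached in
	 * ndev_vif->last_tcp_ack, so back-to-back acks on the same connection
	 * avoid scanning the whole record table.
	 */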
	if (ndev_vif->last_tcp_ack) {
		tcp_ack = ndev_vif->last_tcp_ack;
		slsi_spinlock_lock(&tcp_ack->lock);
		if (!tcp_ack->state) {
			slsi_spinlock_unlock(&tcp_ack->lock);
			ndev_vif->tcp_ack_stats.tack_sent++;
			SLSI_ERR_NODEV("last_tcp_ack record not enabled\n");
			return skb;
		}
		if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
		    (tcp_ack->sport == tcp_hdr(skb)->source) &&
		    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
			found = 1;
			ndev_vif->tcp_ack_stats.tack_lastrecord++;
		} else {
			slsi_spinlock_unlock(&tcp_ack->lock);
		}
	}
	if (found == 0) {
		/* Search for an existing record on this connection. */
		for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
			tcp_ack = &ndev_vif->ack_suppression[index];

			slsi_spinlock_lock(&tcp_ack->lock);

			if (!tcp_ack->state) {
				slsi_spinlock_unlock(&tcp_ack->lock);
				ndev_vif->tcp_ack_stats.tack_sent++;
				SLSI_ERR_NODEV("tcp_ack record %d not enabled\n", index);
				return skb;
			}
			if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
			    (tcp_ack->sport == tcp_hdr(skb)->source) &&
			    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
				found = 1;
				ndev_vif->tcp_ack_stats.tack_searchrecord++;
				break;
			}
			slsi_spinlock_unlock(&tcp_ack->lock);
		}
		if (found == 0) {
			/* No record found, so we cannot suppress the ack; return. */
			ndev_vif->tcp_ack_stats.tack_norecord++;
			ndev_vif->tcp_ack_stats.tack_sent++;
			return skb;
		}
		ndev_vif->last_tcp_ack = tcp_ack;
	}

	/* If it is a duplicate ack, send it straight away without flushing the cache. */
	if (be32_to_cpu(tcp_hdr(skb)->ack_seq) < tcp_ack->ack_seq) {
		/* check for wrap-around */
		if (((s32)((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - (u32)tcp_ack->ack_seq)) < 0) {
			ndev_vif->tcp_ack_stats.tack_dacks++;
			ndev_vif->tcp_ack_stats.tack_sent++;
			slsi_spinlock_unlock(&tcp_ack->lock);
			return skb;
		}
	}

	/* The segment carries data: forward it straight away. */
	if (be16_to_cpu(ip_hdr(skb)->tot_len) > ((ip_hdr(skb)->ihl * 4) + (tcp_hdr(skb)->doff * 4))) {
		SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr(skb)->seq));
		SCSC_HIP4_SAMPLER_TCP_CWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, (skb->sk) ? tcp_sk(skb->sk)->snd_cwnd : 0);
		SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, sysctl_tcp_wmem[2]);
		ndev_vif->tcp_ack_stats.tack_hasdata++;
		forward_now = 1;
		goto _forward_now;
	}

	/* PSH flag set: forward straight away. */
	if (tcp_hdr(skb)->psh) {
		ndev_vif->tcp_ack_stats.tack_psh++;
		forward_now = 1;
		goto _forward_now;
	}

	/* The ECE flag is set on Explicit Congestion Notification capable connections
	 * when the ECT flag is set in the received segment. ECE-marked acks must be
	 * forwarded immediately for ECN to work.
	 */
	if (tcp_hdr(skb)->ece) {
		ndev_vif->tcp_ack_stats.tack_ece++;
		forward_now = 1;
		goto _forward_now;
	}

	if (tcp_ack_suppression_monitor) {
		/* Measure the throughput of the TCP stream by monitoring the bytes
		 * acknowledged by each ack over a sampling period, and apply a
		 * different degree of ack suppression based on the throughput.
		 */
		if (tcp_ack->last_ack_seq)
			tcp_ack->num_bytes += ((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - tcp_ack->last_ack_seq);

		tcp_ack->last_ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
		if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sample_time)) > tcp_ack_suppression_monitor_interval) {
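			/* Sampling interval elapsed: num_bytes * 8 bits over
			 * interval_ms * 1000 microseconds gives the rate in Mbps;
			 * re-tune the suppression limits from it.
			 */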
			u16 acks_max;
			u32 tcp_rate = ((tcp_ack->num_bytes * 8) / (tcp_ack_suppression_monitor_interval * 1000));

			SLSI_NET_DBG2(dev, SLSI_TX, "hysteresis:%u total_bytes:%llu rate:%u Mbps\n",
				      tcp_ack->hysteresis, tcp_ack->num_bytes, tcp_rate);

			/* Hysteresis: change only if the variation from the last value exceeds the threshold. */
			if ((abs(tcp_rate - tcp_ack->last_tcp_rate)) > tcp_ack->hysteresis) {
				if (tcp_rate >= tcp_ack_suppression_rate_very_high) {
					tcp_ack->max = tcp_ack_suppression_rate_very_high_acks;
					tcp_ack->age = tcp_ack_suppression_rate_very_high_timeout;
				} else if (tcp_rate >= tcp_ack_suppression_rate_high) {
					tcp_ack->max = tcp_ack_suppression_rate_high_acks;
					tcp_ack->age = tcp_ack_suppression_rate_high_timeout;
				} else if (tcp_rate >= tcp_ack_suppression_rate_low) {
					tcp_ack->max = tcp_ack_suppression_rate_low_acks;
					tcp_ack->age = tcp_ack_suppression_rate_low_timeout;
				} else {
					tcp_ack->max = 0;
					tcp_ack->age = 0;
				}

				/* Acks should not be suppressed for more than 20% of the
				 * receiver window size; doing so can lead to increased RTT
				 * and a low transmission rate at the TCP sender.
				 */
				if (tcp_ack->window_multiplier)
					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (2 << tcp_ack->window_multiplier);
				else
					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
				SCSC_HIP4_SAMPLER_TCP_RWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, tcp_recv_window_size);

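				/* window/5 is 20% of the receive window in bytes; dividing
				 * by 2 * MSS converts that to a number of acks, since each
				 * delayed ack covers up to two full-size segments.
				 */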
				acks_max = (tcp_recv_window_size / 5) / (2 * tcp_ack->mss);
				if (tcp_ack->max > acks_max)
					tcp_ack->max = acks_max;
			}
			tcp_ack->hysteresis = tcp_rate / 5; /* 20% hysteresis */
			tcp_ack->last_tcp_rate = tcp_rate;
			tcp_ack->num_bytes = 0;
			tcp_ack->last_sample_time = ktime_get();
		}
	}

	/* Do not suppress selective acks. */
	if (slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_SACK)) {
		ndev_vif->tcp_ack_stats.tack_sacks++;

		/* A TCP selective ack suggests TCP segment loss. The TCP sender
		 * may reduce its congestion window and limit the number of segments
		 * it sends before waiting for an ack.
		 * It is best to switch off TCP ack suppression for a period
		 * (approximated here by the tcp_ack_suppression_slow_start_acks
		 * count) and send as many acks as possible to allow the cwnd to
		 * grow at the TCP sender.
		 */
		tcp_ack->slow_start_count = 0;
		tcp_ack->tcp_slow_start = true;
		forward_now = 1;
		goto _forward_now;
	}

	if (be32_to_cpu(tcp_hdr(skb)->ack_seq) == tcp_ack->ack_seq) {
		ndev_vif->tcp_ack_stats.tack_dacks++;
		forward_now = 1;
		goto _forward_now;
	}

	/* When the TCP connection is established, wait until a number of acks
	 * have been sent before applying the suppression rules, to allow the
	 * cwnd to grow at a normal rate at the TCP sender.
	 */
	if (tcp_ack->tcp_slow_start) {
		tcp_ack->slow_start_count++;
		if (tcp_ack->slow_start_count >= tcp_ack_suppression_slow_start_acks) {
			tcp_ack->slow_start_count = 0;
			tcp_ack->tcp_slow_start = false;
		}
		forward_now = 1;
		goto _forward_now;
	}

	/* Do not suppress if the TCP monitor has decided against it. */
	if (tcp_ack_suppression_monitor && (!tcp_ack->max || !tcp_ack->age)) {
		forward_now = 1;
		goto _forward_now;
	}

	/* Do not suppress delayed acks that acknowledge more than 2 TCP
	 * maximum-size segments.
	 */
	if (((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq)) - (tcp_ack->ack_seq) > (2 * tcp_ack->mss)) {
		ndev_vif->tcp_ack_stats.tack_delay_acks++;
		forward_now = 1;
		goto _forward_now;
	}

	/* Do not suppress unless the receive window is large enough.
	 * With a small receive window the cwnd cannot grow much, so
	 * suppressing acks hurts the sender rate by inflating the round
	 * trip time measured at the sender.
	 */
	if (!tcp_ack_suppression_monitor) {
		if (tcp_ack->window_multiplier)
			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (2 << tcp_ack->window_multiplier);
		else
			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
		if (tcp_recv_window_size < tcp_ack_suppression_rcv_window * 1024) {
			ndev_vif->tcp_ack_stats.tack_low_window++;
			forward_now = 1;
			goto _forward_now;
		}
	}

	if (!tcp_ack_suppression_monitor && ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= tcp_ack->age) {
		ndev_vif->tcp_ack_stats.tack_ktime++;
		forward_now = 1;
		goto _forward_now;
	}

	/* Test for a new cache. */
	if (!skb_queue_len(&tcp_ack->list)) {
		skb_queue_tail(&tcp_ack->list, skb);
		tcp_ack->count = 1;
		tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
		if (tcp_ack->age)
			mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
		slsi_spinlock_unlock(&tcp_ack->lock);
		return NULL;
	}
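	/* Common tail: the newer cumulative ack supersedes any ack already
	 * cached on the record, so the old one is counted as suppressed and
	 * freed. The new ack is then either cached or flushed to the transmit
	 * path, depending on the decision taken above.
	 */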
_forward_now:
	cskb = skb_dequeue(&tcp_ack->list);
	if (cskb) {
		if (tcp_ack_suppression_monitor && tcp_ack->age)
			mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
		ndev_vif->tcp_ack_stats.tack_suppressed++;
		slsi_kfree_skb(cskb);
	}
	skb_queue_tail(&tcp_ack->list, skb);
	tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
	tcp_ack->count++;
	if (forward_now) {
		flush = 1;
	} else {
		if (tcp_ack->count >= tcp_ack->max) {
			flush = 1;
			ndev_vif->tcp_ack_stats.tack_max++;
		}
	}
	if (!flush) {
		slsi_spinlock_unlock(&tcp_ack->lock);
		return NULL;
	}
	/* Flush the cache. */
	cskb = skb_dequeue(&tcp_ack->list);
	tcp_ack->count = 0;

	if (tcp_ack->age)
		del_timer(&tcp_ack->timer);

	tcp_ack->last_sent = ktime_get();

	slsi_spinlock_unlock(&tcp_ack->lock);
	ndev_vif->tcp_ack_stats.tack_sent++;
	return cskb;
}
#endif