GitHub/moto-9609/android_kernel_motorola_exynos9610.git: drivers/net/wireless/scsc/netif.c
1/*
2 *
3 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
4 *
5 ****************************************************************************/
6
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/rtnetlink.h>
10#include <net/sch_generic.h>
11#include <linux/if_ether.h>
12#include <scsc/scsc_logring.h>
13
14#include "debug.h"
15#include "netif.h"
16#include "dev.h"
17#include "mgt.h"
18#include "scsc_wifi_fcq.h"
19#include "ioctl.h"
20#include "mib.h"
21#include "hip4_sampler.h"
22
23#define IP4_OFFSET_TO_TOS_FIELD 1
24#define IP6_OFFSET_TO_TC_FIELD_0 0
25#define IP6_OFFSET_TO_TC_FIELD_1 1
26#define FIELD_TO_DSCP 2
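/* Byte offsets, relative to the start of the IP header handed to the
 * classification helpers below, of the IPv4 ToS field and of the two bytes
 * carrying the IPv6 Traffic Class; FIELD_TO_DSCP is the right shift that
 * drops the two ECN bits and leaves the 6-bit DSCP value.
 */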
27
28/* DSCP */
29/* (RFC5865) */
30#define DSCP_VA 0x2C
31/* (RFC3246) */
32#define DSCP_EF 0x2E
33/* (RFC2597) */
34#define DSCP_AF43 0x26
35#define DSCP_AF42 0x24
36#define DSCP_AF41 0x22
37#define DSCP_AF33 0x1E
38#define DSCP_AF32 0x1C
39#define DSCP_AF31 0x1A
40#define DSCP_AF23 0x16
41#define DSCP_AF22 0x14
42#define DSCP_AF21 0x12
43#define DSCP_AF13 0x0E
44#define DSCP_AF12 0x0C
45#define DSCP_AF11 0x0A
46/* (RFC2474) */
47#define CS7 0x38
48#define CS6 0x30
49#define CS5 0x28
50#define CS4 0x20
51#define CS3 0x18
52#define CS2 0x10
53#define CS0 0x00
54/* (RFC3662) */
55#define CS1 0x08
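/* Example: a ToS/Traffic Class byte of 0xB8 shifted right by FIELD_TO_DSCP
 * yields DSCP 0x2E (EF), which the tables below map to UP6 (AC_VO).
 */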
56
57#ifndef CONFIG_ARM
58static bool tcp_ack_suppression_disable;
59module_param(tcp_ack_suppression_disable, bool, S_IRUGO | S_IWUSR);
60MODULE_PARM_DESC(tcp_ack_suppression_disable, "Disable TCP ack suppression feature");
61
62static bool tcp_ack_suppression_disable_2g;
63module_param(tcp_ack_suppression_disable_2g, bool, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(tcp_ack_suppression_disable_2g, "Disable TCP ack suppression for only 2.4GHz band");
65
66static bool tcp_ack_suppression_monitor = true;
67module_param(tcp_ack_suppression_monitor, bool, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(tcp_ack_suppression_monitor, "TCP ack suppression throughput monitor: Y: enable (default), N: disable");
69
70static uint tcp_ack_suppression_monitor_interval = 500;
71module_param(tcp_ack_suppression_monitor_interval, uint, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(tcp_ack_suppression_monitor_interval, "Sampling interval (in ms) for throughput monitor");
73
74static uint tcp_ack_suppression_timeout = 16;
75module_param(tcp_ack_suppression_timeout, uint, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(tcp_ack_suppression_timeout, "Timeout (in ms) before cached TCP ack is flushed to tx");
77
78static uint tcp_ack_suppression_max = 16;
79module_param(tcp_ack_suppression_max, uint, S_IRUGO | S_IWUSR);
80MODULE_PARM_DESC(tcp_ack_suppression_max, "Maximum number of TCP acks suppressed before latest flushed to tx");
81
82static uint tcp_ack_suppression_rate_very_high = 100;
83module_param(tcp_ack_suppression_rate_very_high, int, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high, "Rate (in Mbps) to apply very high degree of suppression");
85
86static uint tcp_ack_suppression_rate_very_high_timeout = 4;
87module_param(tcp_ack_suppression_rate_very_high_timeout, int, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in very high rate");
89
90static uint tcp_ack_suppression_rate_very_high_acks = 20;
91module_param(tcp_ack_suppression_rate_very_high_acks, uint, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_acks, "Maximum number of TCP acks suppressed before latest flushed in very high rate");
93
94static uint tcp_ack_suppression_rate_high = 20;
95module_param(tcp_ack_suppression_rate_high, int, S_IRUGO | S_IWUSR);
96MODULE_PARM_DESC(tcp_ack_suppression_rate_high, "Rate (in Mbps) to apply high degree of suppression");
97
98static uint tcp_ack_suppression_rate_high_timeout = 4;
99module_param(tcp_ack_suppression_rate_high_timeout, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(tcp_ack_suppression_rate_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in high rate");
101
102static uint tcp_ack_suppression_rate_high_acks = 16;
103module_param(tcp_ack_suppression_rate_high_acks, uint, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(tcp_ack_suppression_rate_high_acks, "Maximum number of TCP acks suppressed before latest flushed in high rate");
105
106static uint tcp_ack_suppression_rate_low = 1;
107module_param(tcp_ack_suppression_rate_low, int, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(tcp_ack_suppression_rate_low, "Rate (in Mbps) to apply low degree of suppression");
109
110static uint tcp_ack_suppression_rate_low_timeout = 4;
111module_param(tcp_ack_suppression_rate_low_timeout, int, S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(tcp_ack_suppression_rate_low_timeout, "Timeout (in ms) before cached TCP ack is flushed in low rate");
113
114static uint tcp_ack_suppression_rate_low_acks = 10;
115module_param(tcp_ack_suppression_rate_low_acks, uint, S_IRUGO | S_IWUSR);
116MODULE_PARM_DESC(tcp_ack_suppression_rate_low_acks, "Maximum number of TCP acks suppressed before latest flushed in low rate");
117
118static uint tcp_ack_suppression_slow_start_acks = 512;
119module_param(tcp_ack_suppression_slow_start_acks, uint, S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(tcp_ack_suppression_slow_start_acks, "Maximum number of Acks sent in slow start");
121
122static uint tcp_ack_suppression_rcv_window = 128;
123module_param(tcp_ack_suppression_rcv_window, uint, S_IRUGO | S_IWUSR);
124MODULE_PARM_DESC(tcp_ack_suppression_rcv_window, "Receive window size (in unit of Kbytes) that triggers Ack suppression");
125
126#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
127static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t);
128#else
129static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data);
130#endif
131static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev);
132static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev);
133static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb);
134#endif
135
136/* Net Device callback operations */
137static int slsi_net_open(struct net_device *dev)
138{
139 struct netdev_vif *ndev_vif = netdev_priv(dev);
140 struct slsi_dev *sdev = ndev_vif->sdev;
141 int err;
142 unsigned char dev_addr_zero_check[ETH_ALEN];
143
144 if (WARN_ON(ndev_vif->is_available))
145 return -EINVAL;
146
147 if (sdev->mlme_blocked) {
148 SLSI_NET_WARN(dev, "Fail: called when MLME in blocked state\n");
149 return -EIO;
150 }
151
152 slsi_wakelock(&sdev->wlan_wl);
153
154 /* Check whether RF test mode was requested. */
155 slsi_check_rf_test_mode();
156
157 err = slsi_start(sdev);
158 if (WARN_ON(err)) {
159 slsi_wakeunlock(&sdev->wlan_wl);
160 return err;
161 }
162
163 if (!sdev->netdev_up_count) {
164 slsi_get_hw_mac_address(sdev, sdev->hw_addr);
165 /* Assign Addresses */
166 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_WLAN], sdev->hw_addr);
167
168 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2P], sdev->hw_addr);
169 sdev->netdev_addresses[SLSI_NET_INDEX_P2P][0] |= 0x02; /* Set the local bit */
170
171 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN], sdev->hw_addr);
172 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][0] |= 0x02; /* Set the local bit */
173 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][4] ^= 0x80; /* EXOR 5th byte with 0x80 */
174#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
175 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_NAN], sdev->hw_addr);
176 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][0] |= 0x02; /* Set the local bit */
177 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][3] ^= 0x80; /* EXOR 4th byte with 0x80 */
178#endif
179 sdev->initial_scan = true;
180 }
181
182 memset(dev_addr_zero_check, 0, ETH_ALEN);
183 if (!memcmp(dev->dev_addr, dev_addr_zero_check, ETH_ALEN)) {
184#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
185 if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
186 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
187 else
188 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
189#else
190 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
191#endif
192 }
193 SLSI_ETHER_COPY(dev->perm_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
194 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
195#ifdef CONFIG_SCSC_WLAN_DEBUG
196 if (ndev_vif->iftype == NL80211_IFTYPE_MONITOR) {
197 err = slsi_start_monitor_mode(sdev, dev);
198 if (WARN_ON(err)) {
199 slsi_wakeunlock(&sdev->wlan_wl);
200 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
201 return err;
202 }
203 }
204#endif
205 SLSI_NET_INFO(dev, "ifnum:%d r:%d MAC:%pM\n", ndev_vif->ifnum, sdev->recovery_status, dev->dev_addr);
206 ndev_vif->is_available = true;
207 sdev->netdev_up_count++;
208
209#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
210 reinit_completion(&ndev_vif->sig_wait.completion);
211#else
212 INIT_COMPLETION(ndev_vif->sig_wait.completion);
213#endif
214#ifndef CONFIG_ARM
215 slsi_netif_tcp_ack_suppression_start(dev);
216#endif
217 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
218
219 netif_tx_start_all_queues(dev);
220 slsi_wakeunlock(&sdev->wlan_wl);
221
222 /* The default power mode in the host. */
223 /* PSID 2511 means unifiForceActive and 1 means active. */
224 if (slsi_is_rf_test_mode_enabled()) {
225 SLSI_NET_INFO(dev, "*#rf# rf test mode set is enabled.\n");
226 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAMING_ENABLED, 0);
227 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, 0);
228 slsi_set_mib_roam(sdev, NULL, 2511, 1);
229 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, 0);
230 }
231
232 return 0;
233}
234
235static int slsi_net_stop(struct net_device *dev)
236{
237 struct netdev_vif *ndev_vif = netdev_priv(dev);
238 struct slsi_dev *sdev = ndev_vif->sdev;
239
240 SLSI_NET_INFO(dev, "ifnum:%d r:%d\n", ndev_vif->ifnum, sdev->recovery_status);
241 slsi_wakelock(&sdev->wlan_wl);
242 netif_tx_stop_all_queues(dev);
243 sdev->initial_scan = false;
244
245 if (!ndev_vif->is_available) {
246 /* May have been taken out by the Chip going down */
247 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available\n");
248 slsi_wakeunlock(&sdev->wlan_wl);
249 return 0;
250 }
251#ifndef SLSI_TEST_DEV
252 if (!slsi_is_rf_test_mode_enabled() && !sdev->recovery_status) {
253 SLSI_NET_DBG1(dev, SLSI_NETDEV, "To user mode\n");
254 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, -55);
255 }
256#endif
257#ifndef CONFIG_ARM
258 slsi_netif_tcp_ack_suppression_stop(dev);
259#endif
260 slsi_stop_net_dev(sdev, dev);
261
262 sdev->allow_switch_40_mhz = true;
263 sdev->allow_switch_80_mhz = true;
264 sdev->acs_channel_switched = false;
265 slsi_wakeunlock(&sdev->wlan_wl);
266 return 0;
267}
268
269/* This is called after the WE handlers */
270static int slsi_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
271{
272 SLSI_NET_DBG4(dev, SLSI_NETDEV, "IOCTL cmd:0x%.4x\n", cmd);
273
274 if (cmd == SIOCDEVPRIVATE + 2) { /* 0x89f0 + 2 from wpa_supplicant */
275 return slsi_ioctl(dev, rq, cmd);
276 }
277
278 return -EOPNOTSUPP;
279}
280
281static struct net_device_stats *slsi_net_get_stats(struct net_device *dev)
282{
283 struct netdev_vif *ndev_vif = netdev_priv(dev);
284
285 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
286 return &ndev_vif->stats;
287}
288
289#ifdef CONFIG_SCSC_USE_WMM_TOS
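/* Derive the 802.1D user priority directly from the IP precedence bits:
 * IPv4 uses the top three bits of the ToS byte, IPv6 the top three bits of
 * the Traffic Class field.
 */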
290static u16 slsi_get_priority_from_tos(u8 *frame, u16 proto)
291{
292 if (WARN_ON(!frame))
293 return FAPI_PRIORITY_QOS_UP0;
294
295 switch (proto) {
296 case ETH_P_IP: /* IPv4 */
297 return (u16)(((frame[IP4_OFFSET_TO_TOS_FIELD]) & 0xE0) >> 5);
298
299 case ETH_P_IPV6: /* IPv6 */
300 return (u16)((*frame & 0x0E) >> 1);
301
302 default:
303 return FAPI_PRIORITY_QOS_UP0;
304 }
305}
306
307#else
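/* Derive the 802.1D user priority from the 6-bit DSCP: for IPv4 the ToS byte
 * is shifted right by two, for IPv6 the Traffic Class is reassembled from the
 * nibbles split across the first two header bytes before shifting.
 */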
308static u16 slsi_get_priority_from_tos_dscp(u8 *frame, u16 proto)
309{
310 u8 dscp;
311
312 if (WARN_ON(!frame))
313 return FAPI_PRIORITY_QOS_UP0;
314
315 switch (proto) {
316 case ETH_P_IP: /* IPv4 */
317 dscp = frame[IP4_OFFSET_TO_TOS_FIELD] >> FIELD_TO_DSCP;
318 break;
319
320 case ETH_P_IPV6: /* IPv6 */
321 /* Get traffic class */
322 dscp = (((frame[IP6_OFFSET_TO_TC_FIELD_0] & 0x0F) << 4) |
323 ((frame[IP6_OFFSET_TO_TC_FIELD_1] & 0xF0) >> 4)) >> FIELD_TO_DSCP;
324 break;
325
326 default:
327 return FAPI_PRIORITY_QOS_UP0;
328 }
329/* DSCP table based on RFC 8325, used from Android 10 onwards */
330#if (defined(ANDROID_VERSION) && ANDROID_VERSION >= 100000)
331 switch (dscp) {
332 case CS7:
333 return FAPI_PRIORITY_QOS_UP7;
334 case CS6:
335 case DSCP_EF:
336 case DSCP_VA:
337 return FAPI_PRIORITY_QOS_UP6;
338 case CS5:
339 return FAPI_PRIORITY_QOS_UP5;
340 case DSCP_AF41:
341 case DSCP_AF42:
342 case DSCP_AF43:
343 case CS4:
344 case DSCP_AF31:
345 case DSCP_AF32:
346 case DSCP_AF33:
347 case CS3:
348 return FAPI_PRIORITY_QOS_UP4;
349 case DSCP_AF21:
350 case DSCP_AF22:
351 case DSCP_AF23:
352 return FAPI_PRIORITY_QOS_UP3;
353 case CS2:
354 case DSCP_AF11:
355 case DSCP_AF12:
356 case DSCP_AF13:
357 case CS0:
358 return FAPI_PRIORITY_QOS_UP0;
359 case CS1:
360 return FAPI_PRIORITY_QOS_UP1;
361 default:
362 return FAPI_PRIORITY_QOS_UP0;
363 }
364#else
365 switch (dscp) {
366 case DSCP_EF:
367 case DSCP_VA:
368 return FAPI_PRIORITY_QOS_UP6;
369 case DSCP_AF43:
370 case DSCP_AF42:
371 case DSCP_AF41:
372 return FAPI_PRIORITY_QOS_UP5;
373 case DSCP_AF33:
374 case DSCP_AF32:
375 case DSCP_AF31:
376 case DSCP_AF23:
377 case DSCP_AF22:
378 case DSCP_AF21:
379 case DSCP_AF13:
380 case DSCP_AF12:
381 case DSCP_AF11:
382 return FAPI_PRIORITY_QOS_UP0;
383 case CS7:
384 return FAPI_PRIORITY_QOS_UP7;
385 case CS6:
386 return FAPI_PRIORITY_QOS_UP6;
387 case CS5:
388 return FAPI_PRIORITY_QOS_UP5;
389 case CS4:
390 return FAPI_PRIORITY_QOS_UP4;
391 case CS3:
392 return FAPI_PRIORITY_QOS_UP3;
393 case CS2:
394 return FAPI_PRIORITY_QOS_UP2;
395 case CS1:
396 return FAPI_PRIORITY_QOS_UP1;
397 case CS0:
398 return FAPI_PRIORITY_QOS_UP0;
399 default:
400 return FAPI_PRIORITY_QOS_UP0;
401 }
402#endif
403}
404
405#endif
406
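/* Downgrade the frame one access category at a time: VO -> VI -> BE -> BK.
 * Returns false once the priority can be downgraded no further.
 */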
407static bool slsi_net_downgrade_ac(struct net_device *dev, struct sk_buff *skb)
408{
409 SLSI_UNUSED_PARAMETER(dev);
410
411 switch (skb->priority) {
412 case 6:
413 case 7:
414 skb->priority = FAPI_PRIORITY_QOS_UP5; /* VO -> VI */
415 return true;
416 case 4:
417 case 5:
418 skb->priority = FAPI_PRIORITY_QOS_UP3; /* VI -> BE */
419 return true;
420 case 0:
421 case 3:
422 skb->priority = FAPI_PRIORITY_QOS_UP2; /* BE -> BK */
423 return true;
424 default:
425 return false;
426 }
427}
428
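/* Return a bitmask of the two user priorities that share the same access
 * category as the given priority; used to test the peer's tspec_established
 * bits in slsi_net_downgrade_pri().
 */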
429static u8 slsi_net_up_to_ac_mapping(u8 priority)
430{
431 switch (priority) {
432 case FAPI_PRIORITY_QOS_UP6:
433 case FAPI_PRIORITY_QOS_UP7:
434 return BIT(FAPI_PRIORITY_QOS_UP6) | BIT(FAPI_PRIORITY_QOS_UP7);
435 case FAPI_PRIORITY_QOS_UP4:
436 case FAPI_PRIORITY_QOS_UP5:
437 return BIT(FAPI_PRIORITY_QOS_UP4) | BIT(FAPI_PRIORITY_QOS_UP5);
438 case FAPI_PRIORITY_QOS_UP0:
439 case FAPI_PRIORITY_QOS_UP3:
440 return BIT(FAPI_PRIORITY_QOS_UP0) | BIT(FAPI_PRIORITY_QOS_UP3);
441 default:
442 return BIT(FAPI_PRIORITY_QOS_UP1) | BIT(FAPI_PRIORITY_QOS_UP2);
443 }
444}
445
446enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority)
447{
448 switch (priority) {
449 case FAPI_PRIORITY_QOS_UP0:
450 case FAPI_PRIORITY_QOS_UP3:
451 return SLSI_TRAFFIC_Q_BE;
452 case FAPI_PRIORITY_QOS_UP1:
453 case FAPI_PRIORITY_QOS_UP2:
454 return SLSI_TRAFFIC_Q_BK;
455 case FAPI_PRIORITY_QOS_UP4:
456 case FAPI_PRIORITY_QOS_UP5:
457 return SLSI_TRAFFIC_Q_VI;
458 case FAPI_PRIORITY_QOS_UP6:
459 case FAPI_PRIORITY_QOS_UP7:
460 return SLSI_TRAFFIC_Q_VO;
461 default:
462 return SLSI_TRAFFIC_Q_BE;
463 }
464}
465
466int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids)
467{
468 switch (ac) {
469 case SLSI_TRAFFIC_Q_BE:
470 tids[0] = FAPI_PRIORITY_QOS_UP0;
471 tids[1] = FAPI_PRIORITY_QOS_UP3;
472 break;
473
474 case SLSI_TRAFFIC_Q_BK:
475 tids[0] = FAPI_PRIORITY_QOS_UP1;
476 tids[1] = FAPI_PRIORITY_QOS_UP2;
477 break;
478
479 case SLSI_TRAFFIC_Q_VI:
480 tids[0] = FAPI_PRIORITY_QOS_UP4;
481 tids[1] = FAPI_PRIORITY_QOS_UP5;
482 break;
483
484 case SLSI_TRAFFIC_Q_VO:
485 tids[0] = FAPI_PRIORITY_QOS_UP6;
486 tids[1] = FAPI_PRIORITY_QOS_UP7;
487 break;
488
489 default:
490 return -EINVAL;
491 }
492
493 return 0;
494}
495
496static void slsi_net_downgrade_pri(struct net_device *dev, struct slsi_peer *peer,
497 struct sk_buff *skb)
498{
499 /* In case we are a client, downgrade the AC if ACM is
500 * set and a TSPEC is not established
501 */
502 while (unlikely(peer->wmm_acm & BIT(skb->priority)) &&
503 !(peer->tspec_established & slsi_net_up_to_ac_mapping(skb->priority))) {
504 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Downgrading from UP:%d\n", skb->priority);
505 if (!slsi_net_downgrade_ac(dev, skb))
506 break;
507 }
508 SLSI_NET_DBG4(dev, SLSI_NETDEV, "To UP:%d\n", skb->priority);
509}
510
511#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
512static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback)
513#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
514static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv)
515#else
516static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb)
517#endif
518{
519 struct netdev_vif *ndev_vif = netdev_priv(dev);
520 struct slsi_dev *sdev = ndev_vif->sdev;
521 u16 netif_q = 0;
522 struct ethhdr *ehdr = (struct ethhdr *)skb->data;
523 int proto = 0;
524 struct slsi_peer *peer;
525
526#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
527 (void)accel_priv;
528#endif
529#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
530 (void)fallback;
531#endif
532 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
533
534 /* Defensive check for uninitialized mac header */
535 if (!skb_mac_header_was_set(skb))
536 skb_reset_mac_header(skb);
537
538 if (is_zero_ether_addr(ehdr->h_dest) || is_zero_ether_addr(ehdr->h_source)) {
539 SLSI_NET_WARN(dev, "invalid Ethernet addresses (dest:%pM,src:%pM)\n", ehdr->h_dest, ehdr->h_source);
540 SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
541 return SLSI_NETIF_Q_DISCARD;
542 }
543
544 proto = be16_to_cpu(eth_hdr(skb)->h_proto);
545
546 switch (proto) {
547 default:
548 /* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
549 break;
550 case ETH_P_PAE:
551 case ETH_P_WAI:
552 SLSI_NET_DBG3(dev, SLSI_TX, "EAP packet. Priority Queue Selected\n");
553 return SLSI_NETIF_Q_PRIORITY;
554 case ETH_P_ARP:
555 SLSI_NET_DBG3(dev, SLSI_TX, "ARP frame. Priority Queue Selected\n");
556 return SLSI_NETIF_Q_PRIORITY;
557 case ETH_P_IP:
558 if (slsi_is_dhcp_packet(skb->data) == SLSI_TX_IS_NOT_DHCP)
559 break;
560 SLSI_NET_DBG3(dev, SLSI_TX, "DHCP packet. Priority Queue Selected\n");
561 return SLSI_NETIF_Q_PRIORITY;
562 }
563
564 if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
565 /* MULTICAST/BROADCAST Queue is only used for AP */
566 if (is_multicast_ether_addr(ehdr->h_dest)) {
567 SLSI_NET_DBG3(dev, SLSI_TX, "Multicast AC queue will be selected\n");
568#ifdef CONFIG_SCSC_USE_WMM_TOS
569 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
570#else
571 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
572#endif
573 return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb->priority));
574 }
575
576 slsi_spinlock_lock(&ndev_vif->peer_lock);
577 peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
578 if (!peer) {
579 SLSI_NET_DBG1(dev, SLSI_TX, "Discard: Peer %pM NOT found\n", ehdr->h_dest);
580 slsi_spinlock_unlock(&ndev_vif->peer_lock);
581 return SLSI_NETIF_Q_DISCARD;
582 }
583
584 if (peer->qos_enabled) {
585#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
586 if (peer->qos_map_set) { /*802.11 QoS for interworking*/
587 skb->priority = cfg80211_classify8021d(skb, &peer->qos_map);
588 } else
589#endif
590 {
591#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
592 if ((proto == ETH_P_IP && slsi_is_dns_packet(skb->data)) ||
593 (proto == ETH_P_IP && slsi_is_mdns_packet(skb->data)) ||
594 (proto == ETH_P_IP && slsi_is_tcp_sync_packet(dev, skb))) {
595 skb->priority = FAPI_PRIORITY_QOS_UP7;
596 } else
597#endif
598 {
599#ifdef CONFIG_SCSC_USE_WMM_TOS
600 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
601#else
602 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
603#endif
604 }
605 }
606 } else{
607 skb->priority = FAPI_PRIORITY_QOS_UP0;
608 }
609
610 /* Downgrade the priority if acm bit is set and tspec is not established */
611 slsi_net_downgrade_pri(dev, peer, skb);
612
613 netif_q = slsi_netif_get_peer_queue(peer->queueset, slsi_frame_priority_to_ac_queue(skb->priority));
614 SLSI_NET_DBG3(dev, SLSI_TX, "prio:%d queue:%u\n", skb->priority, netif_q);
615 slsi_spinlock_unlock(&ndev_vif->peer_lock);
616 return netif_q;
617}
618
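/* On TDLS setup or teardown, requeue frames destined for the TDLS peer
 * between the STA net queues and the TDLS net queues (and fix up queue
 * mappings in the TCP ACK suppression cache) while transmit is paused.
 */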
619void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
620 struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection)
621{
622 struct netdev_vif *netdev_vif = netdev_priv(dev);
623 struct sk_buff *skb = NULL;
624#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
625 struct sk_buff *skb_to_free = NULL;
626#endif
627 struct ethhdr *ehdr;
628 struct Qdisc *qd;
629 u32 num_pkts;
630 u16 staq;
631 u16 tdlsq;
632 u16 netq;
633 u16 i;
634 u16 j;
635 int index;
636 struct slsi_tcp_ack_s *tcp_ack;
637
638 /* Get the netdev queue number from queueset */
639 staq = slsi_netif_get_peer_queue(sta_peer->queueset, 0);
640 tdlsq = slsi_netif_get_peer_queue(tdls_peer->queueset, 0);
641
642 SLSI_NET_DBG1(dev, SLSI_TDLS, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq: %d, tdls_netq: %d\n",
643 connection, sta_peer->queueset, tdls_peer->queueset, staq, tdlsq);
644
645 /* Pause the TDLS queues and STA netdev queues */
646 slsi_tx_pause_queues(sdev);
647
648 /* walk through frames in TCP Ack suppression queue and change mapping to TDLS queue */
649 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
650 tcp_ack = &netdev_vif->ack_suppression[index];
651 if (!tcp_ack || !tcp_ack->state)
652 continue;
653 slsi_spinlock_lock(&tcp_ack->lock);
654 skb_queue_walk(&tcp_ack->list, skb) {
655 SLSI_NET_DBG2(dev, SLSI_TDLS, "frame in TCP Ack list (peer:%pM)\n", eth_hdr(skb)->h_dest);
656 /* Is it destined to the TDLS peer? */
657 if (compare_ether_addr(tdls_peer->address, eth_hdr(skb)->h_dest) == 0) {
658 if (connection) {
659 /* TDLS setup: change the queue mapping to TDLS queue */
660 skb->queue_mapping += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
661 } else {
662 /* TDLS teardown: change the queue to STA queue */
663 skb->queue_mapping -= (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
664 }
665 }
666 }
667 slsi_spinlock_unlock(&tcp_ack->lock);
668 }
669
670 /**
671 * For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
672 * For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
673 */
674 if (connection)
675 tdls_peer->valid = true;
676 else
677 tdls_peer->valid = false;
678
679 /* Move packets from netdev queues */
680 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
681 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
682 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
683
684 if (connection) {
685 /* Check if any packet is already available in TDLS queue (most likely from last session) */
686 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
687 SLSI_NET_ERR(dev, "tdls_connection: Packet present in queue %d\n", tdlsq + i);
688
689 qd = dev->_tx[staq + i].qdisc;
690 /* Get the total number of packets in STAQ */
691 num_pkts = qd->q.qlen;
692
693 /* Check all the pkts in STAQ and move the TDLS pkts to TDLSQ */
694 for (j = 0; j < num_pkts; j++) {
695 qd = dev->_tx[staq + i].qdisc;
696 /* Dequeue the pkt from STAQ. This logic is similar to kernel API dequeue_skb() */
697 skb = qd->gso_skb;
698 if (skb) {
699 qd->gso_skb = NULL;
700 qd->q.qlen--;
701 } else {
702 skb = qd->dequeue(qd);
703 }
704
705 if (!skb) {
706 SLSI_NET_ERR(dev, "tdls_connection: STA NETQ skb is NULL\n");
707 break;
708 }
709
710 /* Change the queue mapping for the TDLS packets */
711 netq = skb->queue_mapping;
712 ehdr = (struct ethhdr *)skb->data;
713 if (compare_ether_addr(tdls_peer->address, ehdr->h_dest) == 0) {
714 netq += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
715 SLSI_NET_DBG3(dev, SLSI_TDLS, "NETQ%d: Queue mapping changed from %d to %d\n",
716 i, skb->queue_mapping, netq);
717 skb_set_queue_mapping(skb, netq);
718 }
719
720 qd = dev->_tx[netq].qdisc;
721#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
722 qd->enqueue(skb, qd, &skb_to_free);
723#else
724 /* If the netdev queue is already full then enqueue() will drop the skb */
725 qd->enqueue(skb, qd);
726#endif
727 }
728 } else {
729 num_pkts = dev->_tx[tdlsq + i].qdisc->q.qlen;
730 /* Move the packets from TDLS to STA queue */
731 for (j = 0; j < num_pkts; j++) {
732 /* Dequeue the pkt from TDLS_Q. This logic is similar to kernel API dequeue_skb() */
733 qd = dev->_tx[tdlsq + i].qdisc;
734 skb = qd->gso_skb;
735 if (skb) {
736 qd->gso_skb = NULL;
737 qd->q.qlen--;
738 } else {
739 skb = qd->dequeue(qd);
740 }
741
742 if (!skb) {
743 SLSI_NET_ERR(dev, "tdls_teardown: TDLS NETQ skb is NULL\n");
744 break;
745 }
746
747 /* Update the queue mapping */
748 skb_set_queue_mapping(skb, staq + i);
749
750 /* Enqueue the packet in STA queue */
751 qd = dev->_tx[staq + i].qdisc;
752#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
753 qd->enqueue(skb, qd, &skb_to_free);
754#else
755 /* If the netdev queue is already full then enqueue() will drop the skb */
756 qd->enqueue(skb, qd);
757#endif
758 }
759 }
760 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
761 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
762 }
763#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
764 if (unlikely(skb_to_free))
765 kfree_skb_list(skb_to_free);
766#endif
767
768 /* Teardown - after teardown there should not be any packet in TDLS queues */
769 if (!connection)
770 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
771 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
772 SLSI_NET_ERR(dev, "tdls_teardown: Packet present in NET queue %d\n", tdlsq + i);
773 }
774
775 /* Resume the STA and TDLS netdev queues */
776 slsi_tx_unpause_queues(sdev);
777}
778
779/**
780 * This is the main TX entry point for the driver.
781 *
782 * Ownership of the skb is transferred to another function ONLY IF such
783 * function was able to deal with that skb and ended with a SUCCESS ret code.
784 * Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
785 *
786 * In the context of this function:
787 * - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
788 * SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
789 * kernel netstack will receive back NETDEV_TX_OK too.
790 * - ownership is KEPT HERE by this function when lower layers fail somehow
791 * to deal with the transmission of the skb. In this case the skb WOULD HAVE
792 * NOT BEEN FREED by lower layers, which instead return a proper ERRCODE.
793 * - intermediate lower layer functions (NOT directly involved in failure or
794 * success) will relay any retcode up to this layer for evaluation.
795 *
796 * WHAT HAPPENS THEN is ERRCODE-dependent, and at the moment:
797 * - ENOSPC: something related to queueing happened...this should be
798 * retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
799 * requeued by the Kernel NetStack itself, using the proper queue.
800 * As a consequence the SKB is NOT FREED HERE!
801 * - ANY OTHER ERR: all other errors are considered at the moment NOT
802 * recoverable and SO skbs are dropped (FREED) HERE...Kernel will receive
803 * the proper ERRCODE and stops dealing with the packet considering it
804 * consumed by lower layer. (same behavior as NETDEV_TX_OK)
805 *
806 * BIG NOTE:
807 * As detailed in Documentation/networking/drivers.txt the above behavior
808 * of returning NETDEV_TX_BUSY to trigger requeueing by the Kernel is
809 * discouraged and should be used ONLY in case of a real HARD error(?);
810 * the advised solution is to actively STOP the queues before exhausting
811 * the available space and WAKE them up again when more free buffers
812 * become available.
813 */
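/* In short: 0 from slsi_tx_data() means NETDEV_TX_OK (skb consumed below),
 * -ENOSPC means NETDEV_TX_BUSY (skb kept for the stack to requeue), and any
 * other error means the skb is freed here and the error returned.
 */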
814static netdev_tx_t slsi_net_hw_xmit(struct sk_buff *skb, struct net_device *dev)
815{
816 struct netdev_vif *ndev_vif = netdev_priv(dev);
817 struct slsi_dev *sdev = ndev_vif->sdev;
818 int r = NETDEV_TX_OK;
819 struct sk_buff *original_skb = NULL;
820#ifdef CONFIG_SCSC_WLAN_DEBUG
821 int known_users = 0;
822#endif
823 /* Keep the packet length. The packet length will be used to increment
824 * stats for the netdev if the packet was successfully transmitted.
825 * The ownership of the SKB is passed to lower layers, so we should
826 * not refer to the SKB after this point
827 */
828 unsigned int packet_len = skb->len;
829 enum slsi_traffic_q traffic_q = slsi_frame_priority_to_ac_queue(skb->priority);
830
831 slsi_wakelock(&sdev->wlan_wl);
832 slsi_skb_cb_init(skb);
833
834 /* Check for misaligned (oddly aligned) data.
835 * The f/w requires 16 bit alignment.
836 * This is a corner case - for example, the kernel can generate BPDU
837 * that are oddly aligned. Therefore it is acceptable to copy these
838 * frames to a 16 bit alignment.
839 */
840 if ((uintptr_t)skb->data & 0x1) {
841 struct sk_buff *skb2 = NULL;
842 /* Received a socket buffer aligned on an odd address.
843 * Re-align by asking for headroom.
844 */
845 skb2 = skb_copy_expand(skb, SLSI_NETIF_SKB_HEADROOM, skb_tailroom(skb), GFP_ATOMIC);
846 if (skb2 && (!(((uintptr_t)skb2->data) & 0x1))) {
847 /* We should account for this duplication */
848 original_skb = skb;
849 skb = skb2;
850 SLSI_NET_DBG3(dev, SLSI_TX, "Oddly aligned skb realigned\n");
851 } else {
852 /* Drop the packet if we can't re-align. */
853 SLSI_NET_WARN(dev, "Oddly aligned skb failed realignment, dropping\n");
854 if (skb2) {
855 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand didn't align for us\n");
856 slsi_kfree_skb(skb2);
857 } else {
858 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand failed when trying to align\n");
859 }
860 r = -EFAULT;
861 goto evaluate;
862 }
863 }
864 slsi_dbg_track_skb(skb, GFP_ATOMIC);
865
866 /* Be defensive about the mac_header - some kernels have a bug where a
867 * frame can be delivered to the driver with mac_header initialised
868 * to ~0U and this causes a crash when the pointer is dereferenced to
869 * access part of the Ethernet header.
870 */
871 if (!skb_mac_header_was_set(skb))
872 skb_reset_mac_header(skb);
873
874 SLSI_NET_DBG3(dev, SLSI_TX, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb)->h_proto));
875
876 if (!ndev_vif->is_available) {
877 SLSI_NET_WARN(dev, "vif NOT available\n");
878 r = -EFAULT;
879 goto evaluate;
880 }
881 if (skb->queue_mapping == SLSI_NETIF_Q_DISCARD) {
882 SLSI_NET_WARN(dev, "Discard Queue :: Packet Dropped\n");
883 r = -EIO;
884 goto evaluate;
885 }
886
887#ifdef CONFIG_SCSC_WLAN_DEBUG
888#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
889 known_users = refcount_read(&skb->users);
890#else
891 known_users = atomic_read(&skb->users);
892#endif
893#endif
894
895#ifndef CONFIG_ARM
896 skb = slsi_netif_tcp_ack_suppression_pkt(dev, skb);
897 if (!skb) {
898 slsi_wakeunlock(&sdev->wlan_wl);
899 if (original_skb)
900 slsi_kfree_skb(original_skb);
901 return NETDEV_TX_OK;
902 }
903#endif
904
905 /* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
906 r = slsi_tx_data(sdev, dev, skb);
907evaluate:
908 if (r == 0) {
909 /**
910 * A copy has been passed down and successfully transmitted
911 * and freed....here we free the original coming from the
912 * upper network layers....if a copy was passed down.
913 */
914 if (original_skb)
915 slsi_kfree_skb(original_skb);
916 /* skb freed by lower layers on success...enjoy */
917
918 ndev_vif->tx_packets[traffic_q]++;
919 ndev_vif->stats.tx_packets++;
920 ndev_vif->stats.tx_bytes += packet_len;
921 r = NETDEV_TX_OK;
922 } else {
923 /**
924 * Failed to send:
925 * - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
926 * NOT discarded by lower layers and NETDEV_TX_BUSY should
927 * be returned to upper layers: this will cause the skb
928 * (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
929 * to be requeued ...
930 * NOTE THAT it's the original skb that will be retried
931 * by upper netstack.
932 * THIS CONDITION SHOULD NOT BE REACHED...NEVER...see in
933 * the following.
934 *
935 * - with any other -ERR instead return the error: this
936 * anyway let the kernel think that the SKB has
937 * been consumed, and we drop the frame and free it.
938 *
939 * - a WARN_ON() takes care to ensure the SKB has NOT been
940 * freed by someone even though this was NOT supposed to happen,
941 * just before the actual freeing.
942 *
943 */
944 if (r == -ENOSPC) {
945 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
946 ndev_vif->stats.tx_fifo_errors++;
947 /* Free the local copy if any ... */
948 if (original_skb)
949 slsi_kfree_skb(skb);
950 r = NETDEV_TX_BUSY;
951 } else {
952#ifdef CONFIG_SCSC_WLAN_DEBUG
953#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
954 WARN_ON(known_users && refcount_read(&skb->users) != known_users);
955#else
956 WARN_ON(known_users && atomic_read(&skb->users) != known_users);
957#endif
958#endif
959 if (original_skb)
960 slsi_kfree_skb(original_skb);
961 slsi_kfree_skb(skb);
962 ndev_vif->stats.tx_dropped++;
963 /* We return the ORIGINAL Error 'r' anyway
964 * BUT Kernel treats them as TX complete anyway
965 * and assumes the SKB has been consumed.
966 */
967 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
968 }
969 }
970 /* SKBs are always considered consumed if the driver
971 * returns NETDEV_TX_OK.
972 */
973 slsi_wakeunlock(&sdev->wlan_wl);
974 return r;
975}
976
977static netdev_features_t slsi_net_fix_features(struct net_device *dev, netdev_features_t features)
978{
979 SLSI_UNUSED_PARAMETER(dev);
980
981#ifdef CONFIG_SCSC_WLAN_SG
982 SLSI_NET_DBG1(dev, SLSI_RX, "Scatter-gather and GSO enabled\n");
983 features |= NETIF_F_SG;
984 features |= NETIF_F_GSO;
985#endif
986
987#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
988 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO enabled\n");
989 features |= NETIF_F_GRO;
990#else
991 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO disabled\n");
992 features &= ~NETIF_F_GRO;
993#endif
994 return features;
995}
996
997static void slsi_set_multicast_list(struct net_device *dev)
998{
999 struct netdev_vif *ndev_vif = netdev_priv(dev);
1000 u8 count, i = 0;
1001 u8 mdns_addr[ETH_ALEN] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
1002
1003#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1004 u8 mc_addr_prefix[3] = { 0x01, 0x00, 0x5e };
1005#else
1006 u8 mdns6_addr[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
1007 const u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
1008 u8 ipv6addr_suffix[3];
1009#endif
1010 struct netdev_hw_addr *ha;
1011
1012 if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
1013 return;
1014
1015 if (!ndev_vif->is_available) {
1016 SLSI_NET_DBG1(dev, SLSI_NETDEV, "vif NOT available\n");
1017 return;
1018 }
1019
1020 count = netdev_mc_count(dev);
1021 if (!count)
1022 goto exit;
1023
1024#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1025 slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
1026 memcpy(ipv6addr_suffix, &ndev_vif->ipv6address.s6_addr[13], 3);
1027 slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
1028#endif
1029
1030 netdev_for_each_mc_addr(ha, dev) {
1031#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1032 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) || /*mDns is handled separately*/
1033 (memcmp(ha->addr, mc_addr_prefix, 3))) { /*only consider IPv4 multicast addresses*/
1034#else
1035 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) ||
1036 (!memcmp(ha->addr, mdns6_addr, ETH_ALEN)) || /*mDns is handled separately*/
1037 (!memcmp(ha->addr, solicited_node_addr, 3) &&
1038 !memcmp(&ha->addr[3], ipv6addr_suffix, 3))) { /* local multicast addr handled separately*/
1039#endif
1040
1041 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Drop MAC %pM\n", ha->addr);
1042 continue;
1043 }
1044 if (i == SLSI_MC_ADDR_ENTRY_MAX) {
1045 SLSI_NET_WARN(dev, "MAC list has reached max limit (%d), actual count %d\n", SLSI_MC_ADDR_ENTRY_MAX, count);
1046 break;
1047 }
1048
1049 SLSI_NET_DBG3(dev, SLSI_NETDEV, "idx %d MAC %pM\n", i, ha->addr);
1050 SLSI_ETHER_COPY(ndev_vif->sta.regd_mc_addr[i++], ha->addr);
1051 }
1052
1053exit:
1054 ndev_vif->sta.regd_mc_addr_count = i;
1055}
1056
1057static int slsi_set_mac_address(struct net_device *dev, void *addr)
1058{
1059 struct netdev_vif *ndev_vif = netdev_priv(dev);
1060 struct slsi_dev *sdev = ndev_vif->sdev;
1061 struct sockaddr *sa = (struct sockaddr *)addr;
1062
1063 SLSI_NET_DBG1(dev, SLSI_NETDEV, "slsi_set_mac_address %pM\n", sa->sa_data);
1064 SLSI_ETHER_COPY(dev->dev_addr, sa->sa_data);
1065
1066 // This callback is invoked whenever the MAC address changes, which
1067 // happens during the MAC randomization cases. For connected MAC
1068 // randomization, enable the initial scan for faster reconnection.
1069 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
1070 sdev->initial_scan = true;
1071 SLSI_NET_DBG1(dev, SLSI_NETDEV, "slsi_set_mac_address : Value of initial_scan is %d\n", sdev->initial_scan);
1072 }
1073 return 0;
1074}
1075
1076static const struct net_device_ops slsi_netdev_ops = {
1077 .ndo_open = slsi_net_open,
1078 .ndo_stop = slsi_net_stop,
1079 .ndo_start_xmit = slsi_net_hw_xmit,
1080 .ndo_do_ioctl = slsi_net_ioctl,
1081 .ndo_get_stats = slsi_net_get_stats,
1082 .ndo_select_queue = slsi_net_select_queue,
1083 .ndo_fix_features = slsi_net_fix_features,
1084 .ndo_set_rx_mode = slsi_set_multicast_list,
1085 .ndo_set_mac_address = slsi_set_mac_address,
1086};
1087
1088static void slsi_if_setup(struct net_device *dev)
1089{
1090 ether_setup(dev);
1091 dev->netdev_ops = &slsi_netdev_ops;
1092#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
1093 dev->needs_free_netdev = true;
1094#else
1095 dev->destructor = free_netdev;
1096#endif
1097}
1098
1099#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1100
1101#if defined(CONFIG_SOC_EXYNOS9610) || defined(CONFIG_SOC_EXYNOS9630) || defined(CONFIG_SOC_EXYNOS3830)
1102#define SCSC_NETIF_RPS_CPUS_MASK "fe"
1103#else
1104#define SCSC_NETIF_RPS_CPUS_MASK "0"
1105#endif
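/* RPS CPU mask applied to the driver's RX queue: "fe" steers receive
 * processing to CPUs 1-7 (everything except CPU0) on the listed Exynos SoCs,
 * while "0" leaves RPS disabled.
 */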
1106
1107static void slsi_netif_rps_map_clear(struct net_device *dev)
1108{
1109 struct rps_map *map;
1110
1111 map = rcu_dereference_protected(dev->_rx->rps_map, 1);
1112 if (map) {
1113 RCU_INIT_POINTER(dev->_rx->rps_map, NULL);
1114 kfree_rcu(map, rcu);
1115 SLSI_NET_INFO(dev, "clear rps_cpus map\n");
1116 }
1117}
1118
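/* Parse a hex CPU bitmap and install it as the rps_map of the first RX queue,
 * updating the rps_needed static key; this mirrors what the kernel's
 * /sys/class/net/<dev>/queues/rx-0/rps_cpus store handler does.
 */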
1119static int slsi_netif_rps_map_set(struct net_device *dev, char *buf, size_t len)
1120{
1121 struct rps_map *old_map, *map;
1122 cpumask_var_t mask;
1123 int err, cpu, i;
1124 static DEFINE_SPINLOCK(rps_map_lock);
1125
1126 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1127 return -ENOMEM;
1128
1129 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1130 if (err) {
1131 free_cpumask_var(mask);
1132 SLSI_NET_WARN(dev, "CPU bitmap parse failed\n");
1133 return err;
1134 }
1135
1136 map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL);
1137 if (!map) {
1138 free_cpumask_var(mask);
1139 SLSI_NET_WARN(dev, "CPU mask alloc failed\n");
1140 return -ENOMEM;
1141 }
1142
1143 i = 0;
1144 for_each_cpu_and(cpu, mask, cpu_online_mask)
1145 map->cpus[i++] = cpu;
1146
1147 if (i) {
1148 map->len = i;
1149 } else {
1150 kfree(map);
1151 map = NULL;
1152 }
1153
1154 spin_lock(&rps_map_lock);
1155 old_map = rcu_dereference_protected(dev->_rx->rps_map, lockdep_is_held(&rps_map_lock));
1156 rcu_assign_pointer(dev->_rx->rps_map, map);
1157 spin_unlock(&rps_map_lock);
1158
1159 if (map)
1160 static_key_slow_inc(&rps_needed);
1161 if (old_map)
1162 static_key_slow_dec(&rps_needed);
1163
1164 if (old_map)
1165 kfree_rcu(old_map, rcu);
1166
1167 free_cpumask_var(mask);
1168 SLSI_NET_INFO(dev, "rps_cpus map set(%s)\n", buf);
1169 return len;
1170}
1171#endif
1172
1173int slsi_netif_add_locked(struct slsi_dev *sdev, const char *name, int ifnum)
1174{
1175 struct net_device *dev = NULL;
1176 struct netdev_vif *ndev_vif;
1177 struct wireless_dev *wdev;
1178 int alloc_size, txq_count = 0, ret;
1179
1180 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1181
1182 if (WARN_ON(!sdev || ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES || sdev->netdev[ifnum]))
1183 return -EINVAL;
1184
1185 alloc_size = sizeof(struct netdev_vif);
1186
1187 txq_count = SLSI_NETIF_Q_PEER_START + (SLSI_NETIF_Q_PER_PEER * (SLSI_ADHOC_PEER_CONNECTIONS_MAX));
1188
1189#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
1190 dev = alloc_netdev_mqs(alloc_size, name, NET_NAME_PREDICTABLE, slsi_if_setup, txq_count, 1);
1191#else
1192 dev = alloc_netdev_mqs(alloc_size, name, slsi_if_setup, txq_count, 1);
1193#endif
1194 if (!dev) {
1195 SLSI_ERR(sdev, "Failed to allocate private data for netdev\n");
1196 return -ENOMEM;
1197 }
1198
1199 /* Reserve space in skb for later use */
1200 dev->needed_headroom = SLSI_NETIF_SKB_HEADROOM;
1201 dev->needed_tailroom = SLSI_NETIF_SKB_TAILROOM;
1202
1203 ret = dev_alloc_name(dev, dev->name);
1204 if (ret < 0)
1205 goto exit_with_error;
1206
1207 ndev_vif = netdev_priv(dev);
1208 memset(ndev_vif, 0x00, sizeof(*ndev_vif));
1209 SLSI_MUTEX_INIT(ndev_vif->vif_mutex);
1210 SLSI_MUTEX_INIT(ndev_vif->scan_mutex);
1211 SLSI_MUTEX_INIT(ndev_vif->scan_result_mutex);
1212 skb_queue_head_init(&ndev_vif->ba_complete);
1213 slsi_sig_send_init(&ndev_vif->sig_wait);
1214 ndev_vif->sdev = sdev;
1215 ndev_vif->ifnum = ifnum;
1216 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1217#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1218 slsi_spinlock_create(&ndev_vif->ipv6addr_lock);
1219#endif
1220 slsi_spinlock_create(&ndev_vif->peer_lock);
1221 atomic_set(&ndev_vif->ba_flush, 0);
1222
1223 /* Reserve memory for the peer database - Not required for p2p0/nan interface */
1224 if (!(SLSI_IS_VIF_INDEX_P2P(ndev_vif) || SLSI_IS_VIF_INDEX_NAN(ndev_vif))) {
1225 int queueset;
1226
1227 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1228 ndev_vif->peer_sta_record[queueset] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queueset]), GFP_KERNEL);
1229
1230 if (!ndev_vif->peer_sta_record[queueset]) {
1231 int j;
1232
1233 SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queueset:%d)\n", queueset);
1234
1235 /* Free previously allocated peer database memory till current queueset */
1236 for (j = 0; j < queueset; j++) {
1237 kfree(ndev_vif->peer_sta_record[j]);
1238 ndev_vif->peer_sta_record[j] = NULL;
1239 }
1240
1241 ret = -ENOMEM;
1242 goto exit_with_error;
1243 }
1244 }
1245 }
1246
1247 /* The default power mode in host*/
1248 if (slsi_is_rf_test_mode_enabled()) {
1249 SLSI_NET_ERR(dev, "*#rf# rf test mode set is enabled.\n");
1250 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
1251 } else {
1252 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
1253 }
1254
1255 INIT_LIST_HEAD(&ndev_vif->sta.network_map);
1256 SLSI_DBG1(sdev, SLSI_NETDEV, "ifnum=%d\n", ndev_vif->ifnum);
1257
1258 /* For HS2 interface */
1259 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
1260 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1261
1262 /* For p2p0 interface */
1263 else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1264 ret = slsi_p2p_init(sdev, ndev_vif);
1265 if (ret)
1266 goto exit_with_error;
1267 }
1268
1269 INIT_DELAYED_WORK(&ndev_vif->scan_timeout_work, slsi_scan_ind_timeout_handle);
1270
1271 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_data, "slsi_wlan_rx_data", slsi_rx_netdev_data_work);
1272 if (ret)
1273 goto exit_with_error;
1274
1275 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_mlme, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work);
1276 if (ret) {
1277 slsi_skb_work_deinit(&ndev_vif->rx_data);
1278 goto exit_with_error;
1279 }
1280
1281 wdev = &ndev_vif->wdev;
1282
1283 dev->ieee80211_ptr = wdev;
1284 wdev->wiphy = sdev->wiphy;
1285 wdev->netdev = dev;
1286 wdev->iftype = NL80211_IFTYPE_STATION;
1287 SET_NETDEV_DEV(dev, sdev->dev);
1288
1289 /* We are not ready to send data yet. */
1290 netif_carrier_off(dev);
1291
1292#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1293 if (strcmp(name, CONFIG_SCSC_AP_INTERFACE_NAME) == 0)
1294 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
1295 else
1296 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1297#else
1298 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1299#endif
1300 SLSI_DBG1(sdev, SLSI_NETDEV, "Add:%pM\n", dev->dev_addr);
1301 rcu_assign_pointer(sdev->netdev[ifnum], dev);
1302 ndev_vif->delete_probe_req_ies = false;
1303 ndev_vif->probe_req_ies = NULL;
1304 ndev_vif->probe_req_ie_len = 0;
1305 ndev_vif->drv_in_p2p_procedure = false;
1306
1307#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1308 slsi_netif_rps_map_set(dev, SCSC_NETIF_RPS_CPUS_MASK, strlen(SCSC_NETIF_RPS_CPUS_MASK));
1309#endif
1310 return 0;
1311
1312exit_with_error:
1313 mutex_lock(&sdev->netdev_remove_mutex);
1314 free_netdev(dev);
1315 mutex_unlock(&sdev->netdev_remove_mutex);
1316 return ret;
1317}
1318
1319int slsi_netif_dynamic_iface_add(struct slsi_dev *sdev, const char *name)
1320{
1321 int index = -EINVAL;
1322 int err;
1323
1324 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1325
1326#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1327 if (sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN] == sdev->netdev_ap) {
1328 rcu_assign_pointer(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN], NULL);
1329 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1330 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1331 }
1332#else
1333 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1334 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1335#endif
1336
1337 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1338 return index;
1339}
1340
1341int slsi_netif_init(struct slsi_dev *sdev)
1342{
1343 int i;
1344
1345 SLSI_DBG3(sdev, SLSI_NETDEV, "\n");
1346
1347 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1348
1349 /* Initialize all other netdev interfaces to NULL */
1350 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1351 RCU_INIT_POINTER(sdev->netdev[i], NULL);
1352
1353 if (slsi_netif_add_locked(sdev, "wlan%d", SLSI_NET_INDEX_WLAN) != 0) {
1354 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1355 return -EINVAL;
1356 }
1357
1358 if (slsi_netif_add_locked(sdev, "p2p%d", SLSI_NET_INDEX_P2P) != 0) {
1359 rtnl_lock();
1360 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1361 rtnl_unlock();
1362 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1363 return -EINVAL;
1364 }
1365#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1366#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1367 if (slsi_netif_add_locked(sdev, CONFIG_SCSC_AP_INTERFACE_NAME, SLSI_NET_INDEX_P2PX_SWLAN) != 0) {
1368 rtnl_lock();
1369 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1370 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1371 rtnl_unlock();
1372 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1373 return -EINVAL;
1374 }
1375#endif
1376#endif
1377#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
1378 if (slsi_netif_add_locked(sdev, "nan%d", SLSI_NET_INDEX_NAN) != 0) {
1379 rtnl_lock();
1380 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1381 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1382#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1383#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1384 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
1385#endif
1386#endif
1387 rtnl_unlock();
1388 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1389 return -EINVAL;
1390 }
1391#endif
1392 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1393 return 0;
1394}
1395
1396static int slsi_netif_register_locked(struct slsi_dev *sdev, struct net_device *dev)
1397{
1398 struct netdev_vif *ndev_vif = netdev_priv(dev);
1399 int err;
1400
1401 WARN_ON(!rtnl_is_locked());
1402 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1403 if (atomic_read(&ndev_vif->is_registered)) {
1404 SLSI_NET_ERR(dev, "Register:%pM Failed: Already registered\n", dev->dev_addr);
1405 return 0;
1406 }
1407
1408 err = register_netdevice(dev);
1409 if (err)
1410 SLSI_NET_ERR(dev, "Register:%pM Failed\n", dev->dev_addr);
1411 else
1412 atomic_set(&ndev_vif->is_registered, 1);
1413 return err;
1414}
1415
1416int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1417{
1418 int err;
1419
1420 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1421 err = slsi_netif_register_locked(sdev, dev);
1422 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1423 return err;
1424}
1425
1426int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev)
1427{
1428 int err;
1429
1430 rtnl_lock();
1431 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1432 err = slsi_netif_register_locked(sdev, dev);
1433 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1434 rtnl_unlock();
1435 return err;
1436}
1437
1438void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev)
1439{
1440 int i;
1441 struct netdev_vif *ndev_vif = netdev_priv(dev);
1442
1443 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Unregister:%pM\n", dev->dev_addr);
1444
1445 WARN_ON(!rtnl_is_locked());
1446 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1447
1448 if (atomic_read(&ndev_vif->is_registered)) {
1449 netif_tx_disable(dev);
1450 netif_carrier_off(dev);
1451
1452 slsi_stop_net_dev(sdev, dev);
1453 }
1454
1455 rcu_assign_pointer(sdev->netdev[ndev_vif->ifnum], NULL);
1456 synchronize_rcu();
1457
1458 /* Free memory of the peer database - Not required for p2p0 interface */
1459 if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1460 int queueset;
1461
1462 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1463 kfree(ndev_vif->peer_sta_record[queueset]);
1464 ndev_vif->peer_sta_record[queueset] = NULL;
1465 }
1466 }
1467
1468 if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1469 slsi_p2p_deinit(sdev, ndev_vif);
1470 } else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
1471 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1472 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1473 }
1474
1475 cancel_delayed_work(&ndev_vif->scan_timeout_work);
1476 ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
1477
1478 slsi_skb_work_deinit(&ndev_vif->rx_data);
1479 slsi_skb_work_deinit(&ndev_vif->rx_mlme);
1480
1481 for (i = 0; i < SLSI_SCAN_MAX; i++)
1482 slsi_purge_scan_results(ndev_vif, i);
1483
1484 slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
1485 slsi_roam_channel_cache_prune(dev, 0);
1486 kfree(ndev_vif->probe_req_ies);
1487
1488#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1489 slsi_netif_rps_map_clear(dev);
1490#endif
1491 if (atomic_read(&ndev_vif->is_registered)) {
1492 atomic_set(&ndev_vif->is_registered, 0);
1493 unregister_netdevice(dev);
1494 } else {
1495 mutex_lock(&sdev->netdev_remove_mutex);
1496 free_netdev(dev);
1497 mutex_unlock(&sdev->netdev_remove_mutex);
1498 }
1499}
1500
1501void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1502{
1503 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1504 slsi_netif_remove_locked(sdev, dev);
1505 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1506}
1507
1508void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev)
1509{
1510 rtnl_lock();
1511 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1512 slsi_netif_remove_locked(sdev, dev);
1513 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1514 rtnl_unlock();
1515}
1516
1517void slsi_netif_remove_all(struct slsi_dev *sdev)
1518{
1519 int i;
1520
1521 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1522 rtnl_lock();
1523 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1524 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1525 if (sdev->netdev[i])
1526 slsi_netif_remove_locked(sdev, sdev->netdev[i]);
1527 rcu_assign_pointer(sdev->netdev_ap, NULL);
1528 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1529 rtnl_unlock();
1530}
1531
1532void slsi_netif_deinit(struct slsi_dev *sdev)
1533{
1534 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1535 slsi_netif_remove_all(sdev);
1536}
1537
1538#ifndef CONFIG_ARM
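/* Called from ndo_open: reset every TCP ACK suppression record, initialise
 * its flush timer and clear the per-netdev suppression statistics.
 */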
1539static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev)
1540{
1541 int index;
1542 struct netdev_vif *ndev_vif = netdev_priv(dev);
1543 struct slsi_tcp_ack_s *tcp_ack;
1544
1545 ndev_vif->last_tcp_ack = NULL;
1546 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1547 tcp_ack = &ndev_vif->ack_suppression[index];
1548 tcp_ack->dport = 0;
1549 tcp_ack->daddr = 0;
1550 tcp_ack->sport = 0;
1551 tcp_ack->saddr = 0;
1552 tcp_ack->ack_seq = 0;
1553 tcp_ack->count = 0;
1554 tcp_ack->max = 0;
1555 tcp_ack->age = 0;
1556 skb_queue_head_init(&tcp_ack->list);
1557#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1558 timer_setup(&tcp_ack->timer, slsi_netif_tcp_ack_suppression_timeout, 0);
1559#else
1560 tcp_ack->timer.function = slsi_netif_tcp_ack_suppression_timeout;
1561 tcp_ack->timer.data = (unsigned long)tcp_ack;
1562 init_timer(&tcp_ack->timer);
1563#endif
1564 tcp_ack->state = 1;
1565 slsi_spinlock_create(&tcp_ack->lock);
1566 }
1567
1568 memset(&ndev_vif->tcp_ack_stats, 0, sizeof(struct slsi_tcp_ack_stats));
1569 return 0;
1570}
1571
1572static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev)
1573{
1574 int index;
1575 struct netdev_vif *ndev_vif = netdev_priv(dev);
1576 struct slsi_tcp_ack_s *tcp_ack;
1577
1578 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
1579 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1580 tcp_ack = &ndev_vif->ack_suppression[index];
1581 del_timer_sync(&tcp_ack->timer);
1582 slsi_spinlock_lock(&tcp_ack->lock);
1583 tcp_ack->state = 0;
1584 skb_queue_purge(&tcp_ack->list);
1585 slsi_spinlock_unlock(&tcp_ack->lock);
1586 }
1587 ndev_vif->last_tcp_ack = NULL;
1588 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
1589 return 0;
1590}
1591
1592#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1593static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t)
1594#else
1595static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data)
1596#endif
1597{
1598#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1599 struct slsi_tcp_ack_s *tcp_ack = from_timer(tcp_ack, t, timer);
1600#else
1601 struct slsi_tcp_ack_s *tcp_ack = (struct slsi_tcp_ack_s *)data;
1602#endif
1603 struct sk_buff *skb;
1604 struct netdev_vif *ndev_vif;
1605 struct slsi_dev *sdev;
1606 int r;
1607
1608 if (!tcp_ack)
1609 return;
1610
1611 if (!tcp_ack->state)
1612 return;
1613
1614 slsi_spinlock_lock(&tcp_ack->lock);
1615 while ((skb = skb_dequeue(&tcp_ack->list)) != 0) {
1616 tcp_ack->count = 0;
1617
1618 if (!skb->dev) {
1619 kfree_skb(skb);
1620 slsi_spinlock_unlock(&tcp_ack->lock);
1621 return;
1622 }
1623 ndev_vif = netdev_priv(skb->dev);
1624 sdev = ndev_vif->sdev;
1625 ndev_vif->tcp_ack_stats.tack_timeout++;
1626
1627 r = slsi_tx_data(sdev, skb->dev, skb);
1628 if (r == 0) {
1629 ndev_vif->tcp_ack_stats.tack_sent++;
1630 tcp_ack->last_sent = ktime_get();
1631 } else if (r == -ENOSPC) {
1632 ndev_vif->tcp_ack_stats.tack_dropped++;
1633 slsi_kfree_skb(skb);
1634 } else {
1635 ndev_vif->tcp_ack_stats.tack_dropped++;
1636 }
1637 }
1638 slsi_spinlock_unlock(&tcp_ack->lock);
1639}
1640
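/* Walk the TCP option list of the given segment and return the value of the
 * requested option (MSS value, window scale shift, or 1 if the SACK option is
 * present); returns 0 if the option is absent or the option list is malformed.
 */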
1641static int slsi_netif_tcp_ack_suppression_option(struct sk_buff *skb, u32 option)
1642{
1643 unsigned char *options;
1644 u32 optlen = 0, len = 0;
1645
1646 if (tcp_hdr(skb)->doff > 5)
1647 optlen = (tcp_hdr(skb)->doff - 5) * 4;
1648
1649 options = ((u8 *)tcp_hdr(skb)) + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET;
1650
1651 while (optlen > 0) {
1652 switch (options[0]) {
1653 case TCP_ACK_SUPPRESSION_OPTION_EOL:
1654 return 0;
1655 case TCP_ACK_SUPPRESSION_OPTION_NOP:
1656 len = 1;
1657 break;
1658 case TCP_ACK_SUPPRESSION_OPTION_MSS:
1659 if (option == TCP_ACK_SUPPRESSION_OPTION_MSS)
1660 return ((options[2] << 8) | options[3]);
1661 len = options[1];
1662 break;
1663 case TCP_ACK_SUPPRESSION_OPTION_WINDOW:
1664 if (option == TCP_ACK_SUPPRESSION_OPTION_WINDOW)
1665 return options[2];
1666 len = 1;
1667 break;
1668 case TCP_ACK_SUPPRESSION_OPTION_SACK:
1669 if (option == TCP_ACK_SUPPRESSION_OPTION_SACK)
1670 return 1;
1671 len = options[1];
1672 break;
1673 default:
1674 len = options[1];
1675 break;
1676 }
1677		/* if the length field of a TCP option is 0, or greater than the
1678		 * remaining options length, the options are malformed; return here
1679		 */
1680 if ((len == 0) || (len > optlen)) {
1681 SLSI_DBG_HEX_NODEV(SLSI_TX, skb->data, skb->len < 128 ? skb->len : 128, "SKB:\n");
1682 return 0;
1683 }
1684 optlen -= len;
1685 options += len;
1686 }
1687 return 0;
1688}
1689
1690static void slsi_netif_tcp_ack_suppression_syn(struct net_device *dev, struct sk_buff *skb)
1691{
1692 struct netdev_vif *ndev_vif = netdev_priv(dev);
1693 struct slsi_tcp_ack_s *tcp_ack;
1694 int index;
1695
1696 SLSI_NET_DBG2(dev, SLSI_TX, "\n");
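	/* Find a record slot for this new connection, first reclaiming any slot that
	 * has been idle for longer than the unused-record timeout.
	 */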
1697 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1698 tcp_ack = &ndev_vif->ack_suppression[index];
1699 slsi_spinlock_lock(&tcp_ack->lock);
1700
1701 if (!tcp_ack->state) {
1702 slsi_spinlock_unlock(&tcp_ack->lock);
1703 return;
1704 }
1705 /* Recover old/hung/unused record. */
1706 if (tcp_ack->daddr) {
1707 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT * 1000) {
1708 SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
1709 skb_queue_purge(&tcp_ack->list);
1710 tcp_ack->dport = 0;
1711 tcp_ack->sport = 0;
1712 tcp_ack->daddr = 0;
1713 tcp_ack->saddr = 0;
1714 tcp_ack->count = 0;
1715 tcp_ack->ack_seq = 0;
1716 del_timer(&tcp_ack->timer);
1717 }
1718 }
1719
1720 if (tcp_ack->daddr == 0) {
1721 SLSI_NET_DBG2(dev, SLSI_TX, "add at %d (%pI4.%d > %pI4.%d)\n", index, &ip_hdr(skb)->saddr, ntohs(tcp_hdr(skb)->source), &ip_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest));
1722 tcp_ack->daddr = ip_hdr(skb)->daddr;
1723 tcp_ack->saddr = ip_hdr(skb)->saddr;
1724 tcp_ack->dport = tcp_hdr(skb)->dest;
1725 tcp_ack->sport = tcp_hdr(skb)->source;
1726 tcp_ack->count = 0;
1727 tcp_ack->ack_seq = 0;
1728 tcp_ack->slow_start_count = 0;
1729 tcp_ack->tcp_slow_start = true;
1730 if (tcp_ack_suppression_monitor) {
1731 tcp_ack->max = 0;
1732 tcp_ack->age = 0;
1733 } else {
1734 tcp_ack->max = tcp_ack_suppression_max;
1735 tcp_ack->age = tcp_ack_suppression_timeout;
1736 }
1737 tcp_ack->last_sent = ktime_get();
1738
1739 if (tcp_ack_suppression_monitor) {
1740 tcp_ack->last_sample_time = ktime_get();
1741 tcp_ack->last_ack_seq = 0;
1742 tcp_ack->last_tcp_rate = 0;
1743 tcp_ack->num_bytes = 0;
1744 tcp_ack->hysteresis = 0;
1745 }
1746#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1747 tcp_ack->stream_id = index;
1748#endif
1749			/* read and validate the window scaling multiplier; RFC 7323 caps the shift count at 14, so larger values disable scaling */
1750 tcp_ack->window_multiplier = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_WINDOW);
1751 if (tcp_ack->window_multiplier > 14)
1752 tcp_ack->window_multiplier = 0;
1753 tcp_ack->mss = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_MSS);
1754 SLSI_NET_DBG2(dev, SLSI_TX, "options: mss:%u, window:%u\n", tcp_ack->mss, tcp_ack->window_multiplier);
1755 SCSC_HIP4_SAMPLER_TCP_SYN(ndev_vif->sdev->minor_prof, index, tcp_ack->mss);
1756 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, index, be32_to_cpu(tcp_hdr(skb)->seq));
1757 slsi_spinlock_unlock(&tcp_ack->lock);
1758 return;
1759 }
1760 slsi_spinlock_unlock(&tcp_ack->lock);
1761 }
1762}
1763
1764static void slsi_netif_tcp_ack_suppression_fin(struct net_device *dev, struct sk_buff *skb)
1765{
1766 struct netdev_vif *ndev_vif = netdev_priv(dev);
1767 struct slsi_tcp_ack_s *tcp_ack;
1768 int index;
1769
1770 SLSI_NET_DBG2(dev, SLSI_TX, "\n");
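	/* On FIN, locate the record for this connection, purge any cached ACKs and release the slot. */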
1771 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1772 tcp_ack = &ndev_vif->ack_suppression[index];
1773 slsi_spinlock_lock(&tcp_ack->lock);
1774
1775 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1776 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1777 SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
1778 skb_queue_purge(&tcp_ack->list);
1779 tcp_ack->dport = 0;
1780 tcp_ack->sport = 0;
1781 tcp_ack->daddr = 0;
1782 tcp_ack->saddr = 0;
1783 tcp_ack->count = 0;
1784 tcp_ack->ack_seq = 0;
1785
1786 if (tcp_ack_suppression_monitor) {
1787 tcp_ack->last_ack_seq = 0;
1788 tcp_ack->last_tcp_rate = 0;
1789 tcp_ack->num_bytes = 0;
1790 tcp_ack->hysteresis = 0;
1791 }
1792
1793 del_timer(&tcp_ack->timer);
1794#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1795 tcp_ack->stream_id = 0;
1796#endif
1797 SCSC_HIP4_SAMPLER_TCP_FIN(ndev_vif->sdev->minor_prof, index);
1798 slsi_spinlock_unlock(&tcp_ack->lock);
1799 return;
1800 }
1801 slsi_spinlock_unlock(&tcp_ack->lock);
1802 }
1803}
1804
1805static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb)
1806{
1807 struct netdev_vif *ndev_vif = netdev_priv(dev);
1808 int index, found;
1809 struct slsi_tcp_ack_s *tcp_ack;
1810 int forward_now = 0, flush = 0;
1811	struct sk_buff *cskb = NULL;
1812 u32 tcp_recv_window_size = 0;
1813
1814 if (tcp_ack_suppression_disable)
1815 return skb;
1816
1817 if (tcp_ack_suppression_disable_2g && !SLSI_IS_VIF_CHANNEL_5G(ndev_vif))
1818 return skb;
1819
1820	/* For AP type VIFs (AP or P2P GO), check whether the packet is locally generated or forwarded
1821	 * intra-BSS. Intra-BSS packets do not have their IP and TCP headers set, so return the SKB as-is.
1822	 */
1823 if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (compare_ether_addr(eth_hdr(skb)->h_source, dev->dev_addr) != 0))
1824 return skb;
1825
1826	/* Only IPv4 TCP ACK segments are candidates for suppression; return any SKB that doesn't match. */
1827 if (be16_to_cpu(eth_hdr(skb)->h_proto) != ETH_P_IP)
1828 return skb;
1829 if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1830 return skb;
1831 if (!skb_transport_header_was_set(skb))
1832 return skb;
1833 if (tcp_hdr(skb)->syn) {
1834 slsi_netif_tcp_ack_suppression_syn(dev, skb);
1835 return skb;
1836 }
1837 if (tcp_hdr(skb)->fin) {
1838 slsi_netif_tcp_ack_suppression_fin(dev, skb);
1839 return skb;
1840 }
1841 if (!tcp_hdr(skb)->ack)
1842 return skb;
1843 if (tcp_hdr(skb)->rst)
1844 return skb;
1845 if (tcp_hdr(skb)->urg)
1846 return skb;
1847
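	/* From here on the frame is a pure IPv4 TCP ACK; look up the per-connection suppression record. */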
1848 ndev_vif->tcp_ack_stats.tack_acks++;
1849 /* If we find a record, leave the spinlock taken until the end of the function. */
1850 found = 0;
1851 if (ndev_vif->last_tcp_ack) {
1852 tcp_ack = ndev_vif->last_tcp_ack;
1853 slsi_spinlock_lock(&tcp_ack->lock);
1854 if (!tcp_ack->state) {
1855 slsi_spinlock_unlock(&tcp_ack->lock);
1856 ndev_vif->tcp_ack_stats.tack_sent++;
1857 SLSI_ERR_NODEV("last_tcp_ack record not enabled\n");
1858 return skb;
1859 }
1860 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1861 (tcp_ack->sport == tcp_hdr(skb)->source) &&
1862 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1863 found = 1;
1864 ndev_vif->tcp_ack_stats.tack_lastrecord++;
1865 } else {
1866 slsi_spinlock_unlock(&tcp_ack->lock);
1867 }
1868 }
1869 if (found == 0) {
1870 /* Search for an existing record on this connection. */
1871 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1872 tcp_ack = &ndev_vif->ack_suppression[index];
1873
1874 slsi_spinlock_lock(&tcp_ack->lock);
1875
1876 if (!tcp_ack->state) {
1877 slsi_spinlock_unlock(&tcp_ack->lock);
1878 ndev_vif->tcp_ack_stats.tack_sent++;
1879 SLSI_ERR_NODEV("tcp_ack record %d not enabled\n", index);
1880 return skb;
1881 }
1882 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1883 (tcp_ack->sport == tcp_hdr(skb)->source) &&
1884 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1885 found = 1;
1886 ndev_vif->tcp_ack_stats.tack_searchrecord++;
1887 break;
1888 }
1889 slsi_spinlock_unlock(&tcp_ack->lock);
1890 }
1891 if (found == 0) {
1892			/* No record found, so the ACK cannot be suppressed; send it as-is. */
1893 ndev_vif->tcp_ack_stats.tack_norecord++;
1894 ndev_vif->tcp_ack_stats.tack_sent++;
1895 return skb;
1896 }
1897 ndev_vif->last_tcp_ack = tcp_ack;
1898 }
1899
1900 /* If it is a DUP Ack, send straight away without flushing the cache. */
1901 if (be32_to_cpu(tcp_hdr(skb)->ack_seq) < tcp_ack->ack_seq) {
1902		/* the signed difference handles sequence-number wrap-around: negative means a genuinely older (duplicate) ACK */
1903 if (((s32)((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - (u32)tcp_ack->ack_seq)) < 0) {
1904 ndev_vif->tcp_ack_stats.tack_dacks++;
1905 ndev_vif->tcp_ack_stats.tack_sent++;
1906 slsi_spinlock_unlock(&tcp_ack->lock);
1907 return skb;
1908 }
1909 }
1910
1911 /* Has data, forward straight away. */
1912 if (be16_to_cpu(ip_hdr(skb)->tot_len) > ((ip_hdr(skb)->ihl * 4) + (tcp_hdr(skb)->doff * 4))) {
1913 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr(skb)->seq));
1914 SCSC_HIP4_SAMPLER_TCP_CWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, (skb->sk) ? tcp_sk(skb->sk)->snd_cwnd : 0);
1915 SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, sysctl_tcp_wmem[2]);
1916 ndev_vif->tcp_ack_stats.tack_hasdata++;
1917 forward_now = 1;
1918 goto _forward_now;
1919 }
1920
1921 /* PSH flag set, forward straight away. */
1922 if (tcp_hdr(skb)->psh) {
1923 ndev_vif->tcp_ack_stats.tack_psh++;
1924 forward_now = 1;
1925 goto _forward_now;
1926 }
1927
1928	/* On ECN-capable connections the receiver sets ECE after receiving a segment marked
1929	 * Congestion Experienced (CE). ECE-marked ACKs must be forwarded immediately for ECN to work.
1930	 */
1931 if (tcp_hdr(skb)->ece) {
1932 ndev_vif->tcp_ack_stats.tack_ece++;
1933 forward_now = 1;
1934 goto _forward_now;
1935 }
1936
1937 if (tcp_ack_suppression_monitor) {
1938		/* Measure the throughput of the TCP stream by accumulating the bytes acknowledged by
1939		 * each ACK over a sampling period, then apply a matching degree of ACK suppression.
1940		 */
1941 if (tcp_ack->last_ack_seq)
1942 tcp_ack->num_bytes += ((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - tcp_ack->last_ack_seq);
1943
1944 tcp_ack->last_ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
1945 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sample_time)) > tcp_ack_suppression_monitor_interval) {
1946 u16 acks_max;
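			/* bytes acked * 8 = bits; dividing by (interval in ms * 1000) gives an approximate rate in Mbps */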
1947 u32 tcp_rate = ((tcp_ack->num_bytes * 8) / (tcp_ack_suppression_monitor_interval * 1000));
1948
1949 SLSI_NET_DBG2(dev, SLSI_TX, "hysteresis:%u total_bytes:%llu rate:%u Mbps\n",
1950 tcp_ack->hysteresis, tcp_ack->num_bytes, tcp_rate);
1951
1952			/* hysteresis: change only if the variation from the last value exceeds the threshold */
1953 if ((abs(tcp_rate - tcp_ack->last_tcp_rate)) > tcp_ack->hysteresis) {
1954 if (tcp_rate >= tcp_ack_suppression_rate_very_high) {
1955 tcp_ack->max = tcp_ack_suppression_rate_very_high_acks;
1956 tcp_ack->age = tcp_ack_suppression_rate_very_high_timeout;
1957 } else if (tcp_rate >= tcp_ack_suppression_rate_high) {
1958 tcp_ack->max = tcp_ack_suppression_rate_high_acks;
1959 tcp_ack->age = tcp_ack_suppression_rate_high_timeout;
1960 } else if (tcp_rate >= tcp_ack_suppression_rate_low) {
1961 tcp_ack->max = tcp_ack_suppression_rate_low_acks;
1962 tcp_ack->age = tcp_ack_suppression_rate_low_timeout;
1963 } else {
1964 tcp_ack->max = 0;
1965 tcp_ack->age = 0;
1966 }
1967
1968				/* Do not suppress ACKs covering more than 20% of the receiver
1969				 * window size; doing so can increase the RTT and lower the
1970				 * transmission rate at the TCP sender.
1971				 */
1972 if (tcp_ack->window_multiplier)
1973					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) << tcp_ack->window_multiplier;
1974 else
1975 tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
1976 SCSC_HIP4_SAMPLER_TCP_RWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, tcp_recv_window_size);
1977
1978				acks_max = tcp_ack->mss ? (tcp_recv_window_size / 5) / (2 * tcp_ack->mss) : 0;
1979 if (tcp_ack->max > acks_max)
1980 tcp_ack->max = acks_max;
1981 }
1982 tcp_ack->hysteresis = tcp_rate / 5; /* 20% hysteresis */
1983 tcp_ack->last_tcp_rate = tcp_rate;
1984 tcp_ack->num_bytes = 0;
1985 tcp_ack->last_sample_time = ktime_get();
1986 }
1987 }
1988
1989 /* Do not suppress Selective Acks. */
1990 if (slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_SACK)) {
1991 ndev_vif->tcp_ack_stats.tack_sacks++;
1992
1993		/* A TCP selective ACK indicates segment loss. The TCP sender
1994		 * may shrink its congestion window and limit the number of
1995		 * segments it sends before waiting for an ACK.
1996		 * It is therefore best to switch off ACK suppression for a
1997		 * while (approximated here by restarting the
1998		 * tcp_ack_suppression_slow_start_acks count) and send as many
1999		 * ACKs as possible so the cwnd can grow again at the sender.
2000		 */
2001 tcp_ack->slow_start_count = 0;
2002 tcp_ack->tcp_slow_start = true;
2003 forward_now = 1;
2004 goto _forward_now;
2005 }
2006
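	/* ACK number unchanged from the last one seen: a duplicate ACK, forward it immediately. */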
2007 if (be32_to_cpu(tcp_hdr(skb)->ack_seq) == tcp_ack->ack_seq) {
2008 ndev_vif->tcp_ack_stats.tack_dacks++;
2009 forward_now = 1;
2010 goto _forward_now;
2011 }
2012
2013	/* When a TCP connection is established, wait until a number of
2014	 * ACKs have been sent before applying the suppression rules, so
2015	 * that the cwnd can grow at its normal rate at the TCP sender.
2016	 */
2017 if (tcp_ack->tcp_slow_start) {
2018 tcp_ack->slow_start_count++;
2019 if (tcp_ack->slow_start_count >= tcp_ack_suppression_slow_start_acks) {
2020 tcp_ack->slow_start_count = 0;
2021 tcp_ack->tcp_slow_start = false;
2022 }
2023 forward_now = 1;
2024 goto _forward_now;
2025 }
2026
2027 /* do not suppress if so decided by the TCP monitor */
2028 if (tcp_ack_suppression_monitor && (!tcp_ack->max || !tcp_ack->age)) {
2029 forward_now = 1;
2030 goto _forward_now;
2031 }
2032
2033	/* do not suppress delayed ACKs that acknowledge more than two
2034	 * maximum-size TCP segments
2035	 */
2036 if (((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq)) - (tcp_ack->ack_seq) > (2 * tcp_ack->mss)) {
2037 ndev_vif->tcp_ack_stats.tack_delay_acks++;
2038 forward_now = 1;
2039 goto _forward_now;
2040 }
2041
2042	/* Do not suppress unless the receive window is large
2043	 * enough.
2044	 * With a small receive window the cwnd cannot grow much,
2045	 * so suppressing ACKs has a negative impact on the sender's
2046	 * rate because it inflates the round-trip time measured at
2047	 * the sender.
2048	 */
2049 if (!tcp_ack_suppression_monitor) {
2050 if (tcp_ack->window_multiplier)
2051			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) << tcp_ack->window_multiplier;
2052 else
2053 tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
2054 if (tcp_recv_window_size < tcp_ack_suppression_rcv_window * 1024) {
2055 ndev_vif->tcp_ack_stats.tack_low_window++;
2056 forward_now = 1;
2057 goto _forward_now;
2058 }
2059 }
2060
2061 if (!tcp_ack_suppression_monitor && ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= tcp_ack->age) {
2062 ndev_vif->tcp_ack_stats.tack_ktime++;
2063 forward_now = 1;
2064 goto _forward_now;
2065 }
2066
2067	/* Cache is empty: hold this ACK, arm the flush timer and return nothing to transmit now. */
2068 if (!skb_queue_len(&tcp_ack->list)) {
2069 skb_queue_tail(&tcp_ack->list, skb);
2070 tcp_ack->count = 1;
2071 tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
2072 if (tcp_ack->age)
2073 mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
2074 slsi_spinlock_unlock(&tcp_ack->lock);
2075		return NULL;
2076 }
2077_forward_now:
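	/* Any ACK still cached is superseded by this one: drop it (counted as suppressed),
	 * queue the new ACK, then decide below whether the cache must be flushed now.
	 */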
2078 cskb = skb_dequeue(&tcp_ack->list);
2079 if (cskb) {
2080 if (tcp_ack_suppression_monitor && tcp_ack->age)
2081 mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
2082 ndev_vif->tcp_ack_stats.tack_suppressed++;
2083 slsi_kfree_skb(cskb);
2084 }
2085 skb_queue_tail(&tcp_ack->list, skb);
2086 tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
2087 tcp_ack->count++;
2088 if (forward_now) {
2089 flush = 1;
2090 } else {
2091 if (tcp_ack->count >= tcp_ack->max) {
2092 flush = 1;
2093 ndev_vif->tcp_ack_stats.tack_max++;
2094 }
2095 }
2096 if (!flush) {
2097 slsi_spinlock_unlock(&tcp_ack->lock);
2098		return NULL;
2099 }
2100 /* Flush the cache. */
2101 cskb = skb_dequeue(&tcp_ack->list);
2102 tcp_ack->count = 0;
2103
2104 if (tcp_ack->age)
2105 del_timer(&tcp_ack->timer);
2106
2107 tcp_ack->last_sent = ktime_get();
2108
2109 slsi_spinlock_unlock(&tcp_ack->lock);
2110 ndev_vif->tcp_ack_stats.tack_sent++;
2111 return cskb;
2112}
2113#endif