1/*
2 *
3 * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
4 *
5 ****************************************************************************/
6
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/rtnetlink.h>
10#include <net/sch_generic.h>
11#include <linux/if_ether.h>
12#include <scsc/scsc_logring.h>
13
14#include "debug.h"
15#include "netif.h"
16#include "dev.h"
17#include "mgt.h"
18#include "scsc_wifi_fcq.h"
19#include "ioctl.h"
20#include "mib.h"
21#include "hip4_sampler.h"
22
23#define IP4_OFFSET_TO_TOS_FIELD 1
24#define IP6_OFFSET_TO_TC_FIELD_0 0
25#define IP6_OFFSET_TO_TC_FIELD_1 1
26#define FIELD_TO_DSCP 2
27
28/* DSCP */
29/* (RFC5865) */
30#define DSCP_VA 0x2C
31/* (RFC3246) */
32#define DSCP_EF 0x2E
33/* (RFC2597) */
34#define DSCP_AF43 0x26
35#define DSCP_AF42 0x24
36#define DSCP_AF41 0x22
37#define DSCP_AF33 0x1E
38#define DSCP_AF32 0x1C
39#define DSCP_AF31 0x1A
40#define DSCP_AF23 0x16
41#define DSCP_AF22 0x14
42#define DSCP_AF21 0x12
43#define DSCP_AF13 0x0E
44#define DSCP_AF12 0x0C
45#define DSCP_AF11 0x0A
46/* (RFC2474) */
47#define CS7 0x38
48#define CS6 0x30
49#define CS5 0x28
50#define CS4 0x20
51#define CS3 0x18
52#define CS2 0x10
53#define CS0 0x00
54/* (RFC3662) */
55#define CS1 0x08
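
/* These DSCP codepoints feed slsi_get_priority_from_tos_dscp() below,
 * which maps the DSCP field of outgoing IPv4/IPv6 frames to an 802.11
 * user priority (FAPI_PRIORITY_QOS_UP0..UP7); on Android 10 and later
 * the RFC 8325 based table is used, otherwise the legacy table.
 */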
56
57#ifndef CONFIG_ARM
58static bool tcp_ack_suppression_disable;
59module_param(tcp_ack_suppression_disable, bool, S_IRUGO | S_IWUSR);
60MODULE_PARM_DESC(tcp_ack_suppression_disable, "Disable TCP ack suppression feature");
61
62static bool tcp_ack_suppression_disable_2g;
63module_param(tcp_ack_suppression_disable_2g, bool, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(tcp_ack_suppression_disable_2g, "Disable TCP ack suppression for only 2.4GHz band");
65
66static bool tcp_ack_suppression_monitor = true;
67module_param(tcp_ack_suppression_monitor, bool, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(tcp_ack_suppression_monitor, "TCP ack suppression throughput monitor: Y: enable (default), N: disable");
69
70static uint tcp_ack_suppression_monitor_interval = 500;
71module_param(tcp_ack_suppression_monitor_interval, uint, S_IRUGO | S_IWUSR);
72MODULE_PARM_DESC(tcp_ack_suppression_monitor_interval, "Sampling interval (in ms) for throughput monitor");
73
74static uint tcp_ack_suppression_timeout = 16;
75module_param(tcp_ack_suppression_timeout, uint, S_IRUGO | S_IWUSR);
76MODULE_PARM_DESC(tcp_ack_suppression_timeout, "Timeout (in ms) before cached TCP ack is flushed to tx");
77
78static uint tcp_ack_suppression_max = 16;
79module_param(tcp_ack_suppression_max, uint, S_IRUGO | S_IWUSR);
80MODULE_PARM_DESC(tcp_ack_suppression_max, "Maximum number of TCP acks suppressed before latest flushed to tx");
81
82static uint tcp_ack_suppression_rate_very_high = 100;
83module_param(tcp_ack_suppression_rate_very_high, int, S_IRUGO | S_IWUSR);
84MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high, "Rate (in Mbps) to apply very high degree of suppression");
85
86static uint tcp_ack_suppression_rate_very_high_timeout = 4;
87module_param(tcp_ack_suppression_rate_very_high_timeout, int, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in very high rate");
89
90static uint tcp_ack_suppression_rate_very_high_acks = 20;
91module_param(tcp_ack_suppression_rate_very_high_acks, uint, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_acks, "Maximum number of TCP acks suppressed before latest flushed in very high rate");
93
94static uint tcp_ack_suppression_rate_high = 20;
95module_param(tcp_ack_suppression_rate_high, int, S_IRUGO | S_IWUSR);
96MODULE_PARM_DESC(tcp_ack_suppression_rate_high, "Rate (in Mbps) to apply high degree of suppression");
97
98static uint tcp_ack_suppression_rate_high_timeout = 4;
99module_param(tcp_ack_suppression_rate_high_timeout, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(tcp_ack_suppression_rate_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in high rate");
101
102static uint tcp_ack_suppression_rate_high_acks = 16;
103module_param(tcp_ack_suppression_rate_high_acks, uint, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(tcp_ack_suppression_rate_high_acks, "Maximum number of TCP acks suppressed before latest flushed in high rate");
105
106static uint tcp_ack_suppression_rate_low = 1;
107module_param(tcp_ack_suppression_rate_low, int, S_IRUGO | S_IWUSR);
108MODULE_PARM_DESC(tcp_ack_suppression_rate_low, "Rate (in Mbps) to apply low degree of suppression");
109
110static uint tcp_ack_suppression_rate_low_timeout = 4;
111module_param(tcp_ack_suppression_rate_low_timeout, int, S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(tcp_ack_suppression_rate_low_timeout, "Timeout (in ms) before cached TCP ack is flushed in low rate");
113
114static uint tcp_ack_suppression_rate_low_acks = 10;
115module_param(tcp_ack_suppression_rate_low_acks, uint, S_IRUGO | S_IWUSR);
116MODULE_PARM_DESC(tcp_ack_suppression_rate_low_acks, "Maximum number of TCP acks suppressed before latest flushed in low rate");
117
118static uint tcp_ack_suppression_slow_start_acks = 512;
119module_param(tcp_ack_suppression_slow_start_acks, uint, S_IRUGO | S_IWUSR);
120MODULE_PARM_DESC(tcp_ack_suppression_slow_start_acks, "Maximum number of Acks sent in slow start");
121
122static uint tcp_ack_suppression_rcv_window = 128;
123module_param(tcp_ack_suppression_rcv_window, uint, S_IRUGO | S_IWUSR);
124MODULE_PARM_DESC(tcp_ack_suppression_rcv_window, "Receive window size (in unit of Kbytes) that triggers Ack suppression");
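
/* When the throughput monitor is enabled, the transmit rate is sampled
 * every tcp_ack_suppression_monitor_interval ms and the low/high/very-high
 * rate thresholds above select which *_timeout and *_acks limits are used
 * for flushing cached ACKs.
 */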
125
126#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
127static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t);
128#else
129static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data);
130#endif
131static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev);
132static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev);
133static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb);
134#endif
135
136#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
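/* Derive locally administered random MAC addresses for the NAN management
 * interface (NMI) and the NAN data interfaces (NDIs) from the hardware
 * address: the lower three octets are randomised, the local bit is set,
 * the NMI additionally XORs its 4th octet with 0x80, and each NDI XORs
 * its 6th octet with an incrementing value (1..15).
 */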
137void slsi_net_randomize_nmi_ndi(struct slsi_dev *sdev)
138{
139 int exor_base = 1, exor_byte = 5, i;
140 u8 random_mac[ETH_ALEN];
141
142 /* Randomize mac address */
143 SLSI_ETHER_COPY(random_mac, sdev->hw_addr);
144 /* If the random bytes happen to match the actual bytes in
145 * hw_addr, generate them once more; a second collision is
146 * unlikely enough to ignore.
147 */
148 slsi_get_random_bytes(&random_mac[3], 3);
149 if (!memcmp(&random_mac[3], &sdev->hw_addr[3], 3))
150 slsi_get_random_bytes(&random_mac[3], 3);
151 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_NAN], random_mac);
152 /* Set the local bit */
153 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][0] |= 0x02;
154 /* EXOR 4th byte with 0x80 */
155 sdev->netdev_addresses[SLSI_NET_INDEX_NAN][3] ^= 0x80;
156 for (i = SLSI_NAN_DATA_IFINDEX_START; i < CONFIG_SCSC_WLAN_MAX_INTERFACES + 1; i++) {
157 SLSI_ETHER_COPY(sdev->netdev_addresses[i], random_mac);
158 sdev->netdev_addresses[i][0] |= 0x02;
159 sdev->netdev_addresses[i][exor_byte] ^= exor_base;
160 exor_base++;
161 /* currently supports up to 15 MAC addresses for the NAN
162 * data interfaces
163 */
164 if (exor_base > 0xf)
165 break;
166 }
167}
168#endif
169
170/* Net Device callback operations */
171static int slsi_net_open(struct net_device *dev)
172{
173 struct netdev_vif *ndev_vif = netdev_priv(dev);
174 struct slsi_dev *sdev = ndev_vif->sdev;
175 int err;
176 unsigned char dev_addr_zero_check[ETH_ALEN];
177
178 if (WARN_ON(ndev_vif->is_available))
179 return -EINVAL;
180
181 if (sdev->mlme_blocked) {
182 SLSI_NET_WARN(dev, "Fail: called when MLME in blocked state\n");
183 return -EIO;
184 }
185
186 slsi_wakelock(&sdev->wlan_wl);
187
188 /* Check whether RF test mode has been requested. */
189 slsi_check_rf_test_mode();
190
191 err = slsi_start(sdev);
192 if (WARN_ON(err)) {
193 slsi_wakeunlock(&sdev->wlan_wl);
194 return err;
195 }
196
197 if (!sdev->netdev_up_count) {
198 slsi_get_hw_mac_address(sdev, sdev->hw_addr);
199 /* Assign Addresses */
200 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_WLAN], sdev->hw_addr);
201
202 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2P], sdev->hw_addr);
203 /* Set the local bit */
204 sdev->netdev_addresses[SLSI_NET_INDEX_P2P][0] |= 0x02;
205
206 SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN], sdev->hw_addr);
207 /* Set the local bit */
208 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][0] |= 0x02;
209 /* EXOR 5th byte with 0x80 */
210 sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][4] ^= 0x80;
211#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4 && defined(CONFIG_SCSC_WIFI_NAN_ENABLE)
212 slsi_net_randomize_nmi_ndi(sdev);
213#endif
214 sdev->initial_scan = true;
215 }
216
217 memset(dev_addr_zero_check, 0, ETH_ALEN);
218 if (!memcmp(dev->dev_addr, dev_addr_zero_check, ETH_ALEN)) {
219#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
220 if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
221 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
222 else
223 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
224#else
225 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
226#endif
227 }
228 SLSI_ETHER_COPY(dev->perm_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
229 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
230#ifdef CONFIG_SCSC_WLAN_DEBUG
231 if (ndev_vif->iftype == NL80211_IFTYPE_MONITOR) {
232 err = slsi_start_monitor_mode(sdev, dev);
233 if (WARN_ON(err)) {
234 slsi_wakeunlock(&sdev->wlan_wl);
235 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
236 return err;
237 }
238 }
239#endif
240 SLSI_NET_INFO(dev, "ifnum:%d r:%d MAC:%pM\n", ndev_vif->ifnum, sdev->recovery_status, dev->dev_addr);
241 ndev_vif->is_available = true;
242 sdev->netdev_up_count++;
243
244#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
245 reinit_completion(&ndev_vif->sig_wait.completion);
246#else
247 INIT_COMPLETION(ndev_vif->sig_wait.completion);
248#endif
249#ifndef CONFIG_ARM
250 slsi_netif_tcp_ack_suppression_start(dev);
251#endif
252
253#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
254 if (ndev_vif->ifnum >= SLSI_NAN_DATA_IFINDEX_START)
255 netif_carrier_on(dev);
256#endif
257 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
258
259 netif_tx_start_all_queues(dev);
260 slsi_wakeunlock(&sdev->wlan_wl);
261
262 /* The default power mode in host */
263 /* MIB PSID 2511 means unifiForceActive and a value of 1 means active */
264 if (slsi_is_rf_test_mode_enabled()) {
265 SLSI_NET_INFO(dev, "*#rf# rf test mode set is enabled.\n");
266 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAMING_ENABLED, 0);
267 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, 0);
268 slsi_set_mib_roam(sdev, NULL, 2511, 1);
269 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, 0);
270 }
271
272 return 0;
273}
274
275static int slsi_net_stop(struct net_device *dev)
276{
277 struct netdev_vif *ndev_vif = netdev_priv(dev);
278 struct slsi_dev *sdev = ndev_vif->sdev;
279
280 SLSI_NET_INFO(dev, "ifnum:%d r:%d\n", ndev_vif->ifnum, sdev->recovery_status);
281 slsi_wakelock(&sdev->wlan_wl);
282 netif_tx_stop_all_queues(dev);
283 sdev->initial_scan = false;
284
285 if (!ndev_vif->is_available) {
286 /* May have been taken out by the Chip going down */
287 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available\n");
288 slsi_wakeunlock(&sdev->wlan_wl);
289 return 0;
290 }
291#ifndef SLSI_TEST_DEV
292 if (!slsi_is_rf_test_mode_enabled() && !sdev->recovery_status) {
293 SLSI_NET_DBG1(dev, SLSI_NETDEV, "To user mode\n");
294 slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, -55);
295 }
296#endif
297#ifndef CONFIG_ARM
298 slsi_netif_tcp_ack_suppression_stop(dev);
299#endif
300 slsi_stop_net_dev(sdev, dev);
301
302 sdev->allow_switch_40_mhz = true;
303 sdev->allow_switch_80_mhz = true;
304 sdev->acs_channel_switched = false;
305 slsi_wakeunlock(&sdev->wlan_wl);
306 return 0;
307}
308
309/* This is called after the WE handlers */
310static int slsi_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
311{
312 SLSI_NET_DBG4(dev, SLSI_NETDEV, "IOCTL cmd:0x%.4x\n", cmd);
313
314 if (cmd == SIOCDEVPRIVATE + 2) { /* 0x89f0 + 2 from wpa_supplicant */
315 return slsi_ioctl(dev, rq, cmd);
316 }
317
318 return -EOPNOTSUPP;
319}
320
321static struct net_device_stats *slsi_net_get_stats(struct net_device *dev)
322{
323 struct netdev_vif *ndev_vif = netdev_priv(dev);
324
325 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
326 return &ndev_vif->stats;
327}
328
329#ifdef CONFIG_SCSC_USE_WMM_TOS
330static u16 slsi_get_priority_from_tos(u8 *frame, u16 proto)
331{
332 if (WARN_ON(!frame))
333 return FAPI_PRIORITY_QOS_UP0;
334
335 switch (proto) {
336 case ETH_P_IP: /* IPv4 */
337 return (u16)(((frame[IP4_OFFSET_TO_TOS_FIELD]) & 0xE0) >> 5);
338
339 case ETH_P_IPV6: /* IPv6 */
340 return (u16)((*frame & 0x0E) >> 1);
341
342 default:
343 return FAPI_PRIORITY_QOS_UP0;
344 }
345}
346
347#else
348static u16 slsi_get_priority_from_tos_dscp(u8 *frame, u16 proto)
349{
350 u8 dscp;
351
352 if (WARN_ON(!frame))
353 return FAPI_PRIORITY_QOS_UP0;
354
355 switch (proto) {
356 case ETH_P_IP: /* IPv4 */
357 dscp = frame[IP4_OFFSET_TO_TOS_FIELD] >> FIELD_TO_DSCP;
358 break;
359
360 case ETH_P_IPV6: /* IPv6 */
361 /* Get traffic class */
362 dscp = (((frame[IP6_OFFSET_TO_TC_FIELD_0] & 0x0F) << 4) |
363 ((frame[IP6_OFFSET_TO_TC_FIELD_1] & 0xF0) >> 4)) >> FIELD_TO_DSCP;
364 break;
365
366 default:
367 return FAPI_PRIORITY_QOS_UP0;
368 }
369/* DSCP table based in RFC8325 from Android 10 */
370#if (defined(ANDROID_VERSION) && ANDROID_VERSION >= 100000)
371 switch (dscp) {
372 case CS7:
373 return FAPI_PRIORITY_QOS_UP7;
374 case CS6:
375 case DSCP_EF:
376 case DSCP_VA:
377 return FAPI_PRIORITY_QOS_UP6;
378 case CS5:
379 return FAPI_PRIORITY_QOS_UP5;
380 case DSCP_AF41:
381 case DSCP_AF42:
382 case DSCP_AF43:
383 case CS4:
384 case DSCP_AF31:
385 case DSCP_AF32:
386 case DSCP_AF33:
387 case CS3:
388 return FAPI_PRIORITY_QOS_UP4;
389 case DSCP_AF21:
390 case DSCP_AF22:
391 case DSCP_AF23:
392 return FAPI_PRIORITY_QOS_UP3;
393 case CS2:
394 case DSCP_AF11:
395 case DSCP_AF12:
396 case DSCP_AF13:
397 case CS0:
398 return FAPI_PRIORITY_QOS_UP0;
399 case CS1:
400 return FAPI_PRIORITY_QOS_UP1;
401 default:
402 return FAPI_PRIORITY_QOS_UP0;
403 }
404#else
405 switch (dscp) {
406 case DSCP_EF:
407 case DSCP_VA:
408 return FAPI_PRIORITY_QOS_UP6;
409 case DSCP_AF43:
410 case DSCP_AF42:
411 case DSCP_AF41:
412 return FAPI_PRIORITY_QOS_UP5;
413 case DSCP_AF33:
414 case DSCP_AF32:
415 case DSCP_AF31:
416 case DSCP_AF23:
417 case DSCP_AF22:
418 case DSCP_AF21:
419 case DSCP_AF13:
420 case DSCP_AF12:
421 case DSCP_AF11:
422 return FAPI_PRIORITY_QOS_UP0;
423 case CS7:
424 return FAPI_PRIORITY_QOS_UP7;
425 case CS6:
426 return FAPI_PRIORITY_QOS_UP6;
427 case CS5:
428 return FAPI_PRIORITY_QOS_UP5;
429 case CS4:
430 return FAPI_PRIORITY_QOS_UP4;
431 case CS3:
432 return FAPI_PRIORITY_QOS_UP3;
433 case CS2:
434 return FAPI_PRIORITY_QOS_UP2;
435 case CS1:
436 return FAPI_PRIORITY_QOS_UP1;
437 case CS0:
438 return FAPI_PRIORITY_QOS_UP0;
439 default:
440 return FAPI_PRIORITY_QOS_UP0;
441 }
442#endif
443}
444
445#endif
446
447static bool slsi_net_downgrade_ac(struct net_device *dev, struct sk_buff *skb)
448{
449 SLSI_UNUSED_PARAMETER(dev);
450
451 switch (skb->priority) {
452 case 6:
453 case 7:
454 skb->priority = FAPI_PRIORITY_QOS_UP5; /* VO -> VI */
455 return true;
456 case 4:
457 case 5:
458 skb->priority = FAPI_PRIORITY_QOS_UP3; /* VI -> BE */
459 return true;
460 case 0:
461 case 3:
462 skb->priority = FAPI_PRIORITY_QOS_UP2; /* BE -> BK */
463 return true;
464 default:
465 return false;
466 }
467}
468
469static u8 slsi_net_up_to_ac_mapping(u8 priority)
470{
471 switch (priority) {
472 case FAPI_PRIORITY_QOS_UP6:
473 case FAPI_PRIORITY_QOS_UP7:
474 return BIT(FAPI_PRIORITY_QOS_UP6) | BIT(FAPI_PRIORITY_QOS_UP7);
475 case FAPI_PRIORITY_QOS_UP4:
476 case FAPI_PRIORITY_QOS_UP5:
477 return BIT(FAPI_PRIORITY_QOS_UP4) | BIT(FAPI_PRIORITY_QOS_UP5);
478 case FAPI_PRIORITY_QOS_UP0:
479 case FAPI_PRIORITY_QOS_UP3:
480 return BIT(FAPI_PRIORITY_QOS_UP0) | BIT(FAPI_PRIORITY_QOS_UP3);
481 default:
482 return BIT(FAPI_PRIORITY_QOS_UP1) | BIT(FAPI_PRIORITY_QOS_UP2);
483 }
484}
485
486enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority)
487{
488 switch (priority) {
489 case FAPI_PRIORITY_QOS_UP0:
490 case FAPI_PRIORITY_QOS_UP3:
491 return SLSI_TRAFFIC_Q_BE;
492 case FAPI_PRIORITY_QOS_UP1:
493 case FAPI_PRIORITY_QOS_UP2:
494 return SLSI_TRAFFIC_Q_BK;
495 case FAPI_PRIORITY_QOS_UP4:
496 case FAPI_PRIORITY_QOS_UP5:
497 return SLSI_TRAFFIC_Q_VI;
498 case FAPI_PRIORITY_QOS_UP6:
499 case FAPI_PRIORITY_QOS_UP7:
500 return SLSI_TRAFFIC_Q_VO;
501 default:
502 return SLSI_TRAFFIC_Q_BE;
503 }
504}
505
506int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids)
507{
508 switch (ac) {
509 case SLSI_TRAFFIC_Q_BE:
510 tids[0] = FAPI_PRIORITY_QOS_UP0;
511 tids[1] = FAPI_PRIORITY_QOS_UP3;
512 break;
513
514 case SLSI_TRAFFIC_Q_BK:
515 tids[0] = FAPI_PRIORITY_QOS_UP1;
516 tids[1] = FAPI_PRIORITY_QOS_UP2;
517 break;
518
519 case SLSI_TRAFFIC_Q_VI:
520 tids[0] = FAPI_PRIORITY_QOS_UP4;
521 tids[1] = FAPI_PRIORITY_QOS_UP5;
522 break;
523
524 case SLSI_TRAFFIC_Q_VO:
525 tids[0] = FAPI_PRIORITY_QOS_UP6;
526 tids[1] = FAPI_PRIORITY_QOS_UP7;
527 break;
528
529 default:
530 return -EINVAL;
531 }
532
533 return 0;
534}
535
536static void slsi_net_downgrade_pri(struct net_device *dev, struct slsi_peer *peer,
537 struct sk_buff *skb)
538{
539 /* In case we are a client, downgrade the AC if ACM is
540 * set and no TSPEC is established
541 */
542 while (unlikely(peer->wmm_acm & BIT(skb->priority)) &&
543 !(peer->tspec_established & slsi_net_up_to_ac_mapping(skb->priority))) {
544 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Downgrading from UP:%d\n", skb->priority);
545 if (!slsi_net_downgrade_ac(dev, skb))
546 break;
547 }
548 SLSI_NET_DBG4(dev, SLSI_NETDEV, "To UP:%d\n", skb->priority);
549}
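
/* ndo_select_queue: choose the netdev TX queue for an outgoing frame.
 * EAPOL/WAPI, ARP and DHCP frames go to the dedicated priority queue,
 * multicast/broadcast frames on an AP vif go to the per-AC multicast
 * queues, and all other frames map to the destination peer's per-AC
 * queue (after any WMM ACM downgrade), or to the discard queue when no
 * peer record exists.
 */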
550#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
551static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev, select_queue_fallback_t fallback)
552#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
553static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback)
554#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
555static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv)
556#else
557static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb)
558#endif
559{
560 struct netdev_vif *ndev_vif = netdev_priv(dev);
561 struct slsi_dev *sdev = ndev_vif->sdev;
562 u16 netif_q = 0;
563 struct ethhdr *ehdr = (struct ethhdr *)skb->data;
564 int proto = 0;
565 struct slsi_peer *peer;
566#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
567 (void)sb_dev;
568#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
569 (void)accel_priv;
570#endif
571#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
572 (void)fallback;
573#endif
574 SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
575
576 /* Defensive check for uninitialized mac header */
577 if (!skb_mac_header_was_set(skb))
578 skb_reset_mac_header(skb);
579
580 if (is_zero_ether_addr(ehdr->h_dest) || is_zero_ether_addr(ehdr->h_source)) {
581 SLSI_NET_WARN(dev, "invalid Ethernet addresses (dest:%pM,src:%pM)\n", ehdr->h_dest, ehdr->h_source);
582 SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
583 return SLSI_NETIF_Q_DISCARD;
584 }
585
586 proto = be16_to_cpu(eth_hdr(skb)->h_proto);
587
588 switch (proto) {
589 default:
590 /* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
591 break;
592 case ETH_P_PAE:
593 case ETH_P_WAI:
594 SLSI_NET_DBG3(dev, SLSI_TX, "EAP packet. Priority Queue Selected\n");
595 return SLSI_NETIF_Q_PRIORITY;
596 case ETH_P_ARP:
597 SLSI_NET_DBG3(dev, SLSI_TX, "ARP frame. Priority Queue Selected\n");
598 return SLSI_NETIF_Q_PRIORITY;
599 case ETH_P_IP:
600 if (slsi_is_dhcp_packet(skb->data) == SLSI_TX_IS_NOT_DHCP)
601 break;
602 SLSI_NET_DBG3(dev, SLSI_TX, "DHCP packet. Priority Queue Selected\n");
603 return SLSI_NETIF_Q_PRIORITY;
604 }
605
606 if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
607 /* MULTICAST/BROADCAST Queue is only used for AP */
608 if (is_multicast_ether_addr(ehdr->h_dest)) {
609 SLSI_NET_DBG3(dev, SLSI_TX, "Multicast AC queue will be selected\n");
610#ifdef CONFIG_SCSC_USE_WMM_TOS
611 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
612#else
613 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
614#endif
615 return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb->priority));
616 }
617
618 slsi_spinlock_lock(&ndev_vif->peer_lock);
619 peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
620 if (!peer) {
621 SLSI_NET_DBG1(dev, SLSI_TX, "Discard: Peer %pM NOT found\n", ehdr->h_dest);
622 slsi_spinlock_unlock(&ndev_vif->peer_lock);
623 return SLSI_NETIF_Q_DISCARD;
624 }
625
626 if (peer->qos_enabled) {
627#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
628 if (peer->qos_map_set) { /*802.11 QoS for interworking*/
629 skb->priority = cfg80211_classify8021d(skb, &peer->qos_map);
630 } else
631#endif
632 {
633#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
634 if ((proto == ETH_P_IP && slsi_is_dns_packet(skb->data)) ||
635 (proto == ETH_P_IP && slsi_is_mdns_packet(skb->data)) ||
636 (proto == ETH_P_IP && slsi_is_tcp_sync_packet(dev, skb))) {
637 skb->priority = FAPI_PRIORITY_QOS_UP7;
638 } else
639#endif
640 {
641#ifdef CONFIG_SCSC_USE_WMM_TOS
642 skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
643#else
644 skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
645#endif
646 }
647 }
648 } else {
649 skb->priority = FAPI_PRIORITY_QOS_UP0;
650 }
651
652 /* Downgrade the priority if acm bit is set and tspec is not established */
653 slsi_net_downgrade_pri(dev, peer, skb);
654
655 netif_q = slsi_netif_get_peer_queue(peer->queueset, slsi_frame_priority_to_ac_queue(skb->priority));
656 SLSI_NET_DBG3(dev, SLSI_TX, "prio:%d queue:%u\n", skb->priority, netif_q);
657 slsi_spinlock_unlock(&ndev_vif->peer_lock);
658 return netif_q;
659}
660
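/* On TDLS setup/teardown, re-map frames already queued towards the peer:
 * entries held in the TCP ACK suppression cache and packets sitting in the
 * STA/TDLS netdev qdiscs are dequeued and re-enqueued on the other queue
 * set while all TX queues are paused.
 */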
661void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
662 struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection)
663{
664 struct netdev_vif *netdev_vif = netdev_priv(dev);
665 struct sk_buff *skb = NULL;
666#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
667 struct sk_buff *skb_to_free = NULL;
668#endif
669 struct ethhdr *ehdr;
670 struct Qdisc *qd;
671 u32 num_pkts;
672 u16 staq;
673 u16 tdlsq;
674 u16 netq;
675 u16 i;
676 u16 j;
677 int index;
678 struct slsi_tcp_ack_s *tcp_ack;
679
680 /* Get the netdev queue number from queueset */
681 staq = slsi_netif_get_peer_queue(sta_peer->queueset, 0);
682 tdlsq = slsi_netif_get_peer_queue(tdls_peer->queueset, 0);
683
684 SLSI_NET_DBG1(dev, SLSI_TDLS, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq: %d, tdls_netq: %d\n",
685 connection, sta_peer->queueset, tdls_peer->queueset, staq, tdlsq);
686
687 /* Pause the TDLS queues and STA netdev queues */
688 slsi_tx_pause_queues(sdev);
689
690 /* walk through frames in TCP Ack suppression queue and change mapping to TDLS queue */
691 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
692 tcp_ack = &netdev_vif->ack_suppression[index];
693 if (!tcp_ack || !tcp_ack->state)
694 continue;
695 slsi_spinlock_lock(&tcp_ack->lock);
696 skb_queue_walk(&tcp_ack->list, skb) {
697 SLSI_NET_DBG2(dev, SLSI_TDLS, "frame in TCP Ack list (peer:%pM)\n", eth_hdr(skb)->h_dest);
698 /* is it destined to TDLS peer? */
699 if (compare_ether_addr(tdls_peer->address, eth_hdr(skb)->h_dest) == 0) {
700 if (connection) {
701 /* TDLS setup: change the queue mapping to TDLS queue */
702 skb->queue_mapping += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
703 } else {
704 /* TDLS teardown: change the queue to STA queue */
705 skb->queue_mapping -= (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
706 }
707 }
708 }
709 slsi_spinlock_unlock(&tcp_ack->lock);
710 }
711
712 /**
713 * For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
714 * For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
715 */
716 if (connection)
717 tdls_peer->valid = true;
718 else
719 tdls_peer->valid = false;
720
721 /* Move packets from netdev queues */
722 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
723 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
724 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
725
726 if (connection) {
727 /* Check if any packet is already available in TDLS queue (most likely from last session) */
728 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
729 SLSI_NET_ERR(dev, "tdls_connection: Packet present in queue %d\n", tdlsq + i);
730
731 qd = dev->_tx[staq + i].qdisc;
732 /* Get the total number of packets in STAQ */
733 num_pkts = qd->q.qlen;
734
735 /* Check all the pkts in STAQ and move the TDLS pkts to TDLSQ */
736 for (j = 0; j < num_pkts; j++) {
737 qd = dev->_tx[staq + i].qdisc;
738 /* Dequeue the pkt from STAQ. This logic is similar to kernel API dequeue_skb() */
739 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
740 skb = skb_peek(&qd->gso_skb);
741 #else
742 skb = qd->gso_skb;
743 #endif
744 if (skb) {
745 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
746 skb = __skb_dequeue(&qd->gso_skb);
747 #else
748 qd->gso_skb = NULL;
749 #endif
750 qd->q.qlen--;
751 } else {
752 skb = qd->dequeue(qd);
753 }
754
755 if (!skb) {
756 SLSI_NET_ERR(dev, "tdls_connection: STA NETQ skb is NULL\n");
757 break;
758 }
759
760 /* Change the queue mapping for the TDLS packets */
761 netq = skb->queue_mapping;
762 ehdr = (struct ethhdr *)skb->data;
763 if (compare_ether_addr(tdls_peer->address, ehdr->h_dest) == 0) {
764 netq += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
765 SLSI_NET_DBG3(dev, SLSI_TDLS, "NETQ%d: Queue mapping changed from %d to %d\n",
766 i, skb->queue_mapping, netq);
767 skb_set_queue_mapping(skb, netq);
768 }
769
770 qd = dev->_tx[netq].qdisc;
771#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
772 qd->enqueue(skb, qd, &skb_to_free);
773#else
774 /* If the netdev queue is already full then enqueue() will drop the skb */
775 qd->enqueue(skb, qd);
776#endif
777 }
778 } else {
779 num_pkts = dev->_tx[tdlsq + i].qdisc->q.qlen;
780 /* Move the packets from TDLS to STA queue */
781 for (j = 0; j < num_pkts; j++) {
782 /* Dequeue the pkt from TDLS_Q. This logic is similar to kernel API dequeue_skb() */
783 qd = dev->_tx[tdlsq + i].qdisc;
784 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
785 skb = skb_peek(&qd->gso_skb);
786 #else
787 skb = qd->gso_skb;
788 #endif
789 if (skb) {
790 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
791 skb = __skb_dequeue(&qd->gso_skb);
792 #else
793 qd->gso_skb = NULL;
794 #endif
795 qd->q.qlen--;
796 } else {
797 skb = qd->dequeue(qd);
798 }
799
800 if (!skb) {
801 SLSI_NET_ERR(dev, "tdls_teardown: TDLS NETQ skb is NULL\n");
802 break;
803 }
804
805 /* Update the queue mapping */
806 skb_set_queue_mapping(skb, staq + i);
807
808 /* Enqueue the packet in STA queue */
809 qd = dev->_tx[staq + i].qdisc;
810#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
811 qd->enqueue(skb, qd, &skb_to_free);
812#else
813 /* If the netdev queue is already full then enqueue() will drop the skb */
814 qd->enqueue(skb, qd);
815#endif
816 }
817 }
818 SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
819 i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
820 }
821#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
822 if (unlikely(skb_to_free))
823 kfree_skb_list(skb_to_free);
824#endif
825
826 /* Teardown - after teardown there should not be any packet in TDLS queues */
827 if (!connection)
828 for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
829 if (dev->_tx[tdlsq + i].qdisc->q.qlen)
830 SLSI_NET_ERR(dev, "tdls_teardown: Packet present in NET queue %d\n", tdlsq + i);
831 }
832
833 /* Resume the STA and TDLS netdev queues */
834 slsi_tx_unpause_queues(sdev);
835}
836
837/**
838 * This is the main TX entry point for the driver.
839 *
840 * Ownership of the skb is transferred to another function ONLY IF such
841 * function was able to deal with that skb and ended with a SUCCESS ret code.
842 * Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
843 *
844 * In the context of this function:
845 * - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
846 * SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
847 * kernel netstack will receive back NETDEV_TX_OK too.
848 * - ownership is KEPT HERE by this function when lower layers fail somehow
849 * to deal with the transmission of the skb. In this case the skb WILL NOT
850 * HAVE BEEN FREED by lower layers, which instead return a proper ERRCODE.
851 * - intermediate lower layer functions (NOT directly involved in failure or
852 * success) will relay any retcode up to this layer for evaluation.
853 *
854 * WHAT HAPPENS THEN, is ERRCODE-dependent, and at the moment:
855 * - ENOSPC: something related to queueing happened...this should be
856 * retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
857 * requeued by the Kernel NetStack itself, using the proper queue.
858 * As a consequence the SKB is NOT FREED HERE!
859 * - ANY OTHER ERR: all other errors are considered at the moment NOT
860 * recoverable and SO skbs are dropped (FREED) HERE...Kernel will receive
861 * the proper ERRCODE and stops dealing with the packet considering it
862 * consumed by lower layer. (same behavior as NETDEV_TX_OK)
863 *
864 * BIG NOTE:
865 * As detailed in Documentation/networking/drivers.txt the above behavior
866 * of returning NETDEV_TX_BUSY to trigger requeueing by the Kernel is
867 * discouraged and should be used ONLY in case of a real HARD error(?);
868 * the advised solution is to actively STOP the queues before finishing
869 * the available space and WAKING them up again when more free buffers
870 * would have arrived.
871 */
872static netdev_tx_t slsi_net_hw_xmit(struct sk_buff *skb, struct net_device *dev)
873{
874 struct netdev_vif *ndev_vif = netdev_priv(dev);
875 struct slsi_dev *sdev = ndev_vif->sdev;
876 int r = NETDEV_TX_OK;
877 struct sk_buff *original_skb = NULL;
878#ifdef CONFIG_SCSC_WLAN_DEBUG
879 int known_users = 0;
880#endif
881 /* Keep the packet length. The packet length will be used to increment
882 * stats for the netdev if the packet was successfully transmitted.
883 * The ownership of the SKB is passed to lower layers, so we should
884 * not refer to the SKB after this point
885 */
886 unsigned int packet_len = skb->len;
887 enum slsi_traffic_q traffic_q = slsi_frame_priority_to_ac_queue(skb->priority);
888
889 slsi_wakelock(&sdev->wlan_wl);
890 slsi_skb_cb_init(skb);
891
892 /* Check for misaligned (oddly aligned) data.
893 * The f/w requires 16-bit alignment.
894 * This is a corner case - for example, the kernel can generate BPDUs
895 * that are oddly aligned. Therefore it is acceptable to copy these
896 * frames to a 16 bit alignment.
897 */
898 if ((uintptr_t)skb->data & 0x1) {
899 struct sk_buff *skb2 = NULL;
900 /* Received a socket buffer aligned on an odd address.
901 * Re-align by asking for headroom.
902 */
903 skb2 = skb_copy_expand(skb, SLSI_NETIF_SKB_HEADROOM, skb_tailroom(skb), GFP_ATOMIC);
904 if (skb2 && (!(((uintptr_t)skb2->data) & 0x1))) {
905 /* We should account for this duplication */
906 original_skb = skb;
907 skb = skb2;
908 SLSI_NET_DBG3(dev, SLSI_TX, "Oddly aligned skb realigned\n");
909 } else {
910 /* Drop the packet if we can't re-align. */
911 SLSI_NET_WARN(dev, "Oddly aligned skb failed realignment, dropping\n");
912 if (skb2) {
913 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand didn't align for us\n");
914 slsi_kfree_skb(skb2);
915 } else {
916 SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand failed when trying to align\n");
917 }
918 r = -EFAULT;
919 goto evaluate;
920 }
921 }
922 slsi_dbg_track_skb(skb, GFP_ATOMIC);
923
924 /* Be defensive about the mac_header - some kernels have a bug where a
925 * frame can be delivered to the driver with mac_header initialised
926 * to ~0U and this causes a crash when the pointer is dereferenced to
927 * access part of the Ethernet header.
928 */
929 if (!skb_mac_header_was_set(skb))
930 skb_reset_mac_header(skb);
931
932 SLSI_NET_DBG3(dev, SLSI_TX, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb)->h_proto));
933
934#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
935 if (ndev_vif->ifnum < SLSI_NAN_DATA_IFINDEX_START) {
936#endif
937 if (!ndev_vif->is_available) {
938 SLSI_NET_WARN(dev, "vif NOT available\n");
939 r = -EFAULT;
940 goto evaluate;
941 }
942#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
943 }
944#endif
945 if (skb->queue_mapping == SLSI_NETIF_Q_DISCARD) {
946 SLSI_NET_WARN(dev, "Discard Queue :: Packet Dropped\n");
947 r = -EIO;
948 goto evaluate;
949 }
950
951#ifdef CONFIG_SCSC_WLAN_DEBUG
952#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
953 known_users = refcount_read(&skb->users);
954#else
955 known_users = atomic_read(&skb->users);
956#endif
957#endif
958
959#ifndef CONFIG_ARM
960 skb = slsi_netif_tcp_ack_suppression_pkt(dev, skb);
961 if (!skb) {
962 slsi_wakeunlock(&sdev->wlan_wl);
963 if (original_skb)
964 slsi_kfree_skb(original_skb);
965 return NETDEV_TX_OK;
966 }
967#endif
968
969 /* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
970 r = slsi_tx_data(sdev, dev, skb);
971evaluate:
972 if (r == 0) {
973 /**
974 * A copy has been passed down and successfully transmitted
975 * and freed....here we free the original coming from the
976 * upper network layers....if a copy was passed down.
977 */
978 if (original_skb)
979 slsi_kfree_skb(original_skb);
980 /* skb freed by lower layers on success...enjoy */
981
982 ndev_vif->tx_packets[traffic_q]++;
983 ndev_vif->stats.tx_packets++;
984 ndev_vif->stats.tx_bytes += packet_len;
985 r = NETDEV_TX_OK;
986 } else {
987 /**
988 * Failed to send:
989 * - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
990 * NOT discarded by lower layers and NETDEV_TX_BUSY should
991 * be returned to upper layers: this will cause the skb
992 * (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
993 * to be requeued ...
994 * NOTE THAT it's the original skb that will be retried
995 * by upper netstack.
996 * THIS CONDITION SHOULD NOT BE REACHED...NEVER...see in
997 * the following.
998 *
999 * - with any other -ERR instead return the error: this
1000 * anyway let the kernel think that the SKB has
1001 * been consumed, and we drop the frame and free it.
1002 *
1003 * - a WARN_ON() takes care to ensure the SKB has NOT been
1004 * freed by someone despite this was NOT supposed to happen,
1005 * just before the actual freeing.
1006 *
1007 */
1008 if (r == -ENOSPC) {
1009 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
1010 ndev_vif->stats.tx_fifo_errors++;
1011 /* Free the local copy if any ... */
1012 if (original_skb)
1013 slsi_kfree_skb(skb);
1014 r = NETDEV_TX_BUSY;
1015 } else {
1016#ifdef CONFIG_SCSC_WLAN_DEBUG
1017#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
1018 WARN_ON(known_users && refcount_read(&skb->users) != known_users);
1019#else
1020 WARN_ON(known_users && atomic_read(&skb->users) != known_users);
1021#endif
1022#endif
1023 if (original_skb)
1024 slsi_kfree_skb(original_skb);
1025 slsi_kfree_skb(skb);
1026 ndev_vif->stats.tx_dropped++;
1027 /* We return the ORIGINAL Error 'r' anyway
1028 * BUT Kernel treats them as TX complete anyway
1029 * and assumes the SKB has been consumed.
1030 */
1031 /* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
1032 }
1033 }
1034 /* SKBs are always considered consumed if the driver
1035 * returns NETDEV_TX_OK.
1036 */
1037 slsi_wakeunlock(&sdev->wlan_wl);
1038 return r;
1039}
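
/* Illustrative sketch only (not code used by this driver): the flow-control
 * alternative recommended in the BIG NOTE above would stop the subqueue
 * before the lower layer runs out of space and wake it again from the TX
 * completion path, instead of ever returning NETDEV_TX_BUSY. Assuming a
 * hypothetical helper that reports available space, it would look roughly
 * like this:
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		u16 q = skb_get_queue_mapping(skb);
 *
 *		if (!example_lower_layer_has_room(dev, q)) {	(hypothetical helper)
 *			netif_stop_subqueue(dev, q);
 *			return NETDEV_TX_BUSY;
 *		}
 *		(hand the skb to the lower layer as usual)
 *		return NETDEV_TX_OK;
 *	}
 *
 * and in the TX completion path, once space is available again:
 *
 *	if (__netif_subqueue_stopped(dev, q))
 *		netif_wake_subqueue(dev, q);
 */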
1040
1041static netdev_features_t slsi_net_fix_features(struct net_device *dev, netdev_features_t features)
1042{
1043 SLSI_UNUSED_PARAMETER(dev);
1044
1045#ifdef CONFIG_SCSC_WLAN_SG
1046 SLSI_NET_DBG1(dev, SLSI_RX, "Scatter-gather and GSO enabled\n");
1047 features |= NETIF_F_SG;
1048 features |= NETIF_F_GSO;
1049#endif
1050
1051#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
1052 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO enabled\n");
1053 features |= NETIF_F_GRO;
1054#else
1055 SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO disabled\n");
1056 features &= ~NETIF_F_GRO;
1057#endif
1058 return features;
1059}
1060
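/* ndo_set_rx_mode: snapshot the interface multicast list for a STA vif.
 * mDNS and (when IPv6 is not blocked) the mDNSv6/solicited-node groups are
 * skipped because they are handled separately; up to SLSI_MC_ADDR_ENTRY_MAX
 * of the remaining addresses are copied into ndev_vif->sta.regd_mc_addr.
 */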
1061static void slsi_set_multicast_list(struct net_device *dev)
1062{
1063 struct netdev_vif *ndev_vif = netdev_priv(dev);
1064 u8 count, i = 0;
1065 u8 mdns_addr[ETH_ALEN] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
1066
1067#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1068 u8 mc_addr_prefix[3] = { 0x01, 0x00, 0x5e };
1069#else
1070 u8 mdns6_addr[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
1071 const u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
1072 u8 ipv6addr_suffix[3];
1073#endif
1074 struct netdev_hw_addr *ha;
1075
1076 if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
1077 return;
1078
1079 if (!ndev_vif->is_available) {
1080 SLSI_NET_DBG1(dev, SLSI_NETDEV, "vif NOT available\n");
1081 return;
1082 }
1083
1084 count = netdev_mc_count(dev);
1085 if (!count)
1086 goto exit;
1087
1088#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1089 slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
1090 memcpy(ipv6addr_suffix, &ndev_vif->ipv6address.s6_addr[13], 3);
1091 slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
1092#endif
1093
1094 netdev_for_each_mc_addr(ha, dev) {
1095#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
1096 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) || /*mDns is handled separately*/
1097 (memcmp(ha->addr, mc_addr_prefix, 3))) { /*only consider IPv4 multicast addresses*/
1098#else
1099 if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) ||
1100 (!memcmp(ha->addr, mdns6_addr, ETH_ALEN)) || /*mDns is handled separately*/
1101 (!memcmp(ha->addr, solicited_node_addr, 3) &&
1102 !memcmp(&ha->addr[3], ipv6addr_suffix, 3))) { /* local multicast addr handled separately*/
1103#endif
1104
1105 SLSI_NET_DBG3(dev, SLSI_NETDEV, "Drop MAC %pM\n", ha->addr);
1106 continue;
1107 }
1108 if (i == SLSI_MC_ADDR_ENTRY_MAX) {
1109 SLSI_NET_WARN(dev, "MAC list has reached max limit (%d), actual count %d\n", SLSI_MC_ADDR_ENTRY_MAX, count);
1110 break;
1111 }
1112
1113 SLSI_NET_DBG3(dev, SLSI_NETDEV, "idx %d MAC %pM\n", i, ha->addr);
1114 SLSI_ETHER_COPY(ndev_vif->sta.regd_mc_addr[i++], ha->addr);
1115 }
1116
1117exit:
1118 ndev_vif->sta.regd_mc_addr_count = i;
1119}
1120
1121static int slsi_set_mac_address(struct net_device *dev, void *addr)
1122{
1123 struct netdev_vif *ndev_vif = netdev_priv(dev);
1124 struct slsi_dev *sdev = ndev_vif->sdev;
1125 struct sockaddr *sa = (struct sockaddr *)addr;
1126
1127 SLSI_NET_DBG1(dev, SLSI_NETDEV, "slsi_set_mac_address %pM\n", sa->sa_data);
1128 SLSI_ETHER_COPY(dev->dev_addr, sa->sa_data);
1129
1130 /* The interface is pulled down before the MAC address is changed.
1131 * The first scan initiated after the interface is brought up again should be treated as an initial scan, for faster reconnection.
1132 */
1133 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
1134 sdev->initial_scan = true;
1135 }
1136 return 0;
1137}
1138
1139static const struct net_device_ops slsi_netdev_ops = {
1140 .ndo_open = slsi_net_open,
1141 .ndo_stop = slsi_net_stop,
1142 .ndo_start_xmit = slsi_net_hw_xmit,
1143 .ndo_do_ioctl = slsi_net_ioctl,
1144 .ndo_get_stats = slsi_net_get_stats,
1145 .ndo_select_queue = slsi_net_select_queue,
1146 .ndo_fix_features = slsi_net_fix_features,
1147 .ndo_set_rx_mode = slsi_set_multicast_list,
1148 .ndo_set_mac_address = slsi_set_mac_address,
1149};
1150
1151static void slsi_if_setup(struct net_device *dev)
1152{
1153 ether_setup(dev);
1154 dev->netdev_ops = &slsi_netdev_ops;
1155#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
1156 dev->needs_free_netdev = true;
1157#else
1158 dev->destructor = free_netdev;
1159#endif
1160}
1161
1162#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1163
1164#if defined(CONFIG_SOC_EXYNOS9610) || defined(CONFIG_SOC_EXYNOS9630) || defined(CONFIG_SOC_EXYNOS3830)
1165#define SCSC_NETIF_RPS_CPUS_MASK "fe"
1166#else
1167#define SCSC_NETIF_RPS_CPUS_MASK "0"
1168#endif
1169
1170static void slsi_netif_rps_map_clear(struct net_device *dev)
1171{
1172 struct rps_map *map;
1173
1174 map = rcu_dereference_protected(dev->_rx->rps_map, 1);
1175 if (map) {
1176 RCU_INIT_POINTER(dev->_rx->rps_map, NULL);
1177 kfree_rcu(map, rcu);
1178 SLSI_NET_INFO(dev, "clear rps_cpus map\n");
1179 }
1180}
1181
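/* Parse a hex CPU mask string (the same format as
 * /sys/class/net/<if>/queues/rx-0/rps_cpus, e.g. "fe") and install the
 * resulting RPS map on the default RX queue, releasing any previous map.
 */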
1182static int slsi_netif_rps_map_set(struct net_device *dev, char *buf, size_t len)
1183{
1184 struct rps_map *old_map, *map;
1185 cpumask_var_t mask;
1186 int err, cpu, i;
1187 static DEFINE_SPINLOCK(rps_map_lock);
1188
1189 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1190 return -ENOMEM;
1191
1192 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1193 if (err) {
1194 free_cpumask_var(mask);
1195 SLSI_NET_WARN(dev, "CPU bitmap parse failed\n");
1196 return err;
1197 }
1198
1199 map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL);
1200 if (!map) {
1201 free_cpumask_var(mask);
1202 SLSI_NET_WARN(dev, "CPU mask alloc failed\n");
1203 return -ENOMEM;
1204 }
1205
1206 i = 0;
1207 for_each_cpu_and(cpu, mask, cpu_online_mask)
1208 map->cpus[i++] = cpu;
1209
1210 if (i) {
1211 map->len = i;
1212 } else {
1213 kfree(map);
1214 map = NULL;
1215 }
1216
1217 spin_lock(&rps_map_lock);
1218 old_map = rcu_dereference_protected(dev->_rx->rps_map, lockdep_is_held(&rps_map_lock));
1219 rcu_assign_pointer(dev->_rx->rps_map, map);
1220 spin_unlock(&rps_map_lock);
1221
1222 if (map)
1223 static_key_slow_inc(&rps_needed);
1224 if (old_map)
1225 static_key_slow_dec(&rps_needed);
1226
1227 if (old_map)
1228 kfree_rcu(old_map, rcu);
1229
1230 free_cpumask_var(mask);
1231 SLSI_NET_INFO(dev, "rps_cpus map set(%s)\n", buf);
1232 return len;
1233}
1234#endif
1235
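/* Allocate and initialise one virtual interface: a multi-queue net_device
 * with the per-peer TX queues, reserved skb head/tailroom, per-vif locks,
 * the peer database (not needed for p2p0/NAN), the RX data/MLME work queues
 * and the associated wireless_dev. Must be called with
 * netdev_add_remove_mutex held.
 */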
1236int slsi_netif_add_locked(struct slsi_dev *sdev, const char *name, int ifnum)
1237{
1238 struct net_device *dev = NULL;
1239 struct netdev_vif *ndev_vif;
1240 struct wireless_dev *wdev;
1241 int alloc_size, txq_count = 0, ret;
1242
1243 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1244
1245 if (WARN_ON(!sdev || ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES || sdev->netdev[ifnum]))
1246 return -EINVAL;
1247
1248 alloc_size = sizeof(struct netdev_vif);
1249
1250 txq_count = SLSI_NETIF_Q_PEER_START + (SLSI_NETIF_Q_PER_PEER * (SLSI_ADHOC_PEER_CONNECTIONS_MAX));
1251
1252#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
1253 dev = alloc_netdev_mqs(alloc_size, name, NET_NAME_PREDICTABLE, slsi_if_setup, txq_count, 1);
1254#else
1255 dev = alloc_netdev_mqs(alloc_size, name, slsi_if_setup, txq_count, 1);
1256#endif
1257 if (!dev) {
1258 SLSI_ERR(sdev, "Failed to allocate private data for netdev\n");
1259 return -ENOMEM;
1260 }
1261
1262 /* Reserve space in skb for later use */
1263 dev->needed_headroom = SLSI_NETIF_SKB_HEADROOM;
1264 dev->needed_tailroom = SLSI_NETIF_SKB_TAILROOM;
1265
1266 ret = dev_alloc_name(dev, dev->name);
1267 if (ret < 0)
1268 goto exit_with_error;
1269
1270 ndev_vif = netdev_priv(dev);
1271 memset(ndev_vif, 0x00, sizeof(*ndev_vif));
1272 SLSI_MUTEX_INIT(ndev_vif->vif_mutex);
1273 SLSI_MUTEX_INIT(ndev_vif->scan_mutex);
1274 SLSI_MUTEX_INIT(ndev_vif->scan_result_mutex);
1275 skb_queue_head_init(&ndev_vif->ba_complete);
1276 slsi_sig_send_init(&ndev_vif->sig_wait);
1277 ndev_vif->sdev = sdev;
1278 ndev_vif->ifnum = ifnum;
1279 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1280#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
1281 slsi_spinlock_create(&ndev_vif->ipv6addr_lock);
1282#endif
1283 slsi_spinlock_create(&ndev_vif->peer_lock);
1284 atomic_set(&ndev_vif->ba_flush, 0);
1285
1286 /* Reserve memory for the peer database - Not required for p2p0/nan interface */
1287 if (!(SLSI_IS_VIF_INDEX_P2P(ndev_vif) || SLSI_IS_VIF_INDEX_NAN(ndev_vif))) {
1288 int queueset;
1289
1290 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1291 ndev_vif->peer_sta_record[queueset] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queueset]), GFP_KERNEL);
1292
1293 if (!ndev_vif->peer_sta_record[queueset]) {
1294 int j;
1295
1296 SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queueset:%d)\n", queueset);
1297
1298 /* Free previously allocated peer database memory till current queueset */
1299 for (j = 0; j < queueset; j++) {
1300 kfree(ndev_vif->peer_sta_record[j]);
1301 ndev_vif->peer_sta_record[j] = NULL;
1302 }
1303
1304 ret = -ENOMEM;
1305 goto exit_with_error;
1306 }
1307 }
1308 }
1309
1310 /* The default power mode in host*/
1311 if (slsi_is_rf_test_mode_enabled()) {
1312 SLSI_NET_ERR(dev, "*#rf# rf test mode set is enabled.\n");
1313 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
1314 } else {
1315 ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
1316 }
1317
1318 INIT_LIST_HEAD(&ndev_vif->sta.network_map);
1319 SLSI_DBG1(sdev, SLSI_NETDEV, "ifnum=%d\n", ndev_vif->ifnum);
1320
1321 /* For HS2 interface */
1322 if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
1323 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1324
1325 /* For p2p0 interface */
1326 else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1327 ret = slsi_p2p_init(sdev, ndev_vif);
1328 if (ret)
1329 goto exit_with_error;
1330 }
1331
1332 INIT_DELAYED_WORK(&ndev_vif->scan_timeout_work, slsi_scan_ind_timeout_handle);
1333
1334 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_data, "slsi_wlan_rx_data", slsi_rx_netdev_data_work);
1335 if (ret)
1336 goto exit_with_error;
1337
1338 ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_mlme, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work);
1339 if (ret) {
1340 slsi_skb_work_deinit(&ndev_vif->rx_data);
1341 goto exit_with_error;
1342 }
1343
1344 wdev = &ndev_vif->wdev;
1345
1346 dev->ieee80211_ptr = wdev;
1347 wdev->wiphy = sdev->wiphy;
1348 wdev->netdev = dev;
1349 wdev->iftype = NL80211_IFTYPE_STATION;
1350 SET_NETDEV_DEV(dev, sdev->dev);
1351
1352 /* We are not ready to send data yet. */
1353 netif_carrier_off(dev);
1354
1355#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1356 if (strcmp(name, CONFIG_SCSC_AP_INTERFACE_NAME) == 0)
1357 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
1358 else
1359 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1360#else
1361 SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
1362#endif
1363 SLSI_DBG1(sdev, SLSI_NETDEV, "Add:%pM\n", dev->dev_addr);
1364 rcu_assign_pointer(sdev->netdev[ifnum], dev);
1365 ndev_vif->delete_probe_req_ies = false;
1366 ndev_vif->probe_req_ies = NULL;
1367 ndev_vif->probe_req_ie_len = 0;
1368 ndev_vif->drv_in_p2p_procedure = false;
1369
1370#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1371 slsi_netif_rps_map_set(dev, SCSC_NETIF_RPS_CPUS_MASK, strlen(SCSC_NETIF_RPS_CPUS_MASK));
1372#endif
1373 return 0;
1374
1375exit_with_error:
1376 mutex_lock(&sdev->netdev_remove_mutex);
1377 free_netdev(dev);
1378 mutex_unlock(&sdev->netdev_remove_mutex);
1379 return ret;
1380}
1381
1382int slsi_netif_dynamic_iface_add(struct slsi_dev *sdev, const char *name)
1383{
1384 int index = -EINVAL;
1385 int err;
1386
1387 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1388
1389#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1390 if (sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN] == sdev->netdev_ap) {
1391 rcu_assign_pointer(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN], NULL);
1392 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1393 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1394 }
1395#else
1396 err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
1397 index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
1398#endif
1399
1400 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1401 return index;
1402}
1403
1404int slsi_netif_init(struct slsi_dev *sdev)
1405{
1406 int i;
1407
1408 SLSI_DBG3(sdev, SLSI_NETDEV, "\n");
1409
1410 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1411
1412 /* Initialize all other netdev interfaces to NULL */
1413 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1414 RCU_INIT_POINTER(sdev->netdev[i], NULL);
1415
1416 if (slsi_netif_add_locked(sdev, "wlan%d", SLSI_NET_INDEX_WLAN) != 0) {
1417 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1418 return -EINVAL;
1419 }
1420
1421 if (slsi_netif_add_locked(sdev, "p2p%d", SLSI_NET_INDEX_P2P) != 0) {
1422 rtnl_lock();
1423 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1424 rtnl_unlock();
1425 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1426 return -EINVAL;
1427 }
1428#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1429#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1430 if (slsi_netif_add_locked(sdev, CONFIG_SCSC_AP_INTERFACE_NAME, SLSI_NET_INDEX_P2PX_SWLAN) != 0) {
1431 rtnl_lock();
1432 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1433 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1434 rtnl_unlock();
1435 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1436 return -EINVAL;
1437 }
1438#endif
1439#endif
1440#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
1441 if (slsi_netif_add_locked(sdev, "nan%d", SLSI_NET_INDEX_NAN) != 0) {
1442 rtnl_lock();
1443 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
1444 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
1445#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
1446#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
1447 slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
1448#endif
1449#endif
1450 rtnl_unlock();
1451 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1452 return -EINVAL;
1453 }
1454#endif
1455 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1456 return 0;
1457}
1458
1459int slsi_netif_register_locked(struct slsi_dev *sdev, struct net_device *dev)
1460{
1461 struct netdev_vif *ndev_vif = netdev_priv(dev);
1462 int err;
1463
1464 WARN_ON(!rtnl_is_locked());
1465 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1466 if (atomic_read(&ndev_vif->is_registered)) {
1467 SLSI_NET_ERR(dev, "Register:%pM Failed: Already registered\n", dev->dev_addr);
1468 return 0;
1469 }
1470
1471 err = register_netdevice(dev);
1472 if (err)
1473 SLSI_NET_ERR(dev, "Register:%pM Failed\n", dev->dev_addr);
1474 else
1475 atomic_set(&ndev_vif->is_registered, 1);
1476 return err;
1477}
1478
1479int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1480{
1481 int err;
1482
1483 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1484 err = slsi_netif_register_locked(sdev, dev);
1485 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1486 return err;
1487}
1488
1489int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev)
1490{
1491 int err;
1492
1493 rtnl_lock();
1494 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1495 err = slsi_netif_register_locked(sdev, dev);
1496 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1497 rtnl_unlock();
1498 return err;
1499}
1500
1501void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev)
1502{
1503 int i;
1504 struct netdev_vif *ndev_vif = netdev_priv(dev);
1505
1506 SLSI_NET_DBG1(dev, SLSI_NETDEV, "Unregister:%pM\n", dev->dev_addr);
1507
1508 WARN_ON(!rtnl_is_locked());
1509 WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
1510
1511 if (atomic_read(&ndev_vif->is_registered)) {
1512 netif_tx_disable(dev);
1513 netif_carrier_off(dev);
1514
1515 slsi_stop_net_dev(sdev, dev);
1516 }
1517
1518 rcu_assign_pointer(sdev->netdev[ndev_vif->ifnum], NULL);
1519 synchronize_rcu();
1520
1521 /* Free memory of the peer database - Not required for p2p0 interface */
1522 if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1523 int queueset;
1524
1525 for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
1526 kfree(ndev_vif->peer_sta_record[queueset]);
1527 ndev_vif->peer_sta_record[queueset] = NULL;
1528 }
1529 }
1530
1531 if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
1532 slsi_p2p_deinit(sdev, ndev_vif);
1533 } else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
1534 sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
1535 ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
1536 }
1537
1538 cancel_delayed_work(&ndev_vif->scan_timeout_work);
1539 ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
1540
1541 slsi_skb_work_deinit(&ndev_vif->rx_data);
1542 slsi_skb_work_deinit(&ndev_vif->rx_mlme);
1543
1544 for (i = 0; i < SLSI_SCAN_MAX; i++)
1545 slsi_purge_scan_results(ndev_vif, i);
1546
1547 slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
1548 slsi_roam_channel_cache_prune(dev, 0);
1549 kfree(ndev_vif->probe_req_ies);
1550
1551#ifdef CONFIG_SCSC_WLAN_RX_NAPI
1552 slsi_netif_rps_map_clear(dev);
1553#endif
1554 if (atomic_read(&ndev_vif->is_registered)) {
1555 atomic_set(&ndev_vif->is_registered, 0);
1556 unregister_netdevice(dev);
1557 } else {
1558 mutex_lock(&sdev->netdev_remove_mutex);
1559 free_netdev(dev);
1560 mutex_unlock(&sdev->netdev_remove_mutex);
1561 }
1562}
1563
1564void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
1565{
1566 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1567 slsi_netif_remove_locked(sdev, dev);
1568 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1569}
1570
1571void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev)
1572{
1573 rtnl_lock();
1574 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1575 slsi_netif_remove_locked(sdev, dev);
1576 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1577 rtnl_unlock();
1578}
1579
1580void slsi_netif_remove_all(struct slsi_dev *sdev)
1581{
1582 int i;
1583
1584 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1585 rtnl_lock();
1586 SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
1587 for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
1588 if (sdev->netdev[i])
1589 slsi_netif_remove_locked(sdev, sdev->netdev[i]);
1590 rcu_assign_pointer(sdev->netdev_ap, NULL);
1591 SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
1592 rtnl_unlock();
1593}
1594
1595void slsi_netif_deinit(struct slsi_dev *sdev)
1596{
1597 SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
1598 slsi_netif_remove_all(sdev);
1599}
1600
1601#ifndef CONFIG_ARM
1602static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev)
1603{
1604 int index;
1605 struct netdev_vif *ndev_vif = netdev_priv(dev);
1606 struct slsi_tcp_ack_s *tcp_ack;
1607
1608 ndev_vif->last_tcp_ack = NULL;
1609 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1610 tcp_ack = &ndev_vif->ack_suppression[index];
1611 tcp_ack->dport = 0;
1612 tcp_ack->daddr = 0;
1613 tcp_ack->sport = 0;
1614 tcp_ack->saddr = 0;
1615 tcp_ack->ack_seq = 0;
1616 tcp_ack->count = 0;
1617 tcp_ack->max = 0;
1618 tcp_ack->age = 0;
1619 skb_queue_head_init(&tcp_ack->list);
1620#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1621 timer_setup(&tcp_ack->timer, slsi_netif_tcp_ack_suppression_timeout, 0);
1622#else
1623 tcp_ack->timer.function = slsi_netif_tcp_ack_suppression_timeout;
1624 tcp_ack->timer.data = (unsigned long)tcp_ack;
1625 init_timer(&tcp_ack->timer);
1626#endif
1627 tcp_ack->state = 1;
1628 slsi_spinlock_create(&tcp_ack->lock);
1629 }
1630
1631 memset(&ndev_vif->tcp_ack_stats, 0, sizeof(struct slsi_tcp_ack_stats));
1632 return 0;
1633}
1634
1635static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev)
1636{
1637 int index;
1638 struct netdev_vif *ndev_vif = netdev_priv(dev);
1639 struct slsi_tcp_ack_s *tcp_ack;
1640
1641 SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
1642 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1643 tcp_ack = &ndev_vif->ack_suppression[index];
1644 del_timer_sync(&tcp_ack->timer);
1645 slsi_spinlock_lock(&tcp_ack->lock);
1646 tcp_ack->state = 0;
1647 skb_queue_purge(&tcp_ack->list);
1648 slsi_spinlock_unlock(&tcp_ack->lock);
1649 }
1650 ndev_vif->last_tcp_ack = NULL;
1651 SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
1652 return 0;
1653}
1654
1655#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1656static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t)
1657#else
1658static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data)
1659#endif
1660{
1661#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
1662 struct slsi_tcp_ack_s *tcp_ack = from_timer(tcp_ack, t, timer);
1663#else
1664 struct slsi_tcp_ack_s *tcp_ack = (struct slsi_tcp_ack_s *)data;
1665#endif
1666 struct sk_buff *skb;
1667 struct netdev_vif *ndev_vif;
1668 struct slsi_dev *sdev;
1669 int r;
1670
1671 if (!tcp_ack)
1672 return;
1673
1674 if (!tcp_ack->state)
1675 return;
1676
1677 slsi_spinlock_lock(&tcp_ack->lock);
 1678	while ((skb = skb_dequeue(&tcp_ack->list)) != NULL) {
1679 tcp_ack->count = 0;
1680
1681 if (!skb->dev) {
1682 kfree_skb(skb);
1683 slsi_spinlock_unlock(&tcp_ack->lock);
1684 return;
1685 }
1686 ndev_vif = netdev_priv(skb->dev);
1687 sdev = ndev_vif->sdev;
1688 ndev_vif->tcp_ack_stats.tack_timeout++;
1689
1690 r = slsi_tx_data(sdev, skb->dev, skb);
1691 if (r == 0) {
1692 ndev_vif->tcp_ack_stats.tack_sent++;
1693 tcp_ack->last_sent = ktime_get();
1694 } else if (r == -ENOSPC) {
1695 ndev_vif->tcp_ack_stats.tack_dropped++;
1696 slsi_kfree_skb(skb);
1697 } else {
1698 ndev_vif->tcp_ack_stats.tack_dropped++;
1699 }
1700 }
1701 slsi_spinlock_unlock(&tcp_ack->lock);
1702}
1703
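/* Walk the TCP option list (kind/length/value encoding) and return the value
 * of the requested option: the MSS value, the window-scale shift count, 1 if
 * a SACK block is present, or 0 if the option is not found.
 */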
1704static int slsi_netif_tcp_ack_suppression_option(struct sk_buff *skb, u32 option)
1705{
1706 unsigned char *options;
1707 u32 optlen = 0, len = 0;
1708
1709 if (tcp_hdr(skb)->doff > 5)
1710 optlen = (tcp_hdr(skb)->doff - 5) * 4;
1711
1712 options = ((u8 *)tcp_hdr(skb)) + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET;
1713
1714 while (optlen > 0) {
1715 switch (options[0]) {
1716 case TCP_ACK_SUPPRESSION_OPTION_EOL:
1717 return 0;
1718 case TCP_ACK_SUPPRESSION_OPTION_NOP:
1719 len = 1;
1720 break;
1721 case TCP_ACK_SUPPRESSION_OPTION_MSS:
1722 if (option == TCP_ACK_SUPPRESSION_OPTION_MSS)
1723 return ((options[2] << 8) | options[3]);
1724 len = options[1];
1725 break;
1726 case TCP_ACK_SUPPRESSION_OPTION_WINDOW:
1727 if (option == TCP_ACK_SUPPRESSION_OPTION_WINDOW)
1728 return options[2];
1729 len = 1;
1730 break;
1731 case TCP_ACK_SUPPRESSION_OPTION_SACK:
1732 if (option == TCP_ACK_SUPPRESSION_OPTION_SACK)
1733 return 1;
1734 len = options[1];
1735 break;
1736 default:
1737 len = options[1];
1738 break;
1739 }
1740 /* if length field in TCP options is 0, or greater than
1741 * total options length, then options are incorrect; return here
1742 */
1743 if ((len == 0) || (len > optlen)) {
1744 SLSI_DBG_HEX_NODEV(SLSI_TX, skb->data, skb->len < 128 ? skb->len : 128, "SKB:\n");
1745 return 0;
1746 }
1747 optlen -= len;
1748 options += len;
1749 }
1750 return 0;
1751}
1752
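/* A SYN creates (or recycles) a per-stream suppression record keyed on the
 * IPv4 addresses and TCP ports; the MSS and window-scale options carried in
 * the SYN are cached for the later window and ack-count calculations.
 */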
1753static void slsi_netif_tcp_ack_suppression_syn(struct net_device *dev, struct sk_buff *skb)
1754{
1755 struct netdev_vif *ndev_vif = netdev_priv(dev);
1756 struct slsi_tcp_ack_s *tcp_ack;
1757 int index;
1758
1759 SLSI_NET_DBG2(dev, SLSI_TX, "\n");
1760 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1761 tcp_ack = &ndev_vif->ack_suppression[index];
1762 slsi_spinlock_lock(&tcp_ack->lock);
1763
1764 if (!tcp_ack->state) {
1765 slsi_spinlock_unlock(&tcp_ack->lock);
1766 return;
1767 }
1768 /* Recover old/hung/unused record. */
1769 if (tcp_ack->daddr) {
1770 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT * 1000) {
1771 SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
1772 skb_queue_purge(&tcp_ack->list);
1773 tcp_ack->dport = 0;
1774 tcp_ack->sport = 0;
1775 tcp_ack->daddr = 0;
1776 tcp_ack->saddr = 0;
1777 tcp_ack->count = 0;
1778 tcp_ack->ack_seq = 0;
1779 del_timer(&tcp_ack->timer);
1780 }
1781 }
1782
1783 if (tcp_ack->daddr == 0) {
1784 SLSI_NET_DBG2(dev, SLSI_TX, "add at %d (%pI4.%d > %pI4.%d)\n", index, &ip_hdr(skb)->saddr, ntohs(tcp_hdr(skb)->source), &ip_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest));
1785 tcp_ack->daddr = ip_hdr(skb)->daddr;
1786 tcp_ack->saddr = ip_hdr(skb)->saddr;
1787 tcp_ack->dport = tcp_hdr(skb)->dest;
1788 tcp_ack->sport = tcp_hdr(skb)->source;
1789 tcp_ack->count = 0;
1790 tcp_ack->ack_seq = 0;
1791 tcp_ack->slow_start_count = 0;
1792 tcp_ack->tcp_slow_start = true;
1793 if (tcp_ack_suppression_monitor) {
1794 tcp_ack->max = 0;
1795 tcp_ack->age = 0;
1796 } else {
1797 tcp_ack->max = tcp_ack_suppression_max;
1798 tcp_ack->age = tcp_ack_suppression_timeout;
1799 }
1800 tcp_ack->last_sent = ktime_get();
1801
1802 if (tcp_ack_suppression_monitor) {
1803 tcp_ack->last_sample_time = ktime_get();
1804 tcp_ack->last_ack_seq = 0;
1805 tcp_ack->last_tcp_rate = 0;
1806 tcp_ack->num_bytes = 0;
1807 tcp_ack->hysteresis = 0;
1808 }
1809#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1810 tcp_ack->stream_id = index;
1811#endif
1812 /* read and validate the window scaling multiplier */
1813 tcp_ack->window_multiplier = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_WINDOW);
1814 if (tcp_ack->window_multiplier > 14)
1815 tcp_ack->window_multiplier = 0;
1816 tcp_ack->mss = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_MSS);
1817 SLSI_NET_DBG2(dev, SLSI_TX, "options: mss:%u, window:%u\n", tcp_ack->mss, tcp_ack->window_multiplier);
1818 SCSC_HIP4_SAMPLER_TCP_SYN(ndev_vif->sdev->minor_prof, index, tcp_ack->mss);
1819 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, index, be32_to_cpu(tcp_hdr(skb)->seq));
1820 slsi_spinlock_unlock(&tcp_ack->lock);
1821 return;
1822 }
1823 slsi_spinlock_unlock(&tcp_ack->lock);
1824 }
1825}
1826
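/* A FIN tears down the matching suppression record and drops any ack still
 * cached for it.
 */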
1827static void slsi_netif_tcp_ack_suppression_fin(struct net_device *dev, struct sk_buff *skb)
1828{
1829 struct netdev_vif *ndev_vif = netdev_priv(dev);
1830 struct slsi_tcp_ack_s *tcp_ack;
1831 int index;
1832
1833 SLSI_NET_DBG2(dev, SLSI_TX, "\n");
1834 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1835 tcp_ack = &ndev_vif->ack_suppression[index];
1836 slsi_spinlock_lock(&tcp_ack->lock);
1837
1838 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1839 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1840 SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
1841 skb_queue_purge(&tcp_ack->list);
1842 tcp_ack->dport = 0;
1843 tcp_ack->sport = 0;
1844 tcp_ack->daddr = 0;
1845 tcp_ack->saddr = 0;
1846 tcp_ack->count = 0;
1847 tcp_ack->ack_seq = 0;
1848
1849 if (tcp_ack_suppression_monitor) {
1850 tcp_ack->last_ack_seq = 0;
1851 tcp_ack->last_tcp_rate = 0;
1852 tcp_ack->num_bytes = 0;
1853 tcp_ack->hysteresis = 0;
1854 }
1855
1856 del_timer(&tcp_ack->timer);
1857#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
1858 tcp_ack->stream_id = 0;
1859#endif
1860 SCSC_HIP4_SAMPLER_TCP_FIN(ndev_vif->sdev->minor_prof, index);
1861 slsi_spinlock_unlock(&tcp_ack->lock);
1862 return;
1863 }
1864 slsi_spinlock_unlock(&tcp_ack->lock);
1865 }
1866}
1867
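/* Transmit-path hook: returns the SKB to send now, NULL when the ack has been
 * cached for suppression, or the most recent cached ack when a flush condition
 * is met. Only the latest ack per stream is kept; every newer ack replaces
 * (and thereby suppresses) the previous one.
 */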
1868static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb)
1869{
1870 struct netdev_vif *ndev_vif = netdev_priv(dev);
1871 int index, found;
1872 struct slsi_tcp_ack_s *tcp_ack;
1873 int forward_now = 0, flush = 0;
 1874	struct sk_buff *cskb = NULL;
1875 u32 tcp_recv_window_size = 0;
1876
1877 if (tcp_ack_suppression_disable)
1878 return skb;
1879
1880 if (tcp_ack_suppression_disable_2g && !SLSI_IS_VIF_CHANNEL_5G(ndev_vif))
1881 return skb;
1882
 1883	/* For AP type VIFs (AP or P2P GO), check whether the packet originates locally or is forwarded
 1884	 * intra-BSS. For intra-BSS packets the IP and TCP headers are not set, so return the SKB unchanged.
 1885	 */
1886 if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (compare_ether_addr(eth_hdr(skb)->h_source, dev->dev_addr) != 0))
1887 return skb;
1888
 1889	/* Return unchanged any SKB that is not a plain TCP ack. */
1890 if (be16_to_cpu(eth_hdr(skb)->h_proto) != ETH_P_IP)
1891 return skb;
1892 if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1893 return skb;
1894 if (!skb_transport_header_was_set(skb))
1895 return skb;
1896 if (tcp_hdr(skb)->syn) {
1897 slsi_netif_tcp_ack_suppression_syn(dev, skb);
1898 return skb;
1899 }
1900 if (tcp_hdr(skb)->fin) {
1901 slsi_netif_tcp_ack_suppression_fin(dev, skb);
1902 return skb;
1903 }
1904 if (!tcp_hdr(skb)->ack)
1905 return skb;
1906 if (tcp_hdr(skb)->rst)
1907 return skb;
1908 if (tcp_hdr(skb)->urg)
1909 return skb;
1910
1911 ndev_vif->tcp_ack_stats.tack_acks++;
 1912	/* If a record is found, its spinlock is held until the end of the function; last_tcp_ack caches the most recently matched record to avoid rescanning the table. */
1913 found = 0;
1914 if (ndev_vif->last_tcp_ack) {
1915 tcp_ack = ndev_vif->last_tcp_ack;
1916 slsi_spinlock_lock(&tcp_ack->lock);
1917 if (!tcp_ack->state) {
1918 slsi_spinlock_unlock(&tcp_ack->lock);
1919 ndev_vif->tcp_ack_stats.tack_sent++;
1920 SLSI_ERR_NODEV("last_tcp_ack record not enabled\n");
1921 return skb;
1922 }
1923 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1924 (tcp_ack->sport == tcp_hdr(skb)->source) &&
1925 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1926 found = 1;
1927 ndev_vif->tcp_ack_stats.tack_lastrecord++;
1928 } else {
1929 slsi_spinlock_unlock(&tcp_ack->lock);
1930 }
1931 }
1932 if (found == 0) {
1933 /* Search for an existing record on this connection. */
1934 for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
1935 tcp_ack = &ndev_vif->ack_suppression[index];
1936
1937 slsi_spinlock_lock(&tcp_ack->lock);
1938
1939 if (!tcp_ack->state) {
1940 slsi_spinlock_unlock(&tcp_ack->lock);
1941 ndev_vif->tcp_ack_stats.tack_sent++;
1942 SLSI_ERR_NODEV("tcp_ack record %d not enabled\n", index);
1943 return skb;
1944 }
1945 if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
1946 (tcp_ack->sport == tcp_hdr(skb)->source) &&
1947 (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
1948 found = 1;
1949 ndev_vif->tcp_ack_stats.tack_searchrecord++;
1950 break;
1951 }
1952 slsi_spinlock_unlock(&tcp_ack->lock);
1953 }
1954 if (found == 0) {
 1955			/* No record found, so we cannot suppress this ack; send it as-is. */
1956 ndev_vif->tcp_ack_stats.tack_norecord++;
1957 ndev_vif->tcp_ack_stats.tack_sent++;
1958 return skb;
1959 }
1960 ndev_vif->last_tcp_ack = tcp_ack;
1961 }
1962
1963 /* If it is a DUP Ack, send straight away without flushing the cache. */
1964 if (be32_to_cpu(tcp_hdr(skb)->ack_seq) < tcp_ack->ack_seq) {
 1965		/* Check for wrap-around: a negative signed difference means the ack really is older (a duplicate); otherwise ack_seq has simply wrapped and is in fact newer. */
1966 if (((s32)((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - (u32)tcp_ack->ack_seq)) < 0) {
1967 ndev_vif->tcp_ack_stats.tack_dacks++;
1968 ndev_vif->tcp_ack_stats.tack_sent++;
1969 slsi_spinlock_unlock(&tcp_ack->lock);
1970 return skb;
1971 }
1972 }
1973
1974 /* Has data, forward straight away. */
1975 if (be16_to_cpu(ip_hdr(skb)->tot_len) > ((ip_hdr(skb)->ihl * 4) + (tcp_hdr(skb)->doff * 4))) {
1976 SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr(skb)->seq));
1977 SCSC_HIP4_SAMPLER_TCP_CWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, (skb->sk) ? tcp_sk(skb->sk)->snd_cwnd : 0);
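	/* On kernels newer than 4.14, sysctl_tcp_wmem is no longer a global
	 * symbol (it moved to per-netns state), so the profiler samples the
	 * global sysctl_tcp_mem limit instead.
	 */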
 1978	#if KERNEL_VERSION(4, 14, 0) >= LINUX_VERSION_CODE
 1979		SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, sysctl_tcp_wmem[2]);
 1980	#else
 1981		SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, sysctl_tcp_mem[2]);
 1982	#endif
1983 ndev_vif->tcp_ack_stats.tack_hasdata++;
1984 forward_now = 1;
1985 goto _forward_now;
1986 }
1987
1988 /* PSH flag set, forward straight away. */
1989 if (tcp_hdr(skb)->psh) {
1990 ndev_vif->tcp_ack_stats.tack_psh++;
1991 forward_now = 1;
1992 goto _forward_now;
1993 }
1994
 1995	/* For connections supporting Explicit Congestion Notification, the receiver sets the ECE flag
 1996	 * after receiving a segment marked Congestion Experienced (CE). ECE-marked acks must be forwarded immediately for ECN to work.
 1997	 */
1998 if (tcp_hdr(skb)->ece) {
1999 ndev_vif->tcp_ack_stats.tack_ece++;
2000 forward_now = 1;
2001 goto _forward_now;
2002 }
2003
2004 if (tcp_ack_suppression_monitor) {
 2005		/* Measure the throughput of the TCP stream by accumulating the bytes acknowledged by each ack
 2006		 * over a sampling period, then apply a different degree of ack suppression based on that throughput.
 2007		 */
2008 if (tcp_ack->last_ack_seq)
2009 tcp_ack->num_bytes += ((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - tcp_ack->last_ack_seq);
2010
2011 tcp_ack->last_ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
2012 if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sample_time)) > tcp_ack_suppression_monitor_interval) {
2013 u16 acks_max;
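			/* Bytes acked during the sampling window converted to Mbit/s:
			 * (num_bytes * 8) bits / (interval_ms * 1000) == Mbit/s.
			 */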
2014 u32 tcp_rate = ((tcp_ack->num_bytes * 8) / (tcp_ack_suppression_monitor_interval * 1000));
2015
2016 SLSI_NET_DBG2(dev, SLSI_TX, "hysteresis:%u total_bytes:%llu rate:%u Mbps\n",
2017 tcp_ack->hysteresis, tcp_ack->num_bytes, tcp_rate);
2018
 2019			/* hysteresis - change only if the variation from the last value exceeds the threshold */
2020 if ((abs(tcp_rate - tcp_ack->last_tcp_rate)) > tcp_ack->hysteresis) {
2021 if (tcp_rate >= tcp_ack_suppression_rate_very_high) {
2022 tcp_ack->max = tcp_ack_suppression_rate_very_high_acks;
2023 tcp_ack->age = tcp_ack_suppression_rate_very_high_timeout;
2024 } else if (tcp_rate >= tcp_ack_suppression_rate_high) {
2025 tcp_ack->max = tcp_ack_suppression_rate_high_acks;
2026 tcp_ack->age = tcp_ack_suppression_rate_high_timeout;
2027 } else if (tcp_rate >= tcp_ack_suppression_rate_low) {
2028 tcp_ack->max = tcp_ack_suppression_rate_low_acks;
2029 tcp_ack->age = tcp_ack_suppression_rate_low_timeout;
2030 } else {
2031 tcp_ack->max = 0;
2032 tcp_ack->age = 0;
2033 }
2034
 2035				/* Acks should not be suppressed for more than 20% of the receiver window size;
 2036				 * doing so can increase the RTT and lower the transmission rate at the
 2037				 * TCP sender.
 2038				 */
2039 if (tcp_ack->window_multiplier)
 2040					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (1 << tcp_ack->window_multiplier);
2041 else
2042 tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
2043 SCSC_HIP4_SAMPLER_TCP_RWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, tcp_recv_window_size);
2044
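				/* tcp_recv_window_size / 5 is 20% of the receive window; dividing by
				 * (2 * mss) converts that to a number of delayed acks, each of which
				 * can cover up to two full-size segments.
				 */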
 2045				acks_max = tcp_ack->mss ? ((tcp_recv_window_size / 5) / (2 * tcp_ack->mss)) : 0;
 2046				if (tcp_ack->max > acks_max)
 2047					tcp_ack->max = acks_max;
2048 }
2049 tcp_ack->hysteresis = tcp_rate / 5; /* 20% hysteresis */
2050 tcp_ack->last_tcp_rate = tcp_rate;
2051 tcp_ack->num_bytes = 0;
2052 tcp_ack->last_sample_time = ktime_get();
2053 }
2054 }
2055
2056 /* Do not suppress Selective Acks. */
2057 if (slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_SACK)) {
2058 ndev_vif->tcp_ack_stats.tack_sacks++;
2059
 2060		/* A TCP selective ack suggests TCP segment loss. The TCP sender
 2061		 * may reduce its congestion window and limit the number of segments
 2062		 * it sends before waiting for an ack.
 2063		 * It is best to switch off TCP ack suppression for a while
 2064		 * (approximated here by the tcp_ack_suppression_slow_start_acks
 2065		 * count) and send as many acks as possible, so the cwnd can grow
 2066		 * again at the TCP sender.
 2067		 */
2068 tcp_ack->slow_start_count = 0;
2069 tcp_ack->tcp_slow_start = true;
2070 forward_now = 1;
2071 goto _forward_now;
2072 }
2073
2074 if (be32_to_cpu(tcp_hdr(skb)->ack_seq) == tcp_ack->ack_seq) {
2075 ndev_vif->tcp_ack_stats.tack_dacks++;
2076 forward_now = 1;
2077 goto _forward_now;
2078 }
2079
 2080	/* When the TCP connection is established, wait until a number of acks
 2081	 * have been sent before applying the suppression rules. This allows
 2082	 * the cwnd to grow at a normal rate at the TCP sender.
 2083	 */
2084 if (tcp_ack->tcp_slow_start) {
2085 tcp_ack->slow_start_count++;
2086 if (tcp_ack->slow_start_count >= tcp_ack_suppression_slow_start_acks) {
2087 tcp_ack->slow_start_count = 0;
2088 tcp_ack->tcp_slow_start = false;
2089 }
2090 forward_now = 1;
2091 goto _forward_now;
2092 }
2093
 2094	/* Do not suppress if the throughput monitor has decided against it (max or age is zero). */
2095 if (tcp_ack_suppression_monitor && (!tcp_ack->max || !tcp_ack->age)) {
2096 forward_now = 1;
2097 goto _forward_now;
2098 }
2099
 2100	/* Do not suppress delayed acks that acknowledge more than 2 TCP
 2101	 * maximum-size segments.
 2102	 */
2103 if (((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq)) - (tcp_ack->ack_seq) > (2 * tcp_ack->mss)) {
2104 ndev_vif->tcp_ack_stats.tack_delay_acks++;
2105 forward_now = 1;
2106 goto _forward_now;
2107 }
2108
 2109	/* Do not suppress unless the receive window is large
 2110	 * enough.
 2111	 * With a small receive window the cwnd cannot grow much,
 2112	 * so suppressing acks has a negative impact on the sender
 2113	 * rate because it increases the round-trip time measured
 2114	 * at the sender.
 2115	 */
2116 if (!tcp_ack_suppression_monitor) {
2117 if (tcp_ack->window_multiplier)
 2118			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (1 << tcp_ack->window_multiplier);
2119 else
2120 tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
2121 if (tcp_recv_window_size < tcp_ack_suppression_rcv_window * 1024) {
2122 ndev_vif->tcp_ack_stats.tack_low_window++;
2123 forward_now = 1;
2124 goto _forward_now;
2125 }
2126 }
2127
2128 if (!tcp_ack_suppression_monitor && ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= tcp_ack->age) {
2129 ndev_vif->tcp_ack_stats.tack_ktime++;
2130 forward_now = 1;
2131 goto _forward_now;
2132 }
2133
 2134	/* Cache is empty: hold this ack back and start the ageing timer. */
2135 if (!skb_queue_len(&tcp_ack->list)) {
2136 skb_queue_tail(&tcp_ack->list, skb);
2137 tcp_ack->count = 1;
2138 tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
2139 if (tcp_ack->age)
2140 mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
2141 slsi_spinlock_unlock(&tcp_ack->lock);
 2142		return NULL;
2143 }
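	/* Replace any previously cached ack with this one (the older ack counts as
	 * suppressed and is freed), then either keep caching or flush the newest
	 * ack to the transmit path.
	 */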
2144_forward_now:
2145 cskb = skb_dequeue(&tcp_ack->list);
2146 if (cskb) {
2147 if (tcp_ack_suppression_monitor && tcp_ack->age)
2148 mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
2149 ndev_vif->tcp_ack_stats.tack_suppressed++;
2150 slsi_kfree_skb(cskb);
2151 }
2152 skb_queue_tail(&tcp_ack->list, skb);
2153 tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
2154 tcp_ack->count++;
2155 if (forward_now) {
2156 flush = 1;
2157 } else {
2158 if (tcp_ack->count >= tcp_ack->max) {
2159 flush = 1;
2160 ndev_vif->tcp_ack_stats.tack_max++;
2161 }
2162 }
2163 if (!flush) {
2164 slsi_spinlock_unlock(&tcp_ack->lock);
 2165		return NULL;
2166 }
2167 /* Flush the cache. */
2168 cskb = skb_dequeue(&tcp_ack->list);
2169 tcp_ack->count = 0;
2170
2171 if (tcp_ack->age)
2172 del_timer(&tcp_ack->timer);
2173
2174 tcp_ack->last_sent = ktime_get();
2175
2176 slsi_spinlock_unlock(&tcp_ack->lock);
2177 ndev_vif->tcp_ack_stats.tack_sent++;
2178 return cskb;
2179}
2180#endif