/* adt3-S dhd_driver source code [1/1]
 * [GitHub/LineageOS/G12/android_hardware_amlogic_kernel-modules_dhd-driver.git] / bcmdhd.100.10.315.x / linux_pkt.c
 */
1 /*
2 * Linux Packet (skb) interface
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $
28 */
29
30 #include <typedefs.h>
31 #include <bcmendian.h>
32 #include <linuxver.h>
33 #include <bcmdefs.h>
34
35 #include <linux/random.h>
36
37 #include <osl.h>
38 #include <bcmutils.h>
39 #include <pcicfg.h>
40 #include <dngl_stats.h>
41 #include <dhd.h>
42
43 #include <linux/fs.h>
44 #include "linux_osl_priv.h"
45
46 #ifdef CONFIG_DHD_USE_STATIC_BUF
47
48 bcm_static_buf_t *bcm_static_buf = 0;
49 bcm_static_pkt_t *bcm_static_skb = 0;
50
51 void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
52 #endif /* CONFIG_DHD_USE_STATIC_BUF */
53
#ifdef BCM_OBJECT_TRACE
/* Zero the OSL packet tag (stored in skb->cb), but don't clear the first
 * 4 bytes: that is the packet serial number used by object tracing.
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb + 4, 0, tagsz - 4); \
} while (0)
#else
/* Zero the entire OSL packet tag (stored in skb->cb). */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb, 0, tagsz); \
} while (0)
#endif /* BCM_OBJECT_TRACE */
72
/*
 * Hook up the platform-preallocated ("static") buffer and skb pools.
 *
 * Nothing is allocated here on success: the memory is obtained from the
 * platform glue via wifi_platform_prealloc() and remains owned by it.
 *
 * Returns 0 on success (or when static buffers are compiled out),
 * -ENOMEM when the platform has no prealloc memory for a section.
 */
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("succeed to alloc static buf\n");
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		/* Payload area begins after the first STATIC_BUF_SIZE bytes,
		 * which hold the bcm_static_buf_t bookkeeping itself.
		 */
		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		/* skb-pool bookkeeping is placed at a fixed 2048-byte offset
		 * inside the static buf region -- presumably reserved by the
		 * platform prealloc layout; TODO(review): confirm against
		 * wifi_platform_prealloc().
		 */
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		/* Copy the preallocated skb pointer table, then mark every
		 * pool slot as free.
		 */
		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}
124
125 int osl_static_mem_deinit(osl_t *osh, void *adapter)
126 {
127 #ifdef CONFIG_DHD_USE_STATIC_BUF
128 if (bcm_static_buf) {
129 bcm_static_buf = 0;
130 }
131 #ifdef BCMSDIO
132 if (bcm_static_skb) {
133 bcm_static_skb = 0;
134 }
135 #endif /* BCMSDIO */
136 #endif /* CONFIG_DHD_USE_STATIC_BUF */
137 return 0;
138 }
139
140 /*
141 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
142 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
143 * explicitly managed from a coherency perspective.
144 */
/* No-op on this build: forwarder-buffer coherency management (see the
 * block comment above) is not needed here; the empty stub is kept so
 * callers need no #ifdef.
 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
{
}
149
150 static struct sk_buff * BCMFASTPATH
151 osl_alloc_skb(osl_t *osh, unsigned int len)
152 {
153 struct sk_buff *skb;
154 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
155 gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
156 #ifdef DHD_USE_ATOMIC_PKTGET
157 flags = GFP_ATOMIC;
158 #endif /* DHD_USE_ATOMIC_PKTGET */
159 skb = __dev_alloc_skb(len, flags);
160 #else
161 skb = dev_alloc_skb(len);
162 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
163
164 return skb;
165 }
166
167 /* Convert a driver packet to native(OS) packet
168 * In the process, packettag is zeroed out before sending up
169 * IP code depends on skb->cb to be setup correctly with various options
170 * In our case, that means it should be 0
171 */
172 struct sk_buff * BCMFASTPATH
173 osl_pkt_tonative(osl_t *osh, void *pkt)
174 {
175 struct sk_buff *nskb;
176
177 if (osh->pub.pkttag)
178 OSL_PKTTAG_CLEAR(pkt);
179
180 /* Decrement the packet counter */
181 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
182 atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
183
184 }
185 return (struct sk_buff *)pkt;
186 }
187
188 /* Convert a native(OS) packet to driver packet.
189 * In the process, native packet is destroyed, there is no copying
190 * Also, a packettag is zeroed out
191 */
/* Convert a native(OS) packet to driver packet.
 * In the process, native packet is destroyed, there is no copying
 * Also, a packettag is zeroed out
 */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
	struct sk_buff *cskb;
	struct sk_buff *nskb;
	unsigned long pktalloced = 0;

	/* skb->cb becomes the OSL packet tag; clear any stack leftovers */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* walk the PKTCLINK() list (chained packets)... */
	for (cskb = (struct sk_buff *)pkt;
		cskb != NULL;
		cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

		/* ...and, within each chain element, the skb->next buffer list */
		for (nskb = cskb; nskb; nskb = nskb->next) {

			/* Count every buffer taken over from the OS */
			pktalloced++;

			/* clean the 'prev' pointer
			 * Kernel 3.18 is leaving skb->prev pointer set to skb
			 * to indicate a non-fragmented skb
			 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
			nskb->prev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */

		}
	}

	/* Fold the per-buffer count into the shared allocation counter */
	atomic_add(pktalloced, &osh->cmn->pktalloced);

	return (void *)pkt;
}
229
230 /* Return a new packet. zero out pkttag */
/* Return a new packet of 'len' bytes, with a zeroed pkttag.
 * Returns NULL on allocation failure (or when the low-memory test knob
 * deliberately fails the request).  The BCM_OBJECT_TRACE variant also
 * records the caller for object-leak tracking.
 */
void * BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
linux_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
	/* lmtest: when non-zero, randomly fail roughly lmtest% of
	 * allocations (random byte vs. 256*lmtest/100 threshold) to
	 * exercise out-of-memory paths.
	 */
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

	if ((skb = osl_alloc_skb(osh, len))) {
		/* Open-coded skb_put(): extend the data area to 'len' so the
		 * caller receives a packet already spanning the full length.
		 */
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
	}

	return ((void*) skb);
}
259
260 /* Free the driver packet. Free the tag if present */
/* Free the driver packet (and its whole skb->next list). Free the tag if
 * present.  'send' selects which completion hook (tx_fn/rx_fn) is notified
 * before the buffers are returned to the kernel.  Packets that belong to
 * the static prealloc pool are never freed here (guarded below).
 */
void BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
linux_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	/* Give the registered tx/rx completion hook first crack at the pkt */
	if (send) {
		if (osh->pub.tx_fn) {
			osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
		}
	} else {
		if (osh->pub.rx_fn) {
			osh->pub.rx_fn(osh->pub.rx_ctx, p);
		}
	}

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	/* Static-pool skbs are marked via skb->mac_len (see
	 * osl_pktget_static/osl_pktfree_static); they must never be handed
	 * to the kernel free routines, so bail out loudly instead.
	 */
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in used\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		/* detach before freeing so the kernel doesn't walk our chain */
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		{
			if (skb->destructor) {
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			} else {
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
			}
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}
328
329 #ifdef CONFIG_DHD_USE_STATIC_BUF
/*
 * Allocate a packet from the platform-preallocated static skb pool.
 *
 * Falls back to linux_pktget() when the pool is absent or 'len' exceeds
 * DHD_SKB_MAX_BUFSIZE.  Pool slots are fixed-size recycled skbs; a slot
 * is claimed by setting pkt_use[slot] under the pool lock/semaphore.
 */
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!bcm_static_skb)
		return linux_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return linux_pktget(osh, len);
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		uint32 index;
		/* Round-robin scan starting after the last allocation so the
		 * slots are used evenly; 'i' counts probes, 'index' is the
		 * slot actually tried.
		 */
		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
			bcm_static_skb->last_allocated_index++;
			if (bcm_static_skb->skb_8k[index] &&
				bcm_static_skb->pkt_use[index] == 0) {
				break;
			}
		}

		if (i < STATIC_PKT_2PAGE_NUM) {
			bcm_static_skb->pkt_use[index] = 1;
			skb = bcm_static_skb->skb_8k[index];
			/* Reset the recycled skb: rewind data to head,
			 * re-apply the default headroom, then size to 'len'.
			 */
			skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
#else
			skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->data += PKT_HEADROOM_DEFAULT;
			skb->cloned = 0;
			skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;
			/* mac_len doubles as the pool in-use marker (checked
			 * by linux_pktfree and osl_pktfree_static)
			 */
			skb->mac_len = PREALLOC_USED_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return skb;
		}
	}

	/* NOTE(review): unlike the non-CTRLBUF path below, pool exhaustion
	 * here returns NULL instead of falling back to linux_pktget().
	 */
	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return NULL;
#else
	down(&bcm_static_skb->osl_pkt_sem);

	/* First-fit search of the 1-page pool */
	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
			if (bcm_static_skb->skb_4k[i] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if (i != STATIC_PKT_1PAGE_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	/* 2-page pool: pkt_use[] is shared across pools, so 'i' runs offset
	 * by STATIC_PKT_1PAGE_NUM while skb_8k[] itself is 0-based.
	 */
	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	/* Single oversized (16K) buffer, tracked in the last pkt_use slot */
	if (bcm_static_skb->skb_16k &&
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return linux_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
461
/*
 * Return a packet to the static pool by clearing its pkt_use slot.
 * Packets that do not belong to the pool are released through
 * linux_pktfree() instead.  Mirrors osl_pktget_static().
 */
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	/* No pool configured: fall through to the regular free path */
	if (!bcm_static_skb) {
		linux_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	/* Identify the slot by pointer; diagnose double frees and stale
	 * in-use markers, then flip mac_len to the pool "free" magic.
	 */
	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double free\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in used\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	/* Not a pool packet -- log it; note it is NOT freed on this path */
	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	/* 1-page pool slots */
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	/* 2-page pool slots: pkt_use[] index is offset by the 1-page count */
	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	/* Single oversized (16K) buffer, last pkt_use slot */
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif // endif
	up(&bcm_static_skb->osl_pkt_sem);
	/* Not from the pool: free it as a dynamically allocated packet */
	linux_pktfree(osh, p, send);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
533 #endif /* CONFIG_DHD_USE_STATIC_BUF */
534
535 /* Clone a packet.
536 * The pkttag contents are NOT cloned.
537 */
/* Clone a packet.
 * The pkttag contents are NOT cloned (skb_clone copies skb->cb, so the
 * tag is re-zeroed below).  Returns the clone, or NULL on failure.
 */
void *
#ifdef BCM_OBJECT_TRACE
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
{
	void * p;

	/* Chained packets must be duplicated link-by-link by the caller */
	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

	/* GFP_ATOMIC: non-blocking clone, safe in atomic context */
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
		return NULL;

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* The clone counts as a new packet against the allocation counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

	return (p);
}
569
570 /*
571 * BINOSL selects the slightly slower function-call-based binary compatible osl.
572 */
573
574 uint
575 osl_pktalloced(osl_t *osh)
576 {
577 if (atomic_read(&osh->cmn->refcount) == 1)
578 return (atomic_read(&osh->cmn->pktalloced));
579 else
580 return 0;
581 }
582
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
/*
 * Partially "orphan" a TCP skb so TCP Small Queues (TSQ) throttling does
 * not starve the WiFi device: (tsq-1)/tsq of the skb's truesize is
 * subtracted from the socket's sk_wmem_alloc before skb_orphan(), letting
 * the socket queue more skbs.  Only acts on skbs whose destructor is
 * tcp_wfree; everything else returns untouched.
 */
void
osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;	/* cached address of tcp_wfree() */

	if (tsq <= 0)
		return;

	/* Ignore skbs with no destructor or plain sock_wfree (non-TCP) */
	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		/* tcp_wfree is not exported, so identify it by resolving the
		 * destructor address to a symbol name.  sprint_symbol()
		 * yields "name+off/size [mod]"; truncate after 9 chars
		 * (strlen("tcp_wfree")) before comparing.
		 */
		sprint_symbol(sym, (unsigned long)skb->destructor);
		sym[9] = 0;
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* abstract a certain portion of skb truesize from the socket
	 * sk_wmem_alloc to allow more skb can be allocated for this
	 * socket for better cushion meeting WiFi device requirement
	 */
	fraction = skb->truesize * (tsq - 1) / tsq;
	skb->truesize -= fraction;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	/* sk_wmem_alloc became refcount_t in 4.13; adjust its inner atomic */
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
#else
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
#endif // endif
	skb_orphan(skb);
}
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */