import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / misc / mediatek / eemcs / lte_dev_test_lib.c
1
2 #include <linux/skbuff.h>
3 #include <linux/time.h>
4 #include <linux/jiffies.h>
5
6
7
8
/*
 * Compute an 8-bit additive checksum over a memory region.
 * Sums lengthToCalculate bytes starting at startingAddr_p (mod 256)
 * and stores the result in *checksum_p. A zero length yields 0.
 */
void f_calc_cs_byte(void *startingAddr_p, unsigned int lengthToCalculate, unsigned char *checksum_p)
{
    unsigned char *byte_p = (unsigned char *)startingAddr_p;
    unsigned char sum = 0;
    unsigned int idx;

    for (idx = 0; idx < lengthToCalculate; idx++) {
        sum += byte_p[idx];
    }

    *checksum_p = sum;
}
21
22
/*
 * Build one uplink test packet and queue it on UL SW queue ulq_no.
 *
 * data_length : requested packet length in bytes; for the AUTO/default
 *               pattern it is grown to at least sizeof(AT_PKT_HEADER).
 * ul_que/dl_que : queue ids stamped into the AT packet header so the
 *               device loops the packet back on the expected DL queue.
 *
 * Returns RET_SUCCESS, or RET_FAIL on queue-space timeout, skb
 * allocation failure, or SW-queue write failure.
 *
 * Fix: the skb used to be allocated with the caller's data_length BEFORE
 * the AUTO branch could enlarge data_length to sizeof(AT_PKT_HEADER),
 * so skb_put()/memcpy overran the skb for short requests. The staging
 * buffer is now filled first and the skb allocated with the final length.
 */
int sdio_send_pkt(int ulq_no,int data_length, unsigned char ul_que, unsigned char dl_que)
{
    int ret, i;
    struct sk_buff *skb = NULL;
    PAT_PKT_HEADER pAtHeader = NULL;
    unsigned char rand_seed = 0;
    unsigned char *buf;
    unsigned char cksm = 0;
    unsigned int timeout = 0;

    ret = RET_SUCCESS;
    buf = buff_kmemory_ulpkt_data;

    /* Poll (1us steps) until the UL SW queue has room, bounded by 10000 tries. */
    while (mtlte_df_UL_swq_space(ulq_no) == 0) {
        KAL_SLEEP_USEC(1);
        timeout++;
        if (timeout > 10000) {
            KAL_DBGPRINT(KAL, DBG_ERROR,("%s : send pkt timeout becaucse no que space!\n", KAL_FUNC_NAME));
            return RET_FAIL;
        }
    }

    /* Fill the staging buffer; AUTO/default may enlarge data_length. */
    switch (send_pattern) {
    case ATCASE_LB_DATA_5A:
        memset(buf, 0x5a, data_length);
        break;
    case ATCASE_LB_DATA_A5:
        memset(buf, 0xa5, data_length);
        break;
    case ATCASE_LB_DATA_INC:
        /* Bytes increment from a random start value. */
        get_random_bytes(&rand_seed, 1);
        for (i = 0; i < data_length; i++) {
            buf[i] = rand_seed++;
        }
        break;
    case ATCASE_LB_DATA_AUTO:
    default:
        /* AT test header plus incrementing payload; the header carries a
         * one's-complement additive checksum so the receiver's whole-header
         * sum comes out as 0xff. */
        pAtHeader = (PAT_PKT_HEADER)buf;
        memset(pAtHeader, 0, sizeof(AT_PKT_HEADER));

        get_random_bytes(&rand_seed, 1);
        KAL_DBGPRINT(KAL, DBG_TRACE,("rand_seed = %d..\n", rand_seed));
        pAtHeader->RndSeed = rand_seed;
        pAtHeader->SrcQID = ul_que & 0xf;
        pAtHeader->DstQID = dl_que & 0xf;
        pAtHeader->SeqNo = 0;
        if (data_length < sizeof(AT_PKT_HEADER)) {
            data_length = sizeof(AT_PKT_HEADER);
        }
        pAtHeader->PktLen = data_length;

        f_calc_cs_byte(pAtHeader, sizeof(AT_PKT_HEADER), &cksm);
        pAtHeader->Checksum = ~cksm;

        /* fill payload, don't fill memory length larger than URB buffer */
        for (i = 0; i < (data_length - sizeof(AT_PKT_HEADER)); i++) {
            pAtHeader->Data[i] = rand_seed++;
        }
        break;
    }

    /* Allocate the skb only now, with the final (possibly grown) length. */
    if ((skb = dev_alloc_skb(data_length)) == NULL) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("%s : allocate skb failed\n", KAL_FUNC_NAME));
        return RET_FAIL;
    }

    /* fill the data content */
    memcpy(skb_put(skb, data_length), buf, data_length);

    /* always reply we have free space or add ccci_write_space_check */
    ret = mtlte_df_UL_write_skb_to_swq(ulq_no, skb);

    if (ret != KAL_SUCCESS) {
        return RET_FAIL;
    }
    return ret;
}
111
112
/*
 * Command the device to send p_dl_cfg->gpd_num DL packets (TGPD layout in
 * p_dl_cfg) and drain/verify them synchronously from the DL SW queue.
 *
 * Returns RET_SUCCESS, or RET_FAIL on content mismatch, receive timeout,
 * or bad command ack.
 *
 * Fixes: "timeout ==0;" was a no-op comparison so the idle counter never
 * reset while packets were still arriving; the compare-failure and
 * ack-failure paths returned without freeing the skb / restoring the
 * auto-receive option.
 */
int sdio_dl_npkt(athif_dl_tgpd_cfg_t *p_dl_cfg)
{
    int ret = RET_SUCCESS;
    athif_cmd_t cmd;
    unsigned int recv_cnt = 0, timeout = 0, i = 0;
    unsigned int old_recv_cnt = 0;
    struct sk_buff *result_ptr = NULL;
    attest_option_t back_test_option;

    /* This function drains the queue itself, so pause the auto-receive worker. */
    back_test_option = sdio_test_option;
    sdio_test_option.auto_receive_pkt = false;

    memcpy(cmd.buf, p_dl_cfg, sizeof(athif_dl_tgpd_cfg_t));
    cmd.cmd = ATHIF_CMD_DL_SEND_N;
    cmd.len = sizeof(athif_dl_tgpd_cfg_t);

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
    recv_cnt = 0;

    /* wait receiving packets */
    while (recv_cnt != p_dl_cfg->gpd_num) {

        result_ptr = mtlte_df_DL_read_skb_from_swq(p_dl_cfg->q_num);
        if (result_ptr != NULL) {
            recv_cnt++;
            mtlte_df_DL_pkt_handle_complete(p_dl_cfg->q_num);

            KAL_DBGPRINT(KAL, DBG_TRACE,("[INFO] : receive pkt from RxQ %d .\n", p_dl_cfg->q_num));

            if (true == sdio_test_option.show_dl_content) {
                KAL_DBGPRINT(KAL, DBG_ERROR,("Content : "));
                for (i = 0; i < result_ptr->len; i++) {
                    KAL_DBGPRINT(KAL, DBG_ERROR,("%x ", *(result_ptr->data+i) ));
                }
                KAL_DBGPRINT(KAL, DBG_ERROR,(" \n"));
            }

            if (true == sdio_test_option.exam_dl_content) {
                if (RET_FAIL == f_compare_recv_pkt(result_ptr, p_dl_cfg->q_num)) {
                    KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] data compare error at que=%d gpd_num=%d !!! \n", \
                        KAL_FUNC_NAME, p_dl_cfg->q_num, p_dl_cfg->gpd_num)) ;
                    KAL_DBGPRINT(KAL, DBG_ERROR,(" buf_len=%d, ext_len=%d, bd_num=%d !!! \n", \
                        p_dl_cfg->tgpd_format.tgpd_buf_len, p_dl_cfg->tgpd_format.tgpd_ext_len, p_dl_cfg->tgpd_format.tbd_num)) ;
                    /* Fix: free the skb and restore options instead of leaking them. */
                    dev_kfree_skb(result_ptr);
                    ret = RET_FAIL;
                    goto restore;
                }
            }
        }

        dev_kfree_skb(result_ptr);

        KAL_SLEEP_USEC(1);
        /* Fix: was "timeout ==0;" — assignment intended. */
        if (recv_cnt != old_recv_cnt) { timeout = 0; }
        else { timeout++; }

        if (timeout > 10000) { /* no packet for 10000 polls */
            KAL_DBGPRINT(KAL, DBG_ERROR,("Timeout at receiving packet, received packet now = %d \n", recv_cnt ));
            ret = RET_FAIL;
            break;
        }
        old_recv_cnt = recv_cnt;
    }

    if (RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT)) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Device response test fail!!! \n"));
        ret = RET_FAIL;
    }

restore:
    sdio_test_option.auto_receive_pkt = back_test_option.auto_receive_pkt;
    return ret;
}
193
/*
 * Command the device to send pkt_num randomly-sized DL packets on queue
 * que_num; the auto-receive thread collects and verifies them while this
 * function waits on the global packet counter.
 *
 * Returns RET_SUCCESS, or RET_FAIL on receive-thread error, timeout, or
 * bad command ack.
 *
 * Fixes: "timeout == 0;" was a no-op comparison so the idle counter never
 * reset on progress; the ack-failure path returned early without turning
 * the auto-receive/exam options back off.
 */
int sdio_dl_n_rand_pkt(unsigned int pkt_num , unsigned int que_num)
{
    int ret = RET_SUCCESS;
    athif_cmd_t cmd;
    unsigned int recv_cnt = 0, timeout = 0, old_pkt_cnt = 0;
    athif_basic_set_t tst_cfg;

    tst_cfg.gpd_num = pkt_num;
    tst_cfg.q_num = que_num;
    memcpy(cmd.buf, &tst_cfg, sizeof(athif_basic_set_t));
    cmd.cmd = ATHIF_CMD_DL_SEND_RAND_N;
    cmd.len = sizeof(athif_basic_set_t);
    recv_cnt = tst_cfg.gpd_num;

    /* Let the receive thread take and check the packets. */
    sdio_test_option.auto_receive_pkt = true;
    sdio_test_option.exam_dl_content = true;
    recv_total_pkt_cnt = 0;

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, sizeof(athif_basic_set_t), dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);

    /* wait receiving packets */
    while (recv_total_pkt_cnt < recv_cnt) {
        KAL_SLEEP_MSEC(1);
        /* Fix: was "timeout == 0;" — assignment intended. */
        if (recv_total_pkt_cnt != old_pkt_cnt) { timeout = 0; }
        else { timeout++; }

        if (recv_th_rslt == RET_FAIL) {
            KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] receive pkt fail at %d pkt !!! \n",KAL_FUNC_NAME, recv_total_pkt_cnt)) ;
            ret = RET_FAIL;
            break;
        }
        if (timeout > 1000) { /* ~1sec with no packet */
            ret = RET_FAIL;
            KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] total receive pkt =%d But expect pkt =%d !!! \n",KAL_FUNC_NAME, recv_total_pkt_cnt, recv_cnt)) ;
            break;
        }
        old_pkt_cnt = recv_total_pkt_cnt;
    }

    /* some commands should not get cmd_ack */
    if (RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT)) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Device response test fail!!! \n"));
        /* Fix: fall through so the options below are restored. */
        ret = RET_FAIL;
    }

    sdio_test_option.auto_receive_pkt = false;
    sdio_test_option.exam_dl_content = false;

    return ret;
}
250
/*
 * Stress variant of sdio_dl_n_rand_pkt(): the device sends pkt_num random
 * DL packets across queues 1..que_num while the auto-receive thread
 * verifies them; this function polls the global counter every 10ms.
 *
 * Returns RET_SUCCESS, or RET_FAIL on receive-thread error, timeout, or
 * bad command ack.
 *
 * Fixes: "timeout == 0;" was a no-op comparison so the idle counter never
 * reset on progress; the ack-failure path returned early without turning
 * the auto-receive/exam options back off.
 */
int sdio_dl_n_rand_stress(unsigned int pkt_num, unsigned int que_num)
{
    int ret = RET_SUCCESS;
    athif_cmd_t cmd;
    unsigned int recv_cnt = 0, timeout = 0;
    unsigned int old_pkt_cnt = 0;

    memcpy(cmd.buf, &pkt_num, sizeof(unsigned int));
    /* ep_md = n, test ep 1~n */
    cmd.buf[4] = que_num;
    cmd.cmd = ATHIF_CMD_DL_SEND_RAND_STRESS;
    /* TODO: cmd.buf[5]~[8] is used by ming, please check it if this test case failed. */
    cmd.len = 1 + sizeof(unsigned int);
    recv_cnt = pkt_num;

    sdio_test_option.auto_receive_pkt = true;
    sdio_test_option.exam_dl_content = true;
    recv_total_pkt_cnt = 0;

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, (1 + sizeof(unsigned int)), dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);

    /* wait receiving packets */
    while (recv_total_pkt_cnt < recv_cnt) {
        KAL_SLEEP_MSEC(10);
        /* Fix: was "timeout == 0;" — assignment intended. */
        if (recv_total_pkt_cnt != old_pkt_cnt) { timeout = 0; }
        else { timeout++; }

        if (recv_th_rslt == RET_FAIL) {
            ret = RET_FAIL;
            KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] receive pkt fail at %d pkt !!! \n",KAL_FUNC_NAME, recv_total_pkt_cnt)) ;
            break;
        }
        if (timeout > 5000) { /* 5000 idle polls at 10ms each — NOTE(review): comment said 5sec, actual ~50s */
            ret = RET_FAIL;
            KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] total receive pkt =%d But expect pkt =%d !!! \n",KAL_FUNC_NAME, recv_total_pkt_cnt, recv_cnt)) ;
            break;
        }
        old_pkt_cnt = recv_total_pkt_cnt;
    }

    /* some commands should not get cmd_ack */
    if (RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT)) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Device response test fail!!! \n"));
        /* Fix: fall through so the options below are restored. */
        ret = RET_FAIL;
    }

    sdio_test_option.auto_receive_pkt = false;
    sdio_test_option.exam_dl_content = false;

    return ret;
}
309
310
/*
 * Like sdio_dl_npkt() but issues the SDIO_AT_DL_SEND_SP command and does
 * not touch the auto-receive option; drains and verifies the requested
 * packets from the DL SW queue.
 *
 * Returns RET_SUCCESS, or RET_FAIL on content mismatch, receive timeout,
 * or bad command ack.
 *
 * Fixes: "timeout ==0;" was a no-op comparison so the idle counter never
 * reset while packets were arriving; the compare-failure path returned
 * without freeing the current skb.
 */
int sdio_dl_npkt_sp(athif_dl_tgpd_cfg_t *p_dl_cfg)
{
    int ret = RET_SUCCESS;
    athif_cmd_t cmd;
    unsigned int recv_cnt = 0, timeout = 0, i = 0;
    unsigned int old_recv_cnt = 0;
    struct sk_buff *result_ptr = NULL;

    memcpy(cmd.buf, p_dl_cfg, sizeof(athif_dl_tgpd_cfg_t));
    cmd.cmd = SDIO_AT_DL_SEND_SP;
    cmd.len = sizeof(athif_dl_tgpd_cfg_t);

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
    recv_cnt = 0;

    /* wait receiving packets */
    while (recv_cnt != p_dl_cfg->gpd_num) {

        result_ptr = mtlte_df_DL_read_skb_from_swq(p_dl_cfg->q_num);
        if (result_ptr != NULL) {
            recv_cnt++;
            mtlte_df_DL_pkt_handle_complete(p_dl_cfg->q_num);

            KAL_DBGPRINT(KAL, DBG_TRACE,("[INFO] : receive pkt from RxQ %d .\n", p_dl_cfg->q_num));

            if (true == sdio_test_option.show_dl_content) {
                KAL_DBGPRINT(KAL, DBG_ERROR,("Content : "));
                for (i = 0; i < result_ptr->len; i++) {
                    KAL_DBGPRINT(KAL, DBG_ERROR,("%x ", *(result_ptr->data+i) ));
                }
                KAL_DBGPRINT(KAL, DBG_ERROR,(" \n"));
            }

            if (true == sdio_test_option.exam_dl_content) {
                if (RET_FAIL == f_compare_recv_pkt(result_ptr, p_dl_cfg->q_num)) {
                    KAL_DBGPRINT(KAL, DBG_ERROR,("[%s]:[ERR] data compare error at que=%d expect gpd_num=%d, now is %d!!! \n", \
                        KAL_FUNC_NAME, p_dl_cfg->q_num, p_dl_cfg->gpd_num, recv_cnt)) ;
                    KAL_DBGPRINT(KAL, DBG_ERROR,(" buf_len=%d, ext_len=%d, bd_num=%d !!! \n", \
                        p_dl_cfg->tgpd_format.tgpd_buf_len, p_dl_cfg->tgpd_format.tgpd_ext_len, p_dl_cfg->tgpd_format.tbd_num)) ;
                    /* Fix: free the skb before bailing out. */
                    dev_kfree_skb(result_ptr);
                    return RET_FAIL;
                }
            }
        }

        dev_kfree_skb(result_ptr);

        KAL_SLEEP_MSEC(1);
        /* Fix: was "timeout ==0;" — assignment intended. */
        if (recv_cnt != old_recv_cnt) { timeout = 0; }
        else { timeout++; }

        if (timeout > 1000) { /* ~1sec with no packet */
            KAL_DBGPRINT(KAL, DBG_ERROR,("Timeout at receiving packet, received packet now = %d \n", recv_cnt ));
            ret = RET_FAIL;
            break;
        }
        old_recv_cnt = recv_cnt;
    }

    if (RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT)) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Device response test fail!!! \n"));
        return RET_FAIL;
    }

    return ret;
}
387
388
/*
 * Verify one looped-back packet against the AUTO pattern built by
 * sdio_send_pkt(): an AT_PKT_HEADER (random seed, queue ids, length,
 * one's-complement additive checksum) followed by payload bytes that
 * increment from RndSeed.
 *
 * dl_skb  : received packet (data/len are read, skb is not consumed here)
 * que_num : DL queue index, used only for log messages
 *
 * Returns RET_SUCCESS on match (and, oddly, for runt packets — see note),
 * RET_FAIL on length/checksum/payload mismatch.
 */
int f_compare_auto_pattern(struct sk_buff *dl_skb, unsigned int que_num)
{
    int ret = RET_SUCCESS;
    unsigned int pkt_len = 0, idx = 0;
    PAT_PKT_HEADER pAtHeader;
    unsigned char cksm = 0 ,*buf , data_char = 0;

    pAtHeader = (PAT_PKT_HEADER)(dl_skb->data);
    buf = dl_skb->data;
    pkt_len = dl_skb->len;

    /* Zero-length packets are logged but fall through to the runt check. */
    if (pkt_len == 0) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Zero Pkt received que_num=%d , pkt_len=%d!!\n", que_num , pkt_len));
    }

    if (pkt_len < sizeof(AT_PKT_HEADER)) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] que_num=%d , pkt_len=%d length less than header!!\n", que_num , pkt_len));
        /* NOTE(review): prints [ERR] yet returns RET_SUCCESS, so runt
         * packets never fail the test — confirm this is intentional. */
        return RET_SUCCESS;
    }

    /* The header's PktLen field must match the actual skb length. */
    if (pkt_len != pAtHeader->PktLen) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] dl_skb=%p ,que_num=%d , pkt_len=%d bytes, expect %d bytes!!\n",dl_skb ,que_num , pkt_len,pAtHeader->PktLen));
        return RET_FAIL;
    }

    /*Packet count here TODO*/

    /*check loopback que-to-que mapping here TODO*/

    /*check loopback packet sequence here TODO*/

    /* Compare the payload-header checksum: the sender stores ~sum in the
     * Checksum byte, so summing the whole header must give 0xff. */
    f_calc_cs_byte(buf, sizeof(AT_PKT_HEADER), &cksm);
    if (cksm != 0xff) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] que_num=%d , pkt_len=%d checksum error!!\n", que_num , pkt_len));
        return RET_FAIL;
    }

    /* Payload must increment byte-by-byte starting from RndSeed. */
    data_char = pAtHeader->RndSeed;
    for (idx = sizeof(AT_PKT_HEADER) ; idx < pkt_len ; idx ++, data_char ++) {
        if (buf[idx] != data_char) {
            KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , pkt_len));
            KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] (pkt_len=%d, pos=%d , expect=0x%02x, read=0x%x)!!\n" ,pkt_len ,idx ,data_char ,buf[idx]));
            return RET_FAIL;
        }
    }

    if (ret == RET_SUCCESS) {
        KAL_DBGPRINT(KAL, DBG_CRIT,("[WARN] que_num=%d , pkt_len=%d compare success!!\n", que_num , pkt_len));
    }

    return ret;
}
443
#define BPS_GPD_ADDR_TAG 0
/*
 * Verify one fragment of an AUTO-pattern packet that the device split
 * across several GPDs. Per-queue reassembly state lives in the global
 * recv_frag_ctrl[] array:
 *   xfered_len == 0          -> this skb starts a new transfer and must
 *                               carry a full AT_PKT_HEADER;
 *   otherwise                -> this skb continues the transfer and its
 *                               payload must keep incrementing from
 *                               next_expected_char.
 * When xfered_len reaches expected_xfer_len the transfer is complete and
 * recv_total_pkt_cnt_agg is bumped; on any mismatch xfered_len is reset
 * to 0 so the next skb is treated as a fresh transfer.
 *
 * Returns RET_SUCCESS when the fragment is consistent, RET_FAIL on
 * checksum/payload/length errors.
 */
int f_compare_fragment_pattern(struct sk_buff *dl_skb, unsigned int que_num)
{
    int ret = RET_SUCCESS;
    unsigned int pkt_len = 0, idx = 0 , que_idx = 0;
    PAT_PKT_HEADER pAtHeader;
    unsigned char cksm = 0 ,*buf , data_char = 0;
    recv_fragment_ctrl_t *p_frag_ctrl = NULL;
    bool first_gpd_of_pkt = false;

    que_idx = que_num;
    p_frag_ctrl = &recv_frag_ctrl[que_idx];

    buf = dl_skb->data;
    pkt_len = dl_skb->len;

    KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] recv one packet que_num=%d, len=%d!!\n",__FUNCTION__,__LINE__,que_num,pkt_len));

    /*start packet of this fragment transfer, assume 1st packet contain whole packet header*/
    if (p_frag_ctrl->xfered_len == 0) {
        /* Latch the header: it tells us the total transfer length and the
         * payload seed for all following fragments. */
        pAtHeader = (PAT_PKT_HEADER)dl_skb->data;
        memcpy(&p_frag_ctrl->pkt_head, pAtHeader , sizeof(AT_PKT_HEADER));
        p_frag_ctrl->expected_xfer_len = pAtHeader->PktLen;
        p_frag_ctrl->xfered_len = dl_skb->len;
        p_frag_ctrl->xfered_pkt_idx = 1;
        /*compare payload header check sum*/
        f_calc_cs_byte(buf, sizeof(AT_PKT_HEADER), &cksm);
        if (cksm != 0xff) {
            KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d checksum error!!\n", que_num , pkt_len));
            /*set xfered_len as 0 to start another auto-test transfer*/
            p_frag_ctrl->xfered_len = 0;
            return RET_FAIL;
        }
        KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] auto-test transfer start!!\n",__FUNCTION__,__LINE__));
        KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] expect_len=%d, cur_len=%d, cur_pkt_cnt=%d!!\n",__FUNCTION__,__LINE__
                        ,p_frag_ctrl->expected_xfer_len,p_frag_ctrl->xfered_len, p_frag_ctrl->xfered_pkt_idx));

        /* First fragment's payload increments from the header's RndSeed. */
        data_char = pAtHeader->RndSeed;
        for (idx = sizeof(AT_PKT_HEADER) ; idx < pkt_len ; idx ++, data_char ++) {
#if BPS_GPD_ADDR_TAG
            /* Skip the 4 tag bytes right after the header when enabled. */
            if (idx < (sizeof(AT_PKT_HEADER) + 4)) {
                continue;
            }
#endif
            if (buf[idx] != data_char) {
                KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , pkt_len));
                KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] (pkt_len=%d, pos=%d , expect=0x%02x, read=0x%x)!!\n" ,pkt_len ,idx ,data_char ,buf[idx]));
                /*set xfered_len as 0 to start another auto-test transfer*/
                p_frag_ctrl->xfered_len = 0;
                return RET_FAIL;
            }
        }
        /*store next fragment packet start pattern*/
        p_frag_ctrl->next_expected_char = data_char;
        first_gpd_of_pkt = true;

    } else {
        /* Continuation fragment: whole skb is payload, incrementing from
         * where the previous fragment stopped. */
        KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] auto-test in progress!!\n",__FUNCTION__,__LINE__));
        KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] expect_len=%d, cur_len=%d, cur_pkt_cnt=%d!!\n",__FUNCTION__,__LINE__
                        ,p_frag_ctrl->expected_xfer_len,p_frag_ctrl->xfered_len, p_frag_ctrl->xfered_pkt_idx));
        p_frag_ctrl->xfered_len += dl_skb->len;
        p_frag_ctrl->xfered_pkt_idx ++;

        data_char = p_frag_ctrl->next_expected_char;
        for (idx = 0 ; idx < pkt_len ; idx ++, data_char ++) {
            if (buf[idx] != data_char) {
                KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , pkt_len));
                KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] (pkt_len=%d, pos=%d , expect=0x%02x, read=0x%x)!!\n" ,pkt_len ,idx ,data_char ,buf[idx]));
                /*set xfered_len as 0 to start another auto-test transfer*/
                p_frag_ctrl->xfered_len = 0;
                return RET_FAIL;
            }
        }
        /*store next fragment packet start pattern*/
        p_frag_ctrl->next_expected_char = data_char;
    }

    /* NOTE(review): this check runs after the state above was already
     * updated with the zero length — confirm ordering is intended. */
    if (pkt_len == 0) {
        KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR][%s:%d] Zero Pkt received que_num=%d , pkt_len=%d!!\n",__FUNCTION__,__LINE__, que_num , pkt_len));
        return RET_FAIL;
    }

    /*means latest fragment packet of this auto-test transfer*/
    if (p_frag_ctrl->xfered_len == p_frag_ctrl->expected_xfer_len) {
        KAL_DBGPRINT(KAL, DBG_TRACE,( "[TRACE][%s:%d] auto-test end!!\n",__FUNCTION__,__LINE__));
        KAL_DBGPRINT(KAL, DBG_TRACE,("[TRACE][%s:%d] expect_len=%d, cur_len=%d, cur_pkt_cnt=%d!!\n",__FUNCTION__,__LINE__
                        ,p_frag_ctrl->expected_xfer_len,p_frag_ctrl->xfered_len, p_frag_ctrl->xfered_pkt_idx));

        if (p_frag_ctrl->xfered_len < sizeof(AT_PKT_HEADER)) {
            KAL_DBGPRINT(KAL, DBG_WARN,("[WARN][%s:%d] que_num=%d , pkt_len=%d length less than header!!\n",__FUNCTION__,__LINE__, que_num , pkt_len));
            ret = RET_SUCCESS;
        }
        /*set xfered_len as 0 to start another auto-test transfer*/
        p_frag_ctrl->xfered_len = 0;
        /*count xfer count after aggregation*/
        recv_total_pkt_cnt_agg ++;

    }
    /* else if(true == first_gpd_of_pkt){

        // TODO: Remove this [else if] when Tx header can be auto removed by HW
        if (dl_skb->len != (recv_frag_ctrl[que_idx].max_frag_unit_sz - 4) ) {
            KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR][%s:%d] fragment length error , recv_len=%d, expected allow_len=(%d-4) (Because Tx Header)!!\n",
                __FUNCTION__,__LINE__,dl_skb->len,recv_frag_ctrl[que_idx].max_frag_unit_sz));
            ret = RET_FAIL;
        }
        first_gpd_of_pkt = false;

    } */
    else { /*if not the end of the fragment transfer, the usb transfer size should be == rgpd/bd allow length*/
        if (dl_skb->len != recv_frag_ctrl[que_idx].max_frag_unit_sz) {
            KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR][%s:%d] fragment length error , recv_len=%d, expected allow_len=%d!!\n",
                __FUNCTION__,__LINE__,dl_skb->len,recv_frag_ctrl[que_idx].max_frag_unit_sz));
            ret = RET_FAIL;
        }
    }

    if (ret == RET_SUCCESS) {
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d compare success!!\n", que_num , pkt_len));
    }

    return ret;
}
571
/*
 * Verify that every payload byte of the received skb equals 0x5a
 * (ATCASE_LB_DATA_5A pattern). que_num is used only for log messages.
 * Returns RET_SUCCESS on match, RET_FAIL on the first mismatching byte.
 */
int f_compare_5a_pattern(struct sk_buff *dl_skb, unsigned int que_num)
{
    unsigned char *payload = dl_skb->data;
    unsigned int total = dl_skb->len;
    unsigned int pos;

    if (total == 0) {
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] Zero Pkt received que_num=%d , pkt_len=%d!!\n", que_num , total));
    }

    for (pos = 0; pos < total; pos++) {
        if (payload[pos] == 0x5a) {
            continue;
        }
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , total));
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] (pkt_len=%d, pos=%d , expect=0x5a, read=0x%x)!!\n" ,total ,pos ,payload[pos]));
        return RET_FAIL;
    }

    return RET_SUCCESS;
}
596
/*
 * Verify that every payload byte of the received skb equals 0xa5
 * (ATCASE_LB_DATA_A5 pattern). que_num is used only for log messages.
 * Returns RET_SUCCESS on match, RET_FAIL on the first mismatching byte.
 */
int f_compare_a5_pattern(struct sk_buff *dl_skb, unsigned int que_num)
{
    unsigned char *payload = dl_skb->data;
    unsigned int total = dl_skb->len;
    unsigned int pos;

    if (total == 0) {
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] Zero Pkt received que_num=%d , pkt_len=%d!!\n", que_num , total));
    }

    for (pos = 0; pos < total; pos++) {
        if (payload[pos] == 0xa5) {
            continue;
        }
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , total));
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] (pkt_len=%d, pos=%d , expect=0xa5, read=0x%x)!!\n" ,total ,pos ,payload[pos]));
        return RET_FAIL;
    }

    return RET_SUCCESS;
}
621
/*
 * Verify that the payload is a run of bytes incrementing (mod 256) from
 * the value of its first byte (ATCASE_LB_DATA_INC pattern).
 * que_num is used only for log messages.
 * Returns RET_SUCCESS on match, RET_FAIL on the first mismatch.
 */
int f_compare_inc_pattern(struct sk_buff *dl_skb, unsigned int que_num)
{
    int ret = RET_SUCCESS;
    unsigned int pkt_len = 0, idx = 0;
    unsigned char *buf , data_char = 0;

    buf = dl_skb->data;
    pkt_len = dl_skb->len;

    if (pkt_len == 0) {
        KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] Zero Pkt received que_num=%d , pkt_len=%d!!\n", que_num , pkt_len) );
    }

    /*ATCASE_LB_DATA_INC increase from 1st byte char*/
    data_char = buf[0];
    for (idx = 0 ; idx < pkt_len ; idx ++, data_char++) {
        if (buf[idx] != data_char) {
            KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] que_num=%d , pkt_len=%d data mismatch !!\n" , que_num , pkt_len));
            /* Fix: the pos/expect arguments were swapped in the original
             * log call, printing the index as "expect" and vice versa. */
            KAL_DBGPRINT(KAL, DBG_WARN,("[WARN] (pkt_len=%d, pos=%d , expect=0x%x, read=0x%x)!!\n" ,pkt_len ,idx ,data_char ,buf[idx]));
            return RET_FAIL;
        }
    }

    return ret;
}
648
/*
 * Dispatch a received skb to the checker matching the globally configured
 * compare pattern (cmp_pattern). Unknown patterns fall back to the AUTO
 * checker. Returns the selected checker's result.
 */
int f_compare_recv_pkt(struct sk_buff *dl_skb, unsigned int que_num)
{
    switch (cmp_pattern) {
    case ATCASE_LB_DATA_5A:
        return f_compare_5a_pattern(dl_skb, que_num);
    case ATCASE_LB_DATA_A5:
        return f_compare_a5_pattern(dl_skb, que_num);
    case ATCASE_LB_DATA_INC:
        return f_compare_inc_pattern(dl_skb, que_num);
    case ATCASE_LB_DATA_FRAGMENT:
        return f_compare_fragment_pattern(dl_skb, que_num);
    case ATCASE_LB_DATA_AUTO:
    default:
        return f_compare_auto_pattern(dl_skb, que_num);
    }
}
674
675 int f_wait_recv_pkt_cnt(unsigned int expect_num , unsigned int timeout_ms)
676 {
677 int ret = RET_SUCCESS;
678 unsigned int idx = 0, msg_delay = 0 , cur_pkt_num = 0;;
679
680 for (idx = 0 ; idx < timeout_ms ; idx ++) {
681 if (cmp_pattern == ATCASE_LB_DATA_FRAGMENT) {
682 if (expect_num <= recv_total_pkt_cnt_agg) {
683 break;
684 } else {
685 cur_pkt_num = recv_total_pkt_cnt_agg;
686 }
687 } else {
688 if (expect_num <= recv_total_pkt_cnt) {
689 break;
690 } else {
691 cur_pkt_num = recv_total_pkt_cnt;
692 }
693 }
694 if (recv_th_rslt != RET_SUCCESS) {
695 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] f_wait_recv_pkt_cnt compare fail\n", __FUNCTION__, __LINE__));
696 return RET_FAIL;
697 }
698 KAL_SLEEP_MSEC(1) ;
699 msg_delay ++;
700 if (msg_delay > 1000) {
701 msg_delay = 0;
702 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] f_wait_recv_pkt_cnt waiting for %d ms, expect=%d pkts , cur=%d pkts \n",
703 __FUNCTION__, __LINE__, idx, expect_num,cur_pkt_num ));
704 }
705 }
706
707 if (idx >= timeout_ms) {
708 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] f_wait_recv_pkt_cnt timeout\n", __FUNCTION__, __LINE__));
709 ret = RET_FAIL;
710 }
711
712 return ret;
713 }
714
715
716
/*
 * UL RGPD allow-length test: for every packet size in
 * [pkt_len_start, pkt_len_end] send one AUTO-pattern loopback packet on
 * txq_no and verify the device fragments it according to the RGPD/RBD
 * allow lengths in *p_rgpd_format.
 *
 * Sequence: pause the device's RGPD reload flow, pre-compute the expected
 * loopback fragment count from the total allow length, tell the device to
 * prepare that many RGPDs, send the packets, wait for all fragments, then
 * resume the reload flow.
 *
 * Returns RET_SUCCESS, or RET_FAIL on any command-ack failure, bad allow
 * length configuration, send error, or receive timeout/compare error.
 */
int f_ul_rgpd_allow_len_tst(unsigned int txq_no ,athif_ul_rgpd_format_t *p_rgpd_format, unsigned int pkt_len_start, unsigned int pkt_len_end)
{
    int ret = RET_SUCCESS;
    athif_cmd_t cmd;
    athif_ul_rgpd_tst_cfg_t *p_rgpd_cfg;
    unsigned int pktSize = 0;
    unsigned int q_num = 0 , pkt_cnt = 0;
    unsigned int total_allow_len = 0, idx = 0;

    /* Reset receive-thread bookkeeping before the run. */
    recv_th_rslt = RET_SUCCESS;
    recv_total_pkt_cnt = 0;
    recv_total_pkt_cnt_agg = 0;
    recv_total_bytes_cnt = 0;

    cmp_pattern = ATCASE_LB_DATA_AUTO;

    /*pause reload rgpd flow*/
    cmd.cmd = ATHIF_CMD_PAUSE_RGPD_RL;
    cmd.buf[0] = 1; // 1 : pause , 0 : resume
    cmd.len = 1;

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, 1, dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
    if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

    at_mtlte_hif_sdio_clear_tx_count();

    /* Clamp the sweep range: AUTO packets can never be smaller than the
     * AT test header. */
    if (pkt_len_start < sizeof(AT_PKT_HEADER)) {
        pkt_len_start = sizeof(AT_PKT_HEADER);
    }
    if (pkt_len_end < sizeof(AT_PKT_HEADER)) {
        pkt_len_end = sizeof(AT_PKT_HEADER);
    }

    /*calculate the whole rgpd allow length*/
    /* With RBDs the per-GPD capacity is the sum of the positive RBD allow
     * lengths; otherwise it is the single RGPD allow length. */
    total_allow_len = 0;
    if (p_rgpd_format->rbd_num) {
        for (idx = 0 ; idx < p_rgpd_format->rbd_num ; idx ++) {
            if (p_rgpd_format->rbd_allow_len[idx] > 0) {
                total_allow_len += p_rgpd_format->rbd_allow_len[idx];
            }
        }
    } else {
        total_allow_len = p_rgpd_format->rgpd_allow_len;
    }
    if (total_allow_len == 0) {
        KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s : %d] RGPD allow length configure err\n",__FUNCTION__ ,__LINE__));
        return RET_FAIL;
    }

    /*calculate expected pkt count for specific allow length*/
    /* Each packet of pktSize bytes occupies ceil(pktSize/total_allow_len)
     * RGPDs, i.e. that many loopback fragments. */
    pkt_cnt = 0;
    for (pktSize = pkt_len_start ; pktSize <= pkt_len_end ; pktSize ++) {
        pkt_cnt += ((pktSize) / total_allow_len);
        if ( (pktSize) % total_allow_len) {
            pkt_cnt ++;
        }
    }

    q_num = txq_no;
    cmd.cmd = ATHIF_CMD_PREPARE_RGPD;
    p_rgpd_cfg = (athif_ul_rgpd_tst_cfg_t *)cmd.buf;
    p_rgpd_cfg->q_num = q_num;
    /*must add one more gpd for queue initial tail*/
    p_rgpd_cfg->gpd_num = pkt_cnt + 1;
    memcpy(&p_rgpd_cfg->rgpd_format, p_rgpd_format , sizeof(athif_ul_rgpd_format_t));
    cmd.len = sizeof(athif_ul_rgpd_tst_cfg_t);

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
    if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

    /*send and compare loopback pkt to check if correct*/
    for (pktSize = pkt_len_start ; pktSize <= pkt_len_end ; pktSize ++) {

        ret = sdio_send_pkt(txq_no, pktSize , txq_no, 0);
        if (ret != RET_SUCCESS) {
            break;
        }
        if (recv_th_rslt != RET_SUCCESS) {
            KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
            ret = RET_FAIL;
            break;
        }
    }

    if (ret == RET_SUCCESS) {
        /*wait loopback data*/
        ret = f_wait_recv_pkt_cnt(pkt_cnt, 10000);
        if (ret != RET_SUCCESS) {
            KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] f_wait_recv_pkt_cnt timeout\n", __FUNCTION__));
        }
        if (recv_th_rslt != RET_SUCCESS) {
            KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
            ret = RET_FAIL;
        }
        /* Clear the counters again so later tests start clean. */
        recv_th_rslt = RET_SUCCESS;
        recv_total_pkt_cnt = 0;
        recv_total_pkt_cnt_agg = 0;
        recv_total_bytes_cnt = 0;
    }

    cmp_pattern = ATCASE_LB_DATA_AUTO;

    /* NOTE(review): failing here leaves the device's RGPD reload flow
     * paused — confirm callers recover from that. */
    if (ret != RET_SUCCESS) {
        return ret;
    }

    at_mtlte_hif_sdio_clear_tx_count();

    /*resume reload rgpd flow*/
    cmd.cmd = ATHIF_CMD_PAUSE_RGPD_RL;
    cmd.buf[0] = 0; // 1 : pause , 0 : resume
    cmd.len = 1;

    mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
    mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
    if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

    return ret;
}
845
846
847
848
849 /*
850 * loopback length 1~100 bytes for each queue with specific data pattern
851 */
852 int f_small_pkt_lb(lb_data_pattern_e pattern)
853 {
854 int ret = RET_SUCCESS;
855 struct timespec start_t , end_t, diff_t;
856 athif_cmd_t cmd;
857 athif_status_t status;
858 unsigned int i = 0 ;
859 int send_err_timeout = SEND_ERR_TIMEOUT, send_err_retry = SEND_ERR_RETRY;
860 unsigned int rand_num = 0,pktSize = 0, q_random_mod = 0,packetnum=0;
861 unsigned char que_no = 0;
862 lb_data_pattern_e org_send_pattern = 0, org_cmp_pattern = 0;
863 unsigned int min_size, max_size;
864
865 recv_th_rslt = RET_SUCCESS;
866 recv_total_pkt_cnt = 0;
867
868 if (pattern > ATCASE_LB_DATA_INC) {
869 return RET_FAIL;
870 }
871
872 /*backup pattern mode*/
873 org_send_pattern = send_pattern;
874 org_cmp_pattern = cmp_pattern;
875 send_pattern = pattern;
876 cmp_pattern = pattern;
877
878 min_size = 1;
879 max_size = 101;
880
881 for (que_no = 0 ; que_no < HIF_MAX_ULQ_NUM ; que_no ++) {
882 for (pktSize = min_size ; pktSize < max_size ; pktSize ++) {
883
884 ret = sdio_send_pkt(que_no, pktSize , que_no, 0);
885
886 if (ret != RET_SUCCESS) {
887 break;
888 }
889 if (recv_th_rslt != RET_SUCCESS) {
890 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
891 ret = RET_FAIL;
892 break;
893 }
894 }
895 if (ret == RET_SUCCESS) {
896 /*wait loopback data*/
897 ret = f_wait_recv_pkt_cnt(max_size-min_size , 10000);
898 if (ret != RET_SUCCESS) {
899 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] f_wait_recv_pkt_cnt timeout\n", __FUNCTION__));
900 break;
901 }
902 if (recv_th_rslt != RET_SUCCESS) {
903 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
904 ret = RET_FAIL;
905 break;
906 }
907 recv_th_rslt = RET_SUCCESS;
908 recv_total_pkt_cnt = 0;
909
910 } else {
911 break;
912 }
913 }
914
915 /*restore pattern mode*/
916 send_pattern = org_send_pattern;
917 cmp_pattern = org_cmp_pattern;
918
919 return ret;
920 }
921
922
/*
 * Return end - start as a normalized struct timespec.
 * Borrows one second when the nanosecond difference is negative;
 * assumes end >= start.
 */
struct timespec time_diff(struct timespec start , struct timespec end)
{
    struct timespec delta;

    delta.tv_sec = end.tv_sec - start.tv_sec;
    delta.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (delta.tv_nsec < 0) {
        delta.tv_sec -= 1;
        delta.tv_nsec += 1000000000;
    }

    return delta;
}
935
936
937
938 int f_tx_rx_ep0_perf_lb(unsigned int loop, unsigned int offset, unsigned int pkt_md,
939 unsigned int q_md, unsigned int pkt_len, perf_tst_case_e lb_md)
940 {
941 int ret = RET_SUCCESS;
942 struct timespec start_t , end_t, diff_t;
943 athif_cmd_t cmd;
944 athif_status_t status;
945 unsigned int chk_payload = 0, ep0_tst = 0, i = 0 ;
946 int send_err_timeout = SEND_ERR_TIMEOUT, send_err_retry = SEND_ERR_RETRY;
947 unsigned int ep0_delay_cnt = 0 , ep0_delay_th = 100 , rand_num = 0,pktSize = 0, q_random_mod = 0,packetnum=0;
948 unsigned char tx_ep = 0;
949 unsigned long long transferdata=0,performance = 0;
950 unsigned long long diff_ms = 0 ;
951
952 if (lb_md == ATCASE_PERF_TXRX) {
953 chk_payload = 1;
954 }
955
956 /*perpare ep0 buffer first*/
957 for (i = 0 ; i < 1024 ; i++) {
958 //cmd.buf[i] = (EP0_TST_BUF_SEED + i) & 0xff;
959 if (i % 2) {
960 cmd.buf[i] = 0x5a;
961 } else {
962 cmd.buf[i] = 0xa5;
963 }
964 }
965
966 recv_th_rslt = RET_SUCCESS;
967 recv_total_pkt_cnt = 0;
968 recv_total_pkt_cnt_agg = 0;
969
970 sdio_test_option.auto_receive_pkt = true;
971
972 while (loop) {
973 if(packetnum == 0){
974 jiffies_to_timespec(jiffies , &start_t);
975 }
976 if ((chk_payload) && (recv_th_rslt != RET_SUCCESS)) {
977 ret = recv_th_rslt;
978 break;
979 }
980 switch (q_md) {
981 case 0 : //all out ep random
982 q_random_mod = HIF_MAX_ULQ_NUM;
983 break;
984 case 1 : //random queue 0~2
985 q_random_mod = 3;
986 break;
987 case 2 : //random queue 0~1
988 q_random_mod = 2;
989 break;
990 case 3 : //random queue 0
991 q_random_mod = 1;
992 break;
993 default :
994 q_random_mod = HIF_MAX_ULQ_NUM;
995 break;
996 }
997 get_random_bytes(&rand_num, sizeof(rand_num));
998 tx_ep = rand_num % q_random_mod;
999
1000
1001 switch (pkt_md) {
1002 case 0 : //random pktSize = random(2048)
1003 get_random_bytes(&rand_num, sizeof(rand_num));
1004 pktSize =1 + rand_num %MAX_UL_PKT_SIZE;
1005 break;
1006 case 1 : //random pktSize = random(pkt_len)
1007 get_random_bytes(&rand_num, sizeof(rand_num));
1008 pktSize =1 + rand_num%pkt_len;
1009 break;
1010 case 2 : //pkt_len specific
1011 pktSize =pkt_len;
1012 break;
1013 default :
1014 get_random_bytes(&rand_num, sizeof(rand_num));
1015 pktSize =1 + rand_num %MAX_UL_PKT_SIZE;
1016 break;
1017 }
1018 if (pktSize < sizeof(AT_PKT_HEADER)){
1019 pktSize = sizeof(AT_PKT_HEADER);
1020 }
1021
1022 if (pktSize > MAX_UL_PKT_SIZE) {
1023 pktSize = MAX_UL_PKT_SIZE;
1024 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] pktSize error len=%d \n", __FUNCTION__,pktSize));
1025 }
1026 if (pktSize == 0) {
1027 pktSize = 100;
1028 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] pktSize error len=%d \n", __FUNCTION__,pktSize));
1029 }
1030
1031 transferdata+=pktSize;
1032
1033
1034 ret = sdio_send_pkt(tx_ep, pktSize , tx_ep, 0);
1035
1036 if (ret != RET_SUCCESS) {
1037 KAL_DBGPRINT(KAL, DBG_ERROR,("%s : sending error at pkt num = %d! \n", KAL_FUNC_NAME, packetnum));
1038 break;
1039 }
1040 if (recv_th_rslt != RET_SUCCESS) {
1041 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
1042 ret = RET_FAIL;
1043 break;
1044 }
1045 packetnum ++;
1046
1047 if (packetnum > 100000) {
1048 if (chk_payload) {
1049 ret = f_wait_recv_pkt_cnt(packetnum , 100000);
1050 if (ret != RET_SUCCESS) {
1051 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] f_wait_recv_pkt_cnt timeout\n", __FUNCTION__));
1052 break;
1053 }
1054 if (recv_th_rslt != RET_SUCCESS) {
1055 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
1056 ret = RET_FAIL;
1057 break;
1058 }
1059 if (cmp_pattern == ATCASE_LB_DATA_FRAGMENT) {
1060 if (packetnum != recv_total_pkt_cnt_agg) {
1061 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv fragment pattern pkt number mismatch expect=%d, recv=%d\n", __FUNCTION__,packetnum , recv_total_pkt_cnt_agg));
1062 ret = RET_FAIL;
1063 break;
1064 }
1065 } else {
1066 if (packetnum != recv_total_pkt_cnt) {
1067 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv auto pattern pkt number mismatch expect=%d, recv=%d\n", __FUNCTION__,packetnum , recv_total_pkt_cnt));
1068 ret = RET_FAIL;
1069 break;
1070 }
1071 }
1072 }
1073 /*transfer done without error, calc performance*/
1074 jiffies_to_timespec(jiffies , &end_t);
1075 diff_t = time_diff(start_t, end_t);
1076 diff_ms = (1000 * diff_t.tv_sec) ;
1077 diff_ms += (diff_t.tv_nsec / 1000000);
1078 performance = ((unsigned int)transferdata / (unsigned int)diff_ms);
1079
1080 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] performance = %d KBPS\n", __FUNCTION__, performance ));
1081 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] transfered data=%u\n", __FUNCTION__, transferdata));
1082 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] diff_ms=%u\n", __FUNCTION__, diff_ms));
1083
1084 recv_total_pkt_cnt = 0;
1085 recv_total_pkt_cnt_agg = 0;
1086 recv_th_rslt = RET_SUCCESS;
1087 packetnum = 0;
1088 transferdata = 0;
1089
1090 loop --;
1091 }
1092 if (recv_th_rslt != RET_SUCCESS) {
1093 ret = RET_FAIL;
1094 break;
1095 }
1096
1097 }
1098
1099 sdio_test_option.auto_receive_pkt = false;
1100 return ret;
1101 }
1102
1103
1104 int f_rx_perf_tst(unsigned int loop, unsigned int offset, unsigned int pkt_num,
1105 unsigned int q_md, unsigned int pkt_len, perf_tst_case_e lb_md)
1106 {
1107 int ret = RET_SUCCESS;
1108 athif_cmd_t cmd;
1109 athif_status_t status;
1110 unsigned int tst_q_num = 0, i = 0;
1111 athif_dl_perf_cfg_t *p_dl_perf_cfg;
1112 struct timespec start_t , end_t, diff_t;
1113 unsigned long long transferdata=0,performance = 0;
1114 unsigned long long diff_ms = 0 ;
1115
1116 sdio_test_option.auto_receive_pkt = true;
1117 sdio_test_option.exam_dl_content = false;
1118 recv_total_pkt_cnt = 0;
1119 recv_total_bytes_cnt = 0;
1120 recv_th_rslt = RET_SUCCESS;
1121
1122 tst_q_num = q_md ;
1123 p_dl_perf_cfg = cmd.buf;
1124 memset(p_dl_perf_cfg , 0 , sizeof(athif_dl_perf_cfg_t));
1125 for (i = 0 ; i < tst_q_num ; i++) { //start queue 0~q_md dl test
1126 p_dl_perf_cfg->txq_cfg[i].que_en = true;
1127 p_dl_perf_cfg->txq_cfg[i].gpd_type = ATHIF_GPD_GENERIC;
1128 p_dl_perf_cfg->txq_cfg[i].bd_num = 0;
1129 p_dl_perf_cfg->txq_cfg[i].q_num = i;
1130 p_dl_perf_cfg->txq_cfg[i].gpd_num = 100;
1131 p_dl_perf_cfg->txq_cfg[i].gpd_len = pkt_len;
1132 p_dl_perf_cfg->txq_cfg[i].pkt_cnt = pkt_num;
1133 }
1134 cmd.cmd = ATHIF_CMD_DL_PERF;
1135 cmd.len = sizeof(athif_dl_perf_cfg_t);
1136
1137 while (loop){
1138
1139 recv_total_pkt_cnt = 0;
1140 recv_total_bytes_cnt = 0;
1141 recv_th_rslt = RET_SUCCESS;
1142 for (i = 0 ; i < tst_q_num ; i ++) {
1143 que_recv_pkt_cnt[i] = 0;
1144 }
1145 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1146 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1147
1148 jiffies_to_timespec(jiffies , &start_t);
1149 while (1) {
1150 for (i = 0 ; i < tst_q_num ; i ++) {
1151 if (que_recv_pkt_cnt[i] < p_dl_perf_cfg->txq_cfg[i].pkt_cnt) {
1152 break;
1153 }
1154 }
1155 if (i >= tst_q_num) { //all dl queue transfer done
1156 if (recv_th_rslt != RET_SUCCESS) {
1157 ret = RET_FAIL;
1158 break;
1159 }
1160 jiffies_to_timespec(jiffies , &end_t);
1161 diff_t = time_diff(start_t, end_t);
1162 diff_ms = (1000 * diff_t.tv_sec) ;
1163 diff_ms += (diff_t.tv_nsec / 1000000);
1164 performance = ((unsigned int)recv_total_bytes_cnt / (unsigned int)diff_ms);
1165 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] performance = %d KBPS\n", __FUNCTION__, performance ));
1166 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] transfered data=%u\n", __FUNCTION__, recv_total_bytes_cnt));
1167 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] diff_ms=%u\n", __FUNCTION__, diff_ms));
1168
1169 /*reset the profile variable*/
1170 recv_total_pkt_cnt = 0;
1171 recv_total_bytes_cnt = 0;
1172 recv_th_rslt = RET_SUCCESS;
1173 for (i = 0 ; i < tst_q_num ; i ++) {
1174 que_recv_pkt_cnt[i] = 0;
1175 }
1176 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1177 loop --;
1178 break;
1179
1180 }
1181 /*if fail just return*/
1182 if (recv_th_rslt != RET_SUCCESS) {
1183 ret = RET_FAIL;
1184 break;
1185 }
1186 if (ret != RET_SUCCESS) {
1187 break;
1188 }
1189
1190 /*check timeout*/
1191 jiffies_to_timespec(jiffies , &end_t);
1192 diff_t = time_diff(start_t, end_t);
1193 diff_ms = (1000 * diff_t.tv_sec) ;
1194 diff_ms += (diff_t.tv_nsec / 1000000);
1195 if (diff_ms > 120*1000) {
1196 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] wait recv timeout %d ms , recv_cnt=%d\n", __FUNCTION__,diff_ms, recv_total_pkt_cnt));
1197 ret = RET_FAIL;
1198 break;
1199 }
1200 KAL_SLEEP_MSEC(0);
1201 }
1202 if (ret != RET_SUCCESS) {
1203 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] fail , loop=%d, recv_cnt=%d\n", __FUNCTION__,loop , recv_total_pkt_cnt));
1204 break;
1205 }
1206
1207 }
1208 sdio_test_option.auto_receive_pkt = false;
1209 return ret;
1210 }
1211
/*
 * f_ul_cs_err_tst - UL (host -> device) GPD/BD checksum error test.
 *
 * For every UL queue: enables descriptor checksum in the device HWFCR,
 * loads 21 RGPDs on the device side where the descriptor at
 * cs_err_position carries a corrupted checksum (GPD or BD depending on
 * @is_bd), sends packets up to that position, and verifies via the
 * device's interrupt status that the checksum-error interrupt fires only
 * once the corrupted descriptor is consumed.  The queue is reset and the
 * data flow resumed before moving to the next queue; the original HWFCR
 * value is restored at the end.
 *
 * @cs_len: checksum length under test; 16 selects 16-bit, anything else
 *          selects 12-bit checksum mode.
 * @is_bd:  non-zero corrupts the BD checksum instead of the GPD checksum.
 *
 * Returns RET_SUCCESS or RET_FAIL.
 *
 * NOTE(review): the early "return" failure paths exit without restoring
 * the original HWFCR or leaving interrupt test mode, so a failed run can
 * leave the device in test configuration — confirm whether callers reset
 * the device afterwards.
 */
int f_ul_cs_err_tst(unsigned int cs_len, unsigned int is_bd)
{
	unsigned int ret = RET_SUCCESS;
	/* org_cs_len is unused; p_rgpd_rslt only serves the commented-out
	 * local-result check near the bottom */
	unsigned int org_cs_len = 0, q_num = 0, valid_pkt_cnt = 0, expect_free_cnt = 0,idx = 0;
	unsigned int cs_err_position = 0;
	athif_mem_tst_cfg_t *p_mem_rw_cfg;
	unsigned int is_cs16 = 0, orig_HWFCR = 0, new_HWFCR = 0;
	athif_gpd_cs_cfg_t *p_rgpd_cfg;
	athif_local_rgpd_rslt_t *p_rgpd_rslt;
	athif_cmd_t cmd;
	athif_status_t status;
	hifsdio_isr_status_t *device_int_st;

	/* read the device's current HW flow-control register (HWFCR) so the
	 * original value can be restored when the test finishes */
	p_mem_rw_cfg = cmd.buf;
	p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
	p_mem_rw_cfg->len = 4;
	mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_READ_MEM, (char *)p_mem_rw_cfg, sizeof(athif_mem_tst_cfg_t), dev_test_athif_cmd_t);
	mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
	if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
	orig_HWFCR = (*(unsigned int *)athif_result_save_t->buf);

	/*configure checksum mode and chechsum enable disable*/
	new_HWFCR = orig_HWFCR | ORG_SDIO_TRX_DESC_CHKSUM_EN;
	if (cs_len == 16) {
		is_cs16 = 1;
		new_HWFCR = new_HWFCR & (~ORG_SDIO_TRX_DESC_CHKSUM_12);
	} else {
		is_cs16 = 0;
		new_HWFCR = new_HWFCR | ORG_SDIO_TRX_DESC_CHKSUM_12;
	}

	/*set new checksum enable configure*/
	p_mem_rw_cfg = cmd.buf;
	p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
	p_mem_rw_cfg->len = 4;
	*(unsigned int*)p_mem_rw_cfg->mem_val = new_HWFCR;
	cmd.len = sizeof(athif_mem_tst_cfg_t)+4;
	mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_WRITE_MEM, cmd.buf, cmd.len, dev_test_athif_cmd_t);
	mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
	if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

	/* run the checksum-error scenario on each UL queue in turn */
	for (q_num = 0 ; q_num < HIF_MAX_ULQ_NUM ; q_num ++) {

		/*pause reload rgpd flow*/
		cmd.cmd = ATHIF_CMD_PAUSE_RESUME_DATAFLOW;
		cmd.buf[0] = 1; // 1 : pause , 0 : resume
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

		/*clear qmu interrupt info*/
		cmd.cmd = SDIO_AT_DL_INT_TEST_SWITCH;
		cmd.buf[0] = 1; // 1 : test mode , 0 : normal mode
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

		/* build the RGPD layout: 21 descriptors, checksum corrupted at
		 * index cs_err_position */
		p_rgpd_cfg = cmd.buf;
		valid_pkt_cnt = 20;
		cs_err_position = 10; //means 10GPD correct
		memset(p_rgpd_cfg , 0 , sizeof(athif_gpd_cs_cfg_t));
		/*prepare 20 RGPD/BD with error checksum, include bypass RGPD*/
		p_rgpd_cfg->gpd_num = valid_pkt_cnt + 1; //plus 1 for the hwo=0 gpd
		expect_free_cnt = p_rgpd_cfg->gpd_num;
		p_rgpd_cfg->q_num = q_num;
		for(idx = 0 ; idx < p_rgpd_cfg->gpd_num ; idx ++) {
			/* bit2 selects 16-bit checksum per descriptor */
			if (is_cs16) {
				p_rgpd_cfg->ioc_bps_valid[idx] = (1<<2);
			} else {
				p_rgpd_cfg->ioc_bps_valid[idx] = 0;
			}
		}
		/*set error checksum GPD*/
		/* bit4 = corrupt BD checksum, bit3 = corrupt GPD checksum */
		p_rgpd_cfg->ioc_bps_valid[cs_err_position] |= (is_bd ? (1<<4) : (1<<3));

		cmd.cmd = ATHIF_CMD_PREPARE_CS_TST_RGPD;
		cmd.len = sizeof(athif_gpd_cs_cfg_t) + p_rgpd_cfg->gpd_num;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

		/* consume the correct descriptors: one small packet per RGPD */
		for (idx = 0 ; idx < cs_err_position ; idx ++) {
			ret = sdio_send_pkt(q_num, 60 , q_num, 0); //set small than smallest MPS
			if (ret != RET_SUCCESS) {
				break;
			}
		}

		/*sleep to wait the device recv the urbs*/
		msleep(100);

		/*no error interrupt assert here and the error interrupt would be asserted at next packet*/
		cmd.cmd = SDIO_AT_READ_INT_STATUS;
		cmd.buf[0] = 0;
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
		else { //compare expected interrupt information
			device_int_st = (hifsdio_isr_status_t *)athif_result_save_t->buf;

			/* upper 24 bits carry the per-queue error flags; they must
			 * all still be clear at this point */
			if ((device_int_st->UL0_INTR_Status & 0xFFFFFF00) != 0) {
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] cs err intr cmp fail, is_bd=%d, cs_len=%d ,q_num=%d, ul_int=%x !\n"
					,__FUNCTION__,__LINE__,is_bd, cs_len, q_num, device_int_st->UL0_INTR_Status));
				ret = RET_FAIL;
				return ret;
			} else {
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] cs err intr cmp success, is_bd=%d, cs_len=%d ,q_num=%d !\n"
					,__FUNCTION__,__LINE__,is_bd, cs_len, q_num));
			}
		}

		/* this packet lands on the corrupted descriptor and must raise
		 * the checksum-error interrupt */
		ret = sdio_send_pkt(q_num, 60 , q_num, 0); //set small than smallest MPS
		if (ret != RET_SUCCESS) {
			return ret;
		}

		/*get qmu interrupt info and expect no error interrupt*/
		cmd.cmd = SDIO_AT_READ_INT_STATUS;
		cmd.buf[0] = 0;
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
		else { //compare expected interrupt information
			device_int_st = (hifsdio_isr_status_t *)athif_result_save_t->buf;

			/*the rx/rx err interrupt info is bit-map*/
			if ((device_int_st->UL0_INTR_Status & 0xFFFFFF00) != ORG_SDIO_TXQ_CHKSUM_ERR(q_num)){
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] cs err intr cmp fail, is_bd=%d, cs_len=%d ,q_num=%d, ul_int=%x !\n"
					,__FUNCTION__,__LINE__,is_bd, cs_len, q_num, device_int_st->UL0_INTR_Status));
				ret = RET_FAIL;
				return ret;
			} else {
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] cs err intr cmp success, is_bd=%d, cs_len=%d ,q_num=%d !\n"
					,__FUNCTION__,__LINE__,is_bd, cs_len, q_num));
			}
		}

		/* disabled: device-local RGPD bookkeeping cross-check */
		/*
		cmd.cmd = ATHIF_CMD_GET_LOCAL_RGPD_RSLT;
		*(unsigned int*)cmd.buf = q_num;
		cmd.len = 4;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
		else { //compare expected interrupt information
			p_rgpd_rslt = (athif_local_rgpd_rslt_t *)athif_result_save_t->buf;
			//don't care fail count, because some GPD HWO=1
			if ((p_rgpd_rslt->correct_cnt != cs_err_position) || (p_rgpd_rslt->free_cnt != expect_free_cnt)) {
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] q_num=%d, local_rgpd_result fail !\n",__FUNCTION__,__LINE__, q_num));
				KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] fail_cnt=%d, correct_cnt=%d, free_cnt=%d !\n",
					__FUNCTION__,__LINE__, p_rgpd_rslt->fail_cnt, p_rgpd_rslt->correct_cnt, p_rgpd_rslt->free_cnt));
				ret = RET_FAIL;
				return ret;
			}
		}*/

		/* reset all UL queues before the next iteration */
		cmd.cmd = SDIO_AT_RESET_UL_QUEUE;
		cmd.buf[0] = 0xFF;
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

		/*resume reload rgpd flow*/
		cmd.cmd = ATHIF_CMD_PAUSE_RESUME_DATAFLOW;
		cmd.buf[0] = 0; // 1 : pause , 0 : resume
		cmd.len = 1;
		mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
		mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
		if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

	}

	/* leave interrupt test mode */
	cmd.cmd = SDIO_AT_DL_INT_TEST_SWITCH;
	cmd.buf[0] = 0; // 1 : test mode , 0 : normal mode
	cmd.len = 1;
	mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
	mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
	if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}

	/*restore original checksum enable configure*/

	p_mem_rw_cfg = cmd.buf;
	p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
	p_mem_rw_cfg->len = 4;
	*(unsigned int*)p_mem_rw_cfg->mem_val = orig_HWFCR;
	cmd.len = sizeof(athif_mem_tst_cfg_t)+4;
	mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_WRITE_MEM, cmd.buf, cmd.len, dev_test_athif_cmd_t);
	mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
	if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}


	return ret;
}
1415
1416
1417 int f_dl_cs_err_tst(unsigned int cs_len, unsigned int is_bd)
1418 {
1419 unsigned int ret = RET_SUCCESS;
1420 unsigned int org_cs_len = 0, q_num = 0, valid_pkt_cnt = 0, expect_free_cnt = 0,idx = 0;
1421 unsigned int cs_err_position = 0, expected_tx_err_intr = 0;
1422 athif_mem_tst_cfg_t *p_mem_rw_cfg;
1423 unsigned int is_cs16 = 0, orig_HWFCR = 0, new_HWFCR = 0;
1424 athif_gpd_cs_cfg_t *p_tgpd_cfg;
1425 athif_local_tgpd_rslt_t *p_tgpd_rslt;
1426 int send_err_timeout = SEND_ERR_TIMEOUT, send_err_retry = SEND_ERR_RETRY;
1427 athif_cmd_t cmd;
1428 athif_status_t status;
1429 hifsdio_isr_status_t *device_int_st;
1430 struct sk_buff *result_ptr = NULL;
1431
1432
1433
1434 p_mem_rw_cfg = cmd.buf;
1435 p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
1436 p_mem_rw_cfg->len = 4;
1437 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_READ_MEM, (char *)p_mem_rw_cfg, sizeof(athif_mem_tst_cfg_t), dev_test_athif_cmd_t);
1438 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1439 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1440 orig_HWFCR = (*(unsigned int *)athif_result_save_t->buf);
1441
1442 /*configure checksum mode and chechsum enable disable*/
1443 new_HWFCR = orig_HWFCR | ORG_SDIO_TRX_DESC_CHKSUM_EN;
1444 if (cs_len == 16) {
1445 is_cs16 = 1;
1446 new_HWFCR = new_HWFCR & (~ORG_SDIO_TRX_DESC_CHKSUM_12);
1447 } else {
1448 is_cs16 = 0;
1449 new_HWFCR = new_HWFCR | ORG_SDIO_TRX_DESC_CHKSUM_12;
1450 }
1451
1452 /*set new checksum enable configure*/
1453 p_mem_rw_cfg = cmd.buf;
1454 p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
1455 p_mem_rw_cfg->len = 4;
1456 *(unsigned int*)p_mem_rw_cfg->mem_val = new_HWFCR;
1457 cmd.len = sizeof(athif_mem_tst_cfg_t)+4;
1458 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_WRITE_MEM, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1459 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1460 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1461
1462
1463
1464 for (q_num = 0 ; q_num < HIF_MAX_DLQ_NUM ; q_num ++) {
1465
1466 /*pause reload rgpd flow*/
1467 cmd.cmd = ATHIF_CMD_PAUSE_RESUME_DATAFLOW;
1468 cmd.buf[0] = 1; // 1 : pause , 0 : resume
1469 cmd.len = 1;
1470 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1471 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1472 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1473
1474 /*clear qmu interrupt info*/
1475 cmd.cmd = SDIO_AT_DL_INT_TEST_SWITCH;
1476 cmd.buf[0] = 1; // 1 : test mode , 0 : normal mode
1477 cmd.len = 1;
1478 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1479 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1480 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1481
1482 p_tgpd_cfg = (athif_gpd_cs_cfg_t *)cmd.buf;
1483 memset(p_tgpd_cfg , 0 , sizeof(athif_gpd_cs_cfg_t));
1484 valid_pkt_cnt = 10;
1485 cs_err_position = 5; //means 10GPD correct
1486
1487 /*prepare 20 RGPD/BD with error checksum, include bypass RGPD*/
1488 p_tgpd_cfg->gpd_num = valid_pkt_cnt + 1; //plus 1 for the hwo=0 gpd
1489 expect_free_cnt = p_tgpd_cfg->gpd_num;
1490 p_tgpd_cfg->q_num = q_num;
1491
1492 for(idx = 0 ; idx < p_tgpd_cfg->gpd_num ; idx ++) {
1493 if (is_cs16) {
1494 p_tgpd_cfg->ioc_bps_valid[idx] = (1<<2);
1495 } else {
1496 p_tgpd_cfg->ioc_bps_valid[idx] = 0;
1497 }
1498 }
1499 /*set error checksum GPD*/
1500 p_tgpd_cfg->ioc_bps_valid[cs_err_position] |= (is_bd ? (1<<4) : (1<<3));
1501
1502 recv_th_rslt = RET_SUCCESS;
1503 recv_total_pkt_cnt = 0;
1504 sdio_test_option.auto_receive_pkt = true;
1505
1506 cmd.cmd = ATHIF_CMD_PREPARE_CS_TST_TGPD;
1507 cmd.len = sizeof(athif_gpd_cs_cfg_t) + p_tgpd_cfg->gpd_num;
1508
1509 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1510 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1511 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1512
1513
1514 /*wait expected received pkt count*/
1515 if (ret = f_wait_recv_pkt_cnt(cs_err_position , 100000) == RET_FAIL) {
1516 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] wait recv pkt cnt timeout, expect %d pkts !\n",__FUNCTION__,__LINE__, valid_pkt_cnt));
1517 return ret;
1518 } else {
1519 if (recv_th_rslt != RET_SUCCESS) {
1520 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] recv packet payload compare fail !\n",__FUNCTION__,__LINE__));
1521 return RET_FAIL;
1522 }
1523 }
1524 /*
1525 cmd.cmd = ATHIF_CMD_GET_LOCAL_TGPD_RSLT;
1526 *(unsigned int*)cmd.buf = q_num;
1527 cmd.len = 4;
1528 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1529 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1530 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1531 else { //compare expected interrupt information
1532 p_tgpd_rslt = athif_result_save_t->buf;
1533 if ((p_tgpd_rslt->sent_cnt != cs_err_position) || (p_tgpd_rslt->free_cnt != expect_free_cnt)) {
1534 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] q_num=%d, local_rgpd_result fail !\n",__FUNCTION__,__LINE__, q_num));
1535 ret = RET_FAIL;
1536 return ret;
1537 }
1538 }
1539 */
1540 cmd.cmd = SDIO_AT_READ_INT_STATUS;
1541 cmd.buf[0] = 0;
1542 cmd.len = 1;
1543 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1544 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1545 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1546 else { //compare expected interrupt information
1547 device_int_st = (hifsdio_isr_status_t *)athif_result_save_t->buf;
1548
1549 if (device_int_st->DL0_INTR_Status & 0x00FF0000 != ORG_SDIO_RXQ_CHKSUM_ERR(q_num)) {
1550 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] checksum test fail, cs_len=%d, is_bd=%d ,q_num=%d ,dl0_int=%x !\n"
1551 ,__FUNCTION__,__LINE__, cs_len, is_bd, q_num, device_int_st->DL0_INTR_Status));
1552 ret = RET_FAIL;
1553 return ret;
1554 } else {
1555 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s:%d] checksum test success, cs_len=%d, is_bd=%d ,q_num=%d !\n"
1556 ,__FUNCTION__,__LINE__, cs_len, is_bd, q_num));
1557 }
1558 }
1559 cmd.cmd = SDIO_AT_RESET_DL_QUEUE;
1560 cmd.buf[0] = 0xCE; //checksum error test, directly stop DL queue
1561 cmd.buf[1] = q_num;
1562 cmd.len = 2;
1563 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1564 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1565 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1566
1567 sdio_test_option.auto_receive_pkt = false;
1568
1569 /*resume reload rgpd flow*/
1570 cmd.cmd = ATHIF_CMD_PAUSE_RESUME_DATAFLOW;
1571 cmd.buf[0] = 0; // 1 : pause , 0 : resume
1572 cmd.len = 1;
1573 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1574 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1575 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1576
1577
1578
1579 result_ptr = mtlte_df_DL_read_skb_from_swq(q_num);
1580 while(result_ptr != NULL){
1581 dev_kfree_skb(result_ptr);
1582 result_ptr = mtlte_df_DL_read_skb_from_swq(q_num);
1583 }
1584
1585 }
1586
1587
1588
1589 cmd.cmd = SDIO_AT_DL_INT_TEST_SWITCH;
1590 cmd.buf[0] = 0; // 1 : test mode , 0 : normal mode
1591 cmd.len = 1;
1592 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, cmd.cmd, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1593 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1594 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1595
1596 /*restore original checksum enable configure*/
1597
1598 p_mem_rw_cfg = cmd.buf;
1599 p_mem_rw_cfg->mem_addr = (unsigned int)ORG_SDIO_HWFCR;
1600 p_mem_rw_cfg->len = 4;
1601 *(unsigned int*)p_mem_rw_cfg->mem_val = orig_HWFCR;
1602 cmd.len = sizeof(athif_mem_tst_cfg_t)+4;
1603 mtlte_dev_test_config_atcmd(ATHIF_CMD_SET_SIG, ATHIF_CMD_WRITE_MEM, cmd.buf, cmd.len, dev_test_athif_cmd_t);
1604 mtlte_dev_test_sent_atcmd(dev_test_athif_cmd_t);
1605 if(RET_FAIL == mtlte_dev_test_check_cmd_ack(athif_result_save_t, WAIT_TIMEOUT) ){return RET_FAIL;}
1606
1607
1608 return ret;
1609 }
1610
1611
/* Round _value up to the next 4-byte (DWORD) boundary. */
#define TEST_ALIGN_TO_DWORD(_value) (((_value) + 0x3) & ~0x3)
/* Byte length of the enhanced RX tailor read from WHISR when
 * test_rx_tail_change is enabled: fixed header/counter fields plus two
 * bytes per expected packet on DL queues 0~3. */
#define test2_rx_tail_len (4+4+4+2+2+2+2+2*(test_rx_pkt_cnt_q0+test_rx_pkt_cnt_q1+test_rx_pkt_cnt_q2+test_rx_pkt_cnt_q3)+4+4)


/* Per-UL-queue SDIO write-port mapping used by the HW-limit TX test;
 * every TX queue is written through data port SDIO_IP_WTDR1. */
sdio_tx_queue_info tx_queue_info_test[HIF_MAX_ULQ_NUM] = {
	{TXQ_Q0, SDIO_IP_WTDR1},
	{TXQ_Q1, SDIO_IP_WTDR1},
	{TXQ_Q2, SDIO_IP_WTDR1},
	{TXQ_Q3, SDIO_IP_WTDR1},
	{TXQ_Q4, SDIO_IP_WTDR1},
	{TXQ_Q5, SDIO_IP_WTDR1},
	{TXQ_Q6, SDIO_IP_WTDR1},
};
1625
1626 int tx_perf_hw_limit(unsigned int loop, unsigned int offset, unsigned int pkt_md,
1627 unsigned int q_md, unsigned int pkt_len, perf_tst_case_e lb_md)
1628 {
1629 int ret = RET_SUCCESS;
1630 struct timespec start_t , end_t, diff_t;
1631 athif_cmd_t cmd;
1632 athif_status_t status;
1633 unsigned int chk_payload = 0, ep0_tst = 0, i = 0, pkt_no = 0;
1634 int send_err_timeout = SEND_ERR_TIMEOUT, send_err_retry = SEND_ERR_RETRY;
1635 unsigned int ep0_delay_cnt = 0 , ep0_delay_th = 100 , rand_num = 0,pktSize = 0, q_random_mod = 0,packetnum=0;
1636 unsigned char tx_ep = 0;
1637 unsigned long long transferdata=0,performance = 0;
1638 unsigned long long diff_ms = 0 ;
1639 unsigned char *buf_pt;
1640 PAT_PKT_HEADER pAtHeader = NULL;
1641 unsigned char rand_seed = 0, bak_seed = 0;
1642 unsigned char cksm = 0;
1643 unsigned int pkt_size_record[HIF_MAX_ULQ_NUM][64];
1644 KAL_UINT32 Tx_avail_GPD[HIF_MAX_ULQ_NUM];
1645 sdio_whisr_enhance *test_whisr;
1646 unsigned int pkt_no_thistime=0, pkt_len_thistime=0;
1647 sdio_tx_sdu_header *tx_header_temp ;
1648 unsigned int timeout;
1649
1650
1651
1652 recv_th_rslt = RET_SUCCESS;
1653 recv_total_pkt_cnt = 0;
1654 recv_total_pkt_cnt_agg = 0;
1655
1656 KAL_ALLOCATE_PHYSICAL_MEM(buff_kmemory_hwlimit, 458752);
1657
1658 KAL_ZERO_MEM(buff_kmemory_hwlimit, 458752) ;
1659 buf_pt = buff_kmemory_hwlimit;
1660
1661 // pre-fill the packet content for UL HW limit speed test.
1662 for(tx_ep=0; tx_ep<HIF_MAX_ULQ_NUM; tx_ep++){
1663 buf_pt = buff_kmemory_hwlimit + (tx_ep*65536) ;
1664
1665 for(pkt_no=0; pkt_no<30; pkt_no++){
1666 switch (pkt_md) {
1667 case 0 : //random pktSize = random(2048)
1668 get_random_bytes(&rand_num, sizeof(rand_num));
1669 pktSize =1 + rand_num %MAX_UL_PKT_SIZE;
1670 break;
1671 case 1 : //random pktSize = random(pkt_len)
1672 get_random_bytes(&rand_num, sizeof(rand_num));
1673 pktSize =1 + rand_num%pkt_len;
1674 break;
1675 case 2 : //pkt_len specific
1676 pktSize =pkt_len;
1677 break;
1678 default :
1679 get_random_bytes(&rand_num, sizeof(rand_num));
1680 pktSize =1 + rand_num %MAX_UL_PKT_SIZE;
1681 break;
1682 }
1683
1684 if (pktSize < sizeof(AT_PKT_HEADER)){
1685 //pktSize = sizeof(AT_PKT_HEADER);
1686 pktSize = 20;
1687 }
1688
1689 if (pktSize > MAX_UL_PKT_SIZE) {
1690 pktSize = MAX_UL_PKT_SIZE;
1691 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] pktSize error len=%d \n", __FUNCTION__,pktSize));
1692 }
1693 if (pktSize == 0) {
1694 pktSize = 100;
1695 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] pktSize error len=%d \n", __FUNCTION__,pktSize));
1696 }
1697
1698 tx_header_temp = (sdio_tx_sdu_header *)buf_pt;
1699 tx_header_temp->u.bits.length = pktSize+MT_LTE_TX_HEADER_LENGTH;
1700 tx_header_temp->u.bits.tx_type = tx_ep ;
1701 buf_pt = buf_pt + MT_LTE_TX_HEADER_LENGTH;
1702
1703
1704 switch (send_pattern) {
1705 case ATCASE_LB_DATA_5A :
1706 memset(buf_pt, 0x5a , pktSize);
1707 break;
1708 case ATCASE_LB_DATA_A5:
1709 memset(buf_pt, 0xa5 , pktSize);
1710 break;
1711 case ATCASE_LB_DATA_INC:
1712 get_random_bytes(&rand_seed , 1);
1713 for (i = 0 ; i < pktSize ; i++) {
1714 buf_pt[i] = rand_seed++;
1715 }
1716 break;
1717
1718 case ATCASE_LB_DATA_AUTO :
1719 default:
1720 // fill packet payload
1721 pAtHeader = (PAT_PKT_HEADER)buf_pt;
1722 memset(pAtHeader, 0 , sizeof(AT_PKT_HEADER));
1723
1724 get_random_bytes(&rand_seed , 1);
1725 bak_seed = rand_seed;
1726 KAL_DBGPRINT(KAL, DBG_TRACE,("rand_seed = %d..\n", rand_seed));
1727 pAtHeader->RndSeed = rand_seed;
1728 pAtHeader->SrcQID = 0xf;
1729 pAtHeader->DstQID = 0xf;
1730 pAtHeader->SeqNo = 0;
1731
1732 pAtHeader->PktLen = pktSize;
1733
1734 f_calc_cs_byte(pAtHeader, sizeof(AT_PKT_HEADER), &cksm);
1735 pAtHeader->Checksum = ~cksm;
1736
1737 // fill payload, don't fill memory lenght larger than URB buffer
1738 for (i = 0 ; i < (pktSize - sizeof(AT_PKT_HEADER)) ; i ++) {
1739 pAtHeader->Data[i] = rand_seed++;
1740 }
1741 break;
1742
1743 }
1744
1745 buf_pt = (unsigned char *)( (unsigned int)buf_pt + TEST_ALIGN_TO_DWORD(pktSize) );
1746 pkt_size_record[tx_ep][pkt_no] = pktSize;
1747 }
1748 }
1749
1750
1751 sdio_test_option.auto_receive_pkt = true;
1752 for (i = 0 ; i<HIF_MAX_ULQ_NUM ; i++) {
1753 Tx_avail_GPD[i] = 0;
1754 }
1755
1756 at_mtlte_hif_sdio_get_tx_count((KAL_UINT32 *)Tx_avail_GPD);
1757
1758 KAL_DBGPRINT(KAL, DBG_ERROR,("[TEST] start transfer packet !! \r\n")) ;
1759
1760
1761 // start transfer Tx packet.
1762 while (loop) {
1763 if(packetnum == 0){
1764 jiffies_to_timespec(jiffies , &start_t);
1765 }
1766
1767 switch (q_md) {
1768 case 0 : //all out ep random
1769 q_random_mod = HIF_MAX_ULQ_NUM;
1770 break;
1771 case 1 : //random queue 0~2
1772 q_random_mod = 3;
1773 break;
1774 case 2 : //random queue 0~1
1775 q_random_mod = 2;
1776 break;
1777 case 3 : //random queue 0
1778 q_random_mod = 1;
1779 break;
1780 default :
1781 q_random_mod = HIF_MAX_ULQ_NUM;
1782 break;
1783 }
1784 get_random_bytes(&rand_num, sizeof(rand_num));
1785 tx_ep = rand_num % q_random_mod;
1786
1787 //KAL_DBGPRINT(KAL, DBG_ERROR,("[TEST] send pkt to txq %d this time !! \r\n", tx_ep)) ;
1788
1789 timeout = 0;
1790 while(Tx_avail_GPD[tx_ep] < 30){
1791
1792 for(i=0; i<HIF_MAX_ULQ_NUM; i++){
1793 if(Tx_avail_GPD[i] > 30){
1794 tx_ep = i;
1795 break;
1796 }
1797 }
1798 if(i!=HIF_MAX_ULQ_NUM) {break;}
1799
1800 if(1 == test_rx_tail_change){
1801 sdio_func1_rd(SDIO_IP_WHISR, buff_kmemory_ulpkt_data, test2_rx_tail_len) ;
1802 }else{
1803 sdio_func1_rd(SDIO_IP_WHISR, buff_kmemory_ulpkt_data, MT_LTE_RX_TAILOR_LENGTH) ;
1804 }
1805
1806 test_whisr = (sdio_whisr_enhance *)buff_kmemory_ulpkt_data;
1807
1808 Tx_avail_GPD[0] += test_whisr->whtsr0.u.bits.tq0_cnt;
1809 Tx_avail_GPD[1] += test_whisr->whtsr0.u.bits.tq1_cnt ;
1810 Tx_avail_GPD[2] += test_whisr->whtsr0.u.bits.tq2_cnt ;
1811 Tx_avail_GPD[3] += test_whisr->whtsr0.u.bits.tq3_cnt ;
1812 Tx_avail_GPD[4] += test_whisr->whtsr1.u.bits.tq4_cnt ;
1813 Tx_avail_GPD[5] += test_whisr->whtsr1.u.bits.tq5_cnt ;
1814 Tx_avail_GPD[6] += test_whisr->whtsr1.u.bits.tq6_cnt ;
1815
1816
1817 timeout++;
1818 if (timeout > 100){
1819 KAL_DBGPRINT(KAL, DBG_ERROR,("[TEST] wait tx empty packet timeout !! \r\n")) ;
1820 return RET_FAIL;
1821 }
1822 KAL_SLEEP_MSEC(1) ;
1823 }
1824
1825 pkt_no_thistime = 30;
1826
1827 buf_pt = buff_kmemory_hwlimit;
1828 pkt_len_thistime = 0;
1829 for(i = 0 ; i<pkt_no_thistime ; i++) {
1830
1831 pkt_len_thistime += TEST_ALIGN_TO_DWORD(pkt_size_record[tx_ep][i] + MT_LTE_TX_HEADER_LENGTH);
1832 transferdata += pkt_size_record[tx_ep][i];
1833 }
1834
1835 KAL_ZERO_MEM((buf_pt + pkt_len_thistime),MT_LTE_TX_ZERO_PADDING_LEN) ;
1836
1837 if(sdio_func1_wr(tx_queue_info_test[tx_ep].port_address, (unsigned char *)(buff_kmemory_hwlimit+tx_ep*65536), ALIGN_TO_BLOCK_SIZE(pkt_len_thistime))){
1838 KAL_DBGPRINT(KAL, DBG_ERROR,("[ERR] Tx transfer fail in tx_hw_limit test !! \r\n")) ;
1839 return RET_FAIL;
1840 }
1841
1842 Tx_avail_GPD[tx_ep] -= pkt_no_thistime;
1843
1844 if (recv_th_rslt != RET_SUCCESS) {
1845 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
1846 ret = RET_FAIL;
1847 break;
1848 }
1849 packetnum += pkt_no_thistime;
1850
1851 if (packetnum > 100000) {
1852
1853 /*transfer done without error, calc performance*/
1854 jiffies_to_timespec(jiffies , &end_t);
1855 diff_t = time_diff(start_t, end_t);
1856 diff_ms = (1000 * diff_t.tv_sec) ;
1857 diff_ms += (diff_t.tv_nsec / 1000000);
1858 performance = ((unsigned int)transferdata / (unsigned int)diff_ms);
1859
1860 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] performance = %d KBPS\n", __FUNCTION__, performance ));
1861 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] transfered data=%u\n", __FUNCTION__, transferdata));
1862 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] diff_ms=%u\n", __FUNCTION__, diff_ms));
1863
1864 recv_total_pkt_cnt = 0;
1865 recv_total_pkt_cnt_agg = 0;
1866 recv_th_rslt = RET_SUCCESS;
1867 packetnum = 0;
1868 transferdata = 0;
1869
1870 loop --;
1871 }
1872 if (recv_th_rslt != RET_SUCCESS) {
1873 ret = RET_FAIL;
1874 break;
1875 }
1876
1877 }
1878
1879 KAL_FREE_PHYSICAL_MEM(buff_kmemory_hwlimit);
1880
1881 sdio_test_option.auto_receive_pkt = false;
1882 return ret;
1883 }
1884
1885
1886 int f_brom_pkt_lb(lb_data_pattern_e pattern, unsigned int min_size, unsigned int max_size)
1887 {
1888 int ret = RET_SUCCESS;
1889 struct timespec start_t , end_t, diff_t;
1890 athif_cmd_t cmd;
1891 athif_status_t status;
1892 unsigned int i = 0 ;
1893 int send_err_timeout = SEND_ERR_TIMEOUT, send_err_retry = SEND_ERR_RETRY;
1894 unsigned int rand_num = 0,pktSize = 0, q_random_mod = 0,packetnum=0;
1895 unsigned char que_no = 0;
1896 lb_data_pattern_e org_send_pattern = 0, org_cmp_pattern = 0;
1897
1898 recv_th_rslt = RET_SUCCESS;
1899 recv_total_pkt_cnt = 0;
1900
1901 /*backup pattern mode*/
1902 org_send_pattern = send_pattern;
1903 org_cmp_pattern = cmp_pattern;
1904 send_pattern = pattern;
1905 cmp_pattern = pattern;
1906
1907 que_no = 0;
1908
1909 for (pktSize = min_size ; pktSize < max_size ; pktSize ++) {
1910
1911 ret = sdio_send_pkt(que_no, pktSize , que_no, 0);
1912
1913 if (ret != RET_SUCCESS) {
1914 break;
1915 }
1916 if (recv_th_rslt != RET_SUCCESS) {
1917 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
1918 ret = RET_FAIL;
1919 break;
1920 }
1921 }
1922
1923 if (ret == RET_SUCCESS) {
1924 /*wait loopback data*/
1925 ret = f_wait_recv_pkt_cnt(max_size-min_size , 10000);
1926 if (ret != RET_SUCCESS) {
1927 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] f_wait_recv_pkt_cnt timeout\n", __FUNCTION__));
1928 }
1929 if (recv_th_rslt != RET_SUCCESS) {
1930 KAL_DBGPRINT(KAL, DBG_ERROR, ("[%s] recv thread report fail\n", __FUNCTION__));
1931 ret = RET_FAIL;
1932 }
1933 recv_th_rslt = RET_SUCCESS;
1934 recv_total_pkt_cnt = 0;
1935 }
1936
1937
1938 /*restore pattern mode*/
1939 send_pattern = org_send_pattern;
1940 cmp_pattern = org_cmp_pattern;
1941
1942 return ret;
1943 }
1944
1945