import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / misc / mediatek / dual_ccci / ccmni_net.c
/*****************************************************************************
*
* Filename:
* ---------
* ccmni_net.c
*
* Project:
* --------
* YuSu
*
* Description:
* ------------
* MT6516 Cross Chip Modem Network Interface (dual CCCI, v1 ports)
*
* Author:
* -------
* TL Lau (mtk02008)
*
****************************************************************************/
20
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/device.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/bitops.h>
28 #include <linux/wakelock.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/delay.h>
32 #include <linux/wait.h>
33 #include <linux/dma-mapping.h>
34 #include <asm/dma-mapping.h>
35 #include <asm/bitops.h>
36 #include <linux/timer.h>
37 #include <mach/mt_typedefs.h>
38 #include <ccmni_pfp.h>
39 #include <ccci_tty.h>
40 #include <ccci.h>
41
42 #define CCMNI_TX_QUEUE 8
43 #define CCMNI_UART_OFFSET 2
44
45 extern void *ccmni_ctl_block[];
46
47 typedef struct
48 {
49 int channel;
50 int m_md_id;
51 int uart_rx;
52 int uart_rx_ack;
53 int uart_tx;
54 int uart_tx_ack;
55 int ready;
56 int net_if_off;
57 unsigned long flags;
58 struct timer_list timer;
59 unsigned long send_len;
60 struct net_device *dev;
61 struct wake_lock wake_lock;
62 spinlock_t spinlock;
63
64 shared_mem_tty_t *shared_mem;
65 int shared_mem_phys_addr;
66
67 unsigned char write_buffer [CCMNI_MTU + 4];
68 unsigned char read_buffer [CCCI1_CCMNI_BUF_SIZE];
69 unsigned char decode_buffer[CCCI1_CCMNI_BUF_SIZE];
70
71 unsigned char mac_addr [ETH_ALEN];
72
73 struct tasklet_struct tasklet;
74 void *owner;
75
76 } ccmni_instance_t;
77
78 typedef struct _ccmni_v1_ctl_block
79 {
80 int m_md_id;
81 int ccci_is_ready;
82 ccmni_instance_t *ccmni_instance[CCMNI_V1_PORT_NUM];
83 struct wake_lock ccmni_wake_lock;
84 char wakelock_name[16];
85 MD_CALL_BACK_QUEUE ccmni_notifier;
86 }ccmni_v1_ctl_block_t;
87
88 static void ccmni_read (unsigned long arg);
89 //static DECLARE_TASKLET (ccmni_read_tasklet, ccmni_read, 0);
90
91 static void reset_ccmni_instance_buffer(ccmni_instance_t *ccmni_instance)
92 {
93 unsigned long flags;
94 spin_lock_irqsave(&ccmni_instance->spinlock, flags);
95 ccci_reset_buffers(ccmni_instance->shared_mem, CCCI1_CCMNI_BUF_SIZE);
96 spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
97 }
98
99 int ccmni_v1_ipo_h_restore(int md_id)
100 {
101 int i;
102 ccmni_v1_ctl_block_t *ctlb;
103
104 ctlb = ccmni_ctl_block[md_id];
105 for(i=0; i<CCMNI_V1_PORT_NUM; i++)
106 ccci_reset_buffers(ctlb->ccmni_instance[i]->shared_mem, CCCI1_CCMNI_BUF_SIZE);
107
108 return 0;
109 }
110
111 static void restore_ccmni_instance(ccmni_instance_t *ccmni_instance)
112 {
113 unsigned long flags;
114 spin_lock_irqsave(&ccmni_instance->spinlock, flags);
115 if(ccmni_instance->net_if_off) {
116 ccmni_instance->net_if_off = 0;
117 netif_carrier_on(ccmni_instance->dev);
118 }
119 spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
120 }
121
122 static void stop_ccmni_instance(ccmni_instance_t *ccmni_instance)
123 {
124 unsigned long flags;
125 spin_lock_irqsave(&ccmni_instance->spinlock, flags);
126 if(ccmni_instance->net_if_off == 0) {
127 ccmni_instance->net_if_off = 1;
128 del_timer(&ccmni_instance->timer);
129 netif_carrier_off(ccmni_instance->dev);
130 }
131 spin_unlock_irqrestore(&ccmni_instance->spinlock, flags);
132 }
133
134
135 static void ccmni_notifier_call(MD_CALL_BACK_QUEUE *notifier, unsigned long val)
136 {
137 int i;
138 ccmni_v1_ctl_block_t *ctl_b = container_of(notifier, ccmni_v1_ctl_block_t,
139 ccmni_notifier);
140 ccmni_instance_t *instance;
141
142 switch(val)
143 {
144 case CCCI_MD_EXCEPTION :
145 ctl_b->ccci_is_ready=0;
146 for(i=0;i<CCMNI_V1_PORT_NUM;i++)
147 {
148 instance = ctl_b->ccmni_instance[i];
149 if (instance)
150 stop_ccmni_instance(instance);
151 }
152 break;
153 case CCCI_MD_STOP:
154 for(i=0;i<CCMNI_V1_PORT_NUM;i++)
155 {
156 instance = ctl_b->ccmni_instance[i];
157 if (instance) {
158 stop_ccmni_instance(instance);
159 }
160 }
161 break;
162 case CCCI_MD_RESET :
163 ctl_b->ccci_is_ready=0;
164 for(i=0;i<CCMNI_V1_PORT_NUM;i++)
165 {
166 instance = ctl_b->ccmni_instance[i];
167 if (instance) {
168 reset_ccmni_instance_buffer(instance);
169 }
170 }
171 break;
172
173 case CCCI_MD_BOOTUP:
174 if (ctl_b->ccci_is_ready==0)
175 {
176 ctl_b->ccci_is_ready=1;
177 for(i=0;i<CCMNI_V1_PORT_NUM;i++)
178 {
179 instance = ctl_b->ccmni_instance[i];
180 if (instance)
181 restore_ccmni_instance(instance);
182 }
183 }
184 break;
185
186 default:
187 break;
188 }
189
190 return ;
191 }
192
193
194 static void timer_func(unsigned long data)
195 {
196 ccmni_instance_t *ccmni=(ccmni_instance_t *)data;
197 int contin=0;
198 int ret=0;
199 ccci_msg_t msg;
200 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
201 int md_id = ctl_b->m_md_id;
202 spin_lock_bh(&ccmni->spinlock);
203
204 if (ctl_b->ccci_is_ready == 0)
205 goto out;
206
207 if (test_bit(CCMNI_RECV_ACK_PENDING,&ccmni->flags))
208 {
209 msg.magic = 0;
210 msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
211 msg.channel = ccmni->uart_rx_ack;
212 msg.reserved = 0;
213 ret = ccci_message_send(md_id, &msg, 1);
214
215 if (ret==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
216 contin=1;
217 else
218 clear_bit(CCMNI_RECV_ACK_PENDING, &ccmni->flags);
219 }
220
221 if (test_bit(CCMNI_SEND_PENDING,&ccmni->flags))
222 {
223 msg.addr = 0;
224 msg.len = ccmni->send_len;
225 msg.channel = ccmni->uart_tx;
226 msg.reserved = 0;
227 ret = ccci_message_send(md_id, &msg, 1);
228
229 if (ret==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
230 contin=1;
231 else {
232 clear_bit(CCMNI_SEND_PENDING,&ccmni->flags);
233 ccmni->send_len=0;
234 }
235 }
236 out:
237 spin_unlock_bh(&ccmni->spinlock);
238 if (contin)
239 mod_timer(&ccmni->timer,jiffies+2);
240
241 return;
242
243 }
244
245 static void ccmni_make_etherframe(void *_eth_hdr, u8 *mac_addr, int packet_type)
246 {
247 struct ethhdr *eth_hdr = _eth_hdr;
248
249 memcpy(eth_hdr->h_dest, mac_addr, sizeof(eth_hdr->h_dest));
250 memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
251 if(packet_type == IPV6_VERSION){
252 eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IPV6);
253 }else{
254 eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP);
255 }
256 }
257
258 static int ccmni_receive(ccmni_instance_t *ccmni, int length)
259 {
260 int counter, ret;
261 packet_info_t packet_info;
262 complete_ippkt_t *packet;
263 complete_ippkt_t *processed_packet;
264 struct sk_buff *skb;
265 complete_ippkt_t last_packet = {0};
266 int offset_put_pkt = 0;
267 int offset_parse_frame = 0;
268 int packet_type;
269 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
270 int md_id = ctl_b->m_md_id;
271
272 CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive() invoke pfp_unframe()\n", ccmni->channel);
273 do
274 {
275 packet_info = pfp_unframe(ccmni->decode_buffer+offset_put_pkt, \
276 CCCI1_CCMNI_BUF_SIZE-offset_put_pkt, ccmni->read_buffer+offset_parse_frame, \
277 length, ccmni->channel);
278 packet = packet_info.pkt_list;
279
280 CCCI_CCMNI_MSG(md_id, "CCMNI%d num_complete_pkt=%d after pfp_unframe \n", \
281 ccmni->channel, packet_info.num_complete_packets);
282
283 for(counter = 0; counter < packet_info.num_complete_packets; counter++)
284 {
285 skb = dev_alloc_skb(packet->pkt_size);
286 if (skb)
287 {
288 packet_type = packet->pkt_data[0] & 0xF0;
289 memcpy(skb_put(skb, packet->pkt_size), packet->pkt_data, packet->pkt_size);
290 ccmni_make_etherframe(skb->data - ETH_HLEN, ccmni->dev->dev_addr, packet_type);
291 skb_set_mac_header(skb, -ETH_HLEN);
292 skb->dev = ccmni->dev;
293 if(packet_type == IPV6_VERSION){
294 skb->protocol = htons(ETH_P_IPV6);
295 }
296 else {
297 skb->protocol = htons(ETH_P_IP);
298 }
299 skb->ip_summed = CHECKSUM_NONE;
300
301 ret = netif_rx(skb);
302
303 CCCI_CCMNI_MSG(md_id, "CCMNI%d invoke netif_rx()=%d\n", ccmni->channel, ret);
304 ccmni->dev->stats.rx_packets++;
305 ccmni->dev->stats.rx_bytes += packet->pkt_size;
306 CCCI_CCMNI_MSG(md_id, "CCMNI%d rx_pkts=%ld, stats_rx_bytes=%ld\n", ccmni->channel, \
307 ccmni->dev->stats.rx_packets, ccmni->dev->stats.rx_bytes);
308 }
309 else
310 {
311 CCCI_DBG_MSG(md_id, "net", "CCMNI%d Socket buffer allocate fail\n", ccmni->channel);
312 }
313
314 processed_packet = packet;
315 last_packet = *processed_packet;
316 packet = packet->next;
317
318 /* Only clear the entry_used flag as 0 */
319 release_one_used_complete_ippkt_entry(processed_packet);
320 };
321
322 /* It must to check if it is necessary to invoke the pfp_unframe() again due to no available complete_ippkt entry */
323 if (packet_info.try_decode_again == 1)
324 {
325 offset_put_pkt += (last_packet.pkt_data - ccmni->decode_buffer + last_packet.pkt_size);
326 offset_parse_frame += packet_info.consumed_length;
327 }
328 } while (packet_info.try_decode_again == 1);
329
330 offset_parse_frame += packet_info.consumed_length;
331 return offset_parse_frame;
332 }
333
334 static void ccmni_read(unsigned long arg)
335 {
336 int part, size;
337 int ret;
338 int read, write, consumed;
339 unsigned char *string;
340 ccmni_instance_t *ccmni = (ccmni_instance_t *) arg;
341 ccci_msg_t msg;
342 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
343 int md_id= ctl_b->m_md_id;
344 char *rx_buffer;
345
346 spin_lock_bh(&ccmni->spinlock);
347 if (ctl_b->ccci_is_ready==0)
348 {
349 CCCI_DBG_MSG(md_id, "net", "CCMNI%d_read fail when modem not ready\n", ccmni->channel);
350 goto out;
351 }
352
353 string = ccmni->read_buffer;
354 read = ccmni->shared_mem->rx_control.read;
355 write = ccmni->shared_mem->rx_control.write;
356 size = write - read;
357 part = 0;
358 rx_buffer = ccmni->shared_mem->buffer;
359
360 if (size < 0)
361 {
362 size += ccmni->shared_mem->rx_control.length;
363 }
364
365 if (read > write)
366 {
367 part = ccmni->shared_mem->rx_control.length - read;
368 memcpy(string, &rx_buffer[read], part);
369
370 size -= part;
371 string += part;
372 read = 0;
373 }
374
375 memcpy(string, &rx_buffer[read], size);
376 CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[Before]: size=%d, read=%d\n", \
377 ccmni->channel, (size+part), read);
378 consumed = ccmni_receive(ccmni, size + part);
379 CCCI_CCMNI_MSG(md_id, "CCMNI%d_receive[After]: consume=%d\n", ccmni->channel, consumed);
380
381 // Calculate the new position of the read pointer.
382 // Take into consideration the number of bytes actually consumed;
383 // i.e. number of bytes taken up by complete IP packets.
384 read += size;
385 if (read >= ccmni->shared_mem->rx_control.length)
386 {
387 read -= ccmni->shared_mem->rx_control.length;
388 }
389
390 if (consumed < (size + part))
391 {
392 read -= ((size + part) - consumed);
393 if (read < 0)
394 {
395 read += ccmni->shared_mem->rx_control.length;
396 }
397 }
398
399 ccmni->shared_mem->rx_control.read = read;
400 // Send an acknowledgement back to modem side.
401 CCCI_CCMNI_MSG(md_id, "CCMNI%d_read to write mailbox(ch%d, tty%d)\n", ccmni->channel,
402 ccmni->uart_rx_ack, CCMNI_CHANNEL_OFFSET + ccmni->channel);
403 //ret = ccci_write_mailbox(ccmni->uart_rx_ack, CCMNI_CHANNEL_OFFSET + ccmni->channel);
404 msg.magic = 0xFFFFFFFF;
405 msg.id = CCMNI_CHANNEL_OFFSET + ccmni->channel;
406 msg.channel = ccmni->uart_rx_ack;
407 msg.reserved = 0;
408 ret = ccci_message_send(md_id, &msg, 1);
409 if (ret==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
410 {
411 set_bit(CCMNI_RECV_ACK_PENDING,&ccmni->flags);
412 mod_timer(&ccmni->timer,jiffies);
413 }
414 else if (ret==sizeof(ccci_msg_t))
415 clear_bit(CCMNI_RECV_ACK_PENDING,&ccmni->flags);
416 out:
417 spin_unlock_bh(&ccmni->spinlock);
418
419 CCCI_CCMNI_MSG(md_id, "CCMNI%d_read invoke wake_lock_timeout(1s)\n", ccmni->channel);
420 wake_lock_timeout(&ctl_b->ccmni_wake_lock, HZ);
421
422 return;
423 }
424
425
426 // will be called when modem sends us something.
427 // we will then copy it to the tty's buffer.
428 // this is essentially the "read" fops.
429 static void ccmni_callback(void *private)
430 {
431 logic_channel_info_t *ch_info = (logic_channel_info_t*)private;
432 ccmni_instance_t *ccmni = (ccmni_instance_t *)(ch_info->m_owner);
433 ccci_msg_t msg;
434
435 while(get_logic_ch_data(ch_info, &msg)){
436 switch(msg.channel)
437 {
438 case CCCI_CCMNI1_TX_ACK:
439 case CCCI_CCMNI2_TX_ACK:
440 case CCCI_CCMNI3_TX_ACK:
441 // this should be in an interrupt,
442 // so no locking required...
443 ccmni->ready = 1;
444 netif_wake_queue(ccmni->dev);
445 break;
446
447 case CCCI_CCMNI1_RX:
448 case CCCI_CCMNI2_RX:
449 case CCCI_CCMNI3_RX:
450 //ccmni_read_tasklet2.data = (unsigned long) private_data;
451 //tasklet_schedule(&ccmni_read_tasklet);
452 tasklet_schedule(&ccmni->tasklet);
453 break;
454
455 default:
456 break;
457 }
458 }
459 }
460
461
462 static void ccmni_write(ccmni_instance_t *ccmni, frame_info_t *frame_info)
463 {
464 int size, over, total;
465 int ret;
466 unsigned read, write, length, len;
467 unsigned tmp_write;
468 unsigned char *ptr;
469 ccci_msg_t msg;
470 char *tx_buffer;
471 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
472 int md_id = ctl_b->m_md_id;
473
474 size = 0;
475 ptr = (unsigned char *) frame_info->frame_list[0].frame_data;
476 len = frame_info->frame_list[0].frame_size;
477
478
479 read = ccmni->shared_mem->tx_control.read;
480 write = ccmni->shared_mem->tx_control.write;
481 length = ccmni->shared_mem->tx_control.length;
482 over = length - write;
483 tx_buffer = ccmni->shared_mem->buffer + length;
484
485 if (read == write)
486 {
487 size = length;
488 }
489 else if (read < write)
490 {
491 size = length - write;
492 size += read;
493 }
494 else
495 {
496 size = read - write;
497 }
498
499 if (len > size)
500 {
501 len = size;
502 total = size;
503 }
504
505 total = len;
506
507 if (over < len)
508 {
509 memcpy(&tx_buffer[write], (void *) ptr, over);
510 len -= over;
511 ptr += over;
512 write = 0;
513 }
514
515 memcpy(&tx_buffer[write], (void *) ptr, len);
516 mb();
517 tmp_write = write + len;
518 if (tmp_write >= length)
519 {
520 tmp_write -= length;
521 }
522 ccmni->shared_mem->tx_control.write = tmp_write;
523
524 // ccmni->ready = 0;
525 len = total;
526 msg.addr = 0;
527 msg.len = len;
528 msg.channel = ccmni->uart_tx;
529 msg.reserved = 0;
530
531 ret = ccci_message_send(md_id, &msg, 1);
532 if (ret==-CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL)
533 {
534 set_bit(CCMNI_SEND_PENDING,&ccmni->flags);
535 ccmni->send_len +=len;
536 mod_timer(&ccmni->timer,jiffies);
537 }
538 else if (ret==sizeof(ccci_msg_t))
539 clear_bit(CCMNI_SEND_PENDING,&ccmni->flags);
540
541 return;
542 }
543
544
545 // The function start_xmit is called when there is one packet to transmit.
546 static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
547 {
548 int ret = NETDEV_TX_OK;
549 int size;
550 unsigned int read, write, length;
551 frame_info_t frame_info;
552 ccmni_instance_t *ccmni = netdev_priv(dev);
553 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
554 int md_id = ctl_b->m_md_id;
555
556 spin_lock_bh(&ccmni->spinlock);
557
558 if (ctl_b->ccci_is_ready==0)
559 {
560 CCCI_DBG_MSG(md_id, "net", "CCMNI%d transfer data fail when modem not ready \n", ccmni->channel);
561 ret = NETDEV_TX_BUSY;
562 goto _ccmni_start_xmit_busy;
563 }
564
565 read = ccmni->shared_mem->tx_control.read;
566 write = ccmni->shared_mem->tx_control.write;
567 length = ccmni->shared_mem->tx_control.length;
568 size = read - write;
569
570 CCCI_CCMNI_MSG(md_id, "CCMNI%d_start_xmit: skb_len=%d, size=%d, ccmni_ready=%d \n", \
571 ccmni->channel, skb->len, size, ccmni->ready);
572
573 if (size <= 0)
574 {
575 size += length;
576 }
577
578 if (skb->len > CCMNI_MTU)
579 {
580 // Sanity check; this should not happen!
581 // Digest and return OK.
582 CCCI_DBG_MSG(md_id, "net", "CCMNI%d packet size exceed 1500 bytes: size=%d \n", \
583 ccmni->channel, skb->len);
584 dev->stats.tx_dropped++;
585 goto _ccmni_start_xmit_exit;
586 }
587
588 if(size >= 1)
589 size-=1;
590 else
591 CCCI_DBG_MSG(md_id, "net", "CCMNI%d size is Zero(1) \n", ccmni->channel);
592
593 if (size < (skb->len + 4))
594 {
595 // The TX buffer is full, or its not ready yet,
596 // we should stop the net queue for the moment.
597 CCCI_DBG_MSG(md_id, "net", "CCMNI%d TX busy and stop queue: size=%d, skb->len=%d \n", \
598 ccmni->channel, size, skb->len);
599 CCCI_DBG_MSG(md_id, "net", " TX read = %d write = %d\n", \
600 ccmni->shared_mem->tx_control.read, ccmni->shared_mem->tx_control.write);
601 CCCI_DBG_MSG(md_id, "net", " RX read = %d write = %d\n", \
602 ccmni->shared_mem->rx_control.read, ccmni->shared_mem->rx_control.write);
603
604 netif_stop_queue(ccmni->dev);
605
606 // Set CCMNI ready to ZERO, and wait for the ACK from modem side.
607 ccmni->ready = 0;
608 ret = NETDEV_TX_BUSY;
609
610 goto _ccmni_start_xmit_busy;
611 }
612
613 frame_info = pfp_frame(ccmni->write_buffer, skb->data, skb->len, FRAME_START, ccmni->channel);
614 ccmni_write (ccmni, &frame_info);
615
616 dev->stats.tx_packets++;
617 dev->stats.tx_bytes += skb->len;
618
619 _ccmni_start_xmit_exit:
620 dev_kfree_skb(skb);
621
622 _ccmni_start_xmit_busy:
623 spin_unlock_bh(&ccmni->spinlock);
624
625 return ret;
626 }
627
628
629 static int ccmni_open(struct net_device *dev)
630 {
631 ccmni_instance_t *ccmni = netdev_priv(dev);
632 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
633 int md_id= ctl_b->m_md_id;
634
635 CCCI_MSG_INF(md_id, "net", "CCMNI%d open \n", ccmni->channel);
636 if (ctl_b->ccci_is_ready == 0) {
637 CCCI_MSG_INF(md_id, "net", "CCMNI%d open fail when modem not ready \n", ccmni->channel);
638 return -EIO;
639 }
640 netif_start_queue(dev);
641 return 0;
642 }
643
644 static int ccmni_close(struct net_device *dev)
645 {
646 ccmni_instance_t *ccmni = netdev_priv(dev);
647 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t*)ccmni->owner;
648
649 CCCI_MSG_INF(ctl_b->m_md_id, "net", "CCMNI%d close \n", ccmni->channel);
650 netif_stop_queue(dev);
651 return 0;
652 }
653
/* ndo_do_ioctl: placeholder -- no private ioctls implemented yet. */
static int ccmni_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    return 0;
}
661
662
663 static void ccmni_tx_timeout(struct net_device *dev)
664 {
665 // No implementation at this moment.
666 // This is a place holder.
667
668 dev->stats.tx_errors++;
669 netif_wake_queue(dev);
670 }
671
672
673 static const struct net_device_ops ccmni_netdev_ops =
674 {
675 .ndo_open = ccmni_open,
676 .ndo_stop = ccmni_close,
677 .ndo_start_xmit = ccmni_start_xmit,
678 .ndo_do_ioctl = ccmni_net_ioctl,
679 .ndo_tx_timeout = ccmni_tx_timeout,
680 };
681
682 extern int is_mac_addr_duplicate(char *mac);
683 static void ccmni_setup(struct net_device *dev)
684 {
685 int retry = 10;
686
687 ether_setup(dev);
688
689 dev->header_ops = NULL;
690 dev->netdev_ops = &ccmni_netdev_ops;
691 dev->flags = IFF_NOARP & (~IFF_BROADCAST & ~IFF_MULTICAST);
692 dev->mtu = CCMNI_MTU;
693 dev->tx_queue_len = CCMNI_TX_QUEUE;
694 dev->addr_len = ETH_ALEN;
695 dev->destructor = free_netdev;
696
697 while(retry-->0){
698 random_ether_addr((u8 *) dev->dev_addr);
699 if(is_mac_addr_duplicate((u8*)dev->dev_addr))
700 continue;
701 else
702 break;
703 }
704
705 return;
706 }
707
708 static int ccmni_create_instance(int md_id, int channel)
709 {
710 int ret, size;
711 int uart_rx, uart_rx_ack;
712 int uart_tx, uart_tx_ack;
713 ccmni_instance_t *ccmni;
714 struct net_device *dev = NULL;
715 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t *)ccmni_ctl_block[md_id];
716
717 // Network device creation and registration.
718 dev = alloc_netdev(sizeof(ccmni_instance_t), "", ccmni_setup);
719 if (dev == NULL)
720 {
721 CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate netdev fail!\n", channel);
722 return -ENOMEM;
723 }
724
725 ccmni = netdev_priv(dev);
726 ccmni->dev = dev;
727 ccmni->channel = channel;
728 ccmni->owner = ccmni_ctl_block[md_id];
729
730 if(md_id == MD_SYS1) {
731 sprintf(dev->name, "ccmni%d", channel);
732 } else {
733 sprintf(dev->name, "cc%dmni%d", md_id+1, channel);
734 //sprintf(dev->name, "ccmni%d", channel);
735 }
736
737 ret = register_netdev(dev);
738 if (ret != 0)
739 {
740 CCCI_MSG_INF(md_id, "net", "CCMNI%d register netdev fail: %d\n", ccmni->channel, ret);
741 goto _ccmni_create_instance_exit;
742 }
743
744 // CCCI channel registration.
745 ASSERT(ccci_uart_base_req(md_id, CCMNI_UART_OFFSET + ccmni->channel, (int*)&ccmni->shared_mem,
746 &ccmni->shared_mem_phys_addr, &size) == CCCI_SUCCESS);
747
748 if (ccmni->shared_mem == NULL)
749 {
750 CCCI_MSG_INF(md_id, "net", "CCMNI%d allocate memory fail\n", ccmni->channel);
751 unregister_netdev(dev);
752 ret = -ENOMEM;
753 goto _ccmni_create_instance_exit;
754 }
755
756 CCCI_CCMNI_MSG(md_id, "0x%08X:0x%08X:%d\n", (unsigned int)ccmni->shared_mem, \
757 (unsigned int)ccmni->shared_mem_phys_addr, size);
758
759 ccmni->shared_mem->tx_control.length = CCCI1_CCMNI_BUF_SIZE;
760 ccmni->shared_mem->tx_control.read = 0;
761 ccmni->shared_mem->tx_control.write = 0;
762
763 ccmni->shared_mem->rx_control.length = CCCI1_CCMNI_BUF_SIZE;
764 ccmni->shared_mem->rx_control.read = 0;
765 ccmni->shared_mem->rx_control.write = 0;
766
767 switch(ccmni->channel)
768 {
769 case 0:
770 uart_rx = CCCI_CCMNI1_RX;
771 uart_rx_ack = CCCI_CCMNI1_RX_ACK;
772 uart_tx = CCCI_CCMNI1_TX;
773 uart_tx_ack = CCCI_CCMNI1_TX_ACK;
774 break;
775
776 case 1:
777 uart_rx = CCCI_CCMNI2_RX;
778 uart_rx_ack = CCCI_CCMNI2_RX_ACK;
779 uart_tx = CCCI_CCMNI2_TX;
780 uart_tx_ack = CCCI_CCMNI2_TX_ACK;
781 break;
782
783 case 2:
784 uart_rx = CCCI_CCMNI3_RX;
785 uart_rx_ack = CCCI_CCMNI3_RX_ACK;
786 uart_tx = CCCI_CCMNI3_TX;
787 uart_tx_ack = CCCI_CCMNI3_TX_ACK;
788 break;
789
790 default:
791 CCCI_MSG_INF(md_id, "net", "[Error]CCMNI%d Invalid ccmni number\n", ccmni->channel);
792 unregister_netdev(dev);
793 ret = -ENOSYS;
794 goto _ccmni_create_instance_exit;
795 }
796 ccmni->m_md_id = md_id;
797
798 ccmni->uart_rx = uart_rx;
799 ccmni->uart_rx_ack = uart_rx_ack;
800 ccmni->uart_tx = uart_tx;
801 ccmni->uart_tx_ack = uart_tx_ack;
802
803 // Register this ccmni instance to the ccci driver.
804 // pass it the notification handler.
805 ASSERT(register_to_logic_ch(md_id, uart_rx, ccmni_callback, (void *) ccmni) == 0);
806 ASSERT(register_to_logic_ch(md_id, uart_tx_ack, ccmni_callback, (void *) ccmni) == 0);
807
808 // Initialize the spinlock.
809 spin_lock_init(&ccmni->spinlock);
810 setup_timer(&ccmni->timer,timer_func,(unsigned long)ccmni);
811
812 // Initialize the tasklet.
813 tasklet_init(&ccmni->tasklet, ccmni_read, (unsigned long)ccmni);
814
815 ctl_b->ccmni_instance[channel] = ccmni;
816 ccmni->ready = 1;
817 ccmni->net_if_off = 0;
818 return ret;
819
820 _ccmni_create_instance_exit:
821 free_netdev(dev);
822 kfree(ccmni);
823 ctl_b->ccmni_instance[channel] = NULL;
824 return ret;
825 }
826
827
828 static void ccmni_destroy_instance(int md_id, int channel)
829 {
830 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t *)ccmni_ctl_block[md_id];
831 ccmni_instance_t *ccmni = ctl_b->ccmni_instance[channel];
832
833 if (ccmni != NULL)
834 {
835 ccmni->ready = 0;
836 un_register_to_logic_ch(md_id, ccmni->uart_rx);
837 un_register_to_logic_ch(md_id, ccmni->uart_tx_ack);
838
839 if (ccmni->shared_mem != NULL) {
840 ccmni->shared_mem = NULL;
841 ccmni->shared_mem_phys_addr = 0;
842 }
843
844 if(ccmni->dev != NULL) {
845 unregister_netdev(ccmni->dev);
846 //free_netdev(ccmni->dev);
847 }
848 //tasklet_kill(&ccmni->tasklet);
849 ctl_b->ccmni_instance[channel] = NULL;
850 }
851 }
852
853
854 int ccmni_v1_init(int md_id)
855 {
856 int count, ret, curr;
857 ccmni_v1_ctl_block_t *ctl_b;
858
859 // Create control block structure
860 ctl_b = (ccmni_v1_ctl_block_t *)kmalloc(sizeof(ccmni_v1_ctl_block_t), GFP_KERNEL);
861 if(ctl_b == NULL)
862 return -CCCI_ERR_GET_MEM_FAIL;
863
864 memset(ctl_b, 0, sizeof(ccmni_v1_ctl_block_t));
865 ccmni_ctl_block[md_id] = ctl_b;
866
867 // Init ctl_b
868 ctl_b->m_md_id = md_id;
869 ctl_b->ccmni_notifier.call = ccmni_notifier_call;
870 ctl_b->ccmni_notifier.next = NULL;
871
872 for(count = 0; count < CCMNI_V1_PORT_NUM; count++)
873 {
874 ret = ccmni_create_instance(md_id, count);
875 if (ret != 0) {
876 CCCI_MSG_INF(md_id, "net", "CCMNI%d create instance fail: %d\n", count, ret);
877 goto _CCMNI_INSTANCE_CREATE_FAIL;
878 } else {
879 //CCCI_MSG_INF(md_id, "net", "CCMNI%d create instance ok!\n", count);
880 }
881 }
882
883 ret=md_register_call_chain(md_id ,&ctl_b->ccmni_notifier);
884 if(ret) {
885 CCCI_MSG_INF(md_id, "net", "md_register_call_chain fail: %d\n", ret);
886 goto _CCMNI_INSTANCE_CREATE_FAIL;
887 }
888
889 snprintf(ctl_b->wakelock_name, sizeof(ctl_b->wakelock_name), "ccci%d_net_v1", (md_id+1));
890 wake_lock_init(&ctl_b->ccmni_wake_lock, WAKE_LOCK_SUSPEND, ctl_b->wakelock_name);
891
892 return ret;
893
894 _CCMNI_INSTANCE_CREATE_FAIL:
895 for(curr=0; curr<=count; curr++) {
896 ccmni_destroy_instance(md_id, curr);
897 }
898 kfree(ctl_b);
899 ccmni_ctl_block[md_id] = NULL;
900 return ret;
901 }
902
903
904 void ccmni_v1_exit(int md_id)
905 {
906 int count;
907 ccmni_v1_ctl_block_t *ctl_b = (ccmni_v1_ctl_block_t *)ccmni_ctl_block[md_id];
908
909 if (ctl_b) {
910 for(count = 0; count < CCMNI_V1_PORT_NUM; count++)
911 ccmni_destroy_instance(md_id, count);
912
913 md_unregister_call_chain(md_id, &ctl_b->ccmni_notifier);
914 wake_lock_destroy(&ctl_b->ccmni_wake_lock);
915 kfree(ctl_b);
916 ccmni_ctl_block[md_id] = NULL;
917 }
918
919 return;
920 }
921
922