/*
 * drivers/misc/mediatek/eccci/ccci_core.h
 * CCCI core definitions (MediaTek MT8127 kernel tree, PULS_20160108 branch).
 */
1 #ifndef __CCCI_CORE_H__
2 #define __CCCI_CORE_H__
3
4 #include <linux/wait.h>
5 #include <linux/skbuff.h>
6 #include <linux/timer.h>
7 #include <linux/types.h>
8 #include <linux/ktime.h>
9 #include <linux/netdevice.h>
10 #include <linux/platform_device.h>
11 #include <linux/wakelock.h>
12 #include <linux/kobject.h>
13 #include <linux/sysfs.h>
14 #include <mach/mt_ccci_common.h>
15 #include <mach/ccci_config.h>
16 #include "ccci_debug.h"
17
18 #define CCCI_DEV_NAME "ccci"
19 #define CCCI_MTU (3584-128)
20 //#define CCMNI_MTU 1500
21 #define CCCI_MAGIC_NUM 0xFFFFFFFF
22
/*
 * sysfs attribute wrapper carrying a back-pointer to the owning modem,
 * so the show/store handlers receive the modem instance directly.
 */
struct ccci_md_attribute
{
	struct attribute attr;
	struct ccci_modem *modem;	// owning modem, filled by CCCI_MD_ATTR()
	ssize_t (*show)(struct ccci_modem *md, char *buf);
	ssize_t (*store)(struct ccci_modem *md, const char *buf, size_t count);
};

/* Declare a static ccci_md_attribute named ccci_md_attr_<_name>, bound to _modem. */
#define CCCI_MD_ATTR(_modem, _name, _mode, _show, _store) \
static struct ccci_md_attribute ccci_md_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode }, \
	.modem = _modem, \
	.show = _show, \
	.store = _store, \
}
38
39 /* enumerations and marcos */
/* enumerations and macros */
/*
 * On-wire CCCI packet header (16 bytes), shared with the modem side.
 * data[0]/data[1] are channel-specific; do NOT assume data[1] is the
 * payload length on Rx. With FEATURE_SEQ_CHECK_EN the channel word is
 * split into channel/seq_num/assert_bit bitfields.
 */
struct ccci_header{
	u32 data[2]; // do NOT assume data[1] is data length in Rx
#ifdef FEATURE_SEQ_CHECK_EN
	u16 channel:16;
	u16 seq_num:15;		// 15-bit wrapping sequence number, per channel
	u16 assert_bit:1;	// if set, receiver asserts on out-of-order seq_num
#else
	u32 channel;
#endif
	u32 reserved;
} __attribute__ ((packed)); // not necessary, but it's a good gesture, :)
51
52
/*
 * Modem state as tracked by the AP-side driver (md->md_state) and broadcast
 * to ports. The *_IRQ/TX_FULL/BOOT_FAIL values are event broadcasts only and
 * are illegal as a stored md_state.
 */
typedef enum {
	INVALID = 0, // no traffic
	GATED, // broadcast by modem driver, no traffic
	BOOTING, // broadcast by modem driver
	READY, // broadcast by port_kernel
	EXCEPTION, // broadcast by port_kernel
	RESET, // broadcast by modem driver, no traffic

	RX_IRQ, // broadcast by modem driver, illegal for md->md_state, only for NAPI!
	TX_IRQ, // broadcast by modem driver, illegal for md->md_state, only for network!
	TX_FULL, // broadcast by modem driver, illegal for md->md_state, only for network!
	BOOT_FAIL, // broadcast by port_kernel, illegal for md->md_state
}MD_STATE; // for CCCI internal

/* Boot progress as reported to other kernel modules (coarser than MD_STATE). */
typedef enum {
	MD_BOOT_STAGE_0 = 0,
	MD_BOOT_STAGE_1 = 1,
	MD_BOOT_STAGE_2 = 2,
	MD_BOOT_STAGE_EXCEPTION = 3
}MD_BOOT_STAGE; // for other module

/* Exception-flow progress, used for logging (md->ex_stage). */
typedef enum {
	EX_NONE = 0,
	EX_INIT,
	EX_DHL_DL_RDY,
	EX_INIT_DONE,
	// internal use
	MD_NO_RESPONSE,
	MD_WDT,
}MD_EX_STAGE;
83
/*
 * MAUI modem exception record layout. These structures mirror a fixed binary
 * format produced by the modem in shared memory: every field offset and size
 * matters, hence the packed attribute and explicit reserved padding. Do not
 * reorder or resize fields.
 */

/* MODEM MAUI Exception header (4 bytes)*/
typedef struct _exception_record_header_t {
	u8 ex_type;
	u8 ex_nvram;
	u16 ex_serial_num;
} __attribute__ ((packed)) EX_HEADER_T;

/* MODEM MAUI Environment information (164 bytes) */
typedef struct _ex_environment_info_t {
	u8 boot_mode; /* offset: +0x10*/
	u8 reserved1[8];
	u8 execution_unit[8];
	u8 status; /* offset: +0x21, length: 1 */
	u8 ELM_status; /* offset: +0x22, length: 1 */
	u8 reserved2[145];
} __attribute__ ((packed)) EX_ENVINFO_T;

/* MODEM MAUI Special for fatal error (8 bytes)*/
typedef struct _ex_fatalerror_code_t {
	u32 code1;
	u32 code2;
} __attribute__ ((packed)) EX_FATALERR_CODE_T;

/* MODEM MAUI fatal error (296 bytes)*/
typedef struct _ex_fatalerror_t {
	EX_FATALERR_CODE_T error_code;
	u8 reserved1[288];
} __attribute__ ((packed)) EX_FATALERR_T;

/* MODEM MAUI Assert fail (296 bytes)*/
typedef struct _ex_assert_fail_t {
	u8 filename[24];
	u32 linenumber;
	u32 parameters[3];
	u8 reserved1[256];
} __attribute__ ((packed)) EX_ASSERTFAIL_T;

/* MODEM MAUI Globally exported data structure (300 bytes); union discriminated by EX_HEADER_T.ex_type */
typedef union {
	EX_FATALERR_T fatalerr;
	EX_ASSERTFAIL_T assert;
} __attribute__ ((packed)) EX_CONTENT_T;

/* MODEM MAUI Standard structure of an exception log */
typedef struct _ex_exception_log_t {
	EX_HEADER_T header;
	u8 reserved1[12];
	EX_ENVINFO_T envinfo;
	u8 reserved2[36];
	EX_CONTENT_T content;
} __attribute__ ((packed)) EX_LOG_T;
135
/*
 * Legacy CCCI message, same 16-byte footprint as struct ccci_header but with
 * per-use-case field names (mailbox message vs. stream descriptor vs. raw
 * ccci_header words) exposed through anonymous unions.
 */
typedef struct _ccci_msg {
	union{
		u32 magic; // For mail box magic number
		u32 addr; // For stream start addr
		u32 data0; // For ccci common data[0]
	};
	union{
		u32 id; // For mail box message id
		u32 len; // For stream len
		u32 data1; // For ccci common data[1]
	};
	u32 channel;
	u32 reserved;
} __attribute__ ((packed)) ccci_msg_t;
150
/*
 * Aggregated exception-dump information collected when the modem crashes.
 * `type` selects which member of the anonymous union is valid; the ext_mem/
 * md_image pointers reference dump regions captured by the modem driver.
 */
typedef struct dump_debug_info {
	unsigned int type;		// discriminator for the union below
	char *name;			// human-readable exception name
	unsigned int more_info;
	union {
		struct {
			char file_name[30];
			int line_num;
			unsigned int parameters[3];
		} assert;
		struct {
			int err_code1;
			int err_code2;
			char offender[9];
		}fatal_error;
		ccci_msg_t data;
		struct {
			unsigned char execution_unit[9]; // 8+1, NUL-terminated
			char file_name[30];
			int line_num;
			unsigned int parameters[3];
		}dsp_assert;
		struct {
			unsigned char execution_unit[9];
			unsigned int code1;
		}dsp_exception;
		struct {
			unsigned char execution_unit[9];
			unsigned int err_code[2];
		}dsp_fatal_err;
	};
	void *ext_mem;			// extra dump memory (owner: modem driver)
	size_t ext_size;
	void *md_image;			// modem image snapshot for dumping
	size_t md_size;
	void *platform_data;
	void (*platform_call)(void *data);	// platform hook invoked with platform_data
}DEBUG_INFO_T;
189
/* Lifecycle state of a ccci_request; updated only by the buffer manager. */
typedef enum {
	IDLE = 0,
	FLYING,
	PARTIAL_READ,
	ERROR,
}REQ_STATE;

/* Queue direction: IN = Rx (modem to AP), OUT = Tx (AP to modem). */
typedef enum {
	IN = 0,
	OUT
}DIRECTION;

/*
 * This tells request free routine how it handles skb.
 * The CCCI request structure will always be recycled, but its skb can have different policy.
 * CCCI request can work as just a wrapper, due to network subsys will handle skb itself.
 * Tx: policy is determined by sender;
 * Rx: policy is determined by receiver;
 */
typedef enum {
	NOOP = 0, // don't handle the skb, just recycle the request wrapper
	RECYCLE, // put the skb back into our pool
	FREE, // simply free the skb
}DATA_POLICY;
214
// core classes
#define CCCI_REQUEST_TRACE_DEPTH 3
/*
 * One transfer unit flowing between ports and hardware queues. Wraps an skb
 * plus the DMA descriptor (GPD) used by the modem interface hardware.
 */
struct ccci_request{
	struct sk_buff *skb;	// payload buffer; may be owned by network stack (see DATA_POLICY)
	void *gpd; // virtual address for CPU

	struct list_head entry;	// linkage into a queue's or port's request list
	char policy;		// DATA_POLICY: how the skb is handled on free
	char blocking; // only for Tx
	char state; // REQ_STATE, only update by buffer manager

	char dir;		// DIRECTION of this request
	dma_addr_t gpd_addr; // physical address for DMA
	dma_addr_t data_buffer_ptr_saved;	// saved data pointer, restored after DMA use
};
230
struct ccci_modem;
struct ccci_port;

/*
 * Per-port operations. init/recv_request are mandatory; req_match overrides
 * the default rx-channel match and md_state_notice observes state broadcasts.
 */
struct ccci_port_ops {
	// must-have
	int (*init)(struct ccci_port *port);
	int (*recv_request)(struct ccci_port *port, struct ccci_request* req);
	// optional
	int (*req_match)(struct ccci_port *port, struct ccci_request* req);
	void (*md_state_notice)(struct ccci_port *port, MD_STATE state);
};
242
/*
 * One logical CCCI port (character device, network interface, or kernel-internal
 * channel) multiplexed onto the modem's hardware queues.
 */
struct ccci_port {
	// don't change the sequence unless you modified modem drivers as well
	// identity
	CCCI_CH tx_ch;
	CCCI_CH rx_ch;
	/*
	 * 0xF? is used as invalid index number, all virtual ports should use queue 0, but not 0xF?.
	 * always access queue index by using PORT_TXQ_INDEX and PORT_RXQ_INDEX macros.
	 * modem driver should always use >valid_queue_number to check invalid index, but not
	 * using ==0xF? style.
	 *
	 * here is a nasty trick, we assume no modem provide more than 0xF0 queues, so we use
	 * the lower 4 bit to smuggle info for network ports.
	 * Attention, in this trick we assume hardware queue index for net port will not exceed 0xF.
	 * check NET_ACK_TXQ_INDEX@port_net.c
	 */
	unsigned char txq_index;
	unsigned char rxq_index;
	unsigned char txq_exp_index;	// queue used while modem is in EXCEPTION state
	unsigned char rxq_exp_index;
	unsigned char flags;		// PORT_F_* bits below
	struct ccci_port_ops *ops;
	// device node related
	unsigned int minor;
	char *name;
	// un-initialized in definition, always put them at the end
	struct ccci_modem *modem;	// owning modem, set by ccci_port_struct_init()
	void *private_data;
	atomic_t usage_cnt;		// open count of the device node
	struct list_head entry;		// linkage into modem->rx_ch_ports[rx_ch]
	/*
	 * the Tx and Rx flow are asymmetric due to ports are multiplexed on queues.
	 * Tx: data block are sent directly to queue's list, so port won't maintain a Tx list. It only
	 *     provides a wait_queue_head for blocking write.
	 * Rx: due to modem needs to dispatch Rx packet as quickly as possible, so port needs a
	 * Rx list to hold packets.
	 */
	struct list_head rx_req_list;	// pending Rx requests, guarded by rx_req_lock
	spinlock_t rx_req_lock;
	wait_queue_head_t rx_wq; // for uplayer user
	int rx_length;			// number of requests currently on rx_req_list
	int rx_length_th;		// threshold above which Rx is considered full
	struct wake_lock rx_wakelock;	// held while Rx data is pending delivery
	unsigned int tx_busy_count;
	unsigned int rx_busy_count;
};
#define PORT_F_ALLOW_DROP (1<<0) // packet will be dropped if port's Rx buffer full
#define PORT_F_RX_FULLED (1<<1) // rx buffer has been full once
#define PORT_F_USER_HEADER (1<<2) // CCCI header will be provided by user, but not by CCCI
#define PORT_F_RX_EXCLUSIVE (1<<3) // Rx queue only has this one port
293
/* Persistent modem configuration: image load type and MD_SETTING_* flags. */
struct ccci_modem_cfg {
	unsigned int load_type;
	unsigned int load_type_saving;	// load type pending to be saved/applied
	unsigned int setting;		// bitmask of MD_SETTING_* below
};
#define MD_SETTING_ENABLE (1<<0)
#define MD_SETTING_RELOAD (1<<1)	// image must be reloaded before next boot
#define MD_SETTING_FIRST_BOOT (1<<2) // this is the first time of boot up
#define MD_SETTING_STOP_RETRY_BOOT (1<<3)
#define MD_SETTING_DUMMY (1<<7)
304
/*
 * Memory regions reserved for the modem, all from the AP's point of view
 * (AP has no hardware remap after MT6592): each region is described by its
 * kernel virtual mapping, physical address, and size.
 */
struct ccci_mem_layout // all from AP view, AP has no hardware remap after MT6592
{
	// MD image
	void __iomem* md_region_vir;
	phys_addr_t md_region_phy;
	unsigned int md_region_size;
	// DSP image
	void __iomem* dsp_region_vir;
	phys_addr_t dsp_region_phy;
	unsigned int dsp_region_size;
	// Share memory
	void __iomem* smem_region_vir;
	phys_addr_t smem_region_phy;
	unsigned int smem_region_size;
	unsigned int smem_offset_AP_to_MD; // offset between AP and MD view of share memory
};

/*
 * Layout of the exception area inside the shared memory region: the overall
 * exception window plus the sub-regions dumped/parsed on a modem exception.
 */
struct ccci_smem_layout
{
	// total exception region
	void __iomem* ccci_exp_smem_base_vir;
	phys_addr_t ccci_exp_smem_base_phy;
	unsigned int ccci_exp_smem_size;
	unsigned int ccci_exp_dump_size;

	// how we dump exception region
	void __iomem* ccci_exp_smem_ccci_debug_vir;
	unsigned int ccci_exp_smem_ccci_debug_size;
	void __iomem* ccci_exp_smem_mdss_debug_vir;
	unsigned int ccci_exp_smem_mdss_debug_size;

	// the address we parse MD exception record (EX_LOG_T)
	void __iomem* ccci_exp_rec_base_vir;
};
339
/* Bitmask selecting which regions ops->dump_info() should dump. */
typedef enum {
	DUMP_FLAG_CCIF = (1<<0),
	DUMP_FLAG_CLDMA = (1<<1),
	DUMP_FLAG_REG = (1<<2),
	DUMP_FLAG_SMEM = (1<<3),
	DUMP_FLAG_IMAGE = (1<<4),
	DUMP_FLAG_LAYOUT = (1<<5),
} MODEM_DUMP_FLAG;

/* Flags passed to ops->ee_callback() to toggle the watchdog during exception handling. */
typedef enum {
	EE_FLAG_ENABLE_WDT = (1<<0),
	EE_FLAG_DISABLE_WDT = (1<<1),
} MODEM_EE_FLAG;

/* Dump sizes in bytes for image header dumps (256 / 512). */
#define MD_IMG_DUMP_SIZE (1<<8)
#define DSP_IMG_DUMP_SIZE (1<<9)

/* Kind of low-power event forwarded via ops->low_power_notify(). */
typedef enum {
	LOW_BATTERY,
	BATTERY_PERCENT,
} LOW_POEWR_NOTIFY_TYPE;

/* Transport used by ops->force_assert() to make the modem assert. */
typedef enum {
	CCCI_MESSAGE,
	CCIF_INTERRUPT,
	CCIF_INTR_SEQ,	// used when an Rx sequence-number gap is detected
} MD_COMM_TYPE;

/* Bits of md->md_status_poller_flag, maintained by the status-poll timer. */
typedef enum {
	MD_STATUS_POLL_BUSY = (1<<0),
	MD_STATUS_ASSERTED = (1<<1),
} MD_STATUS_POLL_FLAG;
372
/*
 * Operations each modem driver implements. CCCI core only talks to the modem
 * through this table; "must-have" entries may not be NULL.
 */
struct ccci_modem_ops {
	// must-have
	int (*init)(struct ccci_modem *md);
	int (*start)(struct ccci_modem *md);
	int (*reset)(struct ccci_modem *md); // as pre-stop
	int (*stop)(struct ccci_modem *md, unsigned int timeout);
	int (*send_request)(struct ccci_modem *md, unsigned char txqno, struct ccci_request *req);
	int (*give_more)(struct ccci_modem *md, unsigned char rxqno);	// refill Rx queue with buffers
	int (*write_room)(struct ccci_modem *md, unsigned char txqno);	// free Tx slots on a queue
	int (*start_queue)(struct ccci_modem *md, unsigned char qno, DIRECTION dir);
	int (*stop_queue)(struct ccci_modem *md, unsigned char qno, DIRECTION dir);
	int (*napi_poll)(struct ccci_modem *md, unsigned char rxqno, struct napi_struct *napi ,int weight);
	int (*send_runtime_data)(struct ccci_modem *md, unsigned int sbp_code);
	int (*broadcast_state)(struct ccci_modem *md, MD_STATE state);
	int (*force_assert)(struct ccci_modem *md, MD_COMM_TYPE type);
	int (*dump_info)(struct ccci_modem *md, MODEM_DUMP_FLAG flag, void *buff, int length);
	struct ccci_port* (*get_port_by_minor)(struct ccci_modem *md, int minor);
	/*
	 * here we assume Rx and Tx channels are in the same address space,
	 * and Rx channel should be checked first, so user can save one comparison if it always sends
	 * in Rx channel ID to identify a port.
	 */
	struct ccci_port* (*get_port_by_channel)(struct ccci_modem *md, CCCI_CH ch);
	int (*low_power_notify)(struct ccci_modem *md, LOW_POEWR_NOTIFY_TYPE type, int level);
	int (*ee_callback)(struct ccci_modem *md, MODEM_EE_FLAG flag);
};
399
/*
 * One modem instance. Allocated by ccci_allocate_modem(); the private_data
 * area (of the size requested there) is owned by the modem driver.
 */
struct ccci_modem {
	unsigned char index;		// modem ID, used in logging macros
	unsigned char *private_data;	// modem-driver private area
	struct list_head rx_ch_ports[CCCI_MAX_CH_NUM]; // port list of each Rx channel, for Rx dispatching
	short seq_nums[2][CCCI_MAX_CH_NUM];	// [IN]/[OUT] per-channel sequence counters
	unsigned int capability;
	volatile MD_STATE md_state; // check comments below, put it here for cache benefit
	struct ccci_modem_ops *ops;
	atomic_t wakeup_src;		// set when this modem woke the AP
	struct ccci_port *ports;	// array of port_number ports

	struct list_head entry;		// linkage into global modem list
	unsigned char port_number;
	char post_fix[IMG_POSTFIX_LEN];	// image filename postfix
	unsigned int major;		// char device major / minor base
	unsigned int minor_base;
	struct kobject kobj;		// sysfs object for ccci_md_attribute entries
	struct ccci_mem_layout mem_layout;
	struct ccci_smem_layout smem_layout;
	struct ccci_image_info img_info[IMG_NUM];
	unsigned int sim_type;
	unsigned int sbp_code;		// software board parameter sent at runtime
	unsigned int sbp_code_default;
	unsigned char critical_user_active[4];
	unsigned int md_img_exist[MAX_IMG_NUM];
	struct platform_device *plat_dev;
	/*
	 * the following members are readonly for CCCI core. they are maintained by modem and
	 * port_kernel.c.
	 * port_kernel.c should not be considered as part of CCCI core, we just move common part
	 * of modem message handling into this file. current modem all follows the same message
	 * protocol during bootup and exception. if future modem abandoned this protocol, we can
	 * simply replace function set of kernel port to support it.
	 */
	volatile MD_BOOT_STAGE boot_stage;
	MD_EX_STAGE ex_stage; // only for logging
	struct ccci_modem_cfg config;
	struct timer_list bootup_timer;		// fires if boot handshake times out
	struct timer_list ex_monitor;		// exception-flow watchdog timers
	struct timer_list ex_monitor2;
	struct timer_list md_status_poller;
	struct timer_list md_status_timeout;
	unsigned int md_status_poller_flag;	// MD_STATUS_POLL_FLAG bits
	spinlock_t ctrl_lock;			// guards control-path state below
	volatile unsigned int ee_info_flag;
	DEBUG_INFO_T debug_info;
	unsigned char ex_type;
	EX_LOG_T ex_info;			// exception record copied from shared memory

	//unsigned char private_data[0]; // do NOT use this manner, otherwise spinlock inside private_data will trigger alignment exception
};
451
// APIs
/* Release a request according to req->policy (NOOP/RECYCLE/FREE). */
extern void ccci_free_req(struct ccci_request *req);
/* Advance the modem's exception-flow state machine (md->ex_stage). */
extern void ccci_md_exception_notify(struct ccci_modem *md, MD_EX_STAGE stage);
455
456 static inline void ccci_setup_channel_mapping(struct ccci_modem *md)
457 {
458 int i;
459 struct ccci_port *port = NULL;
460 // setup mapping
461 for(i=0; i<ARRAY_SIZE(md->rx_ch_ports); i++) {
462 INIT_LIST_HEAD(&md->rx_ch_ports[i]); // clear original list
463 }
464 for(i=0; i<md->port_number; i++) {
465 list_add_tail(&md->ports[i].entry, &md->rx_ch_ports[md->ports[i].rx_ch]);
466 }
467 for(i=0; i<ARRAY_SIZE(md->rx_ch_ports); i++) {
468 if(!list_empty(&md->rx_ch_ports[i])) {
469 CCCI_INF_MSG(md->index, CORE, "CH%d ports:", i);
470 list_for_each_entry(port, &md->rx_ch_ports[i], entry) {
471 printk("%s(%d/%d) ", port->name, port->rx_ch, port->tx_ch);
472 }
473 printk("\n");
474 }
475 }
476 }
477
478 static inline void ccci_reset_seq_num(struct ccci_modem *md)
479 {
480 // it's redundant to use 2 arrays, but this makes sequence checking easy
481 memset(md->seq_nums[OUT], 0, sizeof(md->seq_nums[OUT]));
482 memset(md->seq_nums[IN], -1, sizeof(md->seq_nums[IN]));
483 }
484
// as one channel can only use one hardware queue, so it's safe we call this function in hardware queue's lock protection
/*
 * Stamp the outgoing packet's header with the next Tx sequence number for
 * its channel and decide whether the modem may assert on a sequence gap.
 *
 * Fixes: the original guard compared the channel against
 * sizeof(md->seq_nums[OUT]) — a byte count (2 bytes per short element), i.e.
 * twice the element count — so channels in [CCCI_MAX_CH_NUM,
 * 2*CCCI_MAX_CH_NUM) indexed seq_nums out of bounds. Use ARRAY_SIZE, which
 * matches the >= CCCI_MAX_CH_NUM check in ccci_port_recv_request. The
 * 'channel < 0' test is dropped: channel is an unsigned bitfield, so it was
 * always false.
 */
static inline void ccci_inc_tx_seq_num(struct ccci_modem *md, struct ccci_request *req)
{
#ifdef FEATURE_SEQ_CHECK_EN
	struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;
	if (ccci_h->channel >= ARRAY_SIZE(md->seq_nums[OUT])) {
		CCCI_INF_MSG(md->index, CORE, "ignore seq inc on channel %x\n", *(((u32 *)ccci_h)+2));
		return; // for force assert channel, etc.
	}
	ccci_h->seq_num = md->seq_nums[OUT][ccci_h->channel]++;
	ccci_h->assert_bit = 1;

	// for rpc channel, can only set assert_bit when md is in single-task phase.
	// when md is in multi-task phase, assert bit should be 0, since ipc task are preemptible
	if ((ccci_h->channel==CCCI_RPC_TX || ccci_h->channel==CCCI_FS_TX) && md->boot_stage!=MD_BOOT_STAGE_1)
		ccci_h->assert_bit = 0;

#endif
}
504
/* Select the hardware queue: use the exception queue while the modem is in EXCEPTION state. */
#define PORT_TXQ_INDEX(p) ((p)->modem->md_state==EXCEPTION?(p)->txq_exp_index:(p)->txq_index)
#define PORT_RXQ_INDEX(p) ((p)->modem->md_state==EXCEPTION?(p)->rxq_exp_index:(p)->rxq_index)

/*
 * Submit a Tx request on the port's current hardware queue.
 * If send_request returns 0, then it's modem driver's duty to free the request, and caller should NOT reference the
 * request any more. But if it returns an error, caller is responsible to free the request.
 */
static inline int ccci_port_send_request(struct ccci_port *port, struct ccci_request *req)
{
	struct ccci_modem *md = port->modem;
	return md->ops->send_request(md, PORT_TXQ_INDEX(port), req);
}
517
/*
 * Dispatch one Rx request to the port that owns its channel, with optional
 * sequence-number verification.
 *
 * if recv_request returns 0 or -CCCI_ERR_DROP_PACKET, then it's port's duty to free the request, and caller should
 * NOT reference the request any more. but if it returns other error, caller should be responsible to free the request.
 */
static inline int ccci_port_recv_request(struct ccci_modem *md, struct ccci_request *req)
{
	struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;
	struct ccci_port *port = NULL;
	struct list_head *port_list = NULL;
	int ret = -CCCI_ERR_CHANNEL_NUM_MIS_MATCH;
#ifdef FEATURE_SEQ_CHECK_EN
	u16 channel, seq_num, assert_bit;
#endif
	char matched = 0;

	/* reject out-of-range channels before indexing the dispatch table */
	if(unlikely(ccci_h->channel >= CCCI_MAX_CH_NUM)) {
		ret = -CCCI_ERR_CHANNEL_NUM_MIS_MATCH;
		goto err_exit;
	}

	port_list = &md->rx_ch_ports[ccci_h->channel];
	list_for_each_entry(port, port_list, entry) {
		/*
		 * multi-cast is not supported, because one port may freed or modified this request
		 * before another port can process it. but we still can use req->state to achive some
		 * kind of multi-cast if needed.
		 */
		matched = (port->ops->req_match==NULL)?(ccci_h->channel == port->rx_ch):port->ops->req_match(port, req);
		if(matched) {
#ifdef FEATURE_SEQ_CHECK_EN
			/* snapshot header fields now: recv_request may free or modify the skb */
			channel = ccci_h->channel;
			seq_num = ccci_h->seq_num;
			assert_bit = ccci_h->assert_bit;
#endif
			ret = port->ops->recv_request(port, req);
#ifdef FEATURE_SEQ_CHECK_EN
			/* account the packet only once the port consumed or explicitly dropped it */
			if(ret>=0 || ret==-CCCI_ERR_DROP_PACKET) {
				/* 15-bit sequence space: in-order means (new - last) == 1 mod 2^15 */
				if(assert_bit && ((seq_num - md->seq_nums[IN][channel]) & 0x7FFF) != 1) {
					CCCI_ERR_MSG(md->index, CORE, "port %s seq number out-of-order %d->%d\n",
						port->name, seq_num, md->seq_nums[IN][channel]);
					md->ops->force_assert(md, CCIF_INTR_SEQ);
				} else {
					//CCCI_INF_MSG(md->index, CORE, "ch %d seq %d->%d %d\n", channel, md->seq_nums[IN][channel], seq_num, assert_bit);
					md->seq_nums[IN][channel] = seq_num;
				}
			}
#endif
			if(ret == -CCCI_ERR_PORT_RX_FULL)
				port->rx_busy_count++;
			break;
		}
	}

err_exit:
	/* no port claimed this channel: unlink, recycle the skb, and report a drop */
	if(ret == -CCCI_ERR_CHANNEL_NUM_MIS_MATCH) {
		CCCI_ERR_MSG(md->index, CORE, "drop on not supported channel %d\n", ccci_h->channel);
		list_del(&req->entry);
		req->policy = RECYCLE;
		ccci_free_req(req);
		ret = -CCCI_ERR_DROP_PACKET;
	}
	return ret;
}
581
582 /*
583 * caller should lock with port->rx_req_lock
584 */
585 static inline int ccci_port_ask_more_request(struct ccci_port *port)
586 {
587 struct ccci_modem *md = port->modem;
588 int ret;
589
590 if(port->flags & PORT_F_RX_FULLED)
591 ret = md->ops->give_more(port->modem, PORT_RXQ_INDEX(port));
592 else
593 ret = -1;
594 return ret;
595 }
596
597 // structure initialize
598 static inline void ccci_port_struct_init(struct ccci_port *port, struct ccci_modem *md)
599 {
600 INIT_LIST_HEAD(&port->rx_req_list);
601 spin_lock_init(&port->rx_req_lock);
602 INIT_LIST_HEAD(&port->entry);
603 init_waitqueue_head(&port->rx_wq);
604 port->rx_length = 0;
605 port->tx_busy_count = 0;
606 port->rx_busy_count = 0;
607 atomic_set(&port->usage_cnt, 0);
608 port->modem = md;
609 wake_lock_init(&port->rx_wakelock, WAKE_LOCK_SUSPEND, port->name);
610 }
611
612 /*
613 * only used during allocate buffer pool, should NOT be used after allocated a request
614 */
615 static inline void ccci_request_struct_init(struct ccci_request *req)
616 {
617 req->skb = NULL;
618 req->state = IDLE;
619 req->policy = FREE;
620 /*
621 * as this request is not in any list, but pay ATTENTION, this will cause list_add(req) fail due
622 * to it's not pointing to itself.
623 */
624 req->entry.next = LIST_POISON1;
625 req->entry.prev = LIST_POISON2;
626 }
627
/* Allocate a modem instance plus private_size bytes of driver-private data. */
struct ccci_modem *ccci_allocate_modem(int private_size);
/* Register a modem with CCCI core; makes its ports and sysfs entries live. */
int ccci_register_modem(struct ccci_modem *modem);
/* Create a character device node under CCCI_DEV_NAME. */
int ccci_register_dev_node(const char *name, int major_id, int minor);
struct ccci_port *ccci_get_port_for_node(int major, int minor);
/* Send a control message to the modem (blocking selects wait-on-full behavior). */
int ccci_send_msg_to_md(struct ccci_modem *md, CCCI_CH ch, CCCI_MD_MSG msg, u32 resv, int blocking);
/* Loop a message back to an AP-side virtual port without touching the modem. */
int ccci_send_virtual_md_msg(struct ccci_modem *md, CCCI_CH ch, CCCI_MD_MSG msg, u32 resv);
struct ccci_modem *ccci_get_modem_by_id(int md_id);
int exec_ccci_kern_func_by_md_id(int md_id, unsigned int id, char *buf, unsigned int len);

#endif // __CCCI_CORE_H__