1 #ifndef __CCCI_CORE_H__
2 #define __CCCI_CORE_H__
4 #include <linux/wait.h>
5 #include <linux/skbuff.h>
6 #include <linux/timer.h>
7 #include <linux/types.h>
8 #include <linux/ktime.h>
9 #include <linux/netdevice.h>
10 #include <linux/platform_device.h>
11 #include <linux/wakelock.h>
12 #include <linux/kobject.h>
13 #include <linux/sysfs.h>
14 #include <mach/mt_ccci_common.h>
15 #include <mach/ccci_config.h>
16 #include "ccci_debug.h"
18 #define CCCI_DEV_NAME "ccci"
19 #define CCCI_MTU (3584-128)
20 //#define CCMNI_MTU 1500
21 #define CCCI_MAGIC_NUM 0xFFFFFFFF
23 struct ccci_md_attribute
25 struct attribute attr
;
26 struct ccci_modem
*modem
;
27 ssize_t (*show
)(struct ccci_modem
*md
, char *buf
);
28 ssize_t (*store
)(struct ccci_modem
*md
, const char *buf
, size_t count
);
31 #define CCCI_MD_ATTR(_modem, _name, _mode, _show, _store) \
32 static struct ccci_md_attribute ccci_md_attr_##_name = { \
33 .attr = {.name = __stringify(_name), .mode = _mode }, \
39 /* enumerations and macros */
41 u32 data
[2]; // do NOT assume data[1] is data length in Rx
42 #ifdef FEATURE_SEQ_CHECK_EN
50 } __attribute__ ((packed
)); // not necessary, but it's a good gesture, :)
/*
 * Modem state machine values, for CCCI internal use.
 * NOTE(review): the typedef opener and one interior source line (between
 * RESET and RX_IRQ) were lost in this copy; assumed to be non-enumerator
 * lines — verify enumerator values against upstream before relying on them.
 */
typedef enum {
	INVALID = 0,	/* no traffic */
	GATED,		/* broadcast by modem driver, no traffic */
	BOOTING,	/* broadcast by modem driver */
	READY,		/* broadcast by port_kernel */
	EXCEPTION,	/* broadcast by port_kernel */
	RESET,		/* broadcast by modem driver, no traffic */

	/* the entries below are event codes, never legal as md->md_state */
	RX_IRQ,		/* broadcast by modem driver, only for NAPI! */
	TX_IRQ,		/* broadcast by modem driver, only for network! */
	TX_FULL,	/* broadcast by modem driver, only for network! */
	BOOT_FAIL,	/* broadcast by port_kernel */
} MD_STATE;	/* for CCCI internal */
71 MD_BOOT_STAGE_EXCEPTION
= 3
72 }MD_BOOT_STAGE
; // for other module
84 /* MODEM MAUI Exception header (4 bytes)*/
85 typedef struct _exception_record_header_t
{
89 } __attribute__ ((packed
)) EX_HEADER_T
;
91 /* MODEM MAUI Environment information (164 bytes) */
92 typedef struct _ex_environment_info_t
{
93 u8 boot_mode
; /* offset: +0x10*/
96 u8 status
; /* offset: +0x21, length: 1 */
97 u8 ELM_status
; /* offset: +0x22, length: 1 */
99 } __attribute__ ((packed
)) EX_ENVINFO_T
;
101 /* MODEM MAUI Special for fatal error (8 bytes)*/
102 typedef struct _ex_fatalerror_code_t
{
105 } __attribute__ ((packed
)) EX_FATALERR_CODE_T
;
107 /* MODEM MAUI fatal error (296 bytes)*/
108 typedef struct _ex_fatalerror_t
{
109 EX_FATALERR_CODE_T error_code
;
111 } __attribute__ ((packed
)) EX_FATALERR_T
;
113 /* MODEM MAUI Assert fail (296 bytes)*/
114 typedef struct _ex_assert_fail_t
{
119 } __attribute__ ((packed
)) EX_ASSERTFAIL_T
;
121 /* MODEM MAUI Globally exported data structure (300 bytes) */
123 EX_FATALERR_T fatalerr
;
124 EX_ASSERTFAIL_T
assert;
125 } __attribute__ ((packed
)) EX_CONTENT_T
;
127 /* MODEM MAUI Standard structure of an exception log ( */
128 typedef struct _ex_exception_log_t
{
131 EX_ENVINFO_T envinfo
;
133 EX_CONTENT_T content
;
134 } __attribute__ ((packed
)) EX_LOG_T
;
136 typedef struct _ccci_msg
{
138 u32 magic
; // For mail box magic number
139 u32 addr
; // For stream start addr
140 u32 data0
; // For ccci common data[0]
143 u32 id
; // For mail box message id
144 u32 len
; // For stream len
145 u32 data1
; // For ccci common data[1]
149 } __attribute__ ((packed
)) ccci_msg_t
;
151 typedef struct dump_debug_info
{
154 unsigned int more_info
;
159 unsigned int parameters
[3];
168 unsigned char execution_unit
[9]; // 8+1
171 unsigned int parameters
[3];
174 unsigned char execution_unit
[9];
178 unsigned char execution_unit
[9];
179 unsigned int err_code
[2];
187 void (*platform_call
)(void *data
);
203 * This tells request free routine how it handles skb.
204 * The CCCI request structure will always be recycled, but its skb can have different policy.
205 * CCCI request can work as just a wrapper, because the network subsystem will handle the skb itself.
206 * Tx: policy is determined by sender;
207 * Rx: policy is determined by receiver;
210 NOOP
= 0, // don't handle the skb, just recycle the request wrapper
211 RECYCLE
, // put the skb back into our pool
212 FREE
, // simply free the skb
216 #define CCCI_REQUEST_TRACE_DEPTH 3
219 void *gpd
; // virtual address for CPU
221 struct list_head entry
;
223 char blocking
; // only for Tx
224 char state
; // only update by buffer manager
227 dma_addr_t gpd_addr
; // physical address for DMA
228 dma_addr_t data_buffer_ptr_saved
;
234 struct ccci_port_ops
{
236 int (*init
)(struct ccci_port
*port
);
237 int (*recv_request
)(struct ccci_port
*port
, struct ccci_request
* req
);
239 int (*req_match
)(struct ccci_port
*port
, struct ccci_request
* req
);
240 void (*md_state_notice
)(struct ccci_port
*port
, MD_STATE state
);
244 // don't change the sequence unless you modified modem drivers as well
249 * 0xF? is used as invalid index number, all virtual ports should use queue 0, but not 0xF?.
250 * always access queue index by using PORT_TXQ_INDEX and PORT_RXQ_INDEX macros.
251 * modem driver should always use >valid_queue_number to check invalid index, but not
252 * using ==0xF? style.
254 * here is a nasty trick, we assume no modem provide more than 0xF0 queues, so we use
255 * the lower 4 bit to smuggle info for network ports.
256 * Attention, in this trick we assume hardware queue index for net port will not exceed 0xF.
257 * check NET_ACK_TXQ_INDEX@port_net.c
259 unsigned char txq_index
;
260 unsigned char rxq_index
;
261 unsigned char txq_exp_index
;
262 unsigned char rxq_exp_index
;
264 struct ccci_port_ops
*ops
;
265 // device node related
268 // uninitialized in definition, always put them at the end
269 struct ccci_modem
*modem
;
272 struct list_head entry
;
274 * the Tx and Rx flows are asymmetric because ports are multiplexed on queues.
275 * Tx: data blocks are sent directly to the queue's list, so a port won't maintain a Tx list. It only
276 provide a wait_queue_head for blocking write.
277 * Rx: due to modem needs to dispatch Rx packet as quickly as possible, so port needs a
278 * Rx list to hold packets.
280 struct list_head rx_req_list
;
281 spinlock_t rx_req_lock
;
282 wait_queue_head_t rx_wq
; // for uplayer user
285 struct wake_lock rx_wakelock
;
286 unsigned int tx_busy_count
;
287 unsigned int rx_busy_count
;
289 #define PORT_F_ALLOW_DROP (1<<0) // packet will be dropped if port's Rx buffer full
290 #define PORT_F_RX_FULLED (1<<1) // rx buffer has been full once
291 #define PORT_F_USER_HEADER (1<<2) // CCCI header will be provided by user, but not by CCCI
292 #define PORT_F_RX_EXCLUSIVE (1<<3) // Rx queue only has this one port
/* per-modem configuration: image load type and a MD_SETTING_* bitmask */
struct ccci_modem_cfg {
	unsigned int load_type;		/* image load type currently in use */
	unsigned int load_type_saving;	/* load type pending to be saved */
	unsigned int setting;		/* bitmask of MD_SETTING_* flags below */
};
299 #define MD_SETTING_ENABLE (1<<0)
300 #define MD_SETTING_RELOAD (1<<1)
301 #define MD_SETTING_FIRST_BOOT (1<<2) // this is the first time of boot up
302 #define MD_SETTING_STOP_RETRY_BOOT (1<<3)
303 #define MD_SETTING_DUMMY (1<<7)
305 struct ccci_mem_layout
// all from AP view, AP has no haredware remap after MT6592
308 void __iomem
* md_region_vir
;
309 phys_addr_t md_region_phy
;
310 unsigned int md_region_size
;
312 void __iomem
* dsp_region_vir
;
313 phys_addr_t dsp_region_phy
;
314 unsigned int dsp_region_size
;
316 void __iomem
* smem_region_vir
;
317 phys_addr_t smem_region_phy
;
318 unsigned int smem_region_size
;
319 unsigned int smem_offset_AP_to_MD
; // offset between AP and MD view of share memory
322 struct ccci_smem_layout
324 // total exception region
325 void __iomem
* ccci_exp_smem_base_vir
;
326 phys_addr_t ccci_exp_smem_base_phy
;
327 unsigned int ccci_exp_smem_size
;
328 unsigned int ccci_exp_dump_size
;
330 // how we dump exception region
331 void __iomem
* ccci_exp_smem_ccci_debug_vir
;
332 unsigned int ccci_exp_smem_ccci_debug_size
;
333 void __iomem
* ccci_exp_smem_mdss_debug_vir
;
334 unsigned int ccci_exp_smem_mdss_debug_size
;
336 // the address we parse MD exception record
337 void __iomem
* ccci_exp_rec_base_vir
;
/*
 * selector bits for ccci_modem_ops.dump_info().
 * NOTE(review): typedef opener/closer reconstructed from a truncated source;
 * the type name MODEM_DUMP_FLAG is taken from its use in dump_info().
 */
typedef enum {
	DUMP_FLAG_CCIF = (1 << 0),
	DUMP_FLAG_CLDMA = (1 << 1),
	DUMP_FLAG_REG = (1 << 2),
	DUMP_FLAG_SMEM = (1 << 3),
	DUMP_FLAG_IMAGE = (1 << 4),
	DUMP_FLAG_LAYOUT = (1 << 5),
} MODEM_DUMP_FLAG;
/*
 * WDT control flags for ccci_modem_ops.ee_callback().
 * NOTE(review): typedef opener/closer reconstructed from a truncated source;
 * the type name MODEM_EE_FLAG is taken from its use in ee_callback().
 */
typedef enum {
	EE_FLAG_ENABLE_WDT = (1 << 0),
	EE_FLAG_DISABLE_WDT = (1 << 1),
} MODEM_EE_FLAG;
354 #define MD_IMG_DUMP_SIZE (1<<8)
355 #define DSP_IMG_DUMP_SIZE (1<<9)
360 } LOW_POEWR_NOTIFY_TYPE
;
/* status bits maintained by the md_status_poller timer */
typedef enum {
	MD_STATUS_POLL_BUSY = (1 << 0),
	MD_STATUS_ASSERTED = (1 << 1),
} MD_STATUS_POLL_FLAG;
373 struct ccci_modem_ops
{
375 int (*init
)(struct ccci_modem
*md
);
376 int (*start
)(struct ccci_modem
*md
);
377 int (*reset
)(struct ccci_modem
*md
); // as pre-stop
378 int (*stop
)(struct ccci_modem
*md
, unsigned int timeout
);
379 int (*send_request
)(struct ccci_modem
*md
, unsigned char txqno
, struct ccci_request
*req
);
380 int (*give_more
)(struct ccci_modem
*md
, unsigned char rxqno
);
381 int (*write_room
)(struct ccci_modem
*md
, unsigned char txqno
);
382 int (*start_queue
)(struct ccci_modem
*md
, unsigned char qno
, DIRECTION dir
);
383 int (*stop_queue
)(struct ccci_modem
*md
, unsigned char qno
, DIRECTION dir
);
384 int (*napi_poll
)(struct ccci_modem
*md
, unsigned char rxqno
, struct napi_struct
*napi
,int weight
);
385 int (*send_runtime_data
)(struct ccci_modem
*md
, unsigned int sbp_code
);
386 int (*broadcast_state
)(struct ccci_modem
*md
, MD_STATE state
);
387 int (*force_assert
)(struct ccci_modem
*md
, MD_COMM_TYPE type
);
388 int (*dump_info
)(struct ccci_modem
*md
, MODEM_DUMP_FLAG flag
, void *buff
, int length
);
389 struct ccci_port
* (*get_port_by_minor
)(struct ccci_modem
*md
, int minor
);
391 * here we assume Rx and Tx channels are in the same address space,
392 * and Rx channel should be check first, so user can save one comparison if it always sends
393 * in Rx channel ID to identify a port.
395 struct ccci_port
* (*get_port_by_channel
)(struct ccci_modem
*md
, CCCI_CH ch
);
396 int (*low_power_notify
)(struct ccci_modem
*md
, LOW_POEWR_NOTIFY_TYPE type
, int level
);
397 int (*ee_callback
)(struct ccci_modem
*md
, MODEM_EE_FLAG flag
);
402 unsigned char *private_data
;
403 struct list_head rx_ch_ports
[CCCI_MAX_CH_NUM
]; // port list of each Rx channel, for Rx dispatching
404 short seq_nums
[2][CCCI_MAX_CH_NUM
];
405 unsigned int capability
;
406 volatile MD_STATE md_state
; // check comments below, put it here for cache benefit
407 struct ccci_modem_ops
*ops
;
409 struct ccci_port
*ports
;
411 struct list_head entry
;
412 unsigned char port_number
;
413 char post_fix
[IMG_POSTFIX_LEN
];
415 unsigned int minor_base
;
417 struct ccci_mem_layout mem_layout
;
418 struct ccci_smem_layout smem_layout
;
419 struct ccci_image_info img_info
[IMG_NUM
];
420 unsigned int sim_type
;
421 unsigned int sbp_code
;
422 unsigned int sbp_code_default
;
423 unsigned char critical_user_active
[4];
424 unsigned int md_img_exist
[MAX_IMG_NUM
];
425 struct platform_device
*plat_dev
;
427 * the following members are readonly for CCCI core. they are maintained by modem and
429 * port_kernel.c should not be considered as part of CCCI core, we just move common part
430 * of modem message handling into this file. current modem all follows the same message
431 * protocol during bootup and exception. if a future modem abandoned this protocol, we can
432 * simply replace function set of kernel port to support it.
434 volatile MD_BOOT_STAGE boot_stage
;
435 MD_EX_STAGE ex_stage
; // only for logging
436 struct ccci_modem_cfg config
;
437 struct timer_list bootup_timer
;
438 struct timer_list ex_monitor
;
439 struct timer_list ex_monitor2
;
440 struct timer_list md_status_poller
;
441 struct timer_list md_status_timeout
;
442 unsigned int md_status_poller_flag
;
443 spinlock_t ctrl_lock
;
444 volatile unsigned int ee_info_flag
;
445 DEBUG_INFO_T debug_info
;
446 unsigned char ex_type
;
449 //unsigned char private_data[0]; // do NOT use this manner, otherwise spinlock inside private_data will trigger alignment exception
453 extern void ccci_free_req(struct ccci_request
*req
);
454 extern void ccci_md_exception_notify(struct ccci_modem
*md
, MD_EX_STAGE stage
);
456 static inline void ccci_setup_channel_mapping(struct ccci_modem
*md
)
459 struct ccci_port
*port
= NULL
;
461 for(i
=0; i
<ARRAY_SIZE(md
->rx_ch_ports
); i
++) {
462 INIT_LIST_HEAD(&md
->rx_ch_ports
[i
]); // clear original list
464 for(i
=0; i
<md
->port_number
; i
++) {
465 list_add_tail(&md
->ports
[i
].entry
, &md
->rx_ch_ports
[md
->ports
[i
].rx_ch
]);
467 for(i
=0; i
<ARRAY_SIZE(md
->rx_ch_ports
); i
++) {
468 if(!list_empty(&md
->rx_ch_ports
[i
])) {
469 CCCI_INF_MSG(md
->index
, CORE
, "CH%d ports:", i
);
470 list_for_each_entry(port
, &md
->rx_ch_ports
[i
], entry
) {
471 printk("%s(%d/%d) ", port
->name
, port
->rx_ch
, port
->tx_ch
);
478 static inline void ccci_reset_seq_num(struct ccci_modem
*md
)
480 // it's redundant to use 2 arrays, but this makes sequence checking easy
481 memset(md
->seq_nums
[OUT
], 0, sizeof(md
->seq_nums
[OUT
]));
482 memset(md
->seq_nums
[IN
], -1, sizeof(md
->seq_nums
[IN
]));
/*
 * Stamp the outgoing packet's CCCI header with the next Tx sequence number
 * for its channel. As one channel can only use one hardware queue, it is
 * safe to call this under the hardware queue's lock.
 */
static inline void ccci_inc_tx_seq_num(struct ccci_modem *md, struct ccci_request *req)
{
#ifdef FEATURE_SEQ_CHECK_EN
	struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;

	/*
	 * FIX: bound-check against the ELEMENT count of the per-direction row,
	 * not its byte size. seq_nums rows are short[CCCI_MAX_CH_NUM], so the
	 * old "channel >= sizeof(md->seq_nums[OUT])" test compared against
	 * 2*CCCI_MAX_CH_NUM bytes and let out-of-range channels index past the
	 * end of the row.
	 */
	if (ccci_h->channel >= ARRAY_SIZE(md->seq_nums[OUT]) || ccci_h->channel < 0) {
		CCCI_INF_MSG(md->index, CORE, "ignore seq inc on channel %x\n", *(((u32 *)ccci_h) + 2));
		return;	/* for force assert channel, etc. */
	}
	ccci_h->seq_num = md->seq_nums[OUT][ccci_h->channel]++;
	ccci_h->assert_bit = 1;

	/*
	 * for RPC/FS channels, only set assert_bit when the modem is in its
	 * single-task boot phase; in the multi-task phase IPC tasks are
	 * preemptible, so strict sequence order cannot be guaranteed.
	 */
	if ((ccci_h->channel == CCCI_RPC_TX || ccci_h->channel == CCCI_FS_TX) && md->boot_stage != MD_BOOT_STAGE_1)
		ccci_h->assert_bit = 0;
#endif
}
505 #define PORT_TXQ_INDEX(p) ((p)->modem->md_state==EXCEPTION?(p)->txq_exp_index:(p)->txq_index)
506 #define PORT_RXQ_INDEX(p) ((p)->modem->md_state==EXCEPTION?(p)->rxq_exp_index:(p)->rxq_index)
509 * if send_request returns 0, then it's modem driver's duty to free the request, and caller should NOT reference the
510 * request any more. but if it returns error, calller should be responsible to free the request.
512 static inline int ccci_port_send_request(struct ccci_port
*port
, struct ccci_request
*req
)
514 struct ccci_modem
*md
= port
->modem
;
515 return md
->ops
->send_request(md
, PORT_TXQ_INDEX(port
), req
);
519 * if recv_request returns 0 or -CCCI_ERR_DROP_PACKET, then it's port's duty to free the request, and caller should
520 * NOT reference the request any more. but if it returns other error, caller should be responsible to free the request.
522 static inline int ccci_port_recv_request(struct ccci_modem
*md
, struct ccci_request
*req
)
524 struct ccci_header
*ccci_h
= (struct ccci_header
*)req
->skb
->data
;
525 struct ccci_port
*port
= NULL
;
526 struct list_head
*port_list
= NULL
;
527 int ret
= -CCCI_ERR_CHANNEL_NUM_MIS_MATCH
;
528 #ifdef FEATURE_SEQ_CHECK_EN
529 u16 channel
, seq_num
, assert_bit
;
533 if(unlikely(ccci_h
->channel
>= CCCI_MAX_CH_NUM
)) {
534 ret
= -CCCI_ERR_CHANNEL_NUM_MIS_MATCH
;
538 port_list
= &md
->rx_ch_ports
[ccci_h
->channel
];
539 list_for_each_entry(port
, port_list
, entry
) {
541 * multi-cast is not supported, because one port may freed or modified this request
542 * before another port can process it. but we still can use req->state to achive some
543 * kind of multi-cast if needed.
545 matched
= (port
->ops
->req_match
==NULL
)?(ccci_h
->channel
== port
->rx_ch
):port
->ops
->req_match(port
, req
);
547 #ifdef FEATURE_SEQ_CHECK_EN
548 channel
= ccci_h
->channel
;
549 seq_num
= ccci_h
->seq_num
;
550 assert_bit
= ccci_h
->assert_bit
;
552 ret
= port
->ops
->recv_request(port
, req
);
553 #ifdef FEATURE_SEQ_CHECK_EN
554 if(ret
>=0 || ret
==-CCCI_ERR_DROP_PACKET
) {
555 if(assert_bit
&& ((seq_num
- md
->seq_nums
[IN
][channel
]) & 0x7FFF) != 1) {
556 CCCI_ERR_MSG(md
->index
, CORE
, "port %s seq number out-of-order %d->%d\n",
557 port
->name
, seq_num
, md
->seq_nums
[IN
][channel
]);
558 md
->ops
->force_assert(md
, CCIF_INTR_SEQ
);
560 //CCCI_INF_MSG(md->index, CORE, "ch %d seq %d->%d %d\n", channel, md->seq_nums[IN][channel], seq_num, assert_bit);
561 md
->seq_nums
[IN
][channel
] = seq_num
;
565 if(ret
== -CCCI_ERR_PORT_RX_FULL
)
566 port
->rx_busy_count
++;
572 if(ret
== -CCCI_ERR_CHANNEL_NUM_MIS_MATCH
) {
573 CCCI_ERR_MSG(md
->index
, CORE
, "drop on not supported channel %d\n", ccci_h
->channel
);
574 list_del(&req
->entry
);
575 req
->policy
= RECYCLE
;
577 ret
= -CCCI_ERR_DROP_PACKET
;
583 * caller should lock with port->rx_req_lock
585 static inline int ccci_port_ask_more_request(struct ccci_port
*port
)
587 struct ccci_modem
*md
= port
->modem
;
590 if(port
->flags
& PORT_F_RX_FULLED
)
591 ret
= md
->ops
->give_more(port
->modem
, PORT_RXQ_INDEX(port
));
597 // structure initialize
598 static inline void ccci_port_struct_init(struct ccci_port
*port
, struct ccci_modem
*md
)
600 INIT_LIST_HEAD(&port
->rx_req_list
);
601 spin_lock_init(&port
->rx_req_lock
);
602 INIT_LIST_HEAD(&port
->entry
);
603 init_waitqueue_head(&port
->rx_wq
);
605 port
->tx_busy_count
= 0;
606 port
->rx_busy_count
= 0;
607 atomic_set(&port
->usage_cnt
, 0);
609 wake_lock_init(&port
->rx_wakelock
, WAKE_LOCK_SUSPEND
, port
->name
);
613 * only used during allocate buffer pool, should NOT be used after allocated a request
615 static inline void ccci_request_struct_init(struct ccci_request
*req
)
/*
 * NOTE(review): the source is truncated here — the original opening brace
 * and any field initializations between the signature and the poisoning
 * below were lost; verify against upstream before relying on this.
 */
621 * as this request is not in any list, but pay ATTENTION, this will cause list_add(req) fail due
622 * to it's not pointing to itself.
/* poison the list entry so the request is detectably not on any list */
624 req
->entry
.next
= LIST_POISON1
;
625 req
->entry
.prev
= LIST_POISON2
;
628 struct ccci_modem
*ccci_allocate_modem(int private_size
);
629 int ccci_register_modem(struct ccci_modem
*modem
);
630 int ccci_register_dev_node(const char *name
, int major_id
, int minor
);
631 struct ccci_port
*ccci_get_port_for_node(int major
, int minor
);
632 int ccci_send_msg_to_md(struct ccci_modem
*md
, CCCI_CH ch
, CCCI_MD_MSG msg
, u32 resv
, int blocking
);
633 int ccci_send_virtual_md_msg(struct ccci_modem
*md
, CCCI_CH ch
, CCCI_MD_MSG msg
, u32 resv
);
634 struct ccci_modem
*ccci_get_modem_by_id(int md_id
);
635 int exec_ccci_kern_func_by_md_id(int md_id
, unsigned int id
, char *buf
, unsigned int len
);
637 #endif // __CCCI_CORE_H__