Commit | Line | Data |
---|---|---|
6fa3eb70 S |
1 | #include <linux/module.h> |
2 | #include <linux/proc_fs.h> | |
3 | #include <linux/uaccess.h> | |
4 | #include <linux/kthread.h> | |
5 | #include <linux/mmc/card.h> | |
6 | #include <linux/mmc/host.h> | |
7 | #include <linux/mmc/sdio_func.h> | |
8 | ||
9 | #ifdef CHANGE_SCHED_POLICY | |
10 | #include <linux/sched.h> | |
11 | #endif | |
12 | ||
13 | #include "mt_sd.h" | |
14 | #include "sdio_autok.h" | |
15 | ||
16 | #define PROC_AUTOK_NAME "autok" | |
17 | ||
18 | #ifndef MTK_SDIO30_ONLINE_TUNING_SUPPORT | |
19 | #define DMA_ON 0 | |
20 | #define DMA_OFF 1 | |
21 | #endif | |
22 | ||
23 | struct proc_dir_entry *s_proc = NULL; | |
24 | ||
25 | #ifdef USE_KERNEL_THREAD | |
26 | struct sdio_autok_thread_data g_autok_thread_data; | |
27 | struct task_struct *task; | |
28 | #else // USE_KERNEL_THREAD | |
29 | struct workqueue_struct *g_autok_wq; | |
30 | struct sdio_autok_workqueue_data g_autok_thread_data; | |
31 | #endif // USE_KERNEL_THREAD | |
32 | ||
33 | unsigned int autok_done = 0; | |
34 | ||
35 | extern int sdio_autok_processed; | |
36 | ||
37 | extern void mmc_set_clock(struct mmc_host *host, unsigned int hz); | |
38 | ||
39 | #ifdef USE_KERNEL_THREAD | |
40 | static int autok_thread_func(void *data); | |
41 | #else // USE_KERNEL_THREAD | |
42 | static int autok_thread_func(struct work_struct *data); | |
43 | #endif // USE_KERNEL_THREAD | |
44 | ||
#if 0//ndef USE_KERNEL_THREAD
/*
 * NOTE(review): dead code. The two structs below are private copies of the
 * kernel's workqueue internals (see kernel/workqueue.c), presumably kept as
 * a reference while implementing get_wq_task() -- TODO confirm. They are
 * compiled out by "#if 0" and could be deleted outright.
 */

/*
 * The poor guys doing the actual heavy lifting. All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head entry;   /* L: while idle */
		struct hlist_node hentry; /* L: while busy */
	};

	struct work_struct *current_work;         /* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head scheduled;               /* L: scheduled works */
	struct task_struct *task;                 /* I: worker task */
	struct global_cwq *gcwq;                  /* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long last_active;                /* L: last active timestamp */
	unsigned int flags;                       /* X: flags */
	int id;                                   /* I: worker id */
	struct work_struct rebind_work;           /* L: rebind worker to cpu */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int flags; /* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu *pcpu;
		struct cpu_workqueue_struct *single;
		unsigned long v;
	} cpu_wq;               /* I: cwq's */
	struct list_head list;  /* W: list of all workqueues */

	struct mutex flush_mutex;              /* protects wq flushing */
	int work_color;                        /* F: current work color */
	int flush_color;                       /* F: current flush color */
	atomic_t nr_cwqs_to_flush;             /* flush in progress */
	struct wq_flusher *first_flusher;      /* F: first flusher */
	struct list_head flusher_queue;        /* F: flush waiters */
	struct list_head flusher_overflow;     /* F: flush overflow list */

	mayday_mask_t mayday_mask;             /* cpus requesting rescue */
	struct worker *rescuer;                /* I: rescue worker */

	int nr_drainers;                       /* W: drain in progress */
	int saved_max_active;                  /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
	char name[];                           /* I: workqueue name */
};

#endif // USE_KERNEL_THREAD
103 | ||
104 | extern void msdc_ungate_clock(struct msdc_host* host); | |
105 | extern void msdc_gate_clock(struct msdc_host* host, int delay); | |
106 | ||
107 | #define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO) | |
108 | #define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO) | |
109 | #define msdc_dma_status() ((sdr_read32(MSDC_CFG) & MSDC_CFG_PIO) >> 3) | |
110 | ||
111 | void autok_claim_host(struct msdc_host *host) | |
112 | { | |
113 | mmc_claim_host(host->mmc); | |
114 | printk("[%s] msdc%d host claimed\n", __func__, host->id); | |
115 | } | |
116 | ||
117 | void autok_release_host(struct msdc_host *host) | |
118 | { | |
119 | mmc_release_host(host->mmc); | |
120 | printk("[%s] msdc%d host released\n", __func__, host->id); | |
121 | } | |
122 | ||
123 | ||
124 | static int autok_writeproc(struct file *file,const char *buffer, | |
125 | unsigned long count, void *data) | |
126 | { | |
127 | char stage; | |
128 | char bufferContent[PROC_BUF_SIZE]; | |
129 | char *bufContIdx; | |
130 | struct msdc_host *host; | |
131 | struct sdio_func *sdioFunc; | |
132 | struct mmc_host *mmc; | |
133 | int len; | |
134 | int procParamsOffset = 0; | |
135 | int i; | |
136 | ||
137 | printk(KERN_INFO "[%s] (/proc/%s) called\n", __func__, PROC_AUTOK_NAME); | |
138 | ||
139 | if(count >= PROC_BUF_SIZE) | |
140 | { | |
141 | printk(KERN_INFO "[%s] proc input size (%ld) is larger than buffer size (%d) \n", __func__, count, PROC_BUF_SIZE); | |
142 | return -EFAULT; | |
143 | } | |
144 | ||
145 | if (copy_from_user(bufferContent, buffer, count)) | |
146 | return -EFAULT; | |
147 | ||
148 | bufferContent[count] = '\0'; | |
149 | printk(KERN_INFO "[%s] bufferContent: (count = %ld)\n", __func__, count); | |
150 | for(i = 0; i < count; i++) | |
151 | printk(" %x ", bufferContent[i]); | |
152 | printk("\n"); | |
153 | ||
154 | // Parsing bufferContent | |
155 | bufContIdx = bufferContent; | |
156 | sdioFunc = (struct sdio_func *)(*(int *)bufContIdx); | |
157 | bufContIdx += 4; | |
158 | procParamsOffset += 4; | |
159 | stage = *bufContIdx; | |
160 | bufContIdx += 1; | |
161 | procParamsOffset += 1; | |
162 | if(count <= procParamsOffset) | |
163 | { | |
164 | printk(KERN_INFO "[%s] count <= procParamsOffset, count = %d, procParamsOffset = %d\n", __func__, count, procParamsOffset); | |
165 | stage = 1; | |
166 | } | |
167 | else | |
168 | { | |
169 | memcpy(&len, bufContIdx, sizeof(int)); | |
170 | bufContIdx = bufContIdx + sizeof(int); | |
171 | procParamsOffset += sizeof(int); | |
172 | if(len > count - procParamsOffset) | |
173 | { | |
174 | printk(KERN_INFO "[%s] autok stage 1 result len (%d) is larger than actual proc input size (%ld) \n", __func__, len, count - procParamsOffset); | |
175 | return -EFAULT; | |
176 | } | |
177 | ||
178 | memcpy(g_autok_thread_data.autok_stage1_result, bufContIdx, len); | |
179 | g_autok_thread_data.len = len; | |
180 | ||
181 | printk(KERN_INFO "[%s] autok_stage1_result: (len = %d)\n", __func__, len); | |
182 | for(i = 0; i < len; i++) | |
183 | printk(" %x ", g_autok_thread_data.autok_stage1_result[i]); | |
184 | printk("\n"); | |
185 | } | |
186 | ||
187 | printk(KERN_INFO "[%s] stage = %d\n", __func__, stage); | |
188 | ||
189 | if(sdioFunc == NULL) | |
190 | { | |
191 | printk(KERN_INFO "[%s] sdioFunc = NULL\n", __func__); | |
192 | return -EFAULT; | |
193 | } | |
194 | ||
195 | mmc = sdioFunc->card->host; | |
196 | host = mmc_priv(mmc); | |
197 | ||
198 | // Set clock to card max clock | |
199 | sdio_autok_processed = 1; | |
200 | //printk(KERN_INFO "[%s] mmc->ios.clock = %d, mmc->ios.power_mode = %d\n", __func__, mmc->ios.clock, mmc->ios.power_mode); | |
201 | mmc_set_clock(mmc, mmc->ios.clock); | |
202 | ||
203 | g_autok_thread_data.host = host; | |
204 | g_autok_thread_data.sdioFunc = sdioFunc; | |
205 | g_autok_thread_data.stage = stage; | |
206 | ||
207 | autok_done = 0; | |
208 | ||
209 | #ifdef USE_KERNEL_THREAD | |
210 | task = kthread_run(&autok_thread_func,(void *)(&g_autok_thread_data),"autokp"); | |
211 | //if(!IS_ERR(task)) | |
212 | // wake_up_process(task); | |
213 | #else // USE_KERNEL_THREAD | |
214 | queue_delayed_work_on(0, g_autok_wq, (struct delayed_work *)&g_autok_thread_data, msecs_to_jiffies(0)); | |
215 | #endif // USE_KERNEL_THREAD | |
216 | ||
217 | return count; | |
218 | } | |
219 | ||
220 | ||
221 | static int autok_readproc(char *page, char **start, off_t off, | |
222 | int count, int *eof, void *data) | |
223 | { | |
224 | void *param; | |
225 | int len; | |
226 | int i; | |
227 | char *p = page; | |
228 | ||
229 | printk(KERN_INFO "[%s] (/proc/%s) called\n", __func__, PROC_AUTOK_NAME); | |
230 | ||
231 | // read auto-K result from auto-K callback function | |
232 | msdc_autok_stg1_data_get(¶m, &len); | |
233 | ||
234 | memcpy(p, &len, sizeof(int)); | |
235 | p = p + sizeof(int); | |
236 | memcpy(p, param, len); | |
237 | ||
238 | printk(KERN_INFO "[%s] page = (len = %d)\n", __func__, len); | |
239 | for(i = 0; i < len + sizeof(int); i++) | |
240 | { | |
241 | printk(" %x ", page[i]); | |
242 | } | |
243 | ||
244 | printk("\n"); | |
245 | ||
246 | return len + sizeof(int); | |
247 | } | |
248 | ||
249 | extern unsigned int autok_get_current_vcore_offset(void); | |
250 | extern void mt_cpufreq_disable(unsigned int type, bool disabled); | |
251 | ||
252 | #ifdef USE_KERNEL_THREAD | |
253 | static int autok_thread_func(void *data) | |
254 | #else // USE_KERNEL_THREAD | |
255 | static int autok_thread_func(struct work_struct *data) | |
256 | #endif // USE_KERNEL_THREAD | |
257 | { | |
258 | int err = 0; | |
259 | unsigned int vcore_uv = 0; | |
260 | void *msdc_param = NULL; | |
261 | int len = 0; | |
262 | ||
263 | #ifdef USE_KERNEL_THREAD | |
264 | struct sdio_autok_thread_data *autok_thread_data = (struct sdio_autok_thread_data *)data; | |
265 | #else // USE_KERNEL_THREAD | |
266 | struct sdio_autok_workqueue_data *autok_thread_data = (struct sdio_autok_workqueue_data *)data; | |
267 | #ifdef CHANGE_SCHED_POLICY | |
268 | struct task_struct *ltask = (struct task_struct *)get_wq_task(g_autok_wq); | |
269 | #endif // CHANGE_SCHED_POLICY | |
270 | #endif // USE_KERNEL_THREAD | |
271 | ||
272 | struct msdc_host *host = autok_thread_data->host; | |
273 | struct sdio_func *sdioFunc = autok_thread_data->sdioFunc; | |
274 | char stage = autok_thread_data->stage; | |
275 | char *envp[2]; | |
276 | char *lteprocenvp[2]; | |
277 | u32 base = host->base; | |
278 | u32 dma = msdc_dma_status(); | |
279 | #ifdef CHANGE_SCHED_POLICY | |
280 | struct sched_param param; | |
281 | int sched_policy; | |
282 | ||
283 | #ifdef SCHED_POLICY_INFO | |
284 | sched_policy = sched_getscheduler(0); | |
285 | printk("[%s] orig. sched policy: %d\n", __func__, sched_policy); | |
286 | ||
287 | param.sched_priority = sched_get_priority_max(SCHED_FIFO); | |
288 | if( sched_setscheduler( 0, SCHED_FIFO, ¶m ) == -1 ) | |
289 | { | |
290 | printk("[%s] sched_setscheduler fail\n", __func__); | |
291 | } | |
292 | ||
293 | sched_policy = sched_getscheduler(0); | |
294 | printk("[%s] sched policy FIFO: %d\n", __func__, sched_policy); | |
295 | #endif | |
296 | ||
297 | //param.sched_priority = sched_get_priority_max(SCHED_RR); | |
298 | param.sched_priority = 1; | |
299 | #ifdef USE_KERNEL_THREAD | |
300 | if( sched_setscheduler( task, SCHED_RR, ¶m ) == -1 ) | |
301 | { | |
302 | printk("[%s] sched_setscheduler fail\n", __func__); | |
303 | } | |
304 | #else // USE_KERNEL_THREAD | |
305 | if( sched_setscheduler( ltask, SCHED_RR, ¶m ) == -1 ) | |
306 | { | |
307 | printk("[%s] sched_setscheduler fail\n", __func__); | |
308 | } | |
309 | #endif // USE_KERNEL_THREAD | |
310 | ||
311 | #ifdef SCHED_POLICY_INFO | |
312 | sched_policy = sched_getscheduler(0); | |
313 | printk("[%s] modified sched policy: %d\n", __func__, sched_policy); | |
314 | #endif | |
315 | #endif | |
316 | ||
317 | mt_cpufreq_disable(0, true); | |
318 | ||
319 | vcore_uv = autok_get_current_vcore_offset(); | |
320 | ||
321 | #ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT | |
322 | atomic_set(&host->ot_work.ot_disable, 1); | |
323 | #endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT | |
324 | ||
325 | //sdio_claim_host(sdioFunc); | |
326 | autok_claim_host(host); | |
327 | ||
328 | msdc_ungate_clock(host); | |
329 | ||
330 | /* Set PIO mode */ | |
331 | msdc_dma_off(); | |
332 | ||
333 | if(stage == 1) { | |
334 | // call stage 1 auto-K callback function | |
335 | msdc_autok_stg1_cal(host, vcore_uv); | |
336 | ||
337 | // read auto-K result from auto-K callback function | |
338 | if(msdc_autok_stg1_data_get(&msdc_param, &len) == 0) | |
339 | { | |
340 | // apply MSDC parameter for current vcore | |
341 | msdc_autok_apply_param(host, msdc_param, len, vcore_uv); | |
342 | } | |
343 | else | |
344 | printk("[%s] msdc_autok_stg1_data_get error\n", __func__); | |
345 | ||
346 | envp[0] = "FROM=sdio_autok"; | |
347 | envp[1] = NULL; | |
348 | err = kobject_uevent_env(&host->mmc->class_dev.kobj, KOBJ_ONLINE, envp); | |
349 | if(err < 0) | |
350 | printk(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err); | |
351 | } else if(stage == 2) { | |
352 | // call stage 2 auto-K callback function | |
353 | msdc_autok_stg2_cal(host, autok_thread_data->autok_stage1_result, autok_thread_data->len, vcore_uv); | |
354 | } else { | |
355 | printk(KERN_INFO "[%s] stage %d doesn't support in auto-K\n", __func__, stage); | |
356 | //sdio_release_host(sdioFunc); | |
357 | autok_release_host(host); | |
358 | mt_cpufreq_disable(0, false); | |
359 | return -EFAULT; | |
360 | } | |
361 | ||
362 | if(dma == DMA_ON) | |
363 | msdc_dma_on(); | |
364 | msdc_gate_clock(host,1); | |
365 | ||
366 | ||
367 | //sdio_release_host(sdioFunc); | |
368 | autok_release_host(host); | |
369 | ||
370 | vcore_uv = autok_get_current_vcore_offset(); | |
371 | ||
372 | mt_cpufreq_disable(0, false); | |
373 | ||
374 | #ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT | |
375 | atomic_set(&host->ot_work.autok_done, 1); | |
376 | atomic_set(&host->ot_work.ot_disable, 0); | |
377 | #endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT | |
378 | ||
379 | autok_done = 1; | |
380 | ||
381 | lteprocenvp[0] = "FROM=autok_done"; | |
382 | lteprocenvp[1] = NULL; | |
383 | err = kobject_uevent_env(&host->mmc->class_dev.kobj, KOBJ_ONLINE, lteprocenvp); | |
384 | if(err < 0) | |
385 | printk(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err); | |
386 | ||
387 | return 0; | |
388 | } | |
389 | ||
#ifdef LINUX310
/*
 * proc interface for kernels >= 3.10, where the legacy read_proc/write_proc
 * hooks were removed in favor of file_operations.
 *
 * NOTE(review): autok_readproc()/autok_writeproc() still have the legacy
 * read_proc_t/write_proc_t signatures, which do NOT match the
 * file_operations .read/.write prototypes (ssize_t (*)(struct file *,
 * char __user *, size_t, loff_t *)). On LINUX310 builds this is at best an
 * incompatible-pointer warning and will misbehave at runtime; the handlers
 * should be ported to the fops signatures. TODO confirm build config.
 */
static const struct file_operations autok_proc_ops = {
	.owner = THIS_MODULE,
	.read = autok_readproc,
	.write = autok_writeproc,
};
#endif
397 | ||
398 | static int autok_module_init(void) | |
399 | { | |
400 | #ifdef LINUX310 | |
401 | s_proc = proc_create(PROC_AUTOK_NAME, 0660, NULL, &autok_proc_ops); | |
402 | #else | |
403 | s_proc = create_proc_entry(PROC_AUTOK_NAME, 0660, NULL); | |
404 | #endif | |
405 | ||
406 | if (s_proc == NULL) { | |
407 | remove_proc_entry(PROC_AUTOK_NAME, NULL); | |
408 | printk(KERN_ALERT "Error: Could not initialize /proc/%s\n", | |
409 | PROC_AUTOK_NAME); | |
410 | return -ENOMEM; | |
411 | } | |
412 | #ifndef LINUX310 | |
413 | s_proc->write_proc = autok_writeproc; | |
414 | s_proc->read_proc = autok_readproc; | |
415 | #endif | |
416 | s_proc->gid = 1000; | |
417 | ||
418 | printk(KERN_INFO "/proc/%s created\n", PROC_AUTOK_NAME); | |
419 | ||
420 | #ifdef USE_KERNEL_THREAD | |
421 | //task = kthread_create(&autok_thread_func,(void *)(&g_autok_thread_data),"autokp"); | |
422 | #else // USE_KERNEL_THREAD | |
423 | g_autok_wq = create_workqueue("autok_queue"); | |
424 | INIT_DELAYED_WORK((struct delayed_work *)(&g_autok_thread_data), autok_thread_func); | |
425 | #endif // USE_KERNEL_THREAD | |
426 | ||
427 | return 0; /* everything is ok */ | |
428 | } | |
429 | ||
430 | static void autok_module_exit(void) | |
431 | { | |
432 | remove_proc_entry(PROC_AUTOK_NAME, NULL); | |
433 | ||
434 | #ifdef USE_KERNEL_THREAD | |
435 | //ret = kthread_stop(task); | |
436 | #else // USE_KERNEL_THREAD | |
437 | flush_workqueue(g_autok_wq); | |
438 | destroy_workqueue(g_autok_wq); | |
439 | #endif // USE_KERNEL_THREAD | |
440 | ||
441 | printk(KERN_INFO "/proc/%s removed\n", PROC_AUTOK_NAME); | |
442 | } | |
443 | ||
444 | module_init(autok_module_init); | |
445 | module_exit(autok_module_exit); | |
446 | ||
447 | MODULE_AUTHOR("MediaTek Inc."); | |
448 | MODULE_DESCRIPTION("MediaTek SDIO Auto-K Proc"); | |
449 | MODULE_LICENSE("GPL"); |