/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include "seq_clientmgr.h"
#include <sound/seq_instr.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
MODULE_LICENSE("GPL");

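/*
 * Lock/unlock the per-list "ops" protection: lists without the
 * SNDRV_SEQ_INSTR_FLG_DIRECT flag take the ops spinlock with IRQs
 * disabled (flags saved in the list itself); DIRECT lists use the
 * ops semaphore and may sleep.
 */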
static void snd_instr_lock_ops(snd_seq_kinstr_list_t *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_lock_irqsave(&list->ops_lock, list->ops_flags);
        } else {
                down(&list->ops_mutex);
        }
}

static void snd_instr_unlock_ops(snd_seq_kinstr_list_t *list)
{
        if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
                spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
        } else {
                up(&list->ops_mutex);
        }
}

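/*
 * Allocate a new kernel instrument; add_len extra bytes are reserved
 * after the structure for type-specific data.  GFP_ATOMIC is used when
 * the atomic flag is set.
 */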
static snd_seq_kinstr_t *snd_seq_instr_new(int add_len, int atomic)
{
        snd_seq_kinstr_t *instr;

        instr = kzalloc(sizeof(snd_seq_kinstr_t) + add_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (instr == NULL)
                return NULL;
        instr->add_len = add_len;
        return instr;
}

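/*
 * Release an instrument: call the owner's remove callback first and
 * free the memory only if the callback (if any) succeeded.
 */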
static int snd_seq_instr_free(snd_seq_kinstr_t *instr, int atomic)
{
        int result = 0;

        if (instr == NULL)
                return -EINVAL;
        if (instr->ops && instr->ops->remove)
                result = instr->ops->remove(instr->ops->private_data, instr, 1);
        if (!result)
                kfree(instr);
        return result;
}

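/*
 * Allocate and initialize an empty instrument list (zeroed hash tables,
 * initialized locks, no owner).
 */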
snd_seq_kinstr_list_t *snd_seq_instr_list_new(void)
{
        snd_seq_kinstr_list_t *list;

        list = kzalloc(sizeof(snd_seq_kinstr_list_t), GFP_KERNEL);
        if (list == NULL)
                return NULL;
        spin_lock_init(&list->lock);
        spin_lock_init(&list->ops_lock);
        init_MUTEX(&list->ops_mutex);
        list->owner = -1;
        return list;
}

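/*
 * Free a whole instrument list: every hashed instrument and cluster is
 * released; instruments still in use are waited for before freeing.
 */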
void snd_seq_instr_list_free(snd_seq_kinstr_list_t **list_ptr)
{
        snd_seq_kinstr_list_t *list;
        snd_seq_kinstr_t *instr;
        snd_seq_kcluster_t *cluster;
        int idx;
        unsigned long flags;

        if (list_ptr == NULL)
                return;
        list = *list_ptr;
        *list_ptr = NULL;
        if (list == NULL)
                return;

        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                while ((instr = list->hash[idx]) != NULL) {
                        list->hash[idx] = instr->next;
                        list->count--;
                        spin_lock_irqsave(&list->lock, flags);
                        while (instr->use) {
                                spin_unlock_irqrestore(&list->lock, flags);
                                schedule_timeout_interruptible(1);
                                spin_lock_irqsave(&list->lock, flags);
                        }
                        spin_unlock_irqrestore(&list->lock, flags);
                        if (snd_seq_instr_free(instr, 0) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                }
                while ((cluster = list->chash[idx]) != NULL) {
                        list->chash[idx] = cluster->next;
                        list->ccount--;
                        kfree(cluster);
                }
        }
        kfree(list);
}

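/*
 * Decide whether an instrument matches a free request.  Returns 0 when
 * the instrument should be removed and 1 when it must be kept, taking
 * private (per-client) instruments of other clients into account.
 */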
static int instr_free_compare(snd_seq_kinstr_t *instr,
                              snd_seq_instr_header_t *ifree,
                              unsigned int client)
{
        switch (ifree->cmd) {
        case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
                /* all, except private for other clients */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 0;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
                /* all my private instruments */
                if ((instr->instr.std & 0xff000000) == 0)
                        return 1;
                if (((instr->instr.std >> 24) & 0xff) == client)
                        return 0;
                return 1;
        case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
                /* all instruments in the given cluster, except private ones of other clients */
                if ((instr->instr.std & 0xff000000) == 0) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                        return 1;
                }
                if (((instr->instr.std >> 24) & 0xff) == client) {
                        if (instr->instr.cluster == ifree->id.cluster)
                                return 0;
                }
                return 1;
        }
        return 1;
}

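/*
 * Conditionally free instruments matching the given request: matching
 * entries are unlinked from the hash table under the list spinlock,
 * collected on a temporary list and then freed outside the lock,
 * waiting for their use counts to drop to zero.
 */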
int snd_seq_instr_list_free_cond(snd_seq_kinstr_list_t *list,
                                 snd_seq_instr_header_t *ifree,
                                 int client,
                                 int atomic)
{
        snd_seq_kinstr_t *instr, *prev, *next, *flist;
        int idx;
        unsigned long flags;

        snd_instr_lock_ops(list);
        for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[idx];
                prev = flist = NULL;
                while (instr) {
                        while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
                                prev = instr;
                                instr = instr->next;
                        }
                        if (instr == NULL)
                                continue;
                        if (instr->ops && instr->ops->notify)
                                instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                        next = instr->next;
                        if (prev == NULL) {
                                list->hash[idx] = next;
                        } else {
                                prev->next = next;
                        }
                        list->count--;
                        instr->next = flist;
                        flist = instr;
                        instr = next;
                }
                spin_unlock_irqrestore(&list->lock, flags);
                while (flist) {
                        instr = flist;
                        flist = instr->next;
                        while (instr->use)
                                schedule_timeout_interruptible(1);
                        if (snd_seq_instr_free(instr, atomic) < 0)
                                snd_printk(KERN_WARNING "instrument free problem\n");
                        instr = next;
                }
        }
        snd_instr_unlock_ops(list);
        return 0;
}

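/* hash the bank/program pair into an index for the instrument hash table */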
static int compute_hash_instr_key(snd_seq_instr_t *instr)
{
        int result;

        result = instr->bank | (instr->prg << 16);
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}

#if 0
static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
{
        int result;

        result = cluster;
        result += result >> 24;
        result += result >> 16;
        result += result >> 8;
        return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
}
#endif

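/*
 * Compare two instrument identifiers; returns 0 on a match.  Exact mode
 * requires identical cluster/bank/program, the same owning-client byte
 * and overlapping standard-type bits; in loose mode a zero cluster or
 * client byte in i2 acts as a wildcard.
 */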
static int compare_instr(snd_seq_instr_t *i1, snd_seq_instr_t *i2, int exact)
{
        if (exact) {
                if (i1->cluster != i2->cluster ||
                    i1->bank != i2->bank ||
                    i1->prg != i2->prg)
                        return 1;
                if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
                        return 1;
                if (!(i1->std & i2->std))
                        return 1;
                return 0;
        } else {
                unsigned int client_check;

                if (i2->cluster && i1->cluster != i2->cluster)
                        return 1;
                client_check = i2->std & 0xff000000;
                if (client_check) {
                        if ((i1->std & 0xff000000) != client_check)
                                return 1;
                } else {
                        if ((i1->std & i2->std) != i2->std)
                                return 1;
                }
                return i1->bank != i2->bank || i1->prg != i2->prg;
        }
}

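/*
 * Look up an instrument in the list.  Aliases are followed up to ten
 * levels when follow_alias is set.  On success the use count is
 * incremented; the caller must drop it with snd_seq_instr_free_use().
 */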
snd_seq_kinstr_t *snd_seq_instr_find(snd_seq_kinstr_list_t *list,
                                     snd_seq_instr_t *instr,
                                     int exact,
                                     int follow_alias)
{
        unsigned long flags;
        int depth = 0;
        snd_seq_kinstr_t *result;

        if (list == NULL || instr == NULL)
                return NULL;
        spin_lock_irqsave(&list->lock, flags);
      __again:
        result = list->hash[compute_hash_instr_key(instr)];
        while (result) {
                if (!compare_instr(&result->instr, instr, exact)) {
                        if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
                                instr = (snd_seq_instr_t *)KINSTR_DATA(result);
                                if (++depth > 10)
                                        goto __not_found;
                                goto __again;
                        }
                        result->use++;
                        spin_unlock_irqrestore(&list->lock, flags);
                        return result;
                }
                result = result->next;
        }
      __not_found:
        spin_unlock_irqrestore(&list->lock, flags);
        return NULL;
}

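/* release a reference obtained from snd_seq_instr_find() */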
void snd_seq_instr_free_use(snd_seq_kinstr_list_t *list,
                            snd_seq_kinstr_t *instr)
{
        unsigned long flags;

        if (list == NULL || instr == NULL)
                return;
        spin_lock_irqsave(&list->lock, flags);
        if (instr->use <= 0) {
                snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n", instr->use, instr->name);
        } else {
                instr->use--;
        }
        spin_unlock_irqrestore(&list->lock, flags);
}

static snd_seq_kinstr_ops_t *instr_ops(snd_seq_kinstr_ops_t *ops, char *instr_type)
{
        while (ops) {
                if (!strcmp(ops->instr_type, instr_type))
                        return ops;
                ops = ops->next;
        }
        return NULL;
}

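/* send a RESULT event carrying the given status back to the originator of ev */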
static int instr_result(snd_seq_event_t *ev,
                        int type, int result,
                        int atomic)
{
        snd_seq_event_t sev;

        memset(&sev, 0, sizeof(sev));
        sev.type = SNDRV_SEQ_EVENT_RESULT;
        sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
                    SNDRV_SEQ_PRIORITY_NORMAL;
        sev.source = ev->dest;
        sev.dest = ev->source;
        sev.data.result.event = type;
        sev.data.result.result = result;
#if 0
        printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
               type, result,
               sev.queue,
               sev.source.client, sev.source.port,
               sev.dest.client, sev.dest.port);
#endif
        return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
}

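/*
 * INSTR_BEGIN/INSTR_END handlers: claim (BEGIN) or release (END)
 * ownership of the list for the source client; the outcome is reported
 * back with a result event (-EBUSY when another client owns the list).
 */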
static int instr_begin(snd_seq_kinstr_ops_t *ops,
                       snd_seq_kinstr_list_t *list,
                       snd_seq_event_t *ev,
                       int atomic, int hop)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        if (list->owner >= 0 && list->owner != ev->source.client) {
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
        }
        list->owner = ev->source.client;
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
}

static int instr_end(snd_seq_kinstr_ops_t *ops,
                     snd_seq_kinstr_list_t *list,
                     snd_seq_event_t *ev,
                     int atomic, int hop)
{
        unsigned long flags;

        /* TODO: timeout handling */
        spin_lock_irqsave(&list->lock, flags);
        if (list->owner == ev->source.client) {
                list->owner = -1;
                spin_unlock_irqrestore(&list->lock, flags);
                return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
        }
        spin_unlock_irqrestore(&list->lock, flags);
        return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
}

static int instr_info(snd_seq_kinstr_ops_t *ops,
                      snd_seq_kinstr_list_t *list,
                      snd_seq_event_t *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_format_info(snd_seq_kinstr_ops_t *ops,
                             snd_seq_kinstr_list_t *list,
                             snd_seq_event_t *ev,
                             int atomic, int hop)
{
        return -ENXIO;
}

static int instr_reset(snd_seq_kinstr_ops_t *ops,
                       snd_seq_kinstr_list_t *list,
                       snd_seq_event_t *ev,
                       int atomic, int hop)
{
        return -ENXIO;
}

static int instr_status(snd_seq_kinstr_ops_t *ops,
                        snd_seq_kinstr_list_t *list,
                        snd_seq_event_t *ev,
                        int atomic, int hop)
{
        return -ENXIO;
}

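/*
 * INSTR_PUT handler: copy the instrument header from user space, look up
 * the ops matching the data format, allocate the instrument, let the
 * ops->put callback consume the payload of data-type instruments and
 * finally link the instrument into the hash table.
 */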
static int instr_put(snd_seq_kinstr_ops_t *ops,
                     snd_seq_kinstr_list_t *list,
                     snd_seq_event_t *ev,
                     int atomic, int hop)
{
        unsigned long flags;
        snd_seq_instr_header_t put;
        snd_seq_kinstr_t *instr;
        int result = -EINVAL, len, key;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
                goto __return;
        if (copy_from_user(&put, (void __user *)ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
                result = -EFAULT;
                goto __return;
        }
        snd_instr_lock_ops(list);
        if (put.id.instr.std & 0xff000000) {	/* private instrument */
                put.id.instr.std &= 0x00ffffff;
                put.id.instr.std |= (unsigned int)ev->source.client << 24;
        }
        if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
                snd_seq_instr_free_use(list, instr);
                snd_instr_unlock_ops(list);
                result = -EBUSY;
                goto __return;
        }
        ops = instr_ops(ops, put.data.data.format);
        if (ops == NULL) {
                snd_instr_unlock_ops(list);
                goto __return;
        }
        len = ops->add_len;
        if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
                len = sizeof(snd_seq_instr_t);
        instr = snd_seq_instr_new(len, atomic);
        if (instr == NULL) {
                snd_instr_unlock_ops(list);
                result = -ENOMEM;
                goto __return;
        }
        instr->ops = ops;
        instr->instr = put.id.instr;
        strlcpy(instr->name, put.data.name, sizeof(instr->name));
        instr->type = put.data.type;
        if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
                result = ops->put(ops->private_data,
                                  instr,
                                  (void __user *)ev->data.ext.ptr + sizeof(snd_seq_instr_header_t),
                                  ev->data.ext.len - sizeof(snd_seq_instr_header_t),
                                  atomic,
                                  put.cmd);
                if (result < 0) {
                        snd_seq_instr_free(instr, atomic);
                        snd_instr_unlock_ops(list);
                        goto __return;
                }
        }
        key = compute_hash_instr_key(&instr->instr);
        spin_lock_irqsave(&list->lock, flags);
        instr->next = list->hash[key];
        list->hash[key] = instr;
        list->count++;
        spin_unlock_irqrestore(&list->lock, flags);
        snd_instr_unlock_ops(list);
        result = 0;
      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
        return result;
}

static int instr_get(snd_seq_kinstr_ops_t *ops,
                     snd_seq_kinstr_list_t *list,
                     snd_seq_event_t *ev,
                     int atomic, int hop)
{
        return -ENXIO;
}

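/*
 * INSTR_FREE handler: ALL/PRIVATE/CLUSTER requests are delegated to
 * snd_seq_instr_list_free_cond(); a SINGLE request unlinks the matching
 * instrument, notifies its owner and frees it once it is unused.
 */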
static int instr_free(snd_seq_kinstr_ops_t *ops,
                      snd_seq_kinstr_list_t *list,
                      snd_seq_event_t *ev,
                      int atomic, int hop)
{
        snd_seq_instr_header_t ifree;
        snd_seq_kinstr_t *instr, *prev;
        int result = -EINVAL;
        unsigned long flags;
        unsigned int hash;

        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
                goto __return;

        if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
                goto __return;
        if (copy_from_user(&ifree, (void __user *)ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
                result = -EFAULT;
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
            ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
                result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
                goto __return;
        }
        if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
                if (ifree.id.instr.std & 0xff000000) {
                        ifree.id.instr.std &= 0x00ffffff;
                        ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
                }
                hash = compute_hash_instr_key(&ifree.id.instr);
                snd_instr_lock_ops(list);
                spin_lock_irqsave(&list->lock, flags);
                instr = list->hash[hash];
                prev = NULL;
                while (instr) {
                        if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
                                goto __free_single;
                        prev = instr;
                        instr = instr->next;
                }
                result = -ENOENT;
                spin_unlock_irqrestore(&list->lock, flags);
                snd_instr_unlock_ops(list);
                goto __return;

              __free_single:
                if (prev) {
                        prev->next = instr->next;
                } else {
                        list->hash[hash] = instr->next;
                }
                if (instr->ops && instr->ops->notify)
                        instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
                while (instr->use) {
                        spin_unlock_irqrestore(&list->lock, flags);
                        schedule_timeout_interruptible(1);
                        spin_lock_irqsave(&list->lock, flags);
                }
                spin_unlock_irqrestore(&list->lock, flags);
                result = snd_seq_instr_free(instr, atomic);
                snd_instr_unlock_ops(list);
                goto __return;
        }

      __return:
        instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
        return result;
}

static int instr_list(snd_seq_kinstr_ops_t *ops,
                      snd_seq_kinstr_list_t *list,
                      snd_seq_event_t *ev,
                      int atomic, int hop)
{
        return -ENXIO;
}

static int instr_cluster(snd_seq_kinstr_ops_t *ops,
                         snd_seq_kinstr_list_t *list,
                         snd_seq_event_t *ev,
                         int atomic, int hop)
{
        return -ENXIO;
}

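/*
 * Entry point for sequencer instrument events: BEGIN/END are accepted
 * only as direct events; all other instrument events are dispatched to
 * the handlers above.
 */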
int snd_seq_instr_event(snd_seq_kinstr_ops_t *ops,
                        snd_seq_kinstr_list_t *list,
                        snd_seq_event_t *ev,
                        int client,
                        int atomic,
                        int hop)
{
        int direct = 0;

        snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
        if (snd_seq_ev_is_direct(ev)) {
                direct = 1;
                switch (ev->type) {
                case SNDRV_SEQ_EVENT_INSTR_BEGIN:
                        return instr_begin(ops, list, ev, atomic, hop);
                case SNDRV_SEQ_EVENT_INSTR_END:
                        return instr_end(ops, list, ev, atomic, hop);
                }
        }
        if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
                return -EINVAL;
        switch (ev->type) {
        case SNDRV_SEQ_EVENT_INSTR_INFO:
                return instr_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FINFO:
                return instr_format_info(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_RESET:
                return instr_reset(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_STATUS:
                return instr_status(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_PUT:
                return instr_put(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_GET:
                return instr_get(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_FREE:
                return instr_free(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_LIST:
                return instr_list(ops, list, ev, atomic, hop);
        case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
                return instr_cluster(ops, list, ev, atomic, hop);
        }
        return -EINVAL;
}

/*
 *  Init part
 */

static int __init alsa_seq_instr_init(void)
{
        return 0;
}

static void __exit alsa_seq_instr_exit(void)
{
}

module_init(alsa_seq_instr_init)
module_exit(alsa_seq_instr_exit)

EXPORT_SYMBOL(snd_seq_instr_list_new);
EXPORT_SYMBOL(snd_seq_instr_list_free);
EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
EXPORT_SYMBOL(snd_seq_instr_find);
EXPORT_SYMBOL(snd_seq_instr_free_use);
EXPORT_SYMBOL(snd_seq_instr_event);