/*
 * Map a PCM substream's runtime DMA buffer into the card's device
 * address space.
 *
 * Locking: none taken here — the ct_vm layer now serializes itself
 * with vm->lock (a mutex, see get_vm_block), which is why this patch
 * removes the old atc->vm_lock spinlock around vm->map().
 *
 * Returns 0 on success, -ENOENT if no VM block could be obtained.
 */
static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct snd_pcm_runtime *runtime;
	struct ct_vm *vm;

	runtime = apcm->substream->runtime;
	vm = atc->vm;

	apcm->vm_block = vm->map(vm, runtime->dma_area, runtime->dma_bytes);
	if (NULL == apcm->vm_block)
		return -ENOENT;

	/* NOTE(review): the success path was elided diff context; the int
	 * return contract implies 0 here — confirm against the full file. */
	return 0;
}
/*
 * Release the device VM mapping of a PCM substream, if one exists.
 *
 * Locking: handled inside the ct_vm layer (vm->lock mutex); the old
 * atc->vm_lock spinlock around vm->unmap() is removed by this patch.
 */
static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
	struct ct_vm *vm;

	/* Nothing mapped — nothing to do.
	 * NOTE(review): this early return was elided diff context; without
	 * it the code below would unmap a NULL block — confirm. */
	if (NULL == apcm->vm_block)
		return;

	vm = atc->vm;
	vm->unmap(vm, apcm->vm_block);
	apcm->vm_block = NULL;
}
struct ct_vm *vm;
void *kvirt_addr;
unsigned long phys_addr;
- unsigned long flags;
- spin_lock_irqsave(&atc->vm_lock, flags);
vm = atc->vm;
kvirt_addr = vm->get_ptp_virt(vm, index);
if (kvirt_addr == NULL)
else
phys_addr = virt_to_phys(kvirt_addr);
- spin_unlock_irqrestore(&atc->vm_lock, flags);
-
return phys_addr;
}
/* NOTE(review): fragment of the ATC constructor — the enclosing
 * function is outside this hunk and cannot be rewritten safely.
 * The vm_lock spinlock init is removed because VM serialization
 * moves into the mutex added to struct ct_vm by this patch. */
atc_set_ops(atc);
spin_lock_init(&atc->atc_lock);
- spin_lock_init(&atc->vm_lock);
/* Find card model */
err = atc_identify_card(atc);
/* NOTE(review): fragment of struct ct_atc (cut at both edges).
 * The vm_lock member is deleted: all VM locking now lives in
 * struct ct_vm's mutex, so atc keeps only its own atc_lock. */
unsigned long (*get_ptp_phys)(struct ct_atc *atc, int index);
spinlock_t atc_lock;
- spinlock_t vm_lock;
int (*pcm_playback_prepare)(struct ct_atc *atc,
struct ct_atc_pcm *apcm);
struct ct_vm_block *block = NULL, *entry = NULL;
struct list_head *pos = NULL;
+ mutex_lock(&vm->lock);
list_for_each(pos, &vm->unused) {
entry = list_entry(pos, struct ct_vm_block, list);
if (entry->size >= size)
break; /* found a block that is big enough */
}
if (pos == &vm->unused)
- return NULL;
+ goto out;
if (entry->size == size) {
/* Move the vm node from unused list to used list directly */
list_del(&entry->list);
list_add(&entry->list, &vm->used);
vm->size -= size;
- return entry;
+ block = entry;
+ goto out;
}
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (NULL == block)
- return NULL;
+ goto out;
block->addr = entry->addr;
block->size = size;
entry->size -= size;
vm->size -= size;
+ out:
+ mutex_unlock(&vm->lock);
return block;
}
/* NOTE(review): fragment of put_vm_block() — the function header and
 * the loop locating the insertion point / coalescing adjacent free
 * blocks are elided diff context, so this hunk is annotated only.
 * The patch brackets the whole body with the new vm->lock mutex,
 * pairing with the lock taken in get_vm_block(). */
struct ct_vm_block *entry = NULL, *pre_ent = NULL;
struct list_head *pos = NULL, *pre = NULL;
+ mutex_lock(&vm->lock);
/* Return the block's space to the free pool. */
list_del(&block->list);
vm->size += block->size;
/* (elided merge-loop body) step backwards through the unused list */
pos = pre;
pre = pos->prev;
}
+ mutex_unlock(&vm->lock);
}
/* NOTE(review): fragment of the ct_vm constructor (cut at both edges).
 * The new mutex is initialized here, replacing the deleted
 * atc->vm_lock spinlock init in the ATC constructor. */
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
if (NULL == vm)
return -ENOMEM;
+ mutex_init(&vm->lock);
+
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
#define CT_PTP_NUM	1	/* num of device page table pages */

/* VM locking now uses a sleepable mutex instead of a spinlock. */
#include <linux/mutex.h>
#include <linux/list.h>
/* NOTE(review): header fragment — the lines between the start of
 * struct ct_vm_block and these struct ct_vm members are elided diff
 * context; only comments are added here. */
struct ct_vm_block {
unsigned int size; /* Available addr space in bytes */
struct list_head unused; /* List of unused blocks */
struct list_head used; /* List of used blocks */
/* Serializes free-list and map/unmap operations; replaces the old
 * atc->vm_lock spinlock so allocation under the lock may sleep. */
+ struct mutex lock;
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
struct ct_vm_block *(*map)(struct ct_vm *, void *host_addr, int size);