 	struct nvkm_instmem base;
 	/* protects vaddr_* and gk20a_instobj::vaddr* */
-	spinlock_t lock;
+	struct mutex lock;
 	/* CPU mappings LRU */
 	unsigned int vaddr_use;
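
The hunk above swaps the spinlock guarding the CPU-mapping state for a mutex. Because a mutex may sleep, it cannot be taken with interrupts disabled, which is why every call site below also drops its `unsigned long flags` variable and the `_irqsave`/`_irqrestore` suffixes; in exchange, sleeping work may now happen inside the critical section. Below is a minimal sketch of that call-pattern difference only, using invented `demo_*` names rather than the driver's own code.

```c
/* Minimal sketch of the locking change, not the driver code itself.
 * demo_cache and demo_count are hypothetical names used only to
 * illustrate the spinlock -> mutex call pattern.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_cache {
	spinlock_t slock;	/* old style: IRQ-safe spinlock */
	struct mutex mlock;	/* new style: sleepable mutex */
	unsigned int demo_count;
	/* spin_lock_init()/mutex_init() before first use is assumed */
};

/* Old pattern: a spinlock taken with IRQs disabled needs a flags word,
 * and the critical section must not sleep (no mapping, no allocation).
 */
static void demo_inc_spinlock(struct demo_cache *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->slock, flags);
	c->demo_count++;
	spin_unlock_irqrestore(&c->slock, flags);
}

/* New pattern: a mutex may sleep, so sleeping work can live inside the
 * critical section and the flags variable disappears; the trade-off is
 * that the lock can no longer be taken from atomic/IRQ context.
 */
static void demo_inc_mutex(struct demo_cache *c)
{
	mutex_lock(&c->mlock);
	c->demo_count++;
	mutex_unlock(&c->mlock);
}
```
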
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
 	const u64 size = nvkm_memory_size(memory);
-	unsigned long flags;
 	nvkm_ltc_flush(ltc);
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 	if (node->base.vaddr) {
 		if (!node->use_cpt) {
 out:
 	node->use_cpt++;
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 	return node->base.vaddr;
 }
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
-	unsigned long flags;
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 	/* we should at least have one user to release... */
 	if (WARN_ON(node->use_cpt == 0))
 		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 out:
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 	wmb();
 	nvkm_ltc_invalidate(ltc);
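
The two hunks above (the acquire and release paths) show what the lock actually protects: a per-object CPU-use count (`use_cpt`) and an LRU list of mapped-but-idle objects (`vaddr_lru`). The sketch below illustrates that pattern in isolation under a mutex, with invented `demo_*` names; the path that creates a new mapping when none exists, and all initialization, are omitted.

```c
/* Sketch of the use-count + LRU pattern the lock serializes; the
 * demo_* names are invented for illustration and do not exist in
 * the driver.  mutex_init()/INIT_LIST_HEAD() are assumed done.
 */
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_imem {
	struct mutex lock;		/* protects use counts and the LRU */
	struct list_head vaddr_lru;	/* mapped objects with no users */
};

struct demo_obj {
	struct demo_imem *imem;
	struct list_head vaddr_node;
	void *vaddr;			/* CPU mapping, NULL if unmapped */
	unsigned int use_cpt;		/* number of current CPU users */
};

/* The first user of a still-mapped object takes it off the LRU so the
 * mapping cannot be recycled from under it.
 */
static void *demo_acquire(struct demo_obj *obj)
{
	struct demo_imem *imem = obj->imem;

	mutex_lock(&imem->lock);
	if (obj->vaddr && !obj->use_cpt)
		list_del(&obj->vaddr_node);
	obj->use_cpt++;
	mutex_unlock(&imem->lock);
	return obj->vaddr;
}

/* The last user parks the mapping on the LRU; it stays valid until it
 * is recycled to keep the cache under its size budget.
 */
static void demo_release(struct demo_obj *obj)
{
	struct demo_imem *imem = obj->imem;

	mutex_lock(&imem->lock);
	if (!WARN_ON(obj->use_cpt == 0) && --obj->use_cpt == 0)
		list_add_tail(&obj->vaddr_node, &imem->vaddr_lru);
	mutex_unlock(&imem->lock);
}
```
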
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
 	struct nvkm_mm_node *r = node->base.mem.mem;
-	unsigned long flags;
 	int i;
 	if (unlikely(!r))
 		goto out;
-	spin_lock_irqsave(&imem->lock, flags);
+	mutex_lock(&imem->lock);
 	/* vaddr has already been recycled */
 	if (node->base.vaddr)
 		gk20a_instobj_iommu_recycle_vaddr(node);
-	spin_unlock_irqrestore(&imem->lock, flags);
+	mutex_unlock(&imem->lock);
 	/* clear IOMMU bit to unmap pages */
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;
 	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
-	spin_lock_init(&imem->lock);
+	mutex_init(&imem->lock);
 	*pimem = &imem->base;
 	/* do not allow more than 1MB of CPU-mapped instmem */
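
On the constructor side, the only change is that the lock is now set up with mutex_init() instead of spin_lock_init(), before the instmem object is published through *pimem. Below is a small sketch of that initialization order, with invented demo_* names; the 1MB cap is taken from the comment above.

```c
/* Sketch of the constructor-side change: the lock must be initialized
 * before the object is published.  The demo_* names are illustrative
 * only; the 0x100000 budget mirrors the 1MB figure in the comment.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_imem {
	struct mutex lock;
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;
};

static struct demo_imem *demo_imem_new(void)
{
	struct demo_imem *imem;

	imem = kzalloc(sizeof(*imem), GFP_KERNEL);
	if (!imem)
		return NULL;

	mutex_init(&imem->lock);	/* replaces spin_lock_init() */
	INIT_LIST_HEAD(&imem->vaddr_lru);

	/* cap CPU-mapped instmem at 1MB, per the comment above */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	return imem;
}
```
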