static void its_send_single_command(struct its_node *its,
                                    its_cmd_builder_t builder,
                                    struct its_cmd_desc *desc)
{
    struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
    struct its_collection *sync_col;
+   unsigned long flags;

-   raw_spin_lock(&its->lock);
+   raw_spin_lock_irqsave(&its->lock, flags);

    cmd = its_allocate_entry(its);
    if (!cmd) { /* We're soooooo screwed... */
        pr_err_ratelimited("ITS can't allocate, dropping command\n");
-       raw_spin_unlock(&its->lock);
+       raw_spin_unlock_irqrestore(&its->lock, flags);
        return;
    }
    sync_col = builder(cmd, desc);

[...]

post:
    next_cmd = its_post_commands(its);
-   raw_spin_unlock(&its->lock);
+   raw_spin_unlock_irqrestore(&its->lock, flags);

    its_wait_for_range_completion(its, cmd, next_cmd);
}
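
Why the irqsave conversion matters here: its->lock guards the command queue, and with the plain raw_spin_lock() an interrupt arriving on the same CPU while the lock is held could re-enter the driver and spin on the lock forever; the _irqsave variant disables local interrupts for the critical section and restores their previous state afterwards. A minimal sketch of the pattern, separate from the patch (the demo_* names are made up for illustration):

/* Illustrative only: why _irqsave is needed when a lock is shared
 * between process context and hard-irq context on the same CPU. */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_process_path(void)
{
    unsigned long flags;

    /* Disable local IRQs and remember their previous state ... */
    raw_spin_lock_irqsave(&demo_lock, flags);
    /* ... so an IRQ on this CPU cannot interrupt the critical
     * section and spin forever trying to take demo_lock. */
    raw_spin_unlock_irqrestore(&demo_lock, flags);
}

static irqreturn_t demo_irq_handler(int irq, void *data)
{
    /* Already in hard-irq context; the plain variant is fine here. */
    raw_spin_lock(&demo_lock);
    raw_spin_unlock(&demo_lock);
    return IRQ_HANDLED;
}
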
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
    struct its_device *its_dev = NULL, *tmp;
+   unsigned long flags;

-   raw_spin_lock(&its->lock);
+   raw_spin_lock_irqsave(&its->lock, flags);

    list_for_each_entry(tmp, &its->its_device_list, entry) {
        if (tmp->device_id == dev_id) {
            its_dev = tmp;
            break;
        }
    }

-   raw_spin_unlock(&its->lock);
+   raw_spin_unlock_irqrestore(&its->lock, flags);

    return its_dev;
}
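
its_find_device() itself only does a locked walk of its_device_list; a caller would typically try the lookup first and fall back to creating the device. A hypothetical caller shape, not part of the patch (the nvecs parameter of its_create_device() is assumed from the surrounding driver code):

/* Hypothetical caller (not in the patch): reuse an existing mapping
 * for dev_id, otherwise create one. */
static struct its_device *demo_get_device(struct its_node *its,
                                          u32 dev_id, int nvecs)
{
    struct its_device *its_dev = its_find_device(its, dev_id);

    if (!its_dev)
        its_dev = its_create_device(its, dev_id, nvecs);

    return its_dev;
}
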
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
                                            int nvecs)
{
    struct its_device *dev;
    unsigned long *lpi_map;
+   unsigned long flags;
    void *itt;
    int lpi_base;
    int nr_lpis;
    int cpu;

[...]

    dev->device_id = dev_id;
    INIT_LIST_HEAD(&dev->entry);

-   raw_spin_lock(&its->lock);
+   raw_spin_lock_irqsave(&its->lock, flags);
    list_add(&dev->entry, &its->its_device_list);
-   raw_spin_unlock(&its->lock);
+   raw_spin_unlock_irqrestore(&its->lock, flags);

    /* Bind the device to the first possible CPU */
    cpu = cpumask_first(cpu_online_mask);

[...]

static void its_free_device(struct its_device *its_dev)
{
-   raw_spin_lock(&its_dev->its->lock);
+   unsigned long flags;
+
+   raw_spin_lock_irqsave(&its_dev->its->lock, flags);
    list_del(&its_dev->entry);
-   raw_spin_unlock(&its_dev->its->lock);
+   raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
    kfree(its_dev->itt);
    kfree(its_dev);
}
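
Note the shape of its_free_device(): only the list_del() runs under the lock, and both kfree() calls happen after interrupts are restored, which keeps the IRQs-off window as short as possible. The same unlink-then-free pattern applies to any object on a list guarded by an irq-safe lock; a self-contained sketch with made-up demo_* names:

/* Illustrative sketch (not part of the patch): unlink under the
 * irq-safe lock, free outside it. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {              /* hypothetical stand-in for its_device */
    struct list_head entry;
};

static DEFINE_RAW_SPINLOCK(demo_list_lock);

static void demo_free_item(struct demo_item *item)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&demo_list_lock, flags);
    list_del(&item->entry);     /* unlink under the lock */
    raw_spin_unlock_irqrestore(&demo_list_lock, flags);

    kfree(item);                /* free with IRQs enabled again */
}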