gfp = GFP_NOWAIT;
#ifdef CONFIG_CPUMASK_OFFSTACK
- if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
+ if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
return false;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
- free_cpumask_var(desc->affinity);
+ free_cpumask_var(desc->irq_data.affinity);
return false;
}
#endif
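All of these conversions assume the per-interrupt state now lives in an irq_data structure embedded in irq_desc. A minimal sketch of the layout this patch relies on, reconstructed from the accesses below (exact types, field order, and any additional members are assumptions):

    /* Sketch only -- inferred from the accesses in this patch. */
    struct irq_data {
        unsigned int    irq;            /* interrupt number */
        unsigned int    node;           /* NUMA node for allocations */
        struct irq_chip *chip;          /* low-level chip callbacks */
        void            *handler_data;  /* flow-handler private data */
        void            *chip_data;     /* chip-private data */
        struct msi_desc *msi_desc;      /* MSI descriptor, if any */
    #ifdef CONFIG_SMP
        cpumask_var_t   affinity;       /* current CPU affinity */
    #endif
    };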
static inline void init_desc_masks(struct irq_desc *desc)
{
- cpumask_setall(desc->affinity);
+ cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_clear(desc->pending_mask);
#endif
struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
- cpumask_copy(new_desc->affinity, old_desc->affinity);
+ cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
static inline void free_desc_masks(struct irq_desc *old_desc,
struct irq_desc *new_desc)
{
- free_cpumask_var(old_desc->affinity);
+ free_cpumask_var(old_desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
free_cpumask_var(old_desc->pending_mask);
* Some chips need to know about probing in
* progress:
*/
- if (desc->chip->set_type)
- desc->chip->set_type(i, IRQ_TYPE_PROBE);
- desc->chip->startup(i);
+ if (desc->irq_data.chip->set_type)
+ desc->irq_data.chip->set_type(i, IRQ_TYPE_PROBE);
+ desc->irq_data.chip->startup(i);
}
raw_spin_unlock_irq(&desc->lock);
}
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->chip->startup(i))
+ if (desc->irq_data.chip->startup(i))
desc->status |= IRQ_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->irq_data.chip->shutdown(i);
} else
if (i < 32)
mask |= 1 << i;
mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->irq_data.chip->shutdown(i);
}
raw_spin_unlock_irq(&desc->lock);
}
nr_of_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->irq_data.chip->shutdown(i);
}
raw_spin_unlock_irq(&desc->lock);
}
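The three autoprobe hunks above only change which pointer the chip callbacks are reached through; the classic probing sequence used by legacy drivers is unaffected. For context, a sketch of that sequence, with the device-specific trigger left as a placeholder:

    unsigned long probe_mask;
    int irq;

    probe_mask = probe_irq_on();        /* arm autodetection */
    /* ... make the device raise its interrupt here ... */
    irq = probe_irq_off(probe_mask);    /* 0: none, <0: multiple fired */
    if (irq <= 0)
        return -ENODEV;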
/* Ensure we don't have left over values from a previous use of this irq */
raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
- desc->chip = &no_irq_chip;
+ desc->irq_data.chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
desc->depth = 1;
- desc->msi_desc = NULL;
- desc->handler_data = NULL;
+ desc->irq_data.msi_desc = NULL;
+ desc->irq_data.handler_data = NULL;
if (!keep_chip_data)
- desc->chip_data = NULL;
+ desc->irq_data.chip_data = NULL;
desc->action = NULL;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
- cpumask_setall(desc->affinity);
+ cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_clear(desc->pending_mask);
#endif
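The keep_chip_data flag in the hunk above suggests that dynamic_irq_init() and dynamic_irq_init_keep_chip_data() are thin wrappers around one shared helper; a sketch of that presumed pattern (the helper name is an assumption, since the surrounding code is not shown):

    /* assumed shared helper; body is the hunk above */
    static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
    {
        /* ... */
    }

    void dynamic_irq_init(unsigned int irq)
    {
        dynamic_irq_init_x(irq, false);
    }

    void dynamic_irq_init_keep_chip_data(unsigned int irq)
    {
        dynamic_irq_init_x(irq, true);
    }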
* dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
* @irq: irq number to initialize
*
- * does not set irq_to_desc(irq)->chip_data to NULL
+ * does not set irq_to_desc(irq)->irq_data.chip_data to NULL
*/
void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
irq);
return;
}
- desc->msi_desc = NULL;
- desc->handler_data = NULL;
+ desc->irq_data.msi_desc = NULL;
+ desc->irq_data.handler_data = NULL;
if (!keep_chip_data)
- desc->chip_data = NULL;
+ desc->irq_data.chip_data = NULL;
desc->handle_irq = handle_bad_irq;
- desc->chip = &no_irq_chip;
+ desc->irq_data.chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
* dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
* @irq: irq number to clean up
*
- * does not set irq_to_desc(irq)->chip_data to NULL
+ * does not set irq_to_desc(irq)->irq_data.chip_data to NULL
*/
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
{
raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
- desc->chip = chip;
+ desc->irq_data.chip = chip;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->handler_data = data;
+ desc->irq_data.handler_data = data;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
}
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->msi_desc = entry;
+ desc->irq_data.msi_desc = entry;
if (entry)
entry->irq = irq;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return -EINVAL;
}
- if (!desc->chip) {
+ if (!desc->irq_data.chip) {
printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
return -EINVAL;
}
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->chip_data = data;
+ desc->irq_data.chip_data = data;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
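The setters above keep their external signatures; only the store target moves into irq_data, so existing callers need no change. Typical driver-side chip setup still looks like this (my_chip and priv are hypothetical):

    set_irq_chip(irq, &my_chip);            /* install the chip */
    set_irq_chip_data(irq, priv);           /* stash driver state */
    set_irq_handler(irq, handle_level_irq); /* pick the flow handler */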
{
struct irq_desc *desc = irq_to_desc(irq);
- desc->chip->unmask(irq);
+ desc->irq_data.chip->unmask(irq);
desc->status &= ~IRQ_MASKED;
}
{
struct irq_desc *desc = irq_to_desc(irq);
- desc->chip->enable(irq);
+ desc->irq_data.chip->enable(irq);
return 0;
}
{
struct irq_desc *desc = irq_to_desc(irq);
- desc->chip->mask(irq);
+ desc->irq_data.chip->mask(irq);
desc->status |= IRQ_MASKED;
}
static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
- if (desc->chip->mask_ack)
- desc->chip->mask_ack(irq);
+ if (desc->irq_data.chip->mask_ack)
+ desc->irq_data.chip->mask_ack(irq);
else {
- desc->chip->mask(irq);
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->irq_data.chip->mask(irq);
+ if (desc->irq_data.chip->ack)
+ desc->irq_data.chip->ack(irq);
}
desc->status |= IRQ_MASKED;
}
static inline void mask_irq(struct irq_desc *desc, int irq)
{
- if (desc->chip->mask) {
- desc->chip->mask(irq);
+ if (desc->irq_data.chip->mask) {
+ desc->irq_data.chip->mask(irq);
desc->status |= IRQ_MASKED;
}
}
static inline void unmask_irq(struct irq_desc *desc, int irq)
{
- if (desc->chip->unmask) {
- desc->chip->unmask(irq);
+ if (desc->irq_data.chip->unmask) {
+ desc->irq_data.chip->unmask(irq);
desc->status &= ~IRQ_MASKED;
}
}
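mask_irq() and unmask_irq() check for the callback before calling it, and mask_ack_irq() falls back to .mask plus .ack when the chip has no combined .mask_ack, so a chip only needs to implement what its hardware supports. A hypothetical minimal chip in the pre-conversion callback style this patch still uses:

    static void my_mask(unsigned int irq)
    {
        /* set the per-IRQ mask bit in a hypothetical device register */
    }

    static void my_unmask(unsigned int irq)
    {
        /* clear that mask bit again */
    }

    static struct irq_chip my_chip = {
        .name   = "my-chip",
        .mask   = my_mask,
        .unmask = my_unmask,
        /* no .mask_ack: mask_ack_irq() falls back to .mask + .ack */
    };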
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->chip->eoi(irq);
+ desc->irq_data.chip->eoi(irq);
raw_spin_unlock(&desc->lock);
}
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (desc->irq_data.chip->ack)
+ desc->irq_data.chip->ack(irq);
/* Mark the IRQ currently in progress. */
desc->status |= IRQ_INPROGRESS;
kstat_incr_irqs_this_cpu(irq, desc);
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (desc->irq_data.chip->ack)
+ desc->irq_data.chip->ack(irq);
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- if (desc->chip->eoi)
- desc->chip->eoi(irq);
+ if (desc->irq_data.chip->eoi)
+ desc->irq_data.chip->eoi(irq);
}
void
if (!handle)
handle = handle_bad_irq;
- else if (desc->chip == &no_irq_chip) {
+ else if (desc->irq_data.chip == &no_irq_chip) {
printk(KERN_WARNING "Trying to install %sinterrupt handler "
"for IRQ%d\n", is_chained ? "chained " : "", irq);
/*
* prevent us from setting up the interrupt at all. Switch it to
* dummy_irq_chip for easy transition.
*/
- desc->chip = &dummy_irq_chip;
+ desc->irq_data.chip = &dummy_irq_chip;
}
chip_bus_lock(irq, desc);
/* Uninstall? */
if (handle == handle_bad_irq) {
- if (desc->chip != &no_irq_chip)
+ if (desc->irq_data.chip != &no_irq_chip)
mask_ack_irq(desc, irq);
desc->status |= IRQ_DISABLED;
desc->depth = 1;
desc->status &= ~IRQ_DISABLED;
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
desc->depth = 0;
- desc->chip->startup(irq);
+ desc->irq_data.chip->startup(irq);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
raw_spin_lock_init(&desc->lock);
desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
- desc->node = node;
+ desc->irq_data.node = node;
#endif
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_kstat_irqs(desc, node, nr_cpu_ids);
desc[i].irq_data.irq = i;
desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
- desc[i].node = node;
+ desc[i].irq_data.node = node;
#endif
desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
/*
* No locking required for CPU-local interrupts:
*/
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (desc->irq_data.chip->ack)
+ desc->irq_data.chip->ack(irq);
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
}
- desc->chip->end(irq);
+ desc->irq_data.chip->end(irq);
return 1;
}
raw_spin_lock(&desc->lock);
- if (desc->chip->ack)
- desc->chip->ack(irq);
+ if (desc->irq_data.chip->ack)
+ desc->irq_data.chip->ack(irq);
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
- desc->chip->end(irq);
+ desc->irq_data.chip->end(irq);
raw_spin_unlock(&desc->lock);
return 1;
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
{
- if (unlikely(desc->chip->bus_lock))
- desc->chip->bus_lock(irq);
+ if (unlikely(desc->irq_data.chip->bus_lock))
+ desc->irq_data.chip->bus_lock(irq);
}
static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
{
- if (unlikely(desc->chip->bus_sync_unlock))
- desc->chip->bus_sync_unlock(irq);
+ if (unlikely(desc->irq_data.chip->bus_sync_unlock))
+ desc->irq_data.chip->bus_sync_unlock(irq);
}
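These two helpers bracket the desc->lock critical section for chips sitting behind slow busses, as the handler-install hunk earlier in this patch shows. The calling pattern is:

    chip_bus_lock(irq, desc);
    raw_spin_lock_irqsave(&desc->lock, flags);
    /* ... update desc state, possibly via chip callbacks ... */
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    chip_bus_sync_unlock(irq, desc);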
/*
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
- printk("->chip(): %p, ", desc->chip);
- print_symbol("%s\n", (unsigned long)desc->chip);
+ printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+ print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
printk("->action(): %p\n", desc->action);
if (desc->action) {
printk("->action->handler(): %p, ", desc->action->handler);
{
struct irq_desc *desc = irq_to_desc(irq);
- if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
- !desc->chip->set_affinity)
+ if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
+ !desc->irq_data.chip->set_affinity)
return 0;
return 1;
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
- if (!desc->chip->set_affinity)
+ if (!desc->irq_data.chip->set_affinity)
return -EINVAL;
raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
- if (!desc->chip->set_affinity(irq, cpumask)) {
- cpumask_copy(desc->affinity, cpumask);
+ if (!desc->irq_data.chip->set_affinity(irq, cpumask)) {
+ cpumask_copy(desc->irq_data.affinity, cpumask);
irq_set_thread_affinity(desc);
}
}
cpumask_copy(desc->pending_mask, cpumask);
}
#else
- if (!desc->chip->set_affinity(irq, cpumask)) {
- cpumask_copy(desc->affinity, cpumask);
+ if (!desc->irq_data.chip->set_affinity(irq, cpumask)) {
+ cpumask_copy(desc->irq_data.affinity, cpumask);
irq_set_thread_affinity(desc);
}
#endif
* one of the targets is online.
*/
if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
- if (cpumask_any_and(desc->affinity, cpu_online_mask)
+ if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
< nr_cpu_ids)
goto set_affinity;
else
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
+ cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, desc->affinity);
+ desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity);
return 0;
}
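From a caller's perspective the affinity API (apparently irq_set_affinity(), whose body is patched above) is untouched by this refactor; only where the mask is stored changes. A sketch of a driver pinning its (hypothetical) interrupt to CPU 0:

    int err = irq_set_affinity(irq, cpumask_of(0));

    if (err)
        pr_warning("IRQ %u: affinity request failed (%d)\n", irq, err);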
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
+ desc->irq_data.chip->disable(irq);
}
}
* IRQ line is re-enabled.
*
* This function may be called from IRQ context only when
- * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
+ * desc->irq_data.chip->bus_lock and
+ * desc->irq_data.chip->bus_sync_unlock are NULL !
*/
void enable_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
- if (desc->chip->set_wake)
- ret = desc->chip->set_wake(irq, on);
+ if (desc->irq_data.chip->set_wake)
+ ret = desc->irq_data.chip->set_wake(irq, on);
return ret;
}
unsigned long flags)
{
int ret;
- struct irq_chip *chip = desc->chip;
+ struct irq_chip *chip = desc->irq_data.chip;
if (!chip || !chip->set_type) {
/*
desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
desc->status |= flags;
- if (chip != desc->chip)
- irq_chip_set_defaults(desc->chip);
+ if (chip != desc->irq_data.chip)
+ irq_chip_set_defaults(desc->irq_data.chip);
}
return ret;
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
- desc->chip->unmask(irq);
+ desc->irq_data.chip->unmask(irq);
}
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
}
raw_spin_lock_irq(&desc->lock);
- cpumask_copy(mask, desc->affinity);
+ cpumask_copy(mask, desc->irq_data.affinity);
raw_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
if (!desc)
return -EINVAL;
- if (desc->chip == &no_irq_chip)
+ if (desc->irq_data.chip == &no_irq_chip)
return -ENOSYS;
/*
* Some drivers like serial.c use request_irq() heavily,
}
if (!shared) {
- irq_chip_set_defaults(desc->chip);
+ irq_chip_set_defaults(desc->irq_data.chip);
init_waitqueue_head(&desc->wait_for_threads);
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
- desc->chip->startup(irq);
+ desc->irq_data.chip->startup(irq);
} else
/* Undo nested disables: */
desc->depth = 1;
/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
- if (desc->chip->release)
- desc->chip->release(irq, dev_id);
+ if (desc->irq_data.chip->release)
+ desc->irq_data.chip->release(irq, dev_id);
#endif
/* If this was the last handler, shut down the IRQ line: */
if (!desc->action) {
desc->status |= IRQ_DISABLED;
- if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
+ if (desc->irq_data.chip->shutdown)
+ desc->irq_data.chip->shutdown(irq);
else
- desc->chip->disable(irq);
+ desc->irq_data.chip->disable(irq);
}
#ifdef CONFIG_SMP
if (unlikely(cpumask_empty(desc->pending_mask)))
return;
- if (!desc->chip->set_affinity)
+ if (!desc->irq_data.chip->set_affinity)
return;
assert_raw_spin_locked(&desc->lock);
*/
if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
< nr_cpu_ids))
- if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
- cpumask_copy(desc->affinity, desc->pending_mask);
+ if (!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) {
+ cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
irq_set_thread_affinity(desc);
}
if (unlikely(desc->status & IRQ_DISABLED))
return;
- desc->chip->mask(irq);
+ desc->irq_data.chip->mask(irq);
move_masked_irq(irq);
- desc->chip->unmask(irq);
+ desc->irq_data.chip->unmask(irq);
}
return false;
}
raw_spin_lock_init(&desc->lock);
- desc->node = node;
+ desc->irq_data.node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
init_copy_desc_masks(old_desc, desc);
unsigned int irq;
unsigned long flags;
- irq = old_desc->irq;
+ irq = old_desc->irq_data.irq;
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
/* do not move static descriptors, or when the target node is -1 */
- if (desc->irq < NR_IRQS_LEGACY || node == -1)
+ if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
return desc;
- if (desc->node != node)
+ if (desc->irq_data.node != node)
desc = __real_move_irq_desc(desc, node);
return desc;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
- const struct cpumask *mask = desc->affinity;
+ const struct cpumask *mask = desc->irq_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
cpumask_var_t new_value;
int err;
- if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
+ if (!irq_to_desc(irq)->irq_data.chip->set_affinity || no_irq_affinity ||
irq_balancing_disabled(irq))
return -EIO;
{
struct irq_desc *desc = irq_to_desc((long) m->private);
- seq_printf(m, "%d\n", desc->node);
+ seq_printf(m, "%d\n", desc->irq_data.node);
return 0;
}
{
char name [MAX_NAMELEN];
- if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
+ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
return;
memset(name, 0, MAX_NAMELEN);
/*
* Make sure the interrupt is enabled, before resending it:
*/
- desc->chip->enable(irq);
+ desc->irq_data.chip->enable(irq);
/*
* We do not resend level type interrupts. Level type
if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
- if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+ if (!desc->irq_data.chip->retrigger ||
+ !desc->irq_data.chip->retrigger(irq)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
* If we did actual work for the real IRQ line we must let the
* IRQ controller clean up too
*/
- if (work && desc->chip && desc->chip->end)
- desc->chip->end(irq);
+ if (work && desc->irq_data.chip && desc->irq_data.chip->end)
+ desc->irq_data.chip->end(irq);
raw_spin_unlock(&desc->lock);
return ok;
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
desc->depth++;
- desc->chip->disable(irq);
+ desc->irq_data.chip->disable(irq);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);