--- /dev/null
+/*
+ * MSI hooks for the standard x86 APIC
+ */
+
+#include <linux/pci.h>
+#include <linux/irq.h>
+
+#include "msi.h"
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+
+#define MSI_DATA_DELIVERY_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_HEADER 0xfee00000
+
+#define MSI_ADDR_DESTID_MASK 0xfff0000f
+#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
+
+#define MSI_ADDR_DESTMODE_SHIFT 2
+#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+
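+/*
+ * Retarget an MSI at a new cpu by rewriting the destination ID field
+ * of the low address dword; the upper address dword is left unchanged.
+ */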
+static void
+msi_target_apic(unsigned int vector,
+ unsigned int dest_cpu,
+ u32 *address_hi, /* in/out */
+ u32 *address_lo) /* in/out */
+{
+ u32 addr = *address_lo;
+
+ addr &= MSI_ADDR_DESTID_MASK;
+ addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu));
+
+ *address_lo = addr;
+}
+
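+/*
+ * Build the initial address/data pair: physical destination mode,
+ * fixed delivery, edge triggered, targeted at the first online cpu.
+ */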
+static int
+msi_setup_apic(struct pci_dev *pdev, /* unused in generic */
+ unsigned int vector,
+ u32 *address_hi,
+ u32 *address_lo,
+ u32 *data)
+{
+ unsigned long dest_phys_id;
+
+ dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
+
+ *address_hi = 0;
+ *address_lo = MSI_ADDR_HEADER |
+ MSI_ADDR_DESTMODE_PHYS |
+ MSI_ADDR_REDIRECTION_CPU |
+ MSI_ADDR_DESTID_CPU(dest_phys_id);
+
+ *data = MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ MSI_DATA_DELIVERY_FIXED |
+ MSI_DATA_VECTOR(vector);
+
+ return 0;
+}
+
+static void
+msi_teardown_apic(unsigned int vector)
+{
+ return; /* no-op */
+}
+
+/*
+ * Generic ops used on most IA archs/platforms. Set with msi_register()
+ */
+
+struct msi_ops msi_apic_ops = {
+ .setup = msi_setup_apic,
+ .teardown = msi_teardown_apic,
+ .target = msi_target_apic,
+};
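+
+/*
+ * Illustrative sketch only (the arch-specific glue is not part of this
+ * file): an architecture using these generic ops is expected to hand
+ * them to the msi core from its msi_arch_init(), roughly:
+ *
+ *	int msi_arch_init(void)
+ *	{
+ *		return msi_register(&msi_apic_ops);
+ *	}
+ */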
#include "pci.h"
#include "msi.h"
-#define MSI_TARGET_CPU first_cpu(cpu_online_map)
-
static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif
+static struct msi_ops *msi_ops;
+
+int
+msi_register(struct msi_ops *ops)
+{
+ msi_ops = ops;
+ return 0;
+}
+
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
struct msi_desc *entry;
- struct msg_address address;
+ u32 address_hi, address_lo;
unsigned int irq = vector;
unsigned int dest_cpu = first_cpu(cpu_mask);
if (!pos)
return;
+ pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
+ &address_hi);
pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
- &address.lo_address.value);
- address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
+ &address_lo);
+
+ msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
+
+ pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
+ address_hi);
pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
- address.lo_address.value);
+ address_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
case PCI_CAP_ID_MSIX:
{
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
-
- address.lo_address.value = readl(entry->mask_base + offset);
- address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
- writel(address.lo_address.value, entry->mask_base + offset);
+ int offset_hi =
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
+ int offset_lo =
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
+
+ address_hi = readl(entry->mask_base + offset_hi);
+ address_lo = readl(entry->mask_base + offset_lo);
+
+ msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
+
+ writel(address_hi, entry->mask_base + offset_hi);
+ writel(address_lo, entry->mask_base + offset_lo);
set_native_irq_info(irq, cpu_mask);
break;
}
.set_affinity = set_msi_affinity
};
-static void msi_data_init(struct msg_data *msi_data,
- unsigned int vector)
-{
- memset(msi_data, 0, sizeof(struct msg_data));
- msi_data->vector = (u8)vector;
- msi_data->delivery_mode = MSI_DELIVERY_MODE;
- msi_data->level = MSI_LEVEL_MODE;
- msi_data->trigger = MSI_TRIGGER_MODE;
-}
-
-static void msi_address_init(struct msg_address *msi_address)
-{
- unsigned int dest_id;
- unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
-
- memset(msi_address, 0, sizeof(struct msg_address));
- msi_address->hi_address = (u32)0;
- dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
- msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
- msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
- msi_address->lo_address.u.dest_id = dest_id;
- msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
-}
-
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
static int assign_msi_vector(void)
{
return status;
}
+ status = msi_arch_init();
+ if (status < 0) {
+ pci_msi_enable = 0;
+ printk(KERN_WARNING
+ "PCI: MSI arch init failed. MSI disabled.\n");
+ return status;
+ }
+
+ if (!msi_ops) {
+ printk(KERN_WARNING
+ "PCI: MSI ops not registered. MSI disabled.\n");
+ status = -EINVAL;
+ return status;
+ }
+
+ last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
status = msi_cache_init();
if (status < 0) {
pci_msi_enable = 0;
printk(KERN_WARNING "PCI: MSI cache init failed\n");
return status;
}
- last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
+
if (last_alloc_vector < 0) {
pci_msi_enable = 0;
printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
int pci_save_msix_state(struct pci_dev *dev)
{
int pos;
+ int temp;
+ int vector, head, tail = 0;
u16 control;
struct pci_cap_saved_state *save_state;
if (pos <= 0 || dev->no_msi)
return 0;
+ /* save the capability */
pci_read_config_word(dev, msi_control_reg(pos), &control);
if (!(control & PCI_MSIX_FLAGS_ENABLE))
return 0;
}
*((u16 *)&save_state->data[0]) = control;
+ /* save the table */
+ temp = dev->irq;
+ if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
+ kfree(save_state);
+ return -EINVAL;
+ }
+
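+ /*
+ * Walk the device's list of MSI-X vectors and save each entry's
+ * address/data registers so they can be restored on resume.
+ */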
+ vector = head = dev->irq;
+ while (head != tail) {
+ int j;
+ void __iomem *base;
+ struct msi_desc *entry;
+
+ entry = msi_desc[vector];
+ base = entry->mask_base;
+ j = entry->msi_attrib.entry_nr;
+
+ entry->address_lo_save =
+ readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
+ entry->address_hi_save =
+ readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
+ entry->data_save =
+ readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_DATA_OFFSET);
+
+ tail = msi_desc[vector]->link.tail;
+ vector = tail;
+ }
+ dev->irq = temp;
+
disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
save_state->cap_nr = PCI_CAP_ID_MSIX;
pci_add_saved_cap(dev, save_state);
int vector, head, tail = 0;
void __iomem *base;
int j;
- struct msg_address address;
- struct msg_data data;
struct msi_desc *entry;
int temp;
struct pci_cap_saved_state *save_state;
base = entry->mask_base;
j = entry->msi_attrib.entry_nr;
- msi_address_init(&address);
- msi_data_init(&data, vector);
-
- address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= entry->msi_attrib.current_cpu <<
- MSI_TARGET_CPU_SHIFT;
-
- writel(address.lo_address.value,
+ writel(entry->address_lo_save,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address,
+ writel(entry->address_hi_save,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data,
+ writel(entry->data_save,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
}
#endif
-static void msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
+static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
- struct msg_address address;
- struct msg_data data;
+ int status;
+ u32 address_hi;
+ u32 address_lo;
+ u32 data;
int pos, vector = dev->irq;
u16 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
pci_read_config_word(dev, msi_control_reg(pos), &control);
+
/* Configure MSI capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- address.lo_address.value);
+ status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
+ if (status < 0)
+ return status;
+
+ pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
if (is_64bit_address(control)) {
pci_write_config_dword(dev,
- msi_upper_address_reg(pos), address.hi_address);
+ msi_upper_address_reg(pos), address_hi);
pci_write_config_word(dev,
- msi_data_reg(pos, 1), *((u32*)&data));
+ msi_data_reg(pos, 1), data);
} else
pci_write_config_word(dev,
- msi_data_reg(pos, 0), *((u32*)&data));
+ msi_data_reg(pos, 0), data);
if (entry->msi_attrib.maskbit) {
unsigned int maskbits, temp;
/* All MSIs are unmasked by default, Mask them all */
msi_mask_bits_reg(pos, is_64bit_address(control)),
maskbits);
}
+
+ return 0;
}
/**
**/
static int msi_capability_init(struct pci_dev *dev)
{
+ int status;
struct msi_desc *entry;
int pos, vector;
u16 control;
/* Replace with MSI handler */
irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
/* Configure MSI capability structure */
- msi_register_init(dev, entry);
+ status = msi_register_init(dev, entry);
+ if (status != 0) {
+ dev->irq = entry->msi_attrib.default_vector;
+ kmem_cache_free(msi_cachep, entry);
+ return status;
+ }
attach_msi_entry(entry, vector);
/* Set MSI enabled bits */
struct msix_entry *entries, int nvec)
{
struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
- struct msg_address address;
- struct msg_data data;
+ u32 address_hi;
+ u32 address_lo;
+ u32 data;
+ int status;
int vector, pos, i, j, nr_entries, temp = 0;
unsigned long phys_addr;
u32 table_offset;
/* Replace with MSI-X handler */
irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
/* Configure MSI-X capability structure */
- msi_address_init(&address);
- msi_data_init(&data, vector);
- entry->msi_attrib.current_cpu =
- ((address.lo_address.u.dest_id >>
- MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
- writel(address.lo_address.value,
+ status = msi_ops->setup(dev, vector,
+ &address_hi,
+ &address_lo,
+ &data);
+ if (status < 0)
+ break;
+
+ writel(address_lo,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
- writel(address.hi_address,
+ writel(address_hi,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
- writel(*(u32*)&data,
+ writel(data,
base + j * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_DATA_OFFSET);
attach_msi_entry(entry, vector);
vector_irq[dev->irq] = -1;
nr_released_vectors--;
spin_unlock_irqrestore(&msi_lock, flags);
- msi_register_init(dev, msi_desc[dev->irq]);
- enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
- return 0;
+ status = msi_register_init(dev, msi_desc[dev->irq]);
+ if (status == 0)
+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
+ return status;
}
spin_unlock_irqrestore(&msi_lock, flags);
dev->irq = temp;
void __iomem *base;
unsigned long flags;
+ msi_ops->teardown(vector);
+
spin_lock_irqsave(&msi_lock, flags);
entry = msi_desc[vector];
if (!entry || entry->dev != dev) {
#ifndef MSI_H
#define MSI_H
+/*
+ * MSI operation vector. Used by the msi core code (drivers/pci/msi.c)
+ * to abstract platform-specific tasks relating to MSI address generation
+ * and resource management.
+ */
+struct msi_ops {
+ /**
+ * setup - generate an MSI bus address and data for a given vector
+ * @pdev: PCI device context (in)
+ * @vector: vector allocated by the msi core (in)
+ * @addr_hi: upper 32 bits of PCI bus MSI address (out)
+ * @addr_lo: lower 32 bits of PCI bus MSI address (out)
+ * @data: MSI data payload (out)
+ *
+ * Description: The setup op generates a PCI bus address and data
+ * value which the msi core will program into the card's MSI
+ * capability registers. The setup routine is responsible for picking
+ * an initial cpu to target the MSI at, for examining pdev to
+ * determine the MSI capabilities of the card and generating a
+ * suitable address/data pair, and for allocating and tracking any
+ * system resources it needs to route the MSI to that cpu, associating
+ * those resources with the passed in vector.
+ *
+ * Returns 0 if the MSI address/data was successfully set up.
+ **/
+
+ int (*setup) (struct pci_dev *pdev, unsigned int vector,
+ u32 *addr_hi, u32 *addr_lo, u32 *data);
+
+ /**
+ * teardown - release resources allocated by setup
+ * @vector: vector context for resources (in)
+ *
+ * Description: The teardown op releases any resources that the setup
+ * routine allocated for the passed in vector.
+ **/
+
+ void (*teardown) (unsigned int vector);
+
+ /**
+ * target - retarget an MSI at a different cpu
+ * @vector: vector context for resources (in)
+ * @cpu: new cpu to direct vector at (in)
+ * @addr_hi: new value of PCI bus upper 32 bits (in/out)
+ * @addr_lo: new value of PCI bus lower 32 bits (in/out)
+ *
+ * Description: The target op retargets an MSI vector at a different
+ * cpu. addr_hi/addr_lo coming in are the existing values that the
+ * msi core has programmed into the card. The target code is
+ * responsible for freeing any resources associated with the old
+ * address, and for generating a new PCI bus addr_hi/addr_lo that
+ * will redirect the vector at the indicated cpu.
+ **/
+
+ void (*target) (unsigned int vector, unsigned int cpu,
+ u32 *addr_hi, u32 *addr_lo);
+};
+
+extern int msi_register(struct msi_ops *ops);
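+
+/*
+ * Simplified sketch of how the msi core drives these ops (see
+ * drivers/pci/msi.c; register programming and error handling omitted):
+ *
+ *	u32 addr_hi, addr_lo, data;
+ *
+ *	msi_ops->setup(pdev, vector, &addr_hi, &addr_lo, &data);
+ *		program addr_hi/addr_lo/data into the MSI capability
+ *		or MSI-X table entry
+ *
+ *	msi_ops->target(vector, dest_cpu, &addr_hi, &addr_lo);
+ *		rewrite the address registers when set_msi_affinity()
+ *		retargets the vector
+ *
+ *	msi_ops->teardown(vector);
+ *		release per-vector resources when the vector is freed
+ */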
+
#include <asm/msi.h>
/*
#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
-/*
- * MSI Defined Data Structures
- */
-#define MSI_ADDRESS_HEADER 0xfee
-#define MSI_ADDRESS_HEADER_SHIFT 12
-#define MSI_ADDRESS_HEADER_MASK 0xfff000
-#define MSI_ADDRESS_DEST_ID_MASK 0xfff0000f
-#define MSI_TARGET_CPU_MASK 0xff
-#define MSI_DELIVERY_MODE 0
-#define MSI_LEVEL_MODE 1 /* Edge always assert */
-#define MSI_TRIGGER_MODE 0 /* MSI is edge sensitive */
-#define MSI_PHYSICAL_MODE 0
-#define MSI_LOGICAL_MODE 1
-#define MSI_REDIRECTION_HINT_MODE 0
-
-struct msg_data {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 vector : 8;
- __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
- __u32 reserved_1 : 3;
- __u32 level : 1; /* 0: deassert | 1: assert */
- __u32 trigger : 1; /* 0: edge | 1: level */
- __u32 reserved_2 : 16;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 reserved_2 : 16;
- __u32 trigger : 1; /* 0: edge | 1: level */
- __u32 level : 1; /* 0: deassert | 1: assert */
- __u32 reserved_1 : 3;
- __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */
- __u32 vector : 8;
-#else
-#error "Bitfield endianness not defined! Check your byteorder.h"
-#endif
-} __attribute__ ((packed));
-
-struct msg_address {
- union {
- struct {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 reserved_1 : 2;
- __u32 dest_mode : 1; /*0:physic | 1:logic */
- __u32 redirection_hint: 1; /*0: dedicated CPU
- 1: lowest priority */
- __u32 reserved_2 : 4;
- __u32 dest_id : 24; /* Destination ID */
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u32 dest_id : 24; /* Destination ID */
- __u32 reserved_2 : 4;
- __u32 redirection_hint: 1; /*0: dedicated CPU
- 1: lowest priority */
- __u32 dest_mode : 1; /*0:physic | 1:logic */
- __u32 reserved_1 : 2;
-#else
-#error "Bitfield endianness not defined! Check your byteorder.h"
-#endif
- }u;
- __u32 value;
- }lo_address;
- __u32 hi_address;
-} __attribute__ ((packed));
-
struct msi_desc {
struct {
__u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
__u8 reserved: 1; /* reserved */
__u8 entry_nr; /* specific enabled entry */
__u8 default_vector; /* default pre-assigned vector */
- __u8 current_cpu; /* current destination cpu */
+ __u8 unused; /* formerly current destination cpu */
}msi_attrib;
struct {
void __iomem *mask_base;
struct pci_dev *dev;
+
+#ifdef CONFIG_PM
+ /* PM save area for MSIX address/data */
+
+ u32 address_hi_save;
+ u32 address_lo_save;
+ u32 data_save;
+#endif
};
#endif /* MSI_H */