int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev);
int (*adapter_restart)(struct aac_dev *dev, int bled);
+ void (*adapter_start)(struct aac_dev *dev);
/* Transport operations */
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
irq_handler_t adapter_intr;
#define aac_adapter_restart(dev,bled) \
(dev)->a_ops.adapter_restart(dev,bled)
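+/* Start (or restart) adapter/firmware communication, e.g. on the resume path */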
+#define aac_adapter_start(dev) \
+ ((dev)->a_ops.adapter_start(dev))
+
#define aac_adapter_ioremap(dev, size) \
(dev)->a_ops.adapter_ioremap(dev, size)
int aac_src_init(struct aac_dev *dev);
int aac_srcv_init(struct aac_dev *dev);
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
+void aac_define_int_mode(struct aac_dev *dev);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
#include "aacraid.h"
-static void aac_define_int_mode(struct aac_dev *dev);
-
struct aac_common aac_config = {
.irq_mod = 1
};
return 0;
}
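+/**
+ * aac_define_int_mode - select the adapter interrupt mode
+ * @dev: adapter to configure
+ *
+ * Picks MSI-X when the firmware and platform allow it, capping the vector
+ * count at the number of online CPUs and AAC_MAX_MSIX, and falls back to
+ * MSI and then to legacy INTx.  Also sets vector_cap, the number of
+ * outstanding FIBs each vector may service.
+ */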
+void aac_define_int_mode(struct aac_dev *dev)
+{
+ int i, msi_count;
+
+ msi_count = i = 0;
+ /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
+ if (dev->max_msix == 0 ||
+ dev->pdev->device == PMC_DEVICE_S6 ||
+ dev->sync_mode) {
+ dev->max_msix = 1;
+ dev->vector_cap =
+ dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB;
+ return;
+ }
+
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ msi_count = min(dev->max_msix,
+ (unsigned int)num_online_cpus());
+
+ dev->max_msix = msi_count;
+
+ if (msi_count > AAC_MAX_MSIX)
+ msi_count = AAC_MAX_MSIX;
+
+ for (i = 0; i < msi_count; i++)
+ dev->msixentry[i].entry = i;
+
+ if (msi_count > 1 &&
+ pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+ i = pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count);
+ /* Check how many MSIX vectors are allocated */
+ if (i >= 0) {
+ dev->msi_enabled = 1;
+ if (i) {
+ msi_count = i;
+ if (pci_enable_msix(dev->pdev,
+ dev->msixentry,
+ msi_count)) {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+ } else {
+ dev->msi_enabled = 0;
+ printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled) {
+ msi_count = 1;
+ i = pci_enable_msi(dev->pdev);
+
+ if (!i) {
+ dev->msi_enabled = 1;
+ dev->msi = 1;
+ } else {
+ printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
+ dev->name, dev->id, i);
+ }
+ }
+
+ if (!dev->msi_enabled)
+ dev->max_msix = msi_count = 1;
+ else {
+ if (dev->max_msix > msi_count)
+ dev->max_msix = msi_count;
+ }
+ dev->vector_cap =
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
+ msi_count;
+}
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
u32 status[5];
return dev;
}
-static void aac_define_int_mode(struct aac_dev *dev)
-{
-
- int i, msi_count;
-
- msi_count = i = 0;
- /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
- if (dev->max_msix == 0 ||
- dev->pdev->device == PMC_DEVICE_S6 ||
- dev->sync_mode) {
- dev->max_msix = 1;
- dev->vector_cap =
- dev->scsi_host_ptr->can_queue +
- AAC_NUM_MGT_FIB;
- return;
- }
-
- msi_count = min(dev->max_msix,
- (unsigned int)num_online_cpus());
-
- dev->max_msix = msi_count;
-
- if (msi_count > AAC_MAX_MSIX)
- msi_count = AAC_MAX_MSIX;
-
- for (i = 0; i < msi_count; i++)
- dev->msixentry[i].entry = i;
-
- if (msi_count > 1 &&
- pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
- i = pci_enable_msix(dev->pdev,
- dev->msixentry,
- msi_count);
- /* Check how many MSIX vectors are allocated */
- if (i >= 0) {
- dev->msi_enabled = 1;
- if (i) {
- msi_count = i;
- if (pci_enable_msix(dev->pdev,
- dev->msixentry,
- msi_count)) {
- dev->msi_enabled = 0;
- printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
- dev->name, dev->id, i);
- }
- }
- } else {
- dev->msi_enabled = 0;
- printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
- dev->name, dev->id, i);
- }
- }
-
- if (!dev->msi_enabled) {
- msi_count = 1;
- i = pci_enable_msi(dev->pdev);
-
- if (!i) {
- dev->msi_enabled = 1;
- dev->msi = 1;
- } else {
- printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
- dev->name, dev->id, i);
- }
- }
-
- if (!dev->msi_enabled)
- dev->max_msix = msi_count = 1;
- else {
- if (dev->max_msix > msi_count)
- dev->max_msix = msi_count;
- }
- dev->vector_cap =
- (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
- msi_count;
-}
return error;
}
+#if (defined(CONFIG_PM))
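+/**
+ * aac_release_resources - release adapter interrupt resources
+ * @aac: adapter being quiesced
+ *
+ * Disables adapter interrupts, frees the registered IRQ handler(s) and
+ * turns off MSI or MSI-X.  Used on the suspend path.
+ */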
+void aac_release_resources(struct aac_dev *aac)
+{
+ int i;
+
+ aac_adapter_disable_int(aac);
+ if (aac->pdev->device == PMC_DEVICE_S6 ||
+ aac->pdev->device == PMC_DEVICE_S7 ||
+ aac->pdev->device == PMC_DEVICE_S8 ||
+ aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac->max_msix > 1) {
+ for (i = 0; i < aac->max_msix; i++)
+ free_irq(aac->msixentry[i].vector,
+ &(aac->aac_msix[i]));
+ } else {
+ free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
+ }
+ } else {
+ free_irq(aac->pdev->irq, aac);
+ }
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
+ else if (aac->max_msix > 1)
+ pci_disable_msix(aac->pdev);
+}
+
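+/**
+ * aac_acquire_resources - reacquire adapter interrupt resources
+ * @dev: adapter being resumed
+ *
+ * Waits for the adapter firmware to report KERNEL_UP_AND_RUNNING,
+ * re-selects the interrupt mode, re-registers the IRQ handler(s) and,
+ * unless the adapter runs in sync mode, restarts adapter communication
+ * via aac_adapter_start().
+ */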
+static int aac_acquire_resources(struct aac_dev *dev)
+{
+ int i, j;
+ int instance = dev->id;
+ const char *name = dev->name;
+ unsigned long status;
+ /*
+ * First clear out all interrupts. Then enable the ones that we
+ * can handle.
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
+ || status == 0xffffffff)
+ msleep(20);
+
+ aac_adapter_disable_int(dev);
+ aac_adapter_enable_int(dev);
+
+ if ((dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9))
+ aac_define_int_mode(dev);
+
+ if (dev->msi_enabled)
+ aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+
+ if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+ for (i = 0; i < dev->max_msix; i++) {
+ dev->aac_msix[i].vector_no = i;
+ dev->aac_msix[i].dev = dev;
+
+ if (request_irq(dev->msixentry[i].vector,
+ dev->a_ops.adapter_intr,
+ 0, "aacraid", &(dev->aac_msix[i]))) {
+ printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
+ name, instance, i);
+ for (j = 0 ; j < i ; j++)
+ free_irq(dev->msixentry[j].vector,
+ &(dev->aac_msix[j]));
+ pci_disable_msix(dev->pdev);
+ goto error_iounmap;
+ }
+ }
+ } else {
+ dev->aac_msix[0].vector_no = 0;
+ dev->aac_msix[0].dev = dev;
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid",
+ &(dev->aac_msix[0])) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ }
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode)
+ aac_adapter_start(dev);
+ return 0;
+
+error_iounmap:
+ return -1;
+}
+
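+/**
+ * aac_suspend - PCI power-management suspend handler
+ *
+ * Blocks further SCSI requests, tells the firmware to shut down,
+ * releases interrupt resources and puts the device into the requested
+ * low-power state.
+ */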
+static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+ scsi_block_requests(shost);
+ aac_send_shutdown(aac);
+
+ aac_release_resources(aac);
+
+ pci_set_drvdata(pdev, shost);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
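+/**
+ * aac_resume - PCI power-management resume handler
+ *
+ * Restores PCI state, re-enables the device, reacquires interrupt
+ * resources and unblocks SCSI requests; returns -ENODEV if the device
+ * cannot be brought back up.
+ */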
+static int aac_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ int r;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ r = pci_enable_device(pdev);
+
+ if (r)
+ goto fail_device;
+
+ pci_set_master(pdev);
+ if (aac_acquire_resources(aac))
+ goto fail_device;
+ scsi_unblock_requests(shost);
+
+ return 0;
+
+fail_device:
+ printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
+ scsi_host_put(shost);
+ pci_disable_device(pdev);
+ return -ENODEV;
+}
+#endif
+
static void aac_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *shost = pci_get_drvdata(dev);
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
.remove = aac_remove_one,
+#if (defined(CONFIG_PM))
+ .suspend = aac_suspend,
+ .resume = aac_resume,
+#endif
.shutdown = aac_shutdown,
};
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_check_health = aac_rx_check_health;
dev->a_ops.adapter_restart = aac_rx_restart_adapter;
+ dev->a_ops.adapter_start = aac_rx_start_adapter;
/*
* First clear out all interrupts. Then enable the one's that we
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
dev->a_ops.adapter_restart = aac_src_restart_adapter;
+ dev->a_ops.adapter_start = aac_src_start_adapter;
/*
* First clear out all interrupts. Then enable the one's that we
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
dev->a_ops.adapter_restart = aac_src_restart_adapter;
+ dev->a_ops.adapter_start = aac_src_start_adapter;
/*
* First clear out all interrupts. Then enable the one's that we