struct completion kobj_complete;
/* work struct for this MC */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
struct delayed_work work;
-#else
- struct work_struct work;
-#endif
+
/* the internal state of this controller instance */
int op_state;
};
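The #if/#else blocks removed throughout this patch existed to cope with the 2.6.20 workqueue API change: INIT_WORK() lost its third (data) argument, the callback now receives the work_struct itself, and delayed execution moved to struct delayed_work. A minimal sketch of the surviving pattern, using a hypothetical demo_ctl type and the system workqueue rather than the real EDAC structures and edac_workqueue:

#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical stand-in for the EDAC control structures above; the demo_*
 * names are illustrative only and do not exist in the driver. */
struct demo_ctl {
	int op_state;
	unsigned long delay;		/* poll interval, in jiffies */
	struct delayed_work work;
};

/* 2.6.20+ callback: it receives the embedded work_struct itself and must
 * recover its container.  (Pre-2.6.20 the callback was void fn(void *ptr),
 * registered with the three-argument INIT_WORK() form this patch deletes.) */
static void demo_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work =
		container_of(work_req, struct delayed_work, work);
	struct demo_ctl *ctl = container_of(d_work, struct demo_ctl, work);

	/* ... perform the periodic check on ctl here ... */
	schedule_delayed_work(&ctl->work, ctl->delay);	/* re-arm the poll */
}

static void demo_workq_setup(struct demo_ctl *ctl, unsigned msec)
{
	ctl->delay = msecs_to_jiffies(msec);
	INIT_DELAYED_WORK(&ctl->work, demo_workq_function);
	schedule_delayed_work(&ctl->work, ctl->delay);
}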
/* the internal state of this controller instance */
int op_state;
/* work struct for this instance */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
struct delayed_work work;
-#else
- struct work_struct work;
-#endif
/* pointer to edac polling check routine:
* If NOT NULL: points to polling check routine
/* the internal state of this controller instance */
int op_state;
/* work struct for this instance */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
struct delayed_work work;
-#else
- struct work_struct work;
-#endif
/* pointer to edac polling check routine:
* If NOT NULL: points to polling check routine
* edac_device_workq_function
* performs the operation scheduled by a workq request
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static void edac_device_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = (struct delayed_work *)work_req;
struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
-#else
-static void edac_device_workq_function(void *ptr)
-{
- struct edac_device_ctl_info *edac_dev =
- (struct edac_device_ctl_info *)ptr;
-#endif
//debugf0("%s() here and running\n", __func__);
lock_device_list();
edac_dev->poll_msec = msec;
edac_calc_delay(edac_dev); /* Calc delay jiffies */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
-#else
- INIT_WORK(&edac_dev->work, edac_device_workq_function, edac_dev);
-#endif
queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
}
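A note on the recovery step in the workq function above: casting work_req straight to struct delayed_work * only works because 'work' happens to be the first member of struct delayed_work; container_of() makes the same recovery without that layout assumption. The to_edac_*_ctl_work() helpers used right after the cast are not shown in this fragment; they presumably wrap container_of(), roughly as sketched here (an assumption about edac_core.h, not a quote from it):

#include <linux/kernel.h>	/* container_of() */

/* Assumed shape of the helpers used in the workq functions in this patch:
 * map the embedded delayed_work back to its containing control structure. */
#define to_edac_device_ctl_work(w) \
	container_of(w, struct edac_device_ctl_info, work)
#define to_edac_mem_ctl_work(w) \
	container_of(w, struct mem_ctl_info, work)
#define to_edac_pci_ctl_work(w) \
	container_of(w, struct edac_pci_ctl_info, work)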
* edac_mc_workq_function
* performs the operation scheduled by a workq request
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static void edac_mc_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = (struct delayed_work *)work_req;
struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
-#else
-static void edac_mc_workq_function(void *ptr)
-{
- struct mem_ctl_info *mci = (struct mem_ctl_info *)ptr;
-#endif
mutex_lock(&mem_ctls_mutex);
{
debugf0("%s()\n", __func__);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-#else
- INIT_WORK(&mci->work, edac_mc_workq_function, mci);
-#endif
queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
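This fragment only shows the setup/queue side. The matching teardown, not shown here, typically cancels the pending work and flushes the queue so no callback is still running when the structure is freed. A sketch reusing the hypothetical demo_ctl from the earlier sketch (the real EDAC code would flush its own edac_workqueue instead of the system queue):

#include <linux/workqueue.h>

static void demo_workq_teardown(struct demo_ctl *ctl)
{
	/* cancel_delayed_work() returns 0 when the work was not pending
	 * (it may be running right now); flush to wait for it to finish. */
	if (!cancel_delayed_work(&ctl->work))
		flush_scheduled_work();
}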
* edac_pci_workq_function()
* performs the operation scheduled by a workq request
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static void edac_pci_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = (struct delayed_work *)work_req;
struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
-#else
-static void edac_pci_workq_function(void *ptr)
-{
- struct edac_pci_ctl_info *pci = ptr;
-#endif
edac_lock_pci_list();
{
debugf0("%s()\n", __func__);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
-#else
- INIT_WORK(&pci->work, edac_pci_workq_function, pci);
-#endif
queue_delayed_work(edac_workqueue, &pci->work,
msecs_to_jiffies(edac_pci_get_poll_msec()));
}
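All three setup paths queue onto edac_workqueue, whose creation is outside this fragment. A dedicated single-threaded queue is presumably created once at module init and destroyed at exit, along these lines (the queue name and the demo_* function names are illustrative, not taken from the patch):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *edac_workqueue;

static int demo_workqueue_setup(void)
{
	edac_workqueue = create_singlethread_workqueue("edac-poller");
	return edac_workqueue ? 0 : -ENODEV;
}

static void demo_workqueue_teardown(void)
{
	if (edac_workqueue) {
		flush_workqueue(edac_workqueue);
		destroy_workqueue(edac_workqueue);
		edac_workqueue = NULL;
	}
}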