diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 393c73c47f87280678f4db2263c44b7f1fca2a66..91c25706fa8334dad0768f9ddda2d521fd84219c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,8 +31,7 @@
 #include "chp.h"
 
 int css_init_done = 0;
-static int need_reprobe = 0;
-static int max_ssid = 0;
+int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
 
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
        int ret;
        struct subchannel *sch;
 
-       sch = css_alloc_subchannel(schid);
-       if (IS_ERR(sch))
-               return PTR_ERR(sch);
+       if (cio_is_console(schid))
+               sch = cio_get_console_subchannel();
+       else {
+               sch = css_alloc_subchannel(schid);
+               if (IS_ERR(sch))
+                       return PTR_ERR(sch);
+       }
        ret = css_register_subchannel(sch);
-       if (ret)
-               put_device(&sch->dev);
+       if (ret) {
+               if (!cio_is_console(schid))
+                       put_device(&sch->dev);
+       }
        return ret;
 }
 
@@ -409,10 +414,14 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
 
 static int __init slow_subchannel_init(void)
 {
        spin_lock_init(&slow_subchannel_lock);
+       atomic_set(&css_eval_scheduled, 0);
+       init_waitqueue_head(&css_eval_wq);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -468,9 +477,17 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 
 static void css_slow_path_func(struct work_struct *unused)
 {
+       unsigned long flags;
+
        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
+       spin_lock_irqsave(&slow_subchannel_lock, flags);
+       if (idset_is_empty(slow_subchannel_set)) {
+               atomic_set(&css_eval_scheduled, 0);
+               wake_up(&css_eval_wq);
+       }
+       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -482,6 +499,7 @@ void css_schedule_eval(struct subchannel_id schid)
 
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
+       atomic_set(&css_eval_scheduled, 1);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
@@ -492,80 +510,53 @@ void css_schedule_eval_all(void)
 
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
+       atomic_set(&css_eval_scheduled, 1);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-void css_wait_for_slow_path(void)
+static int __unset_registered(struct device *dev, void *data)
 {
-       flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
-       int ret;
-
-       CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
-                     schid.ssid, schid.sch_no);
-       if (need_reprobe)
-               return -EAGAIN;
-
-       ret = css_probe_device(schid);
-       switch (ret) {
-       case 0:
-               break;
-       case -ENXIO:
-       case -ENOMEM:
-       case -EIO:
-               /* These should abort looping */
-               break;
-       default:
-               ret = 0;
-       }
-
-       return ret;
-}
+       struct idset *set = data;
+       struct subchannel *sch = to_subchannel(dev);
 
-static void reprobe_after_idle(struct work_struct *unused)
-{
-       /* Make sure initial subchannel scan is done. */
-       wait_event(ccw_device_init_wq,
-                  atomic_read(&ccw_device_init_count) == 0);
-       if (need_reprobe)
-               css_schedule_reprobe();
+       idset_sch_del(set, sch->schid);
+       return 0;
 }
 
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(void)
 {
-       int ret;
-
-       CIO_MSG_EVENT(4, "reprobe start\n");
+       unsigned long flags;
+       struct idset *unreg_set;
 
-       /* Make sure initial subchannel scan is done. */
-       if (atomic_read(&ccw_device_init_count) != 0) {
-               queue_work(ccw_device_work, &reprobe_idle_work);
+       /* Find unregistered subchannels. */
+       unreg_set = idset_sch_new();
+       if (!unreg_set) {
+               /* Fallback. */
+               css_schedule_eval_all();
                return;
        }
-       need_reprobe = 0;
-       ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
-       CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
-                     need_reprobe);
+       idset_fill(unreg_set);
+       bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+       /* Apply to slow_subchannel_set. */
+       spin_lock_irqsave(&slow_subchannel_lock, flags);
+       idset_add_set(slow_subchannel_set, unreg_set);
+       atomic_set(&css_eval_scheduled, 1);
+       queue_work(slow_path_wq, &slow_path_work);
+       spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+       idset_free(unreg_set);
 }
 
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+       flush_workqueue(slow_path_wq);
+}
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-       need_reprobe = 1;
-       queue_work(slow_path_wq, &css_reprobe_work);
+       css_schedule_eval_all_unreg();
 }
-
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
@@ -601,49 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
        css_evaluate_subchannel(mchk_schid, 0);
 }
 
-static int __init
-__init_channel_subsystem(struct subchannel_id schid, void *data)
-{
-       struct subchannel *sch;
-       int ret;
-
-       if (cio_is_console(schid))
-               sch = cio_get_console_subchannel();
-       else {
-               sch = css_alloc_subchannel(schid);
-               if (IS_ERR(sch))
-                       ret = PTR_ERR(sch);
-               else
-                       ret = 0;
-               switch (ret) {
-               case 0:
-                       break;
-               case -ENOMEM:
-                       panic("Out of memory in init_channel_subsystem\n");
-               /* -ENXIO: no more subchannels. */
-               case -ENXIO:
-                       return ret;
-               /* -EIO: this subchannel set not supported. */
-               case -EIO:
-                       return ret;
-               default:
-                       return 0;
-               }
-       }
-       /*
-        * We register ALL valid subchannels in ioinfo, even those
-        * that have been present before init_channel_subsystem.
-        * These subchannels can't have been registered yet (kmalloc
-        * not working) so we do it now. This is true e.g. for the
-        * console subchannel.
-        */
-       if (css_register_subchannel(sch)) {
-               if (!cio_is_console(schid))
-                       put_device(&sch->dev);
-       }
-       return 0;
-}
-
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
@@ -854,19 +802,30 @@ static struct notifier_block css_power_notifier = {
  * The struct subchannel's are created during probing (except for the
  * static console subchannel).
  */
-static int __init
-init_channel_subsystem (void)
+static int __init css_bus_init(void)
 {
        int ret, i;
 
        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
-               goto out; /* No need to continue. */
+               goto out;
 
        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;
 
+       /* Try to enable MSS. */
+       ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+       switch (ret) {
+       case 0: /* Success. */
+               max_ssid = __MAX_SSID;
+               break;
+       case -ENOMEM:
+               goto out;
+       default:
+               max_ssid = 0;
+       }
+
        ret = slow_subchannel_init();
        if (ret)
                goto out;
@@ -878,17 +837,6 @@ init_channel_subsystem (void)
        if ((ret = bus_register(&css_bus_type)))
                goto out;
 
-       /* Try to enable MSS. */
-       ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
-       switch (ret) {
-       case 0: /* Success. */
-               max_ssid = __MAX_SSID;
-               break;
-       case -ENOMEM:
-               goto out_bus;
-       default:
-               max_ssid = 0;
-       }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;
@@ -934,7 +882,6 @@ init_channel_subsystem (void)
        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);
 
-       for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
 out_file:
        if (css_chsc_characteristics.secm)
@@ -955,17 +902,76 @@ out_unregister:
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
-out_bus:
        bus_unregister(&css_bus_type);
 out:
        crw_unregister_handler(CRW_RSC_CSS);
        chsc_free_sei_area();
-       kfree(slow_subchannel_set);
+       idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
 }
 
+static void __init css_bus_cleanup(void)
+{
+       struct channel_subsystem *css;
+       int i;
+
+       for (i = 0; i <= __MAX_CSSID; i++) {
+               css = channel_subsystems[i];
+               device_unregister(&css->pseudo_subchannel->dev);
+               css->pseudo_subchannel = NULL;
+               if (css_chsc_characteristics.secm)
+                       device_remove_file(&css->device, &dev_attr_cm_enable);
+               device_unregister(&css->device);
+       }
+       bus_unregister(&css_bus_type);
+       crw_unregister_handler(CRW_RSC_CSS);
+       chsc_free_sei_area();
+       idset_free(slow_subchannel_set);
+       isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+       int ret;
+
+       ret = css_bus_init();
+       if (ret)
+               return ret;
+
+       ret = io_subchannel_init();
+       if (ret)
+               css_bus_cleanup();
+
+       return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+       struct css_driver *cssdrv = to_cssdriver(drv);
+
+       if (cssdrv->settle)
+               cssdrv->settle();
+       return 0;
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup if the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+       /* Start initial subchannel evaluation. */
+       css_schedule_eval_all();
+       /* Wait for the evaluation of subchannels to finish. */
+       wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+       /* Wait for the subchannel type specific initialization to finish */
+       return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1135,7 +1141,5 @@ void css_driver_unregister(struct css_driver *cdrv)
 }
 EXPORT_SYMBOL_GPL(css_driver_unregister);
 
-subsys_initcall(init_channel_subsystem);
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);