md/raid1: add documentation to r1_private_data_s data structure.
authorNeilBrown <neilb@suse.de>
Fri, 7 Oct 2011 03:22:33 +0000 (14:22 +1100)
committerNeilBrown <neilb@suse.de>
Fri, 7 Oct 2011 03:22:33 +0000 (14:22 +1100)
There wasn't much and it was inconsistent.
Also rearrange fields to keep related fields together.

Reported-by: Aapo Laine <aapo.laine@shiftmail.org>
Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/raid1.h

index e0d676b4897498d22390600012da67e751b185ea..a3d5483873a2ae549370e8cea69d4d14fe753d41 100644 (file)
@@ -28,42 +28,67 @@ struct r1_private_data_s {
        mddev_t                 *mddev;
        mirror_info_t           *mirrors;
        int                     raid_disks;
+
+       /* When choosing the best device for a read (read_balance())
+        * we try to keep sequential reads on the same device
+        * using 'last_used' and 'next_seq_sect'
+        */
        int                     last_used;
        sector_t                next_seq_sect;
+       /* During resync, read_balancing is only allowed on the part
+        * of the array that has been resynced.  'next_resync' tells us
+        * where that is.
+        */
+       sector_t                next_resync;
+
        spinlock_t              device_lock;
 
+       /* list of 'r1bio_t' that need to be processed by raid1d, whether
+        * to retry a read, write out a resync or recovery block, or
+        * anything else.
+        */
        struct list_head        retry_list;
-       /* queue pending writes and submit them on unplug */
-       struct bio_list         pending_bio_list;
 
-       /* for use when syncing mirrors: */
+       /* queue pending writes to be submitted on unplug */
+       struct bio_list         pending_bio_list;
 
+       /* for use when syncing mirrors:
+        * We don't allow both normal IO and resync/recovery IO at
+        * the same time - resync/recovery can only happen when there
+        * is no other IO.  So when either is active, the other has to wait.
+        * See a more detailed description in raid1.c near raise_barrier().
+        */
+       wait_queue_head_t       wait_barrier;
        spinlock_t              resync_lock;
        int                     nr_pending;
        int                     nr_waiting;
        int                     nr_queued;
        int                     barrier;
-       sector_t                next_resync;
-       int                     fullsync;  /* set to 1 if a full sync is needed,
-                                           * (fresh device added).
-                                           * Cleared when a sync completes.
-                                           */
-       int                     recovery_disabled; /* when the same as
-                                                   * mddev->recovery_disabled
-                                                   * we don't allow recovery
-                                                   * to be attempted as we
-                                                   * expect a read error
-                                                   */
 
-       wait_queue_head_t       wait_barrier;
+       /* Set to 1 if a full sync is needed, (fresh device added).
+        * Cleared when a sync completes.
+        */
+       int                     fullsync;
 
-       struct pool_info        *poolinfo;
+       /* When the same as mddev->recovery_disabled we don't allow
+        * recovery to be attempted as we expect a read error.
+        */
+       int                     recovery_disabled;
 
-       struct page             *tmppage;
 
+       /* poolinfo contains information about the content of the
+        * mempools - it changes when the array grows or shrinks
+        */
+       struct pool_info        *poolinfo;
        mempool_t *r1bio_pool;
        mempool_t *r1buf_pool;
 
+       /* temporary buffer for synchronous IO when attempting to repair
+        * a read error.
+        */
+       struct page             *tmppage;
+
+
        /* When taking over an array from a different personality, we store
         * the new thread here until we fully activate the array.
         */