	return ret;
}

+static ssize_t queue_random_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_add_random(q), page);
+}
+
+static ssize_t queue_random_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	unsigned long val;
+	ssize_t ret = queue_var_store(&val, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (val)
+		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
@@ ... @@
	.store = queue_iostats_store,
};

+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_random_show,
+	.store = queue_random_store,
+};
+
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
	NULL,
};
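With queue_random_entry wired into default_attrs, the knob surfaces as /sys/block/<dev>/queue/add_random, world-readable and root-writable per the S_IRUGO | S_IWUSR mode above. A minimal userspace sketch of reading and then clearing it follows; the device name "sda" is an assumption for illustration, and a plain echo 0 > /sys/block/sda/queue/add_random from a root shell does the same job:

/*
 * Sketch: exercise the new add_random knob from userspace. The device
 * name "sda" is an example, not something the patch assumes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/add_random";
	char buf[8];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* queue_random_show() formats the flag as "0\n" or "1\n". */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("add_random = %s", buf);
	}

	/* Rewind, then let queue_random_store() parse "0" and clear the flag. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "0", 1) != 1)
		perror("write");

	close(fd);
	return 0;
}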
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ ... @@
#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP) |		\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
@@ ... @@
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
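Since QUEUE_FLAG_ADD_RANDOM is also folded into QUEUE_FLAG_DEFAULT, queues keep contributing to the entropy pool unless the knob is explicitly cleared. The consumer of the flag is not shown in these hunks; the "Contributes to random pool" comment implies the request completion path tests it before feeding disk timing into the pool. A sketch of what that gate looks like, under that assumption: the function name complete_request_sketch() is illustrative, while blk_queue_add_random() is the macro added above and add_disk_randomness() is the existing entropy hook in drivers/char/random.c.

/*
 * Sketch only, not a hunk from this patch: gate the entropy
 * contribution on the new per-queue flag at I/O completion time.
 */
static void complete_request_sketch(struct request_queue *q,
				    struct request *req)
{
	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);
}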