return atomic_read(&hctx->nr_active) < depth;
}
-static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
+static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
+ bool nowrap)
{
int tag, org_last_tag = last_tag;
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
- if (org_last_tag && last_tag) {
+ if (org_last_tag && last_tag && !nowrap) {
last_tag = org_last_tag = 0;
continue;
}
return tag;
}
+#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
+
/*
* Straight forward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
- unsigned int *tag_cache)
+ unsigned int *tag_cache, struct blk_mq_tags *tags)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
index = TAG_TO_INDEX(bt, last_tag);
for (i = 0; i < bt->map_nr; i++) {
- tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
+ tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
+ BT_ALLOC_RR(tags));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
* up using the specific cached tag.
*/
done:
- if (tag == org_last_tag) {
+ if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
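
The net effect of BT_ALLOC_RR above: the per-word search is told not to wrap (nowrap), and the cached cursor is advanced past every tag handed out, not only when the cached tag itself was used, so tags are issued in strictly increasing order and only wrap at the end of the map. A minimal user-space sketch of that behaviour (illustration only, not kernel code; it collapses the whole map into a single word and ignores the reserved-tag and waitqueue handling):

	#include <stdio.h>

	#define DEPTH 8

	static unsigned long word;	/* bit set == tag busy */
	static unsigned int cursor;	/* analogue of the cached last_tag */

	static int rr_get_tag(void)
	{
		unsigned int i, tag;

		for (i = 0; i < DEPTH; i++) {
			tag = (cursor + i) % DEPTH;
			if (!(word & (1UL << tag))) {
				word |= 1UL << tag;
				cursor = (tag + 1) % DEPTH;	/* always advance */
				return tag;
			}
		}
		return -1;			/* map exhausted */
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 5; i++)
			printf("got tag %d\n", rr_get_tag());

		word &= ~(1UL << 0);		/* free tag 0 again */
		printf("next tag: %d\n", rr_get_tag());	/* prints 5, not 0 */
		return 0;
	}
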
static int bt_get(struct blk_mq_alloc_data *data,
struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx,
- unsigned int *last_tag)
+ unsigned int *last_tag, struct blk_mq_tags *tags)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
return tag;
do {
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, last_tag);
+ tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
- &data->ctx->last_tag);
+ &data->ctx->last_tag, data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
return BLK_MQ_TAG_FAIL;
}
- tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
+ tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
+ data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
BUG_ON(real_tag >= tags->nr_tags);
bt_clear_tag(&tags->bitmap_tags, real_tag);
- *last_tag = real_tag;
+ if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
+ *last_tag = real_tag;
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
bt_clear_tag(&tags->breserved_tags, tag);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
- int node)
+ int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ tags->alloc_policy = alloc_policy;
+
if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
- unsigned int reserved_tags, int node)
+ unsigned int reserved_tags,
+ int node, int alloc_policy)
{
struct blk_mq_tags *tags;
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
- return blk_mq_init_bitmap_tags(tags, node);
+ return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
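
For completeness, a hypothetical caller of the widened constructor might look like the sketch below; only the blk_mq_init_tags() signature, the BLK_TAG_ALLOC_RR policy and blk_mq_free_tags() come from the patch, the depth and reserved count are made up for illustration:

	struct blk_mq_tags *tags;

	/* 64 tags, none reserved, no NUMA preference, round-robin order. */
	tags = blk_mq_init_tags(64, 0, NUMA_NO_NODE, BLK_TAG_ALLOC_RR);
	if (!tags)
		return NULL;

	/* ... use the map ... */

	blk_mq_free_tags(tags);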