Add round_jiffies_up and related routines
block/blk-timeout.c
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
}

late_initcall(fail_io_timeout_debugfs);

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */

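/*
 * Usage sketch (assumption: genhd.c of this era registers the attribute
 * pair above as "io-timeout-fail" on each disk; the debugfs knobs come
 * from the generic fault-injection framework):
 *
 *   echo 1  > /sys/kernel/debug/fail_io_timeout/probability
 *   echo -1 > /sys/kernel/debug/fail_io_timeout/times
 *   echo 1  > /sys/block/sda/io-timeout-fail
 *
 * With that armed, blk_should_fake_timeout() starts returning non-zero
 * for requests on that queue, letting the timeout path be exercised
 * without real hardware stalls.
 */
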
/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req: request that we are canceling timer for
 */
void blk_delete_timer(struct request *req)
{
	struct request_queue *q = req->q;

	/*
	 * Nothing to detach
	 */
	if (!q->rq_timed_out_fn || !req->deadline)
		return;

	list_del_init(&req->timeout_list);

	if (list_empty(&q->timeout_list))
		del_timer(&q->timeout);
}

static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now; in the future we could send
		 * an abort message for the command here and move more of the
		 * generic SCSI EH code into the block layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

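/*
 * For illustration only: a hypothetical LLD timeout handler showing the
 * rq_timed_out_fn contract consumed by blk_rq_timed_out() above. The
 * mydrv_* names are made up; only the return values are real.
 */
#if 0
static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	struct mydrv_cmd *cmd = rq->special;	/* hypothetical per-rq data */

	if (mydrv_cmd_finished(cmd))	/* hardware completed it after all */
		return BLK_EH_HANDLED;	/* block layer completes the rq */
	if (mydrv_cmd_retryable(cmd))
		return BLK_EH_RESET_TIMER;	/* re-arm and wait longer */
	return BLK_EH_NOT_HANDLED;	/* driver's own eh owns recovery */
}
#endif
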
void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, uninitialized_var(next), next_set = 0;
	struct request *rq, *tmp;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		}
		if (!next_set) {
			next = rq->deadline;
			next_set = 1;
		} else if (time_after(next, rq->deadline))
			next = rq->deadline;
	}

	/*
	 * Round up: round_jiffies() may round *down*, which would arm the
	 * timer before the earliest deadline and trigger early wakeups.
	 */
	if (next_set && !list_empty(&q->timeout_list))
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

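/*
 * Note on the design: rather than one kernel timer per request, the queue
 * keeps a single timer plus a timeout_list of pending requests; the handler
 * above expires whatever is due and re-arms the timer for the soonest
 * remaining deadline. This keeps the timer-wheel footprint constant no
 * matter how deep the queue is.
 */
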
/**
 * blk_abort_request - Request recovery for the specified command
 * @req: pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req: request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	if (req->timeout)
		req->deadline = jiffies + req->timeout;
	else {
		req->deadline = jiffies + q->rq_timeout;
		/*
		 * Some LLDs, like scsi, peek at the timeout to prevent a
		 * command from being retried forever.
		 */
		req->timeout = q->rq_timeout;
	}
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to the next
	 * nearest second.
	 */
	expiry = round_jiffies_up(req->deadline);

	/*
	 * We use ->deadline == 0 to detect whether a timer was added or
	 * not, so just increase to next jiffy for that specific case
	 */
	if (unlikely(!req->deadline))
		req->deadline = 1;

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}

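/*
 * For illustration only: how a driver would opt in to this machinery at
 * init time. blk_queue_rq_timed_out() and blk_queue_rq_timeout() are the
 * real setters from blk-settings.c; mydrv_timed_out is the hypothetical
 * handler sketched earlier.
 */
#if 0
static int mydrv_init_queue(struct request_queue *q)
{
	blk_queue_rq_timed_out(q, mydrv_timed_out);	/* timeout handler */
	blk_queue_rq_timeout(q, 30 * HZ);	/* default per-rq deadline */
	return 0;
}
#endif
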
/**
 * blk_abort_queue - Abort all requests on the given queue
 * @q: pointer to the queue
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
		blk_abort_request(rq);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);
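
/*
 * For illustration only: a driver that detects its controller is gone
 * (hot-unplug, firmware crash) can fail everything in flight in one shot.
 * Note blk_abort_queue() takes the queue lock itself, while a bare
 * blk_abort_request() call requires the caller to already hold it.
 */
#if 0
static void mydrv_controller_dead(struct mydrv_host *host)
{
	blk_abort_queue(host->queue);	/* hypothetical host->queue field */
}
#endif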