/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <asm/uaccess.h>
#include <../kernel/trace/trace_output.h>

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static int __read_mostly blk_tracer_enabled;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started.
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

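/*
 * Record the current wall-clock time in the trace, so that userspace
 * tools can relate trace timestamps back to real time.
 */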
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
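/*
 * printf()-style message injection into the trace: routed to the ftrace
 * ring buffer when the in-kernel blk tracer is active, otherwise written
 * to a per-cpu message buffer and sent out through the relay channel.
 */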
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (blk_tr) {
		va_start(args, fmt);
		ftrace_vprintk(fmt, args);
		va_end(args);
		return;
	}

	if (!bt->msg_data)
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

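/*
 * Return 1 if this event should be filtered out: its action class is
 * masked off, its sector falls outside the traced LBA range, or it was
 * issued by a pid other than the one being traced.
 */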
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = {
	BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE)
};

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
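
/*
 * MASK_TC_BIT(rw, FOO) extracts the BIO_RW_FOO bit from @rw and shifts
 * it left by the net distance between the request flag bit and the
 * corresponding BLK_TC_FOO bit in the action word's category field.
 */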

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;

	if (unlikely(bt->trace_state != Blktrace_running ||
		     !blk_tracer_enabled))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tr) {
		struct trace_entry *ent;
		tracing_record_cmdline(current);

		event = ring_buffer_lock_reserve(blk_tr->buffer,
						 sizeof(*t) + pdu_len, &flags);
		if (!event)
			return;

		ent = ring_buffer_event_data(event);
		t = (struct blk_io_trace *)ent;
		pc = preempt_count();
		tracing_generic_entry_update(ent, 0, pc);
		ent->type = TRACE_BLK;
		goto record_it;
	}
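
	/*
	 * Both paths converge at the record_it label below: the ftrace
	 * path jumps there once its ring buffer event is reserved, while
	 * the classic relay path falls through after filling in the
	 * relay-only fields (sequence number, time, cpu, pid).
	 */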

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
		t->cpu = cpu;
		t->pid = pid;
record_it:
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tr) {
			ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
			if (pid != 0 &&
			    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
			    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
				__trace_stack(blk_tr, NULL, flags, 5, pc);
			trace_wake_up();
			return;
		}
	}

	local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;

static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}

static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;
	int created = 0;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
		created = 1;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else {
		/* Delete root only if we created it */
		if (created)
			blk_remove_root();
	}

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count > BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	/* NUL-terminate before handing the buffer to a "%s" format */
	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts->name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1) {
		ret = blk_register_tracepoints();
		if (ret)
			goto probe_err;
	}
	mutex_unlock(&blk_probe_mutex);

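	/*
	 * Install the new trace atomically; if another trace was set up
	 * on this queue in the meantime, put the old one back and back
	 * out with -EBUSY.
	 */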
	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
probe_err:
	atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
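			/*
			 * Make the bumped blktrace_seq visible before the
			 * state flips to running; __blk_add_trace() compares
			 * it against tsk->btrace_seq to decide when to send
			 * a per-process name notify.
			 */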
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
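		/* fall through */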
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     A device mapper or raid target sometimes needs to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static int blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
	return 0;
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

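/*
 * Build the rwbs flag string for an event: one of D/W/R/N for the data
 * direction, followed by A (readahead), B (barrier), S (sync) and
 * M (meta) modifiers where set.
 */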
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;

	if (t->action & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (t->action & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (t->action & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (t->action & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (t->action & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (t->action & BLK_TC_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

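/*
 * Variable-size PDUs are stored big-endian in the trace (see
 * blk_add_trace_split() and blk_add_trace_remap()); convert them back
 * to host byte order on output.
 */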
static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}

static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
	unsigned secs = (unsigned long)ts;
	const struct trace_entry *ent = iter->ent;
	const struct blk_io_trace *t = (const struct blk_io_trace *)ent;

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, usec_rem, ent->pid, act, rwbs);
}

static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
			      const char *act)
{
	char rwbs[6];
	fill_rwbs(rwbs, t);
	return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	const char *cmd = trace_find_cmdline(ent->pid);

	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%s]\n",
					t_sector(ent), t_sec(ent), cmd);
	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
					t_sec(ent), t_error(ent));
	return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s]\n", trace_find_cmdline(ent->pid));
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s] %llu\n", trace_find_cmdline(ent->pid),
				get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), trace_find_cmdline(ent->pid));
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU  TIMESTAMP     PID   ACT FLG\n"
		    "#  |     |     |           |     |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1)
		if (blk_register_tracepoints())
			atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled++;
	mutex_unlock(&blk_probe_mutex);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_tracer_reset(struct trace_array *tr)
{
	if (!atomic_read(&blk_probes_ref))
		return;

	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled--;
	WARN_ON(blk_tracer_enabled < 0);
	mutex_unlock(&blk_probe_mutex);

	blk_tracer_stop(tr);
}

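/*
 * Map each trace action to its one-letter and verbose names plus the
 * routine that formats the body of that event.
 */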
static struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] __read_mostly = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static int blk_trace_event_print(struct trace_seq *s, struct trace_entry *ent,
				 int flags)
{
	const struct blk_io_trace *t = (struct blk_io_trace *)ent;
	const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
	int ret;

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	const struct blk_io_trace *t;
	u16 what;
	int ret;

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	t = (const struct blk_io_trace *)iter->ent;
	what = t->action & ((1 << BLK_TC_SHIFT) - 1);

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(&iter->seq, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.latency_trace	= blk_trace_event_print,
	.raw		= trace_nop_print,
	.hex		= trace_nop_print,
	.binary		= trace_nop_print,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	kfree(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret;

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->dev = dev;
	bt->act_mask = (u16)-1;
	bt->end_lba = -1ULL;
	bt->trace_state = Blktrace_running;

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		kfree(bt);
		return -EBUSY;
	}
	return 0;
err:
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_enable_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev != NULL) {
		struct request_queue *q = bdev_get_queue(bdev);

		if (q != NULL) {
			mutex_lock(&bdev->bd_mutex);
			ret = sprintf(buf, "%u\n", !!q->blk_trace);
			mutex_unlock(&bdev->bd_mutex);
		}

		bdput(bdev);
	}

	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_enable_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	int value;
	ssize_t ret = -ENXIO;

	if (count == 0 || sscanf(buf, "%d", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	if (value)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);
	else
		ret = blk_trace_remove_queue(q);
	mutex_unlock(&bdev->bd_mutex);

	if (ret == 0)
		ret = count;
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   sysfs_blk_trace_enable_show, sysfs_blk_trace_enable_store);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

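/*
 * Parse a comma-separated list of trace category names (e.g.
 * "read,write,sync") into a BLK_TC_* bitmask; unknown names are
 * silently ignored.
 */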
static int blk_str2act_mask(const char *str)
{
	int mask = 0;
	char *copy = kstrdup(str, GFP_KERNEL), *s;

	if (copy == NULL)
		return -ENOMEM;

	s = strstrip(copy);

	while (1) {
		char *sep = strchr(s, ',');

		if (sep != NULL)
			*sep = '\0';

		if (strcasecmp(s, "barrier") == 0)
			mask |= BLK_TC_BARRIER;
		else if (strcasecmp(s, "complete") == 0)
			mask |= BLK_TC_COMPLETE;
		else if (strcasecmp(s, "fs") == 0)
			mask |= BLK_TC_FS;
		else if (strcasecmp(s, "issue") == 0)
			mask |= BLK_TC_ISSUE;
		else if (strcasecmp(s, "pc") == 0)
			mask |= BLK_TC_PC;
		else if (strcasecmp(s, "queue") == 0)
			mask |= BLK_TC_QUEUE;
		else if (strcasecmp(s, "read") == 0)
			mask |= BLK_TC_READ;
		else if (strcasecmp(s, "requeue") == 0)
			mask |= BLK_TC_REQUEUE;
		else if (strcasecmp(s, "sync") == 0)
			mask |= BLK_TC_SYNC;
		else if (strcasecmp(s, "write") == 0)
			mask |= BLK_TC_WRITE;

		if (sep == NULL)
			break;

		s = sep + 1;
	}
	kfree(copy);

	return mask;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;
	mutex_lock(&bdev->bd_mutex);
	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -ENXIO;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			int mask = blk_str2act_mask(buf);

			if (mask < 0)
				goto out;
			value = mask;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
		ret = count;
	}
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}