[PATCH] kfree cleanup: drivers/scsi
drivers/scsi/sg.c (android_kernel_alcatel_ttab, mt8127)
1 /*
2 * History:
3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY
6 *
7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2005 Douglas Gilbert
11 *
12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 */
20
21 static int sg_version_num = 30533; /* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.33"
23
24 /*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28 * (otherwise the macros compile to empty statements).
29 *
30 */
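/*
 * Illustrative use of the logging macro mentioned above, following the
 * pattern used throughout this file: the first argument is the verbosity
 * level, the second the statement to emit. With CONFIG_SCSI_LOGGING unset
 * the whole statement compiles away, so such calls are safe in hot paths.
 *
 *   SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
 */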
31 #include <linux/config.h>
32 #include <linux/module.h>
33
34 #include <linux/fs.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/errno.h>
40 #include <linux/mtio.h>
41 #include <linux/ioctl.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/poll.h>
45 #include <linux/smp_lock.h>
46 #include <linux/moduleparam.h>
47 #include <linux/devfs_fs_kernel.h>
48 #include <linux/cdev.h>
49 #include <linux/seq_file.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/scatterlist.h>
53
54 #include "scsi.h"
55 #include <scsi/scsi_dbg.h>
56 #include <scsi/scsi_host.h>
57 #include <scsi/scsi_driver.h>
58 #include <scsi/scsi_ioctl.h>
59 #include <scsi/sg.h>
60
61 #include "scsi_logging.h"
62
63 #ifdef CONFIG_SCSI_PROC_FS
64 #include <linux/proc_fs.h>
65 static char *sg_version_date = "20050908";
66
67 static int sg_proc_init(void);
68 static void sg_proc_cleanup(void);
69 #endif
70
71 #ifndef LINUX_VERSION_CODE
72 #include <linux/version.h>
73 #endif /* LINUX_VERSION_CODE */
74
75 #define SG_ALLOW_DIO_DEF 0
76 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
77
78 #define SG_MAX_DEVS 32768
79
80 /*
81 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
82 * Then when using 32 bit integers x * m may overflow during the calculation.
83 * Replacing muldiv(x,m,d) by ((x % d) * m) / d + int(x / d) * m
84 * calculates the same, but prevents the overflow when both m and d
85 * are "small" numbers (like HZ and USER_HZ).
86 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
87 * in 32 bits.
88 */
89 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
90
91 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
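/*
 * Worked example of the overflow-avoiding macro above (illustrative figures,
 * assuming HZ=1000 and USER_HZ=100): converting x = 123456789 USER_HZ ticks
 * to jiffies as (x * HZ) / USER_HZ would first form 123456789000, which
 * overflows a 32 bit int. MULDIV(123456789, 1000, 100) instead computes
 *   ((123456789 % 100) * 1000) / 100 + (123456789 / 100) * 1000
 *   = (89 * 1000) / 100   +   1234567 * 1000
 *   = 890 + 1234567000 = 1234567890
 * which is the exact result, with every intermediate value fitting in 32 bits.
 */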
92
93 int sg_big_buff = SG_DEF_RESERVED_SIZE;
94 /* N.B. This variable is readable and writeable via
95 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
96 of this size (or less if there is not enough memory) will be reserved
97 for use by this file descriptor. [Deprecated usage: this variable is also
98 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
99 the kernel (i.e. it is not a module).] */
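/*
 * Minimal user-space sketch (illustrative only; error handling omitted and
 * the 128 KiB figure is an arbitrary assumption) of querying and resizing
 * the per-fd reserve buffer via the SG_GET_RESERVED_SIZE and
 * SG_SET_RESERVED_SIZE ioctls handled in sg_ioctl() below:
 *
 *   int fd = open("/dev/sg0", O_RDWR);
 *   int sz;
 *   ioctl(fd, SG_GET_RESERVED_SIZE, &sz);   // current reservation
 *   sz = 128 * 1024;
 *   ioctl(fd, SG_SET_RESERVED_SIZE, &sz);   // ask for a larger one
 */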
100 static int def_reserved_size = -1; /* picks up init parameter */
101 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
102
103 #define SG_SECTOR_SZ 512
104 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
105
106 #define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
107
108 static int sg_add(struct class_device *, struct class_interface *);
109 static void sg_remove(struct class_device *, struct class_interface *);
110
111 static Scsi_Request *dummy_cmdp; /* only used for sizeof */
112
113 static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock
114 file descriptor list for device */
115
116 static struct class_interface sg_interface = {
117 .add = sg_add,
118 .remove = sg_remove,
119 };
120
121 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
122 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
123 unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
124 unsigned bufflen; /* Size of (aggregate) data buffer */
125 unsigned b_malloc_len; /* actual len malloc'ed in buffer */
126 void *buffer; /* Data buffer or scatter list (k_use_sg>0) */
127 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
128 unsigned char cmd_opcode; /* first byte of command */
129 } Sg_scatter_hold;
130
131 struct sg_device; /* forward declarations */
132 struct sg_fd;
133
134 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
135 Scsi_Request *my_cmdp; /* != 0 when request with lower levels */
136 struct sg_request *nextrp; /* NULL -> tail request (slist) */
137 struct sg_fd *parentfp; /* NULL -> not in use */
138 Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
139 sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
140 unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)];
141 char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
142 char orphan; /* 1 -> drop on sight, 0 -> normal */
143 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
144 volatile char done; /* 0->before bh, 1->before read, 2->read */
145 } Sg_request;
146
147 typedef struct sg_fd { /* holds the state of a file descriptor */
148 struct sg_fd *nextfp; /* NULL when last opened fd on this device */
149 struct sg_device *parentdp; /* owning device */
150 wait_queue_head_t read_wait; /* queue read until command done */
151 rwlock_t rq_list_lock; /* protect access to list in req_arr */
152 int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
153 int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
154 Sg_scatter_hold reserve; /* buffer held for this file descriptor */
155 unsigned save_scat_len; /* original length of trunc. scat. element */
156 Sg_request *headrp; /* head of request slist, NULL->empty */
157 struct fasync_struct *async_qp; /* used by asynchronous notification */
158 Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
159 char low_dma; /* as in parent but possibly overridden to 1 */
160 char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
161 volatile char closed; /* 1 -> fd closed but request(s) outstanding */
162 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
163 char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
164 char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
165 char mmap_called; /* 0 -> mmap() never called on this fd */
166 } Sg_fd;
167
168 typedef struct sg_device { /* holds the state of each scsi generic device */
169 struct scsi_device *device;
170 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
171 int sg_tablesize; /* adapter's max scatter-gather table size */
172 Sg_fd *headfp; /* first open fd belonging to this device */
173 volatile char detached; /* 0->attached, 1->detached pending removal */
174 volatile char exclude; /* opened for exclusive access */
175 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
176 struct gendisk *disk;
177 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
178 } Sg_device;
179
180 static int sg_fasync(int fd, struct file *filp, int mode);
181 static void sg_cmd_done(Scsi_Cmnd * SCpnt); /* tasklet or soft irq callback */
182 static int sg_start_req(Sg_request * srp);
183 static void sg_finish_rem_req(Sg_request * srp);
184 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
185 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
186 int tablesize);
187 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
188 Sg_request * srp);
189 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
190 int blocking, int read_only, Sg_request ** o_srp);
191 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
192 unsigned char *cmnd, int timeout, int blocking);
193 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
194 int wr_xf, int *countp, unsigned char __user **up);
195 static int sg_write_xfer(Sg_request * srp);
196 static int sg_read_xfer(Sg_request * srp);
197 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
198 static void sg_remove_scat(Sg_scatter_hold * schp);
199 static void sg_build_reserve(Sg_fd * sfp, int req_size);
200 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
201 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
202 static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
203 static void sg_page_free(char *buff, int size);
204 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
205 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
206 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
207 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
208 static Sg_request *sg_add_request(Sg_fd * sfp);
209 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
210 static int sg_res_in_use(Sg_fd * sfp);
211 static int sg_allow_access(unsigned char opcode, char dev_type);
212 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
213 static Sg_device *sg_get_dev(int dev);
214 static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp);
215 #ifdef CONFIG_SCSI_PROC_FS
216 static int sg_last_dev(void);
217 #endif
218
219 static Sg_device **sg_dev_arr = NULL;
220 static int sg_dev_max;
221 static int sg_nr_dev;
222
223 #define SZ_SG_HEADER sizeof(struct sg_header)
224 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
225 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
226 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
227
228 static int
229 sg_open(struct inode *inode, struct file *filp)
230 {
231 int dev = iminor(inode);
232 int flags = filp->f_flags;
233 Sg_device *sdp;
234 Sg_fd *sfp;
235 int res;
236 int retval;
237
238 nonseekable_open(inode, filp);
239 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
240 sdp = sg_get_dev(dev);
241 if ((!sdp) || (!sdp->device))
242 return -ENXIO;
243 if (sdp->detached)
244 return -ENODEV;
245
246 /* This driver's module count is bumped by fops_get in <linux/fs.h> */
247 /* Prevent the device driver from vanishing while we sleep */
248 retval = scsi_device_get(sdp->device);
249 if (retval)
250 return retval;
251
252 if (!((flags & O_NONBLOCK) ||
253 scsi_block_when_processing_errors(sdp->device))) {
254 retval = -ENXIO;
255 /* we are in error recovery for this device */
256 goto error_out;
257 }
258
259 if (flags & O_EXCL) {
260 if (O_RDONLY == (flags & O_ACCMODE)) {
261 retval = -EPERM; /* Can't lock it with read only access */
262 goto error_out;
263 }
264 if (sdp->headfp && (flags & O_NONBLOCK)) {
265 retval = -EBUSY;
266 goto error_out;
267 }
268 res = 0;
269 __wait_event_interruptible(sdp->o_excl_wait,
270 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
271 if (res) {
272 retval = res; /* -ERESTARTSYS because signal hit process */
273 goto error_out;
274 }
275 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
276 if (flags & O_NONBLOCK) {
277 retval = -EBUSY;
278 goto error_out;
279 }
280 res = 0;
281 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
282 res);
283 if (res) {
284 retval = res; /* -ERESTARTSYS because signal hit process */
285 goto error_out;
286 }
287 }
288 if (sdp->detached) {
289 retval = -ENODEV;
290 goto error_out;
291 }
292 if (!sdp->headfp) { /* no existing opens on this device */
293 sdp->sgdebug = 0;
294 sdp->sg_tablesize = sdp->device->host->sg_tablesize;
295 }
296 if ((sfp = sg_add_sfp(sdp, dev)))
297 filp->private_data = sfp;
298 else {
299 if (flags & O_EXCL)
300 sdp->exclude = 0; /* undo if error */
301 retval = -ENOMEM;
302 goto error_out;
303 }
304 return 0;
305
306 error_out:
307 scsi_device_put(sdp->device);
308 return retval;
309 }
310
311 /* Following function was formerly called 'sg_close' */
312 static int
313 sg_release(struct inode *inode, struct file *filp)
314 {
315 Sg_device *sdp;
316 Sg_fd *sfp;
317
318 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
319 return -ENXIO;
320 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
321 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
322 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
323 if (!sdp->detached) {
324 scsi_device_put(sdp->device);
325 }
326 sdp->exclude = 0;
327 wake_up_interruptible(&sdp->o_excl_wait);
328 }
329 return 0;
330 }
331
332 static ssize_t
333 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
334 {
335 Sg_device *sdp;
336 Sg_fd *sfp;
337 Sg_request *srp;
338 int req_pack_id = -1;
339 sg_io_hdr_t *hp;
340 struct sg_header *old_hdr = NULL;
341 int retval = 0;
342
343 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
344 return -ENXIO;
345 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
346 sdp->disk->disk_name, (int) count));
347 if (!access_ok(VERIFY_WRITE, buf, count))
348 return -EFAULT;
349 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
350 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
351 if (!old_hdr)
352 return -ENOMEM;
353 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
354 retval = -EFAULT;
355 goto free_old_hdr;
356 }
357 if (old_hdr->reply_len < 0) {
358 if (count >= SZ_SG_IO_HDR) {
359 sg_io_hdr_t *new_hdr;
360 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
361 if (!new_hdr) {
362 retval = -ENOMEM;
363 goto free_old_hdr;
364 }
365 retval =__copy_from_user
366 (new_hdr, buf, SZ_SG_IO_HDR);
367 req_pack_id = new_hdr->pack_id;
368 kfree(new_hdr);
369 if (retval) {
370 retval = -EFAULT;
371 goto free_old_hdr;
372 }
373 }
374 } else
375 req_pack_id = old_hdr->pack_id;
376 }
377 srp = sg_get_rq_mark(sfp, req_pack_id);
378 if (!srp) { /* now wait on packet to arrive */
379 if (sdp->detached) {
380 retval = -ENODEV;
381 goto free_old_hdr;
382 }
383 if (filp->f_flags & O_NONBLOCK) {
384 retval = -EAGAIN;
385 goto free_old_hdr;
386 }
387 while (1) {
388 retval = 0; /* following macro beats race condition */
389 __wait_event_interruptible(sfp->read_wait,
390 (sdp->detached ||
391 (srp = sg_get_rq_mark(sfp, req_pack_id))),
392 retval);
393 if (sdp->detached) {
394 retval = -ENODEV;
395 goto free_old_hdr;
396 }
397 if (0 == retval)
398 break;
399
400 /* -ERESTARTSYS as signal hit process */
401 goto free_old_hdr;
402 }
403 }
404 if (srp->header.interface_id != '\0') {
405 retval = sg_new_read(sfp, buf, count, srp);
406 goto free_old_hdr;
407 }
408
409 hp = &srp->header;
410 if (old_hdr == NULL) {
411 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
412 if (! old_hdr) {
413 retval = -ENOMEM;
414 goto free_old_hdr;
415 }
416 }
417 memset(old_hdr, 0, SZ_SG_HEADER);
418 old_hdr->reply_len = (int) hp->timeout;
419 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
420 old_hdr->pack_id = hp->pack_id;
421 old_hdr->twelve_byte =
422 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
423 old_hdr->target_status = hp->masked_status;
424 old_hdr->host_status = hp->host_status;
425 old_hdr->driver_status = hp->driver_status;
426 if ((CHECK_CONDITION & hp->masked_status) ||
427 (DRIVER_SENSE & hp->driver_status))
428 memcpy(old_hdr->sense_buffer, srp->sense_b,
429 sizeof (old_hdr->sense_buffer));
430 switch (hp->host_status) {
431 /* This setup of 'result' is for backward compatibility and is best
432 ignored by the user who should use target, host + driver status */
433 case DID_OK:
434 case DID_PASSTHROUGH:
435 case DID_SOFT_ERROR:
436 old_hdr->result = 0;
437 break;
438 case DID_NO_CONNECT:
439 case DID_BUS_BUSY:
440 case DID_TIME_OUT:
441 old_hdr->result = EBUSY;
442 break;
443 case DID_BAD_TARGET:
444 case DID_ABORT:
445 case DID_PARITY:
446 case DID_RESET:
447 case DID_BAD_INTR:
448 old_hdr->result = EIO;
449 break;
450 case DID_ERROR:
451 old_hdr->result = (srp->sense_b[0] == 0 &&
452 hp->masked_status == GOOD) ? 0 : EIO;
453 break;
454 default:
455 old_hdr->result = EIO;
456 break;
457 }
458
459 /* Now copy the result back to the user buffer. */
460 if (count >= SZ_SG_HEADER) {
461 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
462 retval = -EFAULT;
463 goto free_old_hdr;
464 }
465 buf += SZ_SG_HEADER;
466 if (count > old_hdr->reply_len)
467 count = old_hdr->reply_len;
468 if (count > SZ_SG_HEADER) {
469 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
470 retval = -EFAULT;
471 goto free_old_hdr;
472 }
473 }
474 } else
475 count = (old_hdr->result == 0) ? 0 : -EIO;
476 sg_finish_rem_req(srp);
477 retval = count;
478 free_old_hdr:
479 kfree(old_hdr);
480 return retval;
481 }
482
483 static ssize_t
484 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
485 {
486 sg_io_hdr_t *hp = &srp->header;
487 int err = 0;
488 int len;
489
490 if (count < SZ_SG_IO_HDR) {
491 err = -EINVAL;
492 goto err_out;
493 }
494 hp->sb_len_wr = 0;
495 if ((hp->mx_sb_len > 0) && hp->sbp) {
496 if ((CHECK_CONDITION & hp->masked_status) ||
497 (DRIVER_SENSE & hp->driver_status)) {
498 int sb_len = sizeof (dummy_cmdp->sr_sense_buffer);
499 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
500 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
501 len = (len > sb_len) ? sb_len : len;
502 if (copy_to_user(hp->sbp, srp->sense_b, len)) {
503 err = -EFAULT;
504 goto err_out;
505 }
506 hp->sb_len_wr = len;
507 }
508 }
509 if (hp->masked_status || hp->host_status || hp->driver_status)
510 hp->info |= SG_INFO_CHECK;
511 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
512 err = -EFAULT;
513 goto err_out;
514 }
515 err = sg_read_xfer(srp);
516 err_out:
517 sg_finish_rem_req(srp);
518 return (0 == err) ? count : err;
519 }
520
521 static ssize_t
522 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
523 {
524 int mxsize, cmd_size, k;
525 int input_size, blocking;
526 unsigned char opcode;
527 Sg_device *sdp;
528 Sg_fd *sfp;
529 Sg_request *srp;
530 struct sg_header old_hdr;
531 sg_io_hdr_t *hp;
532 unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
533
534 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
535 return -ENXIO;
536 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
537 sdp->disk->disk_name, (int) count));
538 if (sdp->detached)
539 return -ENODEV;
540 if (!((filp->f_flags & O_NONBLOCK) ||
541 scsi_block_when_processing_errors(sdp->device)))
542 return -ENXIO;
543
544 if (!access_ok(VERIFY_READ, buf, count))
545 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
546 if (count < SZ_SG_HEADER)
547 return -EIO;
548 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
549 return -EFAULT;
550 blocking = !(filp->f_flags & O_NONBLOCK);
551 if (old_hdr.reply_len < 0)
552 return sg_new_write(sfp, buf, count, blocking, 0, NULL);
553 if (count < (SZ_SG_HEADER + 6))
554 return -EIO; /* The minimum scsi command length is 6 bytes. */
555
556 if (!(srp = sg_add_request(sfp))) {
557 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
558 return -EDOM;
559 }
560 buf += SZ_SG_HEADER;
561 __get_user(opcode, buf);
562 if (sfp->next_cmd_len > 0) {
563 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
564 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
565 sfp->next_cmd_len = 0;
566 sg_remove_request(sfp, srp);
567 return -EIO;
568 }
569 cmd_size = sfp->next_cmd_len;
570 sfp->next_cmd_len = 0; /* reset so only this write() is affected */
571 } else {
572 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
573 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
574 cmd_size = 12;
575 }
576 SCSI_LOG_TIMEOUT(4, printk(
577 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
578 /* Determine buffer size. */
579 input_size = count - cmd_size;
580 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
581 mxsize -= SZ_SG_HEADER;
582 input_size -= SZ_SG_HEADER;
583 if (input_size < 0) {
584 sg_remove_request(sfp, srp);
585 return -EIO; /* User did not pass enough bytes for this command. */
586 }
587 hp = &srp->header;
588 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
589 hp->cmd_len = (unsigned char) cmd_size;
590 hp->iovec_count = 0;
591 hp->mx_sb_len = 0;
592 if (input_size > 0)
593 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
594 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
595 else
596 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
597 hp->dxfer_len = mxsize;
598 hp->dxferp = (char __user *)buf + cmd_size;
599 hp->sbp = NULL;
600 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
601 hp->flags = input_size; /* structure abuse ... */
602 hp->pack_id = old_hdr.pack_id;
603 hp->usr_ptr = NULL;
604 if (__copy_from_user(cmnd, buf, cmd_size))
605 return -EFAULT;
606 /*
607 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
608 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
609 * is a non-zero input_size, so emit a warning.
610 */
611 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
612 if (printk_ratelimit())
613 printk(KERN_WARNING
614 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
615 "guessing data in;\n" KERN_WARNING " "
616 "program %s not setting count and/or reply_len properly\n",
617 old_hdr.reply_len - (int)SZ_SG_HEADER,
618 input_size, (unsigned int) cmnd[0],
619 current->comm);
620 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
621 return (k < 0) ? k : count;
622 }
623
624 static ssize_t
625 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
626 int blocking, int read_only, Sg_request ** o_srp)
627 {
628 int k;
629 Sg_request *srp;
630 sg_io_hdr_t *hp;
631 unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
632 int timeout;
633 unsigned long ul_timeout;
634
635 if (count < SZ_SG_IO_HDR)
636 return -EINVAL;
637 if (!access_ok(VERIFY_READ, buf, count))
638 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
639
640 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
641 if (!(srp = sg_add_request(sfp))) {
642 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
643 return -EDOM;
644 }
645 hp = &srp->header;
646 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
647 sg_remove_request(sfp, srp);
648 return -EFAULT;
649 }
650 if (hp->interface_id != 'S') {
651 sg_remove_request(sfp, srp);
652 return -ENOSYS;
653 }
654 if (hp->flags & SG_FLAG_MMAP_IO) {
655 if (hp->dxfer_len > sfp->reserve.bufflen) {
656 sg_remove_request(sfp, srp);
657 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
658 }
659 if (hp->flags & SG_FLAG_DIRECT_IO) {
660 sg_remove_request(sfp, srp);
661 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
662 }
663 if (sg_res_in_use(sfp)) {
664 sg_remove_request(sfp, srp);
665 return -EBUSY; /* reserve buffer already being used */
666 }
667 }
668 ul_timeout = msecs_to_jiffies(srp->header.timeout);
669 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
670 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
671 sg_remove_request(sfp, srp);
672 return -EMSGSIZE;
673 }
674 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
675 sg_remove_request(sfp, srp);
676 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
677 }
678 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
679 sg_remove_request(sfp, srp);
680 return -EFAULT;
681 }
682 if (read_only &&
683 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
684 sg_remove_request(sfp, srp);
685 return -EPERM;
686 }
687 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
688 if (k < 0)
689 return k;
690 if (o_srp)
691 *o_srp = srp;
692 return count;
693 }
694
695 static int
696 sg_common_write(Sg_fd * sfp, Sg_request * srp,
697 unsigned char *cmnd, int timeout, int blocking)
698 {
699 int k;
700 Scsi_Request *SRpnt;
701 Sg_device *sdp = sfp->parentdp;
702 sg_io_hdr_t *hp = &srp->header;
703 request_queue_t *q;
704
705 srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
706 hp->status = 0;
707 hp->masked_status = 0;
708 hp->msg_status = 0;
709 hp->info = 0;
710 hp->host_status = 0;
711 hp->driver_status = 0;
712 hp->resid = 0;
713 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
714 (int) cmnd[0], (int) hp->cmd_len));
715
716 if ((k = sg_start_req(srp))) {
717 SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
718 sg_finish_rem_req(srp);
719 return k; /* probably out of space --> ENOMEM */
720 }
721 if ((k = sg_write_xfer(srp))) {
722 SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
723 sg_finish_rem_req(srp);
724 return k;
725 }
726 if (sdp->detached) {
727 sg_finish_rem_req(srp);
728 return -ENODEV;
729 }
730 SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC);
731 if (SRpnt == NULL) {
732 SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
733 sg_finish_rem_req(srp);
734 return -ENOMEM;
735 }
736
737 srp->my_cmdp = SRpnt;
738 q = SRpnt->sr_device->request_queue;
739 SRpnt->sr_request->rq_disk = sdp->disk;
740 SRpnt->sr_sense_buffer[0] = 0;
741 SRpnt->sr_cmd_len = hp->cmd_len;
742 SRpnt->sr_use_sg = srp->data.k_use_sg;
743 SRpnt->sr_sglist_len = srp->data.sglist_len;
744 SRpnt->sr_bufflen = srp->data.bufflen;
745 SRpnt->sr_underflow = 0;
746 SRpnt->sr_buffer = srp->data.buffer;
747 switch (hp->dxfer_direction) {
748 case SG_DXFER_TO_FROM_DEV:
749 case SG_DXFER_FROM_DEV:
750 SRpnt->sr_data_direction = DMA_FROM_DEVICE;
751 break;
752 case SG_DXFER_TO_DEV:
753 SRpnt->sr_data_direction = DMA_TO_DEVICE;
754 break;
755 case SG_DXFER_UNKNOWN:
756 SRpnt->sr_data_direction = DMA_BIDIRECTIONAL;
757 break;
758 default:
759 SRpnt->sr_data_direction = DMA_NONE;
760 break;
761 }
762 SRpnt->upper_private_data = srp;
763 srp->data.k_use_sg = 0;
764 srp->data.sglist_len = 0;
765 srp->data.bufflen = 0;
766 srp->data.buffer = NULL;
767 hp->duration = jiffies_to_msecs(jiffies);
768 /* Now send everything off to the mid-level. The next time we hear about this
769 packet is when sg_cmd_done() is called (i.e. a callback). */
770 scsi_do_req(SRpnt, (void *) cmnd,
771 (void *) SRpnt->sr_buffer, hp->dxfer_len,
772 sg_cmd_done, timeout, SG_DEFAULT_RETRIES);
773 /* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
774 return 0;
775 }
776
777 static int
778 sg_srp_done(Sg_request *srp, Sg_fd *sfp)
779 {
780 unsigned long iflags;
781 int done;
782
783 read_lock_irqsave(&sfp->rq_list_lock, iflags);
784 done = srp->done;
785 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
786 return done;
787 }
788
789 static int
790 sg_ioctl(struct inode *inode, struct file *filp,
791 unsigned int cmd_in, unsigned long arg)
792 {
793 void __user *p = (void __user *)arg;
794 int __user *ip = p;
795 int result, val, read_only;
796 Sg_device *sdp;
797 Sg_fd *sfp;
798 Sg_request *srp;
799 unsigned long iflags;
800
801 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
802 return -ENXIO;
803 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
804 sdp->disk->disk_name, (int) cmd_in));
805 read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
806
807 switch (cmd_in) {
808 case SG_IO:
809 {
810 int blocking = 1; /* ignore O_NONBLOCK flag */
811
812 if (sdp->detached)
813 return -ENODEV;
814 if (!scsi_block_when_processing_errors(sdp->device))
815 return -ENXIO;
816 if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
817 return -EFAULT;
818 result =
819 sg_new_write(sfp, p, SZ_SG_IO_HDR,
820 blocking, read_only, &srp);
821 if (result < 0)
822 return result;
823 srp->sg_io_owned = 1;
824 while (1) {
825 result = 0; /* following macro to beat race condition */
826 __wait_event_interruptible(sfp->read_wait,
827 (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
828 result);
829 if (sdp->detached)
830 return -ENODEV;
831 if (sfp->closed)
832 return 0; /* request packet dropped already */
833 if (0 == result)
834 break;
835 srp->orphan = 1;
836 return result; /* -ERESTARTSYS because signal hit process */
837 }
838 write_lock_irqsave(&sfp->rq_list_lock, iflags);
839 srp->done = 2;
840 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
841 result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
842 return (result < 0) ? result : 0;
843 }
844 case SG_SET_TIMEOUT:
845 result = get_user(val, ip);
846 if (result)
847 return result;
848 if (val < 0)
849 return -EIO;
850 if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
851 val = MULDIV (INT_MAX, USER_HZ, HZ);
852 sfp->timeout_user = val;
853 sfp->timeout = MULDIV (val, HZ, USER_HZ);
854
855 return 0;
856 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
857 /* strange ..., for backward compatibility */
858 return sfp->timeout_user;
859 case SG_SET_FORCE_LOW_DMA:
860 result = get_user(val, ip);
861 if (result)
862 return result;
863 if (val) {
864 sfp->low_dma = 1;
865 if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
866 val = (int) sfp->reserve.bufflen;
867 sg_remove_scat(&sfp->reserve);
868 sg_build_reserve(sfp, val);
869 }
870 } else {
871 if (sdp->detached)
872 return -ENODEV;
873 sfp->low_dma = sdp->device->host->unchecked_isa_dma;
874 }
875 return 0;
876 case SG_GET_LOW_DMA:
877 return put_user((int) sfp->low_dma, ip);
878 case SG_GET_SCSI_ID:
879 if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
880 return -EFAULT;
881 else {
882 sg_scsi_id_t __user *sg_idp = p;
883
884 if (sdp->detached)
885 return -ENODEV;
886 __put_user((int) sdp->device->host->host_no,
887 &sg_idp->host_no);
888 __put_user((int) sdp->device->channel,
889 &sg_idp->channel);
890 __put_user((int) sdp->device->id, &sg_idp->scsi_id);
891 __put_user((int) sdp->device->lun, &sg_idp->lun);
892 __put_user((int) sdp->device->type, &sg_idp->scsi_type);
893 __put_user((short) sdp->device->host->cmd_per_lun,
894 &sg_idp->h_cmd_per_lun);
895 __put_user((short) sdp->device->queue_depth,
896 &sg_idp->d_queue_depth);
897 __put_user(0, &sg_idp->unused[0]);
898 __put_user(0, &sg_idp->unused[1]);
899 return 0;
900 }
901 case SG_SET_FORCE_PACK_ID:
902 result = get_user(val, ip);
903 if (result)
904 return result;
905 sfp->force_packid = val ? 1 : 0;
906 return 0;
907 case SG_GET_PACK_ID:
908 if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
909 return -EFAULT;
910 read_lock_irqsave(&sfp->rq_list_lock, iflags);
911 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
912 if ((1 == srp->done) && (!srp->sg_io_owned)) {
913 read_unlock_irqrestore(&sfp->rq_list_lock,
914 iflags);
915 __put_user(srp->header.pack_id, ip);
916 return 0;
917 }
918 }
919 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
920 __put_user(-1, ip);
921 return 0;
922 case SG_GET_NUM_WAITING:
923 read_lock_irqsave(&sfp->rq_list_lock, iflags);
924 for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
925 if ((1 == srp->done) && (!srp->sg_io_owned))
926 ++val;
927 }
928 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
929 return put_user(val, ip);
930 case SG_GET_SG_TABLESIZE:
931 return put_user(sdp->sg_tablesize, ip);
932 case SG_SET_RESERVED_SIZE:
933 result = get_user(val, ip);
934 if (result)
935 return result;
936 if (val < 0)
937 return -EINVAL;
938 if (val != sfp->reserve.bufflen) {
939 if (sg_res_in_use(sfp) || sfp->mmap_called)
940 return -EBUSY;
941 sg_remove_scat(&sfp->reserve);
942 sg_build_reserve(sfp, val);
943 }
944 return 0;
945 case SG_GET_RESERVED_SIZE:
946 val = (int) sfp->reserve.bufflen;
947 return put_user(val, ip);
948 case SG_SET_COMMAND_Q:
949 result = get_user(val, ip);
950 if (result)
951 return result;
952 sfp->cmd_q = val ? 1 : 0;
953 return 0;
954 case SG_GET_COMMAND_Q:
955 return put_user((int) sfp->cmd_q, ip);
956 case SG_SET_KEEP_ORPHAN:
957 result = get_user(val, ip);
958 if (result)
959 return result;
960 sfp->keep_orphan = val;
961 return 0;
962 case SG_GET_KEEP_ORPHAN:
963 return put_user((int) sfp->keep_orphan, ip);
964 case SG_NEXT_CMD_LEN:
965 result = get_user(val, ip);
966 if (result)
967 return result;
968 sfp->next_cmd_len = (val > 0) ? val : 0;
969 return 0;
970 case SG_GET_VERSION_NUM:
971 return put_user(sg_version_num, ip);
972 case SG_GET_ACCESS_COUNT:
973 /* faked - we don't have a real access count anymore */
974 val = (sdp->device ? 1 : 0);
975 return put_user(val, ip);
976 case SG_GET_REQUEST_TABLE:
977 if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
978 return -EFAULT;
979 else {
980 sg_req_info_t *rinfo;
981 unsigned int ms;
982
983 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
984 GFP_KERNEL);
985 if (!rinfo)
986 return -ENOMEM;
987 read_lock_irqsave(&sfp->rq_list_lock, iflags);
988 for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
989 ++val, srp = srp ? srp->nextrp : srp) {
990 memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
991 if (srp) {
992 rinfo[val].req_state = srp->done + 1;
993 rinfo[val].problem =
994 srp->header.masked_status &
995 srp->header.host_status &
996 srp->header.driver_status;
997 if (srp->done)
998 rinfo[val].duration =
999 srp->header.duration;
1000 else {
1001 ms = jiffies_to_msecs(jiffies);
1002 rinfo[val].duration =
1003 (ms > srp->header.duration) ?
1004 (ms - srp->header.duration) : 0;
1005 }
1006 rinfo[val].orphan = srp->orphan;
1007 rinfo[val].sg_io_owned =
1008 srp->sg_io_owned;
1009 rinfo[val].pack_id =
1010 srp->header.pack_id;
1011 rinfo[val].usr_ptr =
1012 srp->header.usr_ptr;
1013 }
1014 }
1015 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1016 result = __copy_to_user(p, rinfo,
1017 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1018 result = result ? -EFAULT : 0;
1019 kfree(rinfo);
1020 return result;
1021 }
1022 case SG_EMULATED_HOST:
1023 if (sdp->detached)
1024 return -ENODEV;
1025 return put_user(sdp->device->host->hostt->emulated, ip);
1026 case SG_SCSI_RESET:
1027 if (sdp->detached)
1028 return -ENODEV;
1029 if (filp->f_flags & O_NONBLOCK) {
1030 if (scsi_host_in_recovery(sdp->device->host))
1031 return -EBUSY;
1032 } else if (!scsi_block_when_processing_errors(sdp->device))
1033 return -EBUSY;
1034 result = get_user(val, ip);
1035 if (result)
1036 return result;
1037 if (SG_SCSI_RESET_NOTHING == val)
1038 return 0;
1039 switch (val) {
1040 case SG_SCSI_RESET_DEVICE:
1041 val = SCSI_TRY_RESET_DEVICE;
1042 break;
1043 case SG_SCSI_RESET_BUS:
1044 val = SCSI_TRY_RESET_BUS;
1045 break;
1046 case SG_SCSI_RESET_HOST:
1047 val = SCSI_TRY_RESET_HOST;
1048 break;
1049 default:
1050 return -EINVAL;
1051 }
1052 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1053 return -EACCES;
1054 return (scsi_reset_provider(sdp->device, val) ==
1055 SUCCESS) ? 0 : -EIO;
1056 case SCSI_IOCTL_SEND_COMMAND:
1057 if (sdp->detached)
1058 return -ENODEV;
1059 if (read_only) {
1060 unsigned char opcode = WRITE_6;
1061 Scsi_Ioctl_Command __user *siocp = p;
1062
1063 if (copy_from_user(&opcode, siocp->data, 1))
1064 return -EFAULT;
1065 if (!sg_allow_access(opcode, sdp->device->type))
1066 return -EPERM;
1067 }
1068 return scsi_ioctl_send_command(sdp->device, p);
1069 case SG_SET_DEBUG:
1070 result = get_user(val, ip);
1071 if (result)
1072 return result;
1073 sdp->sgdebug = (char) val;
1074 return 0;
1075 case SCSI_IOCTL_GET_IDLUN:
1076 case SCSI_IOCTL_GET_BUS_NUMBER:
1077 case SCSI_IOCTL_PROBE_HOST:
1078 case SG_GET_TRANSFORM:
1079 if (sdp->detached)
1080 return -ENODEV;
1081 return scsi_ioctl(sdp->device, cmd_in, p);
1082 default:
1083 if (read_only)
1084 return -EPERM; /* don't know so take safe approach */
1085 return scsi_ioctl(sdp->device, cmd_in, p);
1086 }
1087 }
1088
1089 #ifdef CONFIG_COMPAT
1090 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1091 {
1092 Sg_device *sdp;
1093 Sg_fd *sfp;
1094 struct scsi_device *sdev;
1095
1096 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1097 return -ENXIO;
1098
1099 sdev = sdp->device;
1100 if (sdev->host->hostt->compat_ioctl) {
1101 int ret;
1102
1103 ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1104
1105 return ret;
1106 }
1107
1108 return -ENOIOCTLCMD;
1109 }
1110 #endif
1111
1112 static unsigned int
1113 sg_poll(struct file *filp, poll_table * wait)
1114 {
1115 unsigned int res = 0;
1116 Sg_device *sdp;
1117 Sg_fd *sfp;
1118 Sg_request *srp;
1119 int count = 0;
1120 unsigned long iflags;
1121
1122 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
1123 || sfp->closed)
1124 return POLLERR;
1125 poll_wait(filp, &sfp->read_wait, wait);
1126 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1127 for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1128 /* if any read waiting, flag it */
1129 if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1130 res = POLLIN | POLLRDNORM;
1131 ++count;
1132 }
1133 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1134
1135 if (sdp->detached)
1136 res |= POLLHUP;
1137 else if (!sfp->cmd_q) {
1138 if (0 == count)
1139 res |= POLLOUT | POLLWRNORM;
1140 } else if (count < SG_MAX_QUEUE)
1141 res |= POLLOUT | POLLWRNORM;
1142 SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1143 sdp->disk->disk_name, (int) res));
1144 return res;
1145 }
1146
1147 static int
1148 sg_fasync(int fd, struct file *filp, int mode)
1149 {
1150 int retval;
1151 Sg_device *sdp;
1152 Sg_fd *sfp;
1153
1154 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1155 return -ENXIO;
1156 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1157 sdp->disk->disk_name, mode));
1158
1159 retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1160 return (retval < 0) ? retval : 0;
1161 }
1162
1163 static inline unsigned char *
1164 sg_scatg2virt(const struct scatterlist *sclp)
1165 {
1166 return (sclp && sclp->page) ?
1167 (unsigned char *) page_address(sclp->page) + sclp->offset : NULL;
1168 }
1169
1170 /* When startFinish==1, increments the page count of every page other than the
1171 first page of each scatter gather element obtained from __get_free_pages();
1172 when startFinish==0, decrements those counts again. */
1173 static void
1174 sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
1175 {
1176 void *page_ptr;
1177 struct page *page;
1178 int k, m;
1179
1180 SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
1181 startFinish, rsv_schp->k_use_sg));
1182 /* N.B. correction _not_ applied to base page of each allocation */
1183 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
1184 struct scatterlist *sclp = rsv_schp->buffer;
1185
1186 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
1187 for (m = PAGE_SIZE; m < sclp->length; m += PAGE_SIZE) {
1188 page_ptr = sg_scatg2virt(sclp) + m;
1189 page = virt_to_page(page_ptr);
1190 if (startFinish)
1191 get_page(page);
1192 else {
1193 if (page_count(page) > 0)
1194 __put_page(page);
1195 }
1196 }
1197 }
1198 } else { /* reserve buffer is just a single allocation */
1199 for (m = PAGE_SIZE; m < rsv_schp->bufflen; m += PAGE_SIZE) {
1200 page_ptr = (unsigned char *) rsv_schp->buffer + m;
1201 page = virt_to_page(page_ptr);
1202 if (startFinish)
1203 get_page(page);
1204 else {
1205 if (page_count(page) > 0)
1206 __put_page(page);
1207 }
1208 }
1209 }
1210 }
1211
1212 static struct page *
1213 sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1214 {
1215 Sg_fd *sfp;
1216 struct page *page = NOPAGE_SIGBUS;
1217 void *page_ptr = NULL;
1218 unsigned long offset;
1219 Sg_scatter_hold *rsv_schp;
1220
1221 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1222 return page;
1223 rsv_schp = &sfp->reserve;
1224 offset = addr - vma->vm_start;
1225 if (offset >= rsv_schp->bufflen)
1226 return page;
1227 SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1228 offset, rsv_schp->k_use_sg));
1229 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
1230 int k;
1231 unsigned long sa = vma->vm_start;
1232 unsigned long len;
1233 struct scatterlist *sclp = rsv_schp->buffer;
1234
1235 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1236 ++k, ++sclp) {
1237 len = vma->vm_end - sa;
1238 len = (len < sclp->length) ? len : sclp->length;
1239 if (offset < len) {
1240 page_ptr = sg_scatg2virt(sclp) + offset;
1241 page = virt_to_page(page_ptr);
1242 get_page(page); /* increment page count */
1243 break;
1244 }
1245 sa += len;
1246 offset -= len;
1247 }
1248 } else { /* reserve buffer is just a single allocation */
1249 page_ptr = (unsigned char *) rsv_schp->buffer + offset;
1250 page = virt_to_page(page_ptr);
1251 get_page(page); /* increment page count */
1252 }
1253 if (type)
1254 *type = VM_FAULT_MINOR;
1255 return page;
1256 }
1257
1258 static struct vm_operations_struct sg_mmap_vm_ops = {
1259 .nopage = sg_vma_nopage,
1260 };
1261
1262 static int
1263 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1264 {
1265 Sg_fd *sfp;
1266 unsigned long req_sz;
1267 Sg_scatter_hold *rsv_schp;
1268
1269 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1270 return -ENXIO;
1271 req_sz = vma->vm_end - vma->vm_start;
1272 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1273 (void *) vma->vm_start, (int) req_sz));
1274 if (vma->vm_pgoff)
1275 return -EINVAL; /* want no offset */
1276 rsv_schp = &sfp->reserve;
1277 if (req_sz > rsv_schp->bufflen)
1278 return -ENOMEM; /* cannot map more than reserved buffer */
1279
1280 if (rsv_schp->k_use_sg) { /* reserve buffer is a scatter gather list */
1281 int k;
1282 unsigned long sa = vma->vm_start;
1283 unsigned long len;
1284 struct scatterlist *sclp = rsv_schp->buffer;
1285
1286 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1287 ++k, ++sclp) {
1288 if (0 != sclp->offset)
1289 return -EFAULT; /* non page aligned memory ?? */
1290 len = vma->vm_end - sa;
1291 len = (len < sclp->length) ? len : sclp->length;
1292 sa += len;
1293 }
1294 } else { /* reserve buffer is just a single allocation */
1295 if ((unsigned long) rsv_schp->buffer & (PAGE_SIZE - 1))
1296 return -EFAULT; /* non page aligned memory ?? */
1297 }
1298 if (0 == sfp->mmap_called) {
1299 sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
1300 sfp->mmap_called = 1;
1301 }
1302 vma->vm_flags |= VM_RESERVED;
1303 vma->vm_private_data = sfp;
1304 vma->vm_ops = &sg_mmap_vm_ops;
1305 return 0;
1306 }
1307
1308 /* This function is a "bottom half" handler that is called by the
1309 * mid level when a command is completed (or has failed). */
1310 static void
1311 sg_cmd_done(Scsi_Cmnd * SCpnt)
1312 {
1313 Scsi_Request *SRpnt = NULL;
1314 Sg_device *sdp = NULL;
1315 Sg_fd *sfp;
1316 Sg_request *srp = NULL;
1317 unsigned long iflags;
1318 unsigned int ms;
1319
1320 if (SCpnt && (SRpnt = SCpnt->sc_request))
1321 srp = (Sg_request *) SRpnt->upper_private_data;
1322 if (NULL == srp) {
1323 printk(KERN_ERR "sg_cmd_done: NULL request\n");
1324 if (SRpnt)
1325 scsi_release_request(SRpnt);
1326 return;
1327 }
1328 sfp = srp->parentfp;
1329 if (sfp)
1330 sdp = sfp->parentdp;
1331 if ((NULL == sdp) || sdp->detached) {
1332 printk(KERN_INFO "sg_cmd_done: device detached\n");
1333 scsi_release_request(SRpnt);
1334 return;
1335 }
1336
1337 /* First transfer ownership of data buffers to sg_device object. */
1338 srp->data.k_use_sg = SRpnt->sr_use_sg;
1339 srp->data.sglist_len = SRpnt->sr_sglist_len;
1340 srp->data.bufflen = SRpnt->sr_bufflen;
1341 srp->data.buffer = SRpnt->sr_buffer;
1342 /* now clear out request structure */
1343 SRpnt->sr_use_sg = 0;
1344 SRpnt->sr_sglist_len = 0;
1345 SRpnt->sr_bufflen = 0;
1346 SRpnt->sr_buffer = NULL;
1347 SRpnt->sr_underflow = 0;
1348 SRpnt->sr_request->rq_disk = NULL; /* "sg" _disowns_ request blk */
1349
1350 srp->my_cmdp = NULL;
1351
1352 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1353 sdp->disk->disk_name, srp->header.pack_id, (int) SRpnt->sr_result));
1354 srp->header.resid = SCpnt->resid;
1355 ms = jiffies_to_msecs(jiffies);
1356 srp->header.duration = (ms > srp->header.duration) ?
1357 (ms - srp->header.duration) : 0;
1358 if (0 != SRpnt->sr_result) {
1359 struct scsi_sense_hdr sshdr;
1360
1361 memcpy(srp->sense_b, SRpnt->sr_sense_buffer,
1362 sizeof (srp->sense_b));
1363 srp->header.status = 0xff & SRpnt->sr_result;
1364 srp->header.masked_status = status_byte(SRpnt->sr_result);
1365 srp->header.msg_status = msg_byte(SRpnt->sr_result);
1366 srp->header.host_status = host_byte(SRpnt->sr_result);
1367 srp->header.driver_status = driver_byte(SRpnt->sr_result);
1368 if ((sdp->sgdebug > 0) &&
1369 ((CHECK_CONDITION == srp->header.masked_status) ||
1370 (COMMAND_TERMINATED == srp->header.masked_status)))
1371 scsi_print_req_sense("sg_cmd_done", SRpnt);
1372
1373 /* Following if statement is a patch supplied by Eric Youngdale */
1374 if (driver_byte(SRpnt->sr_result) != 0
1375 && scsi_command_normalize_sense(SCpnt, &sshdr)
1376 && !scsi_sense_is_deferred(&sshdr)
1377 && sshdr.sense_key == UNIT_ATTENTION
1378 && sdp->device->removable) {
1379 /* Detected possible disc change. Set the bit - this */
1380 /* may be used if there are filesystems using this device */
1381 sdp->device->changed = 1;
1382 }
1383 }
1384 /* Rely on write phase to clean out srp status values, so no "else" */
1385
1386 scsi_release_request(SRpnt);
1387 SRpnt = NULL;
1388 if (sfp->closed) { /* whoops this fd already released, cleanup */
1389 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1390 sg_finish_rem_req(srp);
1391 srp = NULL;
1392 if (NULL == sfp->headrp) {
1393 SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
1394 if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
1395 scsi_device_put(sdp->device);
1396 }
1397 sfp = NULL;
1398 }
1399 } else if (srp && srp->orphan) {
1400 if (sfp->keep_orphan)
1401 srp->sg_io_owned = 0;
1402 else {
1403 sg_finish_rem_req(srp);
1404 srp = NULL;
1405 }
1406 }
1407 if (sfp && srp) {
1408 /* Now wake up any sg_read() that is waiting for this packet. */
1409 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1410 write_lock_irqsave(&sfp->rq_list_lock, iflags);
1411 srp->done = 1;
1412 wake_up_interruptible(&sfp->read_wait);
1413 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1414 }
1415 }
1416
1417 static struct file_operations sg_fops = {
1418 .owner = THIS_MODULE,
1419 .read = sg_read,
1420 .write = sg_write,
1421 .poll = sg_poll,
1422 .ioctl = sg_ioctl,
1423 #ifdef CONFIG_COMPAT
1424 .compat_ioctl = sg_compat_ioctl,
1425 #endif
1426 .open = sg_open,
1427 .mmap = sg_mmap,
1428 .release = sg_release,
1429 .fasync = sg_fasync,
1430 };
1431
1432 static struct class *sg_sysfs_class;
1433
1434 static int sg_sysfs_valid = 0;
1435
1436 static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1437 {
1438 Sg_device *sdp;
1439 unsigned long iflags;
1440 void *old_sg_dev_arr = NULL;
1441 int k, error;
1442
1443 sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
1444 if (!sdp) {
1445 printk(KERN_WARNING "kmalloc Sg_device failure\n");
1446 return -ENOMEM;
1447 }
1448
1449 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1450 if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */
1451 Sg_device **tmp_da;
1452 int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
1453 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1454
1455 tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
1456 if (unlikely(!tmp_da))
1457 goto expand_failed;
1458
1459 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1460 memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
1461 memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
1462 old_sg_dev_arr = sg_dev_arr;
1463 sg_dev_arr = tmp_da;
1464 sg_dev_max = tmp_dev_max;
1465 }
1466
1467 for (k = 0; k < sg_dev_max; k++)
1468 if (!sg_dev_arr[k])
1469 break;
1470 if (unlikely(k >= SG_MAX_DEVS))
1471 goto overflow;
1472
1473 memset(sdp, 0, sizeof(*sdp));
1474 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1475 sprintf(disk->disk_name, "sg%d", k);
1476 disk->first_minor = k;
1477 sdp->disk = disk;
1478 sdp->device = scsidp;
1479 init_waitqueue_head(&sdp->o_excl_wait);
1480 sdp->sg_tablesize = scsidp->host ? scsidp->host->sg_tablesize : 0;
1481
1482 sg_nr_dev++;
1483 sg_dev_arr[k] = sdp;
1484 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1485 error = k;
1486
1487 out:
1488 if (error < 0)
1489 kfree(sdp);
1490 kfree(old_sg_dev_arr);
1491 return error;
1492
1493 expand_failed:
1494 printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
1495 error = -ENOMEM;
1496 goto out;
1497
1498 overflow:
1499 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1500 sdev_printk(KERN_WARNING, scsidp,
1501 "Unable to attach sg device type=%d, minor "
1502 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1503 error = -ENODEV;
1504 goto out;
1505 }
1506
1507 static int
1508 sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1509 {
1510 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1511 struct gendisk *disk;
1512 Sg_device *sdp = NULL;
1513 struct cdev * cdev = NULL;
1514 int error, k;
1515
1516 disk = alloc_disk(1);
1517 if (!disk) {
1518 printk(KERN_WARNING "alloc_disk failed\n");
1519 return -ENOMEM;
1520 }
1521 disk->major = SCSI_GENERIC_MAJOR;
1522
1523 error = -ENOMEM;
1524 cdev = cdev_alloc();
1525 if (!cdev) {
1526 printk(KERN_WARNING "cdev_alloc failed\n");
1527 goto out;
1528 }
1529 cdev->owner = THIS_MODULE;
1530 cdev->ops = &sg_fops;
1531
1532 error = sg_alloc(disk, scsidp);
1533 if (error < 0) {
1534 printk(KERN_WARNING "sg_alloc failed\n");
1535 goto out;
1536 }
1537 k = error;
1538 sdp = sg_dev_arr[k];
1539
1540 devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
1541 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
1542 "%s/generic", scsidp->devfs_name);
1543 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1544 if (error) {
1545 devfs_remove("%s/generic", scsidp->devfs_name);
1546 goto out;
1547 }
1548 sdp->cdev = cdev;
1549 if (sg_sysfs_valid) {
1550 struct class_device * sg_class_member;
1551
1552 sg_class_member = class_device_create(sg_sysfs_class, NULL,
1553 MKDEV(SCSI_GENERIC_MAJOR, k),
1554 cl_dev->dev, "%s",
1555 disk->disk_name);
1556 if (IS_ERR(sg_class_member))
1557 printk(KERN_WARNING "sg_add: "
1558 "class_device_create failed\n");
1559 class_set_devdata(sg_class_member, sdp);
1560 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1561 &sg_class_member->kobj, "generic");
1562 if (error)
1563 printk(KERN_ERR "sg_add: unable to make symlink "
1564 "'generic' back to sg%d\n", k);
1565 } else
1566 printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
1567
1568 sdev_printk(KERN_NOTICE, scsidp,
1569 "Attached scsi generic sg%d type %d\n", k,scsidp->type);
1570
1571 return 0;
1572
1573 out:
1574 put_disk(disk);
1575 if (cdev)
1576 cdev_del(cdev);
1577 return error;
1578 }
1579
1580 static void
1581 sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
1582 {
1583 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1584 Sg_device *sdp = NULL;
1585 unsigned long iflags;
1586 Sg_fd *sfp;
1587 Sg_fd *tsfp;
1588 Sg_request *srp;
1589 Sg_request *tsrp;
1590 int k, delay;
1591
1592 if (NULL == sg_dev_arr)
1593 return;
1594 delay = 0;
1595 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1596 for (k = 0; k < sg_dev_max; k++) {
1597 sdp = sg_dev_arr[k];
1598 if ((NULL == sdp) || (sdp->device != scsidp))
1599 continue; /* dirty but lowers nesting */
1600 if (sdp->headfp) {
1601 sdp->detached = 1;
1602 for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1603 tsfp = sfp->nextfp;
1604 for (srp = sfp->headrp; srp; srp = tsrp) {
1605 tsrp = srp->nextrp;
1606 if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1607 sg_finish_rem_req(srp);
1608 }
1609 if (sfp->closed) {
1610 scsi_device_put(sdp->device);
1611 __sg_remove_sfp(sdp, sfp);
1612 } else {
1613 delay = 1;
1614 wake_up_interruptible(&sfp->read_wait);
1615 kill_fasync(&sfp->async_qp, SIGPOLL,
1616 POLL_HUP);
1617 }
1618 }
1619 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
1620 if (NULL == sdp->headfp) {
1621 sg_dev_arr[k] = NULL;
1622 }
1623 } else { /* nothing active, simple case */
1624 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
1625 sg_dev_arr[k] = NULL;
1626 }
1627 sg_nr_dev--;
1628 break;
1629 }
1630 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1631
1632 if (sdp) {
1633 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1634 class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
1635 cdev_del(sdp->cdev);
1636 sdp->cdev = NULL;
1637 devfs_remove("%s/generic", scsidp->devfs_name);
1638 put_disk(sdp->disk);
1639 sdp->disk = NULL;
1640 if (NULL == sdp->headfp)
1641 kfree((char *) sdp);
1642 }
1643
1644 if (delay)
1645 msleep(10); /* dirty detach so delay device destruction */
1646 }
1647
1648 /* Set 'perm' (4th argument) to 0 to disable module_param's definition
1649 * of sysfs parameters (which module_param doesn't yet support).
1650 * Sysfs parameters are defined explicitly below.
1651 */
1652 module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
1653 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1654
1655 MODULE_AUTHOR("Douglas Gilbert");
1656 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1657 MODULE_LICENSE("GPL");
1658 MODULE_VERSION(SG_VERSION_STR);
1659
1660 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1661 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
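/*
 * Load-time example (illustrative): when sg is built as a module, the two
 * parameters declared above can be set when it is loaded, e.g.
 *
 *   modprobe sg def_reserved_size=131072 allow_dio=1
 *
 * def_reserved_size can also be changed at run time through
 * /proc/scsi/sg/def_reserved_size, as noted near the top of this file.
 */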
1662
1663 static int __init
1664 init_sg(void)
1665 {
1666 int rc;
1667
1668 if (def_reserved_size >= 0)
1669 sg_big_buff = def_reserved_size;
1670
1671 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1672 SG_MAX_DEVS, "sg");
1673 if (rc)
1674 return rc;
1675 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1676 if ( IS_ERR(sg_sysfs_class) ) {
1677 rc = PTR_ERR(sg_sysfs_class);
1678 goto err_out;
1679 }
1680 sg_sysfs_valid = 1;
1681 rc = scsi_register_interface(&sg_interface);
1682 if (0 == rc) {
1683 #ifdef CONFIG_SCSI_PROC_FS
1684 sg_proc_init();
1685 #endif /* CONFIG_SCSI_PROC_FS */
1686 return 0;
1687 }
1688 class_destroy(sg_sysfs_class);
1689 err_out:
1690 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1691 return rc;
1692 }
1693
1694 static void __exit
1695 exit_sg(void)
1696 {
1697 #ifdef CONFIG_SCSI_PROC_FS
1698 sg_proc_cleanup();
1699 #endif /* CONFIG_SCSI_PROC_FS */
1700 scsi_unregister_interface(&sg_interface);
1701 class_destroy(sg_sysfs_class);
1702 sg_sysfs_valid = 0;
1703 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1704 SG_MAX_DEVS);
1705 kfree((char *)sg_dev_arr);
1706 sg_dev_arr = NULL;
1707 sg_dev_max = 0;
1708 }
1709
1710 static int
1711 sg_start_req(Sg_request * srp)
1712 {
1713 int res;
1714 Sg_fd *sfp = srp->parentfp;
1715 sg_io_hdr_t *hp = &srp->header;
1716 int dxfer_len = (int) hp->dxfer_len;
1717 int dxfer_dir = hp->dxfer_direction;
1718 Sg_scatter_hold *req_schp = &srp->data;
1719 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1720
1721 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1722 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1723 return 0;
1724 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1725 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1726 (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1727 res = sg_build_direct(srp, sfp, dxfer_len);
1728 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1729 return res;
1730 }
1731 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1732 sg_link_reserve(sfp, srp, dxfer_len);
1733 else {
1734 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1735 if (res) {
1736 sg_remove_scat(req_schp);
1737 return res;
1738 }
1739 }
1740 return 0;
1741 }
1742
1743 static void
1744 sg_finish_rem_req(Sg_request * srp)
1745 {
1746 Sg_fd *sfp = srp->parentfp;
1747 Sg_scatter_hold *req_schp = &srp->data;
1748
1749 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1750 if (srp->res_used)
1751 sg_unlink_reserve(sfp, srp);
1752 else
1753 sg_remove_scat(req_schp);
1754 sg_remove_request(sfp, srp);
1755 }
1756
1757 static int
1758 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1759 {
1760 int ret_sz;
1761 int elem_sz = sizeof (struct scatterlist);
1762 int sg_bufflen = tablesize * elem_sz;
1763 int mx_sc_elems = tablesize;
1764
1765 schp->buffer = sg_page_malloc(sg_bufflen, sfp->low_dma, &ret_sz);
1766 if (!schp->buffer)
1767 return -ENOMEM;
1768 else if (ret_sz != sg_bufflen) {
1769 sg_bufflen = ret_sz;
1770 mx_sc_elems = sg_bufflen / elem_sz;
1771 }
1772 schp->sglist_len = sg_bufflen;
1773 memset(schp->buffer, 0, sg_bufflen);
1774 return mx_sc_elems; /* number of scat_gath elements allocated */
1775 }
1776
1777 #ifdef SG_ALLOW_DIO_CODE
1778 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1779 /* hopefully this generic code will be moved to a library */
1780
1781 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if:
1782    - mapping of all the pages was not successful
1783    - any page is above max_pfn
1784    (i.e. the call either completely succeeds or fails)
1785 */
1786 static int
1787 st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1788 unsigned long uaddr, size_t count, int rw,
1789 unsigned long max_pfn)
1790 {
1791 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1792 unsigned long start = uaddr >> PAGE_SHIFT;
1793 const int nr_pages = end - start;
1794 int res, i, j;
1795 struct page **pages;
1796
1797 	/* Reject address ranges that wrap around (user attempted overflow) */
1798 if ((uaddr + count) < uaddr)
1799 return -EINVAL;
1800
1801 /* Too big */
1802 if (nr_pages > max_pages)
1803 return -ENOMEM;
1804
1805 	/* Zero-length transfer: nothing to map */
1806 if (count == 0)
1807 return 0;
1808
1809 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1810 return -ENOMEM;
1811
1812 /* Try to fault in all of the necessary pages */
1813 down_read(&current->mm->mmap_sem);
1814 /* rw==READ means read from drive, write into memory area */
1815 res = get_user_pages(
1816 current,
1817 current->mm,
1818 uaddr,
1819 nr_pages,
1820 rw == READ,
1821 0, /* don't force */
1822 pages,
1823 NULL);
1824 up_read(&current->mm->mmap_sem);
1825
1826 	/* Errors and short mappings (fewer pages than requested) bail out here */
1827 if (res < nr_pages)
1828 goto out_unmap;
1829
1830 for (i=0; i < nr_pages; i++) {
1831 		/* FIXME: flush superfluous for rw==READ,
1832 * probably wrong function for rw==WRITE
1833 */
1834 flush_dcache_page(pages[i]);
1835 if (page_to_pfn(pages[i]) > max_pfn)
1836 goto out_unlock;
1837 /* ?? Is locking needed? I don't think so */
1838 /* if (TestSetPageLocked(pages[i]))
1839 goto out_unlock; */
1840 }
1841
1842 /* Populate the scatter/gather list */
1843 sgl[0].page = pages[0];
1844 sgl[0].offset = uaddr & ~PAGE_MASK;
1845 if (nr_pages > 1) {
1846 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1847 count -= sgl[0].length;
1848 for (i=1; i < nr_pages ; i++) {
1849 sgl[i].offset = 0;
1850 sgl[i].page = pages[i];
1851 sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1852 count -= PAGE_SIZE;
1853 }
1854 }
1855 else {
1856 sgl[0].length = count;
1857 }
1858
1859 kfree(pages);
1860 return nr_pages;
1861
1862 out_unlock:
1863 /* for (j=0; j < i; j++)
1864 unlock_page(pages[j]); */
1865 res = 0;
1866 out_unmap:
1867 if (res > 0)
1868 for (j=0; j < res; j++)
1869 page_cache_release(pages[j]);
1870 kfree(pages);
1871 return res;
1872 }
1873
1874
1875 /* And unmap them... */
1876 static int
1877 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1878 int dirtied)
1879 {
1880 int i;
1881
1882 for (i=0; i < nr_pages; i++) {
1883 struct page *page = sgl[i].page;
1884
1885 /* XXX: just for debug. Remove when PageReserved is removed */
1886 BUG_ON(PageReserved(page));
1887 if (dirtied)
1888 SetPageDirty(page);
1889 /* unlock_page(page); */
1890 /* FIXME: cache flush missing for rw==READ
1891 * FIXME: call the correct reference counting function
1892 */
1893 page_cache_release(page);
1894 }
1895
1896 return 0;
1897 }
1898
1899 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1900 #endif
1901
1902
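/*
 * sg_build_direct: attempt zero-copy (direct) I/O by pinning the user
 * buffer with st_map_user_pages(). The user buffer must satisfy the
 * request queue's DMA alignment; any failure falls back to the indirect
 * path.
 *
 * Illustration only (user-space sketch, not driver code): this path is
 * requested by setting SG_FLAG_DIRECT_IO in sg_io_hdr_t.flags, e.g.
 *     hdr.flags |= SG_FLAG_DIRECT_IO;
 * before issuing the SG_IO ioctl, and it is honoured only when
 * /proc/scsi/sg/allow_dio is 1.
 */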
1903 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1904 static int
1905 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1906 {
1907 #ifdef SG_ALLOW_DIO_CODE
1908 sg_io_hdr_t *hp = &srp->header;
1909 Sg_scatter_hold *schp = &srp->data;
1910 int sg_tablesize = sfp->parentdp->sg_tablesize;
1911 struct scatterlist *sgl;
1912 int mx_sc_elems, res;
1913 struct scsi_device *sdev = sfp->parentdp->device;
1914
1915 if (((unsigned long)hp->dxferp &
1916 queue_dma_alignment(sdev->request_queue)) != 0)
1917 return 1;
1918 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1919 if (mx_sc_elems <= 0) {
1920 return 1;
1921 }
1922 sgl = (struct scatterlist *)schp->buffer;
1923 res = st_map_user_pages(sgl, mx_sc_elems, (unsigned long)hp->dxferp, dxfer_len,
1924 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0, ULONG_MAX);
1925 if (res <= 0)
1926 return 1;
1927 schp->k_use_sg = res;
1928 schp->dio_in_use = 1;
1929 hp->info |= SG_INFO_DIRECT_IO;
1930 return 0;
1931 #else
1932 return 1;
1933 #endif
1934 }
1935
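/*
 * sg_build_indirect: allocate a kernel-side buffer of buff_size bytes
 * (rounded up to SG_SECTOR_SZ). Small requests are satisfied with a
 * single contiguous allocation; larger ones are split into chunks of at
 * most SG_SCATTER_SZ bytes described by a scatter-gather list.
 */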
1936 static int
1937 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1938 {
1939 int ret_sz;
1940 int blk_size = buff_size;
1941 unsigned char *p = NULL;
1942
1943 if ((blk_size < 0) || (!sfp))
1944 return -EFAULT;
1945 if (0 == blk_size)
1946 ++blk_size; /* don't know why */
1947 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1948 blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1949 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1950 buff_size, blk_size));
1951 if (blk_size <= SG_SCATTER_SZ) {
1952 p = sg_page_malloc(blk_size, sfp->low_dma, &ret_sz);
1953 if (!p)
1954 return -ENOMEM;
1955 if (blk_size == ret_sz) { /* got it on the first attempt */
1956 schp->k_use_sg = 0;
1957 schp->buffer = p;
1958 schp->bufflen = blk_size;
1959 schp->b_malloc_len = blk_size;
1960 return 0;
1961 }
1962 } else {
1963 p = sg_page_malloc(SG_SCATTER_SZ, sfp->low_dma, &ret_sz);
1964 if (!p)
1965 return -ENOMEM;
1966 }
1967 /* Want some local declarations, so start new block ... */
1968 	{			/* let's try to build a scatter gather list */
1969 struct scatterlist *sclp;
1970 int k, rem_sz, num;
1971 int mx_sc_elems;
1972 int sg_tablesize = sfp->parentdp->sg_tablesize;
1973 int first = 1;
1974
1975 /* N.B. ret_sz carried into this block ... */
1976 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1977 if (mx_sc_elems < 0)
1978 return mx_sc_elems; /* most likely -ENOMEM */
1979
1980 for (k = 0, sclp = schp->buffer, rem_sz = blk_size;
1981 (rem_sz > 0) && (k < mx_sc_elems);
1982 ++k, rem_sz -= ret_sz, ++sclp) {
1983 if (first)
1984 first = 0;
1985 else {
1986 num =
1987 (rem_sz >
1988 SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
1989 p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1990 if (!p)
1991 break;
1992 }
1993 sg_set_buf(sclp, p, ret_sz);
1994
1995 SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n",
1996 k, sg_scatg2virt(sclp), ret_sz));
1997 } /* end of for loop */
1998 schp->k_use_sg = k;
1999 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
2000 schp->bufflen = blk_size;
2001 if (rem_sz > 0) /* must have failed */
2002 return -ENOMEM;
2003 }
2004 return 0;
2005 }
2006
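/*
 * sg_write_xfer: copy the outgoing data from user space into the kernel
 * buffers built above (skipped for direct I/O, mmap I/O and "no dxfer"
 * requests). The user data may be a single buffer or an array of iovecs
 * (iovec_count > 0), resolved one element at a time via sg_u_iovec().
 */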
2007 static int
2008 sg_write_xfer(Sg_request * srp)
2009 {
2010 sg_io_hdr_t *hp = &srp->header;
2011 Sg_scatter_hold *schp = &srp->data;
2012 int num_xfer = 0;
2013 int j, k, onum, usglen, ksglen, res;
2014 int iovec_count = (int) hp->iovec_count;
2015 int dxfer_dir = hp->dxfer_direction;
2016 unsigned char *p;
2017 unsigned char __user *up;
2018 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2019
2020 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
2021 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2022 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
2023 if (schp->bufflen < num_xfer)
2024 num_xfer = schp->bufflen;
2025 }
2026 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2027 (new_interface
2028 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2029 return 0;
2030
2031 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2032 num_xfer, iovec_count, schp->k_use_sg));
2033 if (iovec_count) {
2034 onum = iovec_count;
2035 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2036 return -EFAULT;
2037 } else
2038 onum = 1;
2039
2040 if (0 == schp->k_use_sg) { /* kernel has single buffer */
2041 for (j = 0, p = schp->buffer; j < onum; ++j) {
2042 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
2043 if (res)
2044 return res;
2045 usglen = (num_xfer > usglen) ? usglen : num_xfer;
2046 if (__copy_from_user(p, up, usglen))
2047 return -EFAULT;
2048 p += usglen;
2049 num_xfer -= usglen;
2050 if (num_xfer <= 0)
2051 return 0;
2052 }
2053 } else { /* kernel using scatter gather list */
2054 struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2055
2056 ksglen = (int) sclp->length;
2057 p = sg_scatg2virt(sclp);
2058 for (j = 0, k = 0; j < onum; ++j) {
2059 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
2060 if (res)
2061 return res;
2062
2063 for (; p; ++sclp, ksglen = (int) sclp->length,
2064 p = sg_scatg2virt(sclp)) {
2065 if (usglen <= 0)
2066 break;
2067 if (ksglen > usglen) {
2068 if (usglen >= num_xfer) {
2069 if (__copy_from_user
2070 (p, up, num_xfer))
2071 return -EFAULT;
2072 return 0;
2073 }
2074 if (__copy_from_user(p, up, usglen))
2075 return -EFAULT;
2076 p += usglen;
2077 ksglen -= usglen;
2078 break;
2079 } else {
2080 if (ksglen >= num_xfer) {
2081 if (__copy_from_user
2082 (p, up, num_xfer))
2083 return -EFAULT;
2084 return 0;
2085 }
2086 if (__copy_from_user(p, up, ksglen))
2087 return -EFAULT;
2088 up += ksglen;
2089 usglen -= ksglen;
2090 }
2091 ++k;
2092 if (k >= schp->k_use_sg)
2093 return 0;
2094 }
2095 }
2096 }
2097 return 0;
2098 }
2099
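/*
 * sg_u_iovec: resolve the user address/length for element "ind" of the
 * transfer. With iovec_count == 0 the whole dxferp buffer is used; with
 * iovecs, the ind'th sg_iovec_t is copied in from user space and its
 * iov_base/iov_len are returned after an access_ok() check.
 *
 * Illustration only (user-space sketch, not driver code): with
 *     hdr.iovec_count = 2;  hdr.dxferp = iov;   -- iov is sg_iovec_t[2]
 * each element supplies one (iov_base, iov_len) pair of the transfer.
 */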
2100 static int
2101 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
2102 int wr_xf, int *countp, unsigned char __user **up)
2103 {
2104 int num_xfer = (int) hp->dxfer_len;
2105 unsigned char __user *p = hp->dxferp;
2106 int count;
2107
2108 if (0 == sg_num) {
2109 if (wr_xf && ('\0' == hp->interface_id))
2110 count = (int) hp->flags; /* holds "old" input_size */
2111 else
2112 count = num_xfer;
2113 } else {
2114 sg_iovec_t iovec;
2115 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2116 return -EFAULT;
2117 p = iovec.iov_base;
2118 count = (int) iovec.iov_len;
2119 }
2120 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2121 return -EFAULT;
2122 if (up)
2123 *up = p;
2124 if (countp)
2125 *countp = count;
2126 return 0;
2127 }
2128
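/*
 * sg_remove_scat: release whatever sg_build_direct()/sg_build_indirect()
 * set up: unpin user pages in the direct I/O case, or free each data
 * chunk and the scatterlist array (or the single buffer) in the indirect
 * case, then clear the descriptor.
 */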
2129 static void
2130 sg_remove_scat(Sg_scatter_hold * schp)
2131 {
2132 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2133 if (schp->buffer && (schp->sglist_len > 0)) {
2134 struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2135
2136 if (schp->dio_in_use) {
2137 #ifdef SG_ALLOW_DIO_CODE
2138 st_unmap_user_pages(sclp, schp->k_use_sg, TRUE);
2139 #endif
2140 } else {
2141 int k;
2142
2143 for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2144 ++k, ++sclp) {
2145 SCSI_LOG_TIMEOUT(5, printk(
2146 "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
2147 k, sg_scatg2virt(sclp), sclp->length));
2148 sg_page_free(sg_scatg2virt(sclp), sclp->length);
2149 sclp->page = NULL;
2150 sclp->offset = 0;
2151 sclp->length = 0;
2152 }
2153 }
2154 sg_page_free(schp->buffer, schp->sglist_len);
2155 } else if (schp->buffer)
2156 sg_page_free(schp->buffer, schp->b_malloc_len);
2157 memset(schp, 0, sizeof (*schp));
2158 }
2159
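/*
 * sg_read_xfer: mirror image of sg_write_xfer(); copies data received
 * from the device out of the kernel buffers into the user buffer or
 * iovec array (skipped for the same cases as sg_write_xfer()).
 */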
2160 static int
2161 sg_read_xfer(Sg_request * srp)
2162 {
2163 sg_io_hdr_t *hp = &srp->header;
2164 Sg_scatter_hold *schp = &srp->data;
2165 int num_xfer = 0;
2166 int j, k, onum, usglen, ksglen, res;
2167 int iovec_count = (int) hp->iovec_count;
2168 int dxfer_dir = hp->dxfer_direction;
2169 unsigned char *p;
2170 unsigned char __user *up;
2171 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2172
2173 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2174 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2175 num_xfer = hp->dxfer_len;
2176 if (schp->bufflen < num_xfer)
2177 num_xfer = schp->bufflen;
2178 }
2179 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2180 (new_interface
2181 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2182 return 0;
2183
2184 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2185 num_xfer, iovec_count, schp->k_use_sg));
2186 if (iovec_count) {
2187 onum = iovec_count;
2188 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2189 return -EFAULT;
2190 } else
2191 onum = 1;
2192
2193 if (0 == schp->k_use_sg) { /* kernel has single buffer */
2194 for (j = 0, p = schp->buffer; j < onum; ++j) {
2195 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2196 if (res)
2197 return res;
2198 usglen = (num_xfer > usglen) ? usglen : num_xfer;
2199 if (__copy_to_user(up, p, usglen))
2200 return -EFAULT;
2201 p += usglen;
2202 num_xfer -= usglen;
2203 if (num_xfer <= 0)
2204 return 0;
2205 }
2206 } else { /* kernel using scatter gather list */
2207 struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2208
2209 ksglen = (int) sclp->length;
2210 p = sg_scatg2virt(sclp);
2211 for (j = 0, k = 0; j < onum; ++j) {
2212 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2213 if (res)
2214 return res;
2215
2216 for (; p; ++sclp, ksglen = (int) sclp->length,
2217 p = sg_scatg2virt(sclp)) {
2218 if (usglen <= 0)
2219 break;
2220 if (ksglen > usglen) {
2221 if (usglen >= num_xfer) {
2222 if (__copy_to_user
2223 (up, p, num_xfer))
2224 return -EFAULT;
2225 return 0;
2226 }
2227 if (__copy_to_user(up, p, usglen))
2228 return -EFAULT;
2229 p += usglen;
2230 ksglen -= usglen;
2231 break;
2232 } else {
2233 if (ksglen >= num_xfer) {
2234 if (__copy_to_user
2235 (up, p, num_xfer))
2236 return -EFAULT;
2237 return 0;
2238 }
2239 if (__copy_to_user(up, p, ksglen))
2240 return -EFAULT;
2241 up += ksglen;
2242 usglen -= ksglen;
2243 }
2244 ++k;
2245 if (k >= schp->k_use_sg)
2246 return 0;
2247 }
2248 }
2249 }
2250 return 0;
2251 }
2252
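/*
 * sg_read_oxfer: copy up to num_read_xfer bytes of the received data to
 * the user buffer at outp, walking the scatter-gather list when one is
 * in use.
 */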
2253 static int
2254 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2255 {
2256 Sg_scatter_hold *schp = &srp->data;
2257
2258 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2259 num_read_xfer));
2260 if ((!outp) || (num_read_xfer <= 0))
2261 return 0;
2262 if (schp->k_use_sg > 0) {
2263 int k, num;
2264 struct scatterlist *sclp = (struct scatterlist *) schp->buffer;
2265
2266 for (k = 0; (k < schp->k_use_sg) && sg_scatg2virt(sclp);
2267 ++k, ++sclp) {
2268 num = (int) sclp->length;
2269 if (num > num_read_xfer) {
2270 if (__copy_to_user
2271 (outp, sg_scatg2virt(sclp), num_read_xfer))
2272 return -EFAULT;
2273 break;
2274 } else {
2275 if (__copy_to_user
2276 (outp, sg_scatg2virt(sclp), num))
2277 return -EFAULT;
2278 num_read_xfer -= num;
2279 if (num_read_xfer <= 0)
2280 break;
2281 outp += num;
2282 }
2283 }
2284 } else {
2285 if (__copy_to_user(outp, schp->buffer, num_read_xfer))
2286 return -EFAULT;
2287 }
2288 return 0;
2289 }
2290
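/*
 * sg_build_reserve: allocate the per-fd reserve buffer. If the requested
 * size cannot be obtained, the request is halved and retried until it
 * would drop below PAGE_SIZE (the loop always asks for at least one
 * page).
 */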
2291 static void
2292 sg_build_reserve(Sg_fd * sfp, int req_size)
2293 {
2294 Sg_scatter_hold *schp = &sfp->reserve;
2295
2296 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2297 do {
2298 if (req_size < PAGE_SIZE)
2299 req_size = PAGE_SIZE;
2300 if (0 == sg_build_indirect(schp, sfp, req_size))
2301 return;
2302 else
2303 sg_remove_scat(schp);
2304 req_size >>= 1; /* divide by 2 */
2305 } while (req_size > (PAGE_SIZE / 2));
2306 }
2307
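/*
 * sg_link_reserve: satisfy a request from the fd's reserve buffer instead
 * of allocating afresh. The request borrows the reserve scatter-gather
 * list; the last element used may be shortened to fit "size", and its
 * original length is saved in sfp->save_scat_len so sg_unlink_reserve()
 * can restore it.
 */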
2308 static void
2309 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2310 {
2311 Sg_scatter_hold *req_schp = &srp->data;
2312 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2313
2314 srp->res_used = 1;
2315 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2316 size = (size + 1) & (~1); /* round to even for aha1542 */
2317 if (rsv_schp->k_use_sg > 0) {
2318 int k, num;
2319 int rem = size;
2320 struct scatterlist *sclp =
2321 (struct scatterlist *) rsv_schp->buffer;
2322
2323 for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sclp) {
2324 num = (int) sclp->length;
2325 if (rem <= num) {
2326 if (0 == k) {
2327 req_schp->k_use_sg = 0;
2328 req_schp->buffer = sg_scatg2virt(sclp);
2329 } else {
2330 sfp->save_scat_len = num;
2331 sclp->length = (unsigned) rem;
2332 req_schp->k_use_sg = k + 1;
2333 req_schp->sglist_len =
2334 rsv_schp->sglist_len;
2335 req_schp->buffer = rsv_schp->buffer;
2336 }
2337 req_schp->bufflen = size;
2338 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2339 break;
2340 } else
2341 rem -= num;
2342 }
2343 if (k >= rsv_schp->k_use_sg)
2344 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2345 } else {
2346 req_schp->k_use_sg = 0;
2347 req_schp->bufflen = size;
2348 req_schp->buffer = rsv_schp->buffer;
2349 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2350 }
2351 }
2352
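/*
 * sg_unlink_reserve: undo sg_link_reserve(): restore the shortened
 * scatterlist element length and mark the reserve buffer as free again.
 */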
2353 static void
2354 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2355 {
2356 Sg_scatter_hold *req_schp = &srp->data;
2357 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2358
2359 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2360 (int) req_schp->k_use_sg));
2361 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2362 struct scatterlist *sclp =
2363 (struct scatterlist *) rsv_schp->buffer;
2364
2365 if (sfp->save_scat_len > 0)
2366 (sclp + (req_schp->k_use_sg - 1))->length =
2367 (unsigned) sfp->save_scat_len;
2368 else
2369 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2370 }
2371 req_schp->k_use_sg = 0;
2372 req_schp->bufflen = 0;
2373 req_schp->buffer = NULL;
2374 req_schp->sglist_len = 0;
2375 sfp->save_scat_len = 0;
2376 srp->res_used = 0;
2377 }
2378
2379 static Sg_request *
2380 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2381 {
2382 Sg_request *resp;
2383 unsigned long iflags;
2384
2385 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2386 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2387 /* look for requests that are ready + not SG_IO owned */
2388 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2389 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2390 resp->done = 2; /* guard against other readers */
2391 break;
2392 }
2393 }
2394 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2395 return resp;
2396 }
2397
2398 #ifdef CONFIG_SCSI_PROC_FS
2399 static Sg_request *
2400 sg_get_nth_request(Sg_fd * sfp, int nth)
2401 {
2402 Sg_request *resp;
2403 unsigned long iflags;
2404 int k;
2405
2406 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2407 for (k = 0, resp = sfp->headrp; resp && (k < nth);
2408 ++k, resp = resp->nextrp) ;
2409 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2410 return resp;
2411 }
2412 #endif
2413
2414 /* always adds to end of list */
2415 static Sg_request *
2416 sg_add_request(Sg_fd * sfp)
2417 {
2418 int k;
2419 unsigned long iflags;
2420 Sg_request *resp;
2421 Sg_request *rp = sfp->req_arr;
2422
2423 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2424 resp = sfp->headrp;
2425 if (!resp) {
2426 memset(rp, 0, sizeof (Sg_request));
2427 rp->parentfp = sfp;
2428 resp = rp;
2429 sfp->headrp = resp;
2430 } else {
2431 if (0 == sfp->cmd_q)
2432 resp = NULL; /* command queuing disallowed */
2433 else {
2434 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2435 if (!rp->parentfp)
2436 break;
2437 }
2438 if (k < SG_MAX_QUEUE) {
2439 memset(rp, 0, sizeof (Sg_request));
2440 rp->parentfp = sfp;
2441 while (resp->nextrp)
2442 resp = resp->nextrp;
2443 resp->nextrp = rp;
2444 resp = rp;
2445 } else
2446 resp = NULL;
2447 }
2448 }
2449 if (resp) {
2450 resp->nextrp = NULL;
2451 resp->header.duration = jiffies_to_msecs(jiffies);
2452 resp->my_cmdp = NULL;
2453 }
2454 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2455 return resp;
2456 }
2457
2458 /* Return of 1 for found; 0 for not found */
2459 static int
2460 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2461 {
2462 Sg_request *prev_rp;
2463 Sg_request *rp;
2464 unsigned long iflags;
2465 int res = 0;
2466
2467 if ((!sfp) || (!srp) || (!sfp->headrp))
2468 return res;
2469 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2470 if (srp->my_cmdp)
2471 srp->my_cmdp->upper_private_data = NULL;
2472 prev_rp = sfp->headrp;
2473 if (srp == prev_rp) {
2474 sfp->headrp = prev_rp->nextrp;
2475 prev_rp->parentfp = NULL;
2476 res = 1;
2477 } else {
2478 while ((rp = prev_rp->nextrp)) {
2479 if (srp == rp) {
2480 prev_rp->nextrp = rp->nextrp;
2481 rp->parentfp = NULL;
2482 res = 1;
2483 break;
2484 }
2485 prev_rp = rp;
2486 }
2487 }
2488 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2489 return res;
2490 }
2491
2492 #ifdef CONFIG_SCSI_PROC_FS
2493 static Sg_fd *
2494 sg_get_nth_sfp(Sg_device * sdp, int nth)
2495 {
2496 Sg_fd *resp;
2497 unsigned long iflags;
2498 int k;
2499
2500 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2501 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2502 ++k, resp = resp->nextfp) ;
2503 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2504 return resp;
2505 }
2506 #endif
2507
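/*
 * sg_add_sfp: allocate and initialise the per-file-descriptor state
 * (defaults for timeout, pack_id forcing, low-DMA behaviour and command
 * queuing), link it onto the device's fd list and build its reserve
 * buffer of sg_big_buff bytes (or as much as can be obtained).
 */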
2508 static Sg_fd *
2509 sg_add_sfp(Sg_device * sdp, int dev)
2510 {
2511 Sg_fd *sfp;
2512 unsigned long iflags;
2513
2514 sfp = (Sg_fd *) sg_page_malloc(sizeof (Sg_fd), 0, NULL);
2515 if (!sfp)
2516 return NULL;
2517 memset(sfp, 0, sizeof (Sg_fd));
2518 init_waitqueue_head(&sfp->read_wait);
2519 rwlock_init(&sfp->rq_list_lock);
2520
2521 sfp->timeout = SG_DEFAULT_TIMEOUT;
2522 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2523 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2524 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2525 sdp->device->host->unchecked_isa_dma : 1;
2526 sfp->cmd_q = SG_DEF_COMMAND_Q;
2527 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2528 sfp->parentdp = sdp;
2529 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2530 if (!sdp->headfp)
2531 sdp->headfp = sfp;
2532 else { /* add to tail of existing list */
2533 Sg_fd *pfp = sdp->headfp;
2534 while (pfp->nextfp)
2535 pfp = pfp->nextfp;
2536 pfp->nextfp = sfp;
2537 }
2538 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2539 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2540 sg_build_reserve(sfp, sg_big_buff);
2541 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2542 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2543 return sfp;
2544 }
2545
2546 static void
2547 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2548 {
2549 Sg_fd *fp;
2550 Sg_fd *prev_fp;
2551
2552 prev_fp = sdp->headfp;
2553 if (sfp == prev_fp)
2554 sdp->headfp = prev_fp->nextfp;
2555 else {
2556 while ((fp = prev_fp->nextfp)) {
2557 if (sfp == fp) {
2558 prev_fp->nextfp = fp->nextfp;
2559 break;
2560 }
2561 prev_fp = fp;
2562 }
2563 }
2564 if (sfp->reserve.bufflen > 0) {
2565 SCSI_LOG_TIMEOUT(6,
2566 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2567 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2568 if (sfp->mmap_called)
2569 sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
2570 sg_remove_scat(&sfp->reserve);
2571 }
2572 sfp->parentdp = NULL;
2573 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2574 sg_page_free((char *) sfp, sizeof (Sg_fd));
2575 }
2576
2577 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2578 static int
2579 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2580 {
2581 Sg_request *srp;
2582 Sg_request *tsrp;
2583 int dirty = 0;
2584 int res = 0;
2585
2586 for (srp = sfp->headrp; srp; srp = tsrp) {
2587 tsrp = srp->nextrp;
2588 if (sg_srp_done(srp, sfp))
2589 sg_finish_rem_req(srp);
2590 else
2591 ++dirty;
2592 }
2593 if (0 == dirty) {
2594 unsigned long iflags;
2595
2596 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2597 __sg_remove_sfp(sdp, sfp);
2598 if (sdp->detached && (NULL == sdp->headfp)) {
2599 int k, maxd;
2600
2601 maxd = sg_dev_max;
2602 for (k = 0; k < maxd; ++k) {
2603 if (sdp == sg_dev_arr[k])
2604 break;
2605 }
2606 if (k < maxd)
2607 sg_dev_arr[k] = NULL;
2608 			kfree(sdp);
2609 res = 1;
2610 }
2611 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2612 } else {
2613 /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2614 /* only bump the access_count if we actually succeeded in
2615 * throwing another counter on the host module */
2616 scsi_device_get(sdp->device); /* XXX: retval ignored? */
2617 sfp->closed = 1; /* flag dirty state on this fd */
2618 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2619 dirty));
2620 }
2621 return res;
2622 }
2623
2624 static int
2625 sg_res_in_use(Sg_fd * sfp)
2626 {
2627 const Sg_request *srp;
2628 unsigned long iflags;
2629
2630 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2631 for (srp = sfp->headrp; srp; srp = srp->nextrp)
2632 if (srp->res_used)
2633 break;
2634 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2635 return srp ? 1 : 0;
2636 }
2637
2638 /* If retSzp==NULL want exact size or fail */
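/* Allocation is done in whole pages via __get_free_pages(); when retSzp
 * is supplied the order is halved on failure until a single page is
 * reached, and the size actually obtained is returned through *retSzp. */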
2639 static char *
2640 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2641 {
2642 char *resp = NULL;
2643 gfp_t page_mask;
2644 int order, a_size;
2645 int resSz = rqSz;
2646
2647 if (rqSz <= 0)
2648 return resp;
2649
2650 if (lowDma)
2651 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
2652 else
2653 page_mask = GFP_ATOMIC | __GFP_NOWARN;
2654
2655 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2656 order++, a_size <<= 1) ;
2657 resp = (char *) __get_free_pages(page_mask, order);
2658 while ((!resp) && order && retSzp) {
2659 --order;
2660 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2661 resp = (char *) __get_free_pages(page_mask, order); /* try half */
2662 resSz = a_size;
2663 }
2664 if (resp) {
2665 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2666 memset(resp, 0, resSz);
2667 if (retSzp)
2668 *retSzp = resSz;
2669 }
2670 return resp;
2671 }
2672
2673 static void
2674 sg_page_free(char *buff, int size)
2675 {
2676 int order, a_size;
2677
2678 if (!buff)
2679 return;
2680 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2681 order++, a_size <<= 1) ;
2682 free_pages((unsigned long) buff, order);
2683 }
2684
2685 #ifndef MAINTENANCE_IN_CMD
2686 #define MAINTENANCE_IN_CMD 0xa3
2687 #endif
2688
2689 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2690 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2691 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2692 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2693 };
2694
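/*
 * sg_allow_access: opcode filter; only the commands listed in allow_ops[]
 * above are permitted, except on scanner-type devices where any opcode is
 * allowed. Callers use it to restrict what less-privileged opens may
 * issue.
 */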
2695 static int
2696 sg_allow_access(unsigned char opcode, char dev_type)
2697 {
2698 int k;
2699
2700 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
2701 return 1;
2702 for (k = 0; k < sizeof (allow_ops); ++k) {
2703 if (opcode == allow_ops[k])
2704 return 1;
2705 }
2706 return 0;
2707 }
2708
2709 #ifdef CONFIG_SCSI_PROC_FS
2710 static int
2711 sg_last_dev(void)
2712 {
2713 int k;
2714 unsigned long iflags;
2715
2716 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2717 for (k = sg_dev_max - 1; k >= 0; --k)
2718 if (sg_dev_arr[k] && sg_dev_arr[k]->device)
2719 break;
2720 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2721 return k + 1; /* origin 1 */
2722 }
2723 #endif
2724
2725 static Sg_device *
2726 sg_get_dev(int dev)
2727 {
2728 Sg_device *sdp = NULL;
2729 unsigned long iflags;
2730
2731 if (sg_dev_arr && (dev >= 0)) {
2732 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2733 if (dev < sg_dev_max)
2734 sdp = sg_dev_arr[dev];
2735 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2736 }
2737 return sdp;
2738 }
2739
2740 #ifdef CONFIG_SCSI_PROC_FS
2741
2742 static struct proc_dir_entry *sg_proc_sgp = NULL;
2743
2744 static char sg_proc_sg_dirname[] = "scsi/sg";
2745
2746 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2747
2748 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2749 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2750 size_t count, loff_t *off);
2751 static struct file_operations adio_fops = {
2752 /* .owner, .read and .llseek added in sg_proc_init() */
2753 .open = sg_proc_single_open_adio,
2754 .write = sg_proc_write_adio,
2755 .release = single_release,
2756 };
2757
2758 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2759 static ssize_t sg_proc_write_dressz(struct file *filp,
2760 const char __user *buffer, size_t count, loff_t *off);
2761 static struct file_operations dressz_fops = {
2762 .open = sg_proc_single_open_dressz,
2763 .write = sg_proc_write_dressz,
2764 .release = single_release,
2765 };
2766
2767 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2768 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2769 static struct file_operations version_fops = {
2770 .open = sg_proc_single_open_version,
2771 .release = single_release,
2772 };
2773
2774 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2775 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2776 static struct file_operations devhdr_fops = {
2777 .open = sg_proc_single_open_devhdr,
2778 .release = single_release,
2779 };
2780
2781 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2782 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2783 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2784 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2785 static void dev_seq_stop(struct seq_file *s, void *v);
2786 static struct file_operations dev_fops = {
2787 .open = sg_proc_open_dev,
2788 .release = seq_release,
2789 };
2790 static struct seq_operations dev_seq_ops = {
2791 .start = dev_seq_start,
2792 .next = dev_seq_next,
2793 .stop = dev_seq_stop,
2794 .show = sg_proc_seq_show_dev,
2795 };
2796
2797 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2798 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2799 static struct file_operations devstrs_fops = {
2800 .open = sg_proc_open_devstrs,
2801 .release = seq_release,
2802 };
2803 static struct seq_operations devstrs_seq_ops = {
2804 .start = dev_seq_start,
2805 .next = dev_seq_next,
2806 .stop = dev_seq_stop,
2807 .show = sg_proc_seq_show_devstrs,
2808 };
2809
2810 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2811 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2812 static struct file_operations debug_fops = {
2813 .open = sg_proc_open_debug,
2814 .release = seq_release,
2815 };
2816 static struct seq_operations debug_seq_ops = {
2817 .start = dev_seq_start,
2818 .next = dev_seq_next,
2819 .stop = dev_seq_stop,
2820 .show = sg_proc_seq_show_debug,
2821 };
2822
2823
2824 struct sg_proc_leaf {
2825 const char * name;
2826 struct file_operations * fops;
2827 };
2828
2829 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2830 {"allow_dio", &adio_fops},
2831 {"debug", &debug_fops},
2832 {"def_reserved_size", &dressz_fops},
2833 {"device_hdr", &devhdr_fops},
2834 {"devices", &dev_fops},
2835 {"device_strs", &devstrs_fops},
2836 {"version", &version_fops}
2837 };
2838
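/*
 * sg_proc_init: create the /proc/scsi/sg directory and one entry per
 * element of sg_proc_leaf_arr[] above; entries whose fops provide a
 * .write method are created mode S_IRUGO|S_IWUSR, the rest read-only.
 *
 * Illustration only: typical usage from user space would be
 *     cat /proc/scsi/sg/devices
 *     echo 1 > /proc/scsi/sg/allow_dio
 */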
2839 static int
2840 sg_proc_init(void)
2841 {
2842 int k, mask;
2843 int num_leaves =
2844 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2845 struct proc_dir_entry *pdep;
2846 struct sg_proc_leaf * leaf;
2847
2848 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2849 if (!sg_proc_sgp)
2850 return 1;
2851 for (k = 0; k < num_leaves; ++k) {
2852 leaf = &sg_proc_leaf_arr[k];
2853 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2854 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2855 if (pdep) {
2856 leaf->fops->owner = THIS_MODULE,
2857 leaf->fops->read = seq_read,
2858 leaf->fops->llseek = seq_lseek,
2859 pdep->proc_fops = leaf->fops;
2860 }
2861 }
2862 return 0;
2863 }
2864
2865 static void
2866 sg_proc_cleanup(void)
2867 {
2868 int k;
2869 int num_leaves =
2870 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2871
2872 if (!sg_proc_sgp)
2873 return;
2874 for (k = 0; k < num_leaves; ++k)
2875 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2876 remove_proc_entry(sg_proc_sg_dirname, NULL);
2877 }
2878
2879
2880 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2881 {
2882 seq_printf(s, "%d\n", *((int *)s->private));
2883 return 0;
2884 }
2885
2886 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2887 {
2888 return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2889 }
2890
2891 static ssize_t
2892 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2893 size_t count, loff_t *off)
2894 {
2895 int num;
2896 char buff[11];
2897
2898 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2899 return -EACCES;
2900 num = (count < 10) ? count : 10;
2901 if (copy_from_user(buff, buffer, num))
2902 return -EFAULT;
2903 buff[num] = '\0';
2904 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2905 return count;
2906 }
2907
2908 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2909 {
2910 return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2911 }
2912
2913 static ssize_t
2914 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2915 size_t count, loff_t *off)
2916 {
2917 int num;
2918 unsigned long k = ULONG_MAX;
2919 char buff[11];
2920
2921 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2922 return -EACCES;
2923 num = (count < 10) ? count : 10;
2924 if (copy_from_user(buff, buffer, num))
2925 return -EFAULT;
2926 buff[num] = '\0';
2927 k = simple_strtoul(buff, NULL, 10);
2928 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2929 sg_big_buff = k;
2930 return count;
2931 }
2932 return -ERANGE;
2933 }
2934
2935 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2936 {
2937 seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2938 sg_version_date);
2939 return 0;
2940 }
2941
2942 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2943 {
2944 return single_open(file, sg_proc_seq_show_version, NULL);
2945 }
2946
2947 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2948 {
2949 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2950 "online\n");
2951 return 0;
2952 }
2953
2954 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2955 {
2956 return single_open(file, sg_proc_seq_show_devhdr, NULL);
2957 }
2958
2959 struct sg_proc_deviter {
2960 loff_t index;
2961 size_t max;
2962 };
2963
2964 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2965 {
2966 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2967
2968 s->private = it;
2969 if (! it)
2970 return NULL;
2971
2972 if (NULL == sg_dev_arr)
2973 return NULL;
2974 it->index = *pos;
2975 it->max = sg_last_dev();
2976 if (it->index >= it->max)
2977 return NULL;
2978 return it;
2979 }
2980
2981 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2982 {
2983 struct sg_proc_deviter * it = s->private;
2984
2985 *pos = ++it->index;
2986 return (it->index < it->max) ? it : NULL;
2987 }
2988
2989 static void dev_seq_stop(struct seq_file *s, void *v)
2990 {
2991 kfree(s->private);
2992 }
2993
2994 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2995 {
2996 return seq_open(file, &dev_seq_ops);
2997 }
2998
2999 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
3000 {
3001 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3002 Sg_device *sdp;
3003 struct scsi_device *scsidp;
3004
3005 sdp = it ? sg_get_dev(it->index) : NULL;
3006 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3007 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
3008 scsidp->host->host_no, scsidp->channel,
3009 scsidp->id, scsidp->lun, (int) scsidp->type,
3010 1,
3011 (int) scsidp->queue_depth,
3012 (int) scsidp->device_busy,
3013 (int) scsi_device_online(scsidp));
3014 else
3015 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
3016 return 0;
3017 }
3018
3019 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
3020 {
3021 return seq_open(file, &devstrs_seq_ops);
3022 }
3023
3024 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
3025 {
3026 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3027 Sg_device *sdp;
3028 struct scsi_device *scsidp;
3029
3030 sdp = it ? sg_get_dev(it->index) : NULL;
3031 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3032 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
3033 scsidp->vendor, scsidp->model, scsidp->rev);
3034 else
3035 seq_printf(s, "<no active device>\n");
3036 return 0;
3037 }
3038
3039 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3040 {
3041 int k, m, new_interface, blen, usg;
3042 Sg_request *srp;
3043 Sg_fd *fp;
3044 const sg_io_hdr_t *hp;
3045 const char * cp;
3046 unsigned int ms;
3047
3048 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
3049 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
3050 "(res)sgat=%d low_dma=%d\n", k + 1,
3051 jiffies_to_msecs(fp->timeout),
3052 fp->reserve.bufflen,
3053 (int) fp->reserve.k_use_sg,
3054 (int) fp->low_dma);
3055 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
3056 (int) fp->cmd_q, (int) fp->force_packid,
3057 (int) fp->keep_orphan, (int) fp->closed);
3058 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
3059 hp = &srp->header;
3060 new_interface = (hp->interface_id == '\0') ? 0 : 1;
3061 if (srp->res_used) {
3062 if (new_interface &&
3063 (SG_FLAG_MMAP_IO & hp->flags))
3064 cp = " mmap>> ";
3065 else
3066 cp = " rb>> ";
3067 } else {
3068 if (SG_INFO_DIRECT_IO_MASK & hp->info)
3069 cp = " dio>> ";
3070 else
3071 cp = " ";
3072 }
3073 seq_printf(s, cp);
3074 blen = srp->my_cmdp ?
3075 srp->my_cmdp->sr_bufflen : srp->data.bufflen;
3076 usg = srp->my_cmdp ?
3077 srp->my_cmdp->sr_use_sg : srp->data.k_use_sg;
3078 seq_printf(s, srp->done ?
3079 ((1 == srp->done) ? "rcv:" : "fin:")
3080 : (srp->my_cmdp ? "act:" : "prior:"));
3081 seq_printf(s, " id=%d blen=%d",
3082 srp->header.pack_id, blen);
3083 if (srp->done)
3084 seq_printf(s, " dur=%d", hp->duration);
3085 else {
3086 ms = jiffies_to_msecs(jiffies);
3087 seq_printf(s, " t_o/elap=%d/%d",
3088 (new_interface ? hp->timeout :
3089 jiffies_to_msecs(fp->timeout)),
3090 (ms > hp->duration ? ms - hp->duration : 0));
3091 }
3092 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
3093 (int) srp->data.cmd_opcode);
3094 }
3095 if (0 == m)
3096 seq_printf(s, " No requests active\n");
3097 }
3098 }
3099
3100 static int sg_proc_open_debug(struct inode *inode, struct file *file)
3101 {
3102 return seq_open(file, &debug_seq_ops);
3103 }
3104
3105 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
3106 {
3107 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3108 Sg_device *sdp;
3109
3110 if (it && (0 == it->index)) {
3111 seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
3112 "(origin 1)\n", sg_dev_max, (int)it->max);
3113 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
3114 }
3115 sdp = it ? sg_get_dev(it->index) : NULL;
3116 if (sdp) {
3117 struct scsi_device *scsidp = sdp->device;
3118
3119 if (NULL == scsidp) {
3120 seq_printf(s, "device %d detached ??\n",
3121 (int)it->index);
3122 return 0;
3123 }
3124
3125 if (sg_get_nth_sfp(sdp, 0)) {
3126 seq_printf(s, " >>> device=%s ",
3127 sdp->disk->disk_name);
3128 if (sdp->detached)
3129 seq_printf(s, "detached pending close ");
3130 else
3131 seq_printf
3132 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
3133 scsidp->host->host_no,
3134 scsidp->channel, scsidp->id,
3135 scsidp->lun,
3136 scsidp->host->hostt->emulated);
3137 seq_printf(s, " sg_tablesize=%d excl=%d\n",
3138 sdp->sg_tablesize, sdp->exclude);
3139 }
3140 sg_proc_debug_helper(s, sdp);
3141 }
3142 return 0;
3143 }
3144
3145 #endif /* CONFIG_SCSI_PROC_FS */
3146
3147 module_init(init_sg);
3148 module_exit(exit_sg);
3149 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);