1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
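/* For illustration: the packed version number above works out to
 * CCISS_DRIVER_VERSION(2,6,8) == (2 << 16) | (6 << 8) | 8 == 0x020608. */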
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
67 0x0E11, 0x4070, 0, 0, 0},
68 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
69 0x0E11, 0x4080, 0, 0, 0},
70 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
71 0x0E11, 0x4082, 0, 0, 0},
72 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
73 0x0E11, 0x4083, 0, 0, 0},
74 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
75 0x0E11, 0x409A, 0, 0, 0},
76 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
77 0x0E11, 0x409B, 0, 0, 0},
78 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
79 0x0E11, 0x409C, 0, 0, 0},
80 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
81 0x0E11, 0x409D, 0, 0, 0},
82 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
102 {0,}
103 };
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
105
106 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
107
108 /* board_id = (Subsystem Device ID << 16) | Subsystem Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
111 */
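/* For example, the Smart Array 5300 below has PCI subsystem vendor 0x0E11 and
 * subsystem device 0x4070 (see cciss_pci_device_id above), which gives a
 * board_id of 0x40700E11. */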
112 static struct board_type products[] = {
113 { 0x40700E11, "Smart Array 5300", &SA5_access },
114 { 0x40800E11, "Smart Array 5i", &SA5B_access},
115 { 0x40820E11, "Smart Array 532", &SA5B_access},
116 { 0x40830E11, "Smart Array 5312", &SA5B_access},
117 { 0x409A0E11, "Smart Array 641", &SA5_access},
118 { 0x409B0E11, "Smart Array 642", &SA5_access},
119 { 0x409C0E11, "Smart Array 6400", &SA5_access},
120 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 { 0x40910E11, "Smart Array 6i", &SA5_access},
122 { 0x3225103C, "Smart Array P600", &SA5_access},
123 { 0x3223103C, "Smart Array P800", &SA5_access},
124 { 0x3234103C, "Smart Array P400", &SA5_access},
125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
130 { 0x3215103C, "Smart Array E200i", &SA5_access},
131 };
132
133 /* How long to wait (in milliseconds) for board to go into simple mode */
134 #define MAX_CONFIG_WAIT 30000
135 #define MAX_IOCTL_CONFIG_WAIT 1000
136
137 /* define how many times we will try a command because of bus resets */
138 #define MAX_CMD_RETRIES 3
139
140 #define READ_AHEAD 1024
141 #define NR_CMDS 384 /* #commands that can be outstanding */
142 #define MAX_CTLR 32
143
144 /* Originally the cciss driver only supported 8 major numbers */
145 #define MAX_CTLR_ORIG 8
146
147
148 static ctlr_info_t *hba[MAX_CTLR];
149
150 static void do_cciss_request(request_queue_t *q);
151 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
152 static int cciss_open(struct inode *inode, struct file *filep);
153 static int cciss_release(struct inode *inode, struct file *filep);
154 static int cciss_ioctl(struct inode *inode, struct file *filep,
155 unsigned int cmd, unsigned long arg);
156
157 static int revalidate_allvol(ctlr_info_t *host);
158 static int cciss_revalidate(struct gendisk *disk);
159 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
160 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
161
162 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
163 int withirq, unsigned int *total_size, unsigned int *block_size);
164 static void cciss_geometry_inquiry(int ctlr, int logvol,
165 int withirq, unsigned int total_size,
166 unsigned int block_size, InquiryData_struct *inq_buff,
167 drive_info_struct *drv);
168 static void cciss_getgeometry(int cntl_num);
169
170 static void start_io( ctlr_info_t *h);
171 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
172 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
173 unsigned char *scsi3addr, int cmd_type);
174 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
175 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
176 int cmd_type);
177
178 static void fail_all_cmds(unsigned long ctlr);
179
180 #ifdef CONFIG_PROC_FS
181 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
182 int length, int *eof, void *data);
183 static void cciss_procinit(int i);
184 #else
185 static void cciss_procinit(int i) {}
186 #endif /* CONFIG_PROC_FS */
187
188 #ifdef CONFIG_COMPAT
189 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
190 #endif
191
192 static struct block_device_operations cciss_fops = {
193 .owner = THIS_MODULE,
194 .open = cciss_open,
195 .release = cciss_release,
196 .ioctl = cciss_ioctl,
197 #ifdef CONFIG_COMPAT
198 .compat_ioctl = cciss_compat_ioctl,
199 #endif
200 .revalidate_disk= cciss_revalidate,
201 };
202
203 /*
204 * Enqueuing and dequeuing functions for cmdlists.
205 */
206 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
207 {
208 if (*Qptr == NULL) {
209 *Qptr = c;
210 c->next = c->prev = c;
211 } else {
212 c->prev = (*Qptr)->prev;
213 c->next = (*Qptr);
214 (*Qptr)->prev->next = c;
215 (*Qptr)->prev = c;
216 }
217 }
218
219 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
220 CommandList_struct *c)
221 {
222 if (c && c->next != c) {
223 if (*Qptr == c) *Qptr = c->next;
224 c->prev->next = c->next;
225 c->next->prev = c->prev;
226 } else {
227 *Qptr = NULL;
228 }
229 return c;
230 }
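/*
 * A minimal usage sketch (mirroring the ioctl and sendcmd paths below): the
 * queue is a circular, doubly linked list headed by h->reqQ, and it must only
 * be manipulated under the controller lock, e.g.
 *
 *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 *	addQ(&h->reqQ, c);
 *	h->Qdepth++;
 *	start_io(h);
 *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 */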
231
232 #include "cciss_scsi.c" /* For SCSI tape support */
233
234 #ifdef CONFIG_PROC_FS
235
236 /*
237 * Report information about this controller.
238 */
239 #define ENG_GIG 1000000000
240 #define ENG_GIG_FACTOR (ENG_GIG/512)
241 #define RAID_UNKNOWN 6
242 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
243 "UNKNOWN"};
244
245 static struct proc_dir_entry *proc_cciss;
246
247 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
248 int length, int *eof, void *data)
249 {
250 off_t pos = 0;
251 off_t len = 0;
252 int size, i, ctlr;
253 ctlr_info_t *h = (ctlr_info_t*)data;
254 drive_info_struct *drv;
255 unsigned long flags;
256 sector_t vol_sz, vol_sz_frac;
257
258 ctlr = h->ctlr;
259
260 /* prevent displaying bogus info during configuration
261 * or deconfiguration of a logical volume
262 */
263 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
264 if (h->busy_configuring) {
265 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
266 return -EBUSY;
267 }
268 h->busy_configuring = 1;
269 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
270
271 size = sprintf(buffer, "%s: HP %s Controller\n"
272 "Board ID: 0x%08lx\n"
273 "Firmware Version: %c%c%c%c\n"
274 "IRQ: %d\n"
275 "Logical drives: %d\n"
276 "Current Q depth: %d\n"
277 "Current # commands on controller: %d\n"
278 "Max Q depth since init: %d\n"
279 "Max # commands on controller since init: %d\n"
280 "Max SG entries since init: %d\n\n",
281 h->devname,
282 h->product_name,
283 (unsigned long)h->board_id,
284 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
285 (unsigned int)h->intr,
286 h->num_luns,
287 h->Qdepth, h->commands_outstanding,
288 h->maxQsinceinit, h->max_outstanding, h->maxSG);
289
290 pos += size; len += size;
291 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
292 for(i=0; i<=h->highest_lun; i++) {
293
294 drv = &h->drv[i];
295 if (drv->heads == 0)
296 continue;
297
298 vol_sz = drv->nr_blocks;
299 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
300 vol_sz_frac *= 100;
301 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
302
303 if (drv->raid_level > 5)
304 drv->raid_level = RAID_UNKNOWN;
305 size = sprintf(buffer+len, "cciss/c%dd%d:"
306 "\t%4u.%02uGB\tRAID %s\n",
307 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
308 raid_label[drv->raid_level]);
309 pos += size; len += size;
310 }
311
312 *eof = 1;
313 *start = buffer+offset;
314 len -= offset;
315 if (len>length)
316 len = length;
317 h->busy_configuring = 0;
318 return len;
319 }
320
321 static int
322 cciss_proc_write(struct file *file, const char __user *buffer,
323 unsigned long count, void *data)
324 {
325 unsigned char cmd[80];
326 int len;
327 #ifdef CONFIG_CISS_SCSI_TAPE
328 ctlr_info_t *h = (ctlr_info_t *) data;
329 int rc;
330 #endif
331
332 if (count > sizeof(cmd)-1) return -EINVAL;
333 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
334 cmd[count] = '\0';
335 len = strlen(cmd); // above 3 lines ensure safety
336 if (len && cmd[len-1] == '\n')
337 cmd[--len] = '\0';
338 # ifdef CONFIG_CISS_SCSI_TAPE
339 if (strcmp("engage scsi", cmd)==0) {
340 rc = cciss_engage_scsi(h->ctlr);
341 if (rc != 0) return -rc;
342 return count;
343 }
344 /* might be nice to have "disengage" too, but it's not
345 safely possible. (only 1 module use count, lock issues.) */
346 # endif
347 return -EINVAL;
348 }
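/*
 * Usage sketch for the write handler above (assuming the usual per-controller
 * node name, e.g. cciss0, created under /proc/driver by cciss_procinit()):
 *
 *	echo "engage scsi" > /proc/driver/cciss/cciss0
 *
 * Any other string written here returns -EINVAL.
 */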
349
350 /*
351 * Get us a file in /proc/cciss that says something about each controller.
352 * Create /proc/cciss if it doesn't exist yet.
353 */
354 static void __devinit cciss_procinit(int i)
355 {
356 struct proc_dir_entry *pde;
357
358 if (proc_cciss == NULL) {
359 proc_cciss = proc_mkdir("cciss", proc_root_driver);
360 if (!proc_cciss)
361 return;
362 }
363
364 pde = create_proc_read_entry(hba[i]->devname,
365 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
366 proc_cciss, cciss_proc_get_info, hba[i]);
367 pde->write_proc = cciss_proc_write;
368 }
369 #endif /* CONFIG_PROC_FS */
370
371 /*
372 * For operations that cannot sleep, a command block is allocated at init,
373 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
374 * which ones are free or in use. For operations that can wait for kmalloc
375 * to possibly sleep, this routine can be called with get_from_pool set to 0.
376 * cmd_free() MUST then also be called with got_from_pool set to 0.
377 */
378 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
379 {
380 CommandList_struct *c;
381 int i;
382 u64bit temp64;
383 dma_addr_t cmd_dma_handle, err_dma_handle;
384
385 if (!get_from_pool)
386 {
387 c = (CommandList_struct *) pci_alloc_consistent(
388 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
389 if(c==NULL)
390 return NULL;
391 memset(c, 0, sizeof(CommandList_struct));
392
393 c->cmdindex = -1;
394
395 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
396 h->pdev, sizeof(ErrorInfo_struct),
397 &err_dma_handle);
398
399 if (c->err_info == NULL)
400 {
401 pci_free_consistent(h->pdev,
402 sizeof(CommandList_struct), c, cmd_dma_handle);
403 return NULL;
404 }
405 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
406 } else /* get it out of the controller's pool */
407 {
408 do {
409 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
410 if (i == NR_CMDS)
411 return NULL;
412 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
413 #ifdef CCISS_DEBUG
414 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
415 #endif
416 c = h->cmd_pool + i;
417 memset(c, 0, sizeof(CommandList_struct));
418 cmd_dma_handle = h->cmd_pool_dhandle
419 + i*sizeof(CommandList_struct);
420 c->err_info = h->errinfo_pool + i;
421 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
422 err_dma_handle = h->errinfo_pool_dhandle
423 + i*sizeof(ErrorInfo_struct);
424 h->nr_allocs++;
425
426 c->cmdindex = i;
427 }
428
429 c->busaddr = (__u32) cmd_dma_handle;
430 temp64.val = (__u64) err_dma_handle;
431 c->ErrDesc.Addr.lower = temp64.val32.lower;
432 c->ErrDesc.Addr.upper = temp64.val32.upper;
433 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
434
435 c->ctlr = h->ctlr;
436 return c;
437
438
439 }
440
441 /*
442 * Frees a command block that was previously allocated with cmd_alloc().
443 */
444 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
445 {
446 int i;
447 u64bit temp64;
448
449 if( !got_from_pool)
450 {
451 temp64.val32.lower = c->ErrDesc.Addr.lower;
452 temp64.val32.upper = c->ErrDesc.Addr.upper;
453 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
454 c->err_info, (dma_addr_t) temp64.val);
455 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
456 c, (dma_addr_t) c->busaddr);
457 } else
458 {
459 i = c - h->cmd_pool;
460 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
461 h->nr_frees++;
462 }
463 }
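/*
 * A minimal pairing sketch: the pool flag passed to cmd_free() must match the
 * one passed to cmd_alloc(), e.g.
 *
 *	c = cmd_alloc(h, 1);	(from the preallocated pool; no sleeping)
 *	...
 *	cmd_free(h, c, 1);
 *
 *	c = cmd_alloc(h, 0);	(freshly DMA-allocated; for paths that may sleep)
 *	...
 *	cmd_free(h, c, 0);
 */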
464
465 static inline ctlr_info_t *get_host(struct gendisk *disk)
466 {
467 return disk->queue->queuedata;
468 }
469
470 static inline drive_info_struct *get_drv(struct gendisk *disk)
471 {
472 return disk->private_data;
473 }
474
475 /*
476 * Open. Make sure the device is really there.
477 */
478 static int cciss_open(struct inode *inode, struct file *filep)
479 {
480 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
481 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
482
483 #ifdef CCISS_DEBUG
484 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
485 #endif /* CCISS_DEBUG */
486
487 if (host->busy_initializing || drv->busy_configuring)
488 return -EBUSY;
489 /*
490 * Root is allowed to open raw volume zero even if it's not configured
491 * so array config can still work. Root is also allowed to open any
492 * volume that has a LUN ID, so it can issue IOCTL to reread the
493 * disk information. I don't think I really like this
494 * but I'm already using way too many device nodes to claim another one
495 * for "raw controller".
496 */
497 if (drv->nr_blocks == 0) {
498 if (iminor(inode) != 0) { /* not node 0? */
499 /* if not node 0 make sure it is a partition = 0 */
500 if (iminor(inode) & 0x0f) {
501 return -ENXIO;
502 /* if it is, make sure we have a LUN ID */
503 } else if (drv->LunID == 0) {
504 return -ENXIO;
505 }
506 }
507 if (!capable(CAP_SYS_ADMIN))
508 return -EPERM;
509 }
510 drv->usage_count++;
511 host->usage_count++;
512 return 0;
513 }
514 /*
515 * Close. Sync first.
516 */
517 static int cciss_release(struct inode *inode, struct file *filep)
518 {
519 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
520 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
521
522 #ifdef CCISS_DEBUG
523 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
524 #endif /* CCISS_DEBUG */
525
526 drv->usage_count--;
527 host->usage_count--;
528 return 0;
529 }
530
531 #ifdef CONFIG_COMPAT
532
533 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
534 {
535 int ret;
536 lock_kernel();
537 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
538 unlock_kernel();
539 return ret;
540 }
541
542 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
543 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
544
545 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
546 {
547 switch (cmd) {
548 case CCISS_GETPCIINFO:
549 case CCISS_GETINTINFO:
550 case CCISS_SETINTINFO:
551 case CCISS_GETNODENAME:
552 case CCISS_SETNODENAME:
553 case CCISS_GETHEARTBEAT:
554 case CCISS_GETBUSTYPES:
555 case CCISS_GETFIRMVER:
556 case CCISS_GETDRIVVER:
557 case CCISS_REVALIDVOLS:
558 case CCISS_DEREGDISK:
559 case CCISS_REGNEWDISK:
560 case CCISS_REGNEWD:
561 case CCISS_RESCANDISK:
562 case CCISS_GETLUNINFO:
563 return do_ioctl(f, cmd, arg);
564
565 case CCISS_PASSTHRU32:
566 return cciss_ioctl32_passthru(f, cmd, arg);
567 case CCISS_BIG_PASSTHRU32:
568 return cciss_ioctl32_big_passthru(f, cmd, arg);
569
570 default:
571 return -ENOIOCTLCMD;
572 }
573 }
574
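/*
 * cciss_ioctl32_passthru() below bridges 32-bit userland to the native ioctl:
 * it copies the 32-bit IOCTL32_Command_struct in, widens the buffer pointer
 * with compat_ptr(), rebuilds a native IOCTL_Command_struct in scratch user
 * space obtained from compat_alloc_user_space(), hands it to the regular
 * CCISS_PASSTHRU handler via do_ioctl(), and copies the error_info back for
 * the 32-bit caller. cciss_ioctl32_big_passthru() does the same for
 * BIG_IOCTL_Command_struct.
 */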
575 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
576 {
577 IOCTL32_Command_struct __user *arg32 =
578 (IOCTL32_Command_struct __user *) arg;
579 IOCTL_Command_struct arg64;
580 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
581 int err;
582 u32 cp;
583
584 err = 0;
585 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
586 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
587 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
588 err |= get_user(arg64.buf_size, &arg32->buf_size);
589 err |= get_user(cp, &arg32->buf);
590 arg64.buf = compat_ptr(cp);
591 err |= copy_to_user(p, &arg64, sizeof(arg64));
592
593 if (err)
594 return -EFAULT;
595
596 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
597 if (err)
598 return err;
599 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
600 if (err)
601 return -EFAULT;
602 return err;
603 }
604
605 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
606 {
607 BIG_IOCTL32_Command_struct __user *arg32 =
608 (BIG_IOCTL32_Command_struct __user *) arg;
609 BIG_IOCTL_Command_struct arg64;
610 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
611 int err;
612 u32 cp;
613
614 err = 0;
615 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
616 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
617 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
618 err |= get_user(arg64.buf_size, &arg32->buf_size);
619 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
620 err |= get_user(cp, &arg32->buf);
621 arg64.buf = compat_ptr(cp);
622 err |= copy_to_user(p, &arg64, sizeof(arg64));
623
624 if (err)
625 return -EFAULT;
626
627 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
628 if (err)
629 return err;
630 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
631 if (err)
632 return -EFAULT;
633 return err;
634 }
635 #endif
636 /*
637 * ioctl
638 */
639 static int cciss_ioctl(struct inode *inode, struct file *filep,
640 unsigned int cmd, unsigned long arg)
641 {
642 struct block_device *bdev = inode->i_bdev;
643 struct gendisk *disk = bdev->bd_disk;
644 ctlr_info_t *host = get_host(disk);
645 drive_info_struct *drv = get_drv(disk);
646 int ctlr = host->ctlr;
647 void __user *argp = (void __user *)arg;
648
649 #ifdef CCISS_DEBUG
650 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
651 #endif /* CCISS_DEBUG */
652
653 switch(cmd) {
654 case HDIO_GETGEO:
655 {
656 struct hd_geometry driver_geo;
657 if (drv->cylinders) {
658 driver_geo.heads = drv->heads;
659 driver_geo.sectors = drv->sectors;
660 driver_geo.cylinders = drv->cylinders;
661 } else
662 return -ENXIO;
663 driver_geo.start= get_start_sect(inode->i_bdev);
664 if (copy_to_user(argp, &driver_geo, sizeof(struct hd_geometry)))
665 return -EFAULT;
666 return(0);
667 }
668
669 case CCISS_GETPCIINFO:
670 {
671 cciss_pci_info_struct pciinfo;
672
673 if (!arg) return -EINVAL;
674 pciinfo.domain = pci_domain_nr(host->pdev->bus);
675 pciinfo.bus = host->pdev->bus->number;
676 pciinfo.dev_fn = host->pdev->devfn;
677 pciinfo.board_id = host->board_id;
678 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
679 return -EFAULT;
680 return(0);
681 }
682 case CCISS_GETINTINFO:
683 {
684 cciss_coalint_struct intinfo;
685 if (!arg) return -EINVAL;
686 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
687 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
688 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
689 return -EFAULT;
690 return(0);
691 }
692 case CCISS_SETINTINFO:
693 {
694 cciss_coalint_struct intinfo;
695 unsigned long flags;
696 int i;
697
698 if (!arg) return -EINVAL;
699 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
700 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
701 return -EFAULT;
702 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
703
704 {
705 // printk("cciss_ioctl: delay and count cannot be 0\n");
706 return( -EINVAL);
707 }
708 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
709 /* Update the field, and then ring the doorbell */
710 writel( intinfo.delay,
711 &(host->cfgtable->HostWrite.CoalIntDelay));
712 writel( intinfo.count,
713 &(host->cfgtable->HostWrite.CoalIntCount));
714 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
715
716 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
717 if (!(readl(host->vaddr + SA5_DOORBELL)
718 & CFGTBL_ChangeReq))
719 break;
720 /* delay and try again */
721 udelay(1000);
722 }
723 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
724 if (i >= MAX_IOCTL_CONFIG_WAIT)
725 return -EAGAIN;
726 return(0);
727 }
728 case CCISS_GETNODENAME:
729 {
730 NodeName_type NodeName;
731 int i;
732
733 if (!arg) return -EINVAL;
734 for(i=0;i<16;i++)
735 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
736 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
737 return -EFAULT;
738 return(0);
739 }
740 case CCISS_SETNODENAME:
741 {
742 NodeName_type NodeName;
743 unsigned long flags;
744 int i;
745
746 if (!arg) return -EINVAL;
747 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
748
749 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
750 return -EFAULT;
751
752 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
753
754 /* Update the field, and then ring the doorbell */
755 for(i=0;i<16;i++)
756 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
757
758 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
759
760 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
761 if (!(readl(host->vaddr + SA5_DOORBELL)
762 & CFGTBL_ChangeReq))
763 break;
764 /* delay and try again */
765 udelay(1000);
766 }
767 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
768 if (i >= MAX_IOCTL_CONFIG_WAIT)
769 return -EAGAIN;
770 return(0);
771 }
772
773 case CCISS_GETHEARTBEAT:
774 {
775 Heartbeat_type heartbeat;
776
777 if (!arg) return -EINVAL;
778 heartbeat = readl(&host->cfgtable->HeartBeat);
779 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
780 return -EFAULT;
781 return(0);
782 }
783 case CCISS_GETBUSTYPES:
784 {
785 BusTypes_type BusTypes;
786
787 if (!arg) return -EINVAL;
788 BusTypes = readl(&host->cfgtable->BusTypes);
789 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
790 return -EFAULT;
791 return(0);
792 }
793 case CCISS_GETFIRMVER:
794 {
795 FirmwareVer_type firmware;
796
797 if (!arg) return -EINVAL;
798 memcpy(firmware, host->firm_ver, 4);
799
800 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
801 return -EFAULT;
802 return(0);
803 }
804 case CCISS_GETDRIVVER:
805 {
806 DriverVer_type DriverVer = DRIVER_VERSION;
807
808 if (!arg) return -EINVAL;
809
810 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
811 return -EFAULT;
812 return(0);
813 }
814
815 case CCISS_REVALIDVOLS:
816 if (bdev != bdev->bd_contains || drv != host->drv)
817 return -ENXIO;
818 return revalidate_allvol(host);
819
820 case CCISS_GETLUNINFO: {
821 LogvolInfo_struct luninfo;
822
823 luninfo.LunID = drv->LunID;
824 luninfo.num_opens = drv->usage_count;
825 luninfo.num_parts = 0;
826 if (copy_to_user(argp, &luninfo,
827 sizeof(LogvolInfo_struct)))
828 return -EFAULT;
829 return(0);
830 }
831 case CCISS_DEREGDISK:
832 return rebuild_lun_table(host, disk);
833
834 case CCISS_REGNEWD:
835 return rebuild_lun_table(host, NULL);
836
837 case CCISS_PASSTHRU:
838 {
839 IOCTL_Command_struct iocommand;
840 CommandList_struct *c;
841 char *buff = NULL;
842 u64bit temp64;
843 unsigned long flags;
844 DECLARE_COMPLETION(wait);
845
846 if (!arg) return -EINVAL;
847
848 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
849
850 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
851 return -EFAULT;
852 if((iocommand.buf_size < 1) &&
853 (iocommand.Request.Type.Direction != XFER_NONE))
854 {
855 return -EINVAL;
856 }
857 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
858 /* Check kmalloc limits */
859 if(iocommand.buf_size > 128000)
860 return -EINVAL;
861 #endif
862 if(iocommand.buf_size > 0)
863 {
864 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
865 if( buff == NULL)
866 return -EFAULT;
867 }
868 if (iocommand.Request.Type.Direction == XFER_WRITE)
869 {
870 /* Copy the data into the buffer we created */
871 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
872 {
873 kfree(buff);
874 return -EFAULT;
875 }
876 } else {
877 memset(buff, 0, iocommand.buf_size);
878 }
879 if ((c = cmd_alloc(host , 0)) == NULL)
880 {
881 kfree(buff);
882 return -ENOMEM;
883 }
884 // Fill in the command type
885 c->cmd_type = CMD_IOCTL_PEND;
886 // Fill in Command Header
887 c->Header.ReplyQueue = 0; // unused in simple mode
888 if( iocommand.buf_size > 0) // buffer to fill
889 {
890 c->Header.SGList = 1;
891 c->Header.SGTotal= 1;
892 } else // no buffers to fill
893 {
894 c->Header.SGList = 0;
895 c->Header.SGTotal= 0;
896 }
897 c->Header.LUN = iocommand.LUN_info;
898 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for the tag
899
900 // Fill in Request block
901 c->Request = iocommand.Request;
902
903 // Fill in the scatter gather information
904 if (iocommand.buf_size > 0 )
905 {
906 temp64.val = pci_map_single( host->pdev, buff,
907 iocommand.buf_size,
908 PCI_DMA_BIDIRECTIONAL);
909 c->SG[0].Addr.lower = temp64.val32.lower;
910 c->SG[0].Addr.upper = temp64.val32.upper;
911 c->SG[0].Len = iocommand.buf_size;
912 c->SG[0].Ext = 0; // we are not chaining
913 }
914 c->waiting = &wait;
915
916 /* Put the request on the tail of the request queue */
917 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
918 addQ(&host->reqQ, c);
919 host->Qdepth++;
920 start_io(host);
921 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
922
923 wait_for_completion(&wait);
924
925 /* unlock the buffers from DMA */
926 temp64.val32.lower = c->SG[0].Addr.lower;
927 temp64.val32.upper = c->SG[0].Addr.upper;
928 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
929 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
930
931 /* Copy the error information out */
932 iocommand.error_info = *(c->err_info);
933 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
934 {
935 kfree(buff);
936 cmd_free(host, c, 0);
937 return( -EFAULT);
938 }
939
940 if (iocommand.Request.Type.Direction == XFER_READ)
941 {
942 /* Copy the data out of the buffer we created */
943 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
944 {
945 kfree(buff);
946 cmd_free(host, c, 0);
947 return -EFAULT;
948 }
949 }
950 kfree(buff);
951 cmd_free(host, c, 0);
952 return(0);
953 }
954 case CCISS_BIG_PASSTHRU: {
955 BIG_IOCTL_Command_struct *ioc;
956 CommandList_struct *c;
957 unsigned char **buff = NULL;
958 int *buff_size = NULL;
959 u64bit temp64;
960 unsigned long flags;
961 BYTE sg_used = 0;
962 int status = 0;
963 int i;
964 DECLARE_COMPLETION(wait);
965 __u32 left;
966 __u32 sz;
967 BYTE __user *data_ptr;
968
969 if (!arg)
970 return -EINVAL;
971 if (!capable(CAP_SYS_RAWIO))
972 return -EPERM;
973 ioc = (BIG_IOCTL_Command_struct *)
974 kmalloc(sizeof(*ioc), GFP_KERNEL);
975 if (!ioc) {
976 status = -ENOMEM;
977 goto cleanup1;
978 }
979 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
980 status = -EFAULT;
981 goto cleanup1;
982 }
983 if ((ioc->buf_size < 1) &&
984 (ioc->Request.Type.Direction != XFER_NONE)) {
985 status = -EINVAL;
986 goto cleanup1;
987 }
988 /* Check kmalloc limits using all SGs */
989 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
990 status = -EINVAL;
991 goto cleanup1;
992 }
993 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
994 status = -EINVAL;
995 goto cleanup1;
996 }
997 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
998 sizeof(char *), GFP_KERNEL);
999 if (!buff) {
1000 status = -ENOMEM;
1001 goto cleanup1;
1002 }
1003 memset(buff, 0, MAXSGENTRIES);
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1009 }
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM;
1018 goto cleanup1;
1019 }
1020 if (ioc->Request.Type.Direction == XFER_WRITE) {
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1024 }
1025 } else {
1026 memset(buff[sg_used], 0, sz);
1027 }
1028 left -= sz;
1029 data_ptr += sz;
1030 sg_used++;
1031 }
1032 if ((c = cmd_alloc(host , 0)) == NULL) {
1033 status = -ENOMEM;
1034 goto cleanup1;
1035 }
1036 c->cmd_type = CMD_IOCTL_PEND;
1037 c->Header.ReplyQueue = 0;
1038
1039 if( ioc->buf_size > 0) {
1040 c->Header.SGList = sg_used;
1041 c->Header.SGTotal= sg_used;
1042 } else {
1043 c->Header.SGList = 0;
1044 c->Header.SGTotal= 0;
1045 }
1046 c->Header.LUN = ioc->LUN_info;
1047 c->Header.Tag.lower = c->busaddr;
1048
1049 c->Request = ioc->Request;
1050 if (ioc->buf_size > 0 ) {
1051 int i;
1052 for(i=0; i<sg_used; i++) {
1053 temp64.val = pci_map_single( host->pdev, buff[i],
1054 buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1060 }
1061 }
1062 c->waiting = &wait;
1063 /* Put the request on the tail of the request queue */
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1065 addQ(&host->reqQ, c);
1066 host->Qdepth++;
1067 start_io(host);
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1069 wait_for_completion(&wait);
1070 /* unlock the buffers from DMA */
1071 for(i=0; i<sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower;
1073 temp64.val32.upper = c->SG[i].Addr.upper;
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1076 }
1077 /* Copy the error information out */
1078 ioc->error_info = *(c->err_info);
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1083 }
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1092 }
1093 ptr += buff_size[i];
1094 }
1095 }
1096 cmd_free(host, c, 0);
1097 status = 0;
1098 cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1103 }
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1107 }
1108 default:
1109 return -ENOTTY;
1110 }
1111
1112 }
1113
1114 /*
1115 * revalidate_allvol is for online array config utilities. After a
1116 * utility reconfigures the drives in the array, it can use this function
1117 * (through an ioctl) to make the driver zap any previous disk structs for
1118 * that controller and get new ones.
1119 *
1120 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one
1122 * particular logical volume (instead of all of them on a particular
1123 * controller).
1124 */
1125 static int revalidate_allvol(ctlr_info_t *host)
1126 {
1127 int ctlr = host->ctlr, i;
1128 unsigned long flags;
1129
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY;
1136 }
1137 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1139
1140 for(i=0; i< NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i];
1142 if (disk) {
1143 request_queue_t *q = disk->queue;
1144
1145 if (disk->flags & GENHD_FL_UP)
1146 del_gendisk(disk);
1147 if (q)
1148 blk_cleanup_queue(q);
1149 put_disk(disk);
1150 }
1151 }
1152
1153 /*
1154 * Set the partition and block size structures for all volumes
1155 * on this controller to zero. We will reread all of this data
1156 */
1157 memset(host->drv, 0, sizeof(drive_info_struct)
1158 * CISS_MAX_LUN);
1159 /*
1160 * Tell the array controller not to give us any interrupts while
1161 * we check the new geometry. Then turn interrupts back on when
1162 * we're done.
1163 */
1164 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1165 cciss_getgeometry(ctlr);
1166 host->access.set_intr_mask(host, CCISS_INTR_ON);
1167
1168 /* Loop through each real device */
1169 for (i = 0; i < NWD; i++) {
1170 struct gendisk *disk = host->gendisk[i];
1171 drive_info_struct *drv = &(host->drv[i]);
1172 /* we must register the controller even if no disks exist */
1173 /* this is for the online array utilities */
1174 if (!drv->heads && i)
1175 continue;
1176 blk_queue_hardsect_size(drv->queue, drv->block_size);
1177 set_capacity(disk, drv->nr_blocks);
1178 add_disk(disk);
1179 }
1180 host->usage_count--;
1181 return 0;
1182 }
1183
1184 /* This function will check the usage_count of the drive to be updated/added.
1185 * If the usage_count is zero then the drive information will be updated and
1186 * the disk will be re-registered with the kernel. If not then it will be
1187 * left alone for the next reboot. The exception to this is disk 0 which
1188 * will always be left registered with the kernel since it is also the
1189 * controller node. Any changes to disk 0 will show up on the next
1190 * reboot.
1191 */
1192 static void cciss_update_drive_info(int ctlr, int drv_index)
1193 {
1194 ctlr_info_t *h = hba[ctlr];
1195 struct gendisk *disk;
1196 ReadCapdata_struct *size_buff = NULL;
1197 InquiryData_struct *inq_buff = NULL;
1198 unsigned int block_size;
1199 unsigned int total_size;
1200 unsigned long flags = 0;
1201 int ret = 0;
1202
1203 /* if the disk already exists then deregister it before proceeding */
1204 if (h->drv[drv_index].raid_level != -1){
1205 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1206 h->drv[drv_index].busy_configuring = 1;
1207 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1208 ret = deregister_disk(h->gendisk[drv_index],
1209 &h->drv[drv_index], 0);
1210 h->drv[drv_index].busy_configuring = 0;
1211 }
1212
1213 /* If the disk is in use return */
1214 if (ret)
1215 return;
1216
1217
1218 /* Get information about the disk and modify the driver structure */
1219 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1220 if (size_buff == NULL)
1221 goto mem_msg;
1222 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1223 if (inq_buff == NULL)
1224 goto mem_msg;
1225
1226 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1227 &total_size, &block_size);
1228 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1229 inq_buff, &h->drv[drv_index]);
1230
1231 ++h->num_luns;
1232 disk = h->gendisk[drv_index];
1233 set_capacity(disk, h->drv[drv_index].nr_blocks);
1234
1235
1236 /* if it's the controller it's already added */
1237 if (drv_index){
1238 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1239
1240 /* Set up queue information */
1241 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1242 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1243
1244 /* This is a hardware imposed limit. */
1245 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1246
1247 /* This is a limit in the driver and could be eliminated. */
1248 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1249
1250 blk_queue_max_sectors(disk->queue, 512);
1251
1252 disk->queue->queuedata = hba[ctlr];
1253
1254 blk_queue_hardsect_size(disk->queue,
1255 hba[ctlr]->drv[drv_index].block_size);
1256
1257 h->drv[drv_index].queue = disk->queue;
1258 add_disk(disk);
1259 }
1260
1261 freeret:
1262 kfree(size_buff);
1263 kfree(inq_buff);
1264 return;
1265 mem_msg:
1266 printk(KERN_ERR "cciss: out of memory\n");
1267 goto freeret;
1268 }
1269
1270 /* This function will find the first index of the controller's drive array
1271 * that has a -1 for the raid_level and will return that index. This is
1272 * where new drives will be added. If the index to be returned is greater
1273 * than the highest_lun index for the controller then highest_lun is set
1274 * to this new index. If there are no available indexes then -1 is returned.
1275 */
1276 static int cciss_find_free_drive_index(int ctlr)
1277 {
1278 int i;
1279
1280 for (i=0; i < CISS_MAX_LUN; i++){
1281 if (hba[ctlr]->drv[i].raid_level == -1){
1282 if (i > hba[ctlr]->highest_lun)
1283 hba[ctlr]->highest_lun = i;
1284 return i;
1285 }
1286 }
1287 return -1;
1288 }
1289
1290 /* This function will add and remove logical drives from the Logical
1291 * drive array of the controller and maintain persistence of ordering
1292 * so that mount points are preserved until the next reboot. This allows
1293 * for the removal of logical drives in the middle of the drive array
1294 * without a re-ordering of those drives.
1295 * INPUT
1296 * h = The controller to perform the operations on
1297 * del_disk = The disk to remove if specified. If the value given
1298 * is NULL then no disk is removed.
1299 */
1300 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1301 {
1302 int ctlr = h->ctlr;
1303 int num_luns;
1304 ReportLunData_struct *ld_buff = NULL;
1305 drive_info_struct *drv = NULL;
1306 int return_code;
1307 int listlength = 0;
1308 int i;
1309 int drv_found;
1310 int drv_index = 0;
1311 __u32 lunid = 0;
1312 unsigned long flags;
1313
1314 /* Set busy_configuring flag for this operation */
1315 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1316 if (h->num_luns >= CISS_MAX_LUN){
1317 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1318 return -EINVAL;
1319 }
1320
1321 if (h->busy_configuring){
1322 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1323 return -EBUSY;
1324 }
1325 h->busy_configuring = 1;
1326
1327 /* if del_disk is NULL then we are being called to add a new disk
1328 * and update the logical drive table. If it is not NULL then
1329 * we will check if the disk is in use or not.
1330 */
1331 if (del_disk != NULL){
1332 drv = get_drv(del_disk);
1333 drv->busy_configuring = 1;
1334 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1335 return_code = deregister_disk(del_disk, drv, 1);
1336 drv->busy_configuring = 0;
1337 h->busy_configuring = 0;
1338 return return_code;
1339 } else {
1340 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1341 if (!capable(CAP_SYS_RAWIO))
1342 return -EPERM;
1343
1344 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1345 if (ld_buff == NULL)
1346 goto mem_msg;
1347
1348 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1349 sizeof(ReportLunData_struct), 0, 0, 0,
1350 TYPE_CMD);
1351
1352 if (return_code == IO_OK){
1353 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1354 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1355 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1356 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1357 } else{ /* reading number of logical volumes failed */
1358 printk(KERN_WARNING "cciss: report logical volume"
1359 " command failed\n");
1360 listlength = 0;
1361 goto freeret;
1362 }
1363
1364 num_luns = listlength / 8; /* 8 bytes per entry */
1365 if (num_luns > CISS_MAX_LUN){
1366 num_luns = CISS_MAX_LUN;
1367 printk(KERN_WARNING "cciss: more luns configured"
1368 " on controller than can be handled by"
1369 " this driver.\n");
1370 }
1371
1372 /* Compare the controller's drive array to the driver's drive array.
1373 * Check for updates in the drive information and any new drives
1374 * on the controller.
1375 */
1376 for (i=0; i < num_luns; i++){
1377 int j;
1378
1379 drv_found = 0;
1380
1381 lunid = (0xff &
1382 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1383 lunid |= (0xff &
1384 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1385 lunid |= (0xff &
1386 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1387 lunid |= 0xff &
1388 (unsigned int)(ld_buff->LUN[i][0]);
1389
1390 /* Find if the LUN is already in the drive array
1391 * of the controller. If so then update its info
1392 * if it is not in use. If it does not exist then find
1393 * the first free index and add it.
1394 */
1395 for (j=0; j <= h->highest_lun; j++){
1396 if (h->drv[j].LunID == lunid){
1397 drv_index = j;
1398 drv_found = 1;
1399 }
1400 }
1401
1402 /* check if the drive was found already in the array */
1403 if (!drv_found){
1404 drv_index = cciss_find_free_drive_index(ctlr);
1405 if (drv_index == -1)
1406 goto freeret;
1407
1408 }
1409 h->drv[drv_index].LunID = lunid;
1410 cciss_update_drive_info(ctlr, drv_index);
1411 } /* end for */
1412 } /* end else */
1413
1414 freeret:
1415 kfree(ld_buff);
1416 h->busy_configuring = 0;
1417 /* We return -1 here to tell the ACU that we have registered/updated
1418 * all of the drives that we can and to keep it from calling us
1419 * additional times.
1420 */
1421 return -1;
1422 mem_msg:
1423 printk(KERN_ERR "cciss: out of memory\n");
1424 goto freeret;
1425 }
1426
1427 /* This function will deregister the disk and its queue from the
1428 * kernel. It must be called with the controller lock held and the
1429 * drv structure's busy_configuring flag set. Its parameters are:
1430 *
1431 * disk = This is the disk to be deregistered
1432 * drv = This is the drive_info_struct associated with the disk to be
1433 * deregistered. It contains information about the disk used
1434 * by the driver.
1435 * clear_all = This flag determines whether or not the disk information
1436 * is going to be completely cleared out and the highest_lun
1437 * reset. Sometimes we want to clear out information about
1438 * the disk in preparation for re-adding it. In this case
1439 * the highest_lun should be left unchanged and the LunID
1440 * should not be cleared.
1441 */
1442 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1443 int clear_all)
1444 {
1445 ctlr_info_t *h = get_host(disk);
1446
1447 if (!capable(CAP_SYS_RAWIO))
1448 return -EPERM;
1449
1450 /* make sure the logical volume is NOT in use */
1451 if(clear_all || (h->gendisk[0] == disk)) {
1452 if (drv->usage_count > 1)
1453 return -EBUSY;
1454 }
1455 else
1456 if( drv->usage_count > 0 )
1457 return -EBUSY;
1458
1459 /* invalidate the devices and deregister the disk. If it is disk
1460 * zero do not deregister it but just zero out its values. This
1461 * allows us to delete disk zero but keep the controller registered.
1462 */
1463 if (h->gendisk[0] != disk){
1464 if (disk) {
1465 request_queue_t *q = disk->queue;
1466 if (disk->flags & GENHD_FL_UP)
1467 del_gendisk(disk);
1468 if (q)
1469 blk_cleanup_queue(q);
1470 put_disk(disk);
1471 }
1472 }
1473
1474 --h->num_luns;
1475 /* zero out the disk size info */
1476 drv->nr_blocks = 0;
1477 drv->block_size = 0;
1478 drv->heads = 0;
1479 drv->sectors = 0;
1480 drv->cylinders = 0;
1481 drv->raid_level = -1; /* This can be used as a flag variable to
1482 * indicate that this element of the drive
1483 * array is free.
1484 */
1485
1486 if (clear_all){
1487 /* check to see if it was the last disk */
1488 if (drv == h->drv + h->highest_lun) {
1489 /* if so, find the new highest lun */
1490 int i, newhighest =-1;
1491 for(i=0; i<h->highest_lun; i++) {
1492 /* if the disk has size > 0, it is available */
1493 if (h->drv[i].heads)
1494 newhighest = i;
1495 }
1496 h->highest_lun = newhighest;
1497 }
1498
1499 drv->LunID = 0;
1500 }
1501 return(0);
1502 }
1503
1504 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1505 size_t size,
1506 unsigned int use_unit_num, /* 0: address the controller,
1507 1: address logical volume log_unit,
1508 2: periph device address is scsi3addr */
1509 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1510 int cmd_type)
1511 {
1512 ctlr_info_t *h= hba[ctlr];
1513 u64bit buff_dma_handle;
1514 int status = IO_OK;
1515
1516 c->cmd_type = CMD_IOCTL_PEND;
1517 c->Header.ReplyQueue = 0;
1518 if( buff != NULL) {
1519 c->Header.SGList = 1;
1520 c->Header.SGTotal= 1;
1521 } else {
1522 c->Header.SGList = 0;
1523 c->Header.SGTotal= 0;
1524 }
1525 c->Header.Tag.lower = c->busaddr;
1526
1527 c->Request.Type.Type = cmd_type;
1528 if (cmd_type == TYPE_CMD) {
1529 switch(cmd) {
1530 case CISS_INQUIRY:
1531 /* If use_unit_num == 0, this command is addressed
1532 to the controller, so it's a physical command:
1533 mode = 0, target = 0, and we have nothing to write.
1534 Otherwise, if use_unit_num == 1,
1535 mode = 1 (volume set addressing), target = LunID.
1536 Otherwise, if use_unit_num == 2,
1537 mode = 0 (periph dev addr), target = scsi3addr. */
1538 if (use_unit_num == 1) {
1539 c->Header.LUN.LogDev.VolId=
1540 h->drv[log_unit].LunID;
1541 c->Header.LUN.LogDev.Mode = 1;
1542 } else if (use_unit_num == 2) {
1543 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1544 c->Header.LUN.LogDev.Mode = 0;
1545 }
1546 /* are we trying to read a vital product page */
1547 if(page_code != 0) {
1548 c->Request.CDB[1] = 0x01;
1549 c->Request.CDB[2] = page_code;
1550 }
1551 c->Request.CDBLen = 6;
1552 c->Request.Type.Attribute = ATTR_SIMPLE;
1553 c->Request.Type.Direction = XFER_READ;
1554 c->Request.Timeout = 0;
1555 c->Request.CDB[0] = CISS_INQUIRY;
1556 c->Request.CDB[4] = size & 0xFF;
1557 break;
1558 case CISS_REPORT_LOG:
1559 case CISS_REPORT_PHYS:
1560 /* Talking to the controller, so it's a physical command:
1561 mode = 00, target = 0. Nothing to write.
1562 */
1563 c->Request.CDBLen = 12;
1564 c->Request.Type.Attribute = ATTR_SIMPLE;
1565 c->Request.Type.Direction = XFER_READ;
1566 c->Request.Timeout = 0;
1567 c->Request.CDB[0] = cmd;
1568 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1569 c->Request.CDB[7] = (size >> 16) & 0xFF;
1570 c->Request.CDB[8] = (size >> 8) & 0xFF;
1571 c->Request.CDB[9] = size & 0xFF;
1572 break;
1573
1574 case CCISS_READ_CAPACITY:
1575 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1576 c->Header.LUN.LogDev.Mode = 1;
1577 c->Request.CDBLen = 10;
1578 c->Request.Type.Attribute = ATTR_SIMPLE;
1579 c->Request.Type.Direction = XFER_READ;
1580 c->Request.Timeout = 0;
1581 c->Request.CDB[0] = cmd;
1582 break;
1583 case CCISS_CACHE_FLUSH:
1584 c->Request.CDBLen = 12;
1585 c->Request.Type.Attribute = ATTR_SIMPLE;
1586 c->Request.Type.Direction = XFER_WRITE;
1587 c->Request.Timeout = 0;
1588 c->Request.CDB[0] = BMIC_WRITE;
1589 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1590 break;
1591 default:
1592 printk(KERN_WARNING
1593 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1594 return(IO_ERROR);
1595 }
1596 } else if (cmd_type == TYPE_MSG) {
1597 switch (cmd) {
1598 case 0: /* ABORT message */
1599 c->Request.CDBLen = 12;
1600 c->Request.Type.Attribute = ATTR_SIMPLE;
1601 c->Request.Type.Direction = XFER_WRITE;
1602 c->Request.Timeout = 0;
1603 c->Request.CDB[0] = cmd; /* abort */
1604 c->Request.CDB[1] = 0; /* abort a command */
1605 /* buff contains the tag of the command to abort */
1606 memcpy(&c->Request.CDB[4], buff, 8);
1607 break;
1608 case 1: /* RESET message */
1609 c->Request.CDBLen = 12;
1610 c->Request.Type.Attribute = ATTR_SIMPLE;
1611 c->Request.Type.Direction = XFER_WRITE;
1612 c->Request.Timeout = 0;
1613 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1614 c->Request.CDB[0] = cmd; /* reset */
1615 c->Request.CDB[1] = 0x04; /* reset a LUN */
break;
1616 case 3: /* No-Op message */
1617 c->Request.CDBLen = 1;
1618 c->Request.Type.Attribute = ATTR_SIMPLE;
1619 c->Request.Type.Direction = XFER_WRITE;
1620 c->Request.Timeout = 0;
1621 c->Request.CDB[0] = cmd;
1622 break;
1623 default:
1624 printk(KERN_WARNING
1625 "cciss%d: unknown message type %d\n",
1626 ctlr, cmd);
1627 return IO_ERROR;
1628 }
1629 } else {
1630 printk(KERN_WARNING
1631 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1632 return IO_ERROR;
1633 }
1634 /* Fill in the scatter gather information */
1635 if (size > 0) {
1636 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1637 buff, size, PCI_DMA_BIDIRECTIONAL);
1638 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1639 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1640 c->SG[0].Len = size;
1641 c->SG[0].Ext = 0; /* we are not chaining */
1642 }
1643 return status;
1644 }
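/*
 * Typical use of fill_cmd()/sendcmd_withirq(), as in cciss_geometry_inquiry()
 * further down:
 *
 *	return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
 *		sizeof(*inq_buff), 1, logvol, 0xC1, TYPE_CMD);
 *
 * i.e. a TYPE_CMD inquiry addressed to logical volume logvol (use_unit_num 1)
 * requesting vital product page 0xC1.
 */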
1645 static int sendcmd_withirq(__u8 cmd,
1646 int ctlr,
1647 void *buff,
1648 size_t size,
1649 unsigned int use_unit_num,
1650 unsigned int log_unit,
1651 __u8 page_code,
1652 int cmd_type)
1653 {
1654 ctlr_info_t *h = hba[ctlr];
1655 CommandList_struct *c;
1656 u64bit buff_dma_handle;
1657 unsigned long flags;
1658 int return_status;
1659 DECLARE_COMPLETION(wait);
1660
1661 if ((c = cmd_alloc(h , 0)) == NULL)
1662 return -ENOMEM;
1663 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1664 log_unit, page_code, NULL, cmd_type);
1665 if (return_status != IO_OK) {
1666 cmd_free(h, c, 0);
1667 return return_status;
1668 }
1669 resend_cmd2:
1670 c->waiting = &wait;
1671
1672 /* Put the request on the tail of the queue and send it */
1673 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1674 addQ(&h->reqQ, c);
1675 h->Qdepth++;
1676 start_io(h);
1677 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1678
1679 wait_for_completion(&wait);
1680
1681 if(c->err_info->CommandStatus != 0)
1682 { /* an error has occurred */
1683 switch(c->err_info->CommandStatus)
1684 {
1685 case CMD_TARGET_STATUS:
1686 printk(KERN_WARNING "cciss: cmd %p has "
1687 " completed with errors\n", c);
1688 if( c->err_info->ScsiStatus)
1689 {
1690 printk(KERN_WARNING "cciss: cmd %p "
1691 "has SCSI Status = %x\n",
1692 c,
1693 c->err_info->ScsiStatus);
1694 }
1695
1696 break;
1697 case CMD_DATA_UNDERRUN:
1698 case CMD_DATA_OVERRUN:
1699 /* expected for inquiry and report lun commands */
1700 break;
1701 case CMD_INVALID:
1702 printk(KERN_WARNING "cciss: Cmd %p is "
1703 "reported invalid\n", c);
1704 return_status = IO_ERROR;
1705 break;
1706 case CMD_PROTOCOL_ERR:
1707 printk(KERN_WARNING "cciss: cmd %p has "
1708 "protocol error \n", c);
1709 return_status = IO_ERROR;
1710 break;
1711 case CMD_HARDWARE_ERR:
1712 printk(KERN_WARNING "cciss: cmd %p had "
1713 " hardware error\n", c);
1714 return_status = IO_ERROR;
1715 break;
1716 case CMD_CONNECTION_LOST:
1717 printk(KERN_WARNING "cciss: cmd %p had "
1718 "connection lost\n", c);
1719 return_status = IO_ERROR;
1720 break;
1721 case CMD_ABORTED:
1722 printk(KERN_WARNING "cciss: cmd %p was "
1723 "aborted\n", c);
1724 return_status = IO_ERROR;
1725 break;
1726 case CMD_ABORT_FAILED:
1727 printk(KERN_WARNING "cciss: cmd %p reports "
1728 "abort failed\n", c);
1729 return_status = IO_ERROR;
1730 break;
1731 case CMD_UNSOLICITED_ABORT:
1732 printk(KERN_WARNING
1733 "cciss%d: unsolicited abort %p\n",
1734 ctlr, c);
1735 if (c->retry_count < MAX_CMD_RETRIES) {
1736 printk(KERN_WARNING
1737 "cciss%d: retrying %p\n",
1738 ctlr, c);
1739 c->retry_count++;
1740 /* erase the old error information */
1741 memset(c->err_info, 0,
1742 sizeof(ErrorInfo_struct));
1743 return_status = IO_OK;
1744 INIT_COMPLETION(wait);
1745 goto resend_cmd2;
1746 }
1747 return_status = IO_ERROR;
1748 break;
1749 default:
1750 printk(KERN_WARNING "cciss: cmd %p returned "
1751 "unknown status %x\n", c,
1752 c->err_info->CommandStatus);
1753 return_status = IO_ERROR;
1754 }
1755 }
1756 /* unlock the buffers from DMA */
1757 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1758 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1759 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1760 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1761 cmd_free(h, c, 0);
1762 return(return_status);
1763
1764 }
1765 static void cciss_geometry_inquiry(int ctlr, int logvol,
1766 int withirq, unsigned int total_size,
1767 unsigned int block_size, InquiryData_struct *inq_buff,
1768 drive_info_struct *drv)
1769 {
1770 int return_code;
1771 memset(inq_buff, 0, sizeof(InquiryData_struct));
1772 if (withirq)
1773 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1774 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1775 else
1776 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1777 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1778 if (return_code == IO_OK) {
1779 if(inq_buff->data_byte[8] == 0xFF) {
1780 printk(KERN_WARNING
1781 "cciss: reading geometry failed, volume "
1782 "does not support reading geometry\n");
1783 drv->block_size = block_size;
1784 drv->nr_blocks = total_size;
1785 drv->heads = 255;
1786 drv->sectors = 32; // Sectors per track
1787 drv->cylinders = total_size / 255 / 32;
1788 } else {
1789 unsigned int t;
1790
1791 drv->block_size = block_size;
1792 drv->nr_blocks = total_size;
1793 drv->heads = inq_buff->data_byte[6];
1794 drv->sectors = inq_buff->data_byte[7];
1795 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1796 drv->cylinders += inq_buff->data_byte[5];
1797 drv->raid_level = inq_buff->data_byte[8];
1798 t = drv->heads * drv->sectors;
1799 if (t > 1) {
1800 drv->cylinders = total_size/t;
1801 }
1802 }
1803 } else { /* Get geometry failed */
1804 printk(KERN_WARNING "cciss: reading geometry failed\n");
1805 }
1806 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1807 drv->heads, drv->sectors, drv->cylinders);
1808 }
1809 static void
1810 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1811 int withirq, unsigned int *total_size, unsigned int *block_size)
1812 {
1813 int return_code;
1814 memset(buf, 0, sizeof(*buf));
1815 if (withirq)
1816 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1817 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1818 else
1819 return_code = sendcmd(CCISS_READ_CAPACITY,
1820 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1821 if (return_code == IO_OK) {
1822 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1823 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1824 } else { /* read capacity command failed */
1825 printk(KERN_WARNING "cciss: read capacity failed\n");
1826 *total_size = 0;
1827 *block_size = BLOCK_SIZE;
1828 }
1829 printk(KERN_INFO " blocks= %u block_size= %d\n",
1830 *total_size, *block_size);
1831 return;
1832 }
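/*
 * Note on the +1 above: the byte-swapped total_size field appears to carry
 * the address of the last logical block (READ CAPACITY(10) style), so adding
 * one converts it to a block count.  For example, a reported last LBA of
 * 0x000FFFFF corresponds to 1048576 blocks.
 */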
1833
1834 static int cciss_revalidate(struct gendisk *disk)
1835 {
1836 ctlr_info_t *h = get_host(disk);
1837 drive_info_struct *drv = get_drv(disk);
1838 int logvol;
1839 int FOUND=0;
1840 unsigned int block_size;
1841 unsigned int total_size;
1842 ReadCapdata_struct *size_buff = NULL;
1843 InquiryData_struct *inq_buff = NULL;
1844
1845 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1846 {
1847 if(h->drv[logvol].LunID == drv->LunID) {
1848 FOUND=1;
1849 break;
1850 }
1851 }
1852
1853 if (!FOUND) return 1;
1854
1855 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1856 if (size_buff == NULL)
1857 {
1858 printk(KERN_WARNING "cciss: out of memory\n");
1859 return 1;
1860 }
1861 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1862 if (inq_buff == NULL)
1863 {
1864 printk(KERN_WARNING "cciss: out of memory\n");
1865 kfree(size_buff);
1866 return 1;
1867 }
1868
1869 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1870 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1871
1872 blk_queue_hardsect_size(drv->queue, drv->block_size);
1873 set_capacity(disk, drv->nr_blocks);
1874
1875 kfree(size_buff);
1876 kfree(inq_buff);
1877 return 0;
1878 }
1879
1880 /*
1881  * Poll (busy-wait) for a command to complete.
1882  * The memory-mapped FIFO is polled for the completion.
1883  * Used only at init time, while interrupts from the HBA are disabled.
1884 */
1885 static unsigned long pollcomplete(int ctlr)
1886 {
1887 unsigned long done;
1888 int i;
1889
1890 /* Wait (up to 20 seconds) for a command to complete */
1891
1892 for (i = 20 * HZ; i > 0; i--) {
1893 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1894 if (done == FIFO_EMPTY)
1895 schedule_timeout_uninterruptible(1);
1896 else
1897 return (done);
1898 }
1899 /* Invalid address to tell caller we ran out of time */
1900 return 1;
1901 }
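/*
 * Returning 1 works as a timeout sentinel here: real completions are either a
 * command's bus address or a shifted pool index whose low bits carry flag and
 * error information (see the tag handling in do_cciss_request() and
 * do_cciss_intr()), so in practice a bare 1 never matches a valid tag.
 */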
1902
1903 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1904 {
1905 /* We get in here if sendcmd() is polling for completions
1906 and gets some command back that it wasn't expecting --
1907 something other than that which it just sent down.
1908 Ordinarily, that shouldn't happen, but it can happen when
1909 the scsi tape stuff gets into error handling mode, and
1910 starts using sendcmd() to try to abort commands and
1911 reset tape drives. In that case, sendcmd may pick up
1912 completions of commands that were sent to logical drives
1913 through the block i/o system, or cciss ioctls completing, etc.
1914 In that case, we need to save those completions for later
1915 processing by the interrupt handler.
1916 */
1917
1918 #ifdef CONFIG_CISS_SCSI_TAPE
1919 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1920
1921 /* If it's not the scsi tape stuff doing error handling, (abort */
1922 /* or reset) then we don't expect anything weird. */
1923 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1924 #endif
1925 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1926 "Invalid command list address returned! (%lx)\n",
1927 ctlr, complete);
1928 /* not much we can do. */
1929 #ifdef CONFIG_CISS_SCSI_TAPE
1930 return 1;
1931 }
1932
1933 /* We've sent down an abort or reset, but something else
1934 has completed */
1935 if (srl->ncompletions >= (NR_CMDS + 2)) {
1936 /* Uh oh. No room to save it for later... */
1937 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1938 "reject list overflow, command lost!\n", ctlr);
1939 return 1;
1940 }
1941 /* Save it for later */
1942 srl->complete[srl->ncompletions] = complete;
1943 srl->ncompletions++;
1944 #endif
1945 return 0;
1946 }
1947
1948 /*
1949 * Send a command to the controller, and wait for it to complete.
1950 * Only used at init time.
1951 */
1952 static int sendcmd(
1953 __u8 cmd,
1954 int ctlr,
1955 void *buff,
1956 size_t size,
1957 unsigned int use_unit_num, /* 0: address the controller,
1958 1: address logical volume log_unit,
1959 2: periph device address is scsi3addr */
1960 unsigned int log_unit,
1961 __u8 page_code,
1962 unsigned char *scsi3addr,
1963 int cmd_type)
1964 {
1965 CommandList_struct *c;
1966 int i;
1967 unsigned long complete;
1968 ctlr_info_t *info_p= hba[ctlr];
1969 u64bit buff_dma_handle;
1970 int status, done = 0;
1971
1972 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1973 printk(KERN_WARNING "cciss: unable to get memory");
1974 return(IO_ERROR);
1975 }
1976 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1977 log_unit, page_code, scsi3addr, cmd_type);
1978 if (status != IO_OK) {
1979 cmd_free(info_p, c, 1);
1980 return status;
1981 }
1982 resend_cmd1:
1983 /*
1984 * Disable interrupt
1985 */
1986 #ifdef CCISS_DEBUG
1987 printk(KERN_DEBUG "cciss: turning intr off\n");
1988 #endif /* CCISS_DEBUG */
1989 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1990
1991 /* Make sure there is room in the command FIFO */
1992 /* Actually it should be completely empty at this time */
1993 /* unless we are in here doing error handling for the scsi */
1994 /* tape side of the driver. */
1995 for (i = 200000; i > 0; i--)
1996 {
1997 /* if fifo isn't full go */
1998 if (!(info_p->access.fifo_full(info_p)))
1999 {
2000
2001 break;
2002 }
2003 udelay(10);
2004 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2005 " waiting!\n", ctlr);
2006 }
2007 /*
2008 * Send the cmd
2009 */
2010 info_p->access.submit_command(info_p, c);
2011 done = 0;
2012 do {
2013 complete = pollcomplete(ctlr);
2014
2015 #ifdef CCISS_DEBUG
2016 printk(KERN_DEBUG "cciss: command completed\n");
2017 #endif /* CCISS_DEBUG */
2018
2019 if (complete == 1) {
2020 printk( KERN_WARNING
2021 				"cciss cciss%d: SendCmd timed out, "
2022 				"no command list address returned!\n",
2023 ctlr);
2024 status = IO_ERROR;
2025 done = 1;
2026 break;
2027 }
2028
2029 /* This will need to change for direct lookup completions */
2030 if ( (complete & CISS_ERROR_BIT)
2031 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2032 {
2033 			/* if data overrun or underrun on a Report command,
2034 ignore it
2035 */
2036 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2037 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2038 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2039 ((c->err_info->CommandStatus ==
2040 CMD_DATA_OVERRUN) ||
2041 (c->err_info->CommandStatus ==
2042 CMD_DATA_UNDERRUN)
2043 ))
2044 {
2045 complete = c->busaddr;
2046 } else {
2047 if (c->err_info->CommandStatus ==
2048 CMD_UNSOLICITED_ABORT) {
2049 printk(KERN_WARNING "cciss%d: "
2050 "unsolicited abort %p\n",
2051 ctlr, c);
2052 if (c->retry_count < MAX_CMD_RETRIES) {
2053 printk(KERN_WARNING
2054 "cciss%d: retrying %p\n",
2055 ctlr, c);
2056 c->retry_count++;
2057 /* erase the old error */
2058 /* information */
2059 memset(c->err_info, 0,
2060 sizeof(ErrorInfo_struct));
2061 goto resend_cmd1;
2062 } else {
2063 printk(KERN_WARNING
2064 "cciss%d: retried %p too "
2065 "many times\n", ctlr, c);
2066 status = IO_ERROR;
2067 goto cleanup1;
2068 }
2069 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2070 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2071 status = IO_ERROR;
2072 goto cleanup1;
2073 }
2074 			printk(KERN_WARNING "cciss cciss%d: sendcmd"
2075 				" Error %x\n", ctlr,
2076 c->err_info->CommandStatus);
2077 			printk(KERN_WARNING "cciss cciss%d: sendcmd"
2078 " offensive info\n"
2079 " size %x\n num %x value %x\n", ctlr,
2080 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2081 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2082 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2083 status = IO_ERROR;
2084 goto cleanup1;
2085 }
2086 }
2087 /* This will need changing for direct lookup completions */
2088 if (complete != c->busaddr) {
2089 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2090 BUG(); /* we are pretty much hosed if we get here. */
2091 }
2092 continue;
2093 } else
2094 done = 1;
2095 } while (!done);
2096
2097 cleanup1:
2098 /* unlock the data buffer from DMA */
2099 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2100 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2101 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2102 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2103 #ifdef CONFIG_CISS_SCSI_TAPE
2104 /* if we saved some commands for later, process them now. */
2105 if (info_p->scsi_rejects.ncompletions > 0)
2106 do_cciss_intr(0, info_p, NULL);
2107 #endif
2108 cmd_free(info_p, c, 1);
2109 return (status);
2110 }
2111 /*
2112 * Map (physical) PCI mem into (virtual) kernel space
2113 */
2114 static void __iomem *remap_pci_mem(ulong base, ulong size)
2115 {
2116 ulong page_base = ((ulong) base) & PAGE_MASK;
2117 ulong page_offs = ((ulong) base) - page_base;
2118 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2119
2120 return page_remapped ? (page_remapped + page_offs) : NULL;
2121 }
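/*
 * Example with illustrative addresses, assuming 4K pages: base = 0xFDFF0080
 * and size = 200 give page_base = 0xFDFF0000 and page_offs = 0x80, so
 * ioremap() maps 0x80 + 200 bytes starting at the page boundary and the
 * caller gets back a pointer already advanced by the 0x80 offset.
 */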
2122
2123 /*
2124  * Takes jobs off the request queue and sends them to the hardware, then
2125  * puts them on the completion queue to wait for completion.
2126 */
2127 static void start_io( ctlr_info_t *h)
2128 {
2129 CommandList_struct *c;
2130
2131 while(( c = h->reqQ) != NULL )
2132 {
2133 /* can't do anything if fifo is full */
2134 if ((h->access.fifo_full(h))) {
2135 printk(KERN_WARNING "cciss: fifo full\n");
2136 break;
2137 }
2138
2139 		/* Get the first entry from the Request Q */
2140 removeQ(&(h->reqQ), c);
2141 h->Qdepth--;
2142
2143 		/* Tell the controller to execute the command */
2144 h->access.submit_command(h, c);
2145
2146 /* Put job onto the completed Q */
2147 addQ (&(h->cmpQ), c);
2148 }
2149 }
2150
2151 static inline void complete_buffers(struct bio *bio, int status)
2152 {
2153 while (bio) {
2154 struct bio *xbh = bio->bi_next;
2155 int nr_sectors = bio_sectors(bio);
2156
2157 bio->bi_next = NULL;
2158 		blk_finished_io(nr_sectors);
2159 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
2160 bio = xbh;
2161 }
2162
2163 }
2164 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2165 /* Zeros out the error record and then resends the command back */
2166 /* to the controller */
2167 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2168 {
2169 /* erase the old error information */
2170 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2171
2172 /* add it to software queue and then send it to the controller */
2173 addQ(&(h->reqQ),c);
2174 h->Qdepth++;
2175 if(h->Qdepth > h->maxQsinceinit)
2176 h->maxQsinceinit = h->Qdepth;
2177
2178 start_io(h);
2179 }
2180 /* checks the status of the job and calls complete buffers to mark all
2181 * buffers for the completed job.
2182 */
2183 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2184 int timeout)
2185 {
2186 int status = 1;
2187 int i;
2188 int retry_cmd = 0;
2189 u64bit temp64;
2190
2191 if (timeout)
2192 status = 0;
2193
2194 if(cmd->err_info->CommandStatus != 0)
2195 { /* an error has occurred */
2196 switch(cmd->err_info->CommandStatus)
2197 {
2198 unsigned char sense_key;
2199 case CMD_TARGET_STATUS:
2200 status = 0;
2201
2202 if( cmd->err_info->ScsiStatus == 0x02)
2203 {
2204 printk(KERN_WARNING "cciss: cmd %p "
2205 "has CHECK CONDITION "
2206 " byte 2 = 0x%x\n", cmd,
2207 cmd->err_info->SenseInfo[2]
2208 );
2209 /* check the sense key */
2210 sense_key = 0xf &
2211 cmd->err_info->SenseInfo[2];
2212 /* no status or recovered error */
2213 if((sense_key == 0x0) ||
2214 (sense_key == 0x1))
2215 {
2216 status = 1;
2217 }
2218 } else
2219 {
2220 printk(KERN_WARNING "cciss: cmd %p "
2221 "has SCSI Status 0x%x\n",
2222 cmd, cmd->err_info->ScsiStatus);
2223 }
2224 break;
2225 case CMD_DATA_UNDERRUN:
2226 printk(KERN_WARNING "cciss: cmd %p has"
2227 " completed with data underrun "
2228 "reported\n", cmd);
2229 break;
2230 case CMD_DATA_OVERRUN:
2231 printk(KERN_WARNING "cciss: cmd %p has"
2232 " completed with data overrun "
2233 "reported\n", cmd);
2234 break;
2235 case CMD_INVALID:
2236 printk(KERN_WARNING "cciss: cmd %p is "
2237 "reported invalid\n", cmd);
2238 status = 0;
2239 break;
2240 case CMD_PROTOCOL_ERR:
2241 printk(KERN_WARNING "cciss: cmd %p has "
2242 "protocol error \n", cmd);
2243 status = 0;
2244 break;
2245 case CMD_HARDWARE_ERR:
2246 printk(KERN_WARNING "cciss: cmd %p had "
2247 				"hardware error\n", cmd);
2248 status = 0;
2249 break;
2250 case CMD_CONNECTION_LOST:
2251 printk(KERN_WARNING "cciss: cmd %p had "
2252 "connection lost\n", cmd);
2253 status=0;
2254 break;
2255 case CMD_ABORTED:
2256 printk(KERN_WARNING "cciss: cmd %p was "
2257 "aborted\n", cmd);
2258 status=0;
2259 break;
2260 case CMD_ABORT_FAILED:
2261 printk(KERN_WARNING "cciss: cmd %p reports "
2262 "abort failed\n", cmd);
2263 status=0;
2264 break;
2265 case CMD_UNSOLICITED_ABORT:
2266 printk(KERN_WARNING "cciss%d: unsolicited "
2267 "abort %p\n", h->ctlr, cmd);
2268 if (cmd->retry_count < MAX_CMD_RETRIES) {
2269 retry_cmd=1;
2270 printk(KERN_WARNING
2271 "cciss%d: retrying %p\n",
2272 h->ctlr, cmd);
2273 cmd->retry_count++;
2274 } else
2275 printk(KERN_WARNING
2276 "cciss%d: %p retried too "
2277 "many times\n", h->ctlr, cmd);
2278 status=0;
2279 break;
2280 case CMD_TIMEOUT:
2281 			printk(KERN_WARNING "cciss: cmd %p timed out\n",
2282 cmd);
2283 status=0;
2284 break;
2285 default:
2286 printk(KERN_WARNING "cciss: cmd %p returned "
2287 "unknown status %x\n", cmd,
2288 cmd->err_info->CommandStatus);
2289 status=0;
2290 }
2291 }
2292 /* We need to return this command */
2293 if(retry_cmd) {
2294 resend_cciss_cmd(h,cmd);
2295 return;
2296 }
2297 /* command did not need to be retried */
2298 /* unmap the DMA mapping for all the scatter gather elements */
2299 for(i=0; i<cmd->Header.SGList; i++) {
2300 temp64.val32.lower = cmd->SG[i].Addr.lower;
2301 temp64.val32.upper = cmd->SG[i].Addr.upper;
2302 pci_unmap_page(hba[cmd->ctlr]->pdev,
2303 temp64.val, cmd->SG[i].Len,
2304 (cmd->Request.Type.Direction == XFER_READ) ?
2305 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
2306 }
2307 complete_buffers(cmd->rq->bio, status);
2308
2309 #ifdef CCISS_DEBUG
2310 printk("Done with %p\n", cmd->rq);
2311 #endif /* CCISS_DEBUG */
2312
2313 end_that_request_last(cmd->rq);
2314 cmd_free(h,cmd,1);
2315 }
2316
2317 /*
2318 * Get a request and submit it to the controller.
2319 */
2320 static void do_cciss_request(request_queue_t *q)
2321 {
2322 ctlr_info_t *h= q->queuedata;
2323 CommandList_struct *c;
2324 int start_blk, seg;
2325 struct request *creq;
2326 u64bit temp64;
2327 struct scatterlist tmp_sg[MAXSGENTRIES];
2328 drive_info_struct *drv;
2329 int i, dir;
2330
2331 /* We call start_io here in case there is a command waiting on the
2332 * queue that has not been sent.
2333 */
2334 if (blk_queue_plugged(q))
2335 goto startio;
2336
2337 queue:
2338 creq = elv_next_request(q);
2339 if (!creq)
2340 goto startio;
2341
2342 if (creq->nr_phys_segments > MAXSGENTRIES)
2343 BUG();
2344
2345 if (( c = cmd_alloc(h, 1)) == NULL)
2346 goto full;
2347
2348 blkdev_dequeue_request(creq);
2349
2350 spin_unlock_irq(q->queue_lock);
2351
2352 c->cmd_type = CMD_RWREQ;
2353 c->rq = creq;
2354
2355 /* fill in the request */
2356 drv = creq->rq_disk->private_data;
2357 c->Header.ReplyQueue = 0; // unused in simple mode
2358 /* got command from pool, so use the command block index instead */
2359 /* for direct lookups. */
2360 /* The first 2 bits are reserved for controller error reporting. */
2361 c->Header.Tag.lower = (c->cmdindex << 3);
2362 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
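	/*
	 * Resulting tag layout, as decoded later in do_cciss_intr():
	 *   bits 31..3  command pool index (c->cmdindex)
	 *   bit  2      direct-lookup flag (0x04)
	 *   bits 1..0   reserved for controller error reporting
	 * The ISR recovers the command with (tag >> 3).
	 */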
2363 c->Header.LUN.LogDev.VolId= drv->LunID;
2364 c->Header.LUN.LogDev.Mode = 1;
2365 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2366 c->Request.Type.Type = TYPE_CMD; // It is a command.
2367 c->Request.Type.Attribute = ATTR_SIMPLE;
2368 c->Request.Type.Direction =
2369 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2370 c->Request.Timeout = 0; // Don't time out
2371 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2372 start_blk = creq->sector;
2373 #ifdef CCISS_DEBUG
2374 	printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n",(int) creq->sector,
2375 (int) creq->nr_sectors);
2376 #endif /* CCISS_DEBUG */
2377
2378 seg = blk_rq_map_sg(q, creq, tmp_sg);
2379
2380 /* get the DMA records for the setup */
2381 if (c->Request.Type.Direction == XFER_READ)
2382 dir = PCI_DMA_FROMDEVICE;
2383 else
2384 dir = PCI_DMA_TODEVICE;
2385
2386 for (i=0; i<seg; i++)
2387 {
2388 c->SG[i].Len = tmp_sg[i].length;
2389 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2390 tmp_sg[i].offset, tmp_sg[i].length,
2391 dir);
2392 c->SG[i].Addr.lower = temp64.val32.lower;
2393 c->SG[i].Addr.upper = temp64.val32.upper;
2394 c->SG[i].Ext = 0; // we are not chaining
2395 }
2396 /* track how many SG entries we are using */
2397 if( seg > h->maxSG)
2398 h->maxSG = seg;
2399
2400 #ifdef CCISS_DEBUG
2401 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2402 #endif /* CCISS_DEBUG */
2403
2404 c->Header.SGList = c->Header.SGTotal = seg;
2405 c->Request.CDB[1]= 0;
2406 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2407 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2408 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2409 c->Request.CDB[5]= start_blk & 0xff;
2410 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2411 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2412 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2413 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
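	/*
	 * The 10-byte READ(10)/WRITE(10) CDB built above is laid out as:
	 *   byte 0      opcode (CCISS_READ or CCISS_WRITE)
	 *   bytes 2-5   starting block, most significant byte first
	 *   bytes 7-8   transfer length in blocks, most significant byte first
	 *   byte 9      control byte (zero)
	 * e.g. an illustrative start_blk of 0x12345678 ends up as
	 * CDB[2..5] = 12 34 56 78.
	 */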
2414
2415 spin_lock_irq(q->queue_lock);
2416
2417 addQ(&(h->reqQ),c);
2418 h->Qdepth++;
2419 if(h->Qdepth > h->maxQsinceinit)
2420 h->maxQsinceinit = h->Qdepth;
2421
2422 goto queue;
2423 full:
2424 blk_stop_queue(q);
2425 startio:
2426 	/* We already hold the driver lock here, so there is no need
2427 	 * to take it again.
2428 */
2429 start_io(h);
2430 }
2431
2432 static inline unsigned long get_next_completion(ctlr_info_t *h)
2433 {
2434 #ifdef CONFIG_CISS_SCSI_TAPE
2435 /* Any rejects from sendcmd() lying around? Process them first */
2436 if (h->scsi_rejects.ncompletions == 0)
2437 return h->access.command_completed(h);
2438 else {
2439 struct sendcmd_reject_list *srl;
2440 int n;
2441 srl = &h->scsi_rejects;
2442 n = --srl->ncompletions;
2443 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2444 printk("p");
2445 return srl->complete[n];
2446 }
2447 #else
2448 return h->access.command_completed(h);
2449 #endif
2450 }
2451
2452 static inline int interrupt_pending(ctlr_info_t *h)
2453 {
2454 #ifdef CONFIG_CISS_SCSI_TAPE
2455 return ( h->access.intr_pending(h)
2456 || (h->scsi_rejects.ncompletions > 0));
2457 #else
2458 return h->access.intr_pending(h);
2459 #endif
2460 }
2461
2462 static inline long interrupt_not_for_us(ctlr_info_t *h)
2463 {
2464 #ifdef CONFIG_CISS_SCSI_TAPE
2465 return (((h->access.intr_pending(h) == 0) ||
2466 (h->interrupts_enabled == 0))
2467 && (h->scsi_rejects.ncompletions == 0));
2468 #else
2469 return (((h->access.intr_pending(h) == 0) ||
2470 (h->interrupts_enabled == 0)));
2471 #endif
2472 }
2473
2474 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2475 {
2476 ctlr_info_t *h = dev_id;
2477 CommandList_struct *c;
2478 unsigned long flags;
2479 __u32 a, a1, a2;
2480 int j;
2481 int start_queue = h->next_to_run;
2482
2483 if (interrupt_not_for_us(h))
2484 return IRQ_NONE;
2485 /*
2486 * If there are completed commands in the completion queue,
2487 * we had better do something about it.
2488 */
2489 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2490 while (interrupt_pending(h)) {
2491 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2492 a1 = a;
2493 if ((a & 0x04)) {
2494 a2 = (a >> 3);
2495 if (a2 >= NR_CMDS) {
2496 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2497 fail_all_cmds(h->ctlr);
2498 return IRQ_HANDLED;
2499 }
2500
2501 c = h->cmd_pool + a2;
2502 a = c->busaddr;
2503
2504 } else {
2505 a &= ~3;
2506 if ((c = h->cmpQ) == NULL) {
2507 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2508 continue;
2509 }
2510 while(c->busaddr != a) {
2511 c = c->next;
2512 if (c == h->cmpQ)
2513 break;
2514 }
2515 }
2516 /*
2517 * If we've found the command, take it off the
2518 * completion Q and free it
2519 */
2520 if (c->busaddr == a) {
2521 removeQ(&h->cmpQ, c);
2522 if (c->cmd_type == CMD_RWREQ) {
2523 complete_command(h, c, 0);
2524 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2525 complete(c->waiting);
2526 }
2527 # ifdef CONFIG_CISS_SCSI_TAPE
2528 else if (c->cmd_type == CMD_SCSI)
2529 complete_scsi_command(c, 0, a1);
2530 # endif
2531 continue;
2532 }
2533 }
2534 }
2535
2536 /* check to see if we have maxed out the number of commands that can
2537 * be placed on the queue. If so then exit. We do this check here
2538 * in case the interrupt we serviced was from an ioctl and did not
2539 * free any new commands.
2540 */
2541 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2542 goto cleanup;
2543
2544 /* We have room on the queue for more commands. Now we need to queue
2545 * them up. We will also keep track of the next queue to run so
2546 * that every queue gets a chance to be started first.
2547 */
2548 for (j=0; j < h->highest_lun + 1; j++){
2549 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2550 /* make sure the disk has been added and the drive is real
2551 * because this can be called from the middle of init_one.
2552 */
2553 if(!(h->drv[curr_queue].queue) ||
2554 !(h->drv[curr_queue].heads))
2555 continue;
2556 blk_start_queue(h->gendisk[curr_queue]->queue);
2557
2558 /* check to see if we have maxed out the number of commands
2559 * that can be placed on the queue.
2560 */
2561 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2562 {
2563 if (curr_queue == start_queue){
2564 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2565 goto cleanup;
2566 } else {
2567 h->next_to_run = curr_queue;
2568 goto cleanup;
2569 }
2570 } else {
2571 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2572 }
2573 }
2574
2575 cleanup:
2576 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2577 return IRQ_HANDLED;
2578 }
2579 /*
2580  * We cannot read the structure directly; for portability we must use
2581  * the I/O accessor functions.
2582 * This is for debug only.
2583 */
2584 #ifdef CCISS_DEBUG
2585 static void print_cfg_table( CfgTable_struct *tb)
2586 {
2587 int i;
2588 char temp_name[17];
2589
2590 printk("Controller Configuration information\n");
2591 printk("------------------------------------\n");
2592 for(i=0;i<4;i++)
2593 temp_name[i] = readb(&(tb->Signature[i]));
2594 temp_name[4]='\0';
2595 printk(" Signature = %s\n", temp_name);
2596 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2597 printk(" Transport methods supported = 0x%x\n",
2598 readl(&(tb-> TransportSupport)));
2599 printk(" Transport methods active = 0x%x\n",
2600 readl(&(tb->TransportActive)));
2601 printk(" Requested transport Method = 0x%x\n",
2602 readl(&(tb->HostWrite.TransportRequest)));
2603 	printk(" Coalesce Interrupt Delay = 0x%x\n",
2604 readl(&(tb->HostWrite.CoalIntDelay)));
2605 	printk(" Coalesce Interrupt Count = 0x%x\n",
2606 readl(&(tb->HostWrite.CoalIntCount)));
2607 	printk(" Max outstanding commands = %d\n",
2608 readl(&(tb->CmdsOutMax)));
2609 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2610 for(i=0;i<16;i++)
2611 temp_name[i] = readb(&(tb->ServerName[i]));
2612 temp_name[16] = '\0';
2613 printk(" Server Name = %s\n", temp_name);
2614 printk(" Heartbeat Counter = 0x%x\n\n\n",
2615 readl(&(tb->HeartBeat)));
2616 }
2617 #endif /* CCISS_DEBUG */
2618
2619 static void release_io_mem(ctlr_info_t *c)
2620 {
2621 /* if IO mem was not protected do nothing */
2622 if( c->io_mem_addr == 0)
2623 return;
2624 release_region(c->io_mem_addr, c->io_mem_length);
2625 c->io_mem_addr = 0;
2626 c->io_mem_length = 0;
2627 }
2628
2629 static int find_PCI_BAR_index(struct pci_dev *pdev,
2630 unsigned long pci_bar_addr)
2631 {
2632 int i, offset, mem_type, bar_type;
2633 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2634 return 0;
2635 offset = 0;
2636 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2637 bar_type = pci_resource_flags(pdev, i) &
2638 PCI_BASE_ADDRESS_SPACE;
2639 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2640 offset += 4;
2641 else {
2642 mem_type = pci_resource_flags(pdev, i) &
2643 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2644 switch (mem_type) {
2645 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2646 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2647 offset += 4; /* 32 bit */
2648 break;
2649 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2650 offset += 8;
2651 break;
2652 default: /* reserved in PCI 2.2 */
2653 printk(KERN_WARNING "Base address is invalid\n");
2654 return -1;
2655 break;
2656 }
2657 }
2658 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2659 return i+1;
2660 }
2661 return -1;
2662 }
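/*
 * Example, assuming only 32-bit memory BARs: a pci_bar_addr of
 * PCI_BASE_ADDRESS_1 (config offset 0x14) lies 4 bytes past BAR0, so the loop
 * matches after accounting for resource 0 and returns 1, the index of the
 * resource that starts at that config-space offset.
 */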
2663
2664 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2665 {
2666 ushort subsystem_vendor_id, subsystem_device_id, command;
2667 __u32 board_id, scratchpad = 0;
2668 __u64 cfg_offset;
2669 __u32 cfg_base_addr;
2670 __u64 cfg_base_addr_index;
2671 int i;
2672
2673 /* check to see if controller has been disabled */
2674 /* BEFORE trying to enable it */
2675 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2676 if(!(command & 0x02))
2677 {
2678 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2679 return(-1);
2680 }
2681
2682 if (pci_enable_device(pdev))
2683 {
2684 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2685 return( -1);
2686 }
2687
2688 subsystem_vendor_id = pdev->subsystem_vendor;
2689 subsystem_device_id = pdev->subsystem_device;
2690 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2691 subsystem_vendor_id);
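	/* For example (hypothetical IDs): subsystem vendor 0x1234 and
	 * subsystem device 0xABCD combine into board_id 0xABCD1234, which is
	 * matched against the products[] table further down. */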
2692
2693 /* search for our IO range so we can protect it */
2694 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2695 {
2696 /* is this an IO range */
2697 if( pci_resource_flags(pdev, i) & 0x01 ) {
2698 c->io_mem_addr = pci_resource_start(pdev, i);
2699 c->io_mem_length = pci_resource_end(pdev, i) -
2700 pci_resource_start(pdev, i) +1;
2701 #ifdef CCISS_DEBUG
2702 printk("IO value found base_addr[%d] %lx %lx\n", i,
2703 c->io_mem_addr, c->io_mem_length);
2704 #endif /* CCISS_DEBUG */
2705 /* register the IO range */
2706 if(!request_region( c->io_mem_addr,
2707 c->io_mem_length, "cciss"))
2708 {
2709 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2710 c->io_mem_addr, c->io_mem_length);
2711 c->io_mem_addr= 0;
2712 c->io_mem_length = 0;
2713 }
2714 break;
2715 }
2716 }
2717
2718 #ifdef CCISS_DEBUG
2719 printk("command = %x\n", command);
2720 printk("irq = %x\n", pdev->irq);
2721 printk("board_id = %x\n", board_id);
2722 #endif /* CCISS_DEBUG */
2723
2724 c->intr = pdev->irq;
2725
2726 /*
2727  * The memory base addr is the first address; the second points to the config
2728 * table
2729 */
2730
2731 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2732 #ifdef CCISS_DEBUG
2733 printk("address 0 = %x\n", c->paddr);
2734 #endif /* CCISS_DEBUG */
2735 c->vaddr = remap_pci_mem(c->paddr, 200);
2736
2737 /* Wait for the board to become ready. (PCI hotplug needs this.)
2738 * We poll for up to 120 secs, once per 100ms. */
2739 for (i=0; i < 1200; i++) {
2740 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2741 if (scratchpad == CCISS_FIRMWARE_READY)
2742 break;
2743 set_current_state(TASK_INTERRUPTIBLE);
2744 schedule_timeout(HZ / 10); /* wait 100ms */
2745 }
2746 if (scratchpad != CCISS_FIRMWARE_READY) {
2747 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2748 return -1;
2749 }
2750
2751 /* get the address index number */
2752 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2753 cfg_base_addr &= (__u32) 0x0000ffff;
2754 #ifdef CCISS_DEBUG
2755 printk("cfg base address = %x\n", cfg_base_addr);
2756 #endif /* CCISS_DEBUG */
2757 cfg_base_addr_index =
2758 find_PCI_BAR_index(pdev, cfg_base_addr);
2759 #ifdef CCISS_DEBUG
2760 printk("cfg base address index = %x\n", cfg_base_addr_index);
2761 #endif /* CCISS_DEBUG */
2762 if (cfg_base_addr_index == -1) {
2763 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2764 release_io_mem(c);
2765 return -1;
2766 }
2767
2768 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2769 #ifdef CCISS_DEBUG
2770 printk("cfg offset = %x\n", cfg_offset);
2771 #endif /* CCISS_DEBUG */
2772 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2773 cfg_base_addr_index) + cfg_offset,
2774 sizeof(CfgTable_struct));
2775 c->board_id = board_id;
2776
2777 #ifdef CCISS_DEBUG
2778 print_cfg_table(c->cfgtable);
2779 #endif /* CCISS_DEBUG */
2780
2781 for(i=0; i<NR_PRODUCTS; i++) {
2782 if (board_id == products[i].board_id) {
2783 c->product_name = products[i].product_name;
2784 c->access = *(products[i].access);
2785 break;
2786 }
2787 }
2788 if (i == NR_PRODUCTS) {
2789 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2790 " to access the Smart Array controller %08lx\n",
2791 (unsigned long)board_id);
2792 return -1;
2793 }
2794 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2795 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2796 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2797 (readb(&c->cfgtable->Signature[3]) != 'S') )
2798 {
2799 		printk(KERN_WARNING "cciss: Does not appear to be a valid CISS config table\n");
2800 return -1;
2801 }
2802
2803 #ifdef CONFIG_X86
2804 {
2805 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2806 __u32 prefetch;
2807 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2808 prefetch |= 0x100;
2809 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2810 }
2811 #endif
2812
2813 #ifdef CCISS_DEBUG
2814 printk("Trying to put board into Simple mode\n");
2815 #endif /* CCISS_DEBUG */
2816 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2817 /* Update the field, and then ring the doorbell */
2818 writel( CFGTBL_Trans_Simple,
2819 &(c->cfgtable->HostWrite.TransportRequest));
2820 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2821
2822 	/* under certain very rare conditions, this can take a while.
2823 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2824 * as we enter this code.) */
2825 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2826 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2827 break;
2828 /* delay and try again */
2829 set_current_state(TASK_INTERRUPTIBLE);
2830 schedule_timeout(10);
2831 }
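	/* Once the controller has latched the request it clears the
	 * CFGTBL_ChangeReq bit in the doorbell register, which ends the loop
	 * above; the new transport mode is then confirmed by reading
	 * TransportActive below. */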
2832
2833 #ifdef CCISS_DEBUG
2834 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2835 #endif /* CCISS_DEBUG */
2836 #ifdef CCISS_DEBUG
2837 print_cfg_table(c->cfgtable);
2838 #endif /* CCISS_DEBUG */
2839
2840 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2841 {
2842 printk(KERN_WARNING "cciss: unable to get board into"
2843 " simple mode\n");
2844 return -1;
2845 }
2846 return 0;
2847
2848 }
2849
2850 /*
2851 * Gets information about the local volumes attached to the controller.
2852 */
2853 static void cciss_getgeometry(int cntl_num)
2854 {
2855 ReportLunData_struct *ld_buff;
2856 ReadCapdata_struct *size_buff;
2857 InquiryData_struct *inq_buff;
2858 int return_code;
2859 int i;
2860 int listlength = 0;
2861 __u32 lunid = 0;
2862 int block_size;
2863 int total_size;
2864
2865 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2866 if (ld_buff == NULL)
2867 {
2868 printk(KERN_ERR "cciss: out of memory\n");
2869 return;
2870 }
2871 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2872 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2873 if (size_buff == NULL)
2874 {
2875 printk(KERN_ERR "cciss: out of memory\n");
2876 kfree(ld_buff);
2877 return;
2878 }
2879 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2880 if (inq_buff == NULL)
2881 {
2882 printk(KERN_ERR "cciss: out of memory\n");
2883 kfree(ld_buff);
2884 kfree(size_buff);
2885 return;
2886 }
2887 /* Get the firmware version */
2888 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2889 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2890 if (return_code == IO_OK)
2891 {
2892 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2893 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2894 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2895 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2896 } else /* send command failed */
2897 {
2898 printk(KERN_WARNING "cciss: unable to determine firmware"
2899 " version of controller\n");
2900 }
2901 /* Get the number of logical volumes */
2902 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2903 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2904
2905 if( return_code == IO_OK)
2906 {
2907 #ifdef CCISS_DEBUG
2908 printk("LUN Data\n--------------------------\n");
2909 #endif /* CCISS_DEBUG */
2910
2911 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2912 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2913 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2914 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2915 } else /* reading number of logical volumes failed */
2916 {
2917 printk(KERN_WARNING "cciss: report logical volume"
2918 " command failed\n");
2919 listlength = 0;
2920 }
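	/* LUNListLength is a big-endian byte count of the LUN list that
	 * follows; each entry is 8 bytes, so e.g. a controller reporting
	 * three logical drives returns 0x00000018 here and num_luns below
	 * becomes 3. */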
2921 	hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2922 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
2923 {
2924 		printk(KERN_ERR "cciss: only %d logical volumes are supported\n",
2925 			CISS_MAX_LUN);
2926 hba[cntl_num]->num_luns = CISS_MAX_LUN;
2927 }
2928 #ifdef CCISS_DEBUG
2929 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
2930 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
2931 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
2932 #endif /* CCISS_DEBUG */
2933
2934 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
2935 // for(i=0; i< hba[cntl_num]->num_luns; i++)
2936 for(i=0; i < CISS_MAX_LUN; i++)
2937 {
2938 if (i < hba[cntl_num]->num_luns){
2939 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
2940 << 24;
2941 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
2942 << 16;
2943 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
2944 << 8;
2945 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2946
2947 hba[cntl_num]->drv[i].LunID = lunid;
2948
2949
2950 #ifdef CCISS_DEBUG
2951 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2952 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
2953 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
2954 hba[cntl_num]->drv[i].LunID);
2955 #endif /* CCISS_DEBUG */
2956 cciss_read_capacity(cntl_num, i, size_buff, 0,
2957 &total_size, &block_size);
2958 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
2959 block_size, inq_buff, &hba[cntl_num]->drv[i]);
2960 } else {
2961 /* initialize raid_level to indicate a free space */
2962 hba[cntl_num]->drv[i].raid_level = -1;
2963 }
2964 }
2965 kfree(ld_buff);
2966 kfree(size_buff);
2967 kfree(inq_buff);
2968 }
2969
2970 /* Function to find the first free pointer into our hba[] array */
2971 /* Returns -1 if no free entries are left. */
2972 static int alloc_cciss_hba(void)
2973 {
2974 struct gendisk *disk[NWD];
2975 int i, n;
2976 for (n = 0; n < NWD; n++) {
2977 disk[n] = alloc_disk(1 << NWD_SHIFT);
2978 if (!disk[n])
2979 goto out;
2980 }
2981
2982 for(i=0; i< MAX_CTLR; i++) {
2983 if (!hba[i]) {
2984 ctlr_info_t *p;
2985 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2986 if (!p)
2987 goto Enomem;
2988 memset(p, 0, sizeof(ctlr_info_t));
2989 for (n = 0; n < NWD; n++)
2990 p->gendisk[n] = disk[n];
2991 hba[i] = p;
2992 return i;
2993 }
2994 }
2995 printk(KERN_WARNING "cciss: This driver supports a maximum"
2996 " of %d controllers.\n", MAX_CTLR);
2997 goto out;
2998 Enomem:
2999 printk(KERN_ERR "cciss: out of memory.\n");
3000 out:
3001 while (n--)
3002 put_disk(disk[n]);
3003 return -1;
3004 }
3005
3006 static void free_hba(int i)
3007 {
3008 ctlr_info_t *p = hba[i];
3009 int n;
3010
3011 hba[i] = NULL;
3012 for (n = 0; n < NWD; n++)
3013 put_disk(p->gendisk[n]);
3014 kfree(p);
3015 }
3016
3017 /*
3018 * This is it. Find all the controllers and register them. I really hate
3019 * stealing all these major device numbers.
3020 * returns the number of block devices registered.
3021 */
3022 static int __devinit cciss_init_one(struct pci_dev *pdev,
3023 const struct pci_device_id *ent)
3024 {
3025 request_queue_t *q;
3026 int i;
3027 int j;
3028 int rc;
3029
3030 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3031 " bus %d dev %d func %d\n",
3032 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3033 PCI_FUNC(pdev->devfn));
3034 i = alloc_cciss_hba();
3035 if(i < 0)
3036 return (-1);
3037
3038 hba[i]->busy_initializing = 1;
3039
3040 if (cciss_pci_init(hba[i], pdev) != 0)
3041 goto clean1;
3042
3043 sprintf(hba[i]->devname, "cciss%d", i);
3044 hba[i]->ctlr = i;
3045 hba[i]->pdev = pdev;
3046
3047 /* configure PCI DMA stuff */
3048 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3049 printk("cciss: using DAC cycles\n");
3050 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3051 printk("cciss: not using DAC cycles\n");
3052 else {
3053 printk("cciss: no suitable DMA available\n");
3054 goto clean1;
3055 }
3056
3057 /*
3058 * register with the major number, or get a dynamic major number
3059 * by passing 0 as argument. This is done for greater than
3060 * 8 controller support.
3061 */
3062 if (i < MAX_CTLR_ORIG)
3063 hba[i]->major = MAJOR_NR + i;
3064 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3065 if(rc == -EBUSY || rc == -EINVAL) {
3066 printk(KERN_ERR
3067 "cciss: Unable to get major number %d for %s "
3068 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3069 goto clean1;
3070 }
3071 else {
3072 if (i >= MAX_CTLR_ORIG)
3073 hba[i]->major = rc;
3074 }
3075
3076 /* make sure the board interrupts are off */
3077 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3078 if( request_irq(hba[i]->intr, do_cciss_intr,
3079 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3080 hba[i]->devname, hba[i])) {
3081 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3082 hba[i]->intr, hba[i]->devname);
3083 goto clean2;
3084 }
3085 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3086 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3087 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3088 &(hba[i]->cmd_pool_dhandle));
3089 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3090 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3091 &(hba[i]->errinfo_pool_dhandle));
3092 if((hba[i]->cmd_pool_bits == NULL)
3093 || (hba[i]->cmd_pool == NULL)
3094 || (hba[i]->errinfo_pool == NULL)) {
3095 printk( KERN_ERR "cciss: out of memory");
3096 goto clean4;
3097 }
3098 #ifdef CONFIG_CISS_SCSI_TAPE
3099 hba[i]->scsi_rejects.complete =
3100 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3101 (NR_CMDS + 5), GFP_KERNEL);
3102 if (hba[i]->scsi_rejects.complete == NULL) {
3103 printk( KERN_ERR "cciss: out of memory");
3104 goto clean4;
3105 }
3106 #endif
3107 spin_lock_init(&hba[i]->lock);
3108
3109 /* Initialize the pdev driver private data.
3110 have it point to hba[i]. */
3111 pci_set_drvdata(pdev, hba[i]);
3112 /* command and error info recs zeroed out before
3113 they are used */
3114 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3115
3116 #ifdef CCISS_DEBUG
3117 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3118 #endif /* CCISS_DEBUG */
3119
3120 cciss_getgeometry(i);
3121
3122 cciss_scsi_setup(i);
3123
3124 /* Turn the interrupts on so we can service requests */
3125 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3126
3127 cciss_procinit(i);
3128 hba[i]->busy_initializing = 0;
3129
3130 for(j=0; j < NWD; j++) { /* mfm */
3131 drive_info_struct *drv = &(hba[i]->drv[j]);
3132 struct gendisk *disk = hba[i]->gendisk[j];
3133
3134 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3135 if (!q) {
3136 printk(KERN_ERR
3137 "cciss: unable to allocate queue for disk %d\n",
3138 j);
3139 break;
3140 }
3141 drv->queue = q;
3142
3143 q->backing_dev_info.ra_pages = READ_AHEAD;
3144 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3145
3146 /* This is a hardware imposed limit. */
3147 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3148
3149 /* This is a limit in the driver and could be eliminated. */
3150 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3151
3152 blk_queue_max_sectors(q, 512);
3153
3154 q->queuedata = hba[i];
3155 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3156 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3157 disk->major = hba[i]->major;
3158 disk->first_minor = j << NWD_SHIFT;
3159 disk->fops = &cciss_fops;
3160 disk->queue = q;
3161 disk->private_data = drv;
3162 /* we must register the controller even if no disks exist */
3163 /* this is for the online array utilities */
3164 if(!drv->heads && j)
3165 continue;
3166 blk_queue_hardsect_size(q, drv->block_size);
3167 set_capacity(disk, drv->nr_blocks);
3168 add_disk(disk);
3169 }
3170
3171 return(1);
3172
3173 clean4:
3174 #ifdef CONFIG_CISS_SCSI_TAPE
3175 if(hba[i]->scsi_rejects.complete)
3176 kfree(hba[i]->scsi_rejects.complete);
3177 #endif
3178 kfree(hba[i]->cmd_pool_bits);
3179 if(hba[i]->cmd_pool)
3180 pci_free_consistent(hba[i]->pdev,
3181 NR_CMDS * sizeof(CommandList_struct),
3182 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3183 if(hba[i]->errinfo_pool)
3184 pci_free_consistent(hba[i]->pdev,
3185 NR_CMDS * sizeof( ErrorInfo_struct),
3186 hba[i]->errinfo_pool,
3187 hba[i]->errinfo_pool_dhandle);
3188 free_irq(hba[i]->intr, hba[i]);
3189 clean2:
3190 unregister_blkdev(hba[i]->major, hba[i]->devname);
3191 clean1:
3192 	hba[i]->busy_initializing = 0;	/* must come before free_hba(), which NULLs hba[i] */
3193 	release_io_mem(hba[i]);
3194 	free_hba(i);
3195 return(-1);
3196 }
3197
3198 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3199 {
3200 ctlr_info_t *tmp_ptr;
3201 int i, j;
3202 char flush_buf[4];
3203 int return_code;
3204
3205 if (pci_get_drvdata(pdev) == NULL)
3206 {
3207 printk( KERN_ERR "cciss: Unable to remove device \n");
3208 return;
3209 }
3210 tmp_ptr = pci_get_drvdata(pdev);
3211 i = tmp_ptr->ctlr;
3212 if (hba[i] == NULL)
3213 {
3214 printk(KERN_ERR "cciss: device appears to "
3215 "already be removed \n");
3216 return;
3217 }
3218 /* Turn board interrupts off and send the flush cache command */
3219 	/* sendcmd will turn off interrupts and send the flush
3220 	 * to write all data in the battery-backed cache out to the disks */
3221 memset(flush_buf, 0, 4);
3222 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3223 TYPE_CMD);
3224 if(return_code != IO_OK)
3225 {
3226 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3227 i);
3228 }
3229 free_irq(hba[i]->intr, hba[i]);
3230 pci_set_drvdata(pdev, NULL);
3231 iounmap(hba[i]->vaddr);
3232 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3233 unregister_blkdev(hba[i]->major, hba[i]->devname);
3234 remove_proc_entry(hba[i]->devname, proc_cciss);
3235
3236 /* remove it from the disk list */
3237 for (j = 0; j < NWD; j++) {
3238 struct gendisk *disk = hba[i]->gendisk[j];
3239 if (disk) {
3240 request_queue_t *q = disk->queue;
3241
3242 if (disk->flags & GENHD_FL_UP)
3243 del_gendisk(disk);
3244 if (q)
3245 blk_cleanup_queue(q);
3246 put_disk(disk);
3247 }
3248 }
3249
3250 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3251 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3252 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3253 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3254 kfree(hba[i]->cmd_pool_bits);
3255 #ifdef CONFIG_CISS_SCSI_TAPE
3256 kfree(hba[i]->scsi_rejects.complete);
3257 #endif
3258 release_io_mem(hba[i]);
3259 free_hba(i);
3260 }
3261
3262 static struct pci_driver cciss_pci_driver = {
3263 .name = "cciss",
3264 .probe = cciss_init_one,
3265 .remove = __devexit_p(cciss_remove_one),
3266 .id_table = cciss_pci_device_id, /* id_table */
3267 };
3268
3269 /*
3270 * This is it. Register the PCI driver information for the cards we control
3271 * the OS will call our registered routines when it finds one of our cards.
3272 */
3273 static int __init cciss_init(void)
3274 {
3275 printk(KERN_INFO DRIVER_NAME "\n");
3276
3277 /* Register for our PCI devices */
3278 return pci_module_init(&cciss_pci_driver);
3279 }
3280
3281 static void __exit cciss_cleanup(void)
3282 {
3283 int i;
3284
3285 pci_unregister_driver(&cciss_pci_driver);
3286 	/* double check that all controller entries have been removed */
3287 for (i=0; i< MAX_CTLR; i++)
3288 {
3289 if (hba[i] != NULL)
3290 {
3291 printk(KERN_WARNING "cciss: had to remove"
3292 " controller %d\n", i);
3293 cciss_remove_one(hba[i]->pdev);
3294 }
3295 }
3296 remove_proc_entry("cciss", proc_root_driver);
3297 }
3298
3299 static void fail_all_cmds(unsigned long ctlr)
3300 {
3301 /* If we get here, the board is apparently dead. */
3302 ctlr_info_t *h = hba[ctlr];
3303 CommandList_struct *c;
3304 unsigned long flags;
3305
3306 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3307 h->alive = 0; /* the controller apparently died... */
3308
3309 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3310
3311 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3312
3313 /* move everything off the request queue onto the completed queue */
3314 while( (c = h->reqQ) != NULL ) {
3315 removeQ(&(h->reqQ), c);
3316 h->Qdepth--;
3317 addQ (&(h->cmpQ), c);
3318 }
3319
3320 /* Now, fail everything on the completed queue with a HW error */
3321 while( (c = h->cmpQ) != NULL ) {
3322 removeQ(&h->cmpQ, c);
3323 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3324 if (c->cmd_type == CMD_RWREQ) {
3325 complete_command(h, c, 0);
3326 } else if (c->cmd_type == CMD_IOCTL_PEND)
3327 complete(c->waiting);
3328 #ifdef CONFIG_CISS_SCSI_TAPE
3329 else if (c->cmd_type == CMD_SCSI)
3330 complete_scsi_command(c, 0, 0);
3331 #endif
3332 }
3333 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3334 return;
3335 }
3336
3337 module_init(cciss_init);
3338 module_exit(cciss_cleanup);