drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <asm/uaccess.h>
42 #include <asm/io.h>
43
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
48
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
52
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i");
58 MODULE_LICENSE("GPL");
59
60 #include "cciss_cmd.h"
61 #include "cciss.h"
62 #include <linux/cciss_ioctl.h>
63
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
67 0x0E11, 0x4070, 0, 0, 0},
68 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
69 0x0E11, 0x4080, 0, 0, 0},
70 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
71 0x0E11, 0x4082, 0, 0, 0},
72 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
73 0x0E11, 0x4083, 0, 0, 0},
74 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
75 0x0E11, 0x409A, 0, 0, 0},
76 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
77 0x0E11, 0x409B, 0, 0, 0},
78 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
79 0x0E11, 0x409C, 0, 0, 0},
80 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
81 0x0E11, 0x409D, 0, 0, 0},
82 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
102 {0,}
103 };
104 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
105
106 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
107
108 /* board_id = Subsystem Device ID & Vendor ID
109 * product = Marketing Name for the board
110 * access = Address of the struct of function pointers
111 */
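/* e.g. subsystem vendor 0x0E11 with subsystem device 0x4070 gives
 * board_id 0x40700E11, the "Smart Array 5300" entry below. */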
112 static struct board_type products[] = {
113 { 0x40700E11, "Smart Array 5300", &SA5_access },
114 { 0x40800E11, "Smart Array 5i", &SA5B_access},
115 { 0x40820E11, "Smart Array 532", &SA5B_access},
116 { 0x40830E11, "Smart Array 5312", &SA5B_access},
117 { 0x409A0E11, "Smart Array 641", &SA5_access},
118 { 0x409B0E11, "Smart Array 642", &SA5_access},
119 { 0x409C0E11, "Smart Array 6400", &SA5_access},
120 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
121 { 0x40910E11, "Smart Array 6i", &SA5_access},
122 { 0x3225103C, "Smart Array P600", &SA5_access},
123 { 0x3223103C, "Smart Array P800", &SA5_access},
124 { 0x3234103C, "Smart Array P400", &SA5_access},
125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
130 { 0x3215103C, "Smart Array E200i", &SA5_access},
131 };
132
133 /* How long to wait (in milliseconds) for board to go into simple mode */
134 #define MAX_CONFIG_WAIT 30000
135 #define MAX_IOCTL_CONFIG_WAIT 1000
136
137 /* define how many times we will try a command because of bus resets */
138 #define MAX_CMD_RETRIES 3
139
140 #define READ_AHEAD 1024
141 #define NR_CMDS 384 /* #commands that can be outstanding */
142 #define MAX_CTLR 32
143
144 /* Originally the cciss driver only supported 8 major numbers */
145 #define MAX_CTLR_ORIG 8
146
147
148 static ctlr_info_t *hba[MAX_CTLR];
149
150 static void do_cciss_request(request_queue_t *q);
151 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
152 static int cciss_open(struct inode *inode, struct file *filep);
153 static int cciss_release(struct inode *inode, struct file *filep);
154 static int cciss_ioctl(struct inode *inode, struct file *filep,
155 unsigned int cmd, unsigned long arg);
156
157 static int revalidate_allvol(ctlr_info_t *host);
158 static int cciss_revalidate(struct gendisk *disk);
159 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
160 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
161
162 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
163 int withirq, unsigned int *total_size, unsigned int *block_size);
164 static void cciss_geometry_inquiry(int ctlr, int logvol,
165 int withirq, unsigned int total_size,
166 unsigned int block_size, InquiryData_struct *inq_buff,
167 drive_info_struct *drv);
168 static void cciss_getgeometry(int cntl_num);
169
170 static void start_io( ctlr_info_t *h);
171 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
172 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
173 unsigned char *scsi3addr, int cmd_type);
174 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
175 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
176 int cmd_type);
177
178 static void fail_all_cmds(unsigned long ctlr);
179
180 #ifdef CONFIG_PROC_FS
181 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
182 int length, int *eof, void *data);
183 static void cciss_procinit(int i);
184 #else
185 static void cciss_procinit(int i) {}
186 #endif /* CONFIG_PROC_FS */
187
188 #ifdef CONFIG_COMPAT
189 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
190 #endif
191
192 static struct block_device_operations cciss_fops = {
193 .owner = THIS_MODULE,
194 .open = cciss_open,
195 .release = cciss_release,
196 .ioctl = cciss_ioctl,
197 #ifdef CONFIG_COMPAT
198 .compat_ioctl = cciss_compat_ioctl,
199 #endif
200 .revalidate_disk= cciss_revalidate,
201 };
202
203 /*
204 * Enqueuing and dequeuing functions for cmdlists.
205 */
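/* addQ() appends command c at the tail of the circular doubly-linked
 * list headed by *Qptr; an empty queue is a NULL head pointer.  Callers
 * are expected to hold CCISS_LOCK(ctlr). */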
206 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
207 {
208 if (*Qptr == NULL) {
209 *Qptr = c;
210 c->next = c->prev = c;
211 } else {
212 c->prev = (*Qptr)->prev;
213 c->next = (*Qptr);
214 (*Qptr)->prev->next = c;
215 (*Qptr)->prev = c;
216 }
217 }
218
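/* removeQ() unlinks command c from the list headed by *Qptr, advancing
 * (or clearing) the head pointer as needed, and returns c. */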
219 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
220 CommandList_struct *c)
221 {
222 if (c && c->next != c) {
223 if (*Qptr == c) *Qptr = c->next;
224 c->prev->next = c->next;
225 c->next->prev = c->prev;
226 } else {
227 *Qptr = NULL;
228 }
229 return c;
230 }
231
232 #include "cciss_scsi.c" /* For SCSI tape support */
233
234 #ifdef CONFIG_PROC_FS
235
236 /*
237 * Report information about this controller.
238 */
239 #define ENG_GIG 1000000000
240 #define ENG_GIG_FACTOR (ENG_GIG/512)
241 #define RAID_UNKNOWN 6
242 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
243 "UNKNOWN"};
244
245 static struct proc_dir_entry *proc_cciss;
246
247 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
248 int length, int *eof, void *data)
249 {
250 off_t pos = 0;
251 off_t len = 0;
252 int size, i, ctlr;
253 ctlr_info_t *h = (ctlr_info_t*)data;
254 drive_info_struct *drv;
255 unsigned long flags;
256 sector_t vol_sz, vol_sz_frac;
257
258 ctlr = h->ctlr;
259
260 /* prevent displaying bogus info during configuration
261 * or deconfiguration of a logical volume
262 */
263 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
264 if (h->busy_configuring) {
265 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
266 return -EBUSY;
267 }
268 h->busy_configuring = 1;
269 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
270
271 size = sprintf(buffer, "%s: HP %s Controller\n"
272 "Board ID: 0x%08lx\n"
273 "Firmware Version: %c%c%c%c\n"
274 "IRQ: %d\n"
275 "Logical drives: %d\n"
276 "Current Q depth: %d\n"
277 "Current # commands on controller: %d\n"
278 "Max Q depth since init: %d\n"
279 "Max # commands on controller since init: %d\n"
280 "Max SG entries since init: %d\n\n",
281 h->devname,
282 h->product_name,
283 (unsigned long)h->board_id,
284 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
285 (unsigned int)h->intr,
286 h->num_luns,
287 h->Qdepth, h->commands_outstanding,
288 h->maxQsinceinit, h->max_outstanding, h->maxSG);
289
290 pos += size; len += size;
291 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
292 for(i=0; i<=h->highest_lun; i++) {
293
294 drv = &h->drv[i];
295 if (drv->heads == 0)
296 continue;
297
298 vol_sz = drv->nr_blocks;
299 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
300 vol_sz_frac *= 100;
301 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
302
303 if (drv->raid_level > 5)
304 drv->raid_level = RAID_UNKNOWN;
305 size = sprintf(buffer+len, "cciss/c%dd%d:"
306 "\t%4u.%02uGB\tRAID %s\n",
307 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
308 raid_label[drv->raid_level]);
309 pos += size; len += size;
310 }
311
312 *eof = 1;
313 *start = buffer+offset;
314 len -= offset;
315 if (len>length)
316 len = length;
317 h->busy_configuring = 0;
318 return len;
319 }
320
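/* Handle writes to /proc/cciss/<devname>.  The only command recognized
 * here is "engage scsi", and only when SCSI tape support is configured. */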
321 static int
322 cciss_proc_write(struct file *file, const char __user *buffer,
323 unsigned long count, void *data)
324 {
325 unsigned char cmd[80];
326 int len;
327 #ifdef CONFIG_CISS_SCSI_TAPE
328 ctlr_info_t *h = (ctlr_info_t *) data;
329 int rc;
330 #endif
331
332 if (count > sizeof(cmd)-1) return -EINVAL;
333 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
334 cmd[count] = '\0';
335 len = strlen(cmd); // above 3 lines ensure safety
336 if (len && cmd[len-1] == '\n')
337 cmd[--len] = '\0';
338 # ifdef CONFIG_CISS_SCSI_TAPE
339 if (strcmp("engage scsi", cmd)==0) {
340 rc = cciss_engage_scsi(h->ctlr);
341 if (rc != 0) return -rc;
342 return count;
343 }
344 /* might be nice to have "disengage" too, but it's not
345 safely possible. (only 1 module use count, lock issues.) */
346 # endif
347 return -EINVAL;
348 }
349
350 /*
351 * Get us a file in /proc/cciss that says something about each controller.
352 * Create /proc/cciss if it doesn't exist yet.
353 */
354 static void __devinit cciss_procinit(int i)
355 {
356 struct proc_dir_entry *pde;
357
358 if (proc_cciss == NULL) {
359 proc_cciss = proc_mkdir("cciss", proc_root_driver);
360 if (!proc_cciss)
361 return;
362 }
363
364 pde = create_proc_read_entry(hba[i]->devname,
365 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
366 proc_cciss, cciss_proc_get_info, hba[i]);
367 pde->write_proc = cciss_proc_write;
368 }
369 #endif /* CONFIG_PROC_FS */
370
371 /*
372 * For operations that cannot sleep, a command block is allocated at init,
373 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
374 * which ones are free or in use. For operations that can wait for kmalloc
375 * to possibly sleep, this routine can be called with get_from_pool set to 0.
376 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
377 */
378 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
379 {
380 CommandList_struct *c;
381 int i;
382 u64bit temp64;
383 dma_addr_t cmd_dma_handle, err_dma_handle;
384
385 if (!get_from_pool)
386 {
387 c = (CommandList_struct *) pci_alloc_consistent(
388 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
389 if(c==NULL)
390 return NULL;
391 memset(c, 0, sizeof(CommandList_struct));
392
393 c->cmdindex = -1;
394
395 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
396 h->pdev, sizeof(ErrorInfo_struct),
397 &err_dma_handle);
398
399 if (c->err_info == NULL)
400 {
401 pci_free_consistent(h->pdev,
402 sizeof(CommandList_struct), c, cmd_dma_handle);
403 return NULL;
404 }
405 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
406 } else /* get it out of the controller's pool */
407 {
408 do {
409 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
410 if (i == NR_CMDS)
411 return NULL;
412 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
413 #ifdef CCISS_DEBUG
414 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
415 #endif
416 c = h->cmd_pool + i;
417 memset(c, 0, sizeof(CommandList_struct));
418 cmd_dma_handle = h->cmd_pool_dhandle
419 + i*sizeof(CommandList_struct);
420 c->err_info = h->errinfo_pool + i;
421 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
422 err_dma_handle = h->errinfo_pool_dhandle
423 + i*sizeof(ErrorInfo_struct);
424 h->nr_allocs++;
425
426 c->cmdindex = i;
427 }
428
429 c->busaddr = (__u32) cmd_dma_handle;
430 temp64.val = (__u64) err_dma_handle;
431 c->ErrDesc.Addr.lower = temp64.val32.lower;
432 c->ErrDesc.Addr.upper = temp64.val32.upper;
433 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
434
435 c->ctlr = h->ctlr;
436 return c;
437
438
439 }
440
441 /*
442 * Frees a command block that was previously allocated with cmd_alloc().
443 */
444 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
445 {
446 int i;
447 u64bit temp64;
448
449 if( !got_from_pool)
450 {
451 temp64.val32.lower = c->ErrDesc.Addr.lower;
452 temp64.val32.upper = c->ErrDesc.Addr.upper;
453 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
454 c->err_info, (dma_addr_t) temp64.val);
455 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
456 c, (dma_addr_t) c->busaddr);
457 } else
458 {
459 i = c - h->cmd_pool;
460 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
461 h->nr_frees++;
462 }
463 }
464
465 static inline ctlr_info_t *get_host(struct gendisk *disk)
466 {
467 return disk->queue->queuedata;
468 }
469
470 static inline drive_info_struct *get_drv(struct gendisk *disk)
471 {
472 return disk->private_data;
473 }
474
475 /*
476 * Open. Make sure the device is really there.
477 */
478 static int cciss_open(struct inode *inode, struct file *filep)
479 {
480 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
481 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
482
483 #ifdef CCISS_DEBUG
484 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
485 #endif /* CCISS_DEBUG */
486
487 if (host->busy_initializing || drv->busy_configuring)
488 return -EBUSY;
489 /*
490 * Root is allowed to open raw volume zero even if it's not configured
491 * so array config can still work. Root is also allowed to open any
492 * volume that has a LUN ID, so it can issue IOCTL to reread the
493 * disk information. I don't think I really like this
494 * but I'm already using way too many device nodes to claim another one
495 * for "raw controller".
496 */
497 if (drv->nr_blocks == 0) {
498 if (iminor(inode) != 0) { /* not node 0? */
499 /* if not node 0 make sure it is a partition = 0 */
500 if (iminor(inode) & 0x0f) {
501 return -ENXIO;
502 /* if it is, make sure we have a LUN ID */
503 } else if (drv->LunID == 0) {
504 return -ENXIO;
505 }
506 }
507 if (!capable(CAP_SYS_ADMIN))
508 return -EPERM;
509 }
510 drv->usage_count++;
511 host->usage_count++;
512 return 0;
513 }
514 /*
515 * Close. Sync first.
516 */
517 static int cciss_release(struct inode *inode, struct file *filep)
518 {
519 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
520 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
521
522 #ifdef CCISS_DEBUG
523 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
524 #endif /* CCISS_DEBUG */
525
526 drv->usage_count--;
527 host->usage_count--;
528 return 0;
529 }
530
531 #ifdef CONFIG_COMPAT
532
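/* Forward a compat ioctl to the native cciss_ioctl() handler under the
 * big kernel lock. */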
533 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
534 {
535 int ret;
536 lock_kernel();
537 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
538 unlock_kernel();
539 return ret;
540 }
541
542 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
543 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
544
545 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
546 {
547 switch (cmd) {
548 case CCISS_GETPCIINFO:
549 case CCISS_GETINTINFO:
550 case CCISS_SETINTINFO:
551 case CCISS_GETNODENAME:
552 case CCISS_SETNODENAME:
553 case CCISS_GETHEARTBEAT:
554 case CCISS_GETBUSTYPES:
555 case CCISS_GETFIRMVER:
556 case CCISS_GETDRIVVER:
557 case CCISS_REVALIDVOLS:
558 case CCISS_DEREGDISK:
559 case CCISS_REGNEWDISK:
560 case CCISS_REGNEWD:
561 case CCISS_RESCANDISK:
562 case CCISS_GETLUNINFO:
563 return do_ioctl(f, cmd, arg);
564
565 case CCISS_PASSTHRU32:
566 return cciss_ioctl32_passthru(f, cmd, arg);
567 case CCISS_BIG_PASSTHRU32:
568 return cciss_ioctl32_big_passthru(f, cmd, arg);
569
570 default:
571 return -ENOIOCTLCMD;
572 }
573 }
574
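/* Translate a 32-bit IOCTL_Command_struct (which carries a 32-bit buffer
 * pointer) into the native layout in compat user space, issue
 * CCISS_PASSTHRU, then copy the error information back to the 32-bit
 * caller. */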
575 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
576 {
577 IOCTL32_Command_struct __user *arg32 =
578 (IOCTL32_Command_struct __user *) arg;
579 IOCTL_Command_struct arg64;
580 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
581 int err;
582 u32 cp;
583
584 err = 0;
585 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
586 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
587 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
588 err |= get_user(arg64.buf_size, &arg32->buf_size);
589 err |= get_user(cp, &arg32->buf);
590 arg64.buf = compat_ptr(cp);
591 err |= copy_to_user(p, &arg64, sizeof(arg64));
592
593 if (err)
594 return -EFAULT;
595
596 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
597 if (err)
598 return err;
599 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
600 if (err)
601 return -EFAULT;
602 return err;
603 }
604
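/* Same translation as above for the multi-SG BIG_IOCTL_Command_struct,
 * issued as CCISS_BIG_PASSTHRU. */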
605 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
606 {
607 BIG_IOCTL32_Command_struct __user *arg32 =
608 (BIG_IOCTL32_Command_struct __user *) arg;
609 BIG_IOCTL_Command_struct arg64;
610 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
611 int err;
612 u32 cp;
613
614 err = 0;
615 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
616 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
617 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
618 err |= get_user(arg64.buf_size, &arg32->buf_size);
619 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
620 err |= get_user(cp, &arg32->buf);
621 arg64.buf = compat_ptr(cp);
622 err |= copy_to_user(p, &arg64, sizeof(arg64));
623
624 if (err)
625 return -EFAULT;
626
627 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
628 if (err)
629 return err;
630 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
631 if (err)
632 return -EFAULT;
633 return err;
634 }
635 #endif
636 /*
637 * ioctl
638 */
639 static int cciss_ioctl(struct inode *inode, struct file *filep,
640 unsigned int cmd, unsigned long arg)
641 {
642 struct block_device *bdev = inode->i_bdev;
643 struct gendisk *disk = bdev->bd_disk;
644 ctlr_info_t *host = get_host(disk);
645 drive_info_struct *drv = get_drv(disk);
646 int ctlr = host->ctlr;
647 void __user *argp = (void __user *)arg;
648
649 #ifdef CCISS_DEBUG
650 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
651 #endif /* CCISS_DEBUG */
652
653 switch(cmd) {
654 case HDIO_GETGEO:
655 {
656 struct hd_geometry driver_geo;
657 if (drv->cylinders) {
658 driver_geo.heads = drv->heads;
659 driver_geo.sectors = drv->sectors;
660 driver_geo.cylinders = drv->cylinders;
661 } else
662 return -ENXIO;
663 driver_geo.start= get_start_sect(inode->i_bdev);
664 if (copy_to_user(argp, &driver_geo, sizeof(struct hd_geometry)))
665 return -EFAULT;
666 return(0);
667 }
668
669 case CCISS_GETPCIINFO:
670 {
671 cciss_pci_info_struct pciinfo;
672
673 if (!arg) return -EINVAL;
674 pciinfo.domain = pci_domain_nr(host->pdev->bus);
675 pciinfo.bus = host->pdev->bus->number;
676 pciinfo.dev_fn = host->pdev->devfn;
677 pciinfo.board_id = host->board_id;
678 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
679 return -EFAULT;
680 return(0);
681 }
682 case CCISS_GETINTINFO:
683 {
684 cciss_coalint_struct intinfo;
685 if (!arg) return -EINVAL;
686 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
687 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
688 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
689 return -EFAULT;
690 return(0);
691 }
692 case CCISS_SETINTINFO:
693 {
694 cciss_coalint_struct intinfo;
695 unsigned long flags;
696 int i;
697
698 if (!arg) return -EINVAL;
699 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
700 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
701 return -EFAULT;
702 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
703
704 {
705 // printk("cciss_ioctl: delay and count cannot be 0\n");
706 return( -EINVAL);
707 }
708 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
709 /* Update the field, and then ring the doorbell */
710 writel( intinfo.delay,
711 &(host->cfgtable->HostWrite.CoalIntDelay));
712 writel( intinfo.count,
713 &(host->cfgtable->HostWrite.CoalIntCount));
714 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
715
716 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
717 if (!(readl(host->vaddr + SA5_DOORBELL)
718 & CFGTBL_ChangeReq))
719 break;
720 /* delay and try again */
721 udelay(1000);
722 }
723 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
724 if (i >= MAX_IOCTL_CONFIG_WAIT)
725 return -EAGAIN;
726 return(0);
727 }
728 case CCISS_GETNODENAME:
729 {
730 NodeName_type NodeName;
731 int i;
732
733 if (!arg) return -EINVAL;
734 for(i=0;i<16;i++)
735 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
736 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
737 return -EFAULT;
738 return(0);
739 }
740 case CCISS_SETNODENAME:
741 {
742 NodeName_type NodeName;
743 unsigned long flags;
744 int i;
745
746 if (!arg) return -EINVAL;
747 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
748
749 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
750 return -EFAULT;
751
752 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
753
754 /* Update the field, and then ring the doorbell */
755 for(i=0;i<16;i++)
756 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
757
758 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
759
760 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
761 if (!(readl(host->vaddr + SA5_DOORBELL)
762 & CFGTBL_ChangeReq))
763 break;
764 /* delay and try again */
765 udelay(1000);
766 }
767 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
768 if (i >= MAX_IOCTL_CONFIG_WAIT)
769 return -EAGAIN;
770 return(0);
771 }
772
773 case CCISS_GETHEARTBEAT:
774 {
775 Heartbeat_type heartbeat;
776
777 if (!arg) return -EINVAL;
778 heartbeat = readl(&host->cfgtable->HeartBeat);
779 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
780 return -EFAULT;
781 return(0);
782 }
783 case CCISS_GETBUSTYPES:
784 {
785 BusTypes_type BusTypes;
786
787 if (!arg) return -EINVAL;
788 BusTypes = readl(&host->cfgtable->BusTypes);
789 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
790 return -EFAULT;
791 return(0);
792 }
793 case CCISS_GETFIRMVER:
794 {
795 FirmwareVer_type firmware;
796
797 if (!arg) return -EINVAL;
798 memcpy(firmware, host->firm_ver, 4);
799
800 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
801 return -EFAULT;
802 return(0);
803 }
804 case CCISS_GETDRIVVER:
805 {
806 DriverVer_type DriverVer = DRIVER_VERSION;
807
808 if (!arg) return -EINVAL;
809
810 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
811 return -EFAULT;
812 return(0);
813 }
814
815 case CCISS_REVALIDVOLS:
816 if (bdev != bdev->bd_contains || drv != host->drv)
817 return -ENXIO;
818 return revalidate_allvol(host);
819
820 case CCISS_GETLUNINFO: {
821 LogvolInfo_struct luninfo;
822
823 luninfo.LunID = drv->LunID;
824 luninfo.num_opens = drv->usage_count;
825 luninfo.num_parts = 0;
826 if (copy_to_user(argp, &luninfo,
827 sizeof(LogvolInfo_struct)))
828 return -EFAULT;
829 return(0);
830 }
831 case CCISS_DEREGDISK:
832 return rebuild_lun_table(host, disk);
833
834 case CCISS_REGNEWD:
835 return rebuild_lun_table(host, NULL);
836
837 case CCISS_PASSTHRU:
838 {
839 IOCTL_Command_struct iocommand;
840 CommandList_struct *c;
841 char *buff = NULL;
842 u64bit temp64;
843 unsigned long flags;
844 DECLARE_COMPLETION(wait);
845
846 if (!arg) return -EINVAL;
847
848 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
849
850 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
851 return -EFAULT;
852 if((iocommand.buf_size < 1) &&
853 (iocommand.Request.Type.Direction != XFER_NONE))
854 {
855 return -EINVAL;
856 }
857 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
858 /* Check kmalloc limits */
859 if(iocommand.buf_size > 128000)
860 return -EINVAL;
861 #endif
862 if(iocommand.buf_size > 0)
863 {
864 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
865 if( buff == NULL)
866 return -EFAULT;
867 }
868 if (iocommand.Request.Type.Direction == XFER_WRITE)
869 {
870 /* Copy the data into the buffer we created */
871 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
872 {
873 kfree(buff);
874 return -EFAULT;
875 }
876 } else {
877 memset(buff, 0, iocommand.buf_size);
878 }
879 if ((c = cmd_alloc(host , 0)) == NULL)
880 {
881 kfree(buff);
882 return -ENOMEM;
883 }
884 // Fill in the command type
885 c->cmd_type = CMD_IOCTL_PEND;
886 // Fill in Command Header
887 c->Header.ReplyQueue = 0; // unused in simple mode
888 if( iocommand.buf_size > 0) // buffer to fill
889 {
890 c->Header.SGList = 1;
891 c->Header.SGTotal= 1;
892 } else // no buffers to fill
893 {
894 c->Header.SGList = 0;
895 c->Header.SGTotal= 0;
896 }
897 c->Header.LUN = iocommand.LUN_info;
898 c->Header.Tag.lower = c->busaddr; // use the bus address of the cmd block as the tag
899
900 // Fill in Request block
901 c->Request = iocommand.Request;
902
903 // Fill in the scatter gather information
904 if (iocommand.buf_size > 0 )
905 {
906 temp64.val = pci_map_single( host->pdev, buff,
907 iocommand.buf_size,
908 PCI_DMA_BIDIRECTIONAL);
909 c->SG[0].Addr.lower = temp64.val32.lower;
910 c->SG[0].Addr.upper = temp64.val32.upper;
911 c->SG[0].Len = iocommand.buf_size;
912 c->SG[0].Ext = 0; // we are not chaining
913 }
914 c->waiting = &wait;
915
916 /* Put the request on the tail of the request queue */
917 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
918 addQ(&host->reqQ, c);
919 host->Qdepth++;
920 start_io(host);
921 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
922
923 wait_for_completion(&wait);
924
925 /* unlock the buffers from DMA */
926 temp64.val32.lower = c->SG[0].Addr.lower;
927 temp64.val32.upper = c->SG[0].Addr.upper;
928 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
929 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
930
931 /* Copy the error information out */
932 iocommand.error_info = *(c->err_info);
933 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
934 {
935 kfree(buff);
936 cmd_free(host, c, 0);
937 return( -EFAULT);
938 }
939
940 if (iocommand.Request.Type.Direction == XFER_READ)
941 {
942 /* Copy the data out of the buffer we created */
943 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
944 {
945 kfree(buff);
946 cmd_free(host, c, 0);
947 return -EFAULT;
948 }
949 }
950 kfree(buff);
951 cmd_free(host, c, 0);
952 return(0);
953 }
954 case CCISS_BIG_PASSTHRU: {
955 BIG_IOCTL_Command_struct *ioc;
956 CommandList_struct *c;
957 unsigned char **buff = NULL;
958 int *buff_size = NULL;
959 u64bit temp64;
960 unsigned long flags;
961 BYTE sg_used = 0;
962 int status = 0;
963 int i;
964 DECLARE_COMPLETION(wait);
965 __u32 left;
966 __u32 sz;
967 BYTE __user *data_ptr;
968
969 if (!arg)
970 return -EINVAL;
971 if (!capable(CAP_SYS_RAWIO))
972 return -EPERM;
973 ioc = (BIG_IOCTL_Command_struct *)
974 kmalloc(sizeof(*ioc), GFP_KERNEL);
975 if (!ioc) {
976 status = -ENOMEM;
977 goto cleanup1;
978 }
979 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
980 status = -EFAULT;
981 goto cleanup1;
982 }
983 if ((ioc->buf_size < 1) &&
984 (ioc->Request.Type.Direction != XFER_NONE)) {
985 status = -EINVAL;
986 goto cleanup1;
987 }
988 /* Check kmalloc limits using all SGs */
989 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
990 status = -EINVAL;
991 goto cleanup1;
992 }
993 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
994 status = -EINVAL;
995 goto cleanup1;
996 }
997 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
998 sizeof(char *), GFP_KERNEL);
999 if (!buff) {
1000 status = -ENOMEM;
1001 goto cleanup1;
1002 }
1003 memset(buff, 0, MAXSGENTRIES * sizeof(char *));
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1009 }
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM;
1018 goto cleanup1;
1019 }
1020 if (ioc->Request.Type.Direction == XFER_WRITE) {
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1024 }
1025 } else {
1026 memset(buff[sg_used], 0, sz);
1027 }
1028 left -= sz;
1029 data_ptr += sz;
1030 sg_used++;
1031 }
1032 if ((c = cmd_alloc(host , 0)) == NULL) {
1033 status = -ENOMEM;
1034 goto cleanup1;
1035 }
1036 c->cmd_type = CMD_IOCTL_PEND;
1037 c->Header.ReplyQueue = 0;
1038
1039 if( ioc->buf_size > 0) {
1040 c->Header.SGList = sg_used;
1041 c->Header.SGTotal= sg_used;
1042 } else {
1043 c->Header.SGList = 0;
1044 c->Header.SGTotal= 0;
1045 }
1046 c->Header.LUN = ioc->LUN_info;
1047 c->Header.Tag.lower = c->busaddr;
1048
1049 c->Request = ioc->Request;
1050 if (ioc->buf_size > 0 ) {
1051 int i;
1052 for(i=0; i<sg_used; i++) {
1053 temp64.val = pci_map_single( host->pdev, buff[i],
1054 buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1060 }
1061 }
1062 c->waiting = &wait;
1063 /* Put the request on the tail of the request queue */
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1065 addQ(&host->reqQ, c);
1066 host->Qdepth++;
1067 start_io(host);
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1069 wait_for_completion(&wait);
1070 /* unlock the buffers from DMA */
1071 for(i=0; i<sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower;
1073 temp64.val32.upper = c->SG[i].Addr.upper;
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1076 }
1077 /* Copy the error information out */
1078 ioc->error_info = *(c->err_info);
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1083 }
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1092 }
1093 ptr += buff_size[i];
1094 }
1095 }
1096 cmd_free(host, c, 0);
1097 status = 0;
1098 cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1103 }
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1107 }
1108 default:
1109 return -ENOTTY;
1110 }
1111
1112 }
1113
1114 /*
1115 * revalidate_allvol is for online array config utilities. After a
1116 * utility reconfigures the drives in the array, it can use this function
1117 * (through an ioctl) to make the driver zap any previous disk structs for
1118 * that controller and get new ones.
1119 *
1120 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one
1122 * particular logical volume (instead of all of them on a particular
1123 * controller).
1124 */
1125 static int revalidate_allvol(ctlr_info_t *host)
1126 {
1127 int ctlr = host->ctlr, i;
1128 unsigned long flags;
1129
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY;
1136 }
1137 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1139
1140 for(i=0; i< NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i];
1142 if (disk) {
1143 request_queue_t *q = disk->queue;
1144
1145 if (disk->flags & GENHD_FL_UP)
1146 del_gendisk(disk);
1147 if (q)
1148 blk_cleanup_queue(q);
1149 }
1150 }
1151
1152 /*
1153 * Set the partition and block size structures for all volumes
1154 * on this controller to zero. We will reread all of this data
1155 */
1156 memset(host->drv, 0, sizeof(drive_info_struct)
1157 * CISS_MAX_LUN);
1158 /*
1159 * Tell the array controller not to give us any interrupts while
1160 * we check the new geometry. Then turn interrupts back on when
1161 * we're done.
1162 */
1163 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1164 cciss_getgeometry(ctlr);
1165 host->access.set_intr_mask(host, CCISS_INTR_ON);
1166
1167 /* Loop through each real device */
1168 for (i = 0; i < NWD; i++) {
1169 struct gendisk *disk = host->gendisk[i];
1170 drive_info_struct *drv = &(host->drv[i]);
1171 /* we must register the controller even if no disks exist */
1172 /* this is for the online array utilities */
1173 if (!drv->heads && i)
1174 continue;
1175 blk_queue_hardsect_size(drv->queue, drv->block_size);
1176 set_capacity(disk, drv->nr_blocks);
1177 add_disk(disk);
1178 }
1179 host->usage_count--;
1180 return 0;
1181 }
1182
1183 /* This function will check the usage_count of the drive to be updated/added.
1184 * If the usage_count is zero then the drive information will be updated and
1185 * the disk will be re-registered with the kernel. If not then it will be
1186 * left alone for the next reboot. The exception to this is disk 0 which
1187 * will always be left registered with the kernel since it is also the
1188 * controller node. Any changes to disk 0 will show up on the next
1189 * reboot.
1190 */
1191 static void cciss_update_drive_info(int ctlr, int drv_index)
1192 {
1193 ctlr_info_t *h = hba[ctlr];
1194 struct gendisk *disk;
1195 ReadCapdata_struct *size_buff = NULL;
1196 InquiryData_struct *inq_buff = NULL;
1197 unsigned int block_size;
1198 unsigned int total_size;
1199 unsigned long flags = 0;
1200 int ret = 0;
1201
1202 /* if the disk already exists then deregister it before proceeding*/
1203 if (h->drv[drv_index].raid_level != -1){
1204 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1205 h->drv[drv_index].busy_configuring = 1;
1206 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1207 ret = deregister_disk(h->gendisk[drv_index],
1208 &h->drv[drv_index], 0);
1209 h->drv[drv_index].busy_configuring = 0;
1210 }
1211
1212 /* If the disk is in use return */
1213 if (ret)
1214 return;
1215
1216
1217 /* Get information about the disk and modify the driver structure */
1218 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1219 if (size_buff == NULL)
1220 goto mem_msg;
1221 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1222 if (inq_buff == NULL)
1223 goto mem_msg;
1224
1225 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1226 &total_size, &block_size);
1227 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1228 inq_buff, &h->drv[drv_index]);
1229
1230 ++h->num_luns;
1231 disk = h->gendisk[drv_index];
1232 set_capacity(disk, h->drv[drv_index].nr_blocks);
1233
1234
1235 /* if it's the controller it's already added */
1236 if (drv_index){
1237 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1238
1239 /* Set up queue information */
1240 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1241 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1242
1243 /* This is a hardware imposed limit. */
1244 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1245
1246 /* This is a limit in the driver and could be eliminated. */
1247 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1248
1249 blk_queue_max_sectors(disk->queue, 512);
1250
1251 disk->queue->queuedata = hba[ctlr];
1252
1253 blk_queue_hardsect_size(disk->queue,
1254 hba[ctlr]->drv[drv_index].block_size);
1255
1256 h->drv[drv_index].queue = disk->queue;
1257 add_disk(disk);
1258 }
1259
1260 freeret:
1261 kfree(size_buff);
1262 kfree(inq_buff);
1263 return;
1264 mem_msg:
1265 printk(KERN_ERR "cciss: out of memory\n");
1266 goto freeret;
1267 }
1268
1269 /* This function will find the first index of the controller's drive array
1270 * that has a -1 for the raid_level and will return that index. This is
1271 * where new drives will be added. If the index to be returned is greater
1272 * than the highest_lun index for the controller then highest_lun is set
1273 * to this new index. If there are no available indexes then -1 is returned.
1274 */
1275 static int cciss_find_free_drive_index(int ctlr)
1276 {
1277 int i;
1278
1279 for (i=0; i < CISS_MAX_LUN; i++){
1280 if (hba[ctlr]->drv[i].raid_level == -1){
1281 if (i > hba[ctlr]->highest_lun)
1282 hba[ctlr]->highest_lun = i;
1283 return i;
1284 }
1285 }
1286 return -1;
1287 }
1288
1289 /* This function will add and remove logical drives from the Logical
1290 * drive array of the controller and maintain persistence of ordering
1291 * so that mount points are preserved until the next reboot. This allows
1292 * for the removal of logical drives in the middle of the drive array
1293 * without a re-ordering of those drives.
1294 * INPUT
1295 * h = The controller to perform the operations on
1296 * del_disk = The disk to remove if specified. If the value given
1297 * is NULL then no disk is removed.
1298 */
1299 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1300 {
1301 int ctlr = h->ctlr;
1302 int num_luns;
1303 ReportLunData_struct *ld_buff = NULL;
1304 drive_info_struct *drv = NULL;
1305 int return_code;
1306 int listlength = 0;
1307 int i;
1308 int drv_found;
1309 int drv_index = 0;
1310 __u32 lunid = 0;
1311 unsigned long flags;
1312
1313 /* Set busy_configuring flag for this operation */
1314 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1315 if (h->num_luns >= CISS_MAX_LUN){
1316 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1317 return -EINVAL;
1318 }
1319
1320 if (h->busy_configuring){
1321 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1322 return -EBUSY;
1323 }
1324 h->busy_configuring = 1;
1325
1326 /* if del_disk is NULL then we are being called to add a new disk
1327 * and update the logical drive table. If it is not NULL then
1328 * we will check if the disk is in use or not.
1329 */
1330 if (del_disk != NULL){
1331 drv = get_drv(del_disk);
1332 drv->busy_configuring = 1;
1333 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1334 return_code = deregister_disk(del_disk, drv, 1);
1335 drv->busy_configuring = 0;
1336 h->busy_configuring = 0;
1337 return return_code;
1338 } else {
1339 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1340 if (!capable(CAP_SYS_RAWIO))
1341 return -EPERM;
1342
1343 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1344 if (ld_buff == NULL)
1345 goto mem_msg;
1346
1347 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1348 sizeof(ReportLunData_struct), 0, 0, 0,
1349 TYPE_CMD);
1350
1351 if (return_code == IO_OK){
1352 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1353 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1354 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1355 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1356 } else{ /* reading number of logical volumes failed */
1357 printk(KERN_WARNING "cciss: report logical volume"
1358 " command failed\n");
1359 listlength = 0;
1360 goto freeret;
1361 }
1362
1363 num_luns = listlength / 8; /* 8 bytes per entry */
1364 if (num_luns > CISS_MAX_LUN){
1365 num_luns = CISS_MAX_LUN;
1366 printk(KERN_WARNING "cciss: more luns configured"
1367 " on controller than can be handled by"
1368 " this driver.\n");
1369 }
1370
1371 /* Compare the controller's drive array to the driver's drive array.
1372 * Check for updates in the drive information and any new drives
1373 * on the controller.
1374 */
1375 for (i=0; i < num_luns; i++){
1376 int j;
1377
1378 drv_found = 0;
1379
1380 lunid = (0xff &
1381 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1382 lunid |= (0xff &
1383 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1384 lunid |= (0xff &
1385 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1386 lunid |= 0xff &
1387 (unsigned int)(ld_buff->LUN[i][0]);
1388
1389 /* Find if the LUN is already in the drive array
1390 * of the controller. If so then update its info
1391 * if not in use. If it does not exist then find
1392 * the first free index and add it.
1393 */
1394 for (j=0; j <= h->highest_lun; j++){
1395 if (h->drv[j].LunID == lunid){
1396 drv_index = j;
1397 drv_found = 1;
1398 }
1399 }
1400
1401 /* check if the drive was found already in the array */
1402 if (!drv_found){
1403 drv_index = cciss_find_free_drive_index(ctlr);
1404 if (drv_index == -1)
1405 goto freeret;
1406
1407 }
1408 h->drv[drv_index].LunID = lunid;
1409 cciss_update_drive_info(ctlr, drv_index);
1410 } /* end for */
1411 } /* end else */
1412
1413 freeret:
1414 kfree(ld_buff);
1415 h->busy_configuring = 0;
1416 /* We return -1 here to tell the ACU that we have registered/updated
1417 * all of the drives that we can and to keep it from calling us
1418 * additional times.
1419 */
1420 return -1;
1421 mem_msg:
1422 printk(KERN_ERR "cciss: out of memory\n");
1423 goto freeret;
1424 }
1425
1426 /* This function will deregister the disk and its queue from the
1427 * kernel. It must be called with the controller lock held and the
1428 * drv structure's busy_configuring flag set. Its parameters are:
1429 *
1430 * disk = This is the disk to be deregistered
1431 * drv = This is the drive_info_struct associated with the disk to be
1432 * deregistered. It contains information about the disk used
1433 * by the driver.
1434 * clear_all = This flag determines whether or not the disk information
1435 * is going to be completely cleared out and the highest_lun
1436 * reset. Sometimes we want to clear out information about
1437 * the disk in preparation for re-adding it. In this case
1438 * the highest_lun should be left unchanged and the LunID
1439 * should not be cleared.
1440 */
1441 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1442 int clear_all)
1443 {
1444 ctlr_info_t *h = get_host(disk);
1445
1446 if (!capable(CAP_SYS_RAWIO))
1447 return -EPERM;
1448
1449 /* make sure logical volume is NOT in use */
1450 if(clear_all || (h->gendisk[0] == disk)) {
1451 if (drv->usage_count > 1)
1452 return -EBUSY;
1453 }
1454 else
1455 if( drv->usage_count > 0 )
1456 return -EBUSY;
1457
1458 /* invalidate the devices and deregister the disk. If it is disk
1459 * zero do not deregister it but just zero out its values. This
1460 * allows us to delete disk zero but keep the controller registered.
1461 */
1462 if (h->gendisk[0] != disk){
1463 if (disk) {
1464 request_queue_t *q = disk->queue;
1465 if (disk->flags & GENHD_FL_UP)
1466 del_gendisk(disk);
1467 if (q)
1468 blk_cleanup_queue(q);
1469 }
1470 }
1471
1472 --h->num_luns;
1473 /* zero out the disk size info */
1474 drv->nr_blocks = 0;
1475 drv->block_size = 0;
1476 drv->heads = 0;
1477 drv->sectors = 0;
1478 drv->cylinders = 0;
1479 drv->raid_level = -1; /* This can be used as a flag variable to
1480 * indicate that this element of the drive
1481 * array is free.
1482 */
1483
1484 if (clear_all){
1485 /* check to see if it was the last disk */
1486 if (drv == h->drv + h->highest_lun) {
1487 /* if so, find the new highest lun */
1488 int i, newhighest =-1;
1489 for(i=0; i<h->highest_lun; i++) {
1490 /* if the disk has size > 0, it is available */
1491 if (h->drv[i].heads)
1492 newhighest = i;
1493 }
1494 h->highest_lun = newhighest;
1495 }
1496
1497 drv->LunID = 0;
1498 }
1499 return(0);
1500 }
1501
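/* fill_cmd() builds the command header, request block and single
 * scatter-gather entry for either a controller command (TYPE_CMD) or a
 * message to the controller (TYPE_MSG).  It maps the data buffer for DMA
 * but does not submit the command. */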
1502 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1503 size_t size,
1504 unsigned int use_unit_num, /* 0: address the controller,
1505 1: address logical volume log_unit,
1506 2: periph device address is scsi3addr */
1507 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1508 int cmd_type)
1509 {
1510 ctlr_info_t *h= hba[ctlr];
1511 u64bit buff_dma_handle;
1512 int status = IO_OK;
1513
1514 c->cmd_type = CMD_IOCTL_PEND;
1515 c->Header.ReplyQueue = 0;
1516 if( buff != NULL) {
1517 c->Header.SGList = 1;
1518 c->Header.SGTotal= 1;
1519 } else {
1520 c->Header.SGList = 0;
1521 c->Header.SGTotal= 0;
1522 }
1523 c->Header.Tag.lower = c->busaddr;
1524
1525 c->Request.Type.Type = cmd_type;
1526 if (cmd_type == TYPE_CMD) {
1527 switch(cmd) {
1528 case CISS_INQUIRY:
1529 /* If the logical unit number is 0 then this command is going
1530 to the controller, so it's a physical command:
1531 mode = 0, target = 0, and we have nothing to write.
1532 Otherwise, if use_unit_num == 1,
1533 mode = 1 (volume set addressing), target = LUNID;
1534 otherwise, if use_unit_num == 2,
1535 mode = 0 (periph dev addr), target = scsi3addr */
1536 if (use_unit_num == 1) {
1537 c->Header.LUN.LogDev.VolId=
1538 h->drv[log_unit].LunID;
1539 c->Header.LUN.LogDev.Mode = 1;
1540 } else if (use_unit_num == 2) {
1541 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1542 c->Header.LUN.LogDev.Mode = 0;
1543 }
1544 /* are we trying to read a vital product page */
1545 if(page_code != 0) {
1546 c->Request.CDB[1] = 0x01;
1547 c->Request.CDB[2] = page_code;
1548 }
1549 c->Request.CDBLen = 6;
1550 c->Request.Type.Attribute = ATTR_SIMPLE;
1551 c->Request.Type.Direction = XFER_READ;
1552 c->Request.Timeout = 0;
1553 c->Request.CDB[0] = CISS_INQUIRY;
1554 c->Request.CDB[4] = size & 0xFF;
1555 break;
1556 case CISS_REPORT_LOG:
1557 case CISS_REPORT_PHYS:
1558 /* Talking to the controller, so it's a physical command:
1559 mode = 00, target = 0. Nothing to write.
1560 */
1561 c->Request.CDBLen = 12;
1562 c->Request.Type.Attribute = ATTR_SIMPLE;
1563 c->Request.Type.Direction = XFER_READ;
1564 c->Request.Timeout = 0;
1565 c->Request.CDB[0] = cmd;
1566 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1567 c->Request.CDB[7] = (size >> 16) & 0xFF;
1568 c->Request.CDB[8] = (size >> 8) & 0xFF;
1569 c->Request.CDB[9] = size & 0xFF;
1570 break;
1571
1572 case CCISS_READ_CAPACITY:
1573 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1574 c->Header.LUN.LogDev.Mode = 1;
1575 c->Request.CDBLen = 10;
1576 c->Request.Type.Attribute = ATTR_SIMPLE;
1577 c->Request.Type.Direction = XFER_READ;
1578 c->Request.Timeout = 0;
1579 c->Request.CDB[0] = cmd;
1580 break;
1581 case CCISS_CACHE_FLUSH:
1582 c->Request.CDBLen = 12;
1583 c->Request.Type.Attribute = ATTR_SIMPLE;
1584 c->Request.Type.Direction = XFER_WRITE;
1585 c->Request.Timeout = 0;
1586 c->Request.CDB[0] = BMIC_WRITE;
1587 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1588 break;
1589 default:
1590 printk(KERN_WARNING
1591 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1592 return(IO_ERROR);
1593 }
1594 } else if (cmd_type == TYPE_MSG) {
1595 switch (cmd) {
1596 case 0: /* ABORT message */
1597 c->Request.CDBLen = 12;
1598 c->Request.Type.Attribute = ATTR_SIMPLE;
1599 c->Request.Type.Direction = XFER_WRITE;
1600 c->Request.Timeout = 0;
1601 c->Request.CDB[0] = cmd; /* abort */
1602 c->Request.CDB[1] = 0; /* abort a command */
1603 /* buff contains the tag of the command to abort */
1604 memcpy(&c->Request.CDB[4], buff, 8);
1605 break;
1606 case 1: /* RESET message */
1607 c->Request.CDBLen = 12;
1608 c->Request.Type.Attribute = ATTR_SIMPLE;
1609 c->Request.Type.Direction = XFER_WRITE;
1610 c->Request.Timeout = 0;
1611 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1612 c->Request.CDB[0] = cmd; /* reset */
1613 c->Request.CDB[1] = 0x04; /* reset a LUN */
break; /* don't fall through into the No-Op case below, which would overwrite the 12-byte CDB set up here */
1614 case 3: /* No-Op message */
1615 c->Request.CDBLen = 1;
1616 c->Request.Type.Attribute = ATTR_SIMPLE;
1617 c->Request.Type.Direction = XFER_WRITE;
1618 c->Request.Timeout = 0;
1619 c->Request.CDB[0] = cmd;
1620 break;
1621 default:
1622 printk(KERN_WARNING
1623 "cciss%d: unknown message type %d\n",
1624 ctlr, cmd);
1625 return IO_ERROR;
1626 }
1627 } else {
1628 printk(KERN_WARNING
1629 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1630 return IO_ERROR;
1631 }
1632 /* Fill in the scatter gather information */
1633 if (size > 0) {
1634 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1635 buff, size, PCI_DMA_BIDIRECTIONAL);
1636 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1637 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1638 c->SG[0].Len = size;
1639 c->SG[0].Ext = 0; /* we are not chaining */
1640 }
1641 return status;
1642 }
1643 static int sendcmd_withirq(__u8 cmd,
1644 int ctlr,
1645 void *buff,
1646 size_t size,
1647 unsigned int use_unit_num,
1648 unsigned int log_unit,
1649 __u8 page_code,
1650 int cmd_type)
1651 {
1652 ctlr_info_t *h = hba[ctlr];
1653 CommandList_struct *c;
1654 u64bit buff_dma_handle;
1655 unsigned long flags;
1656 int return_status;
1657 DECLARE_COMPLETION(wait);
1658
1659 if ((c = cmd_alloc(h , 0)) == NULL)
1660 return -ENOMEM;
1661 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1662 log_unit, page_code, NULL, cmd_type);
1663 if (return_status != IO_OK) {
1664 cmd_free(h, c, 0);
1665 return return_status;
1666 }
1667 resend_cmd2:
1668 c->waiting = &wait;
1669
1670 /* Put the request on the tail of the queue and send it */
1671 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1672 addQ(&h->reqQ, c);
1673 h->Qdepth++;
1674 start_io(h);
1675 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1676
1677 wait_for_completion(&wait);
1678
1679 if(c->err_info->CommandStatus != 0)
1680 { /* an error has occurred */
1681 switch(c->err_info->CommandStatus)
1682 {
1683 case CMD_TARGET_STATUS:
1684 printk(KERN_WARNING "cciss: cmd %p has "
1685 " completed with errors\n", c);
1686 if( c->err_info->ScsiStatus)
1687 {
1688 printk(KERN_WARNING "cciss: cmd %p "
1689 "has SCSI Status = %x\n",
1690 c,
1691 c->err_info->ScsiStatus);
1692 }
1693
1694 break;
1695 case CMD_DATA_UNDERRUN:
1696 case CMD_DATA_OVERRUN:
1697 /* expected for inquiry and report lun commands */
1698 break;
1699 case CMD_INVALID:
1700 printk(KERN_WARNING "cciss: Cmd %p is "
1701 "reported invalid\n", c);
1702 return_status = IO_ERROR;
1703 break;
1704 case CMD_PROTOCOL_ERR:
1705 printk(KERN_WARNING "cciss: cmd %p has "
1706 "protocol error \n", c);
1707 return_status = IO_ERROR;
1708 break;
1709 case CMD_HARDWARE_ERR:
1710 printk(KERN_WARNING "cciss: cmd %p had "
1711 " hardware error\n", c);
1712 return_status = IO_ERROR;
1713 break;
1714 case CMD_CONNECTION_LOST:
1715 printk(KERN_WARNING "cciss: cmd %p had "
1716 "connection lost\n", c);
1717 return_status = IO_ERROR;
1718 break;
1719 case CMD_ABORTED:
1720 printk(KERN_WARNING "cciss: cmd %p was "
1721 "aborted\n", c);
1722 return_status = IO_ERROR;
1723 break;
1724 case CMD_ABORT_FAILED:
1725 printk(KERN_WARNING "cciss: cmd %p reports "
1726 "abort failed\n", c);
1727 return_status = IO_ERROR;
1728 break;
1729 case CMD_UNSOLICITED_ABORT:
1730 printk(KERN_WARNING
1731 "cciss%d: unsolicited abort %p\n",
1732 ctlr, c);
1733 if (c->retry_count < MAX_CMD_RETRIES) {
1734 printk(KERN_WARNING
1735 "cciss%d: retrying %p\n",
1736 ctlr, c);
1737 c->retry_count++;
1738 /* erase the old error information */
1739 memset(c->err_info, 0,
1740 sizeof(ErrorInfo_struct));
1741 return_status = IO_OK;
1742 INIT_COMPLETION(wait);
1743 goto resend_cmd2;
1744 }
1745 return_status = IO_ERROR;
1746 break;
1747 default:
1748 printk(KERN_WARNING "cciss: cmd %p returned "
1749 "unknown status %x\n", c,
1750 c->err_info->CommandStatus);
1751 return_status = IO_ERROR;
1752 }
1753 }
1754 /* unlock the buffers from DMA */
1755 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1756 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1757 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1758 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1759 cmd_free(h, c, 0);
1760 return(return_status);
1761
1762 }
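/* Issue the vendor-specific inquiry page 0xC1 to logical volume logvol and
 * fill in the geometry fields of *drv.  Volumes that do not report geometry
 * get a default of 255 heads and 32 sectors per track. */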
1763 static void cciss_geometry_inquiry(int ctlr, int logvol,
1764 int withirq, unsigned int total_size,
1765 unsigned int block_size, InquiryData_struct *inq_buff,
1766 drive_info_struct *drv)
1767 {
1768 int return_code;
1769 memset(inq_buff, 0, sizeof(InquiryData_struct));
1770 if (withirq)
1771 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1772 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1773 else
1774 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1775 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1776 if (return_code == IO_OK) {
1777 if(inq_buff->data_byte[8] == 0xFF) {
1778 printk(KERN_WARNING
1779 "cciss: reading geometry failed, volume "
1780 "does not support reading geometry\n");
1781 drv->block_size = block_size;
1782 drv->nr_blocks = total_size;
1783 drv->heads = 255;
1784 drv->sectors = 32; // Sectors per track
1785 drv->cylinders = total_size / 255 / 32;
1786 } else {
1787 unsigned int t;
1788
1789 drv->block_size = block_size;
1790 drv->nr_blocks = total_size;
1791 drv->heads = inq_buff->data_byte[6];
1792 drv->sectors = inq_buff->data_byte[7];
1793 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1794 drv->cylinders += inq_buff->data_byte[5];
1795 drv->raid_level = inq_buff->data_byte[8];
1796 t = drv->heads * drv->sectors;
1797 if (t > 1) {
1798 drv->cylinders = total_size/t;
1799 }
1800 }
1801 } else { /* Get geometry failed */
1802 printk(KERN_WARNING "cciss: reading geometry failed\n");
1803 }
1804 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1805 drv->heads, drv->sectors, drv->cylinders);
1806 }
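/* Read the volume size with CCISS_READ_CAPACITY: the controller returns the
 * last block number and block size big-endian, so convert to host order and
 * add one to get the block count. */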
1807 static void
1808 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1809 int withirq, unsigned int *total_size, unsigned int *block_size)
1810 {
1811 int return_code;
1812 memset(buf, 0, sizeof(*buf));
1813 if (withirq)
1814 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1815 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1816 else
1817 return_code = sendcmd(CCISS_READ_CAPACITY,
1818 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1819 if (return_code == IO_OK) {
1820 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1821 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1822 } else { /* read capacity command failed */
1823 printk(KERN_WARNING "cciss: read capacity failed\n");
1824 *total_size = 0;
1825 *block_size = BLOCK_SIZE;
1826 }
1827 printk(KERN_INFO " blocks= %u block_size= %d\n",
1828 *total_size, *block_size);
1829 return;
1830 }
1831
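/* Block layer revalidate_disk hook: re-read the capacity and geometry of
 * this logical volume and refresh the queue's sector size and the gendisk
 * capacity. */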
1832 static int cciss_revalidate(struct gendisk *disk)
1833 {
1834 ctlr_info_t *h = get_host(disk);
1835 drive_info_struct *drv = get_drv(disk);
1836 int logvol;
1837 int FOUND=0;
1838 unsigned int block_size;
1839 unsigned int total_size;
1840 ReadCapdata_struct *size_buff = NULL;
1841 InquiryData_struct *inq_buff = NULL;
1842
1843 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1844 {
1845 if(h->drv[logvol].LunID == drv->LunID) {
1846 FOUND=1;
1847 break;
1848 }
1849 }
1850
1851 if (!FOUND) return 1;
1852
1853 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1854 if (size_buff == NULL)
1855 {
1856 printk(KERN_WARNING "cciss: out of memory\n");
1857 return 1;
1858 }
1859 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1860 if (inq_buff == NULL)
1861 {
1862 printk(KERN_WARNING "cciss: out of memory\n");
1863 kfree(size_buff);
1864 return 1;
1865 }
1866
1867 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1868 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1869
1870 blk_queue_hardsect_size(drv->queue, drv->block_size);
1871 set_capacity(disk, drv->nr_blocks);
1872
1873 kfree(size_buff);
1874 kfree(inq_buff);
1875 return 0;
1876 }
1877
1878 /*
1879 * Poll for a command to complete.
1880 * The memory mapped FIFO is polled for the completion.
1881 * Used only at init time; interrupts from the HBA are disabled.
1882 */
1883 static unsigned long pollcomplete(int ctlr)
1884 {
1885 unsigned long done;
1886 int i;
1887
1888 /* Wait (up to 20 seconds) for a command to complete */
1889
1890 for (i = 20 * HZ; i > 0; i--) {
1891 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1892 if (done == FIFO_EMPTY)
1893 schedule_timeout_uninterruptible(1);
1894 else
1895 return (done);
1896 }
1897 /* Invalid address to tell caller we ran out of time */
1898 return 1;
1899 }
1900
1901 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1902 {
1903 /* We get in here if sendcmd() is polling for completions
1904 and gets some command back that it wasn't expecting --
1905 something other than that which it just sent down.
1906 Ordinarily, that shouldn't happen, but it can happen when
1907 the scsi tape stuff gets into error handling mode, and
1908 starts using sendcmd() to try to abort commands and
1909 reset tape drives. In that case, sendcmd may pick up
1910 completions of commands that were sent to logical drives
1911 through the block i/o system, or cciss ioctls completing, etc.
1912 In that case, we need to save those completions for later
1913 processing by the interrupt handler.
1914 */
1915
1916 #ifdef CONFIG_CISS_SCSI_TAPE
1917 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1918
1919 /* If it's not the scsi tape stuff doing error handling, (abort */
1920 /* or reset) then we don't expect anything weird. */
1921 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1922 #endif
1923 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1924 "Invalid command list address returned! (%lx)\n",
1925 ctlr, complete);
1926 /* not much we can do. */
1927 #ifdef CONFIG_CISS_SCSI_TAPE
1928 return 1;
1929 }
1930
1931 /* We've sent down an abort or reset, but something else
1932 has completed */
1933 if (srl->ncompletions >= (NR_CMDS + 2)) {
1934 /* Uh oh. No room to save it for later... */
1935 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1936 "reject list overflow, command lost!\n", ctlr);
1937 return 1;
1938 }
1939 /* Save it for later */
1940 srl->complete[srl->ncompletions] = complete;
1941 srl->ncompletions++;
1942 #endif
1943 return 0;
1944 }
1945
1946 /*
1947 * Send a command to the controller, and wait for it to complete.
1948 * Only used at init time.
1949 */
1950 static int sendcmd(
1951 __u8 cmd,
1952 int ctlr,
1953 void *buff,
1954 size_t size,
1955 unsigned int use_unit_num, /* 0: address the controller,
1956 1: address logical volume log_unit,
1957 2: periph device address is scsi3addr */
1958 unsigned int log_unit,
1959 __u8 page_code,
1960 unsigned char *scsi3addr,
1961 int cmd_type)
1962 {
1963 CommandList_struct *c;
1964 int i;
1965 unsigned long complete;
1966 ctlr_info_t *info_p= hba[ctlr];
1967 u64bit buff_dma_handle;
1968 int status, done = 0;
1969
1970 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1971 printk(KERN_WARNING "cciss: unable to get memory");
1972 return(IO_ERROR);
1973 }
1974 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1975 log_unit, page_code, scsi3addr, cmd_type);
1976 if (status != IO_OK) {
1977 cmd_free(info_p, c, 1);
1978 return status;
1979 }
1980 resend_cmd1:
1981 /*
1982 * Disable interrupt
1983 */
1984 #ifdef CCISS_DEBUG
1985 printk(KERN_DEBUG "cciss: turning intr off\n");
1986 #endif /* CCISS_DEBUG */
1987 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1988
1989 /* Make sure there is room in the command FIFO */
1990 /* Actually it should be completely empty at this time */
1991 /* unless we are in here doing error handling for the scsi */
1992 /* tape side of the driver. */
1993 for (i = 200000; i > 0; i--)
1994 {
1995 /* if fifo isn't full go */
1996 if (!(info_p->access.fifo_full(info_p)))
1997 {
1998
1999 break;
2000 }
2001 udelay(10);
2002 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2003 " waiting!\n", ctlr);
2004 }
2005 /*
2006 * Send the cmd
2007 */
2008 info_p->access.submit_command(info_p, c);
2009 done = 0;
2010 do {
2011 complete = pollcomplete(ctlr);
2012
2013 #ifdef CCISS_DEBUG
2014 printk(KERN_DEBUG "cciss: command completed\n");
2015 #endif /* CCISS_DEBUG */
2016
2017 if (complete == 1) {
2018 printk( KERN_WARNING
2019 "cciss cciss%d: SendCmd Timeout out, "
2020 "No command list address returned!\n",
2021 ctlr);
2022 status = IO_ERROR;
2023 done = 1;
2024 break;
2025 }
2026
2027 /* This will need to change for direct lookup completions */
2028 if ( (complete & CISS_ERROR_BIT)
2029 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2030 {
2031 /* if there is a data overrun or underrun on a Report command,
2032 ignore it
2033 */
2034 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2035 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2036 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2037 ((c->err_info->CommandStatus ==
2038 CMD_DATA_OVERRUN) ||
2039 (c->err_info->CommandStatus ==
2040 CMD_DATA_UNDERRUN)
2041 ))
2042 {
2043 complete = c->busaddr;
2044 } else {
2045 if (c->err_info->CommandStatus ==
2046 CMD_UNSOLICITED_ABORT) {
2047 printk(KERN_WARNING "cciss%d: "
2048 "unsolicited abort %p\n",
2049 ctlr, c);
2050 if (c->retry_count < MAX_CMD_RETRIES) {
2051 printk(KERN_WARNING
2052 "cciss%d: retrying %p\n",
2053 ctlr, c);
2054 c->retry_count++;
2055 /* erase the old error */
2056 /* information */
2057 memset(c->err_info, 0,
2058 sizeof(ErrorInfo_struct));
2059 goto resend_cmd1;
2060 } else {
2061 printk(KERN_WARNING
2062 "cciss%d: retried %p too "
2063 "many times\n", ctlr, c);
2064 status = IO_ERROR;
2065 goto cleanup1;
2066 }
2067 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2068 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2069 status = IO_ERROR;
2070 goto cleanup1;
2071 }
2072 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2073 " Error %x \n", ctlr,
2074 c->err_info->CommandStatus);
2075 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2076 " offensive info\n"
2077 " size %x\n num %x value %x\n", ctlr,
2078 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2079 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2080 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2081 status = IO_ERROR;
2082 goto cleanup1;
2083 }
2084 }
2085 /* This will need changing for direct lookup completions */
2086 if (complete != c->busaddr) {
2087 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2088 BUG(); /* we are pretty much hosed if we get here. */
2089 }
2090 continue;
2091 } else
2092 done = 1;
2093 } while (!done);
2094
2095 cleanup1:
2096 /* unlock the data buffer from DMA */
2097 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2098 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2099 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2100 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2101 #ifdef CONFIG_CISS_SCSI_TAPE
2102 /* if we saved some commands for later, process them now. */
2103 if (info_p->scsi_rejects.ncompletions > 0)
2104 do_cciss_intr(0, info_p, NULL);
2105 #endif
2106 cmd_free(info_p, c, 1);
2107 return (status);
2108 }
2109 /*
2110 * Map (physical) PCI mem into (virtual) kernel space
2111 */
2112 static void __iomem *remap_pci_mem(ulong base, ulong size)
2113 {
2114 ulong page_base = ((ulong) base) & PAGE_MASK;
2115 ulong page_offs = ((ulong) base) - page_base;
2116 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2117
2118 return page_remapped ? (page_remapped + page_offs) : NULL;
2119 }
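/*
 * Worked example (under #if 0, not built) of the page-alignment math in
 * remap_pci_mem(): assuming 4 KiB pages and a hypothetical base of
 * 0x12345678, page_base is 0x12345000 and page_offs is 0x678; ioremap()
 * maps page_offs + size bytes from page_base, and adding page_offs back
 * gives the caller a pointer to the original, unaligned address.
 */
#if 0
static void example_remap_math(void)
{
	unsigned long mask = ~0xfffUL;			/* PAGE_MASK for 4 KiB pages */
	unsigned long base = 0x12345678UL;		/* hypothetical BAR address */
	unsigned long page_base = base & mask;		/* 0x12345000 */
	unsigned long page_offs = base - page_base;	/* 0x678 */

	(void)page_base;
	(void)page_offs;
}
#endif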
2120
2121 /*
2122 * Takes jobs off the request queue, sends them to the hardware, and then
2123 * puts them on the completion queue to wait for completion.
2124 */
2125 static void start_io( ctlr_info_t *h)
2126 {
2127 CommandList_struct *c;
2128
2129 while(( c = h->reqQ) != NULL )
2130 {
2131 /* can't do anything if fifo is full */
2132 if ((h->access.fifo_full(h))) {
2133 printk(KERN_WARNING "cciss: fifo full\n");
2134 break;
2135 }
2136
2137 /* Get the first entry from the Request Q */
2138 removeQ(&(h->reqQ), c);
2139 h->Qdepth--;
2140
2141 /* Tell the controller to execute the command */
2142 h->access.submit_command(h, c);
2143
2144 /* Put job onto the completed Q */
2145 addQ (&(h->cmpQ), c);
2146 }
2147 }
2148
2149 static inline void complete_buffers(struct bio *bio, int status)
2150 {
2151 while (bio) {
2152 struct bio *xbh = bio->bi_next;
2153 int nr_sectors = bio_sectors(bio);
2154
2155 bio->bi_next = NULL;
2156 blk_finished_io(nr_sectors);
2157 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
2158 bio = xbh;
2159 }
2160
2161 }
2162 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2163 /* Zeros out the error record and then resends the command back */
2164 /* to the controller */
2165 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2166 {
2167 /* erase the old error information */
2168 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2169
2170 /* add it to software queue and then send it to the controller */
2171 addQ(&(h->reqQ),c);
2172 h->Qdepth++;
2173 if(h->Qdepth > h->maxQsinceinit)
2174 h->maxQsinceinit = h->Qdepth;
2175
2176 start_io(h);
2177 }
2178 /* checks the status of the job and calls complete buffers to mark all
2179 * buffers for the completed job.
2180 */
2181 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2182 int timeout)
2183 {
2184 int status = 1;
2185 int i;
2186 int retry_cmd = 0;
2187 u64bit temp64;
2188
2189 if (timeout)
2190 status = 0;
2191
2192 if(cmd->err_info->CommandStatus != 0)
2193 { /* an error has occurred */
2194 switch(cmd->err_info->CommandStatus)
2195 {
2196 unsigned char sense_key;
2197 case CMD_TARGET_STATUS:
2198 status = 0;
2199
2200 if( cmd->err_info->ScsiStatus == 0x02)
2201 {
2202 printk(KERN_WARNING "cciss: cmd %p "
2203 "has CHECK CONDITION "
2204 " byte 2 = 0x%x\n", cmd,
2205 cmd->err_info->SenseInfo[2]
2206 );
2207 /* check the sense key */
2208 sense_key = 0xf &
2209 cmd->err_info->SenseInfo[2];
2210 /* no status or recovered error */
2211 if((sense_key == 0x0) ||
2212 (sense_key == 0x1))
2213 {
2214 status = 1;
2215 }
2216 } else
2217 {
2218 printk(KERN_WARNING "cciss: cmd %p "
2219 "has SCSI Status 0x%x\n",
2220 cmd, cmd->err_info->ScsiStatus);
2221 }
2222 break;
2223 case CMD_DATA_UNDERRUN:
2224 printk(KERN_WARNING "cciss: cmd %p has"
2225 " completed with data underrun "
2226 "reported\n", cmd);
2227 break;
2228 case CMD_DATA_OVERRUN:
2229 printk(KERN_WARNING "cciss: cmd %p has"
2230 " completed with data overrun "
2231 "reported\n", cmd);
2232 break;
2233 case CMD_INVALID:
2234 printk(KERN_WARNING "cciss: cmd %p is "
2235 "reported invalid\n", cmd);
2236 status = 0;
2237 break;
2238 case CMD_PROTOCOL_ERR:
2239 printk(KERN_WARNING "cciss: cmd %p has "
2240 "protocol error \n", cmd);
2241 status = 0;
2242 break;
2243 case CMD_HARDWARE_ERR:
2244 printk(KERN_WARNING "cciss: cmd %p had "
2245 " hardware error\n", cmd);
2246 status = 0;
2247 break;
2248 case CMD_CONNECTION_LOST:
2249 printk(KERN_WARNING "cciss: cmd %p had "
2250 "connection lost\n", cmd);
2251 status=0;
2252 break;
2253 case CMD_ABORTED:
2254 printk(KERN_WARNING "cciss: cmd %p was "
2255 "aborted\n", cmd);
2256 status=0;
2257 break;
2258 case CMD_ABORT_FAILED:
2259 printk(KERN_WARNING "cciss: cmd %p reports "
2260 "abort failed\n", cmd);
2261 status=0;
2262 break;
2263 case CMD_UNSOLICITED_ABORT:
2264 printk(KERN_WARNING "cciss%d: unsolicited "
2265 "abort %p\n", h->ctlr, cmd);
2266 if (cmd->retry_count < MAX_CMD_RETRIES) {
2267 retry_cmd=1;
2268 printk(KERN_WARNING
2269 "cciss%d: retrying %p\n",
2270 h->ctlr, cmd);
2271 cmd->retry_count++;
2272 } else
2273 printk(KERN_WARNING
2274 "cciss%d: %p retried too "
2275 "many times\n", h->ctlr, cmd);
2276 status=0;
2277 break;
2278 case CMD_TIMEOUT:
2279 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2280 cmd);
2281 status=0;
2282 break;
2283 default:
2284 printk(KERN_WARNING "cciss: cmd %p returned "
2285 "unknown status %x\n", cmd,
2286 cmd->err_info->CommandStatus);
2287 status=0;
2288 }
2289 }
2290 /* We need to resend this command */
2291 if(retry_cmd) {
2292 resend_cciss_cmd(h,cmd);
2293 return;
2294 }
2295 /* command did not need to be retried */
2296 /* unmap the DMA mapping for all the scatter gather elements */
2297 for(i=0; i<cmd->Header.SGList; i++) {
2298 temp64.val32.lower = cmd->SG[i].Addr.lower;
2299 temp64.val32.upper = cmd->SG[i].Addr.upper;
2300 pci_unmap_page(hba[cmd->ctlr]->pdev,
2301 temp64.val, cmd->SG[i].Len,
2302 (cmd->Request.Type.Direction == XFER_READ) ?
2303 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
2304 }
2305 complete_buffers(cmd->rq->bio, status);
2306
2307 #ifdef CCISS_DEBUG
2308 printk("Done with %p\n", cmd->rq);
2309 #endif /* CCISS_DEBUG */
2310
2311 end_that_request_last(cmd->rq);
2312 cmd_free(h,cmd,1);
2313 }
2314
2315 /*
2316 * Get a request and submit it to the controller.
2317 */
2318 static void do_cciss_request(request_queue_t *q)
2319 {
2320 ctlr_info_t *h= q->queuedata;
2321 CommandList_struct *c;
2322 int start_blk, seg;
2323 struct request *creq;
2324 u64bit temp64;
2325 struct scatterlist tmp_sg[MAXSGENTRIES];
2326 drive_info_struct *drv;
2327 int i, dir;
2328
2329 /* We call start_io here in case there is a command waiting on the
2330 * queue that has not been sent.
2331 */
2332 if (blk_queue_plugged(q))
2333 goto startio;
2334
2335 queue:
2336 creq = elv_next_request(q);
2337 if (!creq)
2338 goto startio;
2339
2340 if (creq->nr_phys_segments > MAXSGENTRIES)
2341 BUG();
2342
2343 if (( c = cmd_alloc(h, 1)) == NULL)
2344 goto full;
2345
2346 blkdev_dequeue_request(creq);
2347
2348 spin_unlock_irq(q->queue_lock);
2349
2350 c->cmd_type = CMD_RWREQ;
2351 c->rq = creq;
2352
2353 /* fill in the request */
2354 drv = creq->rq_disk->private_data;
2355 c->Header.ReplyQueue = 0; // unused in simple mode
2356 /* got command from pool, so use the command block index instead */
2357 /* for direct lookups. */
2358 /* The first 2 bits are reserved for controller error reporting. */
2359 c->Header.Tag.lower = (c->cmdindex << 3);
2360 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2361 c->Header.LUN.LogDev.VolId= drv->LunID;
2362 c->Header.LUN.LogDev.Mode = 1;
2363 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2364 c->Request.Type.Type = TYPE_CMD; // It is a command.
2365 c->Request.Type.Attribute = ATTR_SIMPLE;
2366 c->Request.Type.Direction =
2367 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2368 c->Request.Timeout = 0; // Don't time out
2369 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2370 start_blk = creq->sector;
2371 #ifdef CCISS_DEBUG
2372 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
2373 (int) creq->nr_sectors);
2374 #endif /* CCISS_DEBUG */
2375
2376 seg = blk_rq_map_sg(q, creq, tmp_sg);
2377
2378 /* get the DMA records for the setup */
2379 if (c->Request.Type.Direction == XFER_READ)
2380 dir = PCI_DMA_FROMDEVICE;
2381 else
2382 dir = PCI_DMA_TODEVICE;
2383
2384 for (i=0; i<seg; i++)
2385 {
2386 c->SG[i].Len = tmp_sg[i].length;
2387 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2388 tmp_sg[i].offset, tmp_sg[i].length,
2389 dir);
2390 c->SG[i].Addr.lower = temp64.val32.lower;
2391 c->SG[i].Addr.upper = temp64.val32.upper;
2392 c->SG[i].Ext = 0; // we are not chaining
2393 }
2394 /* track how many SG entries we are using */
2395 if( seg > h->maxSG)
2396 h->maxSG = seg;
2397
2398 #ifdef CCISS_DEBUG
2399 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2400 #endif /* CCISS_DEBUG */
2401
2402 c->Header.SGList = c->Header.SGTotal = seg;
2403 c->Request.CDB[1]= 0;
2404 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2405 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2406 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2407 c->Request.CDB[5]= start_blk & 0xff;
2408 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2409 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2410 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2411 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2412
2413 spin_lock_irq(q->queue_lock);
2414
2415 addQ(&(h->reqQ),c);
2416 h->Qdepth++;
2417 if(h->Qdepth > h->maxQsinceinit)
2418 h->maxQsinceinit = h->Qdepth;
2419
2420 goto queue;
2421 full:
2422 blk_stop_queue(q);
2423 startio:
2424 /* We will already have the driver lock here, so there is no need
2425 * to take it.
2426 */
2427 start_io(h);
2428 }
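/*
 * Illustrative sketch (under #if 0, not built) of the 10-byte CDB packing
 * done in do_cciss_request() above: the starting block goes into bytes 2..5
 * most-significant-byte first, and the sector count into bytes 7..8.
 * example_pack_cdb10() is a hypothetical helper, not driver code.
 */
#if 0
static void example_pack_cdb10(unsigned char cdb[10],
			       unsigned int start_blk,
			       unsigned int nr_sectors)
{
	cdb[1] = 0;
	cdb[2] = (start_blk >> 24) & 0xff;	/* MSB of the block address */
	cdb[3] = (start_blk >> 16) & 0xff;
	cdb[4] = (start_blk >> 8) & 0xff;
	cdb[5] = start_blk & 0xff;
	cdb[6] = 0;
	cdb[7] = (nr_sectors >> 8) & 0xff;	/* MSB of the sector count */
	cdb[8] = nr_sectors & 0xff;
	cdb[9] = 0;
}
#endif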
2429
2430 static inline unsigned long get_next_completion(ctlr_info_t *h)
2431 {
2432 #ifdef CONFIG_CISS_SCSI_TAPE
2433 /* Any rejects from sendcmd() lying around? Process them first */
2434 if (h->scsi_rejects.ncompletions == 0)
2435 return h->access.command_completed(h);
2436 else {
2437 struct sendcmd_reject_list *srl;
2438 int n;
2439 srl = &h->scsi_rejects;
2440 n = --srl->ncompletions;
2441 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2442 printk("p");
2443 return srl->complete[n];
2444 }
2445 #else
2446 return h->access.command_completed(h);
2447 #endif
2448 }
2449
2450 static inline int interrupt_pending(ctlr_info_t *h)
2451 {
2452 #ifdef CONFIG_CISS_SCSI_TAPE
2453 return ( h->access.intr_pending(h)
2454 || (h->scsi_rejects.ncompletions > 0));
2455 #else
2456 return h->access.intr_pending(h);
2457 #endif
2458 }
2459
2460 static inline long interrupt_not_for_us(ctlr_info_t *h)
2461 {
2462 #ifdef CONFIG_CISS_SCSI_TAPE
2463 return (((h->access.intr_pending(h) == 0) ||
2464 (h->interrupts_enabled == 0))
2465 && (h->scsi_rejects.ncompletions == 0));
2466 #else
2467 return (((h->access.intr_pending(h) == 0) ||
2468 (h->interrupts_enabled == 0)));
2469 #endif
2470 }
2471
2472 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2473 {
2474 ctlr_info_t *h = dev_id;
2475 CommandList_struct *c;
2476 unsigned long flags;
2477 __u32 a, a1, a2;
2478 int j;
2479 int start_queue = h->next_to_run;
2480
2481 if (interrupt_not_for_us(h))
2482 return IRQ_NONE;
2483 /*
2484 * If there are completed commands in the completion queue,
2485 * we had better do something about it.
2486 */
2487 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2488 while (interrupt_pending(h)) {
2489 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2490 a1 = a;
2491 if ((a & 0x04)) {
2492 a2 = (a >> 3);
2493 if (a2 >= NR_CMDS) {
2494 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2495 fail_all_cmds(h->ctlr);
2496 return IRQ_HANDLED;
2497 }
2498
2499 c = h->cmd_pool + a2;
2500 a = c->busaddr;
2501
2502 } else {
2503 a &= ~3;
2504 if ((c = h->cmpQ) == NULL) {
2505 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2506 continue;
2507 }
2508 while(c->busaddr != a) {
2509 c = c->next;
2510 if (c == h->cmpQ)
2511 break;
2512 }
2513 }
2514 /*
2515 * If we've found the command, take it off the
2516 * completion Q and free it
2517 */
2518 if (c->busaddr == a) {
2519 removeQ(&h->cmpQ, c);
2520 if (c->cmd_type == CMD_RWREQ) {
2521 complete_command(h, c, 0);
2522 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2523 complete(c->waiting);
2524 }
2525 # ifdef CONFIG_CISS_SCSI_TAPE
2526 else if (c->cmd_type == CMD_SCSI)
2527 complete_scsi_command(c, 0, a1);
2528 # endif
2529 continue;
2530 }
2531 }
2532 }
2533
2534 /* check to see if we have maxed out the number of commands that can
2535 * be placed on the queue. If so then exit. We do this check here
2536 * in case the interrupt we serviced was from an ioctl and did not
2537 * free any new commands.
2538 */
2539 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2540 goto cleanup;
2541
2542 /* We have room on the queue for more commands. Now we need to queue
2543 * them up. We will also keep track of the next queue to run so
2544 * that every queue gets a chance to be started first.
2545 */
2546 for (j=0; j < h->highest_lun + 1; j++){
2547 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2548 /* make sure the disk has been added and the drive is real
2549 * because this can be called from the middle of init_one.
2550 */
2551 if(!(h->drv[curr_queue].queue) ||
2552 !(h->drv[curr_queue].heads))
2553 continue;
2554 blk_start_queue(h->gendisk[curr_queue]->queue);
2555
2556 /* check to see if we have maxed out the number of commands
2557 * that can be placed on the queue.
2558 */
2559 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2560 {
2561 if (curr_queue == start_queue){
2562 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2563 goto cleanup;
2564 } else {
2565 h->next_to_run = curr_queue;
2566 goto cleanup;
2567 }
2568 } else {
2569 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2570 }
2571 }
2572
2573 cleanup:
2574 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2575 return IRQ_HANDLED;
2576 }
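/*
 * Illustrative sketch (under #if 0, not built) of the direct-lookup tag
 * round trip: do_cciss_request() stores (cmdindex << 3) | 0x04 in the tag,
 * and do_cciss_intr() above recovers the command pool index with a shift
 * once bit 0x04 is seen.  The low two bits are reserved for controller
 * error reporting.  These helpers are hypothetical.
 */
#if 0
static unsigned int example_encode_tag(int cmdindex)
{
	return ((unsigned int)cmdindex << 3) | 0x04;	/* direct-lookup flag */
}

static int example_decode_tag(unsigned int tag, int *cmdindex)
{
	if (!(tag & 0x04))
		return 0;		/* not a direct-lookup completion */
	*cmdindex = tag >> 3;
	return 1;
}
#endif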
2577 /*
2578 * We cannot read the structure directly; for portability we must use
2579 * the io functions.
2580 * This is for debug only.
2581 */
2582 #ifdef CCISS_DEBUG
2583 static void print_cfg_table( CfgTable_struct *tb)
2584 {
2585 int i;
2586 char temp_name[17];
2587
2588 printk("Controller Configuration information\n");
2589 printk("------------------------------------\n");
2590 for(i=0;i<4;i++)
2591 temp_name[i] = readb(&(tb->Signature[i]));
2592 temp_name[4]='\0';
2593 printk(" Signature = %s\n", temp_name);
2594 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2595 printk(" Transport methods supported = 0x%x\n",
2596 readl(&(tb-> TransportSupport)));
2597 printk(" Transport methods active = 0x%x\n",
2598 readl(&(tb->TransportActive)));
2599 printk(" Requested transport Method = 0x%x\n",
2600 readl(&(tb->HostWrite.TransportRequest)));
2601 printk(" Coalese Interrupt Delay = 0x%x\n",
2602 readl(&(tb->HostWrite.CoalIntDelay)));
2603 printk(" Coalese Interrupt Count = 0x%x\n",
2604 readl(&(tb->HostWrite.CoalIntCount)));
2605 printk(" Max outstanding commands = 0x%d\n",
2606 readl(&(tb->CmdsOutMax)));
2607 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2608 for(i=0;i<16;i++)
2609 temp_name[i] = readb(&(tb->ServerName[i]));
2610 temp_name[16] = '\0';
2611 printk(" Server Name = %s\n", temp_name);
2612 printk(" Heartbeat Counter = 0x%x\n\n\n",
2613 readl(&(tb->HeartBeat)));
2614 }
2615 #endif /* CCISS_DEBUG */
2616
2617 static void release_io_mem(ctlr_info_t *c)
2618 {
2619 /* if IO mem was not protected do nothing */
2620 if( c->io_mem_addr == 0)
2621 return;
2622 release_region(c->io_mem_addr, c->io_mem_length);
2623 c->io_mem_addr = 0;
2624 c->io_mem_length = 0;
2625 }
2626
2627 static int find_PCI_BAR_index(struct pci_dev *pdev,
2628 unsigned long pci_bar_addr)
2629 {
2630 int i, offset, mem_type, bar_type;
2631 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2632 return 0;
2633 offset = 0;
2634 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2635 bar_type = pci_resource_flags(pdev, i) &
2636 PCI_BASE_ADDRESS_SPACE;
2637 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2638 offset += 4;
2639 else {
2640 mem_type = pci_resource_flags(pdev, i) &
2641 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2642 switch (mem_type) {
2643 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2644 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2645 offset += 4; /* 32 bit */
2646 break;
2647 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2648 offset += 8;
2649 break;
2650 default: /* reserved in PCI 2.2 */
2651 printk(KERN_WARNING "Base address is invalid\n");
2652 return -1;
2653 break;
2654 }
2655 }
2656 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2657 return i+1;
2658 }
2659 return -1;
2660 }
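/*
 * Simplified model (under #if 0, not built) of the walk performed by
 * find_PCI_BAR_index() above: each I/O or 32-bit memory BAR occupies 4
 * bytes of config space and each 64-bit memory BAR occupies 8, so the
 * resource matching a given config-space offset is the one at which the
 * running byte count reaches that offset.  Hypothetical helper only.
 */
#if 0
static int example_bar_index(const int bar_bytes[], int nresources,
			     int cfg_offset)
{
	int i, offset = 0;

	if (cfg_offset == 0)
		return 0;			/* BAR zero */
	for (i = 0; i < nresources; i++) {
		offset += bar_bytes[i];		/* 4 or 8 bytes per BAR */
		if (offset == cfg_offset)
			return i + 1;
	}
	return -1;
}
#endif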
2661
2662 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2663 {
2664 ushort subsystem_vendor_id, subsystem_device_id, command;
2665 __u32 board_id, scratchpad = 0;
2666 __u64 cfg_offset;
2667 __u32 cfg_base_addr;
2668 __u64 cfg_base_addr_index;
2669 int i;
2670
2671 /* check to see if controller has been disabled */
2672 /* BEFORE trying to enable it */
2673 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2674 if(!(command & 0x02))
2675 {
2676 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2677 return(-1);
2678 }
2679
2680 if (pci_enable_device(pdev))
2681 {
2682 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2683 return( -1);
2684 }
2685
2686 subsystem_vendor_id = pdev->subsystem_vendor;
2687 subsystem_device_id = pdev->subsystem_device;
2688 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2689 subsystem_vendor_id);
2690
2691 /* search for our IO range so we can protect it */
2692 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2693 {
2694 /* is this an IO range */
2695 if( pci_resource_flags(pdev, i) & 0x01 ) {
2696 c->io_mem_addr = pci_resource_start(pdev, i);
2697 c->io_mem_length = pci_resource_end(pdev, i) -
2698 pci_resource_start(pdev, i) +1;
2699 #ifdef CCISS_DEBUG
2700 printk("IO value found base_addr[%d] %lx %lx\n", i,
2701 c->io_mem_addr, c->io_mem_length);
2702 #endif /* CCISS_DEBUG */
2703 /* register the IO range */
2704 if(!request_region( c->io_mem_addr,
2705 c->io_mem_length, "cciss"))
2706 {
2707 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2708 c->io_mem_addr, c->io_mem_length);
2709 c->io_mem_addr= 0;
2710 c->io_mem_length = 0;
2711 }
2712 break;
2713 }
2714 }
2715
2716 #ifdef CCISS_DEBUG
2717 printk("command = %x\n", command);
2718 printk("irq = %x\n", pdev->irq);
2719 printk("board_id = %x\n", board_id);
2720 #endif /* CCISS_DEBUG */
2721
2722 c->intr = pdev->irq;
2723
2724 /*
2725 * Memory base addr is the first address; the second points to the config
2726 * table
2727 */
2728
2729 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2730 #ifdef CCISS_DEBUG
2731 printk("address 0 = %x\n", c->paddr);
2732 #endif /* CCISS_DEBUG */
2733 c->vaddr = remap_pci_mem(c->paddr, 200);
2734
2735 /* Wait for the board to become ready. (PCI hotplug needs this.)
2736 * We poll for up to 120 secs, once per 100ms. */
2737 for (i=0; i < 1200; i++) {
2738 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2739 if (scratchpad == CCISS_FIRMWARE_READY)
2740 break;
2741 set_current_state(TASK_INTERRUPTIBLE);
2742 schedule_timeout(HZ / 10); /* wait 100ms */
2743 }
2744 if (scratchpad != CCISS_FIRMWARE_READY) {
2745 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2746 return -1;
2747 }
2748
2749 /* get the address index number */
2750 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2751 cfg_base_addr &= (__u32) 0x0000ffff;
2752 #ifdef CCISS_DEBUG
2753 printk("cfg base address = %x\n", cfg_base_addr);
2754 #endif /* CCISS_DEBUG */
2755 cfg_base_addr_index =
2756 find_PCI_BAR_index(pdev, cfg_base_addr);
2757 #ifdef CCISS_DEBUG
2758 printk("cfg base address index = %x\n", cfg_base_addr_index);
2759 #endif /* CCISS_DEBUG */
2760 if (cfg_base_addr_index == -1) {
2761 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2762 release_io_mem(c);
2763 return -1;
2764 }
2765
2766 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2767 #ifdef CCISS_DEBUG
2768 printk("cfg offset = %x\n", cfg_offset);
2769 #endif /* CCISS_DEBUG */
2770 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2771 cfg_base_addr_index) + cfg_offset,
2772 sizeof(CfgTable_struct));
2773 c->board_id = board_id;
2774
2775 #ifdef CCISS_DEBUG
2776 print_cfg_table(c->cfgtable);
2777 #endif /* CCISS_DEBUG */
2778
2779 for(i=0; i<NR_PRODUCTS; i++) {
2780 if (board_id == products[i].board_id) {
2781 c->product_name = products[i].product_name;
2782 c->access = *(products[i].access);
2783 break;
2784 }
2785 }
2786 if (i == NR_PRODUCTS) {
2787 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2788 " to access the Smart Array controller %08lx\n",
2789 (unsigned long)board_id);
2790 return -1;
2791 }
2792 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2793 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2794 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2795 (readb(&c->cfgtable->Signature[3]) != 'S') )
2796 {
2797 printk("Does not appear to be a valid CISS config table\n");
2798 return -1;
2799 }
2800
2801 #ifdef CONFIG_X86
2802 {
2803 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2804 __u32 prefetch;
2805 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2806 prefetch |= 0x100;
2807 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2808 }
2809 #endif
2810
2811 #ifdef CCISS_DEBUG
2812 printk("Trying to put board into Simple mode\n");
2813 #endif /* CCISS_DEBUG */
2814 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2815 /* Update the field, and then ring the doorbell */
2816 writel( CFGTBL_Trans_Simple,
2817 &(c->cfgtable->HostWrite.TransportRequest));
2818 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2819
2820 /* under certain very rare conditions, this can take a while.
2821 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2822 * as we enter this code.) */
2823 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2824 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2825 break;
2826 /* delay and try again */
2827 set_current_state(TASK_INTERRUPTIBLE);
2828 schedule_timeout(10);
2829 }
2830
2831 #ifdef CCISS_DEBUG
2832 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2833 #endif /* CCISS_DEBUG */
2834 #ifdef CCISS_DEBUG
2835 print_cfg_table(c->cfgtable);
2836 #endif /* CCISS_DEBUG */
2837
2838 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2839 {
2840 printk(KERN_WARNING "cciss: unable to get board into"
2841 " simple mode\n");
2842 return -1;
2843 }
2844 return 0;
2845
2846 }
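/*
 * Illustrative sketch (under #if 0, not built): the board_id that
 * cciss_pci_init() matches against the products[] table is simply the PCI
 * subsystem device id in the upper 16 bits and the subsystem vendor id in
 * the lower 16 bits.  example_board_id() is a hypothetical helper.
 */
#if 0
static unsigned int example_board_id(unsigned short subsys_vendor_id,
				     unsigned short subsys_device_id)
{
	return (((unsigned int)subsys_device_id << 16) & 0xffff0000) |
		subsys_vendor_id;
}
#endif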
2847
2848 /*
2849 * Gets information about the local volumes attached to the controller.
2850 */
2851 static void cciss_getgeometry(int cntl_num)
2852 {
2853 ReportLunData_struct *ld_buff;
2854 ReadCapdata_struct *size_buff;
2855 InquiryData_struct *inq_buff;
2856 int return_code;
2857 int i;
2858 int listlength = 0;
2859 __u32 lunid = 0;
2860 int block_size;
2861 int total_size;
2862
2863 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2864 if (ld_buff == NULL)
2865 {
2866 printk(KERN_ERR "cciss: out of memory\n");
2867 return;
2868 }
2869 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2870 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2871 if (size_buff == NULL)
2872 {
2873 printk(KERN_ERR "cciss: out of memory\n");
2874 kfree(ld_buff);
2875 return;
2876 }
2877 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2878 if (inq_buff == NULL)
2879 {
2880 printk(KERN_ERR "cciss: out of memory\n");
2881 kfree(ld_buff);
2882 kfree(size_buff);
2883 return;
2884 }
2885 /* Get the firmware version */
2886 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2887 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2888 if (return_code == IO_OK)
2889 {
2890 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2891 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2892 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2893 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2894 } else /* send command failed */
2895 {
2896 printk(KERN_WARNING "cciss: unable to determine firmware"
2897 " version of controller\n");
2898 }
2899 /* Get the number of logical volumes */
2900 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2901 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2902
2903 if( return_code == IO_OK)
2904 {
2905 #ifdef CCISS_DEBUG
2906 printk("LUN Data\n--------------------------\n");
2907 #endif /* CCISS_DEBUG */
2908
2909 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2910 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2911 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2912 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2913 } else /* reading number of logical volumes failed */
2914 {
2915 printk(KERN_WARNING "cciss: report logical volume"
2916 " command failed\n");
2917 listlength = 0;
2918 }
2919 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2920 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
2921 {
2922 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
2923 CISS_MAX_LUN);
2924 hba[cntl_num]->num_luns = CISS_MAX_LUN;
2925 }
2926 #ifdef CCISS_DEBUG
2927 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
2928 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
2929 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
2930 #endif /* CCISS_DEBUG */
2931
2932 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
2933 // for(i=0; i< hba[cntl_num]->num_luns; i++)
2934 for(i=0; i < CISS_MAX_LUN; i++)
2935 {
2936 if (i < hba[cntl_num]->num_luns){
2937 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
2938 << 24;
2939 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
2940 << 16;
2941 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
2942 << 8;
2943 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2944
2945 hba[cntl_num]->drv[i].LunID = lunid;
2946
2947
2948 #ifdef CCISS_DEBUG
2949 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2950 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
2951 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
2952 hba[cntl_num]->drv[i].LunID);
2953 #endif /* CCISS_DEBUG */
2954 cciss_read_capacity(cntl_num, i, size_buff, 0,
2955 &total_size, &block_size);
2956 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
2957 block_size, inq_buff, &hba[cntl_num]->drv[i]);
2958 } else {
2959 /* initialize raid_level to indicate a free space */
2960 hba[cntl_num]->drv[i].raid_level = -1;
2961 }
2962 }
2963 kfree(ld_buff);
2964 kfree(size_buff);
2965 kfree(inq_buff);
2966 }
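/*
 * Illustrative sketch (under #if 0, not built) of the report-LUNs parsing
 * in cciss_getgeometry() above: the list length arrives as four big-endian
 * bytes and each entry is 8 bytes long, while the logical drive id is
 * assembled with byte 0 as the least significant byte.  Both helpers are
 * hypothetical and not used by the driver.
 */
#if 0
static unsigned int example_num_luns(const unsigned char len[4])
{
	unsigned int listlength = ((unsigned int)len[0] << 24) |
				  ((unsigned int)len[1] << 16) |
				  ((unsigned int)len[2] << 8)  |
				   (unsigned int)len[3];

	return listlength / 8;			/* 8 bytes per entry */
}

static unsigned int example_lunid(const unsigned char lun[8])
{
	return ((unsigned int)lun[3] << 24) |
	       ((unsigned int)lun[2] << 16) |
	       ((unsigned int)lun[1] << 8)  |
		(unsigned int)lun[0];
}
#endif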
2967
2968 /* Function to find the first free pointer into our hba[] array */
2969 /* Returns -1 if no free entries are left. */
2970 static int alloc_cciss_hba(void)
2971 {
2972 struct gendisk *disk[NWD];
2973 int i, n;
2974 for (n = 0; n < NWD; n++) {
2975 disk[n] = alloc_disk(1 << NWD_SHIFT);
2976 if (!disk[n])
2977 goto out;
2978 }
2979
2980 for(i=0; i< MAX_CTLR; i++) {
2981 if (!hba[i]) {
2982 ctlr_info_t *p;
2983 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2984 if (!p)
2985 goto Enomem;
2986 memset(p, 0, sizeof(ctlr_info_t));
2987 for (n = 0; n < NWD; n++)
2988 p->gendisk[n] = disk[n];
2989 hba[i] = p;
2990 return i;
2991 }
2992 }
2993 printk(KERN_WARNING "cciss: This driver supports a maximum"
2994 " of %d controllers.\n", MAX_CTLR);
2995 goto out;
2996 Enomem:
2997 printk(KERN_ERR "cciss: out of memory.\n");
2998 out:
2999 while (n--)
3000 put_disk(disk[n]);
3001 return -1;
3002 }
3003
3004 static void free_hba(int i)
3005 {
3006 ctlr_info_t *p = hba[i];
3007 int n;
3008
3009 hba[i] = NULL;
3010 for (n = 0; n < NWD; n++)
3011 put_disk(p->gendisk[n]);
3012 kfree(p);
3013 }
3014
3015 /*
3016 * This is it. Find all the controllers and register them. I really hate
3017 * stealing all these major device numbers.
3018 * returns the number of block devices registered.
3019 */
3020 static int __devinit cciss_init_one(struct pci_dev *pdev,
3021 const struct pci_device_id *ent)
3022 {
3023 request_queue_t *q;
3024 int i;
3025 int j;
3026 int rc;
3027
3028 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3029 " bus %d dev %d func %d\n",
3030 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3031 PCI_FUNC(pdev->devfn));
3032 i = alloc_cciss_hba();
3033 if(i < 0)
3034 return (-1);
3035
3036 hba[i]->busy_initializing = 1;
3037
3038 if (cciss_pci_init(hba[i], pdev) != 0)
3039 goto clean1;
3040
3041 sprintf(hba[i]->devname, "cciss%d", i);
3042 hba[i]->ctlr = i;
3043 hba[i]->pdev = pdev;
3044
3045 /* configure PCI DMA stuff */
3046 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3047 printk("cciss: using DAC cycles\n");
3048 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3049 printk("cciss: not using DAC cycles\n");
3050 else {
3051 printk("cciss: no suitable DMA available\n");
3052 goto clean1;
3053 }
3054
3055 /*
3056 * register with the major number, or get a dynamic major number
3057 * by passing 0 as the argument. This is done to support more than
3058 * 8 controllers.
3059 */
3060 if (i < MAX_CTLR_ORIG)
3061 hba[i]->major = MAJOR_NR + i;
3062 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3063 if(rc == -EBUSY || rc == -EINVAL) {
3064 printk(KERN_ERR
3065 "cciss: Unable to get major number %d for %s "
3066 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3067 goto clean1;
3068 }
3069 else {
3070 if (i >= MAX_CTLR_ORIG)
3071 hba[i]->major = rc;
3072 }
3073
3074 /* make sure the board interrupts are off */
3075 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3076 if( request_irq(hba[i]->intr, do_cciss_intr,
3077 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3078 hba[i]->devname, hba[i])) {
3079 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3080 hba[i]->intr, hba[i]->devname);
3081 goto clean2;
3082 }
3083 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3084 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3085 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3086 &(hba[i]->cmd_pool_dhandle));
3087 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3088 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3089 &(hba[i]->errinfo_pool_dhandle));
3090 if((hba[i]->cmd_pool_bits == NULL)
3091 || (hba[i]->cmd_pool == NULL)
3092 || (hba[i]->errinfo_pool == NULL)) {
3093 printk( KERN_ERR "cciss: out of memory");
3094 goto clean4;
3095 }
3096 #ifdef CONFIG_CISS_SCSI_TAPE
3097 hba[i]->scsi_rejects.complete =
3098 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3099 (NR_CMDS + 5), GFP_KERNEL);
3100 if (hba[i]->scsi_rejects.complete == NULL) {
3101 printk( KERN_ERR "cciss: out of memory");
3102 goto clean4;
3103 }
3104 #endif
3105 spin_lock_init(&hba[i]->lock);
3106
3107 /* Initialize the pdev driver private data.
3108 Have it point to hba[i]. */
3109 pci_set_drvdata(pdev, hba[i]);
3110 /* command and error info recs zeroed out before
3111 they are used */
3112 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3113
3114 #ifdef CCISS_DEBUG
3115 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3116 #endif /* CCISS_DEBUG */
3117
3118 cciss_getgeometry(i);
3119
3120 cciss_scsi_setup(i);
3121
3122 /* Turn the interrupts on so we can service requests */
3123 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3124
3125 cciss_procinit(i);
3126 hba[i]->busy_initializing = 0;
3127
3128 for(j=0; j < NWD; j++) { /* mfm */
3129 drive_info_struct *drv = &(hba[i]->drv[j]);
3130 struct gendisk *disk = hba[i]->gendisk[j];
3131
3132 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3133 if (!q) {
3134 printk(KERN_ERR
3135 "cciss: unable to allocate queue for disk %d\n",
3136 j);
3137 break;
3138 }
3139 drv->queue = q;
3140
3141 q->backing_dev_info.ra_pages = READ_AHEAD;
3142 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3143
3144 /* This is a hardware imposed limit. */
3145 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3146
3147 /* This is a limit in the driver and could be eliminated. */
3148 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3149
3150 blk_queue_max_sectors(q, 512);
3151
3152 q->queuedata = hba[i];
3153 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3154 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3155 disk->major = hba[i]->major;
3156 disk->first_minor = j << NWD_SHIFT;
3157 disk->fops = &cciss_fops;
3158 disk->queue = q;
3159 disk->private_data = drv;
3160 /* we must register the controller even if no disks exist */
3161 /* this is for the online array utilities */
3162 if(!drv->heads && j)
3163 continue;
3164 blk_queue_hardsect_size(q, drv->block_size);
3165 set_capacity(disk, drv->nr_blocks);
3166 add_disk(disk);
3167 }
3168
3169 return(1);
3170
3171 clean4:
3172 #ifdef CONFIG_CISS_SCSI_TAPE
3173 if(hba[i]->scsi_rejects.complete)
3174 kfree(hba[i]->scsi_rejects.complete);
3175 #endif
3176 kfree(hba[i]->cmd_pool_bits);
3177 if(hba[i]->cmd_pool)
3178 pci_free_consistent(hba[i]->pdev,
3179 NR_CMDS * sizeof(CommandList_struct),
3180 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3181 if(hba[i]->errinfo_pool)
3182 pci_free_consistent(hba[i]->pdev,
3183 NR_CMDS * sizeof( ErrorInfo_struct),
3184 hba[i]->errinfo_pool,
3185 hba[i]->errinfo_pool_dhandle);
3186 free_irq(hba[i]->intr, hba[i]);
3187 clean2:
3188 unregister_blkdev(hba[i]->major, hba[i]->devname);
3189 clean1:
3190 release_io_mem(hba[i]);
3191 hba[i]->busy_initializing = 0;
3192 free_hba(i);
3193 return(-1);
3194 }
3195
3196 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3197 {
3198 ctlr_info_t *tmp_ptr;
3199 int i, j;
3200 char flush_buf[4];
3201 int return_code;
3202
3203 if (pci_get_drvdata(pdev) == NULL)
3204 {
3205 printk( KERN_ERR "cciss: Unable to remove device \n");
3206 return;
3207 }
3208 tmp_ptr = pci_get_drvdata(pdev);
3209 i = tmp_ptr->ctlr;
3210 if (hba[i] == NULL)
3211 {
3212 printk(KERN_ERR "cciss: device appears to "
3213 "already be removed \n");
3214 return;
3215 }
3216 /* Turn board interrupts off and send the flush cache command */
3217 /* sendcmd will turn off interrupts, and send the flush
3218 * to write all data in the battery backed cache to disks */
3219 memset(flush_buf, 0, 4);
3220 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3221 TYPE_CMD);
3222 if(return_code != IO_OK)
3223 {
3224 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3225 i);
3226 }
3227 free_irq(hba[i]->intr, hba[i]);
3228 pci_set_drvdata(pdev, NULL);
3229 iounmap(hba[i]->vaddr);
3230 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3231 unregister_blkdev(hba[i]->major, hba[i]->devname);
3232 remove_proc_entry(hba[i]->devname, proc_cciss);
3233
3234 /* remove it from the disk list */
3235 for (j = 0; j < NWD; j++) {
3236 struct gendisk *disk = hba[i]->gendisk[j];
3237 if (disk) {
3238 request_queue_t *q = disk->queue;
3239
3240 if (disk->flags & GENHD_FL_UP)
3241 del_gendisk(disk);
3242 if (q)
3243 blk_cleanup_queue(q);
3244 }
3245 }
3246
3247 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3248 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3249 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3250 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3251 kfree(hba[i]->cmd_pool_bits);
3252 #ifdef CONFIG_CISS_SCSI_TAPE
3253 kfree(hba[i]->scsi_rejects.complete);
3254 #endif
3255 release_io_mem(hba[i]);
3256 free_hba(i);
3257 }
3258
3259 static struct pci_driver cciss_pci_driver = {
3260 .name = "cciss",
3261 .probe = cciss_init_one,
3262 .remove = __devexit_p(cciss_remove_one),
3263 .id_table = cciss_pci_device_id, /* id_table */
3264 };
3265
3266 /*
3267 * This is it. Register the PCI driver information for the cards we control
3268 * the OS will call our registered routines when it finds one of our cards.
3269 */
3270 static int __init cciss_init(void)
3271 {
3272 printk(KERN_INFO DRIVER_NAME "\n");
3273
3274 /* Register for our PCI devices */
3275 return pci_module_init(&cciss_pci_driver);
3276 }
3277
3278 static void __exit cciss_cleanup(void)
3279 {
3280 int i;
3281
3282 pci_unregister_driver(&cciss_pci_driver);
3283 /* double check that all controller entries have been removed */
3284 for (i=0; i< MAX_CTLR; i++)
3285 {
3286 if (hba[i] != NULL)
3287 {
3288 printk(KERN_WARNING "cciss: had to remove"
3289 " controller %d\n", i);
3290 cciss_remove_one(hba[i]->pdev);
3291 }
3292 }
3293 remove_proc_entry("cciss", proc_root_driver);
3294 }
3295
3296 static void fail_all_cmds(unsigned long ctlr)
3297 {
3298 /* If we get here, the board is apparently dead. */
3299 ctlr_info_t *h = hba[ctlr];
3300 CommandList_struct *c;
3301 unsigned long flags;
3302
3303 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3304 h->alive = 0; /* the controller apparently died... */
3305
3306 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3307
3308 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3309
3310 /* move everything off the request queue onto the completed queue */
3311 while( (c = h->reqQ) != NULL ) {
3312 removeQ(&(h->reqQ), c);
3313 h->Qdepth--;
3314 addQ (&(h->cmpQ), c);
3315 }
3316
3317 /* Now, fail everything on the completed queue with a HW error */
3318 while( (c = h->cmpQ) != NULL ) {
3319 removeQ(&h->cmpQ, c);
3320 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3321 if (c->cmd_type == CMD_RWREQ) {
3322 complete_command(h, c, 0);
3323 } else if (c->cmd_type == CMD_IOCTL_PEND)
3324 complete(c->waiting);
3325 #ifdef CONFIG_CISS_SCSI_TAPE
3326 else if (c->cmd_type == CMD_SCSI)
3327 complete_scsi_command(c, 0, 0);
3328 #endif
3329 }
3330 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3331 return;
3332 }
3333
3334 module_init(cciss_init);
3335 module_exit(cciss_cleanup);