[PATCH] Add block_device_operations.getgeo block device method
drivers/block/cpqarray.c
1 /*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22 #include <linux/config.h> /* CONFIG_PROC_FS */
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/bio.h>
27 #include <linux/interrupt.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/devfs_fs_kernel.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/blkdev.h>
41 #include <linux/genhd.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44
45
46 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
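/* e.g. SMART2_DRIVER_VERSION(2,6,0) packs to 0x020600 (major 2, minor 6, subminor 0) */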
47
48 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
49 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
50
51 /* Embedded module documentation macros - see modules.h */
52 /* Original author Chris Frantz - Compaq Computer Corporation */
53 MODULE_AUTHOR("Compaq Computer Corporation");
54 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
55 MODULE_LICENSE("GPL");
56
57 #include "cpqarray.h"
58 #include "ida_cmd.h"
59 #include "smart1,2.h"
60 #include "ida_ioctl.h"
61
62 #define READ_AHEAD 128
63 #define NR_CMDS 128 /* This could probably go as high as ~400 */
64
65 #define MAX_CTLR 8
66 #define CTLR_SHIFT 8
67
68 #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
69
70 static int nr_ctlr;
71 static ctlr_info_t *hba[MAX_CTLR];
72
73 static int eisa[8];
74
75 #define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
76
77 /* board_id = Subsystem Device ID & Vendor ID
78 * product = Marketing Name for the board
79 * access = Address of the struct of function pointers
80 */
81 static struct board_type products[] = {
82 { 0x0040110E, "IDA", &smart1_access },
83 { 0x0140110E, "IDA-2", &smart1_access },
84 { 0x1040110E, "IAES", &smart1_access },
85 { 0x2040110E, "SMART", &smart1_access },
86 { 0x3040110E, "SMART-2/E", &smart2e_access },
87 { 0x40300E11, "SMART-2/P", &smart2_access },
88 { 0x40310E11, "SMART-2SL", &smart2_access },
89 { 0x40320E11, "Smart Array 3200", &smart2_access },
90 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
91 { 0x40340E11, "Smart Array 221", &smart2_access },
92 { 0x40400E11, "Integrated Array", &smart4_access },
93 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
94 { 0x40500E11, "Smart Array 4200", &smart4_access },
95 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
96 { 0x40580E11, "Smart Array 431", &smart4_access },
97 };
98
99 /* define the PCI info for the PCI cards this driver can control */
100 static const struct pci_device_id cpqarray_pci_device_id[] =
101 {
102 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
103 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
106 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
108 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
110 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
112 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
113 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
122 { 0 }
123 };
124
125 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
126
127 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
128
129 /* Debug... */
130 #define DBG(s) do { s } while(0)
131 /* Debug (general info)... */
132 #define DBGINFO(s) do { } while(0)
133 /* Debug Paranoid... */
134 #define DBGP(s) do { } while(0)
135 /* Debug Extra Paranoid... */
136 #define DBGPX(s) do { } while(0)
137
138 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
139 static void __iomem *remap_pci_mem(ulong base, ulong size);
140 static int cpqarray_eisa_detect(void);
141 static int pollcomplete(int ctlr);
142 static void getgeometry(int ctlr);
143 static void start_fwbk(int ctlr);
144
145 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
146 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
147
148 static void free_hba(int i);
149 static int alloc_cpqarray_hba(void);
150
151 static int sendcmd(
152 __u8 cmd,
153 int ctlr,
154 void *buff,
155 size_t size,
156 unsigned int blk,
157 unsigned int blkcnt,
158 unsigned int log_unit );
159
160 static int ida_open(struct inode *inode, struct file *filep);
161 static int ida_release(struct inode *inode, struct file *filep);
162 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
163 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
164 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
165
166 static void do_ida_request(request_queue_t *q);
167 static void start_io(ctlr_info_t *h);
168
169 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
170 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
171 static inline void complete_buffers(struct bio *bio, int ok);
172 static inline void complete_command(cmdlist_t *cmd, int timeout);
173
174 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
175 static void ida_timer(unsigned long tdata);
176 static int ida_revalidate(struct gendisk *disk);
177 static int revalidate_allvol(ctlr_info_t *host);
178 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
179
180 #ifdef CONFIG_PROC_FS
181 static void ida_procinit(int i);
182 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
183 #else
184 static void ida_procinit(int i) {}
185 #endif
186
187 static inline drv_info_t *get_drv(struct gendisk *disk)
188 {
189 return disk->private_data;
190 }
191
192 static inline ctlr_info_t *get_host(struct gendisk *disk)
193 {
194 return disk->queue->queuedata;
195 }
196
197
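/*
 * Block device methods for the ida driver.  With the new .getgeo method the
 * generic block layer can service HDIO_GETGEO itself by calling ida_getgeo(),
 * so geometry reporting no longer needs to be handled inside ida_ioctl().
 */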
198 static struct block_device_operations ida_fops = {
199 .owner = THIS_MODULE,
200 .open = ida_open,
201 .release = ida_release,
202 .ioctl = ida_ioctl,
203 .getgeo = ida_getgeo,
204 .revalidate_disk= ida_revalidate,
205 };
206
207
208 #ifdef CONFIG_PROC_FS
209
210 static struct proc_dir_entry *proc_array;
211
212 /*
213 * Give each controller a file in /proc/driver/cpqarray that reports its state.
214 * Create the /proc/driver/cpqarray directory if it doesn't exist yet.
215 */
216 static void __init ida_procinit(int i)
217 {
218 if (proc_array == NULL) {
219 proc_array = proc_mkdir("cpqarray", proc_root_driver);
220 if (!proc_array) return;
221 }
222
223 create_proc_read_entry(hba[i]->devname, 0, proc_array,
224 ida_proc_get_info, hba[i]);
225 }
226
227 /*
228 * Report information about this controller.
229 */
230 static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
231 {
232 off_t pos = 0;
233 off_t len = 0;
234 int size, i, ctlr;
235 ctlr_info_t *h = (ctlr_info_t*)data;
236 drv_info_t *drv;
237 #ifdef CPQ_PROC_PRINT_QUEUES
238 cmdlist_t *c;
239 unsigned long flags;
240 #endif
241
242 ctlr = h->ctlr;
243 size = sprintf(buffer, "%s: Compaq %s Controller\n"
244 " Board ID: 0x%08lx\n"
245 " Firmware Revision: %c%c%c%c\n"
246 " Controller Sig: 0x%08lx\n"
247 " Memory Address: 0x%08lx\n"
248 " I/O Port: 0x%04x\n"
249 " IRQ: %d\n"
250 " Logical drives: %d\n"
251 " Physical drives: %d\n\n"
252 " Current Q depth: %d\n"
253 " Max Q depth since init: %d\n\n",
254 h->devname,
255 h->product_name,
256 (unsigned long)h->board_id,
257 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
258 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
259 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
260 h->log_drives, h->phys_drives,
261 h->Qdepth, h->maxQsinceinit);
262
263 pos += size; len += size;
264
265 size = sprintf(buffer+len, "Logical Drive Info:\n");
266 pos += size; len += size;
267
268 for(i=0; i<h->log_drives; i++) {
269 drv = &h->drv[i];
270 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
271 ctlr, i, drv->blk_size, drv->nr_blks);
272 pos += size; len += size;
273 }
274
275 #ifdef CPQ_PROC_PRINT_QUEUES
276 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
277 size = sprintf(buffer+len, "\nCurrent Queues:\n");
278 pos += size; len += size;
279
280 c = h->reqQ;
281 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
282 if (c) c=c->next;
283 while(c && c != h->reqQ) {
284 size = sprintf(buffer+len, "->%p", c);
285 pos += size; len += size;
286 c=c->next;
287 }
288
289 c = h->cmpQ;
290 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
291 if (c) c=c->next;
292 while(c && c != h->cmpQ) {
293 size = sprintf(buffer+len, "->%p", c);
294 pos += size; len += size;
295 c=c->next;
296 }
297
298 size = sprintf(buffer+len, "\n"); pos += size; len += size;
299 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
300 #endif
301 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
302 h->nr_allocs, h->nr_frees);
303 pos += size; len += size;
304
305 *eof = 1;
306 *start = buffer+offset;
307 len -= offset;
308 if (len>length)
309 len = length;
310 return len;
311 }
312 #endif /* CONFIG_PROC_FS */
313
314 module_param_array(eisa, int, NULL, 0);
315
316 static void release_io_mem(ctlr_info_t *c)
317 {
318 /* if the I/O memory region was never reserved, there is nothing to release */
319 if( c->io_mem_addr == 0)
320 return;
321 release_region(c->io_mem_addr, c->io_mem_length);
322 c->io_mem_addr = 0;
323 c->io_mem_length = 0;
324 }
325
326 static void __devexit cpqarray_remove_one(int i)
327 {
328 int j;
329 char buff[4];
330
331 /* sendcmd will turn off interrupts and send the flush command
332 * to write all data in the battery-backed cache out to the disks.
333 * No data is returned, but we don't want to pass NULL to sendcmd. */
334 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
335 {
336 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
337 i);
338 }
339 free_irq(hba[i]->intr, hba[i]);
340 iounmap(hba[i]->vaddr);
341 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
342 del_timer(&hba[i]->timer);
343 remove_proc_entry(hba[i]->devname, proc_array);
344 pci_free_consistent(hba[i]->pci_dev,
345 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
346 hba[i]->cmd_pool_dhandle);
347 kfree(hba[i]->cmd_pool_bits);
348 for(j = 0; j < NWD; j++) {
349 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
350 del_gendisk(ida_gendisk[i][j]);
351 devfs_remove("ida/c%dd%d",i,j);
352 put_disk(ida_gendisk[i][j]);
353 }
354 blk_cleanup_queue(hba[i]->queue);
355 release_io_mem(hba[i]);
356 free_hba(i);
357 }
358
359 static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
360 {
361 int i;
362 ctlr_info_t *tmp_ptr;
363
364 if (pci_get_drvdata(pdev) == NULL) {
365 printk( KERN_ERR "cpqarray: Unable to remove device \n");
366 return;
367 }
368
369 tmp_ptr = pci_get_drvdata(pdev);
370 i = tmp_ptr->ctlr;
371 if (hba[i] == NULL) {
372 printk(KERN_ERR "cpqarray: controller %d appears to have"
373 " already been removed\n", i);
374 return;
375 }
376 pci_set_drvdata(pdev, NULL);
377
378 cpqarray_remove_one(i);
379 }
380
381 /* Removing an instance that was not removed automatically;
382 * it must be an EISA card.
383 */
384 static void __devexit cpqarray_remove_one_eisa (int i)
385 {
386 if (hba[i] == NULL) {
387 printk(KERN_ERR "cpqarray: controller %d appears to have"
388 " already been removed\n", i);
389 return;
390 }
391 cpqarray_remove_one(i);
392 }
393
394 /* pdev is NULL for eisa */
395 static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
396 {
397 request_queue_t *q;
398 int j;
399
400 /*
401 * register block devices
402 * Find disks and fill in structs
403 * Get an interrupt, set the Q depth and get into /proc
404 */
405
406 /* If this is successful it should ensure that we are the only */
407 /* instance of the driver */
408 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
409 goto Enomem4;
410 }
411 hba[i]->access.set_intr_mask(hba[i], 0);
412 if (request_irq(hba[i]->intr, do_ida_intr,
413 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
414 hba[i]->devname, hba[i]))
415 {
416 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
417 hba[i]->intr, hba[i]->devname);
418 goto Enomem3;
419 }
420
421 for (j=0; j<NWD; j++) {
422 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
423 if (!ida_gendisk[i][j])
424 goto Enomem2;
425 }
426
427 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
428 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
429 &(hba[i]->cmd_pool_dhandle));
430 hba[i]->cmd_pool_bits = kmalloc(
431 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
432 GFP_KERNEL);
433
434 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
435 goto Enomem1;
436
437 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
438 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
439 printk(KERN_INFO "cpqarray: Finding drives on %s",
440 hba[i]->devname);
441
442 spin_lock_init(&hba[i]->lock);
443 q = blk_init_queue(do_ida_request, &hba[i]->lock);
444 if (!q)
445 goto Enomem1;
446
447 hba[i]->queue = q;
448 q->queuedata = hba[i];
449
450 getgeometry(i);
451 start_fwbk(i);
452
453 ida_procinit(i);
454
455 if (pdev)
456 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
457
458 /* This is a hardware imposed limit. */
459 blk_queue_max_hw_segments(q, SG_MAX);
460
461 /* This is a driver limit and could be eliminated. */
462 blk_queue_max_phys_segments(q, SG_MAX);
463
464 init_timer(&hba[i]->timer);
465 hba[i]->timer.expires = jiffies + IDA_TIMER;
466 hba[i]->timer.data = (unsigned long)hba[i];
467 hba[i]->timer.function = ida_timer;
468 add_timer(&hba[i]->timer);
469
470 /* Enable IRQ now that spinlock and rate limit timer are set up */
471 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
472
473 for(j=0; j<NWD; j++) {
474 struct gendisk *disk = ida_gendisk[i][j];
475 drv_info_t *drv = &hba[i]->drv[j];
476 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
477 disk->major = COMPAQ_SMART2_MAJOR + i;
478 disk->first_minor = j<<NWD_SHIFT;
479 disk->fops = &ida_fops;
480 if (j && !drv->nr_blks)
481 continue;
482 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
483 set_capacity(disk, drv->nr_blks);
484 disk->queue = hba[i]->queue;
485 disk->private_data = drv;
486 add_disk(disk);
487 }
488
489 /* done ! */
490 return(i);
491
492 Enomem1:
493 nr_ctlr = i;
494 kfree(hba[i]->cmd_pool_bits);
495 if (hba[i]->cmd_pool)
496 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
497 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
498 Enomem2:
499 while (j--) {
500 put_disk(ida_gendisk[i][j]);
501 ida_gendisk[i][j] = NULL;
502 }
503 free_irq(hba[i]->intr, hba[i]);
504 Enomem3:
505 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
506 Enomem4:
507 if (pdev)
508 pci_set_drvdata(pdev, NULL);
509 release_io_mem(hba[i]);
510 free_hba(i);
511
512 printk(KERN_ERR "cpqarray: out of memory\n");
513
514 return -1;
515 }
516
517 static int __init cpqarray_init_one( struct pci_dev *pdev,
518 const struct pci_device_id *ent)
519 {
520 int i;
521
522 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
523 " bus %d dev %d func %d\n",
524 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
525 PCI_FUNC(pdev->devfn));
526 i = alloc_cpqarray_hba();
527 if( i < 0 )
528 return (-1);
529 memset(hba[i], 0, sizeof(ctlr_info_t));
530 sprintf(hba[i]->devname, "ida%d", i);
531 hba[i]->ctlr = i;
532 /* Initialize the pdev driver private data */
533 pci_set_drvdata(pdev, hba[i]);
534
535 if (cpqarray_pci_init(hba[i], pdev) != 0) {
536 pci_set_drvdata(pdev, NULL);
537 release_io_mem(hba[i]);
538 free_hba(i);
539 return -1;
540 }
541
542 return (cpqarray_register_ctlr(i, pdev));
543 }
544
545 static struct pci_driver cpqarray_pci_driver = {
546 .name = "cpqarray",
547 .probe = cpqarray_init_one,
548 .remove = __devexit_p(cpqarray_remove_one_pci),
549 .id_table = cpqarray_pci_device_id,
550 };
551
552 /*
553 * This is it. Find all the controllers and register them.
554 * returns the number of block devices registered.
555 */
556 static int __init cpqarray_init(void)
557 {
558 int num_cntlrs_reg = 0;
559 int i;
560 int rc = 0;
561
562 /* detect controllers */
563 printk(DRIVER_NAME "\n");
564
565 rc = pci_register_driver(&cpqarray_pci_driver);
566 if (rc)
567 return rc;
568 cpqarray_eisa_detect();
569
570 for (i=0; i < MAX_CTLR; i++) {
571 if (hba[i] != NULL)
572 num_cntlrs_reg++;
573 }
574
575 return(num_cntlrs_reg);
576 }
577
578 /* Function to find the first free pointer into our hba[] array */
579 /* Returns -1 if no free entries are left. */
580 static int alloc_cpqarray_hba(void)
581 {
582 int i;
583
584 for(i=0; i< MAX_CTLR; i++) {
585 if (hba[i] == NULL) {
586 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
587 if(hba[i]==NULL) {
588 printk(KERN_ERR "cpqarray: out of memory.\n");
589 return (-1);
590 }
591 return (i);
592 }
593 }
594 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
595 " of 8 controllers.\n");
596 return(-1);
597 }
598
599 static void free_hba(int i)
600 {
601 kfree(hba[i]);
602 hba[i]=NULL;
603 }
604
605 /*
606 * Find the IO address of the controller, its IRQ and so forth. Fill
607 * in some basic stuff into the ctlr_info_t structure.
608 */
609 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
610 {
611 ushort vendor_id, device_id, command;
612 unchar cache_line_size, latency_timer;
613 unchar irq, revision;
614 unsigned long addr[6];
615 __u32 board_id;
616
617 int i;
618
619 c->pci_dev = pdev;
620 if (pci_enable_device(pdev)) {
621 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
622 return -1;
623 }
624 vendor_id = pdev->vendor;
625 device_id = pdev->device;
626 irq = pdev->irq;
627
628 for(i=0; i<6; i++)
629 addr[i] = pci_resource_start(pdev, i);
630
631 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
632 {
633 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
634 return -1;
635 }
636
637 pci_read_config_word(pdev, PCI_COMMAND, &command);
638 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
639 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
640 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
641
642 pci_read_config_dword(pdev, 0x2c, &board_id);
643
644 /* check to see if controller has been disabled */
645 if(!(command & 0x02)) {
646 printk(KERN_WARNING
647 "cpqarray: controller appears to be disabled\n");
648 return(-1);
649 }
650
651 DBGINFO(
652 printk("vendor_id = %x\n", vendor_id);
653 printk("device_id = %x\n", device_id);
654 printk("command = %x\n", command);
655 for(i=0; i<6; i++)
656 printk("addr[%d] = %lx\n", i, addr[i]);
657 printk("revision = %x\n", revision);
658 printk("irq = %x\n", irq);
659 printk("cache_line_size = %x\n", cache_line_size);
660 printk("latency_timer = %x\n", latency_timer);
661 printk("board_id = %x\n", board_id);
662 );
663
664 c->intr = irq;
665
666 for(i=0; i<6; i++) {
667 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
668 { /* IO space */
669 c->io_mem_addr = addr[i];
670 c->io_mem_length = pci_resource_end(pdev, i)
671 - pci_resource_start(pdev, i) + 1;
672 if(!request_region( c->io_mem_addr, c->io_mem_length,
673 "cpqarray"))
674 {
675 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
676 c->io_mem_addr = 0;
677 c->io_mem_length = 0;
678 }
679 break;
680 }
681 }
682
683 c->paddr = 0;
684 for(i=0; i<6; i++)
685 if (!(pci_resource_flags(pdev, i) &
686 PCI_BASE_ADDRESS_SPACE_IO)) {
687 c->paddr = pci_resource_start (pdev, i);
688 break;
689 }
690 if (!c->paddr)
691 return -1;
692 c->vaddr = remap_pci_mem(c->paddr, 128);
693 if (!c->vaddr)
694 return -1;
695 c->board_id = board_id;
696
697 for(i=0; i<NR_PRODUCTS; i++) {
698 if (board_id == products[i].board_id) {
699 c->product_name = products[i].product_name;
700 c->access = *(products[i].access);
701 break;
702 }
703 }
704 if (i == NR_PRODUCTS) {
705 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
706 " to access the SMART Array controller %08lx\n",
707 (unsigned long)board_id);
708 return -1;
709 }
710
711 return 0;
712 }
713
714 /*
715 * Map (physical) PCI mem into (virtual) kernel space
716 */
717 static void __iomem *remap_pci_mem(ulong base, ulong size)
718 {
719 ulong page_base = ((ulong) base) & PAGE_MASK;
720 ulong page_offs = ((ulong) base) - page_base;
721 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
722
723 return (page_remapped ? (page_remapped + page_offs) : NULL);
724 }
725
726 #ifndef MODULE
727 /*
728 * Config string is a comma-separated set of I/O addresses of EISA cards.
729 */
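/*
 * For example (illustrative addresses only), booting with "smart2=0x4000,0x5000"
 * stores those two EISA I/O addresses in eisa[] for cpqarray_eisa_detect() to probe.
 */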
730 static int cpqarray_setup(char *str)
731 {
732 int i, ints[9];
733
734 (void)get_options(str, ARRAY_SIZE(ints), ints);
735
736 for(i=0; i<ints[0] && i<8; i++)
737 eisa[i] = ints[i+1];
738 return 1;
739 }
740
741 __setup("smart2=", cpqarray_setup);
742
743 #endif
744
745 /*
746 * Find an EISA controller's signature. Set up an hba if we find it.
747 */
748 static int cpqarray_eisa_detect(void)
749 {
750 int i=0, j;
751 __u32 board_id;
752 int intr;
753 int ctlr;
754 int num_ctlr = 0;
755
756 while(i<8 && eisa[i]) {
757 ctlr = alloc_cpqarray_hba();
758 if(ctlr == -1)
759 break;
760 board_id = inl(eisa[i]+0xC80);
761 for(j=0; j < NR_PRODUCTS; j++)
762 if (board_id == products[j].board_id)
763 break;
764
765 if (j == NR_PRODUCTS) {
766 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
767 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
768 continue;
769 }
770
771 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
772 hba[ctlr]->io_mem_addr = eisa[i];
773 hba[ctlr]->io_mem_length = 0x7FF;
774 if(!request_region(hba[ctlr]->io_mem_addr,
775 hba[ctlr]->io_mem_length,
776 "cpqarray"))
777 {
778 printk(KERN_WARNING "cpqarray: I/O range already in "
779 "use addr = %lx length = %ld\n",
780 hba[ctlr]->io_mem_addr,
781 hba[ctlr]->io_mem_length);
782 free_hba(ctlr);
783 continue;
784 }
785
786 /*
787 * Read the config register to find our interrupt
788 */
789 intr = inb(eisa[i]+0xCC0) >> 4;
790 if (intr & 1) intr = 11;
791 else if (intr & 2) intr = 10;
792 else if (intr & 4) intr = 14;
793 else if (intr & 8) intr = 15;
794
795 hba[ctlr]->intr = intr;
796 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
797 hba[ctlr]->product_name = products[j].product_name;
798 hba[ctlr]->access = *(products[j].access);
799 hba[ctlr]->ctlr = ctlr;
800 hba[ctlr]->board_id = board_id;
801 hba[ctlr]->pci_dev = NULL; /* not PCI */
802
803 DBGINFO(
804 printk("i = %d, j = %d\n", i, j);
805 printk("irq = %x\n", intr);
806 printk("product name = %s\n", products[j].product_name);
807 printk("board_id = %x\n", board_id);
808 );
809
810 num_ctlr++;
811 i++;
812
813 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
814 printk(KERN_WARNING
815 "cpqarray: Can't register EISA controller %d\n",
816 ctlr);
817
818 }
819
820 return num_ctlr;
821 }
822
823 /*
824 * Open. Make sure the device is really there.
825 */
826 static int ida_open(struct inode *inode, struct file *filep)
827 {
828 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
829 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
830
831 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
832 /*
833 * Root is allowed to open raw volume zero even if it's not configured
834 * so array config can still work. I don't think I really like this,
835 * but I'm already using way too many device nodes to claim another one
836 * for "raw controller".
837 */
838 if (!drv->nr_blks) {
839 if (!capable(CAP_SYS_RAWIO))
840 return -ENXIO;
841 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
842 return -ENXIO;
843 }
844 host->usage_count++;
845 return 0;
846 }
847
848 /*
849 * Close. Sync first.
850 */
851 static int ida_release(struct inode *inode, struct file *filep)
852 {
853 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
854 host->usage_count--;
855 return 0;
856 }
857
858 /*
859 * Enqueuing and dequeuing functions for cmdlists.
860 */
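/*
 * reqQ and cmpQ are circular, doubly-linked lists of cmdlist_t; *Qptr points
 * at the head and is NULL when the queue is empty.  addQ() links c in just
 * before the head (i.e. at the tail), removeQ() unlinks the given entry.
 */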
861 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
862 {
863 if (*Qptr == NULL) {
864 *Qptr = c;
865 c->next = c->prev = c;
866 } else {
867 c->prev = (*Qptr)->prev;
868 c->next = (*Qptr);
869 (*Qptr)->prev->next = c;
870 (*Qptr)->prev = c;
871 }
872 }
873
874 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
875 {
876 if (c && c->next != c) {
877 if (*Qptr == c) *Qptr = c->next;
878 c->prev->next = c->next;
879 c->next->prev = c->prev;
880 } else {
881 *Qptr = NULL;
882 }
883 return c;
884 }
885
886 /*
887 * Get a request and submit it to the controller.
888 * This routine needs to grab all the requests it possibly can from the
889 * req Q and submit them. Interrupts are off (and need to be off) when you
890 * are in here (either via the dummy do_ida_request functions or by being
891 * called from the interrupt handler).
892 */
893 static void do_ida_request(request_queue_t *q)
894 {
895 ctlr_info_t *h = q->queuedata;
896 cmdlist_t *c;
897 struct request *creq;
898 struct scatterlist tmp_sg[SG_MAX];
899 int i, dir, seg;
900
901 if (blk_queue_plugged(q))
902 goto startio;
903
904 queue_next:
905 creq = elv_next_request(q);
906 if (!creq)
907 goto startio;
908
909 if (creq->nr_phys_segments > SG_MAX)
910 BUG();
911
912 if ((c = cmd_alloc(h,1)) == NULL)
913 goto startio;
914
915 blkdev_dequeue_request(creq);
916
917 c->ctlr = h->ctlr;
918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
919 c->hdr.size = sizeof(rblk_t) >> 2;
920 c->size += sizeof(rblk_t);
921
922 c->req.hdr.blk = creq->sector;
923 c->rq = creq;
924 DBGPX(
925 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
926 );
927 seg = blk_rq_map_sg(q, creq, tmp_sg);
928
929 /* Now do all the DMA Mappings */
930 if (rq_data_dir(creq) == READ)
931 dir = PCI_DMA_FROMDEVICE;
932 else
933 dir = PCI_DMA_TODEVICE;
934 for( i=0; i < seg; i++)
935 {
936 c->req.sg[i].size = tmp_sg[i].length;
937 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
938 tmp_sg[i].page,
939 tmp_sg[i].offset,
940 tmp_sg[i].length, dir);
941 }
942 DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
943 c->req.hdr.sg_cnt = seg;
944 c->req.hdr.blk_cnt = creq->nr_sectors;
945 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
946 c->type = CMD_RWREQ;
947
948 /* Put the request on the tail of the request queue */
949 addQ(&h->reqQ, c);
950 h->Qdepth++;
951 if (h->Qdepth > h->maxQsinceinit)
952 h->maxQsinceinit = h->Qdepth;
953
954 goto queue_next;
955
956 startio:
957 start_io(h);
958 }
959
960 /*
961 * start_io submits everything on a controller's request queue
962 * and moves it to the completion queue.
963 *
964 * Interrupts had better be off if you're in here
965 */
966 static void start_io(ctlr_info_t *h)
967 {
968 cmdlist_t *c;
969
970 while((c = h->reqQ) != NULL) {
971 /* Can't do anything if we're busy */
972 if (h->access.fifo_full(h) == 0)
973 return;
974
975 /* Get the first entry from the request Q */
976 removeQ(&h->reqQ, c);
977 h->Qdepth--;
978
979 /* Tell the controller to do our bidding */
980 h->access.submit_command(h, c);
981
982 /* Get onto the completion Q */
983 addQ(&h->cmpQ, c);
984 }
985 }
986
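/*
 * Walk the chain of bios attached to a completed request and end each one,
 * reporting -EIO on every bio if the command did not succeed.
 */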
987 static inline void complete_buffers(struct bio *bio, int ok)
988 {
989 struct bio *xbh;
990 while(bio) {
991 int nr_sectors = bio_sectors(bio);
992
993 xbh = bio->bi_next;
994 bio->bi_next = NULL;
995
996 blk_finished_io(nr_sectors);
997 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
998
999 bio = xbh;
1000 }
1001 }
1002 /*
1003 * Mark all buffers that cmd was responsible for
1004 */
1005 static inline void complete_command(cmdlist_t *cmd, int timeout)
1006 {
1007 int ok=1;
1008 int i, ddir;
1009
1010 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1011 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1012 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1013 cmd->ctlr, cmd->hdr.unit);
1014 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1015 }
1016 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1017 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1018 cmd->ctlr, cmd->hdr.unit);
1019 ok = 0;
1020 }
1021 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1022 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1023 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1024 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1025 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1026 ok = 0;
1027 }
1028 if (timeout) ok = 0;
1029 /* unmap the DMA mapping for all the scatter gather elements */
1030 if (cmd->req.hdr.cmd == IDA_READ)
1031 ddir = PCI_DMA_FROMDEVICE;
1032 else
1033 ddir = PCI_DMA_TODEVICE;
1034 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1035 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1036 cmd->req.sg[i].size, ddir);
1037
1038 complete_buffers(cmd->rq->bio, ok);
1039
1040 DBGPX(printk("Done with %p\n", cmd->rq););
1041 end_that_request_last(cmd->rq, ok ? 1 : -EIO);
1042 }
1043
1044 /*
1045 * The controller will interrupt us upon completion of commands.
1046 * Find the command on the completion queue, remove it, tell the OS and
1047 * try to queue up more IO
1048 */
1049 static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1050 {
1051 ctlr_info_t *h = dev_id;
1052 cmdlist_t *c;
1053 unsigned long istat;
1054 unsigned long flags;
1055 __u32 a,a1;
1056
1057 istat = h->access.intr_pending(h);
1058 /* Is this interrupt for us? */
1059 if (istat == 0)
1060 return IRQ_NONE;
1061
1062 /*
1063 * If there are completed commands in the completion queue,
1064 * we had better do something about it.
1065 */
1066 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1067 if (istat & FIFO_NOT_EMPTY) {
1068 while((a = h->access.command_completed(h))) {
1069 a1 = a; a &= ~3;
1070 if ((c = h->cmpQ) == NULL)
1071 {
1072 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1073 continue;
1074 }
1075 while(c->busaddr != a) {
1076 c = c->next;
1077 if (c == h->cmpQ)
1078 break;
1079 }
1080 /*
1081 * If we've found the command, take it off the
1082 * completion Q and free it
1083 */
1084 if (c->busaddr == a) {
1085 removeQ(&h->cmpQ, c);
1086 /* Check for invalid command.
1087 * Controller returns command error,
1088 * But rcode = 0.
1089 */
1090
1091 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1092 {
1093 c->req.hdr.rcode = RCODE_INVREQ;
1094 }
1095 if (c->type == CMD_RWREQ) {
1096 complete_command(c, 0);
1097 cmd_free(h, c, 1);
1098 } else if (c->type == CMD_IOCTL_PEND) {
1099 c->type = CMD_IOCTL_DONE;
1100 }
1101 continue;
1102 }
1103 }
1104 }
1105
1106 /*
1107 * See if we can queue up some more IO
1108 */
1109 do_ida_request(h->queue);
1110 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1111 return IRQ_HANDLED;
1112 }
1113
1114 /*
1115 * This timer was for timing out requests that hadn't completed after
1116 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1117 * reset a flags structure so we don't flood the user with
1118 * "Non-Fatal error" messages.
1119 */
1120 static void ida_timer(unsigned long tdata)
1121 {
1122 ctlr_info_t *h = (ctlr_info_t*)tdata;
1123
1124 h->timer.expires = jiffies + IDA_TIMER;
1125 add_timer(&h->timer);
1126 h->misc_tflags = 0;
1127 }
1128
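/*
 * Report drive geometry to the block layer (this is what backs HDIO_GETGEO).
 * If the controller reported a real geometry for the logical drive we pass it
 * through; otherwise we fake 255 heads and 63 sectors and derive the cylinder
 * count from the block count.  A userspace caller would typically do
 * something like (sketch only):
 *
 *	struct hd_geometry geo;
 *	ioctl(fd, HDIO_GETGEO, &geo);
 */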
1129 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1130 {
1131 drv_info_t *drv = get_drv(bdev->bd_disk);
1132
1133 if (drv->cylinders) {
1134 geo->heads = drv->heads;
1135 geo->sectors = drv->sectors;
1136 geo->cylinders = drv->cylinders;
1137 } else {
1138 geo->heads = 0xff;
1139 geo->sectors = 0x3f;
1140 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1141 }
1142
1143 return 0;
1144 }
1145
1146 /*
1147 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1148 * setting readahead and submitting commands from userspace to the controller.
1149 */
1150 static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1151 {
1152 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1153 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1154 int error;
1155 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1156 ida_ioctl_t *my_io;
1157
1158 switch(cmd) {
1159 case IDAGETDRVINFO:
1160 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1161 return -EFAULT;
1162 return 0;
1163 case IDAPASSTHRU:
1164 if (!capable(CAP_SYS_RAWIO))
1165 return -EPERM;
1166 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1167 if (!my_io)
1168 return -ENOMEM;
1169 error = -EFAULT;
1170 if (copy_from_user(my_io, io, sizeof(*my_io)))
1171 goto out_passthru;
1172 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1173 if (error)
1174 goto out_passthru;
1175 error = -EFAULT;
1176 if (copy_to_user(io, my_io, sizeof(*my_io)))
1177 goto out_passthru;
1178 error = 0;
1179 out_passthru:
1180 kfree(my_io);
1181 return error;
1182 case IDAGETCTLRSIG:
1183 if (!arg) return -EINVAL;
1184 put_user(host->ctlr_sig, (int __user *)arg);
1185 return 0;
1186 case IDAREVALIDATEVOLS:
1187 if (iminor(inode) != 0)
1188 return -ENXIO;
1189 return revalidate_allvol(host);
1190 case IDADRIVERVERSION:
1191 if (!arg) return -EINVAL;
1192 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1193 return 0;
1194 case IDAGETPCIINFO:
1195 {
1196
1197 ida_pci_info_struct pciinfo;
1198
1199 if (!arg) return -EINVAL;
1200 pciinfo.bus = host->pci_dev->bus->number;
1201 pciinfo.dev_fn = host->pci_dev->devfn;
1202 pciinfo.board_id = host->board_id;
1203 if(copy_to_user((void __user *) arg, &pciinfo,
1204 sizeof( ida_pci_info_struct)))
1205 return -EFAULT;
1206 return(0);
1207 }
1208
1209 default:
1210 return -EINVAL;
1211 }
1212
1213 }
1214 /*
1215 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1216 * The command block (io) has already been copied to kernel space for us,
1217 * however, any elements in the sglist need to be copied to kernel space
1218 * or copied back to userspace.
1219 *
1220 * Only root may perform a controller passthru command, however I'm not doing
1221 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1222 * putting a 64M buffer in the sglist is probably a *bad* idea.
1223 */
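/*
 * Sketch of a userspace passthru call (illustrative only; "fd" and "buf" are
 * assumed caller-supplied, see the pre/post-submit handling below):
 *
 *	ida_ioctl_t io;
 *	memset(&io, 0, sizeof(io));
 *	io.cmd = SENSE_CONTROLLER_PERFORMANCE;
 *	io.sg[0].addr = buf;		// user buffer for the returned data
 *	io.sg[0].size = sizeof(buf);
 *	if (ioctl(fd, IDAPASSTHRU, &io) == 0)
 *		;			// io.rcode holds the controller's return code
 */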
1224 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1225 {
1226 int ctlr = h->ctlr;
1227 cmdlist_t *c;
1228 void *p = NULL;
1229 unsigned long flags;
1230 int error;
1231
1232 if ((c = cmd_alloc(h, 0)) == NULL)
1233 return -ENOMEM;
1234 c->ctlr = ctlr;
1235 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1236 c->hdr.size = sizeof(rblk_t) >> 2;
1237 c->size += sizeof(rblk_t);
1238
1239 c->req.hdr.cmd = io->cmd;
1240 c->req.hdr.blk = io->blk;
1241 c->req.hdr.blk_cnt = io->blk_cnt;
1242 c->type = CMD_IOCTL_PEND;
1243
1244 /* Pre submit processing */
1245 switch(io->cmd) {
1246 case PASSTHRU_A:
1247 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1248 if (!p)
1249 {
1250 error = -ENOMEM;
1251 cmd_free(h, c, 0);
1252 return(error);
1253 }
1254 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1255 kfree(p);
1256 cmd_free(h, c, 0);
1257 return -EFAULT;
1258 }
1259 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1260 sizeof(ida_ioctl_t),
1261 PCI_DMA_BIDIRECTIONAL);
1262 c->req.sg[0].size = io->sg[0].size;
1263 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1264 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1265 c->req.hdr.sg_cnt = 1;
1266 break;
1267 case IDA_READ:
1268 case READ_FLASH_ROM:
1269 case SENSE_CONTROLLER_PERFORMANCE:
1270 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1271 if (!p)
1272 {
1273 error = -ENOMEM;
1274 cmd_free(h, c, 0);
1275 return(error);
1276 }
1277
1278 c->req.sg[0].size = io->sg[0].size;
1279 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1280 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1281 c->req.hdr.sg_cnt = 1;
1282 break;
1283 case IDA_WRITE:
1284 case IDA_WRITE_MEDIA:
1285 case DIAG_PASS_THRU:
1286 case COLLECT_BUFFER:
1287 case WRITE_FLASH_ROM:
1288 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1289 if (!p)
1290 {
1291 error = -ENOMEM;
1292 cmd_free(h, c, 0);
1293 return(error);
1294 }
1295 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1296 kfree(p);
1297 cmd_free(h, c, 0);
1298 return -EFAULT;
1299 }
1300 c->req.sg[0].size = io->sg[0].size;
1301 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1302 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1303 c->req.hdr.sg_cnt = 1;
1304 break;
1305 default:
1306 c->req.sg[0].size = sizeof(io->c);
1307 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1308 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1309 c->req.hdr.sg_cnt = 1;
1310 }
1311
1312 /* Put the request on the tail of the request queue */
1313 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1314 addQ(&h->reqQ, c);
1315 h->Qdepth++;
1316 start_io(h);
1317 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1318
1319 /* Wait for completion */
1320 while(c->type != CMD_IOCTL_DONE)
1321 schedule();
1322
1323 /* Unmap the DMA */
1324 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1325 PCI_DMA_BIDIRECTIONAL);
1326 /* Post submit processing */
1327 switch(io->cmd) {
1328 case PASSTHRU_A:
1329 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1330 sizeof(ida_ioctl_t),
1331 PCI_DMA_BIDIRECTIONAL);
1332 case IDA_READ:
1333 case DIAG_PASS_THRU:
1334 case SENSE_CONTROLLER_PERFORMANCE:
1335 case READ_FLASH_ROM:
1336 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1337 kfree(p);
1338 return -EFAULT;
1339 }
1340 /* fall through and free p */
1341 case IDA_WRITE:
1342 case IDA_WRITE_MEDIA:
1343 case COLLECT_BUFFER:
1344 case WRITE_FLASH_ROM:
1345 kfree(p);
1346 break;
1347 default:;
1348 /* Nothing to do */
1349 }
1350
1351 io->rcode = c->req.hdr.rcode;
1352 cmd_free(h, c, 0);
1353 return(0);
1354 }
1355
1356 /*
1357 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1358 * scheme to suballocate them to the driver. Operations that are not time
1359 * critical (and don't mind a fresh pci_alloc_consistent allocation) can pass
1360 * 0 for get_from_pool to have a command allocated outside the pool.
1361 */
1362 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1363 {
1364 cmdlist_t * c;
1365 int i;
1366 dma_addr_t cmd_dhandle;
1367
1368 if (!get_from_pool) {
1369 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1370 sizeof(cmdlist_t), &cmd_dhandle);
1371 if(c==NULL)
1372 return NULL;
1373 } else {
1374 do {
1375 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1376 if (i == NR_CMDS)
1377 return NULL;
1378 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1379 c = h->cmd_pool + i;
1380 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1381 h->nr_allocs++;
1382 }
1383
1384 memset(c, 0, sizeof(cmdlist_t));
1385 c->busaddr = cmd_dhandle;
1386 return c;
1387 }
1388
1389 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1390 {
1391 int i;
1392
1393 if (!got_from_pool) {
1394 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1395 c->busaddr);
1396 } else {
1397 i = c - h->cmd_pool;
1398 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1399 h->nr_frees++;
1400 }
1401 }
1402
1403 /***********************************************************************
1404 name: sendcmd
1405 Send a command to an IDA using the memory mapped FIFO interface
1406 and wait for it to complete.
1407 This routine should only be called at init time.
1408 ***********************************************************************/
1409 static int sendcmd(
1410 __u8 cmd,
1411 int ctlr,
1412 void *buff,
1413 size_t size,
1414 unsigned int blk,
1415 unsigned int blkcnt,
1416 unsigned int log_unit )
1417 {
1418 cmdlist_t *c;
1419 int complete;
1420 unsigned long temp;
1421 unsigned long i;
1422 ctlr_info_t *info_p = hba[ctlr];
1423
1424 c = cmd_alloc(info_p, 1);
1425 if(!c)
1426 return IO_ERROR;
1427 c->ctlr = ctlr;
1428 c->hdr.unit = log_unit;
1429 c->hdr.prio = 0;
1430 c->hdr.size = sizeof(rblk_t) >> 2;
1431 c->size += sizeof(rblk_t);
1432
1433 /* The request information. */
1434 c->req.hdr.next = 0;
1435 c->req.hdr.rcode = 0;
1436 c->req.bp = 0;
1437 c->req.hdr.sg_cnt = 1;
1438 c->req.hdr.reserved = 0;
1439
1440 if (size == 0)
1441 c->req.sg[0].size = 512;
1442 else
1443 c->req.sg[0].size = size;
1444
1445 c->req.hdr.blk = blk;
1446 c->req.hdr.blk_cnt = blkcnt;
1447 c->req.hdr.cmd = (unsigned char) cmd;
1448 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1449 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1450 /*
1451 * Disable interrupt
1452 */
1453 info_p->access.set_intr_mask(info_p, 0);
1454 /* Make sure there is room in the command FIFO */
1455 /* Actually it should be completely empty at this time. */
1456 for (i = 200000; i > 0; i--) {
1457 temp = info_p->access.fifo_full(info_p);
1458 if (temp != 0) {
1459 break;
1460 }
1461 udelay(10);
1462 DBG(
1463 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1464 " waiting!\n", ctlr);
1465 );
1466 }
1467 /*
1468 * Send the cmd
1469 */
1470 info_p->access.submit_command(info_p, c);
1471 complete = pollcomplete(ctlr);
1472
1473 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1474 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1475 if (complete != 1) {
1476 if (complete != c->busaddr) {
1477 printk( KERN_WARNING
1478 "cpqarray ida%d: idaSendPciCmd "
1479 "Invalid command list address returned! (%08lx)\n",
1480 ctlr, (unsigned long)complete);
1481 cmd_free(info_p, c, 1);
1482 return (IO_ERROR);
1483 }
1484 } else {
1485 printk( KERN_WARNING
1486 "cpqarray ida%d: idaSendPciCmd timed out, "
1487 "No command list address returned!\n",
1488 ctlr);
1489 cmd_free(info_p, c, 1);
1490 return (IO_ERROR);
1491 }
1492
1493 if (c->req.hdr.rcode & 0x00FE) {
1494 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1495 printk( KERN_WARNING
1496 "cpqarray ida%d: idaSendPciCmd, error: "
1497 "Controller failed at init time "
1498 "cmd: 0x%x, return code = 0x%x\n",
1499 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1500
1501 cmd_free(info_p, c, 1);
1502 return (IO_ERROR);
1503 }
1504 }
1505 cmd_free(info_p, c, 1);
1506 return (IO_OK);
1507 }
1508
1509 /*
1510 * revalidate_allvol is for online array config utilities. After a
1511 * utility reconfigures the drives in the array, it can use this function
1512 * (through an ioctl) to make the driver zap any previous disk structs for
1513 * that controller and get new ones.
1514 *
1515 * Right now I'm using the getgeometry() function to do this, but this
1516 * function should probably be finer grained and allow you to revalidate one
1517 * particular logical volume (instead of all of them on a particular
1518 * controller).
1519 */
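/*
 * An array config utility reaches this path with the IDAREVALIDATEVOLS ioctl,
 * which ida_ioctl() only accepts on minor 0 of the controller node.
 */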
1520 static int revalidate_allvol(ctlr_info_t *host)
1521 {
1522 int ctlr = host->ctlr;
1523 int i;
1524 unsigned long flags;
1525
1526 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1527 if (host->usage_count > 1) {
1528 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1529 printk(KERN_WARNING "cpqarray: Device busy for volume"
1530 " revalidation (usage=%d)\n", host->usage_count);
1531 return -EBUSY;
1532 }
1533 host->usage_count++;
1534 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1535
1536 /*
1537 * Set the partition and block size structures for all volumes
1538 * on this controller to zero. We will reread all of this data
1539 */
1540 set_capacity(ida_gendisk[ctlr][0], 0);
1541 for (i = 1; i < NWD; i++) {
1542 struct gendisk *disk = ida_gendisk[ctlr][i];
1543 if (disk->flags & GENHD_FL_UP)
1544 del_gendisk(disk);
1545 }
1546 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1547
1548 /*
1549 * Tell the array controller not to give us any interrupts while
1550 * we check the new geometry. Then turn interrupts back on when
1551 * we're done.
1552 */
1553 host->access.set_intr_mask(host, 0);
1554 getgeometry(ctlr);
1555 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1556
1557 for(i=0; i<NWD; i++) {
1558 struct gendisk *disk = ida_gendisk[ctlr][i];
1559 drv_info_t *drv = &host->drv[i];
1560 if (i && !drv->nr_blks)
1561 continue;
1562 blk_queue_hardsect_size(host->queue, drv->blk_size);
1563 set_capacity(disk, drv->nr_blks);
1564 disk->queue = host->queue;
1565 disk->private_data = drv;
1566 if (i)
1567 add_disk(disk);
1568 }
1569
1570 host->usage_count--;
1571 return 0;
1572 }
1573
1574 static int ida_revalidate(struct gendisk *disk)
1575 {
1576 drv_info_t *drv = disk->private_data;
1577 set_capacity(disk, drv->nr_blks);
1578 return 0;
1579 }
1580
1581 /********************************************************************
1582 name: pollcomplete
1583 Wait polling for a command to complete.
1584 The memory mapped FIFO is polled for the completion.
1585 Used only at init time, interrupts disabled.
1586 ********************************************************************/
1587 static int pollcomplete(int ctlr)
1588 {
1589 int done;
1590 int i;
1591
1592 /* Wait (up to 2 seconds) for a command to complete */
1593
1594 for (i = 200000; i > 0; i--) {
1595 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1596 if (done == 0) {
1597 udelay(10); /* a short fixed delay */
1598 } else
1599 return (done);
1600 }
1601 /* Invalid address to tell caller we ran out of time */
1602 return 1;
1603 }
1604 /*****************************************************************
1605 start_fwbk
1606 Starts the controller firmware's background processing.
1607 Currently only the Integrated Raid controller needs this done.
1608 If the PCI mem address registers are written to after this,
1609 data corruption may occur
1610 *****************************************************************/
1611 static void start_fwbk(int ctlr)
1612 {
1613 id_ctlr_t *id_ctlr_buf;
1614 int ret_code;
1615
1616 if( (hba[ctlr]->board_id != 0x40400E11)
1617 && (hba[ctlr]->board_id != 0x40480E11) )
1618
1619 /* Not an Integrated Raid, so there is nothing for us to do */
1620 return;
1621 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1622 " processing\n");
1623 /* The command does not return anything, but sendcmd still needs a
1624 buffer */
1625 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1626 if(id_ctlr_buf==NULL)
1627 {
1628 printk(KERN_WARNING "cpqarray: Out of memory. "
1629 "Unable to start background processing.\n");
1630 return;
1631 }
1632 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1633 id_ctlr_buf, 0, 0, 0, 0);
1634 if(ret_code != IO_OK)
1635 printk(KERN_WARNING "cpqarray: Unable to start"
1636 " background processing\n");
1637
1638 kfree(id_ctlr_buf);
1639 }
1640 /*****************************************************************
1641 getgeometry
1642 Get ida logical volume geometry from the controller
1643 This is a large bit of code which once existed in two flavors.
1644 It is used only at init time.
1645 *****************************************************************/
1646 static void getgeometry(int ctlr)
1647 {
1648 id_log_drv_t *id_ldrive;
1649 id_ctlr_t *id_ctlr_buf;
1650 sense_log_drv_stat_t *id_lstatus_buf;
1651 config_t *sense_config_buf;
1652 unsigned int log_unit, log_index;
1653 int ret_code, size;
1654 drv_info_t *drv;
1655 ctlr_info_t *info_p = hba[ctlr];
1656 int i;
1657
1658 info_p->log_drv_map = 0;
1659
1660 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1661 if(id_ldrive == NULL)
1662 {
1663 printk( KERN_ERR "cpqarray: out of memory.\n");
1664 return;
1665 }
1666
1667 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1668 if(id_ctlr_buf == NULL)
1669 {
1670 kfree(id_ldrive);
1671 printk( KERN_ERR "cpqarray: out of memory.\n");
1672 return;
1673 }
1674
1675 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1676 if(id_lstatus_buf == NULL)
1677 {
1678 kfree(id_ctlr_buf);
1679 kfree(id_ldrive);
1680 printk( KERN_ERR "cpqarray: out of memory.\n");
1681 return;
1682 }
1683
1684 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1685 if(sense_config_buf == NULL)
1686 {
1687 kfree(id_lstatus_buf);
1688 kfree(id_ctlr_buf);
1689 kfree(id_ldrive);
1690 printk( KERN_ERR "cpqarray: out of memory.\n");
1691 return;
1692 }
1693
1694 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1695 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1696 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1697 memset(sense_config_buf, 0, sizeof(config_t));
1698
1699 info_p->phys_drives = 0;
1700 info_p->log_drv_map = 0;
1701 info_p->drv_assign_map = 0;
1702 info_p->drv_spare_map = 0;
1703 info_p->mp_failed_drv_map = 0; /* only initialized here */
1704 /* Get controllers info for this logical drive */
1705 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1706 if (ret_code == IO_ERROR) {
1707 /*
1708 * If can't get controller info, set the logical drive map to 0,
1709 * so the idastubopen will fail on all logical drives
1710 * on the controller.
1711 */
1712 /* Free all the buffers and return */
1713 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1714 kfree(sense_config_buf);
1715 kfree(id_lstatus_buf);
1716 kfree(id_ctlr_buf);
1717 kfree(id_ldrive);
1718 return;
1719 }
1720
1721 info_p->log_drives = id_ctlr_buf->nr_drvs;
1722 for(i=0;i<4;i++)
1723 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1724 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1725
1726 printk(" (%s)\n", info_p->product_name);
1727 /*
1728 * Initialize logical drive map to zero
1729 */
1730 log_index = 0;
1731 /*
1732 * Get drive geometry for all logical drives
1733 */
1734 if (id_ctlr_buf->nr_drvs > 16)
1735 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1736 "16 logical drives per controller. "
1737 "Additional drives will not be "
1738 "detected.\n", ctlr);
1739
1740 for (log_unit = 0;
1741 (log_index < id_ctlr_buf->nr_drvs)
1742 && (log_unit < NWD);
1743 log_unit++) {
1744 struct gendisk *disk = ida_gendisk[ctlr][log_unit];
1745
1746 size = sizeof(sense_log_drv_stat_t);
1747
1748 /*
1749 Send "Identify logical drive status" cmd
1750 */
1751 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1752 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1753 if (ret_code == IO_ERROR) {
1754 /*
1755 If can't get logical drive status, set
1756 the logical drive map to 0, so the
1757 idastubopen will fail for all logical drives
1758 on the controller.
1759 */
1760 info_p->log_drv_map = 0;
1761 printk( KERN_WARNING
1762 "cpqarray ida%d: idaGetGeometry - Controller"
1763 " failed to report status of logical drive %d\n"
1764 "Access to this controller has been disabled\n",
1765 ctlr, log_unit);
1766 /* Free all the buffers and return */
1767 kfree(sense_config_buf);
1768 kfree(id_lstatus_buf);
1769 kfree(id_ctlr_buf);
1770 kfree(id_ldrive);
1771 return;
1772 }
1773 /*
1774 Make sure the logical drive is configured
1775 */
1776 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1777 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1778 sizeof(id_log_drv_t), 0, 0, log_unit);
1779 /*
1780 If error, the bit for this
1781 logical drive won't be set and
1782 idastubopen will return error.
1783 */
1784 if (ret_code != IO_ERROR) {
1785 drv = &info_p->drv[log_unit];
1786 drv->blk_size = id_ldrive->blk_size;
1787 drv->nr_blks = id_ldrive->nr_blks;
1788 drv->cylinders = id_ldrive->drv.cyl;
1789 drv->heads = id_ldrive->drv.heads;
1790 drv->sectors = id_ldrive->drv.sect_per_track;
1791 info_p->log_drv_map |= (1 << log_unit);
1792
1793 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1794 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1795 ret_code = sendcmd(SENSE_CONFIG,
1796 ctlr, sense_config_buf,
1797 sizeof(config_t), 0, 0, log_unit);
1798 if (ret_code == IO_ERROR) {
1799 info_p->log_drv_map = 0;
1800 /* Free all the buffers and return */
1801 printk(KERN_ERR "cpqarray: error sending sense config\n");
1802 kfree(sense_config_buf);
1803 kfree(id_lstatus_buf);
1804 kfree(id_ctlr_buf);
1805 kfree(id_ldrive);
1806 return;
1807
1808 }
1809
1810 sprintf(disk->devfs_name, "ida/c%dd%d", ctlr, log_unit);
1811
1812 info_p->phys_drives =
1813 sense_config_buf->ctlr_phys_drv;
1814 info_p->drv_assign_map
1815 |= sense_config_buf->drv_asgn_map;
1816 info_p->drv_assign_map
1817 |= sense_config_buf->spare_asgn_map;
1818 info_p->drv_spare_map
1819 |= sense_config_buf->spare_asgn_map;
1820 } /* end of if no error on id_ldrive */
1821 log_index = log_index + 1;
1822 } /* end of if logical drive configured */
1823 } /* end of for log_unit */
1824 kfree(sense_config_buf);
1825 kfree(id_ldrive);
1826 kfree(id_lstatus_buf);
1827 kfree(id_ctlr_buf);
1828 return;
1829
1830 }
1831
1832 static void __exit cpqarray_exit(void)
1833 {
1834 int i;
1835
1836 pci_unregister_driver(&cpqarray_pci_driver);
1837
1838 /* Double check that all controller entries have been removed */
1839 for(i=0; i<MAX_CTLR; i++) {
1840 if (hba[i] != NULL) {
1841 printk(KERN_WARNING "cpqarray: Removing EISA "
1842 "controller %d\n", i);
1843 cpqarray_remove_one_eisa(i);
1844 }
1845 }
1846
1847 devfs_remove("ida");
1848 remove_proc_entry("cpqarray", proc_root_driver);
1849 }
1850
1851 module_init(cpqarray_init)
1852 module_exit(cpqarray_exit)