[PATCH] irq-flags: generic irq: Use the new IRQF_ constants
drivers/block/cpqarray.c
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#include <linux/config.h> /* CONFIG_PROC_FS */
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/bio.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/blkpg.h>
34#include <linux/timer.h>
35#include <linux/proc_fs.h>
36#include <linux/init.h>
37#include <linux/hdreg.h>
38#include <linux/spinlock.h>
39#include <linux/blkdev.h>
40#include <linux/genhd.h>
41#include <asm/uaccess.h>
42#include <asm/io.h>
43
44
45#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
46
47#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
48#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
49
50/* Embedded module documentation macros - see modules.h */
51/* Original author Chris Frantz - Compaq Computer Corporation */
52MODULE_AUTHOR("Compaq Computer Corporation");
53MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
54MODULE_LICENSE("GPL");
55
56#include "cpqarray.h"
57#include "ida_cmd.h"
58#include "smart1,2.h"
59#include "ida_ioctl.h"
60
61#define READ_AHEAD 128
62#define NR_CMDS 128 /* This could probably go as high as ~400 */
63
64#define MAX_CTLR 8
65#define CTLR_SHIFT 8
66
67#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
68
69static int nr_ctlr;
70static ctlr_info_t *hba[MAX_CTLR];
71
72static int eisa[8];
73
 74#define NR_PRODUCTS ARRAY_SIZE(products)
75
76/* board_id = Subsystem Device ID & Vendor ID
77 * product = Marketing Name for the board
 78 * access = Address of the struct of function pointers
79 */
80static struct board_type products[] = {
81 { 0x0040110E, "IDA", &smart1_access },
82 { 0x0140110E, "IDA-2", &smart1_access },
83 { 0x1040110E, "IAES", &smart1_access },
84 { 0x2040110E, "SMART", &smart1_access },
85 { 0x3040110E, "SMART-2/E", &smart2e_access },
86 { 0x40300E11, "SMART-2/P", &smart2_access },
87 { 0x40310E11, "SMART-2SL", &smart2_access },
88 { 0x40320E11, "Smart Array 3200", &smart2_access },
89 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
90 { 0x40340E11, "Smart Array 221", &smart2_access },
91 { 0x40400E11, "Integrated Array", &smart4_access },
92 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
93 { 0x40500E11, "Smart Array 4200", &smart4_access },
94 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
95 { 0x40580E11, "Smart Array 431", &smart4_access },
96};
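/*
 * board_id is the 32-bit subsystem vendor/device word: cpqarray_pci_init()
 * reads it from PCI config offset 0x2c, and cpqarray_eisa_detect() reads it
 * from EISA port base+0xC80, then matches it against this table to pick the
 * controller's access-method functions.
 */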
97
98/* define the PCI info for the PCI cards this driver can control */
99static const struct pci_device_id cpqarray_pci_device_id[] =
100{
101 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
102 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
103 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
104 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
105 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
106 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
107 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
108 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
109 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
110 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
111 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
112 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
113 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
114 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
115 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
116 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
117 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
118 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
119 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
120 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
121 { 0 }
122};
123
124MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
125
126static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
127
128/* Debug... */
129#define DBG(s) do { s } while(0)
130/* Debug (general info)... */
131#define DBGINFO(s) do { } while(0)
132/* Debug Paranoid... */
133#define DBGP(s) do { } while(0)
134/* Debug Extra Paranoid... */
135#define DBGPX(s) do { } while(0)
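/*
 * Only DBG() expands its argument by default; to get the extra output,
 * redefine the wanted level the same way, e.g.
 *	#define DBGINFO(s) do { s } while(0)
 */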
136
137static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
138static void __iomem *remap_pci_mem(ulong base, ulong size);
139static int cpqarray_eisa_detect(void);
140static int pollcomplete(int ctlr);
141static void getgeometry(int ctlr);
142static void start_fwbk(int ctlr);
143
144static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
145static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
146
147static void free_hba(int i);
148static int alloc_cpqarray_hba(void);
149
150static int sendcmd(
151 __u8 cmd,
152 int ctlr,
153 void *buff,
154 size_t size,
155 unsigned int blk,
156 unsigned int blkcnt,
157 unsigned int log_unit );
158
159static int ida_open(struct inode *inode, struct file *filep);
160static int ida_release(struct inode *inode, struct file *filep);
161static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
 162static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
163static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164
165static void do_ida_request(request_queue_t *q);
166static void start_io(ctlr_info_t *h);
167
168static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170static inline void complete_buffers(struct bio *bio, int ok);
171static inline void complete_command(cmdlist_t *cmd, int timeout);
172
173static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
174static void ida_timer(unsigned long tdata);
175static int ida_revalidate(struct gendisk *disk);
176static int revalidate_allvol(ctlr_info_t *host);
177static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
178
179#ifdef CONFIG_PROC_FS
180static void ida_procinit(int i);
181static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
182#else
183static void ida_procinit(int i) {}
184#endif
185
186static inline drv_info_t *get_drv(struct gendisk *disk)
187{
188 return disk->private_data;
189}
190
191static inline ctlr_info_t *get_host(struct gendisk *disk)
192{
193 return disk->queue->queuedata;
194}
195
196
197static struct block_device_operations ida_fops = {
198 .owner = THIS_MODULE,
199 .open = ida_open,
200 .release = ida_release,
201 .ioctl = ida_ioctl,
 202	.getgeo		= ida_getgeo,
203 .revalidate_disk= ida_revalidate,
204};
205
206
207#ifdef CONFIG_PROC_FS
208
209static struct proc_dir_entry *proc_array;
210
 211/*
 212 * Get us a file in /proc/driver/cpqarray that says something about each
 213 * controller.  Create the directory if it doesn't exist yet.
 214 */
215static void __init ida_procinit(int i)
216{
217 if (proc_array == NULL) {
218 proc_array = proc_mkdir("cpqarray", proc_root_driver);
219 if (!proc_array) return;
220 }
221
222 create_proc_read_entry(hba[i]->devname, 0, proc_array,
223 ida_proc_get_info, hba[i]);
224}
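/*
 * The resulting entry can be read from userspace, e.g.
 *	cat /proc/driver/cpqarray/ida0
 * for the first controller (default devname "ida0").
 */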
225
226/*
227 * Report information about this controller.
228 */
229static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
230{
231 off_t pos = 0;
232 off_t len = 0;
233 int size, i, ctlr;
234 ctlr_info_t *h = (ctlr_info_t*)data;
235 drv_info_t *drv;
236#ifdef CPQ_PROC_PRINT_QUEUES
237 cmdlist_t *c;
238 unsigned long flags;
239#endif
240
241 ctlr = h->ctlr;
242 size = sprintf(buffer, "%s: Compaq %s Controller\n"
243 " Board ID: 0x%08lx\n"
244 " Firmware Revision: %c%c%c%c\n"
245 " Controller Sig: 0x%08lx\n"
246 " Memory Address: 0x%08lx\n"
247 " I/O Port: 0x%04x\n"
248 " IRQ: %d\n"
249 " Logical drives: %d\n"
250 " Physical drives: %d\n\n"
251 " Current Q depth: %d\n"
252 " Max Q depth since init: %d\n\n",
253 h->devname,
254 h->product_name,
255 (unsigned long)h->board_id,
256 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
257 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
258 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
259 h->log_drives, h->phys_drives,
260 h->Qdepth, h->maxQsinceinit);
261
262 pos += size; len += size;
263
264 size = sprintf(buffer+len, "Logical Drive Info:\n");
265 pos += size; len += size;
266
267 for(i=0; i<h->log_drives; i++) {
268 drv = &h->drv[i];
269 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
270 ctlr, i, drv->blk_size, drv->nr_blks);
271 pos += size; len += size;
272 }
273
274#ifdef CPQ_PROC_PRINT_QUEUES
275 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
276 size = sprintf(buffer+len, "\nCurrent Queues:\n");
277 pos += size; len += size;
278
279 c = h->reqQ;
280 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
281 if (c) c=c->next;
282 while(c && c != h->reqQ) {
283 size = sprintf(buffer+len, "->%p", c);
284 pos += size; len += size;
285 c=c->next;
286 }
287
288 c = h->cmpQ;
289 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
290 if (c) c=c->next;
291 while(c && c != h->cmpQ) {
292 size = sprintf(buffer+len, "->%p", c);
293 pos += size; len += size;
294 c=c->next;
295 }
296
297 size = sprintf(buffer+len, "\n"); pos += size; len += size;
298 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
299#endif
300 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
301 h->nr_allocs, h->nr_frees);
302 pos += size; len += size;
303
304 *eof = 1;
305 *start = buffer+offset;
306 len -= offset;
307 if (len>length)
308 len = length;
309 return len;
310}
311#endif /* CONFIG_PROC_FS */
312
313module_param_array(eisa, int, NULL, 0);
314
315static void release_io_mem(ctlr_info_t *c)
316{
 317	/* if the I/O region was never reserved, there is nothing to release */
318 if( c->io_mem_addr == 0)
319 return;
320 release_region(c->io_mem_addr, c->io_mem_length);
321 c->io_mem_addr = 0;
322 c->io_mem_length = 0;
323}
324
325static void __devexit cpqarray_remove_one(int i)
326{
327 int j;
328 char buff[4];
329
 330	/* sendcmd will turn off interrupts and send the flush
 331	 * to write all data in the battery-backed cache out to the disks.
 332	 * No data is returned, but we don't want to pass NULL to sendcmd. */
333 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
334 {
335 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
336 i);
337 }
338 free_irq(hba[i]->intr, hba[i]);
339 iounmap(hba[i]->vaddr);
340 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
341 del_timer(&hba[i]->timer);
342 remove_proc_entry(hba[i]->devname, proc_array);
343 pci_free_consistent(hba[i]->pci_dev,
344 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
345 hba[i]->cmd_pool_dhandle);
346 kfree(hba[i]->cmd_pool_bits);
347 for(j = 0; j < NWD; j++) {
348 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
349 del_gendisk(ida_gendisk[i][j]);
350 put_disk(ida_gendisk[i][j]);
351 }
352 blk_cleanup_queue(hba[i]->queue);
353 release_io_mem(hba[i]);
354 free_hba(i);
355}
356
357static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
358{
359 int i;
360 ctlr_info_t *tmp_ptr;
361
362 if (pci_get_drvdata(pdev) == NULL) {
363 printk( KERN_ERR "cpqarray: Unable to remove device \n");
364 return;
365 }
366
367 tmp_ptr = pci_get_drvdata(pdev);
368 i = tmp_ptr->ctlr;
369 if (hba[i] == NULL) {
 370		printk(KERN_ERR "cpqarray: controller %d appears to have "
 371				"already been removed\n", i);
372 return;
373 }
374 pci_set_drvdata(pdev, NULL);
375
376 cpqarray_remove_one(i);
377}
378
 379/* Removing an instance that was not removed automatically;
 380 * it must be an EISA card.
 381 */
382static void __devexit cpqarray_remove_one_eisa (int i)
383{
384 if (hba[i] == NULL) {
 385		printk(KERN_ERR "cpqarray: controller %d appears to have "
 386				"already been removed\n", i);
387 return;
388 }
389 cpqarray_remove_one(i);
390}
391
392/* pdev is NULL for eisa */
 393static int __init cpqarray_register_ctlr(int i, struct pci_dev *pdev)
394{
395 request_queue_t *q;
396 int j;
397
398 /*
399 * register block devices
400 * Find disks and fill in structs
401 * Get an interrupt, set the Q depth and get into /proc
402 */
403
 404	/* If this is successful it should ensure that we are the only */
 405	/* instance of the driver */
406 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
407 goto Enomem4;
408 }
409 hba[i]->access.set_intr_mask(hba[i], 0);
410 if (request_irq(hba[i]->intr, do_ida_intr,
 411		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
412 {
413 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
414 hba[i]->intr, hba[i]->devname);
415 goto Enomem3;
416 }
417
418 for (j=0; j<NWD; j++) {
419 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
420 if (!ida_gendisk[i][j])
421 goto Enomem2;
422 }
423
424 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
425 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
426 &(hba[i]->cmd_pool_dhandle));
427 hba[i]->cmd_pool_bits = kmalloc(
428 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
429 GFP_KERNEL);
430
431 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
432 goto Enomem1;
433
434 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
435 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
436 printk(KERN_INFO "cpqarray: Finding drives on %s",
437 hba[i]->devname);
438
439 spin_lock_init(&hba[i]->lock);
440 q = blk_init_queue(do_ida_request, &hba[i]->lock);
441 if (!q)
442 goto Enomem1;
443
444 hba[i]->queue = q;
445 q->queuedata = hba[i];
446
447 getgeometry(i);
448 start_fwbk(i);
449
450 ida_procinit(i);
451
452 if (pdev)
453 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
454
455 /* This is a hardware imposed limit. */
456 blk_queue_max_hw_segments(q, SG_MAX);
457
458 /* This is a driver limit and could be eliminated. */
459 blk_queue_max_phys_segments(q, SG_MAX);
460
461 init_timer(&hba[i]->timer);
462 hba[i]->timer.expires = jiffies + IDA_TIMER;
463 hba[i]->timer.data = (unsigned long)hba[i];
464 hba[i]->timer.function = ida_timer;
465 add_timer(&hba[i]->timer);
466
467 /* Enable IRQ now that spinlock and rate limit timer are set up */
468 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
469
470 for(j=0; j<NWD; j++) {
471 struct gendisk *disk = ida_gendisk[i][j];
472 drv_info_t *drv = &hba[i]->drv[j];
473 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
474 disk->major = COMPAQ_SMART2_MAJOR + i;
475 disk->first_minor = j<<NWD_SHIFT;
476 disk->fops = &ida_fops;
477 if (j && !drv->nr_blks)
478 continue;
479 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
480 set_capacity(disk, drv->nr_blks);
481 disk->queue = hba[i]->queue;
482 disk->private_data = drv;
483 add_disk(disk);
484 }
485
486 /* done ! */
487 return(i);
488
489Enomem1:
490 nr_ctlr = i;
491 kfree(hba[i]->cmd_pool_bits);
492 if (hba[i]->cmd_pool)
493 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
494 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
495Enomem2:
496 while (j--) {
497 put_disk(ida_gendisk[i][j]);
498 ida_gendisk[i][j] = NULL;
499 }
500 free_irq(hba[i]->intr, hba[i]);
501Enomem3:
502 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
503Enomem4:
504 if (pdev)
505 pci_set_drvdata(pdev, NULL);
506 release_io_mem(hba[i]);
507 free_hba(i);
508
 509	printk(KERN_ERR "cpqarray: out of memory\n");
510
511 return -1;
512}
513
514static int __init cpqarray_init_one( struct pci_dev *pdev,
515 const struct pci_device_id *ent)
516{
517 int i;
518
519 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
520 " bus %d dev %d func %d\n",
521 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
522 PCI_FUNC(pdev->devfn));
523 i = alloc_cpqarray_hba();
524 if( i < 0 )
525 return (-1);
526 memset(hba[i], 0, sizeof(ctlr_info_t));
527 sprintf(hba[i]->devname, "ida%d", i);
528 hba[i]->ctlr = i;
529 /* Initialize the pdev driver private data */
530 pci_set_drvdata(pdev, hba[i]);
531
532 if (cpqarray_pci_init(hba[i], pdev) != 0) {
533 pci_set_drvdata(pdev, NULL);
534 release_io_mem(hba[i]);
535 free_hba(i);
536 return -1;
537 }
538
539 return (cpqarray_register_ctlr(i, pdev));
540}
541
542static struct pci_driver cpqarray_pci_driver = {
543 .name = "cpqarray",
544 .probe = cpqarray_init_one,
545 .remove = __devexit_p(cpqarray_remove_one_pci),
546 .id_table = cpqarray_pci_device_id,
547};
548
549/*
550 * This is it. Find all the controllers and register them.
551 * returns the number of block devices registered.
552 */
553static int __init cpqarray_init(void)
554{
555 int num_cntlrs_reg = 0;
556 int i;
557 int rc = 0;
558
559 /* detect controllers */
560 printk(DRIVER_NAME "\n");
561
562 rc = pci_register_driver(&cpqarray_pci_driver);
563 if (rc)
564 return rc;
565 cpqarray_eisa_detect();
566
567 for (i=0; i < MAX_CTLR; i++) {
568 if (hba[i] != NULL)
569 num_cntlrs_reg++;
570 }
571
572 return(num_cntlrs_reg);
573}
574
575/* Function to find the first free pointer into our hba[] array */
576/* Returns -1 if no free entries are left. */
577static int alloc_cpqarray_hba(void)
578{
579 int i;
580
581 for(i=0; i< MAX_CTLR; i++) {
582 if (hba[i] == NULL) {
583 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
584 if(hba[i]==NULL) {
585 printk(KERN_ERR "cpqarray: out of memory.\n");
586 return (-1);
587 }
588 return (i);
589 }
590 }
591 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
592 " of 8 controllers.\n");
593 return(-1);
594}
595
596static void free_hba(int i)
597{
598 kfree(hba[i]);
599 hba[i]=NULL;
600}
601
602/*
603 * Find the IO address of the controller, its IRQ and so forth. Fill
604 * in some basic stuff into the ctlr_info_t structure.
605 */
606static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
607{
608 ushort vendor_id, device_id, command;
609 unchar cache_line_size, latency_timer;
610 unchar irq, revision;
611 unsigned long addr[6];
612 __u32 board_id;
613
614 int i;
615
616 c->pci_dev = pdev;
617 if (pci_enable_device(pdev)) {
618 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
619 return -1;
620 }
621 vendor_id = pdev->vendor;
622 device_id = pdev->device;
623 irq = pdev->irq;
624
625 for(i=0; i<6; i++)
626 addr[i] = pci_resource_start(pdev, i);
627
628 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
629 {
630 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
631 return -1;
632 }
633
634 pci_read_config_word(pdev, PCI_COMMAND, &command);
635 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
636 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
637 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
638
639 pci_read_config_dword(pdev, 0x2c, &board_id);
640
641 /* check to see if controller has been disabled */
642 if(!(command & 0x02)) {
643 printk(KERN_WARNING
644 "cpqarray: controller appears to be disabled\n");
645 return(-1);
646 }
647
648DBGINFO(
649 printk("vendor_id = %x\n", vendor_id);
650 printk("device_id = %x\n", device_id);
651 printk("command = %x\n", command);
652 for(i=0; i<6; i++)
653 printk("addr[%d] = %lx\n", i, addr[i]);
654 printk("revision = %x\n", revision);
655 printk("irq = %x\n", irq);
656 printk("cache_line_size = %x\n", cache_line_size);
657 printk("latency_timer = %x\n", latency_timer);
658 printk("board_id = %x\n", board_id);
659);
660
661 c->intr = irq;
662
663 for(i=0; i<6; i++) {
664 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
665 { /* IO space */
666 c->io_mem_addr = addr[i];
667 c->io_mem_length = pci_resource_end(pdev, i)
668 - pci_resource_start(pdev, i) + 1;
669 if(!request_region( c->io_mem_addr, c->io_mem_length,
670 "cpqarray"))
671 {
672 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
673 c->io_mem_addr = 0;
674 c->io_mem_length = 0;
675 }
676 break;
677 }
678 }
679
680 c->paddr = 0;
681 for(i=0; i<6; i++)
682 if (!(pci_resource_flags(pdev, i) &
683 PCI_BASE_ADDRESS_SPACE_IO)) {
684 c->paddr = pci_resource_start (pdev, i);
685 break;
686 }
687 if (!c->paddr)
688 return -1;
689 c->vaddr = remap_pci_mem(c->paddr, 128);
690 if (!c->vaddr)
691 return -1;
692 c->board_id = board_id;
693
694 for(i=0; i<NR_PRODUCTS; i++) {
695 if (board_id == products[i].board_id) {
696 c->product_name = products[i].product_name;
697 c->access = *(products[i].access);
698 break;
699 }
700 }
701 if (i == NR_PRODUCTS) {
702 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
703 " to access the SMART Array controller %08lx\n",
704 (unsigned long)board_id);
705 return -1;
706 }
707
708 return 0;
709}
710
711/*
712 * Map (physical) PCI mem into (virtual) kernel space
713 */
714static void __iomem *remap_pci_mem(ulong base, ulong size)
715{
716 ulong page_base = ((ulong) base) & PAGE_MASK;
717 ulong page_offs = ((ulong) base) - page_base;
718 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
719
720 return (page_remapped ? (page_remapped + page_offs) : NULL);
721}
722
723#ifndef MODULE
724/*
725 * Config string is a comma separated set of i/o addresses of EISA cards.
726 */
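/*
 * Example (hypothetical addresses): booting with "smart2=0x5000,0x6000"
 * makes the driver probe for EISA controllers at those two I/O bases; up to
 * eight addresses may be given.  When built as a module, the same list is
 * passed through the "eisa" module parameter instead.
 */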
727static int cpqarray_setup(char *str)
728{
729 int i, ints[9];
730
731 (void)get_options(str, ARRAY_SIZE(ints), ints);
732
733 for(i=0; i<ints[0] && i<8; i++)
734 eisa[i] = ints[i+1];
735 return 1;
736}
737
738__setup("smart2=", cpqarray_setup);
739
740#endif
741
742/*
743 * Find an EISA controller's signature. Set up an hba if we find it.
744 */
 745static int __init cpqarray_eisa_detect(void)
746{
747 int i=0, j;
748 __u32 board_id;
749 int intr;
750 int ctlr;
751 int num_ctlr = 0;
752
753 while(i<8 && eisa[i]) {
754 ctlr = alloc_cpqarray_hba();
755 if(ctlr == -1)
756 break;
757 board_id = inl(eisa[i]+0xC80);
758 for(j=0; j < NR_PRODUCTS; j++)
759 if (board_id == products[j].board_id)
760 break;
761
762 if (j == NR_PRODUCTS) {
763 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
764 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
765 continue;
766 }
767
768 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
769 hba[ctlr]->io_mem_addr = eisa[i];
770 hba[ctlr]->io_mem_length = 0x7FF;
771 if(!request_region(hba[ctlr]->io_mem_addr,
772 hba[ctlr]->io_mem_length,
773 "cpqarray"))
774 {
775 printk(KERN_WARNING "cpqarray: I/O range already in "
776 "use addr = %lx length = %ld\n",
777 hba[ctlr]->io_mem_addr,
778 hba[ctlr]->io_mem_length);
779 free_hba(ctlr);
780 continue;
781 }
782
783 /*
784 * Read the config register to find our interrupt
785 */
786 intr = inb(eisa[i]+0xCC0) >> 4;
787 if (intr & 1) intr = 11;
788 else if (intr & 2) intr = 10;
789 else if (intr & 4) intr = 14;
790 else if (intr & 8) intr = 15;
791
792 hba[ctlr]->intr = intr;
793 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
794 hba[ctlr]->product_name = products[j].product_name;
795 hba[ctlr]->access = *(products[j].access);
796 hba[ctlr]->ctlr = ctlr;
797 hba[ctlr]->board_id = board_id;
798 hba[ctlr]->pci_dev = NULL; /* not PCI */
799
800DBGINFO(
801 printk("i = %d, j = %d\n", i, j);
802 printk("irq = %x\n", intr);
803 printk("product name = %s\n", products[j].product_name);
804 printk("board_id = %x\n", board_id);
805);
806
807 num_ctlr++;
808 i++;
809
810 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
811 printk(KERN_WARNING
812 "cpqarray: Can't register EISA controller %d\n",
813 ctlr);
814
815 }
816
817 return num_ctlr;
818}
819
820/*
821 * Open. Make sure the device is really there.
822 */
823static int ida_open(struct inode *inode, struct file *filep)
824{
825 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
826 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
827
828 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
829 /*
830 * Root is allowed to open raw volume zero even if it's not configured
831 * so array config can still work. I don't think I really like this,
 832	 * but I'm already using way too many device nodes to claim another one
833 * for "raw controller".
834 */
835 if (!drv->nr_blks) {
836 if (!capable(CAP_SYS_RAWIO))
837 return -ENXIO;
838 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
839 return -ENXIO;
840 }
841 host->usage_count++;
842 return 0;
843}
844
845/*
846 * Close. Sync first.
847 */
848static int ida_release(struct inode *inode, struct file *filep)
849{
850 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
851 host->usage_count--;
852 return 0;
853}
854
855/*
856 * Enqueuing and dequeuing functions for cmdlists.
857 */
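/*
 * Each queue (reqQ, cmpQ) is a circular doubly-linked list with *Qptr as its
 * head: addQ() links the new command in just ahead of the head (i.e. at the
 * tail), and removeQ() unlinks a command, advancing *Qptr past it if needed
 * and clearing *Qptr once the last element is gone.
 */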
858static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
859{
860 if (*Qptr == NULL) {
861 *Qptr = c;
862 c->next = c->prev = c;
863 } else {
864 c->prev = (*Qptr)->prev;
865 c->next = (*Qptr);
866 (*Qptr)->prev->next = c;
867 (*Qptr)->prev = c;
868 }
869}
870
871static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
872{
873 if (c && c->next != c) {
874 if (*Qptr == c) *Qptr = c->next;
875 c->prev->next = c->next;
876 c->next->prev = c->prev;
877 } else {
878 *Qptr = NULL;
879 }
880 return c;
881}
882
883/*
884 * Get a request and submit it to the controller.
885 * This routine needs to grab all the requests it possibly can from the
886 * req Q and submit them. Interrupts are off (and need to be off) when you
887 * are in here (either via the dummy do_ida_request functions or by being
 888 * called from the interrupt handler).
889 */
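/*
 * Rough flow of this routine: pull the next request with elv_next_request(),
 * grab a command block from the pool, build the scatter/gather list with
 * blk_rq_map_sg() and pci_map_page(), queue the command on reqQ, then loop;
 * start_io() finally pushes everything queued down to the controller FIFO.
 */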
890static void do_ida_request(request_queue_t *q)
891{
892 ctlr_info_t *h = q->queuedata;
893 cmdlist_t *c;
894 struct request *creq;
895 struct scatterlist tmp_sg[SG_MAX];
896 int i, dir, seg;
897
898 if (blk_queue_plugged(q))
899 goto startio;
900
901queue_next:
902 creq = elv_next_request(q);
903 if (!creq)
904 goto startio;
905
 906	BUG_ON(creq->nr_phys_segments > SG_MAX);
907
908 if ((c = cmd_alloc(h,1)) == NULL)
909 goto startio;
910
911 blkdev_dequeue_request(creq);
912
913 c->ctlr = h->ctlr;
914 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
915 c->hdr.size = sizeof(rblk_t) >> 2;
916 c->size += sizeof(rblk_t);
917
918 c->req.hdr.blk = creq->sector;
919 c->rq = creq;
920DBGPX(
921 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
922);
923 seg = blk_rq_map_sg(q, creq, tmp_sg);
924
925 /* Now do all the DMA Mappings */
926 if (rq_data_dir(creq) == READ)
927 dir = PCI_DMA_FROMDEVICE;
928 else
929 dir = PCI_DMA_TODEVICE;
930 for( i=0; i < seg; i++)
931 {
932 c->req.sg[i].size = tmp_sg[i].length;
933 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
934 tmp_sg[i].page,
935 tmp_sg[i].offset,
936 tmp_sg[i].length, dir);
937 }
938DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
939 c->req.hdr.sg_cnt = seg;
940 c->req.hdr.blk_cnt = creq->nr_sectors;
941 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
942 c->type = CMD_RWREQ;
943
944 /* Put the request on the tail of the request queue */
945 addQ(&h->reqQ, c);
946 h->Qdepth++;
947 if (h->Qdepth > h->maxQsinceinit)
948 h->maxQsinceinit = h->Qdepth;
949
950 goto queue_next;
951
952startio:
953 start_io(h);
954}
955
956/*
957 * start_io submits everything on a controller's request queue
958 * and moves it to the completion queue.
959 *
960 * Interrupts had better be off if you're in here
961 */
962static void start_io(ctlr_info_t *h)
963{
964 cmdlist_t *c;
965
966 while((c = h->reqQ) != NULL) {
967 /* Can't do anything if we're busy */
968 if (h->access.fifo_full(h) == 0)
969 return;
970
971 /* Get the first entry from the request Q */
972 removeQ(&h->reqQ, c);
973 h->Qdepth--;
974
975 /* Tell the controller to do our bidding */
976 h->access.submit_command(h, c);
977
978 /* Get onto the completion Q */
979 addQ(&h->cmpQ, c);
980 }
981}
982
983static inline void complete_buffers(struct bio *bio, int ok)
984{
985 struct bio *xbh;
986 while(bio) {
987 int nr_sectors = bio_sectors(bio);
988
989 xbh = bio->bi_next;
990 bio->bi_next = NULL;
991
992 blk_finished_io(nr_sectors);
993 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
994
995 bio = xbh;
996 }
997}
998/*
999 * Mark all buffers that cmd was responsible for
1000 */
1001static inline void complete_command(cmdlist_t *cmd, int timeout)
1002{
1003 int ok=1;
1004 int i, ddir;
1005
1006 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1007 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1008 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1009 cmd->ctlr, cmd->hdr.unit);
1010 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1011 }
1012 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1013 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1014 cmd->ctlr, cmd->hdr.unit);
1015 ok = 0;
1016 }
1017 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1018 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1019 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1020 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1021 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1022 ok = 0;
1023 }
1024 if (timeout) ok = 0;
1025 /* unmap the DMA mapping for all the scatter gather elements */
1026 if (cmd->req.hdr.cmd == IDA_READ)
1027 ddir = PCI_DMA_FROMDEVICE;
1028 else
1029 ddir = PCI_DMA_TODEVICE;
1030 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1031 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1032 cmd->req.sg[i].size, ddir);
1033
1034 complete_buffers(cmd->rq->bio, ok);
1035
1036 add_disk_randomness(cmd->rq->rq_disk);
1037
 1038	DBGPX(printk("Done with %p\n", cmd->rq););
 1039	end_that_request_last(cmd->rq, ok ? 1 : -EIO);
1040}
1041
1042/*
1043 * The controller will interrupt us upon completion of commands.
1044 * Find the command on the completion queue, remove it, tell the OS and
1045 * try to queue up more IO
1046 */
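/*
 * command_completed() returns the bus address of the finished command list;
 * the low two bits carry an error indication, so they are masked off before
 * the address is matched against the entries on cmpQ.  If those bits were
 * set while rcode is still 0, the command is flagged RCODE_INVREQ below.
 */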
1047static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1048{
1049 ctlr_info_t *h = dev_id;
1050 cmdlist_t *c;
1051 unsigned long istat;
1052 unsigned long flags;
1053 __u32 a,a1;
1054
1055 istat = h->access.intr_pending(h);
1056 /* Is this interrupt for us? */
1057 if (istat == 0)
1058 return IRQ_NONE;
1059
1060 /*
1061 * If there are completed commands in the completion queue,
1062 * we had better do something about it.
1063 */
1064 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1065 if (istat & FIFO_NOT_EMPTY) {
1066 while((a = h->access.command_completed(h))) {
1067 a1 = a; a &= ~3;
1068 if ((c = h->cmpQ) == NULL)
1069 {
1070 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1071 continue;
1072 }
1073 while(c->busaddr != a) {
1074 c = c->next;
1075 if (c == h->cmpQ)
1076 break;
1077 }
1078 /*
1079 * If we've found the command, take it off the
1080 * completion Q and free it
1081 */
1082 if (c->busaddr == a) {
1083 removeQ(&h->cmpQ, c);
1084 /* Check for invalid command.
1085 * Controller returns command error,
1086 * But rcode = 0.
1087 */
1088
1089 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1090 {
1091 c->req.hdr.rcode = RCODE_INVREQ;
1092 }
1093 if (c->type == CMD_RWREQ) {
1094 complete_command(c, 0);
1095 cmd_free(h, c, 1);
1096 } else if (c->type == CMD_IOCTL_PEND) {
1097 c->type = CMD_IOCTL_DONE;
1098 }
1099 continue;
1100 }
1101 }
1102 }
1103
1104 /*
1105 * See if we can queue up some more IO
1106 */
1107 do_ida_request(h->queue);
1108 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1109 return IRQ_HANDLED;
1110}
1111
1112/*
1113 * This timer was for timing out requests that haven't happened after
1114 * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
1115 * reset a flags structure so we don't flood the user with
1116 * "Non-Fatal error" messages.
1117 */
1118static void ida_timer(unsigned long tdata)
1119{
1120 ctlr_info_t *h = (ctlr_info_t*)tdata;
1121
1122 h->timer.expires = jiffies + IDA_TIMER;
1123 add_timer(&h->timer);
1124 h->misc_tflags = 0;
1125}
1126
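/*
 * Report the drive geometry.  If the controller supplied no cylinder count,
 * fall back to a conventional 255-head, 63-sector translation and derive
 * the cylinder count from the total number of blocks.
 */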
1127static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1128{
1129 drv_info_t *drv = get_drv(bdev->bd_disk);
1130
1131 if (drv->cylinders) {
1132 geo->heads = drv->heads;
1133 geo->sectors = drv->sectors;
1134 geo->cylinders = drv->cylinders;
1135 } else {
1136 geo->heads = 0xff;
1137 geo->sectors = 0x3f;
1138 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1139 }
1140
1141 return 0;
1142}
1143
1144/*
1145 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1146 * setting readahead and submitting commands from userspace to the controller.
1147 */
1148static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1149{
1150 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1151 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1152 int error;
1153 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1154 ida_ioctl_t *my_io;
1155
1156 switch(cmd) {
1157 case IDAGETDRVINFO:
1158 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1159 return -EFAULT;
1160 return 0;
1161 case IDAPASSTHRU:
1162 if (!capable(CAP_SYS_RAWIO))
1163 return -EPERM;
1164 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1165 if (!my_io)
1166 return -ENOMEM;
1167 error = -EFAULT;
1168 if (copy_from_user(my_io, io, sizeof(*my_io)))
1169 goto out_passthru;
1170 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1171 if (error)
1172 goto out_passthru;
1173 error = -EFAULT;
1174 if (copy_to_user(io, my_io, sizeof(*my_io)))
1175 goto out_passthru;
1176 error = 0;
1177out_passthru:
1178 kfree(my_io);
1179 return error;
1180 case IDAGETCTLRSIG:
1181 if (!arg) return -EINVAL;
1182 put_user(host->ctlr_sig, (int __user *)arg);
1183 return 0;
1184 case IDAREVALIDATEVOLS:
1185 if (iminor(inode) != 0)
1186 return -ENXIO;
1187 return revalidate_allvol(host);
1188 case IDADRIVERVERSION:
1189 if (!arg) return -EINVAL;
1190 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1191 return 0;
1192 case IDAGETPCIINFO:
1193 {
1194
1195 ida_pci_info_struct pciinfo;
1196
1197 if (!arg) return -EINVAL;
1198 pciinfo.bus = host->pci_dev->bus->number;
1199 pciinfo.dev_fn = host->pci_dev->devfn;
1200 pciinfo.board_id = host->board_id;
1201 if(copy_to_user((void __user *) arg, &pciinfo,
1202 sizeof( ida_pci_info_struct)))
1203 return -EFAULT;
1204 return(0);
1205 }
1206
1207 default:
1208 return -EINVAL;
1209 }
1210
1211}
1212/*
1213 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1214 * The command block (io) has already been copied to kernel space for us,
1215 * however, any elements in the sglist need to be copied to kernel space
1216 * or copied back to userspace.
1217 *
 1218 * Only root may perform a controller passthru command; however, I'm not doing
1219 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1220 * putting a 64M buffer in the sglist is probably a *bad* idea.
1221 */
1222static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1223{
1224 int ctlr = h->ctlr;
1225 cmdlist_t *c;
1226 void *p = NULL;
1227 unsigned long flags;
1228 int error;
1229
1230 if ((c = cmd_alloc(h, 0)) == NULL)
1231 return -ENOMEM;
1232 c->ctlr = ctlr;
1233 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1234 c->hdr.size = sizeof(rblk_t) >> 2;
1235 c->size += sizeof(rblk_t);
1236
1237 c->req.hdr.cmd = io->cmd;
1238 c->req.hdr.blk = io->blk;
1239 c->req.hdr.blk_cnt = io->blk_cnt;
1240 c->type = CMD_IOCTL_PEND;
1241
1242 /* Pre submit processing */
1243 switch(io->cmd) {
1244 case PASSTHRU_A:
1245 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1246 if (!p)
1247 {
1248 error = -ENOMEM;
1249 cmd_free(h, c, 0);
1250 return(error);
1251 }
1252 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1253 kfree(p);
1254 cmd_free(h, c, 0);
1255 return -EFAULT;
1256 }
1257 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1258 sizeof(ida_ioctl_t),
1259 PCI_DMA_BIDIRECTIONAL);
1260 c->req.sg[0].size = io->sg[0].size;
1261 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1262 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1263 c->req.hdr.sg_cnt = 1;
1264 break;
1265 case IDA_READ:
1266 case READ_FLASH_ROM:
1267 case SENSE_CONTROLLER_PERFORMANCE:
1268 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1269 if (!p)
1270 {
1271 error = -ENOMEM;
1272 cmd_free(h, c, 0);
1273 return(error);
1274 }
1275
1276 c->req.sg[0].size = io->sg[0].size;
1277 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1278 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1279 c->req.hdr.sg_cnt = 1;
1280 break;
1281 case IDA_WRITE:
1282 case IDA_WRITE_MEDIA:
1283 case DIAG_PASS_THRU:
1284 case COLLECT_BUFFER:
1285 case WRITE_FLASH_ROM:
1286 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1287 if (!p)
1288 {
1289 error = -ENOMEM;
1290 cmd_free(h, c, 0);
1291 return(error);
1292 }
1293 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1294 kfree(p);
1295 cmd_free(h, c, 0);
1296 return -EFAULT;
1297 }
1298 c->req.sg[0].size = io->sg[0].size;
1299 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1300 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1301 c->req.hdr.sg_cnt = 1;
1302 break;
1303 default:
1304 c->req.sg[0].size = sizeof(io->c);
1305 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1306 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1307 c->req.hdr.sg_cnt = 1;
1308 }
1309
1310 /* Put the request on the tail of the request queue */
1311 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1312 addQ(&h->reqQ, c);
1313 h->Qdepth++;
1314 start_io(h);
1315 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1316
1317 /* Wait for completion */
1318 while(c->type != CMD_IOCTL_DONE)
1319 schedule();
1320
1321 /* Unmap the DMA */
1322 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1323 PCI_DMA_BIDIRECTIONAL);
1324 /* Post submit processing */
1325 switch(io->cmd) {
1326 case PASSTHRU_A:
1327 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1328 sizeof(ida_ioctl_t),
1329 PCI_DMA_BIDIRECTIONAL);
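		/* fall through: passthru data is copied back to userspace like a read */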
1330 case IDA_READ:
1331 case DIAG_PASS_THRU:
1332 case SENSE_CONTROLLER_PERFORMANCE:
1333 case READ_FLASH_ROM:
1334 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1335 kfree(p);
1336 return -EFAULT;
1337 }
1338 /* fall through and free p */
1339 case IDA_WRITE:
1340 case IDA_WRITE_MEDIA:
1341 case COLLECT_BUFFER:
1342 case WRITE_FLASH_ROM:
1343 kfree(p);
1344 break;
1345 default:;
1346 /* Nothing to do */
1347 }
1348
1349 io->rcode = c->req.hdr.rcode;
1350 cmd_free(h, c, 0);
1351 return(0);
1352}
1353
1354/*
1355 * Commands are pre-allocated in a large block. Here we use a simple bitmap
 1356 * scheme to suballocate them to the driver.  Operations that are not time
 1357 * critical (and can wait for kmalloc and possibly sleep) can pass 0 for
 1358 * get_from_pool to have a fresh command allocated outside the pool.
1359 */
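/*
 * Pool allocation scans cmd_pool_bits with find_first_zero_bit() and claims
 * slot i via test_and_set_bit(); the command then lives at cmd_pool + i and
 * its bus address is cmd_pool_dhandle + i * sizeof(cmdlist_t).
 */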
1360static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1361{
1362 cmdlist_t * c;
1363 int i;
1364 dma_addr_t cmd_dhandle;
1365
1366 if (!get_from_pool) {
1367 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1368 sizeof(cmdlist_t), &cmd_dhandle);
1369 if(c==NULL)
1370 return NULL;
1371 } else {
1372 do {
1373 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1374 if (i == NR_CMDS)
1375 return NULL;
1376 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1377 c = h->cmd_pool + i;
1378 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1379 h->nr_allocs++;
1380 }
1381
1382 memset(c, 0, sizeof(cmdlist_t));
1383 c->busaddr = cmd_dhandle;
1384 return c;
1385}
1386
1387static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1388{
1389 int i;
1390
1391 if (!got_from_pool) {
1392 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1393 c->busaddr);
1394 } else {
1395 i = c - h->cmd_pool;
1396 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1397 h->nr_frees++;
1398 }
1399}
1400
1401/***********************************************************************
1402 name: sendcmd
1403 Send a command to an IDA using the memory mapped FIFO interface
1404 and wait for it to complete.
1405 This routine should only be called at init time.
1406***********************************************************************/
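/*
 * sendcmd() runs with the controller interrupt masked: it busy-waits for room
 * in the command FIFO, submits the command, and then polls for completion via
 * pollcomplete(), which is why it is only safe to use at init time, before
 * the interrupt handler takes over command completion.
 */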
1407static int sendcmd(
1408 __u8 cmd,
1409 int ctlr,
1410 void *buff,
1411 size_t size,
1412 unsigned int blk,
1413 unsigned int blkcnt,
1414 unsigned int log_unit )
1415{
1416 cmdlist_t *c;
1417 int complete;
1418 unsigned long temp;
1419 unsigned long i;
1420 ctlr_info_t *info_p = hba[ctlr];
1421
1422 c = cmd_alloc(info_p, 1);
1423 if(!c)
1424 return IO_ERROR;
1425 c->ctlr = ctlr;
1426 c->hdr.unit = log_unit;
1427 c->hdr.prio = 0;
1428 c->hdr.size = sizeof(rblk_t) >> 2;
1429 c->size += sizeof(rblk_t);
1430
1431 /* The request information. */
1432 c->req.hdr.next = 0;
1433 c->req.hdr.rcode = 0;
1434 c->req.bp = 0;
1435 c->req.hdr.sg_cnt = 1;
1436 c->req.hdr.reserved = 0;
1437
1438 if (size == 0)
1439 c->req.sg[0].size = 512;
1440 else
1441 c->req.sg[0].size = size;
1442
1443 c->req.hdr.blk = blk;
1444 c->req.hdr.blk_cnt = blkcnt;
1445 c->req.hdr.cmd = (unsigned char) cmd;
1446 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1447 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1448 /*
1449 * Disable interrupt
1450 */
1451 info_p->access.set_intr_mask(info_p, 0);
1452 /* Make sure there is room in the command FIFO */
1453 /* Actually it should be completely empty at this time. */
1454 for (i = 200000; i > 0; i--) {
1455 temp = info_p->access.fifo_full(info_p);
1456 if (temp != 0) {
1457 break;
1458 }
1459 udelay(10);
1460DBG(
1461 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1462 " waiting!\n", ctlr);
1463);
1464 }
1465 /*
1466 * Send the cmd
1467 */
1468 info_p->access.submit_command(info_p, c);
1469 complete = pollcomplete(ctlr);
1470
1471 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1472 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1473 if (complete != 1) {
1474 if (complete != c->busaddr) {
1475 printk( KERN_WARNING
1476 "cpqarray ida%d: idaSendPciCmd "
1477 "Invalid command list address returned! (%08lx)\n",
1478 ctlr, (unsigned long)complete);
1479 cmd_free(info_p, c, 1);
1480 return (IO_ERROR);
1481 }
1482 } else {
1483 printk( KERN_WARNING
1484 "cpqarray ida%d: idaSendPciCmd Timeout out, "
1485 "No command list address returned!\n",
1486 ctlr);
1487 cmd_free(info_p, c, 1);
1488 return (IO_ERROR);
1489 }
1490
1491 if (c->req.hdr.rcode & 0x00FE) {
1492 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1493 printk( KERN_WARNING
1494 "cpqarray ida%d: idaSendPciCmd, error: "
1495 "Controller failed at init time "
1496 "cmd: 0x%x, return code = 0x%x\n",
1497 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1498
1499 cmd_free(info_p, c, 1);
1500 return (IO_ERROR);
1501 }
1502 }
1503 cmd_free(info_p, c, 1);
1504 return (IO_OK);
1505}
1506
1507/*
1508 * revalidate_allvol is for online array config utilities. After a
1509 * utility reconfigures the drives in the array, it can use this function
1510 * (through an ioctl) to make the driver zap any previous disk structs for
1511 * that controller and get new ones.
1512 *
1513 * Right now I'm using the getgeometry() function to do this, but this
1514 * function should probably be finer grained and allow you to revalidate one
 1515 * particular logical volume (instead of all of them on a particular
1516 * controller).
1517 */
1518static int revalidate_allvol(ctlr_info_t *host)
1519{
1520 int ctlr = host->ctlr;
1521 int i;
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1525 if (host->usage_count > 1) {
1526 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1527 printk(KERN_WARNING "cpqarray: Device busy for volume"
1528 " revalidation (usage=%d)\n", host->usage_count);
1529 return -EBUSY;
1530 }
1531 host->usage_count++;
1532 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1533
1534 /*
1535 * Set the partition and block size structures for all volumes
1536 * on this controller to zero. We will reread all of this data
1537 */
1538 set_capacity(ida_gendisk[ctlr][0], 0);
1539 for (i = 1; i < NWD; i++) {
1540 struct gendisk *disk = ida_gendisk[ctlr][i];
1541 if (disk->flags & GENHD_FL_UP)
1542 del_gendisk(disk);
1543 }
1544 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1545
1546 /*
1547 * Tell the array controller not to give us any interrupts while
1548 * we check the new geometry. Then turn interrupts back on when
1549 * we're done.
1550 */
1551 host->access.set_intr_mask(host, 0);
1552 getgeometry(ctlr);
1553 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1554
1555 for(i=0; i<NWD; i++) {
1556 struct gendisk *disk = ida_gendisk[ctlr][i];
1557 drv_info_t *drv = &host->drv[i];
1558 if (i && !drv->nr_blks)
1559 continue;
1560 blk_queue_hardsect_size(host->queue, drv->blk_size);
1561 set_capacity(disk, drv->nr_blks);
1562 disk->queue = host->queue;
1563 disk->private_data = drv;
1564 if (i)
1565 add_disk(disk);
1566 }
1567
1568 host->usage_count--;
1569 return 0;
1570}
1571
1572static int ida_revalidate(struct gendisk *disk)
1573{
1574 drv_info_t *drv = disk->private_data;
1575 set_capacity(disk, drv->nr_blks);
1576 return 0;
1577}
1578
1579/********************************************************************
1580 name: pollcomplete
 1581	Poll for a command to complete.
1582 The memory mapped FIFO is polled for the completion.
1583 Used only at init time, interrupts disabled.
1584 ********************************************************************/
1585static int pollcomplete(int ctlr)
1586{
1587 int done;
1588 int i;
1589
1590 /* Wait (up to 2 seconds) for a command to complete */
1591
1592 for (i = 200000; i > 0; i--) {
1593 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1594 if (done == 0) {
1595 udelay(10); /* a short fixed delay */
1596 } else
1597 return (done);
1598 }
1599 /* Invalid address to tell caller we ran out of time */
1600 return 1;
1601}
1602/*****************************************************************
1603 start_fwbk
 1604	Starts the controller firmware's background processing.
1605 Currently only the Integrated Raid controller needs this done.
1606 If the PCI mem address registers are written to after this,
1607 data corruption may occur
1608*****************************************************************/
1609static void start_fwbk(int ctlr)
1610{
1611 id_ctlr_t *id_ctlr_buf;
1612 int ret_code;
1613
1614 if( (hba[ctlr]->board_id != 0x40400E11)
1615 && (hba[ctlr]->board_id != 0x40480E11) )
1616
 1617	/* Not an Integrated Raid controller, so there is nothing for us to do */
1618 return;
1619 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1620 " processing\n");
 1621	/* The command returns no data, but sendcmd() still needs a
 1622	   buffer */
1623 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1624 if(id_ctlr_buf==NULL)
1625 {
1626 printk(KERN_WARNING "cpqarray: Out of memory. "
1627 "Unable to start background processing.\n");
1628 return;
1629 }
1630 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1631 id_ctlr_buf, 0, 0, 0, 0);
1632 if(ret_code != IO_OK)
1633 printk(KERN_WARNING "cpqarray: Unable to start"
1634 " background processing\n");
1635
1636 kfree(id_ctlr_buf);
1637}
1638/*****************************************************************
1639 getgeometry
1640 Get ida logical volume geometry from the controller
 1641	This is a large bit of code which once existed in two flavors.
 1642	It is used only at init time.
1643*****************************************************************/
1644static void getgeometry(int ctlr)
1645{
1646 id_log_drv_t *id_ldrive;
1647 id_ctlr_t *id_ctlr_buf;
1648 sense_log_drv_stat_t *id_lstatus_buf;
1649 config_t *sense_config_buf;
1650 unsigned int log_unit, log_index;
1651 int ret_code, size;
1652 drv_info_t *drv;
1653 ctlr_info_t *info_p = hba[ctlr];
1654 int i;
1655
1656 info_p->log_drv_map = 0;
1657
1658 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1659 if(id_ldrive == NULL)
1660 {
1661 printk( KERN_ERR "cpqarray: out of memory.\n");
1662 return;
1663 }
1664
1665 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1666 if(id_ctlr_buf == NULL)
1667 {
1668 kfree(id_ldrive);
1669 printk( KERN_ERR "cpqarray: out of memory.\n");
1670 return;
1671 }
1672
1673 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1674 if(id_lstatus_buf == NULL)
1675 {
1676 kfree(id_ctlr_buf);
1677 kfree(id_ldrive);
1678 printk( KERN_ERR "cpqarray: out of memory.\n");
1679 return;
1680 }
1681
1682 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1683 if(sense_config_buf == NULL)
1684 {
1685 kfree(id_lstatus_buf);
1686 kfree(id_ctlr_buf);
1687 kfree(id_ldrive);
1688 printk( KERN_ERR "cpqarray: out of memory.\n");
1689 return;
1690 }
1691
1692 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1693 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1694 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1695 memset(sense_config_buf, 0, sizeof(config_t));
1696
1697 info_p->phys_drives = 0;
1698 info_p->log_drv_map = 0;
1699 info_p->drv_assign_map = 0;
1700 info_p->drv_spare_map = 0;
1701 info_p->mp_failed_drv_map = 0; /* only initialized here */
1702 /* Get controllers info for this logical drive */
1703 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1704 if (ret_code == IO_ERROR) {
1705 /*
 1706	 * If we can't get the controller info, set the logical drive map to 0,
1707 * so the idastubopen will fail on all logical drives
1708 * on the controller.
1709 */
1710 /* Free all the buffers and return */
1711 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1712 kfree(sense_config_buf);
1713 kfree(id_lstatus_buf);
1714 kfree(id_ctlr_buf);
1715 kfree(id_ldrive);
1716 return;
1717 }
1718
1719 info_p->log_drives = id_ctlr_buf->nr_drvs;
1720 for(i=0;i<4;i++)
1721 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1722 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1723
1724 printk(" (%s)\n", info_p->product_name);
1725 /*
1726 * Initialize logical drive map to zero
1727 */
1728 log_index = 0;
1729 /*
1730 * Get drive geometry for all logical drives
1731 */
1732 if (id_ctlr_buf->nr_drvs > 16)
 1733		printk(KERN_WARNING "cpqarray ida%d: This driver supports "
 1734			"16 logical drives per controller. "
 1735			"Additional drives will not be "
 1736			"detected.\n", ctlr);
1737
1738 for (log_unit = 0;
1739 (log_index < id_ctlr_buf->nr_drvs)
1740 && (log_unit < NWD);
1741 log_unit++) {
1742 struct gendisk *disk = ida_gendisk[ctlr][log_unit];
1743
1744 size = sizeof(sense_log_drv_stat_t);
1745
1746 /*
1747 Send "Identify logical drive status" cmd
1748 */
1749 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1750 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1751 if (ret_code == IO_ERROR) {
1752 /*
 1753			  If we can't get the logical drive status, set
1754 the logical drive map to 0, so the
1755 idastubopen will fail for all logical drives
1756 on the controller.
1757 */
1758 info_p->log_drv_map = 0;
1759 printk( KERN_WARNING
1760 "cpqarray ida%d: idaGetGeometry - Controller"
1761 " failed to report status of logical drive %d\n"
1762 "Access to this controller has been disabled\n",
1763 ctlr, log_unit);
1764 /* Free all the buffers and return */
1765 kfree(sense_config_buf);
1766 kfree(id_lstatus_buf);
1767 kfree(id_ctlr_buf);
1768 kfree(id_ldrive);
1769 return;
1770 }
1771 /*
1772 Make sure the logical drive is configured
1773 */
1774 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1775 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1776 sizeof(id_log_drv_t), 0, 0, log_unit);
1777 /*
1778 If error, the bit for this
1779 logical drive won't be set and
1780 idastubopen will return error.
1781 */
1782 if (ret_code != IO_ERROR) {
1783 drv = &info_p->drv[log_unit];
1784 drv->blk_size = id_ldrive->blk_size;
1785 drv->nr_blks = id_ldrive->nr_blks;
1786 drv->cylinders = id_ldrive->drv.cyl;
1787 drv->heads = id_ldrive->drv.heads;
1788 drv->sectors = id_ldrive->drv.sect_per_track;
1789 info_p->log_drv_map |= (1 << log_unit);
1790
1791 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1792 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1793 ret_code = sendcmd(SENSE_CONFIG,
1794 ctlr, sense_config_buf,
1795 sizeof(config_t), 0, 0, log_unit);
1796 if (ret_code == IO_ERROR) {
1797 info_p->log_drv_map = 0;
1798 /* Free all the buffers and return */
1799 printk(KERN_ERR "cpqarray: error sending sense config\n");
1800 kfree(sense_config_buf);
1801 kfree(id_lstatus_buf);
1802 kfree(id_ctlr_buf);
1803 kfree(id_ldrive);
1804 return;
1805
1806 }
1807
1808 info_p->phys_drives =
1809 sense_config_buf->ctlr_phys_drv;
1810 info_p->drv_assign_map
1811 |= sense_config_buf->drv_asgn_map;
1812 info_p->drv_assign_map
1813 |= sense_config_buf->spare_asgn_map;
1814 info_p->drv_spare_map
1815 |= sense_config_buf->spare_asgn_map;
1816 } /* end of if no error on id_ldrive */
1817 log_index = log_index + 1;
1818 } /* end of if logical drive configured */
1819 } /* end of for log_unit */
1820 kfree(sense_config_buf);
1821 kfree(id_ldrive);
1822 kfree(id_lstatus_buf);
1823 kfree(id_ctlr_buf);
1824 return;
1825
1826}
1827
1828static void __exit cpqarray_exit(void)
1829{
1830 int i;
1831
1832 pci_unregister_driver(&cpqarray_pci_driver);
1833
1834 /* Double check that all controller entries have been removed */
1835 for(i=0; i<MAX_CTLR; i++) {
1836 if (hba[i] != NULL) {
1837 printk(KERN_WARNING "cpqarray: Removing EISA "
1838 "controller %d\n", i);
1839 cpqarray_remove_one_eisa(i);
1840 }
1841 }
1842
1843 remove_proc_entry("cpqarray", proc_root_driver);
1844}
1845
1846module_init(cpqarray_init)
1847module_exit(cpqarray_exit)