1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/bio.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/major.h>
31#include <linux/fs.h>
32#include <linux/blkpg.h>
33#include <linux/timer.h>
34#include <linux/proc_fs.h>
ff2c3de3 35#include <linux/seq_file.h>
36#include <linux/init.h>
37#include <linux/hdreg.h>
2a48fc0a 38#include <linux/mutex.h>
39#include <linux/spinlock.h>
40#include <linux/blkdev.h>
41#include <linux/genhd.h>
11763609 42#include <linux/scatterlist.h>
43#include <asm/uaccess.h>
44#include <asm/io.h>
45
46
47#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
48
49#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
50#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
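/*
 * Illustrative example (derived from the macro above):
 * SMART2_DRIVER_VERSION(2,6,0) evaluates to (2<<16)|(6<<8)|0 == 0x00020600,
 * so DRIVER_VERSION packs major 2, minor 6, subminor 0 into one integer,
 * which is what the IDADRIVERVERSION ioctl later hands back to userspace.
 */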
51
52/* Embedded module documentation macros - see modules.h */
53/* Original author Chris Frantz - Compaq Computer Corporation */
54MODULE_AUTHOR("Compaq Computer Corporation");
55MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
56MODULE_LICENSE("GPL");
57
58#include "cpqarray.h"
59#include "ida_cmd.h"
60#include "smart1,2.h"
61#include "ida_ioctl.h"
62
63#define READ_AHEAD 128
64#define NR_CMDS 128 /* This could probably go as high as ~400 */
65
66#define MAX_CTLR 8
67#define CTLR_SHIFT 8
68
69#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
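/*
 * Note: with the 32-bit mask above, request pages that sit above 4 GiB are
 * bounced by the block layer (see the blk_queue_bounce_limit() call in
 * cpqarray_register_ctlr(), which passes the PCI device's dma_mask).
 */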
70
2a48fc0a 71static DEFINE_MUTEX(cpqarray_mutex);
72static int nr_ctlr;
73static ctlr_info_t *hba[MAX_CTLR];
74
75static int eisa[8];
76
945f390f 77#define NR_PRODUCTS ARRAY_SIZE(products)
78
79/* board_id = Subsystem Device ID & Vendor ID
80 * product = Marketing Name for the board
945f390f 81 * access = Address of the struct of function pointers
82 */
83static struct board_type products[] = {
84 { 0x0040110E, "IDA", &smart1_access },
85 { 0x0140110E, "IDA-2", &smart1_access },
86 { 0x1040110E, "IAES", &smart1_access },
87 { 0x2040110E, "SMART", &smart1_access },
88 { 0x3040110E, "SMART-2/E", &smart2e_access },
89 { 0x40300E11, "SMART-2/P", &smart2_access },
90 { 0x40310E11, "SMART-2SL", &smart2_access },
91 { 0x40320E11, "Smart Array 3200", &smart2_access },
92 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
93 { 0x40340E11, "Smart Array 221", &smart2_access },
94 { 0x40400E11, "Integrated Array", &smart4_access },
95 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
96 { 0x40500E11, "Smart Array 4200", &smart4_access },
97 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
98 { 0x40580E11, "Smart Array 431", &smart4_access },
99};
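/*
 * Example of how a board_id decodes (values taken from the tables here):
 * 0x40330E11 is subsystem device ID 0x4033 in the upper half and subsystem
 * vendor ID 0x0E11 (Compaq) in the lower half, i.e. the Smart Array 3100ES
 * entry that also appears in the PCI ID table below.
 */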
100
101/* define the PCI info for the PCI cards this driver can control */
102static const struct pci_device_id cpqarray_pci_device_id[] =
103{
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
106 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
108 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
109 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
110 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
112 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
113 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
122 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
123 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
124 { 0 }
125};
126
127MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
128
129static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
130
131/* Debug... */
132#define DBG(s) do { s } while(0)
133/* Debug (general info)... */
134#define DBGINFO(s) do { } while(0)
135/* Debug Paranoid... */
136#define DBGP(s) do { } while(0)
137/* Debug Extra Paranoid... */
138#define DBGPX(s) do { } while(0)
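/*
 * Usage sketch for the macros above: DBG(printk("x=%d\n", x);) always
 * executes its statements, while DBGINFO/DBGP/DBGPX expand to empty
 * do { } while(0) bodies unless their definitions are changed to
 * "do { s } while(0)" when debugging.
 */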
139
140static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
141static void __iomem *remap_pci_mem(ulong base, ulong size);
142static int cpqarray_eisa_detect(void);
143static int pollcomplete(int ctlr);
144static void getgeometry(int ctlr);
145static void start_fwbk(int ctlr);
146
147static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
148static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
149
150static void free_hba(int i);
151static int alloc_cpqarray_hba(void);
152
153static int sendcmd(
154 __u8 cmd,
155 int ctlr,
156 void *buff,
157 size_t size,
158 unsigned int blk,
159 unsigned int blkcnt,
160 unsigned int log_unit );
161
6e9624b8 162static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
db2a144b 163static void ida_release(struct gendisk *disk, fmode_t mode);
47844fad 164static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
a885c8c4 165static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
166static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
167
165125e1 168static void do_ida_request(struct request_queue *q);
169static void start_io(ctlr_info_t *h);
170
171static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
172static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
173static inline void complete_command(cmdlist_t *cmd, int timeout);
174
7d12e780 175static irqreturn_t do_ida_intr(int irq, void *dev_id);
176static void ida_timer(unsigned long tdata);
177static int ida_revalidate(struct gendisk *disk);
178static int revalidate_allvol(ctlr_info_t *host);
179static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
180
181#ifdef CONFIG_PROC_FS
182static void ida_procinit(int i);
183#else
184static void ida_procinit(int i) {}
185#endif
186
187static inline drv_info_t *get_drv(struct gendisk *disk)
188{
189 return disk->private_data;
190}
191
192static inline ctlr_info_t *get_host(struct gendisk *disk)
193{
194 return disk->queue->queuedata;
195}
196
197
83d5cde4 198static const struct block_device_operations ida_fops = {
1da177e4 199 .owner = THIS_MODULE,
6e9624b8 200 .open = ida_unlocked_open,
47844fad 201 .release = ida_release,
8a6cfeb6 202 .ioctl = ida_ioctl,
a885c8c4 203 .getgeo = ida_getgeo,
204 .revalidate_disk= ida_revalidate,
205};
206
207
208#ifdef CONFIG_PROC_FS
209
210static struct proc_dir_entry *proc_array;
ff2c3de3 211static const struct file_operations ida_proc_fops;
212
213/*
214 * Get us a file in /proc/array that says something about each controller.
215 * Create /proc/array if it doesn't exist yet.
216 */
217static void __init ida_procinit(int i)
218{
219 if (proc_array == NULL) {
928b4d8c 220 proc_array = proc_mkdir("driver/cpqarray", NULL);
221 if (!proc_array) return;
222 }
223
ff2c3de3 224 proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
225}
226
227/*
228 * Report information about this controller.
229 */
ff2c3de3 230static int ida_proc_show(struct seq_file *m, void *v)
1da177e4 231{
232 int i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)m->private;
234 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c;
237 unsigned long flags;
238#endif
239
240 ctlr = h->ctlr;
ff2c3de3 241 seq_printf(m, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n"
245 " Memory Address: 0x%08lx\n"
246 " I/O Port: 0x%04x\n"
247 " IRQ: %d\n"
248 " Logical drives: %d\n"
249 " Physical drives: %d\n\n"
250 " Current Q depth: %d\n"
251 " Max Q depth since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit);
260
ff2c3de3 261 seq_puts(m, "Logical Drive Info:\n");
262
263 for(i=0; i<h->log_drives; i++) {
264 drv = &h->drv[i];
ff2c3de3 265 seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
1da177e4 266 ctlr, i, drv->blk_size, drv->nr_blks);
267 }
268
269#ifdef CPQ_PROC_PRINT_QUEUES
270 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
ff2c3de3 271 seq_puts(m, "\nCurrent Queues:\n");
272
273 c = h->reqQ;
ff2c3de3 274 seq_printf(m, "reqQ = %p", c);
275 if (c) c=c->next;
276 while(c && c != h->reqQ) {
ff2c3de3 277 seq_printf(m, "->%p", c);
278 c=c->next;
279 }
280
281 c = h->cmpQ;
ff2c3de3 282 seq_printf(m, "\ncmpQ = %p", c);
283 if (c) c=c->next;
284 while(c && c != h->cmpQ) {
ff2c3de3 285 seq_printf(m, "->%p", c);
286 c=c->next;
287 }
288
ff2c3de3 289 seq_putc(m, '\n');
290 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
291#endif
ff2c3de3 292 seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
1da177e4 293 h->nr_allocs, h->nr_frees);
ff2c3de3 294 return 0;
1da177e4 295}
296
297static int ida_proc_open(struct inode *inode, struct file *file)
298{
d9dda78b 299 return single_open(file, ida_proc_show, PDE_DATA(inode));
300}
301
302static const struct file_operations ida_proc_fops = {
303 .owner = THIS_MODULE,
304 .open = ida_proc_open,
305 .read = seq_read,
306 .llseek = seq_lseek,
307 .release = single_release,
308};
309#endif /* CONFIG_PROC_FS */
310
311module_param_array(eisa, int, NULL, 0);
312
313static void release_io_mem(ctlr_info_t *c)
314{
	/* if the I/O region was never reserved, do nothing */
316 if( c->io_mem_addr == 0)
317 return;
318 release_region(c->io_mem_addr, c->io_mem_length);
319 c->io_mem_addr = 0;
320 c->io_mem_length = 0;
321}
322
8d85fce7 323static void cpqarray_remove_one(int i)
324{
325 int j;
326 char buff[4];
327
	/* sendcmd will turn off interrupts and send the flush
	 * to write all data in the battery-backed cache out to the disks.
	 * No data is returned, but we don't want to pass NULL to sendcmd. */
331 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
332 {
333 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
334 i);
335 }
336 free_irq(hba[i]->intr, hba[i]);
337 iounmap(hba[i]->vaddr);
338 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
339 del_timer(&hba[i]->timer);
340 remove_proc_entry(hba[i]->devname, proc_array);
341 pci_free_consistent(hba[i]->pci_dev,
342 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
343 hba[i]->cmd_pool_dhandle);
344 kfree(hba[i]->cmd_pool_bits);
345 for(j = 0; j < NWD; j++) {
346 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
347 del_gendisk(ida_gendisk[i][j]);
348 put_disk(ida_gendisk[i][j]);
349 }
350 blk_cleanup_queue(hba[i]->queue);
351 release_io_mem(hba[i]);
352 free_hba(i);
353}
354
8d85fce7 355static void cpqarray_remove_one_pci(struct pci_dev *pdev)
356{
357 int i;
358 ctlr_info_t *tmp_ptr;
359
360 if (pci_get_drvdata(pdev) == NULL) {
361 printk( KERN_ERR "cpqarray: Unable to remove device \n");
362 return;
363 }
364
365 tmp_ptr = pci_get_drvdata(pdev);
366 i = tmp_ptr->ctlr;
367 if (hba[i] == NULL) {
		printk(KERN_ERR "cpqarray: controller %d appears to have "
			"already been removed\n", i);
370 return;
371 }
372 pci_set_drvdata(pdev, NULL);
373
374 cpqarray_remove_one(i);
375}
376
/* Remove an instance that was not removed automatically;
 * it must be an EISA card.
379 */
8d85fce7 380static void cpqarray_remove_one_eisa(int i)
381{
382 if (hba[i] == NULL) {
		printk(KERN_ERR "cpqarray: controller %d appears to have "
			"already been removed\n", i);
385 return;
386 }
387 cpqarray_remove_one(i);
388}
389
390/* pdev is NULL for eisa */
8d85fce7 391static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
1da177e4 392{
165125e1 393 struct request_queue *q;
394 int j;
395
396 /*
397 * register block devices
398 * Find disks and fill in structs
399 * Get an interrupt, set the Q depth and get into /proc
400 */
401
	/* If this is successful it should ensure that we are the only */
403 /* instance of the driver */
404 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
405 goto Enomem4;
406 }
407 hba[i]->access.set_intr_mask(hba[i], 0);
408 if (request_irq(hba[i]->intr, do_ida_intr,
69ab3912 409 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
410 {
411 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
412 hba[i]->intr, hba[i]->devname);
413 goto Enomem3;
414 }
415
416 for (j=0; j<NWD; j++) {
417 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
418 if (!ida_gendisk[i][j])
419 goto Enomem2;
420 }
421
2e4934aa 422 hba[i]->cmd_pool = pci_alloc_consistent(
423 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
424 &(hba[i]->cmd_pool_dhandle));
2e4934aa 425 hba[i]->cmd_pool_bits = kcalloc(
061837bc 426 DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
427 GFP_KERNEL);
428
429 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
430 goto Enomem1;
431
432 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
433 printk(KERN_INFO "cpqarray: Finding drives on %s",
434 hba[i]->devname);
435
436 spin_lock_init(&hba[i]->lock);
437 q = blk_init_queue(do_ida_request, &hba[i]->lock);
438 if (!q)
439 goto Enomem1;
440
441 hba[i]->queue = q;
442 q->queuedata = hba[i];
443
444 getgeometry(i);
445 start_fwbk(i);
446
447 ida_procinit(i);
448
449 if (pdev)
450 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
451
452 /* This is a hardware imposed limit. */
8a78362c 453 blk_queue_max_segments(q, SG_MAX);
1da177e4 454
455 init_timer(&hba[i]->timer);
456 hba[i]->timer.expires = jiffies + IDA_TIMER;
457 hba[i]->timer.data = (unsigned long)hba[i];
458 hba[i]->timer.function = ida_timer;
459 add_timer(&hba[i]->timer);
460
461 /* Enable IRQ now that spinlock and rate limit timer are set up */
462 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
463
464 for(j=0; j<NWD; j++) {
465 struct gendisk *disk = ida_gendisk[i][j];
466 drv_info_t *drv = &hba[i]->drv[j];
467 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
468 disk->major = COMPAQ_SMART2_MAJOR + i;
469 disk->first_minor = j<<NWD_SHIFT;
470 disk->fops = &ida_fops;
471 if (j && !drv->nr_blks)
472 continue;
e1defc4f 473 blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
474 set_capacity(disk, drv->nr_blks);
475 disk->queue = hba[i]->queue;
476 disk->private_data = drv;
477 add_disk(disk);
478 }
479
480 /* done ! */
481 return(i);
482
483Enomem1:
484 nr_ctlr = i;
485 kfree(hba[i]->cmd_pool_bits);
486 if (hba[i]->cmd_pool)
487 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
488 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
489Enomem2:
490 while (j--) {
491 put_disk(ida_gendisk[i][j]);
492 ida_gendisk[i][j] = NULL;
493 }
494 free_irq(hba[i]->intr, hba[i]);
495Enomem3:
496 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
497Enomem4:
498 if (pdev)
499 pci_set_drvdata(pdev, NULL);
500 release_io_mem(hba[i]);
501 free_hba(i);
502
	printk(KERN_ERR "cpqarray: out of memory\n");
504
505 return -1;
506}
507
508static int cpqarray_init_one(struct pci_dev *pdev,
509 const struct pci_device_id *ent)
510{
511 int i;
512
513 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
514 " bus %d dev %d func %d\n",
515 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
516 PCI_FUNC(pdev->devfn));
517 i = alloc_cpqarray_hba();
518 if( i < 0 )
519 return (-1);
520 memset(hba[i], 0, sizeof(ctlr_info_t));
521 sprintf(hba[i]->devname, "ida%d", i);
522 hba[i]->ctlr = i;
523 /* Initialize the pdev driver private data */
524 pci_set_drvdata(pdev, hba[i]);
525
526 if (cpqarray_pci_init(hba[i], pdev) != 0) {
527 pci_set_drvdata(pdev, NULL);
528 release_io_mem(hba[i]);
529 free_hba(i);
530 return -1;
531 }
532
533 return (cpqarray_register_ctlr(i, pdev));
534}
535
536static struct pci_driver cpqarray_pci_driver = {
537 .name = "cpqarray",
538 .probe = cpqarray_init_one,
8d85fce7 539 .remove = cpqarray_remove_one_pci,
540 .id_table = cpqarray_pci_device_id,
541};
542
543/*
544 * This is it. Find all the controllers and register them.
545 * returns the number of block devices registered.
546 */
547static int __init cpqarray_init(void)
548{
549 int num_cntlrs_reg = 0;
550 int i;
551 int rc = 0;
552
553 /* detect controllers */
554 printk(DRIVER_NAME "\n");
555
556 rc = pci_register_driver(&cpqarray_pci_driver);
557 if (rc)
558 return rc;
559 cpqarray_eisa_detect();
560
561 for (i=0; i < MAX_CTLR; i++) {
562 if (hba[i] != NULL)
563 num_cntlrs_reg++;
564 }
565
566 if (num_cntlrs_reg)
567 return 0;
568 else {
569 pci_unregister_driver(&cpqarray_pci_driver);
570 return -ENODEV;
571 }
572}
573
574/* Function to find the first free pointer into our hba[] array */
575/* Returns -1 if no free entries are left. */
576static int alloc_cpqarray_hba(void)
577{
578 int i;
579
580 for(i=0; i< MAX_CTLR; i++) {
581 if (hba[i] == NULL) {
582 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
583 if(hba[i]==NULL) {
584 printk(KERN_ERR "cpqarray: out of memory.\n");
585 return (-1);
586 }
587 return (i);
588 }
589 }
590 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
591 " of 8 controllers.\n");
592 return(-1);
593}
594
595static void free_hba(int i)
596{
597 kfree(hba[i]);
598 hba[i]=NULL;
599}
600
601/*
602 * Find the IO address of the controller, its IRQ and so forth. Fill
603 * in some basic stuff into the ctlr_info_t structure.
604 */
605static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
606{
607 ushort vendor_id, device_id, command;
608 unchar cache_line_size, latency_timer;
609 unchar irq, revision;
610 unsigned long addr[6];
611 __u32 board_id;
612
613 int i;
614
615 c->pci_dev = pdev;
0061d386 616 pci_set_master(pdev);
617 if (pci_enable_device(pdev)) {
618 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
619 return -1;
620 }
621 vendor_id = pdev->vendor;
622 device_id = pdev->device;
5a3a76e6 623 revision = pdev->revision;
624 irq = pdev->irq;
625
626 for(i=0; i<6; i++)
627 addr[i] = pci_resource_start(pdev, i);
628
629 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
630 {
631 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
632 return -1;
633 }
634
635 pci_read_config_word(pdev, PCI_COMMAND, &command);
636 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
637 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
638
639 pci_read_config_dword(pdev, 0x2c, &board_id);
640
641 /* check to see if controller has been disabled */
642 if(!(command & 0x02)) {
643 printk(KERN_WARNING
644 "cpqarray: controller appears to be disabled\n");
645 return(-1);
646 }
647
648DBGINFO(
649 printk("vendor_id = %x\n", vendor_id);
650 printk("device_id = %x\n", device_id);
651 printk("command = %x\n", command);
652 for(i=0; i<6; i++)
653 printk("addr[%d] = %lx\n", i, addr[i]);
654 printk("revision = %x\n", revision);
655 printk("irq = %x\n", irq);
656 printk("cache_line_size = %x\n", cache_line_size);
657 printk("latency_timer = %x\n", latency_timer);
658 printk("board_id = %x\n", board_id);
659);
660
661 c->intr = irq;
662
663 for(i=0; i<6; i++) {
664 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
665 { /* IO space */
666 c->io_mem_addr = addr[i];
667 c->io_mem_length = pci_resource_end(pdev, i)
668 - pci_resource_start(pdev, i) + 1;
669 if(!request_region( c->io_mem_addr, c->io_mem_length,
670 "cpqarray"))
671 {
672 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
673 c->io_mem_addr = 0;
674 c->io_mem_length = 0;
675 }
676 break;
677 }
678 }
679
680 c->paddr = 0;
681 for(i=0; i<6; i++)
682 if (!(pci_resource_flags(pdev, i) &
683 PCI_BASE_ADDRESS_SPACE_IO)) {
684 c->paddr = pci_resource_start (pdev, i);
685 break;
686 }
687 if (!c->paddr)
688 return -1;
689 c->vaddr = remap_pci_mem(c->paddr, 128);
690 if (!c->vaddr)
691 return -1;
692 c->board_id = board_id;
693
694 for(i=0; i<NR_PRODUCTS; i++) {
695 if (board_id == products[i].board_id) {
696 c->product_name = products[i].product_name;
697 c->access = *(products[i].access);
698 break;
699 }
700 }
701 if (i == NR_PRODUCTS) {
702 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
703 " to access the SMART Array controller %08lx\n",
704 (unsigned long)board_id);
705 return -1;
706 }
707
708 return 0;
709}
710
711/*
712 * Map (physical) PCI mem into (virtual) kernel space
713 */
714static void __iomem *remap_pci_mem(ulong base, ulong size)
715{
716 ulong page_base = ((ulong) base) & PAGE_MASK;
717 ulong page_offs = ((ulong) base) - page_base;
718 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
719
720 return (page_remapped ? (page_remapped + page_offs) : NULL);
721}
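/*
 * Worked example (hypothetical base address, assuming 4 KiB pages):
 * remap_pci_mem(0xfebf0080, 128) rounds the base down to 0xfebf0000,
 * ioremaps 0x80 + 128 bytes starting there, and returns the mapping
 * advanced by 0x80 so the caller can use the pointer directly.
 */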
722
723#ifndef MODULE
724/*
725 * Config string is a comma separated set of i/o addresses of EISA cards.
726 */
727static int cpqarray_setup(char *str)
728{
729 int i, ints[9];
730
731 (void)get_options(str, ARRAY_SIZE(ints), ints);
732
733 for(i=0; i<ints[0] && i<8; i++)
734 eisa[i] = ints[i+1];
735 return 1;
736}
737
738__setup("smart2=", cpqarray_setup);
739
740#endif
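/*
 * Example (hypothetical I/O addresses): booting with "smart2=0x1000,0x2000"
 * makes cpqarray_setup() store 0x1000 and 0x2000 in eisa[0] and eisa[1];
 * cpqarray_eisa_detect() then reads the board signature at each
 * address + 0xC80 to identify the controller.
 */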
741
742/*
743 * Find an EISA controller's signature. Set up an hba if we find it.
744 */
8d85fce7 745static int cpqarray_eisa_detect(void)
746{
747 int i=0, j;
748 __u32 board_id;
749 int intr;
750 int ctlr;
751 int num_ctlr = 0;
752
753 while(i<8 && eisa[i]) {
754 ctlr = alloc_cpqarray_hba();
755 if(ctlr == -1)
756 break;
757 board_id = inl(eisa[i]+0xC80);
758 for(j=0; j < NR_PRODUCTS; j++)
759 if (board_id == products[j].board_id)
760 break;
761
762 if (j == NR_PRODUCTS) {
763 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
764 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
765 continue;
766 }
767
768 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
769 hba[ctlr]->io_mem_addr = eisa[i];
770 hba[ctlr]->io_mem_length = 0x7FF;
771 if(!request_region(hba[ctlr]->io_mem_addr,
772 hba[ctlr]->io_mem_length,
773 "cpqarray"))
774 {
775 printk(KERN_WARNING "cpqarray: I/O range already in "
776 "use addr = %lx length = %ld\n",
777 hba[ctlr]->io_mem_addr,
778 hba[ctlr]->io_mem_length);
779 free_hba(ctlr);
780 continue;
781 }
782
783 /*
784 * Read the config register to find our interrupt
785 */
786 intr = inb(eisa[i]+0xCC0) >> 4;
787 if (intr & 1) intr = 11;
788 else if (intr & 2) intr = 10;
789 else if (intr & 4) intr = 14;
790 else if (intr & 8) intr = 15;
791
792 hba[ctlr]->intr = intr;
793 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
794 hba[ctlr]->product_name = products[j].product_name;
795 hba[ctlr]->access = *(products[j].access);
796 hba[ctlr]->ctlr = ctlr;
797 hba[ctlr]->board_id = board_id;
798 hba[ctlr]->pci_dev = NULL; /* not PCI */
799
800DBGINFO(
801 printk("i = %d, j = %d\n", i, j);
802 printk("irq = %x\n", intr);
803 printk("product name = %s\n", products[j].product_name);
804 printk("board_id = %x\n", board_id);
805);
806
807 num_ctlr++;
808 i++;
809
810 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
811 printk(KERN_WARNING
812 "cpqarray: Can't register EISA controller %d\n",
813 ctlr);
814
815 }
816
817 return num_ctlr;
818}
819
820/*
821 * Open. Make sure the device is really there.
822 */
47844fad 823static int ida_open(struct block_device *bdev, fmode_t mode)
1da177e4 824{
825 drv_info_t *drv = get_drv(bdev->bd_disk);
826 ctlr_info_t *host = get_host(bdev->bd_disk);
1da177e4 827
47844fad 828 DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
829 /*
830 * Root is allowed to open raw volume zero even if it's not configured
831 * so array config can still work. I don't think I really like this,
 * but I'm already using way too many device nodes to claim another one
833 * for "raw controller".
834 */
835 if (!drv->nr_blks) {
836 if (!capable(CAP_SYS_RAWIO))
837 return -ENXIO;
838 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
839 return -ENXIO;
840 }
841 host->usage_count++;
842 return 0;
843}
844
845static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
846{
847 int ret;
848
2a48fc0a 849 mutex_lock(&cpqarray_mutex);
6e9624b8 850 ret = ida_open(bdev, mode);
2a48fc0a 851 mutex_unlock(&cpqarray_mutex);
852
853 return ret;
854}
855
856/*
857 * Close. Sync first.
858 */
db2a144b 859static void ida_release(struct gendisk *disk, fmode_t mode)
1da177e4 860{
861 ctlr_info_t *host;
862
2a48fc0a 863 mutex_lock(&cpqarray_mutex);
6e9624b8 864 host = get_host(disk);
1da177e4 865 host->usage_count--;
2a48fc0a 866 mutex_unlock(&cpqarray_mutex);
867}
868
869/*
870 * Enqueuing and dequeuing functions for cmdlists.
871 */
872static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
873{
874 if (*Qptr == NULL) {
875 *Qptr = c;
876 c->next = c->prev = c;
877 } else {
878 c->prev = (*Qptr)->prev;
879 c->next = (*Qptr);
880 (*Qptr)->prev->next = c;
881 (*Qptr)->prev = c;
882 }
883}
884
885static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
886{
887 if (c && c->next != c) {
888 if (*Qptr == c) *Qptr = c->next;
889 c->prev->next = c->next;
890 c->next->prev = c->prev;
891 } else {
892 *Qptr = NULL;
893 }
894 return c;
895}
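/*
 * Illustrative trace (hypothetical commands a, b, c): the queue is a
 * circular doubly linked list anchored at *Qptr, so after
 *
 *	addQ(&q, a); addQ(&q, b); addQ(&q, c);
 *
 * the order is a <-> b <-> c with c->next == a and a->prev == c, and
 * removeQ(&q, a) unlinks a and leaves q pointing at b.
 */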
896
897/*
898 * Get a request and submit it to the controller.
899 * This routine needs to grab all the requests it possibly can from the
900 * req Q and submit them. Interrupts are off (and need to be off) when you
901 * are in here (either via the dummy do_ida_request functions or by being
 * called from the interrupt handler).
903 */
165125e1 904static void do_ida_request(struct request_queue *q)
905{
906 ctlr_info_t *h = q->queuedata;
907 cmdlist_t *c;
908 struct request *creq;
909 struct scatterlist tmp_sg[SG_MAX];
910 int i, dir, seg;
911
1da177e4 912queue_next:
9934c8c0 913 creq = blk_peek_request(q);
914 if (!creq)
915 goto startio;
916
089fe1b2 917 BUG_ON(creq->nr_phys_segments > SG_MAX);
918
919 if ((c = cmd_alloc(h,1)) == NULL)
920 goto startio;
921
9934c8c0 922 blk_start_request(creq);
923
924 c->ctlr = h->ctlr;
925 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
926 c->hdr.size = sizeof(rblk_t) >> 2;
927 c->size += sizeof(rblk_t);
928
83096ebf 929 c->req.hdr.blk = blk_rq_pos(creq);
930 c->rq = creq;
931DBGPX(
932 printk("sector=%d, nr_sectors=%u\n",
933 blk_rq_pos(creq), blk_rq_sectors(creq));
1da177e4 934);
45711f1a 935 sg_init_table(tmp_sg, SG_MAX);
936 seg = blk_rq_map_sg(q, creq, tmp_sg);
937
938 /* Now do all the DMA Mappings */
939 if (rq_data_dir(creq) == READ)
940 dir = PCI_DMA_FROMDEVICE;
941 else
942 dir = PCI_DMA_TODEVICE;
943 for( i=0; i < seg; i++)
944 {
945 c->req.sg[i].size = tmp_sg[i].length;
946 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
45711f1a 947 sg_page(&tmp_sg[i]),
948 tmp_sg[i].offset,
949 tmp_sg[i].length, dir);
950 }
83096ebf 951DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
1da177e4 952 c->req.hdr.sg_cnt = seg;
83096ebf 953 c->req.hdr.blk_cnt = blk_rq_sectors(creq);
954 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
955 c->type = CMD_RWREQ;
956
957 /* Put the request on the tail of the request queue */
958 addQ(&h->reqQ, c);
959 h->Qdepth++;
960 if (h->Qdepth > h->maxQsinceinit)
961 h->maxQsinceinit = h->Qdepth;
962
963 goto queue_next;
964
965startio:
966 start_io(h);
967}
968
969/*
970 * start_io submits everything on a controller's request queue
971 * and moves it to the completion queue.
972 *
973 * Interrupts had better be off if you're in here
974 */
975static void start_io(ctlr_info_t *h)
976{
977 cmdlist_t *c;
978
979 while((c = h->reqQ) != NULL) {
980 /* Can't do anything if we're busy */
981 if (h->access.fifo_full(h) == 0)
982 return;
983
984 /* Get the first entry from the request Q */
985 removeQ(&h->reqQ, c);
986 h->Qdepth--;
987
988 /* Tell the controller to do our bidding */
989 h->access.submit_command(h, c);
990
991 /* Get onto the completion Q */
992 addQ(&h->cmpQ, c);
993 }
994}
995
996/*
997 * Mark all buffers that cmd was responsible for
998 */
999static inline void complete_command(cmdlist_t *cmd, int timeout)
1000{
1f794b60 1001 struct request *rq = cmd->rq;
ea6f06f4 1002 int error = 0;
1003 int i, ddir;
1004
1005 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1006 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1007 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1008 cmd->ctlr, cmd->hdr.unit);
1009 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1010 }
1011 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1012 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1013 cmd->ctlr, cmd->hdr.unit);
ea6f06f4 1014 error = -EIO;
1015 }
1016 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1017 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1018 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1019 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1020 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
ea6f06f4 1021 error = -EIO;
1da177e4 1022 }
1023 if (timeout)
1024 error = -EIO;
1025 /* unmap the DMA mapping for all the scatter gather elements */
1026 if (cmd->req.hdr.cmd == IDA_READ)
1027 ddir = PCI_DMA_FROMDEVICE;
1028 else
1029 ddir = PCI_DMA_TODEVICE;
1030 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1031 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1032 cmd->req.sg[i].size, ddir);
1033
1f794b60 1034 DBGPX(printk("Done with %p\n", rq););
40cbbb78 1035 __blk_end_request_all(rq, error);
1036}
1037
1038/*
1039 * The controller will interrupt us upon completion of commands.
1040 * Find the command on the completion queue, remove it, tell the OS and
1041 * try to queue up more IO
1042 */
7d12e780 1043static irqreturn_t do_ida_intr(int irq, void *dev_id)
1044{
1045 ctlr_info_t *h = dev_id;
1046 cmdlist_t *c;
1047 unsigned long istat;
1048 unsigned long flags;
1049 __u32 a,a1;
1050
1051 istat = h->access.intr_pending(h);
1052 /* Is this interrupt for us? */
1053 if (istat == 0)
1054 return IRQ_NONE;
1055
1056 /*
1057 * If there are completed commands in the completion queue,
1058 * we had better do something about it.
1059 */
1060 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1061 if (istat & FIFO_NOT_EMPTY) {
1062 while((a = h->access.command_completed(h))) {
1063 a1 = a; a &= ~3;
1064 if ((c = h->cmpQ) == NULL)
1065 {
1066 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1067 continue;
1068 }
1069 while(c->busaddr != a) {
1070 c = c->next;
1071 if (c == h->cmpQ)
1072 break;
1073 }
1074 /*
1075 * If we've found the command, take it off the
1076 * completion Q and free it
1077 */
1078 if (c->busaddr == a) {
1079 removeQ(&h->cmpQ, c);
1080 /* Check for invalid command.
1081 * Controller returns command error,
1082 * But rcode = 0.
1083 */
1084
1085 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1086 {
1087 c->req.hdr.rcode = RCODE_INVREQ;
1088 }
1089 if (c->type == CMD_RWREQ) {
1090 complete_command(c, 0);
1091 cmd_free(h, c, 1);
1092 } else if (c->type == CMD_IOCTL_PEND) {
1093 c->type = CMD_IOCTL_DONE;
1094 }
1095 continue;
1096 }
1097 }
1098 }
1099
1100 /*
1101 * See if we can queue up some more IO
1102 */
1103 do_ida_request(h->queue);
1104 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1105 return IRQ_HANDLED;
1106}
1107
1108/*
 * This timer was for timing out requests that hadn't completed after
 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is now used to
1111 * reset a flags structure so we don't flood the user with
1112 * "Non-Fatal error" messages.
1113 */
1114static void ida_timer(unsigned long tdata)
1115{
1116 ctlr_info_t *h = (ctlr_info_t*)tdata;
1117
1118 h->timer.expires = jiffies + IDA_TIMER;
1119 add_timer(&h->timer);
1120 h->misc_tflags = 0;
1121}
1122
1123static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1124{
1125 drv_info_t *drv = get_drv(bdev->bd_disk);
1126
1127 if (drv->cylinders) {
1128 geo->heads = drv->heads;
1129 geo->sectors = drv->sectors;
1130 geo->cylinders = drv->cylinders;
1131 } else {
1132 geo->heads = 0xff;
1133 geo->sectors = 0x3f;
1134 geo->cylinders = drv->nr_blks / (0xff*0x3f);
1135 }
1136
1137 return 0;
1138}
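/*
 * Worked example for the fallback branch above (hypothetical volume size):
 * with nr_blks = 2097152 (1 GiB of 512-byte blocks) and no recorded
 * geometry, userspace sees 255 heads, 63 sectors per track and
 * 2097152 / (255 * 63) = 130 cylinders.
 */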
1139
1140/*
1141 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1142 * setting readahead and submitting commands from userspace to the controller.
1143 */
8a6cfeb6 1144static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1da177e4 1145{
1146 drv_info_t *drv = get_drv(bdev->bd_disk);
1147 ctlr_info_t *host = get_host(bdev->bd_disk);
1da177e4 1148 int error;
1149 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1150 ida_ioctl_t *my_io;
1151
1152 switch(cmd) {
1153 case IDAGETDRVINFO:
1154 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1155 return -EFAULT;
1156 return 0;
1157 case IDAPASSTHRU:
1158 if (!capable(CAP_SYS_RAWIO))
1159 return -EPERM;
1160 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1161 if (!my_io)
1162 return -ENOMEM;
1163 error = -EFAULT;
1164 if (copy_from_user(my_io, io, sizeof(*my_io)))
1165 goto out_passthru;
1166 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1167 if (error)
1168 goto out_passthru;
1169 error = -EFAULT;
1170 if (copy_to_user(io, my_io, sizeof(*my_io)))
1171 goto out_passthru;
1172 error = 0;
1173out_passthru:
1174 kfree(my_io);
1175 return error;
1176 case IDAGETCTLRSIG:
1177 if (!arg) return -EINVAL;
1178 if (put_user(host->ctlr_sig, (int __user *)arg))
1179 return -EFAULT;
1180 return 0;
1181 case IDAREVALIDATEVOLS:
47844fad 1182 if (MINOR(bdev->bd_dev) != 0)
1183 return -ENXIO;
1184 return revalidate_allvol(host);
1185 case IDADRIVERVERSION:
1186 if (!arg) return -EINVAL;
1187 if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
1188 return -EFAULT;
1189 return 0;
1190 case IDAGETPCIINFO:
1191 {
1192
1193 ida_pci_info_struct pciinfo;
1194
1195 if (!arg) return -EINVAL;
d862d211 1196 memset(&pciinfo, 0, sizeof(pciinfo));
1197 pciinfo.bus = host->pci_dev->bus->number;
1198 pciinfo.dev_fn = host->pci_dev->devfn;
1199 pciinfo.board_id = host->board_id;
1200 if(copy_to_user((void __user *) arg, &pciinfo,
1201 sizeof( ida_pci_info_struct)))
1202 return -EFAULT;
1203 return(0);
1204 }
1205
1206 default:
1207 return -EINVAL;
1208 }
1209
1210}
1211
1212static int ida_ioctl(struct block_device *bdev, fmode_t mode,
1213 unsigned int cmd, unsigned long param)
1214{
1215 int ret;
1216
2a48fc0a 1217 mutex_lock(&cpqarray_mutex);
8a6cfeb6 1218 ret = ida_locked_ioctl(bdev, mode, cmd, param);
2a48fc0a 1219 mutex_unlock(&cpqarray_mutex);
1220
1221 return ret;
1222}
1223
1224/*
1225 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1226 * The command block (io) has already been copied to kernel space for us,
1227 * however, any elements in the sglist need to be copied to kernel space
1228 * or copied back to userspace.
1229 *
 * Only root may perform a controller passthru command; however, I'm not doing
1231 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1232 * putting a 64M buffer in the sglist is probably a *bad* idea.
1233 */
1234static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1235{
1236 int ctlr = h->ctlr;
1237 cmdlist_t *c;
1238 void *p = NULL;
1239 unsigned long flags;
1240 int error;
1241
1242 if ((c = cmd_alloc(h, 0)) == NULL)
1243 return -ENOMEM;
1244 c->ctlr = ctlr;
1245 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1246 c->hdr.size = sizeof(rblk_t) >> 2;
1247 c->size += sizeof(rblk_t);
1248
1249 c->req.hdr.cmd = io->cmd;
1250 c->req.hdr.blk = io->blk;
1251 c->req.hdr.blk_cnt = io->blk_cnt;
1252 c->type = CMD_IOCTL_PEND;
1253
1254 /* Pre submit processing */
1255 switch(io->cmd) {
1256 case PASSTHRU_A:
1257 p = memdup_user(io->sg[0].addr, io->sg[0].size);
1258 if (IS_ERR(p)) {
1259 error = PTR_ERR(p);
1260 cmd_free(h, c, 0);
1261 return error;
1262 }
1263 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1264 sizeof(ida_ioctl_t),
1265 PCI_DMA_BIDIRECTIONAL);
1266 c->req.sg[0].size = io->sg[0].size;
1267 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1268 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1269 c->req.hdr.sg_cnt = 1;
1270 break;
1271 case IDA_READ:
1272 case READ_FLASH_ROM:
1273 case SENSE_CONTROLLER_PERFORMANCE:
1274 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1275 if (!p)
1276 {
1277 error = -ENOMEM;
1278 cmd_free(h, c, 0);
1279 return(error);
1280 }
1281
1282 c->req.sg[0].size = io->sg[0].size;
1283 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1284 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1285 c->req.hdr.sg_cnt = 1;
1286 break;
1287 case IDA_WRITE:
1288 case IDA_WRITE_MEDIA:
1289 case DIAG_PASS_THRU:
1290 case COLLECT_BUFFER:
1291 case WRITE_FLASH_ROM:
1292 p = memdup_user(io->sg[0].addr, io->sg[0].size);
1293 if (IS_ERR(p)) {
1294 error = PTR_ERR(p);
1295 cmd_free(h, c, 0);
1296 return error;
1da177e4 1297 }
1298 c->req.sg[0].size = io->sg[0].size;
1299 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1300 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1301 c->req.hdr.sg_cnt = 1;
1302 break;
1303 default:
1304 c->req.sg[0].size = sizeof(io->c);
1305 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1306 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1307 c->req.hdr.sg_cnt = 1;
1308 }
1309
1310 /* Put the request on the tail of the request queue */
1311 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1312 addQ(&h->reqQ, c);
1313 h->Qdepth++;
1314 start_io(h);
1315 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1316
1317 /* Wait for completion */
1318 while(c->type != CMD_IOCTL_DONE)
1319 schedule();
1320
1321 /* Unmap the DMA */
1322 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1323 PCI_DMA_BIDIRECTIONAL);
1324 /* Post submit processing */
1325 switch(io->cmd) {
1326 case PASSTHRU_A:
1327 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1328 sizeof(ida_ioctl_t),
1329 PCI_DMA_BIDIRECTIONAL);
1330 case IDA_READ:
1331 case DIAG_PASS_THRU:
1332 case SENSE_CONTROLLER_PERFORMANCE:
1333 case READ_FLASH_ROM:
1334 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1335 kfree(p);
1336 return -EFAULT;
1337 }
1338 /* fall through and free p */
1339 case IDA_WRITE:
1340 case IDA_WRITE_MEDIA:
1341 case COLLECT_BUFFER:
1342 case WRITE_FLASH_ROM:
1343 kfree(p);
1344 break;
1345 default:;
1346 /* Nothing to do */
1347 }
1348
1349 io->rcode = c->req.hdr.rcode;
1350 cmd_free(h, c, 0);
1351 return(0);
1352}
1353
1354/*
1355 * Commands are pre-allocated in a large block. Here we use a simple bitmap
 * scheme to suballocate them to the driver.  Operations that are not time
 * critical (and can wait for kmalloc and possibly sleep) can pass 0 as the
 * get_from_pool argument to get a freshly allocated command.
1359 */
1360static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1361{
1362 cmdlist_t * c;
1363 int i;
1364 dma_addr_t cmd_dhandle;
1365
1366 if (!get_from_pool) {
1367 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1368 sizeof(cmdlist_t), &cmd_dhandle);
1369 if(c==NULL)
1370 return NULL;
1371 } else {
1372 do {
1373 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1374 if (i == NR_CMDS)
1375 return NULL;
1376 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1377 c = h->cmd_pool + i;
1378 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1379 h->nr_allocs++;
1380 }
1381
1382 memset(c, 0, sizeof(cmdlist_t));
1383 c->busaddr = cmd_dhandle;
1384 return c;
1385}
1386
1387static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1388{
1389 int i;
1390
1391 if (!got_from_pool) {
1392 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1393 c->busaddr);
1394 } else {
1395 i = c - h->cmd_pool;
1396 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1397 h->nr_frees++;
1398 }
1399}
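/*
 * Bitmap bookkeeping sketch for the pool path above (hypothetical slot):
 * command slot i lives at bit (i & (BITS_PER_LONG - 1)) of word
 * cmd_pool_bits[i / BITS_PER_LONG]; on a 64-bit build, slot 70 is bit 6
 * of word 1, which is exactly what cmd_alloc() sets and cmd_free() clears.
 */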
1400
1401/***********************************************************************
1402 name: sendcmd
1403 Send a command to an IDA using the memory mapped FIFO interface
1404 and wait for it to complete.
1405 This routine should only be called at init time.
1406***********************************************************************/
1407static int sendcmd(
1408 __u8 cmd,
1409 int ctlr,
1410 void *buff,
1411 size_t size,
1412 unsigned int blk,
1413 unsigned int blkcnt,
1414 unsigned int log_unit )
1415{
1416 cmdlist_t *c;
1417 int complete;
1418 unsigned long temp;
1419 unsigned long i;
1420 ctlr_info_t *info_p = hba[ctlr];
1421
1422 c = cmd_alloc(info_p, 1);
1423 if(!c)
1424 return IO_ERROR;
1425 c->ctlr = ctlr;
1426 c->hdr.unit = log_unit;
1427 c->hdr.prio = 0;
1428 c->hdr.size = sizeof(rblk_t) >> 2;
1429 c->size += sizeof(rblk_t);
1430
1431 /* The request information. */
1432 c->req.hdr.next = 0;
1433 c->req.hdr.rcode = 0;
1434 c->req.bp = 0;
1435 c->req.hdr.sg_cnt = 1;
1436 c->req.hdr.reserved = 0;
1437
1438 if (size == 0)
1439 c->req.sg[0].size = 512;
1440 else
1441 c->req.sg[0].size = size;
1442
1443 c->req.hdr.blk = blk;
1444 c->req.hdr.blk_cnt = blkcnt;
1445 c->req.hdr.cmd = (unsigned char) cmd;
1446 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1447 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1448 /*
1449 * Disable interrupt
1450 */
1451 info_p->access.set_intr_mask(info_p, 0);
1452 /* Make sure there is room in the command FIFO */
1453 /* Actually it should be completely empty at this time. */
1454 for (i = 200000; i > 0; i--) {
1455 temp = info_p->access.fifo_full(info_p);
1456 if (temp != 0) {
1457 break;
1458 }
1459 udelay(10);
1460DBG(
1461 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1462 " waiting!\n", ctlr);
1463);
1464 }
1465 /*
1466 * Send the cmd
1467 */
1468 info_p->access.submit_command(info_p, c);
1469 complete = pollcomplete(ctlr);
1470
1471 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1472 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1473 if (complete != 1) {
1474 if (complete != c->busaddr) {
1475 printk( KERN_WARNING
1476 "cpqarray ida%d: idaSendPciCmd "
1477 "Invalid command list address returned! (%08lx)\n",
1478 ctlr, (unsigned long)complete);
1479 cmd_free(info_p, c, 1);
1480 return (IO_ERROR);
1481 }
1482 } else {
1483 printk( KERN_WARNING
			"cpqarray ida%d: idaSendPciCmd timed out, "
1485 "No command list address returned!\n",
1486 ctlr);
1487 cmd_free(info_p, c, 1);
1488 return (IO_ERROR);
1489 }
1490
1491 if (c->req.hdr.rcode & 0x00FE) {
1492 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1493 printk( KERN_WARNING
1494 "cpqarray ida%d: idaSendPciCmd, error: "
1495 "Controller failed at init time "
1496 "cmd: 0x%x, return code = 0x%x\n",
1497 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1498
1499 cmd_free(info_p, c, 1);
1500 return (IO_ERROR);
1501 }
1502 }
1503 cmd_free(info_p, c, 1);
1504 return (IO_OK);
1505}
1506
1507/*
1508 * revalidate_allvol is for online array config utilities. After a
1509 * utility reconfigures the drives in the array, it can use this function
1510 * (through an ioctl) to make the driver zap any previous disk structs for
1511 * that controller and get new ones.
1512 *
1513 * Right now I'm using the getgeometry() function to do this, but this
1514 * function should probably be finer grained and allow you to revalidate one
 * particular logical volume (instead of all of them on a particular
1516 * controller).
1517 */
1518static int revalidate_allvol(ctlr_info_t *host)
1519{
1520 int ctlr = host->ctlr;
1521 int i;
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1525 if (host->usage_count > 1) {
1526 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1527 printk(KERN_WARNING "cpqarray: Device busy for volume"
1528 " revalidation (usage=%d)\n", host->usage_count);
1529 return -EBUSY;
1530 }
1531 host->usage_count++;
1532 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1533
1534 /*
1535 * Set the partition and block size structures for all volumes
1536 * on this controller to zero. We will reread all of this data
1537 */
1538 set_capacity(ida_gendisk[ctlr][0], 0);
1539 for (i = 1; i < NWD; i++) {
1540 struct gendisk *disk = ida_gendisk[ctlr][i];
1541 if (disk->flags & GENHD_FL_UP)
1542 del_gendisk(disk);
1543 }
1544 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1545
1546 /*
1547 * Tell the array controller not to give us any interrupts while
1548 * we check the new geometry. Then turn interrupts back on when
1549 * we're done.
1550 */
1551 host->access.set_intr_mask(host, 0);
1552 getgeometry(ctlr);
1553 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1554
1555 for(i=0; i<NWD; i++) {
1556 struct gendisk *disk = ida_gendisk[ctlr][i];
1557 drv_info_t *drv = &host->drv[i];
1558 if (i && !drv->nr_blks)
1559 continue;
e1defc4f 1560 blk_queue_logical_block_size(host->queue, drv->blk_size);
1561 set_capacity(disk, drv->nr_blks);
1562 disk->queue = host->queue;
1563 disk->private_data = drv;
1564 if (i)
1565 add_disk(disk);
1566 }
1567
1568 host->usage_count--;
1569 return 0;
1570}
1571
1572static int ida_revalidate(struct gendisk *disk)
1573{
1574 drv_info_t *drv = disk->private_data;
1575 set_capacity(disk, drv->nr_blks);
1576 return 0;
1577}
1578
1579/********************************************************************
1580 name: pollcomplete
1581 Wait polling for a command to complete.
1582 The memory mapped FIFO is polled for the completion.
1583 Used only at init time, interrupts disabled.
1584 ********************************************************************/
1585static int pollcomplete(int ctlr)
1586{
1587 int done;
1588 int i;
1589
1590 /* Wait (up to 2 seconds) for a command to complete */
1591
1592 for (i = 200000; i > 0; i--) {
1593 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1594 if (done == 0) {
1595 udelay(10); /* a short fixed delay */
1596 } else
1597 return (done);
1598 }
1599 /* Invalid address to tell caller we ran out of time */
1600 return 1;
1601}
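/*
 * Timing note: the loop above makes up to 200000 passes with a udelay(10)
 * between unsuccessful polls, which is where the "up to 2 seconds" budget
 * comes from (200000 * 10 us = 2 s).
 */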
1602/*****************************************************************
1603 start_fwbk
	Starts the controller firmware's background processing.
1605 Currently only the Integrated Raid controller needs this done.
1606 If the PCI mem address registers are written to after this,
1607 data corruption may occur
1608*****************************************************************/
1609static void start_fwbk(int ctlr)
1610{
1611 id_ctlr_t *id_ctlr_buf;
1612 int ret_code;
1613
1614 if( (hba[ctlr]->board_id != 0x40400E11)
1615 && (hba[ctlr]->board_id != 0x40480E11) )
1616
	/* Not an Integrated RAID, so there is nothing for us to do */
1618 return;
1619 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1620 " processing\n");
	/* This command does not return anything, but the sendcmd interface
	   needs a buffer */
5cbded58 1623 id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1624 if(id_ctlr_buf==NULL)
1625 {
1626 printk(KERN_WARNING "cpqarray: Out of memory. "
1627 "Unable to start background processing.\n");
1628 return;
1629 }
1630 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1631 id_ctlr_buf, 0, 0, 0, 0);
1632 if(ret_code != IO_OK)
1633 printk(KERN_WARNING "cpqarray: Unable to start"
1634 " background processing\n");
1635
1636 kfree(id_ctlr_buf);
1637}
1638/*****************************************************************
1639 getgeometry
1640 Get ida logical volume geometry from the controller
	This is a large bit of code which once existed in two flavors.
	It is used only at init time.
1643*****************************************************************/
1644static void getgeometry(int ctlr)
1645{
1646 id_log_drv_t *id_ldrive;
1647 id_ctlr_t *id_ctlr_buf;
1648 sense_log_drv_stat_t *id_lstatus_buf;
1649 config_t *sense_config_buf;
1650 unsigned int log_unit, log_index;
1651 int ret_code, size;
1652 drv_info_t *drv;
1653 ctlr_info_t *info_p = hba[ctlr];
1654 int i;
1655
1656 info_p->log_drv_map = 0;
1657
1658 id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1659 if (!id_ldrive) {
1da177e4 1660 printk( KERN_ERR "cpqarray: out of memory.\n");
2e4934aa 1661 goto err_0;
1662 }
1663
1664 id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1665 if (!id_ctlr_buf) {
1da177e4 1666 printk( KERN_ERR "cpqarray: out of memory.\n");
2e4934aa 1667 goto err_1;
1668 }
1669
1670 id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1671 if (!id_lstatus_buf) {
1da177e4 1672 printk( KERN_ERR "cpqarray: out of memory.\n");
2e4934aa 1673 goto err_2;
1674 }
1675
1676 sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1677 if (!sense_config_buf) {
1da177e4 1678 printk( KERN_ERR "cpqarray: out of memory.\n");
2e4934aa 1679 goto err_3;
1680 }
1681
1682 info_p->phys_drives = 0;
1683 info_p->log_drv_map = 0;
1684 info_p->drv_assign_map = 0;
1685 info_p->drv_spare_map = 0;
1686 info_p->mp_failed_drv_map = 0; /* only initialized here */
1687 /* Get controllers info for this logical drive */
1688 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1689 if (ret_code == IO_ERROR) {
1690 /*
1691 * If can't get controller info, set the logical drive map to 0,
1692 * so the idastubopen will fail on all logical drives
1693 * on the controller.
1694 */
1da177e4 1695 printk(KERN_ERR "cpqarray: error sending ID controller\n");
2e4934aa 1696 goto err_4;
1697 }
1698
1699 info_p->log_drives = id_ctlr_buf->nr_drvs;
1700 for(i=0;i<4;i++)
1701 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1702 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1703
1704 printk(" (%s)\n", info_p->product_name);
1705 /*
1706 * Initialize logical drive map to zero
1707 */
1708 log_index = 0;
1709 /*
1710 * Get drive geometry for all logical drives
1711 */
1712 if (id_ctlr_buf->nr_drvs > 16)
		printk(KERN_WARNING "cpqarray ida%d: This driver supports "
			"16 logical drives per controller.\n"
			"Additional drives will not be detected.\n", ctlr);
1717
1718 for (log_unit = 0;
1719 (log_index < id_ctlr_buf->nr_drvs)
1720 && (log_unit < NWD);
1721 log_unit++) {
1722 size = sizeof(sense_log_drv_stat_t);
1723
1724 /*
1725 Send "Identify logical drive status" cmd
1726 */
1727 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1728 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1729 if (ret_code == IO_ERROR) {
1730 /*
1731 If can't get logical drive status, set
1732 the logical drive map to 0, so the
1733 idastubopen will fail for all logical drives
1734 on the controller.
1735 */
1736 info_p->log_drv_map = 0;
1737 printk( KERN_WARNING
1738 "cpqarray ida%d: idaGetGeometry - Controller"
1739 " failed to report status of logical drive %d\n"
1740 "Access to this controller has been disabled\n",
1741 ctlr, log_unit);
2e4934aa 1742 goto err_4;
1743 }
1744 /*
1745 Make sure the logical drive is configured
1746 */
1747 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1748 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1749 sizeof(id_log_drv_t), 0, 0, log_unit);
1750 /*
1751 If error, the bit for this
1752 logical drive won't be set and
1753 idastubopen will return error.
1754 */
1755 if (ret_code != IO_ERROR) {
1756 drv = &info_p->drv[log_unit];
1757 drv->blk_size = id_ldrive->blk_size;
1758 drv->nr_blks = id_ldrive->nr_blks;
1759 drv->cylinders = id_ldrive->drv.cyl;
1760 drv->heads = id_ldrive->drv.heads;
1761 drv->sectors = id_ldrive->drv.sect_per_track;
1762 info_p->log_drv_map |= (1 << log_unit);
1763
1764 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1765 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1766 ret_code = sendcmd(SENSE_CONFIG,
1767 ctlr, sense_config_buf,
1768 sizeof(config_t), 0, 0, log_unit);
1769 if (ret_code == IO_ERROR) {
1770 info_p->log_drv_map = 0;
1da177e4 1771 printk(KERN_ERR "cpqarray: error sending sense config\n");
2e4934aa 1772 goto err_4;
1773 }
1774
1775 info_p->phys_drives =
1776 sense_config_buf->ctlr_phys_drv;
1777 info_p->drv_assign_map
1778 |= sense_config_buf->drv_asgn_map;
1779 info_p->drv_assign_map
1780 |= sense_config_buf->spare_asgn_map;
1781 info_p->drv_spare_map
1782 |= sense_config_buf->spare_asgn_map;
1783 } /* end of if no error on id_ldrive */
1784 log_index = log_index + 1;
1785 } /* end of if logical drive configured */
1786 } /* end of for log_unit */
1787
1788 /* Free all the buffers and return */
1789err_4:
1da177e4 1790 kfree(sense_config_buf);
2e4934aa 1791err_3:
1da177e4 1792 kfree(id_lstatus_buf);
2e4934aa 1793err_2:
1da177e4 1794 kfree(id_ctlr_buf);
1795err_1:
1796 kfree(id_ldrive);
1797err_0:
1da177e4 1798 return;
1799}
1800
1801static void __exit cpqarray_exit(void)
1802{
1803 int i;
1804
1805 pci_unregister_driver(&cpqarray_pci_driver);
1806
1807 /* Double check that all controller entries have been removed */
1808 for(i=0; i<MAX_CTLR; i++) {
1809 if (hba[i] != NULL) {
1810 printk(KERN_WARNING "cpqarray: Removing EISA "
1811 "controller %d\n", i);
1812 cpqarray_remove_one_eisa(i);
1813 }
1814 }
1815
928b4d8c 1816 remove_proc_entry("driver/cpqarray", NULL);
1817}
1818
1819module_init(cpqarray_init)
1820module_exit(cpqarray_exit)