| 1 | /* |
| 2 | * Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. |
| 3 | * Copyright (C) 2008-2009 MontaVista Software, Inc. |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of version 2 of the GNU General Public License |
| 7 | * as published by the Free Software Foundation. |
| 8 | * |
| 9 | * This program is distributed in the hope that it would be useful, but |
| 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public |
| 14 | * License along with this program; if not, write the Free Software |
| 15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
| 16 | * |
| 17 | * For further information regarding this notice, see: |
| 18 | * |
| 19 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan |
| 20 | */ |
| 21 | |
| 22 | #include <linux/module.h> |
| 23 | #include <linux/types.h> |
| 24 | #include <linux/pci.h> |
| 25 | #include <linux/delay.h> |
| 26 | #include <linux/init.h> |
| 27 | #include <linux/kernel.h> |
| 28 | #include <linux/ioport.h> |
| 29 | #include <linux/blkdev.h> |
| 30 | #include <linux/scatterlist.h> |
| 31 | #include <linux/ioc4.h> |
| 32 | #include <linux/io.h> |
| 33 | #include <linux/ide.h> |
| 34 | |
| 35 | #define DRV_NAME "SGIIOC4" |
| 36 | |
| 37 | /* IOC4 Specific Definitions */ |
| 38 | #define IOC4_CMD_OFFSET 0x100 |
| 39 | #define IOC4_CTRL_OFFSET 0x120 |
| 40 | #define IOC4_DMA_OFFSET 0x140 |
| 41 | #define IOC4_INTR_OFFSET 0x0 |
| 42 | |
| 43 | #define IOC4_TIMING 0x00 |
| 44 | #define IOC4_DMA_PTR_L 0x01 |
| 45 | #define IOC4_DMA_PTR_H 0x02 |
| 46 | #define IOC4_DMA_ADDR_L 0x03 |
| 47 | #define IOC4_DMA_ADDR_H 0x04 |
| 48 | #define IOC4_BC_DEV 0x05 |
| 49 | #define IOC4_BC_MEM 0x06 |
| 50 | #define IOC4_DMA_CTRL 0x07 |
| 51 | #define IOC4_DMA_END_ADDR 0x08 |
| 52 | |
| 53 | /* Bits in the IOC4 Control/Status Register */ |
| 54 | #define IOC4_S_DMA_START 0x01 |
| 55 | #define IOC4_S_DMA_STOP 0x02 |
| 56 | #define IOC4_S_DMA_DIR 0x04 |
| 57 | #define IOC4_S_DMA_ACTIVE 0x08 |
| 58 | #define IOC4_S_DMA_ERROR 0x10 |
| 59 | #define IOC4_ATA_MEMERR 0x02 |
| 60 | |
| 61 | /* Read/Write Directions */ |
| 62 | #define IOC4_DMA_WRITE 0x04 |
| 63 | #define IOC4_DMA_READ 0x00 |
| 64 | |
| 65 | /* Interrupt Register Offsets */ |
| 66 | #define IOC4_INTR_REG 0x03 |
| 67 | #define IOC4_INTR_SET 0x05 |
| 68 | #define IOC4_INTR_CLEAR 0x07 |
| 69 | |
| 70 | #define IOC4_IDE_CACHELINE_SIZE 128 |
| 71 | #define IOC4_CMD_CTL_BLK_SIZE 0x20 |
| 72 | #define IOC4_SUPPORTED_FIRMWARE_REV 46 |
| 73 | |
| 74 | struct ioc4_dma_regs { |
| 75 | u32 timing_reg0; |
| 76 | u32 timing_reg1; |
| 77 | u32 low_mem_ptr; |
| 78 | u32 high_mem_ptr; |
| 79 | u32 low_mem_addr; |
| 80 | u32 high_mem_addr; |
| 81 | u32 dev_byte_count; |
| 82 | u32 mem_byte_count; |
| 83 | u32 status; |
| 84 | }; |
| 85 | |
| 86 | /* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */ |
| 87 | /* IOC4 has only 1 IDE channel */ |
| 88 | #define IOC4_PRD_BYTES 16 |
| 89 | #define IOC4_PRD_ENTRIES (PAGE_SIZE / (4 * IOC4_PRD_BYTES)) |
| 90 | |
| 91 | |
| 92 | static void sgiioc4_init_hwif_ports(struct ide_hw *hw, |
| 93 | unsigned long data_port, |
| 94 | unsigned long ctrl_port, |
| 95 | unsigned long irq_port) |
| 96 | { |
| 97 | unsigned long reg = data_port; |
| 98 | int i; |
| 99 | |
| 100 | /* Registers are word (32 bit) aligned */ |
| 101 | for (i = 0; i <= 7; i++) |
| 102 | hw->io_ports_array[i] = reg + i * 4; |
| 103 | |
| 104 | hw->io_ports.ctl_addr = ctrl_port; |
| 105 | hw->io_ports.irq_addr = irq_port; |
| 106 | } |
| 107 | |
| 108 | static int sgiioc4_checkirq(ide_hwif_t *hwif) |
| 109 | { |
| 110 | unsigned long intr_addr = hwif->io_ports.irq_addr + IOC4_INTR_REG * 4; |
| 111 | |
| 112 | if (readl((void __iomem *)intr_addr) & 0x03) |
| 113 | return 1; |
| 114 | |
| 115 | return 0; |
| 116 | } |
| 117 | |
| 118 | static u8 sgiioc4_read_status(ide_hwif_t *); |
| 119 | |
/*
 * Acknowledge and clear a pending IOC4 IDE interrupt, reporting any PCI
 * bus error that accompanied it.  Returns the low two interrupt bits as
 * read back after the clear (non-zero means the interrupt is still up).
 */
static int sgiioc4_clearirq(ide_drive_t *drive)
{
	u32 intr_reg;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	/* Address of the IOC4 interrupt register (word-aligned registers). */
	unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);

	/* Code to check for PCI error conditions */
	intr_reg = readl((void __iomem *)other_ir);
	if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
		/*
		 * Using sgiioc4_read_status to read the Status register has a
		 * side effect of clearing the interrupt. The first read should
		 * clear it if it is set. The second read should return
		 * a "clear" status if it got cleared. If not, then spin
		 * for a bit trying to clear it.
		 */
		u8 stat = sgiioc4_read_status(hwif);
		int count = 0;

		/* Re-read; spin up to ~100us while the drive is still BUSY. */
		stat = sgiioc4_read_status(hwif);
		while ((stat & ATA_BUSY) && (count++ < 100)) {
			udelay(1);
			stat = sgiioc4_read_status(hwif);
		}

		/* Bit 1 of the interrupt register signals a PCI error. */
		if (intr_reg & 0x02) {
			struct pci_dev *dev = to_pci_dev(hwif->dev);
			/* Error when transferring DMA data on PCI bus */
			u32 pci_err_addr_low, pci_err_addr_high,
			    pci_stat_cmd_reg;

			/*
			 * The two words at the base of the interrupt block
			 * latch the failing PCI address (low, then high).
			 */
			pci_err_addr_low =
				readl((void __iomem *)io_ports->irq_addr);
			pci_err_addr_high =
				readl((void __iomem *)(io_ports->irq_addr + 4));
			pci_read_config_dword(dev, PCI_COMMAND,
					      &pci_stat_cmd_reg);
			printk(KERN_ERR "%s(%s): PCI Bus Error when doing DMA: "
			       "status-cmd reg is 0x%x\n",
			       __func__, drive->name, pci_stat_cmd_reg);
			printk(KERN_ERR "%s(%s): PCI Error Address is 0x%x%x\n",
			       __func__, drive->name,
			       pci_err_addr_high, pci_err_addr_low);
			/* Clear the PCI Error indicator */
			pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
		}

		/* Clear the Interrupt, Error bits on the IOC4 */
		writel(0x03, (void __iomem *)other_ir);

		/* Read back so the caller sees the post-clear state. */
		intr_reg = readl((void __iomem *)other_ir);
	}

	return intr_reg & 3;
}
| 176 | |
| 177 | static void sgiioc4_dma_start(ide_drive_t *drive) |
| 178 | { |
| 179 | ide_hwif_t *hwif = drive->hwif; |
| 180 | unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4; |
| 181 | unsigned int reg = readl((void __iomem *)ioc4_dma_addr); |
| 182 | unsigned int temp_reg = reg | IOC4_S_DMA_START; |
| 183 | |
| 184 | writel(temp_reg, (void __iomem *)ioc4_dma_addr); |
| 185 | } |
| 186 | |
| 187 | static u32 sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base) |
| 188 | { |
| 189 | unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4; |
| 190 | u32 ioc4_dma; |
| 191 | int count; |
| 192 | |
| 193 | count = 0; |
| 194 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
| 195 | while ((ioc4_dma & IOC4_S_DMA_STOP) && (count++ < 200)) { |
| 196 | udelay(1); |
| 197 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
| 198 | } |
| 199 | return ioc4_dma; |
| 200 | } |
| 201 | |
| 202 | /* Stops the IOC4 DMA Engine */ |
| 203 | static int sgiioc4_dma_end(ide_drive_t *drive) |
| 204 | { |
| 205 | u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0; |
| 206 | ide_hwif_t *hwif = drive->hwif; |
| 207 | unsigned long dma_base = hwif->dma_base; |
| 208 | int dma_stat = 0; |
| 209 | unsigned long *ending_dma = ide_get_hwifdata(hwif); |
| 210 | |
| 211 | writel(IOC4_S_DMA_STOP, (void __iomem *)(dma_base + IOC4_DMA_CTRL * 4)); |
| 212 | |
| 213 | ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); |
| 214 | |
| 215 | if (ioc4_dma & IOC4_S_DMA_STOP) { |
| 216 | printk(KERN_ERR |
| 217 | "%s(%s): IOC4 DMA STOP bit is still 1 :" |
| 218 | "ioc4_dma_reg 0x%x\n", |
| 219 | __func__, drive->name, ioc4_dma); |
| 220 | dma_stat = 1; |
| 221 | } |
| 222 | |
| 223 | /* |
| 224 | * The IOC4 will DMA 1's to the ending DMA area to indicate that |
| 225 | * previous data DMA is complete. This is necessary because of relaxed |
| 226 | * ordering between register reads and DMA writes on the Altix. |
| 227 | */ |
| 228 | while ((cnt++ < 200) && (!valid)) { |
| 229 | for (num = 0; num < 16; num++) { |
| 230 | if (ending_dma[num]) { |
| 231 | valid = 1; |
| 232 | break; |
| 233 | } |
| 234 | } |
| 235 | udelay(1); |
| 236 | } |
| 237 | if (!valid) { |
| 238 | printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__, |
| 239 | drive->name); |
| 240 | dma_stat = 1; |
| 241 | } |
| 242 | |
| 243 | bc_dev = readl((void __iomem *)(dma_base + IOC4_BC_DEV * 4)); |
| 244 | bc_mem = readl((void __iomem *)(dma_base + IOC4_BC_MEM * 4)); |
| 245 | |
| 246 | if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) { |
| 247 | if (bc_dev > bc_mem + 8) { |
| 248 | printk(KERN_ERR |
| 249 | "%s(%s): WARNING!! byte_count_dev %d " |
| 250 | "!= byte_count_mem %d\n", |
| 251 | __func__, drive->name, bc_dev, bc_mem); |
| 252 | } |
| 253 | } |
| 254 | |
| 255 | return dma_stat; |
| 256 | } |
| 257 | |
/*
 * Intentionally empty: the IOC4 has no per-drive DMA timing to program
 * (the port advertises MWDMA2 only), but the IDE core requires this hook
 * to exist when a dma_ops is supplied.
 */
static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
| 261 | |
| 262 | /* Returns 1 if DMA IRQ issued, 0 otherwise */ |
| 263 | static int sgiioc4_dma_test_irq(ide_drive_t *drive) |
| 264 | { |
| 265 | return sgiioc4_checkirq(drive->hwif); |
| 266 | } |
| 267 | |
| 268 | static void sgiioc4_dma_host_set(ide_drive_t *drive, int on) |
| 269 | { |
| 270 | if (!on) |
| 271 | sgiioc4_clearirq(drive); |
| 272 | } |
| 273 | |
/*
 * Reset hook: tear down any in-progress DMA, unmap its scatter/gather
 * buffers, and ack pending IOC4 interrupts.  The order matters: the DMA
 * engine must be stopped before its mappings are released.
 */
static void sgiioc4_resetproc(ide_drive_t *drive)
{
	struct ide_cmd *cmd = &drive->hwif->cmd;

	sgiioc4_dma_end(drive);
	ide_dma_unmap_sg(drive, cmd);
	sgiioc4_clearirq(drive);
}
| 282 | |
/*
 * Lost-IRQ recovery: fully reset the channel (stop DMA, unmap, clear
 * IRQs) before handing off to the generic IDE lost-IRQ handler.
 */
static void sgiioc4_dma_lost_irq(ide_drive_t *drive)
{
	sgiioc4_resetproc(drive);

	ide_dma_lost_irq(drive);
}
| 289 | |
/*
 * Read the ATA status register.  Side effect: when the drive is not
 * BUSY, any pending IOC4 interrupt/error bits are cleared as well, so
 * reading status doubles as an interrupt ack (sgiioc4_clearirq relies
 * on this).
 */
static u8 sgiioc4_read_status(ide_hwif_t *hwif)
{
	unsigned long port = hwif->io_ports.status_addr;
	u8 reg = (u8) readb((void __iomem *) port);

	if (!(reg & ATA_BUSY)) {	/* Not busy... check for interrupt */
		/*
		 * status_addr = BAR0 + IOC4_CMD_OFFSET (0x100) + 7 * 4, so
		 * port - 0x110 lands on BAR0 + 0xc, i.e. the interrupt
		 * register (IOC4_INTR_OFFSET + IOC4_INTR_REG * 4).
		 */
		unsigned long other_ir = port - 0x110;
		unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);

		/* Clear the Interrupt, Error bits on the IOC4 */
		if (intr_reg & 0x03) {
			writel(0x03, (void __iomem *) other_ir);
			intr_reg = (u32) readl((void __iomem *) other_ir);
		}
	}

	return reg;
}
| 308 | |
| 309 | /* Creates a DMA map for the scatter-gather list entries */ |
/*
 * Creates a DMA map for the scatter-gather list entries.
 *
 * Reserves the IOC4 DMA register block, allocates the PRD table, and
 * allocates the "ending DMA" cacheline the engine stamps on completion
 * (stored via ide_set_hwifdata, its bus address in hwif->extra_base).
 * Returns 0 on success, -1 on failure (port then falls back to PIO).
 */
static int ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	/* Physical address of the DMA registers, for resource reservation. */
	unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
	int num_ports = sizeof(struct ioc4_dma_regs);
	void *pad;

	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);

	if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
		printk(KERN_ERR "%s(%s) -- ERROR: addresses 0x%08lx to 0x%08lx "
		       "already in use\n", __func__, hwif->name,
		       dma_base, dma_base + num_ports - 1);
		return -1;
	}

	/*
	 * hwif->dma_base is the *virtual* address of the same registers:
	 * irq_addr was set to the ioremapped BAR0 base (IOC4_INTR_OFFSET
	 * is 0), so add the DMA block offset to it.
	 */
	hwif->dma_base = (unsigned long)hwif->io_ports.irq_addr +
			 IOC4_DMA_OFFSET;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
	hwif->prd_ent_size = IOC4_PRD_BYTES;

	if (ide_allocate_dma_engine(hwif))
		goto dma_pci_alloc_failure;

	/*
	 * NOTE(review): this writes a dma_addr_t through a cast of
	 * &hwif->extra_base (unsigned long) — assumes the two types have
	 * the same size on supported platforms; confirm if porting.
	 */
	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
				   (dma_addr_t *)&hwif->extra_base);
	if (pad) {
		ide_set_hwifdata(hwif, pad);
		return 0;
	}

	ide_release_dma_engine(hwif);

	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
	       __func__, hwif->name);
	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);

dma_pci_alloc_failure:
	release_mem_region(dma_base, num_ports);

	return -1;
}
| 355 | |
| 356 | /* Initializes the IOC4 DMA Engine */ |
| 357 | static void sgiioc4_configure_for_dma(int dma_direction, ide_drive_t *drive) |
| 358 | { |
| 359 | u32 ioc4_dma; |
| 360 | ide_hwif_t *hwif = drive->hwif; |
| 361 | unsigned long dma_base = hwif->dma_base; |
| 362 | unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4; |
| 363 | u32 dma_addr, ending_dma_addr; |
| 364 | |
| 365 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
| 366 | |
| 367 | if (ioc4_dma & IOC4_S_DMA_ACTIVE) { |
| 368 | printk(KERN_WARNING "%s(%s): Warning!! DMA from previous " |
| 369 | "transfer was still active\n", __func__, drive->name); |
| 370 | writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); |
| 371 | ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); |
| 372 | |
| 373 | if (ioc4_dma & IOC4_S_DMA_STOP) |
| 374 | printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is " |
| 375 | "still 1\n", __func__, drive->name); |
| 376 | } |
| 377 | |
| 378 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
| 379 | if (ioc4_dma & IOC4_S_DMA_ERROR) { |
| 380 | printk(KERN_WARNING "%s(%s): Warning!! DMA Error during " |
| 381 | "previous transfer, status 0x%x\n", |
| 382 | __func__, drive->name, ioc4_dma); |
| 383 | writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr); |
| 384 | ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); |
| 385 | |
| 386 | if (ioc4_dma & IOC4_S_DMA_STOP) |
| 387 | printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is " |
| 388 | "still 1\n", __func__, drive->name); |
| 389 | } |
| 390 | |
| 391 | /* Address of the Scatter Gather List */ |
| 392 | dma_addr = cpu_to_le32(hwif->dmatable_dma); |
| 393 | writel(dma_addr, (void __iomem *)(dma_base + IOC4_DMA_PTR_L * 4)); |
| 394 | |
| 395 | /* Address of the Ending DMA */ |
| 396 | memset(ide_get_hwifdata(hwif), 0, IOC4_IDE_CACHELINE_SIZE); |
| 397 | ending_dma_addr = cpu_to_le32(hwif->extra_base); |
| 398 | writel(ending_dma_addr, (void __iomem *)(dma_base + |
| 399 | IOC4_DMA_END_ADDR * 4)); |
| 400 | |
| 401 | writel(dma_direction, (void __iomem *)ioc4_dma_addr); |
| 402 | } |
| 403 | |
| 404 | /* IOC4 Scatter Gather list Format */ |
| 405 | /* 128 Bit entries to support 64 bit addresses in the future */ |
| 406 | /* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */ |
| 407 | /* --------------------------------------------------------------------- */ |
| 408 | /* | Upper 32 bits - Zero | Lower 32 bits- address | */ |
| 409 | /* --------------------------------------------------------------------- */ |
| 410 | /* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */ |
| 411 | /* --------------------------------------------------------------------- */ |
| 412 | /* Creates the scatter gather list, DMA Table */ |
| 413 | |
/*
 * Build the IOC4 scatter/gather (PRD) table for a command.
 *
 * Each entry is 16 bytes (see format diagram above): a zero word, the
 * big-endian 32-bit address, a zero word, and the big-endian length.
 * Entries never cross a 64KB boundary, so a segment may be split.  The
 * high bit (0x80000000) of the last length word marks end-of-list.
 * Returns the number of entries built, or 0 to fall back to PIO.
 */
static int sgiioc4_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = cmd->sg_nents;
	struct scatterlist *sg = hwif->sg_table;

	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				return 0;
			} else {
				/* Bytes remaining in this 64KB window. */
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/*
				 * Put the address, length in
				 * the IOC4 dma-table format
				 */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		/* Flag the last length word with the end-of-list bit. */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

	return 0; /* revert to PIO for this request */
}
| 471 | |
| 472 | static int sgiioc4_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) |
| 473 | { |
| 474 | int ddir; |
| 475 | u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE); |
| 476 | |
| 477 | if (sgiioc4_build_dmatable(drive, cmd) == 0) |
| 478 | /* try PIO instead of DMA */ |
| 479 | return 1; |
| 480 | |
| 481 | if (write) |
| 482 | /* Writes TO the IOC4 FROM Main Memory */ |
| 483 | ddir = IOC4_DMA_READ; |
| 484 | else |
| 485 | /* Writes FROM the IOC4 TO Main Memory */ |
| 486 | ddir = IOC4_DMA_WRITE; |
| 487 | |
| 488 | sgiioc4_configure_for_dma(ddir, drive); |
| 489 | |
| 490 | return 0; |
| 491 | } |
| 492 | |
/*
 * Taskfile ops: all generic except read_status, which on IOC4 also
 * clears pending interrupt/error bits as a side effect.
 */
static const struct ide_tp_ops sgiioc4_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= sgiioc4_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= ide_write_devctl,

	.dev_select		= ide_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
| 506 | |
/* Port ops: set_dma_mode is a required no-op (MWDMA2 only, no tuning). */
static const struct ide_port_ops sgiioc4_port_ops = {
	.set_dma_mode		= sgiioc4_set_dma_mode,
	/* reset DMA engine, clear IRQs */
	.resetproc		= sgiioc4_resetproc,
};
| 512 | |
/* DMA engine hooks wired to the IOC4-specific implementations above. */
static const struct ide_dma_ops sgiioc4_dma_ops = {
	.dma_host_set		= sgiioc4_dma_host_set,
	.dma_setup		= sgiioc4_dma_setup,
	.dma_start		= sgiioc4_dma_start,
	.dma_end		= sgiioc4_dma_end,
	.dma_test_irq		= sgiioc4_dma_test_irq,
	.dma_lost_irq		= sgiioc4_dma_lost_irq,
};
| 521 | |
/*
 * Port description handed to ide_host_add(): MMIO-only host, shared
 * IRQ, and a DMA mask restricted to MWDMA2 (the only mode IOC4 does).
 */
static const struct ide_port_info sgiioc4_port_info = {
	.name			= DRV_NAME,
	.chipset		= ide_pci,
	.init_dma		= ide_dma_sgiioc4,
	.tp_ops			= &sgiioc4_tp_ops,
	.port_ops		= &sgiioc4_port_ops,
	.dma_ops		= &sgiioc4_dma_ops,
	.host_flags		= IDE_HFLAG_MMIO,
	.irq_flags		= IRQF_SHARED,
	.mwdma_mask		= ATA_MWDMA2_ONLY,
};
| 533 | |
/*
 * Map BAR0, reserve the command/control register block, wire up the
 * port addresses, enable the IOC4 IDE interrupts, and register the
 * host with the IDE core.  Returns 0 on success or a negative errno;
 * on failure the goto chain unwinds resources in reverse order.
 */
static int sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
{
	unsigned long cmd_base, irqport;
	unsigned long bar0, cmd_phys_base, ctl;
	void __iomem *virt_base;
	struct ide_hw hw, *hws[] = { &hw };
	int rc;

	/* Get the CmdBlk and CtrlBlk base registers */
	bar0 = pci_resource_start(dev, 0);
	virt_base = pci_ioremap_bar(dev, 0);
	if (virt_base == NULL) {
		printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
		       DRV_NAME, bar0);
		return -ENOMEM;
	}
	/* Virtual addresses of the three register blocks inside BAR0. */
	cmd_base = (unsigned long)virt_base + IOC4_CMD_OFFSET;
	ctl = (unsigned long)virt_base + IOC4_CTRL_OFFSET;
	irqport = (unsigned long)virt_base + IOC4_INTR_OFFSET;

	/* Reserve the *physical* command/control block region. */
	cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
	if (request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
			       DRV_NAME) == NULL) {
		printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx "
		       "already in use\n", DRV_NAME, pci_name(dev),
		       cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
		rc = -EBUSY;
		goto req_mem_rgn_err;
	}

	/* Initialize the IO registers */
	memset(&hw, 0, sizeof(hw));
	sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
	hw.irq = dev->irq;
	hw.dev = &dev->dev;

	/* Initialize chipset IRQ registers */
	writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));

	rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL);
	if (!rc)
		return 0;

	release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
req_mem_rgn_err:
	iounmap(virt_base);
	return rc;
}
| 582 | |
| 583 | static unsigned int pci_init_sgiioc4(struct pci_dev *dev) |
| 584 | { |
| 585 | int ret; |
| 586 | |
| 587 | printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n", |
| 588 | DRV_NAME, pci_name(dev), dev->revision); |
| 589 | |
| 590 | if (dev->revision < IOC4_SUPPORTED_FIRMWARE_REV) { |
| 591 | printk(KERN_ERR "Skipping %s IDE controller in slot %s: " |
| 592 | "firmware is obsolete - please upgrade to " |
| 593 | "revision46 or higher\n", |
| 594 | DRV_NAME, pci_name(dev)); |
| 595 | ret = -EAGAIN; |
| 596 | goto out; |
| 597 | } |
| 598 | ret = sgiioc4_ide_setup_pci_device(dev); |
| 599 | out: |
| 600 | return ret; |
| 601 | } |
| 602 | |
| 603 | int ioc4_ide_attach_one(struct ioc4_driver_data *idd) |
| 604 | { |
| 605 | /* |
| 606 | * PCI-RT does not bring out IDE connection. |
| 607 | * Do not attach to this particular IOC4. |
| 608 | */ |
| 609 | if (idd->idd_variant == IOC4_VARIANT_PCI_RT) |
| 610 | return 0; |
| 611 | |
| 612 | return pci_init_sgiioc4(idd->idd_pdev); |
| 613 | } |
| 614 | |
/* Registration record for the IOC4 core driver's submodule framework. */
static struct ioc4_submodule ioc4_ide_submodule = {
	.is_name = "IOC4_ide",
	.is_owner = THIS_MODULE,
	.is_probe = ioc4_ide_attach_one,
};
| 620 | |
/* Module init: hand our probe hook to the IOC4 core driver. */
static int __init ioc4_ide_init(void)
{
	return ioc4_register_submodule(&ioc4_ide_submodule);
}
| 625 | |
| 626 | late_initcall(ioc4_ide_init); /* Call only after IDE init is done */ |
| 627 | |
| 628 | MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon"); |
| 629 | MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card"); |
| 630 | MODULE_LICENSE("GPL"); |