/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.27"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	MV_COAL_REG_BASE	= 0x18000,
	MV_IRQ_COAL_CAUSE	= (MV_COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	MV_IRQ_COAL_IO_THRESHOLD	= (MV_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	MV_TRAN_COAL_CAUSE_LO	= (MV_COAL_REG_BASE + 0x88),
	MV_TRAN_COAL_CAUSE_HI	= (MV_COAL_REG_BASE + 0x8c),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD_OFS	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD_OFS	= 0x0010,

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
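
/*
 * Usage sketch (editor's addition; illustrative only): the dummy readl()
 * above forces the preceding writel() out of any posted-write buffers,
 * so callers can rely on the register having actually been updated, e.g.:
 *
 *	writelfl(mask, hpriv->main_irq_mask_addr);
 *	// by this point the new mask has reached the chip
 */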

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
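
/*
 * Worked example (editor's addition, not from the original sources):
 * for port 6, mv_hc_from_port(6) = 1, so shift starts at 1 * HC_SHIFT = 9;
 * hardport = 6 & 3 = 2, so shift becomes 9 + 2 * 2 = 13.  The ERR_IRQ and
 * DONE_IRQ bits for port 6 therefore live at bits 13 and 14 of the
 * main_irq_cause / main_irq_mask registers.
 */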

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}

/**
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to cached value of the register
 *	@new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}
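
/*
 * Usage sketch (editor's addition; illustrative only, mirroring real
 * callers such as mv_config_fbs() and mv_bmdma_enable_iie() below):
 *
 *	struct mv_port_priv *pp = ap->private_data;
 *	u32 new = pp->cached.ltmode | LTMODE_BIT8;
 *
 *	mv_write_cached_reg(mv_ap_base(ap) + LTMODE_OFS,
 *			    &pp->cached.ltmode, new);
 *
 * The slow MMIO write is skipped entirely whenever the value is unchanged.
 */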

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}
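
/*
 * Example (editor's addition, not from the original sources): if a caller
 * requests (DONE_IRQ_0_3 | PORTS_0_3_COAL_DONE), only PORTS_0_3_COAL_DONE
 * reaches the hardware; the per-port DONE bits for that HC are stripped,
 * so each completion is signalled exactly once, via the coalescing event.
 */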

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);

#if 0	/* disabled pending functional clarification from Marvell */
	if (!IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE: global thresholds for the entire chip.
		 */
		writel(clks,  mmio + MV_IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writelfl(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
		clks  = count = 0; /* so as to clear the alternate regs below */
		coal_enable = ALL_PORTS_COAL_DONE;
	}
#endif
	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
	coal_enable |= PORTS_0_3_COAL_DONE;
	if (hpriv->n_ports > 4) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
		coal_enable |= PORTS_4_7_COAL_DONE;
	}
	if (!count)
		coal_enable = 0;
	mv_set_main_irq_mask(host, coal_disable, coal_enable);

	spin_unlock_irqrestore(&host->lock, flags);
}
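
/*
 * Worked example (editor's addition, not from the original sources): with
 * the module options irq_coalescing_usecs=100 and irq_coalescing_io_count=4,
 * clks = 100 * COAL_CLOCKS_PER_USEC = 15000 internal clocks, well under
 * MAX_COAL_TIME_THRESHOLD (2^24 - 1).  Each HC then raises one coalesced
 * interrupt after 4 completed I/Os or ~100 usecs, whichever comes first.
 */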

/**
 *	mv_start_edma - Enable eDMA engine
 *	@ap: port on which to enable eDMA
 *	@port_mmio: port base address
 *	@pp: port private data
 *	@protocol: taskfile protocol of the command about to be issued
 *
 *	Enable the port's eDMA engine if it is not already running,
 *	first stopping and reconfiguring it when switching between
 *	NCQ and non-NCQ operation.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
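
/*
 * Note (editor's addition, not from the original sources): the loop above
 * polls up to 10000 times at 10 usec intervals, i.e. the chip gets roughly
 * 100 msec to clear EDMA_EN before we give up and return -EIO.
 */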
1150 | ||
e12bef50 | 1151 | static int mv_stop_edma(struct ata_port *ap) |
0ea9e179 | 1152 | { |
b562468c ML |
1153 | void __iomem *port_mmio = mv_ap_base(ap); |
1154 | struct mv_port_priv *pp = ap->private_data; | |
66e57a2c | 1155 | int err = 0; |
0ea9e179 | 1156 | |
b562468c ML |
1157 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
1158 | return 0; | |
1159 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; | |
9b2c4e0b | 1160 | mv_wait_for_edma_empty_idle(ap); |
b562468c ML |
1161 | if (mv_stop_edma_engine(port_mmio)) { |
1162 | ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); | |
66e57a2c | 1163 | err = -EIO; |
b562468c | 1164 | } |
66e57a2c ML |
1165 | mv_edma_cfg(ap, 0, 0); |
1166 | return err; | |
0ea9e179 JG |
1167 | } |
1168 | ||
8a70f8dc | 1169 | #ifdef ATA_DEBUG |
31961943 | 1170 | static void mv_dump_mem(void __iomem *start, unsigned bytes) |
20f733e7 | 1171 | { |
31961943 BR |
1172 | int b, w; |
1173 | for (b = 0; b < bytes; ) { | |
1174 | DPRINTK("%p: ", start + b); | |
1175 | for (w = 0; b < bytes && w < 4; w++) { | |
2dcb407e | 1176 | printk("%08x ", readl(start + b)); |
31961943 BR |
1177 | b += sizeof(u32); |
1178 | } | |
1179 | printk("\n"); | |
1180 | } | |
31961943 | 1181 | } |
8a70f8dc JG |
1182 | #endif |
1183 | ||
31961943 BR |
1184 | static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) |
1185 | { | |
1186 | #ifdef ATA_DEBUG | |
1187 | int b, w; | |
1188 | u32 dw; | |
1189 | for (b = 0; b < bytes; ) { | |
1190 | DPRINTK("%02x: ", b); | |
1191 | for (w = 0; b < bytes && w < 4; w++) { | |
2dcb407e JG |
1192 | (void) pci_read_config_dword(pdev, b, &dw); |
1193 | printk("%08x ", dw); | |
31961943 BR |
1194 | b += sizeof(u32); |
1195 | } | |
1196 | printk("\n"); | |
1197 | } | |
1198 | #endif | |
1199 | } | |
1200 | static void mv_dump_all_regs(void __iomem *mmio_base, int port, | |
1201 | struct pci_dev *pdev) | |
1202 | { | |
1203 | #ifdef ATA_DEBUG | |
8b260248 | 1204 | void __iomem *hc_base = mv_hc_base(mmio_base, |
31961943 BR |
1205 | port >> MV_PORT_HC_SHIFT); |
1206 | void __iomem *port_base; | |
1207 | int start_port, num_ports, p, start_hc, num_hcs, hc; | |
1208 | ||
1209 | if (0 > port) { | |
1210 | start_hc = start_port = 0; | |
1211 | num_ports = 8; /* shld be benign for 4 port devs */ | |
1212 | num_hcs = 2; | |
1213 | } else { | |
1214 | start_hc = port >> MV_PORT_HC_SHIFT; | |
1215 | start_port = port; | |
1216 | num_ports = num_hcs = 1; | |
1217 | } | |
8b260248 | 1218 | DPRINTK("All registers for port(s) %u-%u:\n", start_port, |
31961943 BR |
1219 | num_ports > 1 ? num_ports - 1 : start_port); |
1220 | ||
1221 | if (NULL != pdev) { | |
1222 | DPRINTK("PCI config space regs:\n"); | |
1223 | mv_dump_pci_cfg(pdev, 0x68); | |
1224 | } | |
1225 | DPRINTK("PCI regs:\n"); | |
1226 | mv_dump_mem(mmio_base+0xc00, 0x3c); | |
1227 | mv_dump_mem(mmio_base+0xd00, 0x34); | |
1228 | mv_dump_mem(mmio_base+0xf00, 0x4); | |
1229 | mv_dump_mem(mmio_base+0x1d00, 0x6c); | |
1230 | for (hc = start_hc; hc < start_hc + num_hcs; hc++) { | |
d220c37e | 1231 | hc_base = mv_hc_base(mmio_base, hc); |
31961943 BR |
1232 | DPRINTK("HC regs (HC %i):\n", hc); |
1233 | mv_dump_mem(hc_base, 0x1c); | |
1234 | } | |
1235 | for (p = start_port; p < start_port + num_ports; p++) { | |
1236 | port_base = mv_port_base(mmio_base, p); | |
2dcb407e | 1237 | DPRINTK("EDMA regs (port %i):\n", p); |
31961943 | 1238 | mv_dump_mem(port_base, 0x54); |
2dcb407e | 1239 | DPRINTK("SATA regs (port %i):\n", p); |
31961943 BR |
1240 | mv_dump_mem(port_base+0x300, 0x60); |
1241 | } | |
1242 | #endif | |
20f733e7 BR |
1243 | } |
1244 | ||
1245 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) | |
1246 | { | |
1247 | unsigned int ofs; | |
1248 | ||
1249 | switch (sc_reg_in) { | |
1250 | case SCR_STATUS: | |
1251 | case SCR_CONTROL: | |
1252 | case SCR_ERROR: | |
1253 | ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32)); | |
1254 | break; | |
1255 | case SCR_ACTIVE: | |
1256 | ofs = SATA_ACTIVE_OFS; /* active is not with the others */ | |
1257 | break; | |
1258 | default: | |
1259 | ofs = 0xffffffffU; | |
1260 | break; | |
1261 | } | |
1262 | return ofs; | |
1263 | } | |
1264 | ||
82ef04fb | 1265 | static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) |
20f733e7 BR |
1266 | { |
1267 | unsigned int ofs = mv_scr_offset(sc_reg_in); | |
1268 | ||
da3dbb17 | 1269 | if (ofs != 0xffffffffU) { |
82ef04fb | 1270 | *val = readl(mv_ap_base(link->ap) + ofs); |
da3dbb17 TH |
1271 | return 0; |
1272 | } else | |
1273 | return -EINVAL; | |
20f733e7 BR |
1274 | } |
1275 | ||
82ef04fb | 1276 | static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
20f733e7 BR |
1277 | { |
1278 | unsigned int ofs = mv_scr_offset(sc_reg_in); | |
1279 | ||
da3dbb17 | 1280 | if (ofs != 0xffffffffU) { |
82ef04fb | 1281 | writelfl(val, mv_ap_base(link->ap) + ofs); |
da3dbb17 TH |
1282 | return 0; |
1283 | } else | |
1284 | return -EINVAL; | |
20f733e7 BR |
1285 | } |
1286 | ||
f273827e ML |
1287 | static void mv6_dev_config(struct ata_device *adev) |
1288 | { | |
1289 | /* | |
e49856d8 ML |
1290 | * Deal with Gen-II ("mv6") hardware quirks/restrictions: |
1291 | * | |
1292 | * Gen-II does not support NCQ over a port multiplier | |
1293 | * (no FIS-based switching). | |
f273827e | 1294 | */ |
e49856d8 | 1295 | if (adev->flags & ATA_DFLAG_NCQ) { |
352fab70 | 1296 | if (sata_pmp_attached(adev->link->ap)) { |
e49856d8 | 1297 | adev->flags &= ~ATA_DFLAG_NCQ; |
352fab70 ML |
1298 | ata_dev_printk(adev, KERN_INFO, |
1299 | "NCQ disabled for command-based switching\n"); | |
352fab70 | 1300 | } |
e49856d8 | 1301 | } |
f273827e ML |
1302 | } |
1303 | ||
3e4a1391 ML |
1304 | static int mv_qc_defer(struct ata_queued_cmd *qc) |
1305 | { | |
1306 | struct ata_link *link = qc->dev->link; | |
1307 | struct ata_port *ap = link->ap; | |
1308 | struct mv_port_priv *pp = ap->private_data; | |
1309 | ||
29d187bb ML |
1310 | /* |
1311 | * Don't allow new commands if we're in a delayed EH state | |
1312 | * for NCQ and/or FIS-based switching. | |
1313 | */ | |
1314 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) | |
1315 | return ATA_DEFER_PORT; | |
3e4a1391 ML |
1316 | /* |
1317 | * If the port is completely idle, then allow the new qc. | |
1318 | */ | |
1319 | if (ap->nr_active_links == 0) | |
1320 | return 0; | |
1321 | ||
4bdee6c5 TH |
1322 | /* |
1323 | * The port is operating in host queuing mode (EDMA) with NCQ | |
1324 | * enabled, allow multiple NCQ commands. EDMA also allows | |
1325 | * queueing multiple DMA commands but libata core currently | |
1326 | * doesn't allow it. | |
1327 | */ | |
1328 | if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && | |
1329 | (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol)) | |
1330 | return 0; | |
1331 | ||
3e4a1391 ML |
1332 | return ATA_DEFER_PORT; |
1333 | } | |
1334 | ||
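/*
 * Decision summary for mv_qc_defer() above (a sketch, mirroring
 * the checks in source order):
 *
 *	delayed-EH flag set                 -> ATA_DEFER_PORT
 *	port idle (nr_active_links == 0)    -> issue now
 *	EDMA_EN + NCQ_EN and qc is NCQ      -> issue now (queued)
 *	anything else                       -> ATA_DEFER_PORT
 */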
08da1759 | 1335 | static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) |
e49856d8 | 1336 | { |
08da1759 ML |
1337 | struct mv_port_priv *pp = ap->private_data; |
1338 | void __iomem *port_mmio; | |
00f42eab | 1339 | |
08da1759 ML |
1340 | u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; |
1341 | u32 ltmode, *old_ltmode = &pp->cached.ltmode; | |
1342 | u32 haltcond, *old_haltcond = &pp->cached.haltcond; | |
00f42eab | 1343 | |
08da1759 ML |
1344 | ltmode = *old_ltmode & ~LTMODE_BIT8; |
1345 | haltcond = *old_haltcond | EDMA_ERR_DEV; | |
00f42eab ML |
1346 | |
1347 | if (want_fbs) { | |
08da1759 ML |
1348 | fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; |
1349 | ltmode = *old_ltmode | LTMODE_BIT8; | |
4c299ca3 | 1350 | if (want_ncq) |
08da1759 | 1351 | haltcond &= ~EDMA_ERR_DEV; |
4c299ca3 | 1352 | else |
08da1759 ML |
1353 | fiscfg |= FISCFG_WAIT_DEV_ERR; |
1354 | } else { | |
1355 | fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); | |
e49856d8 | 1356 | } |
00f42eab | 1357 | |
08da1759 ML |
1358 | port_mmio = mv_ap_base(ap); |
1359 | mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg); | |
1360 | mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode); | |
1361 | mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond); | |
f273827e ML |
1362 | } |
1363 | ||
dd2890f6 ML |
1364 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) |
1365 | { | |
1366 | struct mv_host_priv *hpriv = ap->host->private_data; | |
1367 | u32 old, new; | |
1368 | ||
1369 | /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ | |
1370 | old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); | |
1371 | if (want_ncq) | |
1372 | new = old | (1 << 22); | |
1373 | else | |
1374 | new = old & ~(1 << 22); | |
1375 | if (new != old) | |
1376 | writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); | |
1377 | } | |
1378 | ||
c01e8a23 | 1379 | /** |
40f21b11 ML |
1380 | * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1381 | * @ap: Port being initialized | |
c01e8a23 ML |
1382 | * |
1383 | * There are two DMA modes on these chips: basic DMA, and EDMA. | |
1384 | * | |
1385 | * Bit-0 of the "EDMA RESERVED" register enables/disables use | |
1386 | * of basic DMA on the GEN_IIE versions of the chips. | |
1387 | * | |
1388 | * This bit survives EDMA resets, and must be set for basic DMA | |
1389 | * to function, and should be cleared when EDMA is active. | |
1390 | */ | |
1391 | static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) | |
1392 | { | |
1393 | struct mv_port_priv *pp = ap->private_data; | |
1394 | u32 new, *old = &pp->cached.unknown_rsvd; | |
1395 | ||
1396 | if (enable_bmdma) | |
1397 | new = *old | 1; | |
1398 | else | |
1399 | new = *old & ~1; | |
1400 | mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new); | |
1401 | } | |
1402 | ||
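/*
 * For reference, a minimal sketch of the cached-register helper used
 * above (mv_write_cached_reg() is defined earlier in this file; this
 * is the idea, not necessarily its exact body):
 *
 *	static void mv_write_cached_reg(void __iomem *addr,
 *					u32 *old, u32 new)
 *	{
 *		if (new != *old) {
 *			*old = new;
 *			writel(new, addr);
 *		}
 *	}
 *
 * Skipping the MMIO write when the value is unchanged keeps these
 * reconfiguration paths cheap.
 */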
00b81235 | 1403 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) |
e4e7b892 | 1404 | { |
0c58912e | 1405 | u32 cfg; |
e12bef50 ML |
1406 | struct mv_port_priv *pp = ap->private_data; |
1407 | struct mv_host_priv *hpriv = ap->host->private_data; | |
1408 | void __iomem *port_mmio = mv_ap_base(ap); | |
e4e7b892 JG |
1409 | |
1410 | /* set up non-NCQ EDMA configuration */ | |
0c58912e | 1411 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
d16ab3f6 ML |
1412 | pp->pp_flags &= |
1413 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | |
e4e7b892 | 1414 | |
0c58912e | 1415 | if (IS_GEN_I(hpriv)) |
e4e7b892 JG |
1416 | cfg |= (1 << 8); /* enab config burst size mask */ |
1417 | ||
dd2890f6 | 1418 | else if (IS_GEN_II(hpriv)) { |
e4e7b892 | 1419 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; |
dd2890f6 | 1420 | mv_60x1_errata_sata25(ap, want_ncq); |
e4e7b892 | 1421 | |
dd2890f6 | 1422 | } else if (IS_GEN_IIE(hpriv)) { |
00f42eab ML |
1423 | int want_fbs = sata_pmp_attached(ap); |
1424 | /* | |
1425 | * Possible future enhancement: | |
1426 | * | |
1427 | * The chip can use FBS with non-NCQ, if we allow it. |
1428 | * But first we need to have the error handling in place | |
1429 | * for this mode (datasheet section 7.3.15.4.2.3). | |
1430 | * So disallow non-NCQ FBS for now. | |
1431 | */ | |
1432 | want_fbs &= want_ncq; | |
1433 | ||
08da1759 | 1434 | mv_config_fbs(ap, want_ncq, want_fbs); |
00f42eab ML |
1435 | |
1436 | if (want_fbs) { | |
1437 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; | |
1438 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ | |
1439 | } | |
1440 | ||
e728eabe | 1441 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ |
00b81235 ML |
1442 | if (want_edma) { |
1443 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | |
1444 | if (!IS_SOC(hpriv)) | |
1445 | cfg |= (1 << 18); /* enab early completion */ | |
1446 | } | |
616d4a98 ML |
1447 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) |
1448 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | |
c01e8a23 | 1449 | mv_bmdma_enable_iie(ap, !want_edma); |
e4e7b892 JG |
1450 | } |
1451 | ||
72109168 ML |
1452 | if (want_ncq) { |
1453 | cfg |= EDMA_CFG_NCQ; | |
1454 | pp->pp_flags |= MV_PP_FLAG_NCQ_EN; | |
00b81235 | 1455 | } |
72109168 | 1456 | |
e4e7b892 JG |
1457 | writelfl(cfg, port_mmio + EDMA_CFG_OFS); |
1458 | } | |
1459 | ||
da2fa9ba ML |
1460 | static void mv_port_free_dma_mem(struct ata_port *ap) |
1461 | { | |
1462 | struct mv_host_priv *hpriv = ap->host->private_data; | |
1463 | struct mv_port_priv *pp = ap->private_data; | |
eb73d558 | 1464 | int tag; |
da2fa9ba ML |
1465 | |
1466 | if (pp->crqb) { | |
1467 | dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); | |
1468 | pp->crqb = NULL; | |
1469 | } | |
1470 | if (pp->crpb) { | |
1471 | dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); | |
1472 | pp->crpb = NULL; | |
1473 | } | |
eb73d558 ML |
1474 | /* |
1475 | * For GEN_I, there's no NCQ, so we have only a single sg_tbl. | |
1476 | * For later hardware, we have one unique sg_tbl per NCQ tag. | |
1477 | */ | |
1478 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { | |
1479 | if (pp->sg_tbl[tag]) { | |
1480 | if (tag == 0 || !IS_GEN_I(hpriv)) | |
1481 | dma_pool_free(hpriv->sg_tbl_pool, | |
1482 | pp->sg_tbl[tag], | |
1483 | pp->sg_tbl_dma[tag]); | |
1484 | pp->sg_tbl[tag] = NULL; | |
1485 | } | |
da2fa9ba ML |
1486 | } |
1487 | } | |
1488 | ||
05b308e1 BR |
1489 | /** |
1490 | * mv_port_start - Port specific init/start routine. | |
1491 | * @ap: ATA channel to manipulate | |
1492 | * | |
1493 | * Allocate and point to DMA memory, init port private memory, | |
1494 | * zero indices. | |
1495 | * | |
1496 | * LOCKING: | |
1497 | * Inherited from caller. | |
1498 | */ | |
31961943 BR |
1499 | static int mv_port_start(struct ata_port *ap) |
1500 | { | |
cca3974e JG |
1501 | struct device *dev = ap->host->dev; |
1502 | struct mv_host_priv *hpriv = ap->host->private_data; | |
31961943 | 1503 | struct mv_port_priv *pp; |
dde20207 | 1504 | int tag; |
31961943 | 1505 | |
24dc5f33 | 1506 | pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); |
6037d6bb | 1507 | if (!pp) |
24dc5f33 | 1508 | return -ENOMEM; |
da2fa9ba | 1509 | ap->private_data = pp; |
31961943 | 1510 | |
da2fa9ba ML |
1511 | pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); |
1512 | if (!pp->crqb) | |
1513 | return -ENOMEM; | |
1514 | memset(pp->crqb, 0, MV_CRQB_Q_SZ); | |
31961943 | 1515 | |
da2fa9ba ML |
1516 | pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); |
1517 | if (!pp->crpb) | |
1518 | goto out_port_free_dma_mem; | |
1519 | memset(pp->crpb, 0, MV_CRPB_Q_SZ); | |
31961943 | 1520 | |
3bd0a70e ML |
1521 | /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ |
1522 | if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) | |
1523 | ap->flags |= ATA_FLAG_AN; | |
eb73d558 ML |
1524 | /* |
1525 | * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. | |
1526 | * For later hardware, we need one unique sg_tbl per NCQ tag. | |
1527 | */ | |
1528 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { | |
1529 | if (tag == 0 || !IS_GEN_I(hpriv)) { | |
1530 | pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, | |
1531 | GFP_KERNEL, &pp->sg_tbl_dma[tag]); | |
1532 | if (!pp->sg_tbl[tag]) | |
1533 | goto out_port_free_dma_mem; | |
1534 | } else { | |
1535 | pp->sg_tbl[tag] = pp->sg_tbl[0]; | |
1536 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; | |
1537 | } | |
1538 | } | |
08da1759 | 1539 | mv_save_cached_regs(ap); |
66e57a2c | 1540 | mv_edma_cfg(ap, 0, 0); |
31961943 | 1541 | return 0; |
da2fa9ba ML |
1542 | |
1543 | out_port_free_dma_mem: | |
1544 | mv_port_free_dma_mem(ap); | |
1545 | return -ENOMEM; | |
31961943 BR |
1546 | } |
1547 | ||
05b308e1 BR |
1548 | /** |
1549 | * mv_port_stop - Port specific cleanup/stop routine. | |
1550 | * @ap: ATA channel to manipulate | |
1551 | * | |
1552 | * Stop DMA, cleanup port memory. | |
1553 | * | |
1554 | * LOCKING: | |
cca3974e | 1555 | * This routine uses the host lock to protect the DMA stop. |
05b308e1 | 1556 | */ |
31961943 BR |
1557 | static void mv_port_stop(struct ata_port *ap) |
1558 | { | |
e12bef50 | 1559 | mv_stop_edma(ap); |
88e675e1 | 1560 | mv_enable_port_irqs(ap, 0); |
da2fa9ba | 1561 | mv_port_free_dma_mem(ap); |
31961943 BR |
1562 | } |
1563 | ||
05b308e1 BR |
1564 | /** |
1565 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries | |
1566 | * @qc: queued command whose SG list to source from | |
1567 | * | |
1568 | * Populate the SG list and mark the last entry. | |
1569 | * | |
1570 | * LOCKING: | |
1571 | * Inherited from caller. | |
1572 | */ | |
6c08772e | 1573 | static void mv_fill_sg(struct ata_queued_cmd *qc) |
31961943 BR |
1574 | { |
1575 | struct mv_port_priv *pp = qc->ap->private_data; | |
972c26bd | 1576 | struct scatterlist *sg; |
3be6cbd7 | 1577 | struct mv_sg *mv_sg, *last_sg = NULL; |
ff2aeb1e | 1578 | unsigned int si; |
31961943 | 1579 | |
eb73d558 | 1580 | mv_sg = pp->sg_tbl[qc->tag]; |
ff2aeb1e | 1581 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
d88184fb JG |
1582 | dma_addr_t addr = sg_dma_address(sg); |
1583 | u32 sg_len = sg_dma_len(sg); | |
22374677 | 1584 | |
4007b493 OJ |
1585 | while (sg_len) { |
1586 | u32 offset = addr & 0xffff; | |
1587 | u32 len = sg_len; | |
22374677 | 1588 | |
32cd11a6 | 1589 | if (offset + len > 0x10000) |
4007b493 OJ |
1590 | len = 0x10000 - offset; |
1591 | ||
1592 | mv_sg->addr = cpu_to_le32(addr & 0xffffffff); | |
1593 | mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); | |
6c08772e | 1594 | mv_sg->flags_size = cpu_to_le32(len & 0xffff); |
32cd11a6 | 1595 | mv_sg->reserved = 0; |
4007b493 OJ |
1596 | |
1597 | sg_len -= len; | |
1598 | addr += len; | |
1599 | ||
3be6cbd7 | 1600 | last_sg = mv_sg; |
4007b493 | 1601 | mv_sg++; |
4007b493 | 1602 | } |
31961943 | 1603 | } |
3be6cbd7 JG |
1604 | |
1605 | if (likely(last_sg)) | |
1606 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | |
32cd11a6 | 1607 | mb(); /* ensure data structure is visible to the chipset */ |
31961943 BR |
1608 | } |
1609 | ||
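/*
 * Worked example of the 64KB-boundary split above (hypothetical
 * numbers): an sg entry with addr=0x1000fff0 and sg_len=0x30 sits at
 * offset 0xfff0 within its 64KB region, so it is emitted as two ePRDs:
 *
 *	ePRD[0]: addr=0x1000fff0, len=0x10  (up to the boundary)
 *	ePRD[1]: addr=0x10010000, len=0x20  (the remainder)
 *
 * This keeps any single ePRD from crossing a 64KB address boundary.
 */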
5796d1c4 | 1610 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
31961943 | 1611 | { |
559eedad | 1612 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
31961943 | 1613 | (last ? CRQB_CMD_LAST : 0); |
559eedad | 1614 | *cmdw = cpu_to_le16(tmp); |
31961943 BR |
1615 | } |
1616 | ||
da14265e ML |
1617 | /** |
1618 | * mv_sff_irq_clear - Clear hardware interrupt after DMA. | |
1619 | * @ap: Port associated with this ATA transaction. | |
1620 | * | |
1621 | * We need this only for ATAPI bmdma transactions, | |
1622 | * as otherwise we experience spurious interrupts | |
1623 | * after libata-sff handles the bmdma interrupts. | |
1624 | */ | |
1625 | static void mv_sff_irq_clear(struct ata_port *ap) | |
1626 | { | |
1627 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); | |
1628 | } | |
1629 | ||
1630 | /** | |
1631 | * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. | |
1632 | * @qc: queued command to check for chipset/DMA compatibility. | |
1633 | * | |
1634 | * The bmdma engines cannot handle speculative data sizes | |
1635 | * (bytecount under/overflow). So only allow DMA for |
1636 | * data transfer commands with known data sizes. | |
1637 | * | |
1638 | * LOCKING: | |
1639 | * Inherited from caller. | |
1640 | */ | |
1641 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc) | |
1642 | { | |
1643 | struct scsi_cmnd *scmd = qc->scsicmd; | |
1644 | ||
1645 | if (scmd) { | |
1646 | switch (scmd->cmnd[0]) { | |
1647 | case READ_6: | |
1648 | case READ_10: | |
1649 | case READ_12: | |
1650 | case WRITE_6: | |
1651 | case WRITE_10: | |
1652 | case WRITE_12: | |
1653 | case GPCMD_READ_CD: | |
1654 | case GPCMD_SEND_DVD_STRUCTURE: | |
1655 | case GPCMD_SEND_CUE_SHEET: | |
1656 | return 0; /* DMA is safe */ | |
1657 | } | |
1658 | } | |
1659 | return -EOPNOTSUPP; /* use PIO instead */ | |
1660 | } | |
1661 | ||
1662 | /** | |
1663 | * mv_bmdma_setup - Set up BMDMA transaction | |
1664 | * @qc: queued command to prepare DMA for. | |
1665 | * | |
1666 | * LOCKING: | |
1667 | * Inherited from caller. | |
1668 | */ | |
1669 | static void mv_bmdma_setup(struct ata_queued_cmd *qc) | |
1670 | { | |
1671 | struct ata_port *ap = qc->ap; | |
1672 | void __iomem *port_mmio = mv_ap_base(ap); | |
1673 | struct mv_port_priv *pp = ap->private_data; | |
1674 | ||
1675 | mv_fill_sg(qc); | |
1676 | ||
1677 | /* clear all DMA cmd bits */ | |
1678 | writel(0, port_mmio + BMDMA_CMD_OFS); | |
1679 | ||
1680 | /* load PRD table addr. */ | |
1681 | writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, | |
1682 | port_mmio + BMDMA_PRD_HIGH_OFS); | |
1683 | writelfl(pp->sg_tbl_dma[qc->tag], | |
1684 | port_mmio + BMDMA_PRD_LOW_OFS); | |
1685 | ||
1686 | /* issue r/w command */ | |
1687 | ap->ops->sff_exec_command(ap, &qc->tf); | |
1688 | } | |
1689 | ||
1690 | /** | |
1691 | * mv_bmdma_start - Start a BMDMA transaction | |
1692 | * @qc: queued command to start DMA on. | |
1693 | * | |
1694 | * LOCKING: | |
1695 | * Inherited from caller. | |
1696 | */ | |
1697 | static void mv_bmdma_start(struct ata_queued_cmd *qc) | |
1698 | { | |
1699 | struct ata_port *ap = qc->ap; | |
1700 | void __iomem *port_mmio = mv_ap_base(ap); | |
1701 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | |
1702 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; | |
1703 | ||
1704 | /* start host DMA transaction */ | |
1705 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | |
1706 | } | |
1707 | ||
1708 | /** | |
1709 | * mv_bmdma_stop - Stop BMDMA transfer | |
1710 | * @qc: queued command to stop DMA on. | |
1711 | * | |
1712 | * Clears the ATA_DMA_START flag in the bmdma control register | |
1713 | * | |
1714 | * LOCKING: | |
1715 | * Inherited from caller. | |
1716 | */ | |
1717 | static void mv_bmdma_stop(struct ata_queued_cmd *qc) | |
1718 | { | |
1719 | struct ata_port *ap = qc->ap; | |
1720 | void __iomem *port_mmio = mv_ap_base(ap); | |
1721 | u32 cmd; | |
1722 | ||
1723 | /* clear start/stop bit */ | |
1724 | cmd = readl(port_mmio + BMDMA_CMD_OFS); | |
1725 | cmd &= ~ATA_DMA_START; | |
1726 | writelfl(cmd, port_mmio + BMDMA_CMD_OFS); | |
1727 | ||
1728 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | |
1729 | ata_sff_dma_pause(ap); | |
1730 | } | |
1731 | ||
1732 | /** | |
1733 | * mv_bmdma_status - Read BMDMA status | |
1734 | * @ap: port for which to retrieve DMA status. | |
1735 | * | |
1736 | * Read and return equivalent of the sff BMDMA status register. | |
1737 | * | |
1738 | * LOCKING: | |
1739 | * Inherited from caller. | |
1740 | */ | |
1741 | static u8 mv_bmdma_status(struct ata_port *ap) | |
1742 | { | |
1743 | void __iomem *port_mmio = mv_ap_base(ap); | |
1744 | u32 reg, status; | |
1745 | ||
1746 | /* | |
1747 | * Other bits are valid only if ATA_DMA_ACTIVE==0, | |
1748 | * and the ATA_DMA_INTR bit doesn't exist. | |
1749 | */ | |
1750 | reg = readl(port_mmio + BMDMA_STATUS_OFS); | |
1751 | if (reg & ATA_DMA_ACTIVE) | |
1752 | status = ATA_DMA_ACTIVE; | |
1753 | else | |
1754 | status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; | |
1755 | return status; | |
1756 | } | |
1757 | ||
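/*
 * The status synthesis above, as a table (sketch):
 *
 *	BMDMA_STATUS_OFS contents	value returned
 *	------------------------	--------------------------
 *	ATA_DMA_ACTIVE set		ATA_DMA_ACTIVE
 *	else, ATA_DMA_ERR set		ATA_DMA_ERR | ATA_DMA_INTR
 *	else				ATA_DMA_INTR
 */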
05b308e1 BR |
1758 | /** |
1759 | * mv_qc_prep - Host specific command preparation. | |
1760 | * @qc: queued command to prepare | |
1761 | * | |
1762 | * This routine simply redirects to the general purpose routine | |
1763 | * if command is not DMA. Else, it handles prep of the CRQB | |
1764 | * (command request block), does some sanity checking, and calls | |
1765 | * the SG load routine. | |
1766 | * | |
1767 | * LOCKING: | |
1768 | * Inherited from caller. | |
1769 | */ | |
31961943 BR |
1770 | static void mv_qc_prep(struct ata_queued_cmd *qc) |
1771 | { | |
1772 | struct ata_port *ap = qc->ap; | |
1773 | struct mv_port_priv *pp = ap->private_data; | |
e1469874 | 1774 | __le16 *cw; |
31961943 BR |
1775 | struct ata_taskfile *tf; |
1776 | u16 flags = 0; | |
a6432436 | 1777 | unsigned in_index; |
31961943 | 1778 | |
138bfdd0 ML |
1779 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1780 | (qc->tf.protocol != ATA_PROT_NCQ)) | |
31961943 | 1781 | return; |
20f733e7 | 1782 | |
31961943 BR |
1783 | /* Fill in command request block |
1784 | */ | |
e4e7b892 | 1785 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
31961943 | 1786 | flags |= CRQB_FLAG_READ; |
beec7dbc | 1787 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
31961943 | 1788 | flags |= qc->tag << CRQB_TAG_SHIFT; |
e49856d8 | 1789 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
31961943 | 1790 | |
bdd4ddde | 1791 | /* get current queue index from software */ |
fcfb1f77 | 1792 | in_index = pp->req_idx; |
a6432436 ML |
1793 | |
1794 | pp->crqb[in_index].sg_addr = | |
eb73d558 | 1795 | cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); |
a6432436 | 1796 | pp->crqb[in_index].sg_addr_hi = |
eb73d558 | 1797 | cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); |
a6432436 | 1798 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
31961943 | 1799 | |
a6432436 | 1800 | cw = &pp->crqb[in_index].ata_cmd[0]; |
31961943 BR |
1801 | tf = &qc->tf; |
1802 | ||
1803 | /* Sadly, the CRQB cannot accommodate all registers--there are |
1804 | * only 11 bytes...so we must pick and choose required | |
1805 | * registers based on the command. So, we drop feature and | |
1806 | * hob_feature for [RW] DMA commands, but they are needed for | |
cd12e1f7 ML |
1807 | * NCQ. NCQ will drop hob_nsect, which is not needed there |
1808 | * (nsect is used only for the tag; feat/hob_feat hold true nsect). | |
20f733e7 | 1809 | */ |
31961943 BR |
1810 | switch (tf->command) { |
1811 | case ATA_CMD_READ: | |
1812 | case ATA_CMD_READ_EXT: | |
1813 | case ATA_CMD_WRITE: | |
1814 | case ATA_CMD_WRITE_EXT: | |
c15d85c8 | 1815 | case ATA_CMD_WRITE_FUA_EXT: |
31961943 BR |
1816 | mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); |
1817 | break; | |
31961943 BR |
1818 | case ATA_CMD_FPDMA_READ: |
1819 | case ATA_CMD_FPDMA_WRITE: | |
8b260248 | 1820 | mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); |
31961943 BR |
1821 | mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); |
1822 | break; | |
31961943 BR |
1823 | default: |
1824 | /* The only other commands EDMA supports in non-queued and | |
1825 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none | |
1826 | * of which are defined/used by Linux. If we get here, this | |
1827 | * driver needs work. | |
1828 | * | |
1829 | * FIXME: modify libata to give qc_prep a return value and | |
1830 | * return error here. | |
1831 | */ | |
1832 | BUG_ON(tf->command); | |
1833 | break; | |
1834 | } | |
1835 | mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); | |
1836 | mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); | |
1837 | mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); | |
1838 | mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); | |
1839 | mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); | |
1840 | mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); | |
1841 | mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); | |
1842 | mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); | |
1843 | mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ | |
1844 | ||
e4e7b892 JG |
1845 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
1846 | return; | |
1847 | mv_fill_sg(qc); | |
1848 | } | |
1849 | ||
1850 | /** | |
1851 | * mv_qc_prep_iie - Host specific command preparation. | |
1852 | * @qc: queued command to prepare | |
1853 | * | |
1854 | * This routine simply redirects to the general purpose routine | |
1855 | * if command is not DMA. Else, it handles prep of the CRQB | |
1856 | * (command request block), does some sanity checking, and calls | |
1857 | * the SG load routine. | |
1858 | * | |
1859 | * LOCKING: | |
1860 | * Inherited from caller. | |
1861 | */ | |
1862 | static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |
1863 | { | |
1864 | struct ata_port *ap = qc->ap; | |
1865 | struct mv_port_priv *pp = ap->private_data; | |
1866 | struct mv_crqb_iie *crqb; | |
1867 | struct ata_taskfile *tf; | |
a6432436 | 1868 | unsigned in_index; |
e4e7b892 JG |
1869 | u32 flags = 0; |
1870 | ||
138bfdd0 ML |
1871 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1872 | (qc->tf.protocol != ATA_PROT_NCQ)) | |
e4e7b892 JG |
1873 | return; |
1874 | ||
e12bef50 | 1875 | /* Fill in Gen IIE command request block */ |
e4e7b892 JG |
1876 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
1877 | flags |= CRQB_FLAG_READ; | |
1878 | ||
beec7dbc | 1879 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
e4e7b892 | 1880 | flags |= qc->tag << CRQB_TAG_SHIFT; |
8c0aeb4a | 1881 | flags |= qc->tag << CRQB_HOSTQ_SHIFT; |
e49856d8 | 1882 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
e4e7b892 | 1883 | |
bdd4ddde | 1884 | /* get current queue index from software */ |
fcfb1f77 | 1885 | in_index = pp->req_idx; |
a6432436 ML |
1886 | |
1887 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | |
eb73d558 ML |
1888 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); |
1889 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); | |
e4e7b892 JG |
1890 | crqb->flags = cpu_to_le32(flags); |
1891 | ||
1892 | tf = &qc->tf; | |
1893 | crqb->ata_cmd[0] = cpu_to_le32( | |
1894 | (tf->command << 16) | | |
1895 | (tf->feature << 24) | |
1896 | ); | |
1897 | crqb->ata_cmd[1] = cpu_to_le32( | |
1898 | (tf->lbal << 0) | | |
1899 | (tf->lbam << 8) | | |
1900 | (tf->lbah << 16) | | |
1901 | (tf->device << 24) | |
1902 | ); | |
1903 | crqb->ata_cmd[2] = cpu_to_le32( | |
1904 | (tf->hob_lbal << 0) | | |
1905 | (tf->hob_lbam << 8) | | |
1906 | (tf->hob_lbah << 16) | | |
1907 | (tf->hob_feature << 24) | |
1908 | ); | |
1909 | crqb->ata_cmd[3] = cpu_to_le32( | |
1910 | (tf->nsect << 0) | | |
1911 | (tf->hob_nsect << 8) | |
1912 | ); | |
1913 | ||
1914 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | |
31961943 | 1915 | return; |
31961943 BR |
1916 | mv_fill_sg(qc); |
1917 | } | |
1918 | ||
d16ab3f6 ML |
1919 | /** |
1920 | * mv_sff_check_status - fetch device status, if valid | |
1921 | * @ap: ATA port to fetch status from | |
1922 | * | |
1923 | * When using command issue via mv_qc_issue_fis(), | |
1924 | * the initial ATA_BUSY state does not show up in the | |
1925 | * ATA status (shadow) register. This can confuse libata! | |
1926 | * | |
1927 | * So we have a hook here to fake ATA_BUSY for that situation, | |
1928 | * until the first time a BUSY, DRQ, or ERR bit is seen. | |
1929 | * | |
1930 | * The rest of the time, it simply returns the ATA status register. | |
1931 | */ | |
1932 | static u8 mv_sff_check_status(struct ata_port *ap) | |
1933 | { | |
1934 | u8 stat = ioread8(ap->ioaddr.status_addr); | |
1935 | struct mv_port_priv *pp = ap->private_data; | |
1936 | ||
1937 | if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { | |
1938 | if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) | |
1939 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; | |
1940 | else | |
1941 | stat = ATA_BUSY; | |
1942 | } | |
1943 | return stat; | |
1944 | } | |
1945 | ||
70f8b79c ML |
1946 | /** |
1947 | * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register | |
1948 | * @fis: fis to be sent | |
1949 | * @nwords: number of 32-bit words in the fis | |
1950 | */ | |
1951 | static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) | |
1952 | { | |
1953 | void __iomem *port_mmio = mv_ap_base(ap); | |
1954 | u32 ifctl, old_ifctl, ifstat; | |
1955 | int i, timeout = 200, final_word = nwords - 1; | |
1956 | ||
1957 | /* Initiate FIS transmission mode */ | |
1958 | old_ifctl = readl(port_mmio + SATA_IFCTL_OFS); | |
1959 | ifctl = 0x100 | (old_ifctl & 0xf); | |
1960 | writelfl(ifctl, port_mmio + SATA_IFCTL_OFS); | |
1961 | ||
1962 | /* Send all words of the FIS except for the final word */ | |
1963 | for (i = 0; i < final_word; ++i) | |
1964 | writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS); | |
1965 | ||
1966 | /* Flag end-of-transmission, and then send the final word */ | |
1967 | writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS); | |
1968 | writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS); | |
1969 | ||
1970 | /* | |
1971 | * Wait for FIS transmission to complete. | |
1972 | * This typically takes just a single iteration. | |
1973 | */ | |
1974 | do { | |
1975 | ifstat = readl(port_mmio + SATA_IFSTAT_OFS); | |
1976 | } while (!(ifstat & 0x1000) && --timeout); | |
1977 | ||
1978 | /* Restore original port configuration */ | |
1979 | writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS); | |
1980 | ||
1981 | /* See if it worked */ | |
1982 | if ((ifstat & 0x3000) != 0x1000) { | |
1983 | ata_port_printk(ap, KERN_WARNING, | |
1984 | "%s transmission error, ifstat=%08x\n", | |
1985 | __func__, ifstat); | |
1986 | return AC_ERR_OTHER; | |
1987 | } | |
1988 | return 0; | |
1989 | } | |
1990 | ||
1991 | /** | |
1992 | * mv_qc_issue_fis - Issue a command directly as a FIS | |
1993 | * @qc: queued command to start | |
1994 | * | |
1995 | * Note that the ATA shadow registers are not updated | |
1996 | * after command issue, so the device will appear "READY" | |
1997 | * if polled, even while it is BUSY processing the command. | |
1998 | * | |
1999 | * So we use a status hook to fake ATA_BUSY until the drive changes state. | |
2000 | * | |
2001 | * Note: we don't get updated shadow regs on *completion* | |
2002 | * of non-data commands. So avoid sending them via this function, | |
2003 | * as they will appear to have completed immediately. | |
2004 | * | |
2005 | * GEN_IIE has special registers that we could get the result tf from, | |
2006 | * but earlier chipsets do not. For now, we ignore those registers. | |
2007 | */ | |
2008 | static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) | |
2009 | { | |
2010 | struct ata_port *ap = qc->ap; | |
2011 | struct mv_port_priv *pp = ap->private_data; | |
2012 | struct ata_link *link = qc->dev->link; | |
2013 | u32 fis[5]; | |
2014 | int err = 0; | |
2015 | ||
2016 | ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); | |
2017 | err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0])); | |
2018 | if (err) | |
2019 | return err; | |
2020 | ||
2021 | switch (qc->tf.protocol) { | |
2022 | case ATAPI_PROT_PIO: | |
2023 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | |
2024 | /* fall through */ | |
2025 | case ATAPI_PROT_NODATA: | |
2026 | ap->hsm_task_state = HSM_ST_FIRST; | |
2027 | break; | |
2028 | case ATA_PROT_PIO: | |
2029 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; | |
2030 | if (qc->tf.flags & ATA_TFLAG_WRITE) | |
2031 | ap->hsm_task_state = HSM_ST_FIRST; | |
2032 | else | |
2033 | ap->hsm_task_state = HSM_ST; | |
2034 | break; | |
2035 | default: | |
2036 | ap->hsm_task_state = HSM_ST_LAST; | |
2037 | break; | |
2038 | } | |
2039 | ||
2040 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
2041 | ata_pio_queue_task(ap, qc, 0); | |
2042 | return 0; | |
2043 | } | |
2044 | ||
05b308e1 BR |
2045 | /** |
2046 | * mv_qc_issue - Initiate a command to the host | |
2047 | * @qc: queued command to start | |
2048 | * | |
2049 | * This routine simply redirects to the general purpose routine | |
2050 | * if command is not DMA. Else, it sanity checks our local | |
2051 | * caches of the request producer/consumer indices then enables | |
2052 | * DMA and bumps the request producer index. | |
2053 | * | |
2054 | * LOCKING: | |
2055 | * Inherited from caller. | |
2056 | */ | |
9a3d9eb0 | 2057 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) |
31961943 | 2058 | { |
f48765cc | 2059 | static int limit_warnings = 10; |
c5d3e45a JG |
2060 | struct ata_port *ap = qc->ap; |
2061 | void __iomem *port_mmio = mv_ap_base(ap); | |
2062 | struct mv_port_priv *pp = ap->private_data; | |
bdd4ddde | 2063 | u32 in_index; |
42ed893d | 2064 | unsigned int port_irqs; |
f48765cc | 2065 | |
d16ab3f6 ML |
2066 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ |
2067 | ||
f48765cc ML |
2068 | switch (qc->tf.protocol) { |
2069 | case ATA_PROT_DMA: | |
2070 | case ATA_PROT_NCQ: | |
2071 | mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); | |
2072 | pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; | |
2073 | in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; | |
2074 | ||
2075 | /* Write the request in pointer to kick the EDMA to life */ | |
2076 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, | |
2077 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | |
2078 | return 0; | |
31961943 | 2079 | |
f48765cc | 2080 | case ATA_PROT_PIO: |
c6112bd8 ML |
2081 | /* |
2082 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. | |
2083 | * | |
2084 | * Someday, we might implement special polling workarounds | |
2085 | * for these, but it all seems rather unnecessary since we | |
2086 | * normally use only DMA for commands which transfer more | |
2087 | * than a single block of data. | |
2088 | * | |
2089 | * Much of the time, this could just work regardless. | |
2090 | * So for now, just log the incident, and allow the attempt. | |
2091 | */ | |
c7843e8f | 2092 | if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { |
c6112bd8 ML |
2093 | --limit_warnings; |
2094 | ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME | |
2095 | ": attempting PIO w/multiple DRQ: " | |
2096 | "this may fail due to h/w errata\n"); | |
2097 | } | |
f48765cc | 2098 | /* fall through */
42ed893d | 2099 | case ATA_PROT_NODATA: |
f48765cc | 2100 | case ATAPI_PROT_PIO: |
42ed893d ML |
2101 | case ATAPI_PROT_NODATA: |
2102 | if (ap->flags & ATA_FLAG_PIO_POLLING) | |
2103 | qc->tf.flags |= ATA_TFLAG_POLLING; | |
2104 | break; | |
31961943 | 2105 | } |
42ed893d ML |
2106 | |
2107 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
2108 | port_irqs = ERR_IRQ; /* mask device interrupt when polling */ | |
2109 | else | |
2110 | port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ | |
2111 | ||
2112 | /* | |
2113 | * We're about to send a non-EDMA capable command to the | |
2114 | * port. Turn off EDMA so there won't be problems accessing | |
2115 | * the shadow block and other registers. |
2116 | */ | |
2117 | mv_stop_edma(ap); | |
2118 | mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); | |
2119 | mv_pmp_select(ap, qc->dev->link->pmp); | |
70f8b79c ML |
2120 | |
2121 | if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { | |
2122 | struct mv_host_priv *hpriv = ap->host->private_data; | |
2123 | /* | |
2124 | * Workaround for 88SX60x1 FEr SATA#25 (part 2). | |
40f21b11 | 2125 | * |
70f8b79c ML |
2126 | * After any NCQ error, the READ_LOG_EXT command |
2127 | * from libata-eh *must* use mv_qc_issue_fis(). | |
2128 | * Otherwise it might fail, due to chip errata. | |
2129 | * | |
2130 | * Rather than special-case it, we'll just *always* | |
2131 | * use this method here for READ_LOG_EXT, making for | |
2132 | * easier testing. | |
2133 | */ | |
2134 | if (IS_GEN_II(hpriv)) | |
2135 | return mv_qc_issue_fis(qc); | |
2136 | } | |
42ed893d | 2137 | return ata_sff_qc_issue(qc); |
31961943 BR |
2138 | } |
2139 | ||
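/*
 * A note on the EDMA "kick" in mv_qc_issue() above: the queue is
 * MV_MAX_Q_DEPTH entries deep, so pp->req_idx wraps via
 * MV_MAX_Q_DEPTH_MASK (e.g. 31 -> 0 for a 32-deep queue). The single
 * writelfl() combines the CRQB ring's base address (high bits) with
 * the new index in the EDMA_REQ_Q_PTR_SHIFT field, so one posted
 * write both publishes the CRQB and advances the hardware's
 * request-queue in-pointer.
 */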
8f767f8a ML |
2140 | static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) |
2141 | { | |
2142 | struct mv_port_priv *pp = ap->private_data; | |
2143 | struct ata_queued_cmd *qc; | |
2144 | ||
2145 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) | |
2146 | return NULL; | |
2147 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | |
95db5051 ML |
2148 | if (qc) { |
2149 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
2150 | qc = NULL; | |
2151 | else if (!(qc->flags & ATA_QCFLAG_ACTIVE)) | |
2152 | qc = NULL; | |
2153 | } | |
8f767f8a ML |
2154 | return qc; |
2155 | } | |
2156 | ||
29d187bb ML |
2157 | static void mv_pmp_error_handler(struct ata_port *ap) |
2158 | { | |
2159 | unsigned int pmp, pmp_map; | |
2160 | struct mv_port_priv *pp = ap->private_data; | |
2161 | ||
2162 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { | |
2163 | /* | |
2164 | * Perform NCQ error analysis on failed PMPs | |
2165 | * before we freeze the port entirely. | |
2166 | * | |
2167 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). | |
2168 | */ | |
2169 | pmp_map = pp->delayed_eh_pmp_map; | |
2170 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; | |
2171 | for (pmp = 0; pmp_map != 0; pmp++) { | |
2172 | unsigned int this_pmp = (1 << pmp); | |
2173 | if (pmp_map & this_pmp) { | |
2174 | struct ata_link *link = &ap->pmp_link[pmp]; | |
2175 | pmp_map &= ~this_pmp; | |
2176 | ata_eh_analyze_ncq_error(link); | |
2177 | } | |
2178 | } | |
2179 | ata_port_freeze(ap); | |
2180 | } | |
2181 | sata_pmp_error_handler(ap); | |
2182 | } | |
2183 | ||
4c299ca3 ML |
2184 | static unsigned int mv_get_err_pmp_map(struct ata_port *ap) |
2185 | { | |
2186 | void __iomem *port_mmio = mv_ap_base(ap); | |
2187 | ||
2188 | return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; | |
2189 | } | |
2190 | ||
4c299ca3 ML |
2191 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) |
2192 | { | |
2193 | struct ata_eh_info *ehi; | |
2194 | unsigned int pmp; | |
2195 | ||
2196 | /* | |
2197 | * Initialize EH info for PMPs which saw device errors | |
2198 | */ | |
2199 | ehi = &ap->link.eh_info; | |
2200 | for (pmp = 0; pmp_map != 0; pmp++) { | |
2201 | unsigned int this_pmp = (1 << pmp); | |
2202 | if (pmp_map & this_pmp) { | |
2203 | struct ata_link *link = &ap->pmp_link[pmp]; | |
2204 | ||
2205 | pmp_map &= ~this_pmp; | |
2206 | ehi = &link->eh_info; | |
2207 | ata_ehi_clear_desc(ehi); | |
2208 | ata_ehi_push_desc(ehi, "dev err"); | |
2209 | ehi->err_mask |= AC_ERR_DEV; | |
2210 | ehi->action |= ATA_EH_RESET; | |
2211 | ata_link_abort(link); | |
2212 | } | |
2213 | } | |
2214 | } | |
2215 | ||
06aaca3f ML |
2216 | static int mv_req_q_empty(struct ata_port *ap) |
2217 | { | |
2218 | void __iomem *port_mmio = mv_ap_base(ap); | |
2219 | u32 in_ptr, out_ptr; | |
2220 | ||
2221 | in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS) | |
2222 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2223 | out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) | |
2224 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2225 | return (in_ptr == out_ptr); /* 1 == queue_is_empty */ | |
2226 | } | |
2227 | ||
4c299ca3 ML |
2228 | static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) |
2229 | { | |
2230 | struct mv_port_priv *pp = ap->private_data; | |
2231 | int failed_links; | |
2232 | unsigned int old_map, new_map; | |
2233 | ||
2234 | /* | |
2235 | * Device error during FBS+NCQ operation: | |
2236 | * | |
2237 | * Set a port flag to prevent further I/O being enqueued. | |
2238 | * Leave the EDMA running to drain outstanding commands from this port. | |
2239 | * Perform the post-mortem/EH only when all responses are complete. | |
2240 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). | |
2241 | */ | |
2242 | if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { | |
2243 | pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; | |
2244 | pp->delayed_eh_pmp_map = 0; | |
2245 | } | |
2246 | old_map = pp->delayed_eh_pmp_map; | |
2247 | new_map = old_map | mv_get_err_pmp_map(ap); | |
2248 | ||
2249 | if (old_map != new_map) { | |
2250 | pp->delayed_eh_pmp_map = new_map; | |
2251 | mv_pmp_eh_prep(ap, new_map & ~old_map); | |
2252 | } | |
c46938cc | 2253 | failed_links = hweight16(new_map); |
4c299ca3 ML |
2254 | |
2255 | ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " | |
2256 | "failed_links=%d nr_active_links=%d\n", | |
2257 | __func__, pp->delayed_eh_pmp_map, | |
2258 | ap->qc_active, failed_links, | |
2259 | ap->nr_active_links); | |
2260 | ||
06aaca3f | 2261 | if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { |
4c299ca3 ML |
2262 | mv_process_crpb_entries(ap, pp); |
2263 | mv_stop_edma(ap); | |
2264 | mv_eh_freeze(ap); | |
2265 | ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); | |
2266 | return 1; /* handled */ | |
2267 | } | |
2268 | ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); | |
2269 | return 1; /* handled */ | |
2270 | } | |
2271 | ||
2272 | static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) | |
2273 | { | |
2274 | /* | |
2275 | * Possible future enhancement: | |
2276 | * | |
2277 | * FBS+non-NCQ operation is not yet implemented. | |
2278 | * See related notes in mv_edma_cfg(). | |
2279 | * | |
2280 | * Device error during FBS+non-NCQ operation: | |
2281 | * | |
2282 | * We need to snapshot the shadow registers for each failed command. | |
2283 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). | |
2284 | */ | |
2285 | return 0; /* not handled */ | |
2286 | } | |
2287 | ||
2288 | static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) | |
2289 | { | |
2290 | struct mv_port_priv *pp = ap->private_data; | |
2291 | ||
2292 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) | |
2293 | return 0; /* EDMA was not active: not handled */ | |
2294 | if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) | |
2295 | return 0; /* FBS was not active: not handled */ | |
2296 | ||
2297 | if (!(edma_err_cause & EDMA_ERR_DEV)) | |
2298 | return 0; /* non DEV error: not handled */ | |
2299 | edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; | |
2300 | if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) | |
2301 | return 0; /* other problems: not handled */ | |
2302 | ||
2303 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { | |
2304 | /* | |
2305 | * EDMA should NOT have self-disabled for this case. | |
2306 | * If it did, then something is wrong elsewhere, | |
2307 | * and we cannot handle it here. | |
2308 | */ | |
2309 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { | |
2310 | ata_port_printk(ap, KERN_WARNING, | |
2311 | "%s: err_cause=0x%x pp_flags=0x%x\n", | |
2312 | __func__, edma_err_cause, pp->pp_flags); | |
2313 | return 0; /* not handled */ | |
2314 | } | |
2315 | return mv_handle_fbs_ncq_dev_err(ap); | |
2316 | } else { | |
2317 | /* | |
2318 | * EDMA should have self-disabled for this case. | |
2319 | * If it did not, then something is wrong elsewhere, | |
2320 | * and we cannot handle it here. | |
2321 | */ | |
2322 | if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { | |
2323 | ata_port_printk(ap, KERN_WARNING, | |
2324 | "%s: err_cause=0x%x pp_flags=0x%x\n", | |
2325 | __func__, edma_err_cause, pp->pp_flags); | |
2326 | return 0; /* not handled */ | |
2327 | } | |
2328 | return mv_handle_fbs_non_ncq_dev_err(ap); | |
2329 | } | |
2330 | return 0; /* not handled */ | |
2331 | } | |
2332 | ||
a9010329 | 2333 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) |
8f767f8a | 2334 | { |
8f767f8a | 2335 | struct ata_eh_info *ehi = &ap->link.eh_info; |
a9010329 | 2336 | char *when = "idle"; |
8f767f8a | 2337 | |
8f767f8a | 2338 | ata_ehi_clear_desc(ehi); |
a9010329 ML |
2339 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { |
2340 | when = "disabled"; | |
2341 | } else if (edma_was_enabled) { | |
2342 | when = "EDMA enabled"; | |
8f767f8a ML |
2343 | } else { |
2344 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); | |
2345 | if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) | |
a9010329 | 2346 | when = "polling"; |
8f767f8a | 2347 | } |
a9010329 | 2348 | ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); |
8f767f8a ML |
2349 | ehi->err_mask |= AC_ERR_OTHER; |
2350 | ehi->action |= ATA_EH_RESET; | |
2351 | ata_port_freeze(ap); | |
2352 | } | |
2353 | ||
05b308e1 BR |
2354 | /** |
2355 | * mv_err_intr - Handle error interrupts on the port | |
2356 | * @ap: ATA channel to manipulate | |
2357 | * | |
8d07379d ML |
2358 | * Most cases require a full reset of the chip's state machine, |
2359 | * which also performs a COMRESET. | |
2360 | * Also, if the port disabled DMA, update our cached copy to match. | |
05b308e1 BR |
2361 | * |
2362 | * LOCKING: | |
2363 | * Inherited from caller. | |
2364 | */ | |
37b9046a | 2365 | static void mv_err_intr(struct ata_port *ap) |
31961943 BR |
2366 | { |
2367 | void __iomem *port_mmio = mv_ap_base(ap); | |
bdd4ddde | 2368 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
e4006077 | 2369 | u32 fis_cause = 0; |
bdd4ddde JG |
2370 | struct mv_port_priv *pp = ap->private_data; |
2371 | struct mv_host_priv *hpriv = ap->host->private_data; | |
bdd4ddde | 2372 | unsigned int action = 0, err_mask = 0; |
9af5c9c9 | 2373 | struct ata_eh_info *ehi = &ap->link.eh_info; |
37b9046a ML |
2374 | struct ata_queued_cmd *qc; |
2375 | int abort = 0; | |
20f733e7 | 2376 | |
8d07379d | 2377 | /* |
37b9046a | 2378 | * Read and clear the SError and err_cause bits. |
e4006077 ML |
2379 | * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear |
2380 | * the FIS_IRQ_CAUSE register before clearing edma_err_cause. | |
8d07379d | 2381 | */ |
37b9046a ML |
2382 | sata_scr_read(&ap->link, SCR_ERROR, &serr); |
2383 | sata_scr_write_flush(&ap->link, SCR_ERROR, serr); | |
2384 | ||
bdd4ddde | 2385 | edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
e4006077 ML |
2386 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
2387 | fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | |
2388 | writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | |
2389 | } | |
8d07379d | 2390 | writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
bdd4ddde | 2391 | |
4c299ca3 ML |
2392 | if (edma_err_cause & EDMA_ERR_DEV) { |
2393 | /* | |
2394 | * Device errors during FIS-based switching operation | |
2395 | * require special handling. | |
2396 | */ | |
2397 | if (mv_handle_dev_err(ap, edma_err_cause)) | |
2398 | return; | |
2399 | } | |
2400 | ||
37b9046a ML |
2401 | qc = mv_get_active_qc(ap); |
2402 | ata_ehi_clear_desc(ehi); | |
2403 | ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", | |
2404 | edma_err_cause, pp->pp_flags); | |
e4006077 | 2405 | |
c443c500 | 2406 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
e4006077 | 2407 | ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); |
c443c500 ML |
2408 | if (fis_cause & SATA_FIS_IRQ_AN) { |
2409 | u32 ec = edma_err_cause & | |
2410 | ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); | |
2411 | sata_async_notification(ap); | |
2412 | if (!ec) | |
2413 | return; /* Just an AN; no need for the nukes */ | |
2414 | ata_ehi_push_desc(ehi, "SDB notify"); | |
2415 | } | |
2416 | } | |
bdd4ddde | 2417 | /* |
352fab70 | 2418 | * All generations share these EDMA error cause bits: |
bdd4ddde | 2419 | */ |
37b9046a | 2420 | if (edma_err_cause & EDMA_ERR_DEV) { |
bdd4ddde | 2421 | err_mask |= AC_ERR_DEV; |
37b9046a ML |
2422 | action |= ATA_EH_RESET; |
2423 | ata_ehi_push_desc(ehi, "dev error"); | |
2424 | } | |
bdd4ddde | 2425 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
6c1153e0 | 2426 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
bdd4ddde JG |
2427 | EDMA_ERR_INTRL_PAR)) { |
2428 | err_mask |= AC_ERR_ATA_BUS; | |
cf480626 | 2429 | action |= ATA_EH_RESET; |
b64bbc39 | 2430 | ata_ehi_push_desc(ehi, "parity error"); |
bdd4ddde JG |
2431 | } |
2432 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { | |
2433 | ata_ehi_hotplugged(ehi); | |
2434 | ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? | |
b64bbc39 | 2435 | "dev disconnect" : "dev connect"); |
cf480626 | 2436 | action |= ATA_EH_RESET; |
bdd4ddde JG |
2437 | } |
2438 | ||
352fab70 ML |
2439 | /* |
2440 | * Gen-I has a different SELF_DIS bit, | |
2441 | * different FREEZE bits, and no SERR bit: | |
2442 | */ | |
ee9ccdf7 | 2443 | if (IS_GEN_I(hpriv)) { |
bdd4ddde | 2444 | eh_freeze_mask = EDMA_EH_FREEZE_5; |
bdd4ddde | 2445 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { |
bdd4ddde | 2446 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
b64bbc39 | 2447 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
bdd4ddde JG |
2448 | } |
2449 | } else { | |
2450 | eh_freeze_mask = EDMA_EH_FREEZE; | |
bdd4ddde | 2451 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { |
bdd4ddde | 2452 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
b64bbc39 | 2453 | ata_ehi_push_desc(ehi, "EDMA self-disable"); |
bdd4ddde | 2454 | } |
bdd4ddde | 2455 | if (edma_err_cause & EDMA_ERR_SERR) { |
8d07379d ML |
2456 | ata_ehi_push_desc(ehi, "SError=%08x", serr); |
2457 | err_mask |= AC_ERR_ATA_BUS; | |
cf480626 | 2458 | action |= ATA_EH_RESET; |
bdd4ddde | 2459 | } |
afb0edd9 | 2460 | } |
20f733e7 | 2461 | |
bdd4ddde JG |
2462 | if (!err_mask) { |
2463 | err_mask = AC_ERR_OTHER; | |
cf480626 | 2464 | action |= ATA_EH_RESET; |
bdd4ddde JG |
2465 | } |
2466 | ||
2467 | ehi->serror |= serr; | |
2468 | ehi->action |= action; | |
2469 | ||
2470 | if (qc) | |
2471 | qc->err_mask |= err_mask; | |
2472 | else | |
2473 | ehi->err_mask |= err_mask; | |
2474 | ||
37b9046a ML |
2475 | if (err_mask == AC_ERR_DEV) { |
2476 | /* | |
2477 | * Cannot do ata_port_freeze() here, | |
2478 | * because it would kill PIO access, | |
2479 | * which is needed for further diagnosis. | |
2480 | */ | |
2481 | mv_eh_freeze(ap); | |
2482 | abort = 1; | |
2483 | } else if (edma_err_cause & eh_freeze_mask) { | |
2484 | /* | |
2485 | * Note to self: ata_port_freeze() calls ata_port_abort() | |
2486 | */ | |
bdd4ddde | 2487 | ata_port_freeze(ap); |
37b9046a ML |
2488 | } else { |
2489 | abort = 1; | |
2490 | } | |
2491 | ||
2492 | if (abort) { | |
2493 | if (qc) | |
2494 | ata_link_abort(qc->dev->link); | |
2495 | else | |
2496 | ata_port_abort(ap); | |
2497 | } | |
bdd4ddde JG |
2498 | } |
2499 | ||
fcfb1f77 ML |
2500 | static void mv_process_crpb_response(struct ata_port *ap, |
2501 | struct mv_crpb *response, unsigned int tag, int ncq_enabled) | |
2502 | { | |
2503 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); | |
2504 | ||
2505 | if (qc) { | |
2506 | u8 ata_status; | |
2507 | u16 edma_status = le16_to_cpu(response->flags); | |
2508 | /* | |
2509 | * edma_status from a response queue entry: | |
2510 | * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). | |
2511 | * MSB is saved ATA status from command completion. | |
2512 | */ | |
2513 | if (!ncq_enabled) { | |
2514 | u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; | |
2515 | if (err_cause) { | |
2516 | /* | |
2517 | * Error will be seen/handled by mv_err_intr(). | |
2518 | * So do nothing at all here. | |
2519 | */ | |
2520 | return; | |
2521 | } | |
2522 | } | |
2523 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; | |
37b9046a ML |
2524 | if (!ac_err_mask(ata_status)) |
2525 | ata_qc_complete(qc); | |
2526 | /* else: leave it for mv_err_intr() */ | |
fcfb1f77 ML |
2527 | } else { |
2528 | ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", | |
2529 | __func__, tag); | |
2530 | } | |
2531 | } | |
2532 | ||
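/*
 * Example decode (hypothetical response value): flags == 0x5000
 * yields ata_status == 0x50 (DRDY|DSC) after the status shift, with
 * a zero low byte, i.e. no EDMA error cause. ac_err_mask() is then
 * zero and the qc completes normally; a nonzero mask is left for
 * mv_err_intr() to deal with.
 */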
2533 | static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) | |
bdd4ddde JG |
2534 | { |
2535 | void __iomem *port_mmio = mv_ap_base(ap); | |
2536 | struct mv_host_priv *hpriv = ap->host->private_data; | |
fcfb1f77 | 2537 | u32 in_index; |
bdd4ddde | 2538 | bool work_done = false; |
fcfb1f77 | 2539 | int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); |
bdd4ddde | 2540 | |
fcfb1f77 | 2541 | /* Get the hardware queue position index */ |
bdd4ddde JG |
2542 | in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) |
2543 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | |
2544 | ||
fcfb1f77 ML |
2545 | /* Process new responses since the last time we looked */
2546 | while (in_index != pp->resp_idx) { | |
6c1153e0 | 2547 | unsigned int tag; |
fcfb1f77 | 2548 | struct mv_crpb *response = &pp->crpb[pp->resp_idx]; |
bdd4ddde | 2549 | |
fcfb1f77 | 2550 | pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; |
bdd4ddde | 2551 | |
fcfb1f77 ML |
2552 | if (IS_GEN_I(hpriv)) { |
2553 | /* 50xx: no NCQ, only one command active at a time */ | |
9af5c9c9 | 2554 | tag = ap->link.active_tag; |
fcfb1f77 ML |
2555 | } else { |
2556 | /* Gen II/IIE: get command tag from CRPB entry */ | |
2557 | tag = le16_to_cpu(response->id) & 0x1f; | |
bdd4ddde | 2558 | } |
fcfb1f77 | 2559 | mv_process_crpb_response(ap, response, tag, ncq_enabled); |
bdd4ddde | 2560 | work_done = true; |
bdd4ddde JG |
2561 | } |
2562 | ||
352fab70 | 2563 | /* Update the software queue position index in hardware */ |
bdd4ddde JG |
2564 | if (work_done) |
2565 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | | |
fcfb1f77 | 2566 | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), |
bdd4ddde | 2567 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
20f733e7 BR |
2568 | } |
2569 | ||
a9010329 ML |
2570 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) |
2571 | { | |
2572 | struct mv_port_priv *pp; | |
2573 | int edma_was_enabled; | |
2574 | ||
2575 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | |
2576 | mv_unexpected_intr(ap, 0); | |
2577 | return; | |
2578 | } | |
2579 | /* | |
2580 | * Grab a snapshot of the EDMA_EN flag setting, | |
2581 | * so that we have a consistent view for this port, | |
2582 | * even if one of the routines we call changes it. |
2583 | */ | |
2584 | pp = ap->private_data; | |
2585 | edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); | |
2586 | /* | |
2587 | * Process completed CRPB response(s) before other events. | |
2588 | */ | |
2589 | if (edma_was_enabled && (port_cause & DONE_IRQ)) { | |
2590 | mv_process_crpb_entries(ap, pp); | |
4c299ca3 ML |
2591 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) |
2592 | mv_handle_fbs_ncq_dev_err(ap); | |
a9010329 ML |
2593 | } |
2594 | /* | |
2595 | * Handle chip-reported errors, or continue on to handle PIO. | |
2596 | */ | |
2597 | if (unlikely(port_cause & ERR_IRQ)) { | |
2598 | mv_err_intr(ap); | |
2599 | } else if (!edma_was_enabled) { | |
2600 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); | |
2601 | if (qc) | |
2602 | ata_sff_host_intr(ap, qc); | |
2603 | else | |
2604 | mv_unexpected_intr(ap, edma_was_enabled); | |
2605 | } | |
2606 | } | |
2607 | ||
05b308e1 BR |
2608 | /** |
2609 | * mv_host_intr - Handle all interrupts on the given host controller | |
cca3974e | 2610 | * @host: host specific structure |
7368f919 | 2611 | * @main_irq_cause: Main interrupt cause register for the chip. |
05b308e1 BR |
2612 | * |
2613 | * LOCKING: | |
2614 | * Inherited from caller. | |
2615 | */ | |
7368f919 | 2616 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) |
20f733e7 | 2617 | { |
f351b2d6 | 2618 | struct mv_host_priv *hpriv = host->private_data; |
eabd5eb1 | 2619 | void __iomem *mmio = hpriv->base, *hc_mmio; |
a3718c1f | 2620 | unsigned int handled = 0, port; |
20f733e7 | 2621 | |
2b748a0a ML |
2622 | /* If asserted, clear the "all ports" IRQ coalescing bit */ |
2623 | if (main_irq_cause & ALL_PORTS_COAL_DONE) | |
2624 | writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE); | |
2625 | ||
a3718c1f | 2626 | for (port = 0; port < hpriv->n_ports; port++) { |
cca3974e | 2627 | struct ata_port *ap = host->ports[port]; |
eabd5eb1 ML |
2628 | unsigned int p, shift, hardport, port_cause; |
2629 | ||
a3718c1f | 2630 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
a3718c1f | 2631 | /* |
eabd5eb1 ML |
2632 | * Each hc within the host has its own hc_irq_cause register, |
2633 | * where the interrupting ports' bits get ack'd. |
a3718c1f | 2634 | */ |
eabd5eb1 ML |
2635 | if (hardport == 0) { /* first port on this hc ? */ |
2636 | u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; | |
2637 | u32 port_mask, ack_irqs; | |
2638 | /* | |
2639 | * Skip this entire hc if nothing pending for any ports | |
2640 | */ | |
2641 | if (!hc_cause) { | |
2642 | port += MV_PORTS_PER_HC - 1; | |
2643 | continue; | |
2644 | } | |
2645 | /* | |
2646 | * We don't need/want to read the hc_irq_cause register, | |
2647 | * because doing so hurts performance, and | |
2648 | * main_irq_cause already gives us everything we need. | |
2649 | * | |
2650 | * But we do have to *write* to the hc_irq_cause to ack | |
2651 | * the ports that we are handling this time through. | |
2652 | * | |
2653 | * This requires that we create a bitmap for those | |
2654 | * ports which interrupted us, and use that bitmap | |
2655 | * to ack (only) those ports via hc_irq_cause. | |
2656 | */ | |
2657 | ack_irqs = 0; | |
2b748a0a ML |
2658 | if (hc_cause & PORTS_0_3_COAL_DONE) |
2659 | ack_irqs = HC_COAL_IRQ; | |
eabd5eb1 ML |
2660 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { |
2661 | if ((port + p) >= hpriv->n_ports) | |
2662 | break; | |
2663 | port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); | |
2664 | if (hc_cause & port_mask) | |
2665 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; | |
2666 | } | |
a3718c1f | 2667 | hc_mmio = mv_hc_base_from_port(mmio, port); |
eabd5eb1 | 2668 | writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); |
a3718c1f ML |
2669 | handled = 1; |
2670 | } | |
8f767f8a | 2671 | /* |
a9010329 | 2672 | * Handle interrupts signalled for this port: |
8f767f8a | 2673 | */ |
a9010329 ML |
2674 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); |
2675 | if (port_cause) | |
2676 | mv_port_intr(ap, port_cause); | |
20f733e7 | 2677 | } |
a3718c1f | 2678 | return handled; |
20f733e7 BR |
2679 | } |
2680 | ||
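/*
 * Example of the ack-bitmap construction in mv_host_intr() above
 * (hypothetical cause value): if only hard-port 1 of an hc shows
 * DONE_IRQ pending, the loop sets ack_irqs = (DMA_IRQ | DEV_IRQ) << 1,
 * and the single writelfl(~ack_irqs) acks just that port in
 * hc_irq_cause, leaving the other ports' pending bits untouched.
 */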
a3718c1f | 2681 | static int mv_pci_error(struct ata_host *host, void __iomem *mmio) |
bdd4ddde | 2682 | { |
02a121da | 2683 | struct mv_host_priv *hpriv = host->private_data; |
bdd4ddde JG |
2684 | struct ata_port *ap; |
2685 | struct ata_queued_cmd *qc; | |
2686 | struct ata_eh_info *ehi; | |
2687 | unsigned int i, err_mask, printed = 0; | |
2688 | u32 err_cause; | |
2689 | ||
02a121da | 2690 | err_cause = readl(mmio + hpriv->irq_cause_ofs); |
bdd4ddde JG |
2691 | |
2692 | dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", | |
2693 | err_cause); | |
2694 | ||
2695 | DPRINTK("All regs @ PCI error\n"); | |
2696 | mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); | |
2697 | ||
02a121da | 2698 | writelfl(0, mmio + hpriv->irq_cause_ofs); |
bdd4ddde JG |
2699 | |
2700 | for (i = 0; i < host->n_ports; i++) { | |
2701 | ap = host->ports[i]; | |
936fd732 | 2702 | if (!ata_link_offline(&ap->link)) { |
9af5c9c9 | 2703 | ehi = &ap->link.eh_info; |
bdd4ddde JG |
2704 | ata_ehi_clear_desc(ehi); |
2705 | if (!printed++) | |
2706 | ata_ehi_push_desc(ehi, | |
2707 | "PCI err cause 0x%08x", err_cause); | |
2708 | err_mask = AC_ERR_HOST_BUS; | |
cf480626 | 2709 | ehi->action = ATA_EH_RESET; |
9af5c9c9 | 2710 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
bdd4ddde JG |
2711 | if (qc) |
2712 | qc->err_mask |= err_mask; | |
2713 | else | |
2714 | ehi->err_mask |= err_mask; | |
2715 | ||
2716 | ata_port_freeze(ap); | |
2717 | } | |
2718 | } | |
a3718c1f | 2719 | return 1; /* handled */ |
bdd4ddde JG |
2720 | } |
2721 | ||
05b308e1 | 2722 | /** |
c5d3e45a | 2723 | * mv_interrupt - Main interrupt event handler |
05b308e1 BR |
2724 | * @irq: unused |
2725 | * @dev_instance: private data; in this case the host structure | |
05b308e1 BR |
2726 | * |
2727 | * Read the read only register to determine if any host | |
2728 | * controllers have pending interrupts. If so, call lower level | |
2729 | * routine to handle. Also check for PCI errors which are only | |
2730 | * reported here. | |
2731 | * | |
8b260248 | 2732 | * LOCKING: |
cca3974e | 2733 | * This routine holds the host lock while processing pending |
05b308e1 BR |
2734 | * interrupts. |
2735 | */ | |
7d12e780 | 2736 | static irqreturn_t mv_interrupt(int irq, void *dev_instance) |
20f733e7 | 2737 | { |
cca3974e | 2738 | struct ata_host *host = dev_instance; |
f351b2d6 | 2739 | struct mv_host_priv *hpriv = host->private_data; |
a3718c1f | 2740 | unsigned int handled = 0; |
6d3c30ef | 2741 | int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; |
96e2c487 | 2742 | u32 main_irq_cause, pending_irqs; |
20f733e7 | 2743 | |
646a4da5 | 2744 | spin_lock(&host->lock); |
6d3c30ef ML |
2745 | |
2746 | /* for MSI: block new interrupts while in here */ | |
2747 | if (using_msi) | |
2b748a0a | 2748 | mv_write_main_irq_mask(0, hpriv); |
6d3c30ef | 2749 | |
7368f919 | 2750 | main_irq_cause = readl(hpriv->main_irq_cause_addr); |
96e2c487 | 2751 | pending_irqs = main_irq_cause & hpriv->main_irq_mask; |
352fab70 ML |
2752 | /* |
2753 | * Deal with cases where we either have nothing pending, or have read | |
2754 | * a bogus register value which can indicate HW removal or PCI fault. | |
20f733e7 | 2755 | */ |
a44253d2 | 2756 | if (pending_irqs && main_irq_cause != 0xffffffffU) { |
1f398472 | 2757 | if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) |
a3718c1f ML |
2758 | handled = mv_pci_error(host, hpriv->base); |
2759 | else | |
a44253d2 | 2760 | handled = mv_host_intr(host, pending_irqs); |
bdd4ddde | 2761 | } |
6d3c30ef ML |
2762 | |
2763 | /* for MSI: unmask; interrupt cause bits will retrigger now */ | |
2764 | if (using_msi) | |
2b748a0a | 2765 | mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); |
6d3c30ef | 2766 | |
9d51af7b ML |
2767 | spin_unlock(&host->lock); |
2768 | ||
20f733e7 BR |
2769 | return IRQ_RETVAL(handled); |
2770 | } | |
2771 | ||
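/*
 * Illustrative sketch (not part of this driver): the
 * "main_irq_cause != 0xffffffffU" test above is the usual defence against
 * surprise hot-unplug or a PCI fault, where reads of device memory return
 * all-ones.  A minimal standalone form, with IRQ_CAUSE as a hypothetical
 * register offset:
 *
 *	u32 cause = readl(regs + IRQ_CAUSE);
 *	if (cause == 0xffffffffU)
 *		return IRQ_NONE;	(device likely gone; not our interrupt)
 */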
c9d39130 JG |
2772 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) |
2773 | { | |
2774 | unsigned int ofs; | |
2775 | ||
2776 | switch (sc_reg_in) { | |
2777 | case SCR_STATUS: | |
2778 | case SCR_ERROR: | |
2779 | case SCR_CONTROL: | |
2780 | ofs = sc_reg_in * sizeof(u32); | |
2781 | break; | |
2782 | default: | |
2783 | ofs = 0xffffffffU; | |
2784 | break; | |
2785 | } | |
2786 | return ofs; | |
2787 | } | |
2788 | ||
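/*
 * Worked example for mv5_scr_offset() above: libata defines SCR_STATUS,
 * SCR_ERROR and SCR_CONTROL as 0, 1 and 2, so the 50xx phy block lays these
 * registers out at word offsets 0x0, 0x4 and 0x8 from mv5_phy_base();
 * any other SCR index yields 0xffffffffU and the caller returns -EINVAL.
 */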
82ef04fb | 2789 | static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) |
c9d39130 | 2790 | { |
82ef04fb | 2791 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
f351b2d6 | 2792 | void __iomem *mmio = hpriv->base; |
82ef04fb | 2793 | void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); |
c9d39130 JG |
2794 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
2795 | ||
da3dbb17 TH |
2796 | if (ofs != 0xffffffffU) { |
2797 | *val = readl(addr + ofs); | |
2798 | return 0; | |
2799 | } else | |
2800 | return -EINVAL; | |
c9d39130 JG |
2801 | } |
2802 | ||
82ef04fb | 2803 | static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
c9d39130 | 2804 | { |
82ef04fb | 2805 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
f351b2d6 | 2806 | void __iomem *mmio = hpriv->base; |
82ef04fb | 2807 | void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); |
c9d39130 JG |
2808 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
2809 | ||
da3dbb17 | 2810 | if (ofs != 0xffffffffU) { |
0d5ff566 | 2811 | writelfl(val, addr + ofs); |
da3dbb17 TH |
2812 | return 0; |
2813 | } else | |
2814 | return -EINVAL; | |
c9d39130 JG |
2815 | } |
2816 | ||
7bb3c529 | 2817 | static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) |
522479fb | 2818 | { |
7bb3c529 | 2819 | struct pci_dev *pdev = to_pci_dev(host->dev); |
522479fb JG |
2820 | int early_5080; |
2821 | ||
44c10138 | 2822 | early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); |
522479fb JG |
2823 | |
2824 | if (!early_5080) { | |
2825 | u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2826 | tmp |= (1 << 0); | |
2827 | writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2828 | } | |
2829 | ||
7bb3c529 | 2830 | mv_reset_pci_bus(host, mmio); |
522479fb JG |
2831 | } |
2832 | ||
2833 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |
2834 | { | |
8e7decdb | 2835 | writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); |
522479fb JG |
2836 | } |
2837 | ||
47c2b677 | 2838 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
ba3fe8fb JG |
2839 | void __iomem *mmio) |
2840 | { | |
c9d39130 JG |
2841 | void __iomem *phy_mmio = mv5_phy_base(mmio, idx); |
2842 | u32 tmp; | |
2843 | ||
2844 | tmp = readl(phy_mmio + MV5_PHY_MODE); | |
2845 | ||
2846 | hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ | |
2847 | hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ | |
ba3fe8fb JG |
2848 | } |
2849 | ||
47c2b677 | 2850 | static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
ba3fe8fb | 2851 | { |
522479fb JG |
2852 | u32 tmp; |
2853 | ||
8e7decdb | 2854 | writel(0, mmio + MV_GPIO_PORT_CTL_OFS); |
522479fb JG |
2855 | |
2856 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ | |
2857 | ||
2858 | tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
2859 | tmp |= ~(1 << 0); | |
2860 | writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); | |
ba3fe8fb JG |
2861 | } |
2862 | ||
2a47ce06 JG |
2863 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
2864 | unsigned int port) | |
bca1c4eb | 2865 | { |
c9d39130 JG |
2866 | void __iomem *phy_mmio = mv5_phy_base(mmio, port); |
2867 | const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); | |
2868 | u32 tmp; | |
2869 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); | |
2870 | ||
2871 | if (fix_apm_sq) { | |
8e7decdb | 2872 | tmp = readl(phy_mmio + MV5_LTMODE_OFS); |
c9d39130 | 2873 | tmp |= (1 << 19); |
8e7decdb | 2874 | writel(tmp, phy_mmio + MV5_LTMODE_OFS); |
c9d39130 | 2875 | |
8e7decdb | 2876 | tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); |
c9d39130 JG |
2877 | tmp &= ~0x3; |
2878 | tmp |= 0x1; | |
8e7decdb | 2879 | writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); |
c9d39130 JG |
2880 | } |
2881 | ||
2882 | tmp = readl(phy_mmio + MV5_PHY_MODE); | |
2883 | tmp &= ~mask; | |
2884 | tmp |= hpriv->signal[port].pre; | |
2885 | tmp |= hpriv->signal[port].amps; | |
2886 | writel(tmp, phy_mmio + MV5_PHY_MODE); | |
bca1c4eb JG |
2887 | } |
2888 | ||
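/*
 * Note: the mask used above, (1<<12)|(1<<11)|(1<<7)|(1<<6)|(1<<5), clears
 * exactly the fields that mv5_read_preamp() captured at init time:
 * pre-emphasis in bits 12:11 (0x1800) and amplitude in bits 7:5 (0xe0).
 * The saved per-port board values are then merged back in.
 */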
c9d39130 JG |
2889 | |
2890 | #undef ZERO | |
2891 | #define ZERO(reg) writel(0, port_mmio + (reg)) | |
2892 | static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2893 | unsigned int port) | |
2894 | { | |
2895 | void __iomem *port_mmio = mv_port_base(mmio, port); | |
2896 | ||
e12bef50 | 2897 | mv_reset_channel(hpriv, mmio, port); |
c9d39130 JG |
2898 | |
2899 | ZERO(0x028); /* command */ | |
2900 | writel(0x11f, port_mmio + EDMA_CFG_OFS); | |
2901 | ZERO(0x004); /* timer */ | |
2902 | ZERO(0x008); /* irq err cause */ | |
2903 | ZERO(0x00c); /* irq err mask */ | |
2904 | ZERO(0x010); /* rq bah */ | |
2905 | ZERO(0x014); /* rq inp */ | |
2906 | ZERO(0x018); /* rq outp */ | |
2907 | ZERO(0x01c); /* respq bah */ | |
2908 | ZERO(0x024); /* respq outp */ | |
2909 | ZERO(0x020); /* respq inp */ | |
2910 | ZERO(0x02c); /* test control */ | |
8e7decdb | 2911 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
c9d39130 JG |
2912 | } |
2913 | #undef ZERO | |
2914 | ||
2915 | #define ZERO(reg) writel(0, hc_mmio + (reg)) | |
2916 | static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2917 | unsigned int hc) | |
47c2b677 | 2918 | { |
c9d39130 JG |
2919 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
2920 | u32 tmp; | |
2921 | ||
2922 | ZERO(0x00c); | |
2923 | ZERO(0x010); | |
2924 | ZERO(0x014); | |
2925 | ZERO(0x018); | |
2926 | ||
2927 | tmp = readl(hc_mmio + 0x20); | |
2928 | tmp &= 0x1c1c1c1c; | |
2929 | tmp |= 0x03030303; | |
2930 | writel(tmp, hc_mmio + 0x20); | |
2931 | } | |
2932 | #undef ZERO | |
2933 | ||
2934 | static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, | |
2935 | unsigned int n_hc) | |
2936 | { | |
2937 | unsigned int hc, port; | |
2938 | ||
2939 | for (hc = 0; hc < n_hc; hc++) { | |
2940 | for (port = 0; port < MV_PORTS_PER_HC; port++) | |
2941 | mv5_reset_hc_port(hpriv, mmio, | |
2942 | (hc * MV_PORTS_PER_HC) + port); | |
2943 | ||
2944 | mv5_reset_one_hc(hpriv, mmio, hc); | |
2945 | } | |
2946 | ||
2947 | return 0; | |
47c2b677 JG |
2948 | } |
2949 | ||
101ffae2 JG |
2950 | #undef ZERO |
2951 | #define ZERO(reg) writel(0, mmio + (reg)) | |
7bb3c529 | 2952 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) |
101ffae2 | 2953 | { |
02a121da | 2954 | struct mv_host_priv *hpriv = host->private_data; |
101ffae2 JG |
2955 | u32 tmp; |
2956 | ||
8e7decdb | 2957 | tmp = readl(mmio + MV_PCI_MODE_OFS); |
101ffae2 | 2958 | tmp &= 0xff00ffff; |
8e7decdb | 2959 | writel(tmp, mmio + MV_PCI_MODE_OFS); |
101ffae2 JG |
2960 | |
2961 | ZERO(MV_PCI_DISC_TIMER); | |
2962 | ZERO(MV_PCI_MSI_TRIGGER); | |
8e7decdb | 2963 | writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); |
101ffae2 | 2964 | ZERO(MV_PCI_SERR_MASK); |
02a121da ML |
2965 | ZERO(hpriv->irq_cause_ofs); |
2966 | ZERO(hpriv->irq_mask_ofs); | |
101ffae2 JG |
2967 | ZERO(MV_PCI_ERR_LOW_ADDRESS); |
2968 | ZERO(MV_PCI_ERR_HIGH_ADDRESS); | |
2969 | ZERO(MV_PCI_ERR_ATTRIBUTE); | |
2970 | ZERO(MV_PCI_ERR_COMMAND); | |
2971 | } | |
2972 | #undef ZERO | |
2973 | ||
2974 | static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) | |
2975 | { | |
2976 | u32 tmp; | |
2977 | ||
2978 | mv5_reset_flash(hpriv, mmio); | |
2979 | ||
8e7decdb | 2980 | tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); |
101ffae2 JG |
2981 | tmp &= 0x3; |
2982 | tmp |= (1 << 5) | (1 << 6); | |
8e7decdb | 2983 | writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); |
101ffae2 JG |
2984 | } |
2985 | ||
2986 | /** | |
2987 | * mv6_reset_hc - Perform the 6xxx global soft reset | |
2988 | * @mmio: base address of the HBA | |
2989 | * | |
2990 | * This routine only applies to 6xxx parts. | |
2991 | * | |
2992 | * LOCKING: | |
2993 | * Inherited from caller. | |
2994 | */ | |
c9d39130 JG |
2995 | static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, |
2996 | unsigned int n_hc) | |
101ffae2 JG |
2997 | { |
2998 | void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; | |
2999 | int i, rc = 0; | |
3000 | u32 t; | |
3001 | ||
3002 | /* Follow the procedure defined in the PCI "main command and status |
3003 | * register" table. | |
3004 | */ | |
3005 | t = readl(reg); | |
3006 | writel(t | STOP_PCI_MASTER, reg); | |
3007 | ||
3008 | for (i = 0; i < 1000; i++) { | |
3009 | udelay(1); | |
3010 | t = readl(reg); | |
2dcb407e | 3011 | if (PCI_MASTER_EMPTY & t) |
101ffae2 | 3012 | break; |
101ffae2 JG |
3013 | } |
3014 | if (!(PCI_MASTER_EMPTY & t)) { | |
3015 | printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); | |
3016 | rc = 1; | |
3017 | goto done; | |
3018 | } | |
3019 | ||
3020 | /* set reset */ | |
3021 | i = 5; | |
3022 | do { | |
3023 | writel(t | GLOB_SFT_RST, reg); | |
3024 | t = readl(reg); | |
3025 | udelay(1); | |
3026 | } while (!(GLOB_SFT_RST & t) && (i-- > 0)); | |
3027 | ||
3028 | if (!(GLOB_SFT_RST & t)) { | |
3029 | printk(KERN_ERR DRV_NAME ": can't set global reset\n"); | |
3030 | rc = 1; | |
3031 | goto done; | |
3032 | } | |
3033 | ||
3034 | /* clear reset and *reenable the PCI master* (not mentioned in spec) */ | |
3035 | i = 5; | |
3036 | do { | |
3037 | writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); | |
3038 | t = readl(reg); | |
3039 | udelay(1); | |
3040 | } while ((GLOB_SFT_RST & t) && (i-- > 0)); | |
3041 | ||
3042 | if (GLOB_SFT_RST & t) { | |
3043 | printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); | |
3044 | rc = 1; | |
3045 | } | |
3046 | done: | |
3047 | return rc; | |
3048 | } | |
3049 | ||
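/*
 * Illustrative sketch (hypothetical helper, not part of this driver):
 * both waits in mv6_reset_hc() above are instances of a bounded register
 * poll, which could be factored as:
 *
 *	static int mv_poll_bit(void __iomem *reg, u32 bit, int want_set,
 *			       int tries)
 *	{
 *		while (tries-- > 0) {
 *			if (!!(readl(reg) & bit) == want_set)
 *				return 0;
 *			udelay(1);
 *		}
 *		return -ETIMEDOUT;	(caller prints its own error)
 *	}
 */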
47c2b677 | 3050 | static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, |
ba3fe8fb JG |
3051 | void __iomem *mmio) |
3052 | { | |
3053 | void __iomem *port_mmio; | |
3054 | u32 tmp; | |
3055 | ||
8e7decdb | 3056 | tmp = readl(mmio + MV_RESET_CFG_OFS); |
ba3fe8fb | 3057 | if ((tmp & (1 << 0)) == 0) { |
47c2b677 | 3058 | hpriv->signal[idx].amps = 0x7 << 8; |
ba3fe8fb JG |
3059 | hpriv->signal[idx].pre = 0x1 << 5; |
3060 | return; | |
3061 | } | |
3062 | ||
3063 | port_mmio = mv_port_base(mmio, idx); | |
3064 | tmp = readl(port_mmio + PHY_MODE2); | |
3065 | ||
3066 | hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ | |
3067 | hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ | |
3068 | } | |
3069 | ||
47c2b677 | 3070 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
ba3fe8fb | 3071 | { |
8e7decdb | 3072 | writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); |
ba3fe8fb JG |
3073 | } |
3074 | ||
c9d39130 | 3075 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
2a47ce06 | 3076 | unsigned int port) |
bca1c4eb | 3077 | { |
c9d39130 JG |
3078 | void __iomem *port_mmio = mv_port_base(mmio, port); |
3079 | ||
bca1c4eb | 3080 | u32 hp_flags = hpriv->hp_flags; |
47c2b677 JG |
3081 | int fix_phy_mode2 = |
3082 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); | |
bca1c4eb | 3083 | int fix_phy_mode4 = |
47c2b677 | 3084 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); |
8c30a8b9 | 3085 | u32 m2, m3; |
47c2b677 JG |
3086 | |
3087 | if (fix_phy_mode2) { | |
3088 | m2 = readl(port_mmio + PHY_MODE2); | |
3089 | m2 &= ~(1 << 16); | |
3090 | m2 |= (1 << 31); | |
3091 | writel(m2, port_mmio + PHY_MODE2); | |
3092 | ||
3093 | udelay(200); | |
3094 | ||
3095 | m2 = readl(port_mmio + PHY_MODE2); | |
3096 | m2 &= ~((1 << 16) | (1 << 31)); | |
3097 | writel(m2, port_mmio + PHY_MODE2); | |
3098 | ||
3099 | udelay(200); | |
3100 | } | |
3101 | ||
8c30a8b9 ML |
3102 | /* |
3103 | * Gen-II/IIe PHY_MODE3 errata RM#2: | |
3104 | * Achieves better receiver noise performance than the h/w default: | |
3105 | */ | |
3106 | m3 = readl(port_mmio + PHY_MODE3); | |
3107 | m3 = (m3 & 0x1f) | (0x5555601 << 5); | |
bca1c4eb | 3108 | |
0388a8c0 ML |
3109 | /* Guideline 88F5182 (GL# SATA-S11) */ |
3110 | if (IS_SOC(hpriv)) | |
3111 | m3 &= ~0x1c; | |
3112 | ||
bca1c4eb | 3113 | if (fix_phy_mode4) { |
ba069e37 ML |
3114 | u32 m4 = readl(port_mmio + PHY_MODE4); |
3115 | /* | |
3116 | * Enforce reserved-bit restrictions on GenIIe devices only. | |
3117 | * For earlier chipsets, force only the internal config field | |
3118 | * (workaround for errata FEr SATA#10 part 1). | |
3119 | */ | |
8c30a8b9 | 3120 | if (IS_GEN_IIE(hpriv)) |
ba069e37 ML |
3121 | m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; |
3122 | else | |
3123 | m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; | |
8c30a8b9 | 3124 | writel(m4, port_mmio + PHY_MODE4); |
bca1c4eb | 3125 | } |
b406c7a6 ML |
3126 | /* |
3127 | * Workaround for 60x1-B2 errata SATA#13: | |
3128 | * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, | |
3129 | * so we must always rewrite PHY_MODE3 after PHY_MODE4. | |
3130 | */ | |
3131 | writel(m3, port_mmio + PHY_MODE3); | |
bca1c4eb JG |
3132 | |
3133 | /* Revert values of pre-emphasis and signal amps to the saved ones */ | |
3134 | m2 = readl(port_mmio + PHY_MODE2); | |
3135 | ||
3136 | m2 &= ~MV_M2_PREAMP_MASK; | |
2a47ce06 JG |
3137 | m2 |= hpriv->signal[port].amps; |
3138 | m2 |= hpriv->signal[port].pre; | |
47c2b677 | 3139 | m2 &= ~(1 << 16); |
bca1c4eb | 3140 | |
e4e7b892 JG |
3141 | /* according to mvSata 3.6.1, some IIE values are fixed */ |
3142 | if (IS_GEN_IIE(hpriv)) { | |
3143 | m2 &= ~0xC30FF01F; | |
3144 | m2 |= 0x0000900F; | |
3145 | } | |
3146 | ||
bca1c4eb JG |
3147 | writel(m2, port_mmio + PHY_MODE2); |
3148 | } | |
3149 | ||
f351b2d6 SB |
3150 | /* TODO: use the generic LED interface to configure the SATA Presence |
3151 | * and Activity LEDs on the board */ |
3152 | static void mv_soc_enable_leds(struct mv_host_priv *hpriv, | |
3153 | void __iomem *mmio) | |
3154 | { | |
3155 | return; | |
3156 | } | |
3157 | ||
3158 | static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, | |
3159 | void __iomem *mmio) | |
3160 | { | |
3161 | void __iomem *port_mmio; | |
3162 | u32 tmp; | |
3163 | ||
3164 | port_mmio = mv_port_base(mmio, idx); | |
3165 | tmp = readl(port_mmio + PHY_MODE2); | |
3166 | ||
3167 | hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ | |
3168 | hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ | |
3169 | } | |
3170 | ||
3171 | #undef ZERO | |
3172 | #define ZERO(reg) writel(0, port_mmio + (reg)) | |
3173 | static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |
3174 | void __iomem *mmio, unsigned int port) | |
3175 | { | |
3176 | void __iomem *port_mmio = mv_port_base(mmio, port); | |
3177 | ||
e12bef50 | 3178 | mv_reset_channel(hpriv, mmio, port); |
f351b2d6 SB |
3179 | |
3180 | ZERO(0x028); /* command */ | |
3181 | writel(0x101f, port_mmio + EDMA_CFG_OFS); | |
3182 | ZERO(0x004); /* timer */ | |
3183 | ZERO(0x008); /* irq err cause */ | |
3184 | ZERO(0x00c); /* irq err mask */ | |
3185 | ZERO(0x010); /* rq bah */ | |
3186 | ZERO(0x014); /* rq inp */ | |
3187 | ZERO(0x018); /* rq outp */ | |
3188 | ZERO(0x01c); /* respq bah */ | |
3189 | ZERO(0x024); /* respq outp */ | |
3190 | ZERO(0x020); /* respq inp */ | |
3191 | ZERO(0x02c); /* test control */ | |
8e7decdb | 3192 | writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); |
f351b2d6 SB |
3193 | } |
3194 | ||
3195 | #undef ZERO | |
3196 | ||
3197 | #define ZERO(reg) writel(0, hc_mmio + (reg)) | |
3198 | static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, | |
3199 | void __iomem *mmio) | |
3200 | { | |
3201 | void __iomem *hc_mmio = mv_hc_base(mmio, 0); | |
3202 | ||
3203 | ZERO(0x00c); | |
3204 | ZERO(0x010); | |
3205 | ZERO(0x014); | |
3206 | ||
3207 | } | |
3208 | ||
3209 | #undef ZERO | |
3210 | ||
3211 | static int mv_soc_reset_hc(struct mv_host_priv *hpriv, | |
3212 | void __iomem *mmio, unsigned int n_hc) | |
3213 | { | |
3214 | unsigned int port; | |
3215 | ||
3216 | for (port = 0; port < hpriv->n_ports; port++) | |
3217 | mv_soc_reset_hc_port(hpriv, mmio, port); | |
3218 | ||
3219 | mv_soc_reset_one_hc(hpriv, mmio); | |
3220 | ||
3221 | return 0; | |
3222 | } | |
3223 | ||
3224 | static void mv_soc_reset_flash(struct mv_host_priv *hpriv, | |
3225 | void __iomem *mmio) | |
3226 | { | |
3227 | return; | |
3228 | } | |
3229 | ||
3230 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |
3231 | { | |
3232 | return; | |
3233 | } | |
3234 | ||
8e7decdb | 3235 | static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) |
b67a1064 | 3236 | { |
8e7decdb | 3237 | u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); |
b67a1064 | 3238 | |
8e7decdb | 3239 | ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ |
b67a1064 | 3240 | if (want_gen2i) |
8e7decdb ML |
3241 | ifcfg |= (1 << 7); /* enable gen2i speed */ |
3242 | writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); | |
b67a1064 ML |
3243 | } |
3244 | ||
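/*
 * Note on mv_setup_ifcfg() above: the 0xf7f mask clears bit 7 while
 * preserving the rest of the low 12 bits, so want_gen2i == 0 leaves the
 * interface capped at 1.5 Gb/s.  mv_hardreset() relies on exactly that
 * as its errata fallback when a link refuses to train at 3.0 Gb/s.
 */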
e12bef50 | 3245 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
c9d39130 JG |
3246 | unsigned int port_no) |
3247 | { | |
3248 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | |
3249 | ||
8e7decdb ML |
3250 | /* |
3251 | * The datasheet warns against setting EDMA_RESET when EDMA is active | |
3252 | * (but doesn't say what the problem might be). So we first try | |
3253 | * to disable the EDMA engine before doing the EDMA_RESET operation. | |
3254 | */ | |
0d8be5cb | 3255 | mv_stop_edma_engine(port_mmio); |
8e7decdb | 3256 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
c9d39130 | 3257 | |
b67a1064 | 3258 | if (!IS_GEN_I(hpriv)) { |
8e7decdb ML |
3259 | /* Enable 3.0 Gb/s link speed: this survives EDMA_RESET */ |
3260 | mv_setup_ifcfg(port_mmio, 1); | |
c9d39130 | 3261 | } |
b67a1064 | 3262 | /* |
8e7decdb | 3263 | * Strobing EDMA_RESET here causes a hard reset of the SATA transport, |
b67a1064 ML |
3264 | * link, and physical layers. It resets all SATA interface registers |
3265 | * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. | |
c9d39130 | 3266 | */ |
8e7decdb | 3267 | writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); |
b67a1064 | 3268 | udelay(25); /* allow reset propagation */ |
c9d39130 JG |
3269 | writelfl(0, port_mmio + EDMA_CMD_OFS); |
3270 | ||
3271 | hpriv->ops->phy_errata(hpriv, mmio, port_no); | |
3272 | ||
ee9ccdf7 | 3273 | if (IS_GEN_I(hpriv)) |
c9d39130 JG |
3274 | mdelay(1); |
3275 | } | |
3276 | ||
e49856d8 | 3277 | static void mv_pmp_select(struct ata_port *ap, int pmp) |
20f733e7 | 3278 | { |
e49856d8 ML |
3279 | if (sata_pmp_supported(ap)) { |
3280 | void __iomem *port_mmio = mv_ap_base(ap); | |
3281 | u32 reg = readl(port_mmio + SATA_IFCTL_OFS); | |
3282 | int old = reg & 0xf; | |
22374677 | 3283 | |
e49856d8 ML |
3284 | if (old != pmp) { |
3285 | reg = (reg & ~0xf) | pmp; | |
3286 | writelfl(reg, port_mmio + SATA_IFCTL_OFS); | |
3287 | } | |
22374677 | 3288 | } |
20f733e7 BR |
3289 | } |
3290 | ||
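/*
 * Usage sketch for mv_pmp_select() above: bits 3:0 of SATA_IFCTL_OFS
 * address the Port Multiplier device behind the port, so e.g.
 *
 *	mv_pmp_select(ap, 3);
 *
 * steers the next software reset at PMP device 3; the register is only
 * rewritten when the field actually changes.
 */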
e49856d8 ML |
3291 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, |
3292 | unsigned long deadline) | |
22374677 | 3293 | { |
e49856d8 ML |
3294 | mv_pmp_select(link->ap, sata_srst_pmp(link)); |
3295 | return sata_std_hardreset(link, class, deadline); | |
3296 | } | |
bdd4ddde | 3297 | |
e49856d8 ML |
3298 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
3299 | unsigned long deadline) | |
3300 | { | |
3301 | mv_pmp_select(link->ap, sata_srst_pmp(link)); | |
3302 | return ata_sff_softreset(link, class, deadline); | |
22374677 JG |
3303 | } |
3304 | ||
cc0680a5 | 3305 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
bdd4ddde | 3306 | unsigned long deadline) |
31961943 | 3307 | { |
cc0680a5 | 3308 | struct ata_port *ap = link->ap; |
bdd4ddde | 3309 | struct mv_host_priv *hpriv = ap->host->private_data; |
b562468c | 3310 | struct mv_port_priv *pp = ap->private_data; |
f351b2d6 | 3311 | void __iomem *mmio = hpriv->base; |
0d8be5cb ML |
3312 | int rc, attempts = 0, extra = 0; |
3313 | u32 sstatus; | |
3314 | bool online; | |
31961943 | 3315 | |
e12bef50 | 3316 | mv_reset_channel(hpriv, mmio, ap->port_no); |
b562468c | 3317 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
d16ab3f6 ML |
3318 | pp->pp_flags &= |
3319 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); | |
bdd4ddde | 3320 | |
0d8be5cb ML |
3321 | /* Workaround for errata FEr SATA#10 (part 2) */ |
3322 | do { | |
17c5aab5 ML |
3323 | const unsigned long *timing = |
3324 | sata_ehc_deb_timing(&link->eh_context); | |
bdd4ddde | 3325 | |
17c5aab5 ML |
3326 | rc = sata_link_hardreset(link, timing, deadline + extra, |
3327 | &online, NULL); | |
9dcffd99 | 3328 | rc = online ? -EAGAIN : rc; |
17c5aab5 | 3329 | if (rc) |
0d8be5cb | 3330 | return rc; |
0d8be5cb ML |
3331 | sata_scr_read(link, SCR_STATUS, &sstatus); |
3332 | if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { | |
3333 | /* Force 1.5 Gb/s link speed and try again */ |
8e7decdb | 3334 | mv_setup_ifcfg(mv_ap_base(ap), 0); |
0d8be5cb ML |
3335 | if (time_after(jiffies + HZ, deadline)) |
3336 | extra = HZ; /* only extend it once, max */ | |
3337 | } | |
3338 | } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); | |
08da1759 | 3339 | mv_save_cached_regs(ap); |
66e57a2c | 3340 | mv_edma_cfg(ap, 0, 0); |
bdd4ddde | 3341 | |
17c5aab5 | 3342 | return rc; |
bdd4ddde JG |
3343 | } |
3344 | ||
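/*
 * SStatus decoding for the retry loop above (standard SATA fields):
 *	bits  3:0  DET  (1 = device detected, phy not established;
 *			 3 = device detected, phy established)
 *	bits  7:4  SPD  (1 = 1.5 Gb/s, 2 = 3.0 Gb/s)
 *	bits 11:8  IPM  (1 = active interface power state)
 * Thus 0x113/0x123 mean "link up at Gen1/Gen2", while a persistent 0x121
 * means the phy keeps failing to train at Gen2, which triggers the forced
 * 1.5 Gb/s retry.
 */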
bdd4ddde JG |
3345 | static void mv_eh_freeze(struct ata_port *ap) |
3346 | { | |
1cfd19ae | 3347 | mv_stop_edma(ap); |
c4de573b | 3348 | mv_enable_port_irqs(ap, 0); |
bdd4ddde JG |
3349 | } |
3350 | ||
3351 | static void mv_eh_thaw(struct ata_port *ap) | |
3352 | { | |
f351b2d6 | 3353 | struct mv_host_priv *hpriv = ap->host->private_data; |
c4de573b ML |
3354 | unsigned int port = ap->port_no; |
3355 | unsigned int hardport = mv_hardport_from_port(port); | |
1cfd19ae | 3356 | void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); |
bdd4ddde | 3357 | void __iomem *port_mmio = mv_ap_base(ap); |
c4de573b | 3358 | u32 hc_irq_cause; |
bdd4ddde | 3359 | |
bdd4ddde JG |
3360 | /* clear EDMA errors on this port */ |
3361 | writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
3362 | ||
3363 | /* clear pending irq events */ | |
cae6edc3 | 3364 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); |
1cfd19ae | 3365 | writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); |
bdd4ddde | 3366 | |
88e675e1 | 3367 | mv_enable_port_irqs(ap, ERR_IRQ); |
31961943 BR |
3368 | } |
3369 | ||
05b308e1 BR |
3370 | /** |
3371 | * mv_port_init - Perform some early initialization on a single port. | |
3372 | * @port: libata data structure storing shadow register addresses | |
3373 | * @port_mmio: base address of the port | |
3374 | * | |
3375 | * Initialize shadow register mmio addresses, clear outstanding | |
3376 | * interrupts on the port, and unmask interrupts for the future | |
3377 | * start of the port. | |
3378 | * | |
3379 | * LOCKING: | |
3380 | * Inherited from caller. | |
3381 | */ | |
31961943 | 3382 | static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) |
20f733e7 | 3383 | { |
0d5ff566 | 3384 | void __iomem *shd_base = port_mmio + SHD_BLK_OFS; |
31961943 BR |
3385 | unsigned serr_ofs; |
3386 | ||
8b260248 | 3387 | /* PIO related setup |
31961943 BR |
3388 | */ |
3389 | port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); | |
8b260248 | 3390 | port->error_addr = |
31961943 BR |
3391 | port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); |
3392 | port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); | |
3393 | port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); | |
3394 | port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); | |
3395 | port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); | |
3396 | port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); | |
8b260248 | 3397 | port->status_addr = |
31961943 BR |
3398 | port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); |
3399 | /* special case: control/altstatus doesn't have ATA_REG_ address */ | |
3400 | port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; | |
3401 | ||
3402 | /* unused: */ | |
8d9db2d2 | 3403 | port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; |
20f733e7 | 3404 | |
31961943 BR |
3405 | /* Clear any currently outstanding port interrupt conditions */ |
3406 | serr_ofs = mv_scr_offset(SCR_ERROR); | |
3407 | writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); | |
3408 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | |
3409 | ||
646a4da5 ML |
3410 | /* unmask all non-transient EDMA error interrupts */ |
3411 | writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); | |
20f733e7 | 3412 | |
8b260248 | 3413 | VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", |
31961943 BR |
3414 | readl(port_mmio + EDMA_CFG_OFS), |
3415 | readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), | |
3416 | readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); | |
20f733e7 BR |
3417 | } |
3418 | ||
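/*
 * Worked example for the shadow block above: each taskfile register
 * occupies its own 32-bit slot, so the address is simply
 * shd_base + 4 * ATA_REG_<x>; e.g. with ATA_REG_NSECT == 2, the sector
 * count register sits at shd_base + 0x08.
 */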
616d4a98 ML |
3419 | static unsigned int mv_in_pcix_mode(struct ata_host *host) |
3420 | { | |
3421 | struct mv_host_priv *hpriv = host->private_data; | |
3422 | void __iomem *mmio = hpriv->base; | |
3423 | u32 reg; | |
3424 | ||
1f398472 | 3425 | if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) |
616d4a98 ML |
3426 | return 0; /* not PCI-X capable */ |
3427 | reg = readl(mmio + MV_PCI_MODE_OFS); | |
3428 | if ((reg & MV_PCI_MODE_MASK) == 0) | |
3429 | return 0; /* conventional PCI mode */ | |
3430 | return 1; /* chip is in PCI-X mode */ | |
3431 | } | |
3432 | ||
3433 | static int mv_pci_cut_through_okay(struct ata_host *host) | |
3434 | { | |
3435 | struct mv_host_priv *hpriv = host->private_data; | |
3436 | void __iomem *mmio = hpriv->base; | |
3437 | u32 reg; | |
3438 | ||
3439 | if (!mv_in_pcix_mode(host)) { | |
3440 | reg = readl(mmio + PCI_COMMAND_OFS); | |
3441 | if (reg & PCI_COMMAND_MRDTRIG) | |
3442 | return 0; /* not okay */ | |
3443 | } | |
3444 | return 1; /* okay */ | |
3445 | } | |
3446 | ||
4447d351 | 3447 | static int mv_chip_id(struct ata_host *host, unsigned int board_idx) |
bca1c4eb | 3448 | { |
4447d351 TH |
3449 | struct pci_dev *pdev = to_pci_dev(host->dev); |
3450 | struct mv_host_priv *hpriv = host->private_data; | |
bca1c4eb JG |
3451 | u32 hp_flags = hpriv->hp_flags; |
3452 | ||
5796d1c4 | 3453 | switch (board_idx) { |
47c2b677 JG |
3454 | case chip_5080: |
3455 | hpriv->ops = &mv5xxx_ops; | |
ee9ccdf7 | 3456 | hp_flags |= MV_HP_GEN_I; |
47c2b677 | 3457 | |
44c10138 | 3458 | switch (pdev->revision) { |
47c2b677 JG |
3459 | case 0x1: |
3460 | hp_flags |= MV_HP_ERRATA_50XXB0; | |
3461 | break; | |
3462 | case 0x3: | |
3463 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3464 | break; | |
3465 | default: | |
3466 | dev_printk(KERN_WARNING, &pdev->dev, | |
3467 | "Applying 50XXB2 workarounds to unknown rev\n"); | |
3468 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3469 | break; | |
3470 | } | |
3471 | break; | |
3472 | ||
bca1c4eb JG |
3473 | case chip_504x: |
3474 | case chip_508x: | |
47c2b677 | 3475 | hpriv->ops = &mv5xxx_ops; |
ee9ccdf7 | 3476 | hp_flags |= MV_HP_GEN_I; |
bca1c4eb | 3477 | |
44c10138 | 3478 | switch (pdev->revision) { |
47c2b677 JG |
3479 | case 0x0: |
3480 | hp_flags |= MV_HP_ERRATA_50XXB0; | |
3481 | break; | |
3482 | case 0x3: | |
3483 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3484 | break; | |
3485 | default: | |
3486 | dev_printk(KERN_WARNING, &pdev->dev, | |
3487 | "Applying B2 workarounds to unknown rev\n"); | |
3488 | hp_flags |= MV_HP_ERRATA_50XXB2; | |
3489 | break; | |
bca1c4eb JG |
3490 | } |
3491 | break; | |
3492 | ||
3493 | case chip_604x: | |
3494 | case chip_608x: | |
47c2b677 | 3495 | hpriv->ops = &mv6xxx_ops; |
ee9ccdf7 | 3496 | hp_flags |= MV_HP_GEN_II; |
47c2b677 | 3497 | |
44c10138 | 3498 | switch (pdev->revision) { |
47c2b677 JG |
3499 | case 0x7: |
3500 | hp_flags |= MV_HP_ERRATA_60X1B2; | |
3501 | break; | |
3502 | case 0x9: | |
3503 | hp_flags |= MV_HP_ERRATA_60X1C0; | |
bca1c4eb JG |
3504 | break; |
3505 | default: | |
3506 | dev_printk(KERN_WARNING, &pdev->dev, | |
47c2b677 JG |
3507 | "Applying B2 workarounds to unknown rev\n"); |
3508 | hp_flags |= MV_HP_ERRATA_60X1B2; | |
bca1c4eb JG |
3509 | break; |
3510 | } | |
3511 | break; | |
3512 | ||
e4e7b892 | 3513 | case chip_7042: |
616d4a98 | 3514 | hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; |
306b30f7 ML |
3515 | if (pdev->vendor == PCI_VENDOR_ID_TTI && |
3516 | (pdev->device == 0x2300 || pdev->device == 0x2310)) | |
3517 | { | |
4e520033 ML |
3518 | /* |
3519 | * Highpoint RocketRAID PCIe 23xx series cards: | |
3520 | * | |
3521 | * Unconfigured drives are treated as "Legacy" | |
3522 | * by the BIOS, and it overwrites sector 8 with | |
3523 | * a "Lgcy" metadata block prior to Linux boot. | |
3524 | * | |
3525 | * Configured drives (RAID or JBOD) leave sector 8 | |
3526 | * alone, but instead overwrite a high numbered | |
3527 | * sector for the RAID metadata. This sector can | |
3528 | * be determined exactly, by truncating the physical | |
3529 | * drive capacity to a nice even GB value. | |
3530 | * | |
3531 | * RAID metadata is at: (dev->n_sectors & ~0xfffff) | |
3532 | * | |
3533 | * Warn the user, lest they think we're just buggy. | |
3534 | */ | |
3535 | printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" | |
3536 | " BIOS CORRUPTS DATA on all attached drives," | |
3537 | " regardless of if/how they are configured." | |
3538 | " BEWARE!\n"); | |
3539 | printk(KERN_WARNING DRV_NAME ": For data safety, do not" | |
3540 | " use sectors 8-9 on \"Legacy\" drives," | |
3541 | " and avoid the final two gigabytes on" | |
3542 | " all RocketRAID BIOS initialized drives.\n"); | |
306b30f7 | 3543 | } |
8e7decdb | 3544 | /* fall through */ |
e4e7b892 JG |
3545 | case chip_6042: |
3546 | hpriv->ops = &mv6xxx_ops; | |
e4e7b892 | 3547 | hp_flags |= MV_HP_GEN_IIE; |
616d4a98 ML |
3548 | if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) |
3549 | hp_flags |= MV_HP_CUT_THROUGH; | |
e4e7b892 | 3550 | |
44c10138 | 3551 | switch (pdev->revision) { |
5cf73bfb | 3552 | case 0x2: /* Rev.B0: the first/only public release */ |
e4e7b892 JG |
3553 | hp_flags |= MV_HP_ERRATA_60X1C0; |
3554 | break; | |
3555 | default: | |
3556 | dev_printk(KERN_WARNING, &pdev->dev, | |
3557 | "Applying 60X1C0 workarounds to unknown rev\n"); | |
3558 | hp_flags |= MV_HP_ERRATA_60X1C0; | |
3559 | break; | |
3560 | } | |
3561 | break; | |
f351b2d6 SB |
3562 | case chip_soc: |
3563 | hpriv->ops = &mv_soc_ops; | |
eb3a55a9 SB |
3564 | hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | |
3565 | MV_HP_ERRATA_60X1C0; | |
f351b2d6 | 3566 | break; |
e4e7b892 | 3567 | |
bca1c4eb | 3568 | default: |
f351b2d6 | 3569 | dev_printk(KERN_ERR, host->dev, |
5796d1c4 | 3570 | "BUG: invalid board index %u\n", board_idx); |
bca1c4eb JG |
3571 | return 1; |
3572 | } | |
3573 | ||
3574 | hpriv->hp_flags = hp_flags; | |
02a121da ML |
3575 | if (hp_flags & MV_HP_PCIE) { |
3576 | hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; | |
3577 | hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; | |
3578 | hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; | |
3579 | } else { | |
3580 | hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; | |
3581 | hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; | |
3582 | hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; | |
3583 | } | |
bca1c4eb JG |
3584 | |
3585 | return 0; | |
3586 | } | |
3587 | ||
05b308e1 | 3588 | /** |
47c2b677 | 3589 | * mv_init_host - Perform some early initialization of the host. |
4447d351 TH |
3590 | * @host: ATA host to initialize |
3591 | * @board_idx: controller index | |
05b308e1 BR |
3592 | * |
3593 | * If possible, do an early global reset of the host. Then do | |
3594 | * our port init and clear/unmask all/relevant host interrupts. | |
3595 | * | |
3596 | * LOCKING: | |
3597 | * Inherited from caller. | |
3598 | */ | |
4447d351 | 3599 | static int mv_init_host(struct ata_host *host, unsigned int board_idx) |
20f733e7 BR |
3600 | { |
3601 | int rc = 0, n_hc, port, hc; | |
4447d351 | 3602 | struct mv_host_priv *hpriv = host->private_data; |
f351b2d6 | 3603 | void __iomem *mmio = hpriv->base; |
47c2b677 | 3604 | |
4447d351 | 3605 | rc = mv_chip_id(host, board_idx); |
bca1c4eb | 3606 | if (rc) |
352fab70 | 3607 | goto done; |
f351b2d6 | 3608 | |
1f398472 | 3609 | if (IS_SOC(hpriv)) { |
7368f919 ML |
3610 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; |
3611 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; | |
1f398472 ML |
3612 | } else { |
3613 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; | |
3614 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; | |
f351b2d6 | 3615 | } |
352fab70 | 3616 | |
5d0fb2e7 TR |
3617 | /* initialize shadow irq mask with register's value */ |
3618 | hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); | |
3619 | ||
352fab70 | 3620 | /* global interrupt mask: 0 == mask everything */ |
c4de573b | 3621 | mv_set_main_irq_mask(host, ~0, 0); |
bca1c4eb | 3622 | |
4447d351 | 3623 | n_hc = mv_get_hc_count(host->ports[0]->flags); |
bca1c4eb | 3624 | |
4447d351 | 3625 | for (port = 0; port < host->n_ports; port++) |
47c2b677 | 3626 | hpriv->ops->read_preamp(hpriv, port, mmio); |
20f733e7 | 3627 | |
c9d39130 | 3628 | rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); |
47c2b677 | 3629 | if (rc) |
20f733e7 | 3630 | goto done; |
20f733e7 | 3631 | |
522479fb | 3632 | hpriv->ops->reset_flash(hpriv, mmio); |
7bb3c529 | 3633 | hpriv->ops->reset_bus(host, mmio); |
47c2b677 | 3634 | hpriv->ops->enable_leds(hpriv, mmio); |
20f733e7 | 3635 | |
4447d351 | 3636 | for (port = 0; port < host->n_ports; port++) { |
cbcdd875 | 3637 | struct ata_port *ap = host->ports[port]; |
2a47ce06 | 3638 | void __iomem *port_mmio = mv_port_base(mmio, port); |
cbcdd875 TH |
3639 | |
3640 | mv_port_init(&ap->ioaddr, port_mmio); | |
3641 | ||
7bb3c529 | 3642 | #ifdef CONFIG_PCI |
1f398472 | 3643 | if (!IS_SOC(hpriv)) { |
f351b2d6 SB |
3644 | unsigned int offset = port_mmio - mmio; |
3645 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); | |
3646 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); | |
3647 | } | |
7bb3c529 | 3648 | #endif |
20f733e7 BR |
3649 | } |
3650 | ||
3651 | for (hc = 0; hc < n_hc; hc++) { | |
31961943 BR |
3652 | void __iomem *hc_mmio = mv_hc_base(mmio, hc); |
3653 | ||
3654 | VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " | |
3655 | "(before clear)=0x%08x\n", hc, | |
3656 | readl(hc_mmio + HC_CFG_OFS), | |
3657 | readl(hc_mmio + HC_IRQ_CAUSE_OFS)); | |
3658 | ||
3659 | /* Clear any currently outstanding hc interrupt conditions */ | |
3660 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | |
20f733e7 BR |
3661 | } |
3662 | ||
6be96ac1 ML |
3663 | /* Clear any currently outstanding host interrupt conditions */ |
3664 | writelfl(0, mmio + hpriv->irq_cause_ofs); | |
31961943 | 3665 | |
6be96ac1 ML |
3666 | /* and unmask interrupt generation for host regs */ |
3667 | writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); | |
51de32d2 | 3668 | |
6be96ac1 ML |
3669 | /* |
3670 | * enable only global host interrupts for now. | |
3671 | * The per-port interrupts get done later as ports are set up. | |
3672 | */ | |
3673 | mv_set_main_irq_mask(host, 0, PCI_ERR); | |
2b748a0a ML |
3674 | mv_set_irq_coalescing(host, irq_coalescing_io_count, |
3675 | irq_coalescing_usecs); | |
f351b2d6 SB |
3676 | done: |
3677 | return rc; | |
3678 | } | |
fb621e2f | 3679 | |
fbf14e2f BB |
3680 | static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) |
3681 | { | |
3682 | hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, | |
3683 | MV_CRQB_Q_SZ, 0); | |
3684 | if (!hpriv->crqb_pool) | |
3685 | return -ENOMEM; | |
3686 | ||
3687 | hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, | |
3688 | MV_CRPB_Q_SZ, 0); | |
3689 | if (!hpriv->crpb_pool) | |
3690 | return -ENOMEM; | |
3691 | ||
3692 | hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, | |
3693 | MV_SG_TBL_SZ, 0); | |
3694 | if (!hpriv->sg_tbl_pool) | |
3695 | return -ENOMEM; | |
3696 | ||
3697 | return 0; | |
3698 | } | |
3699 | ||
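/*
 * Usage sketch (assumed caller pattern): per-port buffers come out of
 * these pools via the standard dmapool API, e.g.
 *
 *	dma_addr_t dma;
 *	void *crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(hpriv->crqb_pool, crqb, dma);
 *
 * Because dmam_pool_create() is the device-managed variant, the pools
 * themselves are destroyed automatically when the device goes away.
 */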
15a32632 LB |
3700 | static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, |
3701 | struct mbus_dram_target_info *dram) | |
3702 | { | |
3703 | int i; | |
3704 | ||
3705 | for (i = 0; i < 4; i++) { | |
3706 | writel(0, hpriv->base + WINDOW_CTRL(i)); | |
3707 | writel(0, hpriv->base + WINDOW_BASE(i)); | |
3708 | } | |
3709 | ||
3710 | for (i = 0; i < dram->num_cs; i++) { | |
3711 | struct mbus_dram_window *cs = dram->cs + i; | |
3712 | ||
3713 | writel(((cs->size - 1) & 0xffff0000) | | |
3714 | (cs->mbus_attr << 8) | | |
3715 | (dram->mbus_dram_target_id << 4) | 1, | |
3716 | hpriv->base + WINDOW_CTRL(i)); | |
3717 | writel(cs->base, hpriv->base + WINDOW_BASE(i)); | |
3718 | } | |
3719 | } | |
3720 | ||
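/*
 * Worked example with hypothetical values: a 256 MiB chip-select window
 * (size 0x10000000, mbus_attr 0xe, target id 0) would be programmed as
 *
 *	WINDOW_CTRL = ((0x10000000 - 1) & 0xffff0000)	-> 0x0fff0000
 *		    | (0xe << 8) | (0 << 4) | 1		-> 0x0fff0e01
 *
 * i.e. size field in the top 16 bits, then attribute, target id, enable.
 */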
f351b2d6 SB |
3721 | /** |
3722 | * mv_platform_probe - handle a positive probe of an SoC Marvell |
3723 | * host | |
3724 | * @pdev: platform device found | |
3725 | * | |
3726 | * LOCKING: | |
3727 | * Inherited from caller. | |
3728 | */ | |
3729 | static int mv_platform_probe(struct platform_device *pdev) | |
3730 | { | |
3731 | static int printed_version; | |
3732 | const struct mv_sata_platform_data *mv_platform_data; | |
3733 | const struct ata_port_info *ppi[] = | |
3734 | { &mv_port_info[chip_soc], NULL }; | |
3735 | struct ata_host *host; | |
3736 | struct mv_host_priv *hpriv; | |
3737 | struct resource *res; | |
3738 | int n_ports, rc; | |
20f733e7 | 3739 | |
f351b2d6 SB |
3740 | if (!printed_version++) |
3741 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | |
bca1c4eb | 3742 | |
f351b2d6 SB |
3743 | /* |
3744 | * Simple resource validation. |
3745 | */ | |
3746 | if (unlikely(pdev->num_resources != 2)) { | |
3747 | dev_err(&pdev->dev, "invalid number of resources\n"); | |
3748 | return -EINVAL; | |
3749 | } | |
3750 | ||
3751 | /* | |
3752 | * Get the register base first | |
3753 | */ | |
3754 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
3755 | if (res == NULL) | |
3756 | return -EINVAL; | |
3757 | ||
3758 | /* allocate host */ | |
3759 | mv_platform_data = pdev->dev.platform_data; | |
3760 | n_ports = mv_platform_data->n_ports; | |
3761 | ||
3762 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); | |
3763 | hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); | |
3764 | ||
3765 | if (!host || !hpriv) | |
3766 | return -ENOMEM; | |
3767 | host->private_data = hpriv; | |
3768 | hpriv->n_ports = n_ports; | |
3769 | ||
3770 | host->iomap = NULL; | |
f1cb0ea1 SB |
3771 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
3772 | res->end - res->start + 1); | |
f351b2d6 SB |
3773 | hpriv->base -= MV_SATAHC0_REG_BASE; |
3774 | ||
15a32632 LB |
3775 | /* |
3776 | * (Re-)program MBUS remapping windows if we are asked to. | |
3777 | */ | |
3778 | if (mv_platform_data->dram != NULL) | |
3779 | mv_conf_mbus_windows(hpriv, mv_platform_data->dram); | |
3780 | ||
fbf14e2f BB |
3781 | rc = mv_create_dma_pools(hpriv, &pdev->dev); |
3782 | if (rc) | |
3783 | return rc; | |
3784 | ||
f351b2d6 SB |
3785 | /* initialize adapter */ |
3786 | rc = mv_init_host(host, chip_soc); | |
3787 | if (rc) | |
3788 | return rc; | |
3789 | ||
3790 | dev_printk(KERN_INFO, &pdev->dev, | |
3791 | "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, | |
3792 | host->n_ports); | |
3793 | ||
3794 | return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, | |
3795 | IRQF_SHARED, &mv6_sht); | |
3796 | } | |
3797 | ||
3798 | /** |
3799 | * | |
3800 | * mv_platform_remove - unplug a platform interface | |
3801 | * @pdev: platform device | |
3802 | * | |
3803 | * A platform bus SATA device has been unplugged. Perform the needed | |
3804 | * cleanup. Also called on module unload for any active devices. | |
3805 | */ | |
3806 | static int __devexit mv_platform_remove(struct platform_device *pdev) | |
3807 | { | |
3808 | struct device *dev = &pdev->dev; | |
3809 | struct ata_host *host = dev_get_drvdata(dev); | |
f351b2d6 SB |
3810 | |
3811 | ata_host_detach(host); | |
f351b2d6 | 3812 | return 0; |
20f733e7 BR |
3813 | } |
3814 | ||
f351b2d6 SB |
3815 | static struct platform_driver mv_platform_driver = { |
3816 | .probe = mv_platform_probe, | |
3817 | .remove = __devexit_p(mv_platform_remove), | |
3818 | .driver = { | |
3819 | .name = DRV_NAME, | |
3820 | .owner = THIS_MODULE, | |
3821 | }, | |
3822 | }; | |
3823 | ||
3824 | ||
7bb3c529 | 3825 | #ifdef CONFIG_PCI |
f351b2d6 SB |
3826 | static int mv_pci_init_one(struct pci_dev *pdev, |
3827 | const struct pci_device_id *ent); | |
3828 | ||
7bb3c529 SB |
3829 | |
3830 | static struct pci_driver mv_pci_driver = { | |
3831 | .name = DRV_NAME, | |
3832 | .id_table = mv_pci_tbl, | |
f351b2d6 | 3833 | .probe = mv_pci_init_one, |
7bb3c529 SB |
3834 | .remove = ata_pci_remove_one, |
3835 | }; | |
3836 | ||
7bb3c529 SB |
3837 | /* move to PCI layer or libata core? */ |
3838 | static int pci_go_64(struct pci_dev *pdev) | |
3839 | { | |
3840 | int rc; | |
3841 | ||
3842 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | |
3843 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
3844 | if (rc) { | |
3845 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
3846 | if (rc) { | |
3847 | dev_printk(KERN_ERR, &pdev->dev, | |
3848 | "64-bit DMA enable failed\n"); | |
3849 | return rc; | |
3850 | } | |
3851 | } | |
3852 | } else { | |
3853 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
3854 | if (rc) { | |
3855 | dev_printk(KERN_ERR, &pdev->dev, | |
3856 | "32-bit DMA enable failed\n"); | |
3857 | return rc; | |
3858 | } | |
3859 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
3860 | if (rc) { | |
3861 | dev_printk(KERN_ERR, &pdev->dev, | |
3862 | "32-bit consistent DMA enable failed\n"); | |
3863 | return rc; | |
3864 | } | |
3865 | } | |
3866 | ||
3867 | return rc; | |
3868 | } | |
3869 | ||
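/*
 * Sketch of the fallback ladder above: try 64-bit streaming DMA first;
 * if the consistent (coherent) mask cannot also be widened, drop it back
 * to 32 bits; failing 64-bit entirely, run fully 32-bit.  DMA_64BIT_MASK
 * and DMA_32BIT_MASK are simply ~0ULL and 0xffffffffULL.
 */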
05b308e1 BR |
3870 | /** |
3871 | * mv_print_info - Dump key info to kernel log for perusal. | |
4447d351 | 3872 | * @host: ATA host to print info about |
05b308e1 BR |
3873 | * |
3874 | * FIXME: complete this. | |
3875 | * | |
3876 | * LOCKING: | |
3877 | * Inherited from caller. | |
3878 | */ | |
4447d351 | 3879 | static void mv_print_info(struct ata_host *host) |
31961943 | 3880 | { |
4447d351 TH |
3881 | struct pci_dev *pdev = to_pci_dev(host->dev); |
3882 | struct mv_host_priv *hpriv = host->private_data; | |
44c10138 | 3883 | u8 scc; |
c1e4fe71 | 3884 | const char *scc_s, *gen; |
31961943 BR |
3885 | |
3886 | /* Use this to determine the HW stepping of the chip so we know | |
3887 | * what errata to work around |
3888 | */ | |
31961943 BR |
3889 | pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); |
3890 | if (scc == 0) | |
3891 | scc_s = "SCSI"; | |
3892 | else if (scc == 0x01) | |
3893 | scc_s = "RAID"; | |
3894 | else | |
c1e4fe71 JG |
3895 | scc_s = "?"; |
3896 | ||
3897 | if (IS_GEN_I(hpriv)) | |
3898 | gen = "I"; | |
3899 | else if (IS_GEN_II(hpriv)) | |
3900 | gen = "II"; | |
3901 | else if (IS_GEN_IIE(hpriv)) | |
3902 | gen = "IIE"; | |
3903 | else | |
3904 | gen = "?"; | |
31961943 | 3905 | |
a9524a76 | 3906 | dev_printk(KERN_INFO, &pdev->dev, |
c1e4fe71 JG |
3907 | "Gen-%s %u slots %u ports %s mode IRQ via %s\n", |
3908 | gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, | |
31961943 BR |
3909 | scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); |
3910 | } | |
3911 | ||
05b308e1 | 3912 | /** |
f351b2d6 | 3913 | * mv_pci_init_one - handle a positive probe of a PCI Marvell host |
05b308e1 BR |
3914 | * @pdev: PCI device found |
3915 | * @ent: PCI device ID entry for the matched host | |
3916 | * | |
3917 | * LOCKING: | |
3918 | * Inherited from caller. | |
3919 | */ | |
f351b2d6 SB |
3920 | static int mv_pci_init_one(struct pci_dev *pdev, |
3921 | const struct pci_device_id *ent) | |
20f733e7 | 3922 | { |
2dcb407e | 3923 | static int printed_version; |
20f733e7 | 3924 | unsigned int board_idx = (unsigned int)ent->driver_data; |
4447d351 TH |
3925 | const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; |
3926 | struct ata_host *host; | |
3927 | struct mv_host_priv *hpriv; | |
3928 | int n_ports, rc; | |
20f733e7 | 3929 | |
a9524a76 JG |
3930 | if (!printed_version++) |
3931 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | |
20f733e7 | 3932 | |
4447d351 TH |
3933 | /* allocate host */ |
3934 | n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; | |
3935 | ||
3936 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); | |
3937 | hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); | |
3938 | if (!host || !hpriv) | |
3939 | return -ENOMEM; | |
3940 | host->private_data = hpriv; | |
f351b2d6 | 3941 | hpriv->n_ports = n_ports; |
4447d351 TH |
3942 | |
3943 | /* acquire resources */ | |
24dc5f33 TH |
3944 | rc = pcim_enable_device(pdev); |
3945 | if (rc) | |
20f733e7 | 3946 | return rc; |
20f733e7 | 3947 | |
0d5ff566 TH |
3948 | rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); |
3949 | if (rc == -EBUSY) | |
24dc5f33 | 3950 | pcim_pin_device(pdev); |
0d5ff566 | 3951 | if (rc) |
24dc5f33 | 3952 | return rc; |
4447d351 | 3953 | host->iomap = pcim_iomap_table(pdev); |
f351b2d6 | 3954 | hpriv->base = host->iomap[MV_PRIMARY_BAR]; |
20f733e7 | 3955 | |
d88184fb JG |
3956 | rc = pci_go_64(pdev); |
3957 | if (rc) | |
3958 | return rc; | |
3959 | ||
da2fa9ba ML |
3960 | rc = mv_create_dma_pools(hpriv, &pdev->dev); |
3961 | if (rc) | |
3962 | return rc; | |
3963 | ||
20f733e7 | 3964 | /* initialize adapter */ |
4447d351 | 3965 | rc = mv_init_host(host, board_idx); |
24dc5f33 TH |
3966 | if (rc) |
3967 | return rc; | |
20f733e7 | 3968 | |
6d3c30ef ML |
3969 | /* Enable message-signaled interrupts (MSI), if requested */ |
3970 | if (msi && pci_enable_msi(pdev) == 0) | |
3971 | hpriv->hp_flags |= MV_HP_FLAG_MSI; | |
20f733e7 | 3972 | |
31961943 | 3973 | mv_dump_pci_cfg(pdev, 0x68); |
4447d351 | 3974 | mv_print_info(host); |
20f733e7 | 3975 | |
4447d351 | 3976 | pci_set_master(pdev); |
ea8b4db9 | 3977 | pci_try_set_mwi(pdev); |
4447d351 | 3978 | return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, |
c5d3e45a | 3979 | IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); |
20f733e7 | 3980 | } |
7bb3c529 | 3981 | #endif |
20f733e7 | 3982 | |
f351b2d6 SB |
3983 | static int mv_platform_probe(struct platform_device *pdev); |
3984 | static int __devexit mv_platform_remove(struct platform_device *pdev); | |
3985 | ||
20f733e7 BR |
3986 | static int __init mv_init(void) |
3987 | { | |
7bb3c529 SB |
3988 | int rc = -ENODEV; |
3989 | #ifdef CONFIG_PCI | |
3990 | rc = pci_register_driver(&mv_pci_driver); | |
f351b2d6 SB |
3991 | if (rc < 0) |
3992 | return rc; | |
3993 | #endif | |
3994 | rc = platform_driver_register(&mv_platform_driver); | |
3995 | ||
3996 | #ifdef CONFIG_PCI | |
3997 | if (rc < 0) | |
3998 | pci_unregister_driver(&mv_pci_driver); | |
7bb3c529 SB |
3999 | #endif |
4000 | return rc; | |
20f733e7 BR |
4001 | } |
4002 | ||
4003 | static void __exit mv_exit(void) | |
4004 | { | |
7bb3c529 | 4005 | #ifdef CONFIG_PCI |
20f733e7 | 4006 | pci_unregister_driver(&mv_pci_driver); |
7bb3c529 | 4007 | #endif |
f351b2d6 | 4008 | platform_driver_unregister(&mv_platform_driver); |
20f733e7 BR |
4009 | } |
4010 | ||
4011 | MODULE_AUTHOR("Brett Russ"); | |
4012 | MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); | |
4013 | MODULE_LICENSE("GPL"); | |
4014 | MODULE_DEVICE_TABLE(pci, mv_pci_tbl); | |
4015 | MODULE_VERSION(DRV_VERSION); | |
17c5aab5 | 4016 | MODULE_ALIAS("platform:" DRV_NAME); |
20f733e7 BR |
4017 | |
4018 | module_init(mv_init); | |
4019 | module_exit(mv_exit); |