sata_mv: fix loop with last port
drivers/ata/sata_mv.c
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
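
/*
 * Illustrative compile-time check (not part of the original driver):
 * the sizes claimed in the comments above follow from the member
 * layout, and a hypothetical init-time block could verify them with:
 *
 *	BUILD_BUG_ON(sizeof(struct mv_crqb)     != 32);
 *	BUILD_BUG_ON(sizeof(struct mv_crqb_iie) != 32);
 *	BUILD_BUG_ON(sizeof(struct mv_crpb)     != 8);
 *	BUILD_BUG_ON(sizeof(struct mv_sg)       != 16);
 */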

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
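
/*
 * A minimal sketch (assumption: the driver's real pool setup lives in
 * an init path outside this excerpt) of how the pools above guarantee
 * the alignments the EDMA hardware demands.  dma_pool_create() takes
 * (name, dev, size, align, boundary), so creating each pool with
 * align == size puts every allocation on its own natural boundary:
 *
 *	hpriv->crqb_pool   = dma_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
 *					     MV_CRQB_Q_SZ, 0);
 *	hpriv->crpb_pool   = dma_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
 *					     MV_CRPB_Q_SZ, 0);
 *	hpriv->sg_tbl_pool = dma_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
 *					     MV_SG_TBL_SZ, 0);
 *
 * This yields the 1KB (CRQB) and 256B (CRPB) alignment noted earlier
 * without the over-allocation a page-granular dma_alloc_coherent()
 * would incur.
 */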

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
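
/* Worked example of the halving above: MV_MAX_SG_CT is 256, so
 * .sg_tablesize below is 128.  In the worst case each of those 128
 * sg entries straddles a 64K boundary and is split in two by
 * mv_fill_sg(), exactly filling the 256-entry ePRD table.
 */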
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
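
/*
 * Worked example of the register address math above (illustrative):
 * for port 6, mv_hc_from_port(6) == 6 >> 2 == 1 and
 * mv_hardport_from_port(6) == 6 & 3 == 2, so mv_port_base() returns
 *
 *	base + 0x20000 + 1 * 0x10000	(HC 1 base)
 *	     + 0x2000			(skip the arbiter block)
 *	     + 2 * 0x2000		(third port within HC 1)
 *
 * i.e. base + 0x36000.
 */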

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
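
/*
 * Note on the pointer packing above (illustrative): the CRQB queue is
 * 1KB-aligned, so the low 10 bits of crqb_dma are zero, and the 5-bit
 * queue index is shifted into bits 9:5.  Base address and producer
 * index can therefore share one register: e.g. req_idx == 3 gives
 * index == 3 << EDMA_REQ_Q_PTR_SHIFT == 0x60, and
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | 0x60 programs both at once.
 */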

/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *	(selects NCQ vs. non-NCQ EDMA operation)
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
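
/* Note on the polling loop in __mv_stop_dma() above: 1000 iterations
 * of udelay(100) bound the wait for EDMA_EN to clear at roughly 100ms
 * before the routine gives up and returns -EIO.
 */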

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
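
/*
 * Example of the mapping above: with the libata enum values
 * SCR_STATUS == 0, SCR_ERROR == 1 and SCR_CONTROL == 2, the offsets
 * come out as 0x300, 0x304 and 0x308 respectively, matching the
 * "ctrl, err regs follow status" note on SATA_STATUS_OFS; SCR_ACTIVE
 * alone maps to the non-contiguous 0x350.
 */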

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
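
/*
 * Example of the resulting configuration (illustrative): on a Gen IIE
 * chip with want_ncq set, the value written above is
 * EDMA_CFG_Q_DEPTH | (1 << 23) | (1 << 22) | (1 << 18) | (1 << 17) |
 * EDMA_CFG_NCQ == 0x00c6003f.
 */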

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
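
/*
 * Worked example of the boundary splitting above (illustrative): a
 * 12KB sg entry at dma address 0x1000f000 has offset 0xf000, and
 * 0xf000 + 0x3000 > 0x10000, so it becomes two ePRDs --
 * { 0x1000f000, len 0x1000 } and { 0x10010000, len 0x2000 } --
 * with EPRD_FLAG_END_OF_TBL set on the second if it is the last.
 */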

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
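
/*
 * Layout produced above (illustrative): bits 7:0 carry the register
 * data, bits 10:8 the shadow register address, bits 12:11 the fixed
 * control select value 0x2, and bit 15 marks the final command word.
 * Packing the command register as last therefore yields
 * data | (addr << 8) | 0x1000 | 0x8000.
 */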
1281
05b308e1
BR
1282/**
1283 * mv_qc_prep - Host specific command preparation.
1284 * @qc: queued command to prepare
1285 *
1286 * This routine simply redirects to the general purpose routine
1287 * if command is not DMA. Else, it handles prep of the CRQB
1288 * (command request block), does some sanity checking, and calls
1289 * the SG load routine.
1290 *
1291 * LOCKING:
1292 * Inherited from caller.
1293 */
31961943
BR
1294static void mv_qc_prep(struct ata_queued_cmd *qc)
1295{
1296 struct ata_port *ap = qc->ap;
1297 struct mv_port_priv *pp = ap->private_data;
e1469874 1298 __le16 *cw;
31961943
BR
1299 struct ata_taskfile *tf;
1300 u16 flags = 0;
a6432436 1301 unsigned in_index;
31961943 1302
138bfdd0
ML
1303 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1304 (qc->tf.protocol != ATA_PROT_NCQ))
31961943 1305 return;
20f733e7 1306
31961943
BR
1307 /* Fill in command request block
1308 */
e4e7b892 1309 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
31961943 1310 flags |= CRQB_FLAG_READ;
beec7dbc 1311 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
31961943
BR
1312 flags |= qc->tag << CRQB_TAG_SHIFT;
1313
bdd4ddde
JG
1314 /* get current queue index from software */
1315 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
a6432436
ML
1316
1317 pp->crqb[in_index].sg_addr =
eb73d558 1318 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
a6432436 1319 pp->crqb[in_index].sg_addr_hi =
eb73d558 1320 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
a6432436 1321 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
31961943 1322
a6432436 1323 cw = &pp->crqb[in_index].ata_cmd[0];
31961943
BR
1324 tf = &qc->tf;
1325
1326 /* Sadly, the CRQB cannot accomodate all registers--there are
1327 * only 11 bytes...so we must pick and choose required
1328 * registers based on the command. So, we drop feature and
1329 * hob_feature for [RW] DMA commands, but they are needed for
1330 * NCQ. NCQ will drop hob_nsect.
20f733e7 1331 */
31961943
BR
1332 switch (tf->command) {
1333 case ATA_CMD_READ:
1334 case ATA_CMD_READ_EXT:
1335 case ATA_CMD_WRITE:
1336 case ATA_CMD_WRITE_EXT:
c15d85c8 1337 case ATA_CMD_WRITE_FUA_EXT:
31961943
BR
1338 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1339 break;
31961943
BR
1340 case ATA_CMD_FPDMA_READ:
1341 case ATA_CMD_FPDMA_WRITE:
8b260248 1342 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
31961943
BR
1343 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1344 break;
31961943
BR
1345 default:
1346 /* The only other commands EDMA supports in non-queued and
1347 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1348 * of which are defined/used by Linux. If we get here, this
1349 * driver needs work.
1350 *
1351 * FIXME: modify libata to give qc_prep a return value and
1352 * return error here.
1353 */
1354 BUG_ON(tf->command);
1355 break;
1356 }
1357 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1358 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1359 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1360 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1361 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1362 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1363 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1364 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1365 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1366
e4e7b892
JG
1367 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1368 return;
1369 mv_fill_sg(qc);
1370}
1371
1372/**
1373 * mv_qc_prep_iie - Host specific command preparation.
1374 * @qc: queued command to prepare
1375 *
1376 * This routine simply redirects to the general purpose routine
1377 * if command is not DMA. Else, it handles prep of the CRQB
1378 * (command request block), does some sanity checking, and calls
1379 * the SG load routine.
1380 *
1381 * LOCKING:
1382 * Inherited from caller.
1383 */
1384static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1385{
1386 struct ata_port *ap = qc->ap;
1387 struct mv_port_priv *pp = ap->private_data;
1388 struct mv_crqb_iie *crqb;
1389 struct ata_taskfile *tf;
a6432436 1390 unsigned in_index;
e4e7b892
JG
1391 u32 flags = 0;
1392
138bfdd0
ML
1393 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1394 (qc->tf.protocol != ATA_PROT_NCQ))
e4e7b892
JG
1395 return;
1396
e4e7b892
JG
1397 /* Fill in Gen IIE command request block
1398 */
1399 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1400 flags |= CRQB_FLAG_READ;
1401
beec7dbc 1402 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
e4e7b892 1403 flags |= qc->tag << CRQB_TAG_SHIFT;
8c0aeb4a 1404 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
e4e7b892 1405
bdd4ddde
JG
1406 /* get current queue index from software */
1407 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
a6432436
ML
1408
1409 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
eb73d558
ML
1410 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1411 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
e4e7b892
JG
1412 crqb->flags = cpu_to_le32(flags);
1413
1414 tf = &qc->tf;
1415 crqb->ata_cmd[0] = cpu_to_le32(
1416 (tf->command << 16) |
1417 (tf->feature << 24)
1418 );
1419 crqb->ata_cmd[1] = cpu_to_le32(
1420 (tf->lbal << 0) |
1421 (tf->lbam << 8) |
1422 (tf->lbah << 16) |
1423 (tf->device << 24)
1424 );
1425 crqb->ata_cmd[2] = cpu_to_le32(
1426 (tf->hob_lbal << 0) |
1427 (tf->hob_lbam << 8) |
1428 (tf->hob_lbah << 16) |
1429 (tf->hob_feature << 24)
1430 );
1431 crqb->ata_cmd[3] = cpu_to_le32(
1432 (tf->nsect << 0) |
1433 (tf->hob_nsect << 8)
1434 );
1435
1436 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
31961943 1437 return;
31961943
BR
1438 mv_fill_sg(qc);
1439}
1440
05b308e1
BR
1441/**
1442 * mv_qc_issue - Initiate a command to the host
1443 * @qc: queued command to start
1444 *
1445 * This routine simply redirects to the general purpose routine
1446 * if command is not DMA. Else, it sanity checks our local
1447 * caches of the request producer/consumer indices then enables
1448 * DMA and bumps the request producer index.
1449 *
1450 * LOCKING:
1451 * Inherited from caller.
1452 */
9a3d9eb0 1453static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
31961943 1454{
c5d3e45a
JG
1455 struct ata_port *ap = qc->ap;
1456 void __iomem *port_mmio = mv_ap_base(ap);
1457 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 1458 u32 in_index;
31961943 1459
138bfdd0
ML
1460 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1461 (qc->tf.protocol != ATA_PROT_NCQ)) {
31961943
BR
1462 /* We're about to send a non-EDMA capable command to the
1463 * port. Turn off EDMA so there won't be problems accessing
1464 * shadow block, etc registers.
1465 */
0ea9e179 1466 __mv_stop_dma(ap);
31961943
BR
1467 return ata_qc_issue_prot(qc);
1468 }
1469
72109168 1470 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
bdd4ddde 1471
bdd4ddde 1472 pp->req_idx++;
31961943 1473
bdd4ddde 1474 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
31961943
BR
1475
1476 /* and write the request in pointer to kick the EDMA to life */
bdd4ddde
JG
1477 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1478 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
31961943
BR
1479
1480 return 0;
1481}
1482
05b308e1
BR
1483/**
1484 * mv_err_intr - Handle error interrupts on the port
1485 * @ap: ATA channel to manipulate
9b358e30 1486 * @reset_allowed: bool: 0 == don't trigger from reset here
05b308e1
BR
1487 *
1488 * In most cases, just clear the interrupt and move on. However,
1489 * some cases require an eDMA reset, which is done right before
1490 * the COMRESET in mv_phy_reset(). The SERR case requires a
1491 * clear of pending errors in the SATA SERROR register. Finally,
1492 * if the port disabled DMA, update our cached copy to match.
1493 *
1494 * LOCKING:
1495 * Inherited from caller.
1496 */
bdd4ddde 1497static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
31961943
BR
1498{
1499 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde
JG
1500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
9af5c9c9 1505 struct ata_eh_info *ehi = &ap->link.eh_info;
20f733e7 1506
bdd4ddde 1507 ata_ehi_clear_desc(ehi);
20f733e7 1508
bdd4ddde
JG
1509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1512 */
936fd732
TH
1513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
20f733e7 1515 }
bdd4ddde
JG
1516
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1518
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1520
1521 /*
1522 * all generations share these EDMA error cause bits
1523 */
1524
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 1528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
1529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
b64bbc39 1532 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
1533 }
1534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 1537 "dev disconnect" : "dev connect");
3606a380 1538 action |= ATA_EH_HARDRESET;
bdd4ddde
JG
1539 }
1540
ee9ccdf7 1541 if (IS_GEN_I(hpriv)) {
bdd4ddde
JG
1542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1543
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 struct mv_port_priv *pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1547 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1548 }
1549 } else {
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1551
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 struct mv_port_priv *pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1555 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1556 }
1557
1558 if (edma_err_cause & EDMA_ERR_SERR) {
936fd732
TH
1559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
bdd4ddde
JG
1561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1563 }
afb0edd9 1564 }
20f733e7
BR
1565
1566 /* Clear EDMA now that SERR cleanup done */
3606a380 1567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
20f733e7 1568
bdd4ddde
JG
1569 if (!err_mask) {
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1572 }
1573
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1576
1577 if (qc)
1578 qc->err_mask |= err_mask;
1579 else
1580 ehi->err_mask |= err_mask;
1581
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1584 else
1585 ata_port_abort(ap);
1586}
1587
1588static void mv_intr_pio(struct ata_port *ap)
1589{
1590 struct ata_queued_cmd *qc;
1591 u8 ata_status;
1592
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1596 return;
1597
1598 /* get active ATA command */
9af5c9c9 1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1600 if (unlikely(!qc)) /* no active tag */
1601 return;
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1603 return;
1604
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1608}
1609
1610static void mv_intr_edma(struct ata_port *ap)
1611{
1612 void __iomem *port_mmio = mv_ap_base(ap);
1613 struct mv_host_priv *hpriv = ap->host->private_data;
1614 struct mv_port_priv *pp = ap->private_data;
1615 struct ata_queued_cmd *qc;
1616 u32 out_index, in_index;
1617 bool work_done = false;
1618
1619 /* get h/w response queue pointer */
1620 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1621 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1622
1623 while (1) {
1624 u16 status;
6c1153e0 1625 unsigned int tag;
bdd4ddde
JG
1626
1627 /* get s/w response queue last-read pointer, and compare */
1628 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1629 if (in_index == out_index)
1630 break;
1631
bdd4ddde 1632 /* 50xx: get active ATA command */
0ea9e179 1633 if (IS_GEN_I(hpriv))
9af5c9c9 1634 tag = ap->link.active_tag;
bdd4ddde 1635
6c1153e0
JG
1636 /* Gen II/IIE: get active ATA command via tag, to enable
 1637 * support for queueing. This works transparently for
1638 * queued and non-queued modes.
bdd4ddde 1639 */
8c0aeb4a
ML
1640 else
1641 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
bdd4ddde 1642
6c1153e0 1643 qc = ata_qc_from_tag(ap, tag);
bdd4ddde 1644
cb924419
ML
1645 /* For non-NCQ mode, the lower 8 bits of status
1646 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1647 * which should be zero if all went well.
bdd4ddde
JG
1648 */
1649 status = le16_to_cpu(pp->crpb[out_index].flags);
cb924419 1650 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
bdd4ddde
JG
1651 mv_err_intr(ap, qc);
1652 return;
1653 }
1654
1655 /* and finally, complete the ATA command */
1656 if (qc) {
1657 qc->err_mask |=
1658 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1659 ata_qc_complete(qc);
1660 }
1661
0ea9e179 1662 /* advance software response queue pointer, to
bdd4ddde
JG
1663 * indicate (after the loop completes) to hardware
1664 * that we have consumed a response queue entry.
1665 */
1666 work_done = true;
1667 pp->resp_idx++;
1668 }
1669
1670 if (work_done)
1671 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1672 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1673 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
20f733e7
BR
1674}
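/*
 * Worked example of the response-queue arithmetic above (illustrative):
 * with MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = 0x1f.  If the s/w
 * counter pp->resp_idx has reached 33, then out_index = 33 & 0x1f = 1;
 * the loop drains entries until out_index catches up with the h/w
 * in_index, and only then writes the new out pointer back, saving one
 * MMIO write per completion batch.
 */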
1675
05b308e1
BR
1676/**
1677 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1678 * @host: host specific structure
05b308e1
BR
1679 * @relevant: port error bits relevant to this host controller
1680 * @hc: which host controller we're to look at
1681 *
 1682 * Read, then write-clear, the HC interrupt status, then walk each
 1683 * port connected to the HC and see if it needs servicing. Port
 1684 * success ints are reported in the HC interrupt status reg; the
 1685 * port error ints are reported in the higher-level main
 1686 * interrupt status register and thus are passed in via the
 1687 * 'relevant' argument.
1688 *
1689 * LOCKING:
1690 * Inherited from caller.
1691 */
cca3974e 1692static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1693{
f351b2d6
SB
1694 struct mv_host_priv *hpriv = host->private_data;
1695 void __iomem *mmio = hpriv->base;
20f733e7 1696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1697 u32 hc_irq_cause;
f351b2d6 1698 int port, port0, last_port;
20f733e7 1699
35177265 1700 if (hc == 0)
20f733e7 1701 port0 = 0;
35177265 1702 else
20f733e7 1703 port0 = MV_PORTS_PER_HC;
20f733e7 1704
f351b2d6
SB
1705 if (HAS_PCI(host))
1706 last_port = port0 + MV_PORTS_PER_HC;
1707 else
1708 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1709 /* we'll need the HC success int register in most cases */
1710 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1711 if (!hc_irq_cause)
1712 return;
1713
1714 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1715
1716 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1717 hc, relevant, hc_irq_cause);
20f733e7 1718
8f71efe2 1719 for (port = port0; port < last_port; port++) {
cca3974e 1720 struct ata_port *ap = host->ports[port];
8f71efe2 1721 struct mv_port_priv *pp;
bdd4ddde 1722 int have_err_bits, hard_port, shift;
55d8ca4f 1723
bdd4ddde 1724 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1725 continue;
1726
8f71efe2
YL
1727 pp = ap->private_data;
1728
31961943 1729 shift = port << 1; /* (port * 2) */
20f733e7
BR
1730 if (port >= MV_PORTS_PER_HC) {
1731 shift++; /* skip bit 8 in the HC Main IRQ reg */
1732 }
bdd4ddde
JG
1733 have_err_bits = ((PORT0_ERR << shift) & relevant);
1734
1735 if (unlikely(have_err_bits)) {
1736 struct ata_queued_cmd *qc;
8b260248 1737
9af5c9c9 1738 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1739 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1740 continue;
1741
1742 mv_err_intr(ap, qc);
1743 continue;
1744 }
1745
1746 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1747
1748 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1749 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1750 mv_intr_edma(ap);
1751 } else {
1752 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1753 mv_intr_pio(ap);
20f733e7
BR
1754 }
1755 }
1756 VPRINTK("EXIT\n");
1757}
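/*
 * Illustrative bit-layout example for the shift computed above (not
 * part of the driver): each port owns an err/done bit pair in the main
 * cause register, and ports on HC1 skip bit 8 (the HC0 coalescing
 * bit).  For port 5:
 *
 *	shift = 5 << 1;		= 10
 *	shift++;		= 11	(port >= MV_PORTS_PER_HC)
 *	have_err_bits = (PORT0_ERR << 11) & relevant;
 */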
1758
bdd4ddde
JG
1759static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1760{
02a121da 1761 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1762 struct ata_port *ap;
1763 struct ata_queued_cmd *qc;
1764 struct ata_eh_info *ehi;
1765 unsigned int i, err_mask, printed = 0;
1766 u32 err_cause;
1767
02a121da 1768 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1769
1770 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1771 err_cause);
1772
1773 DPRINTK("All regs @ PCI error\n");
1774 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1775
02a121da 1776 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1777
1778 for (i = 0; i < host->n_ports; i++) {
1779 ap = host->ports[i];
936fd732 1780 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1781 ehi = &ap->link.eh_info;
bdd4ddde
JG
1782 ata_ehi_clear_desc(ehi);
1783 if (!printed++)
1784 ata_ehi_push_desc(ehi,
1785 "PCI err cause 0x%08x", err_cause);
1786 err_mask = AC_ERR_HOST_BUS;
1787 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1788 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1789 if (qc)
1790 qc->err_mask |= err_mask;
1791 else
1792 ehi->err_mask |= err_mask;
1793
1794 ata_port_freeze(ap);
1795 }
1796 }
1797}
1798
05b308e1 1799/**
c5d3e45a 1800 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1801 * @irq: unused
1802 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1803 *
 1804 * Read the read-only register to determine if any host
 1805 * controllers have pending interrupts. If so, call the lower-level
 1806 * routine to handle them. Also check for PCI errors, which are only
 1807 * reported here.
1808 *
8b260248 1809 * LOCKING:
cca3974e 1810 * This routine holds the host lock while processing pending
05b308e1
BR
1811 * interrupts.
1812 */
7d12e780 1813static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1814{
cca3974e 1815 struct ata_host *host = dev_instance;
f351b2d6 1816 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1817 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1818 void __iomem *mmio = hpriv->base;
646a4da5 1819 u32 irq_stat, irq_mask;
20f733e7 1820
646a4da5 1821 spin_lock(&host->lock);
f351b2d6
SB
1822
1823 irq_stat = readl(hpriv->main_cause_reg_addr);
1824 irq_mask = readl(hpriv->main_mask_reg_addr);
20f733e7
BR
1825
1826 /* check the cases where we either have nothing pending or have read
1827 * a bogus register value which can indicate HW removal or PCI fault
1828 */
646a4da5
ML
1829 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1830 goto out_unlock;
20f733e7 1831
cca3974e 1832 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1833
7bb3c529 1834 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
bdd4ddde
JG
1835 mv_pci_error(host, mmio);
1836 handled = 1;
1837 goto out_unlock; /* skip all other HC irq handling */
1838 }
1839
20f733e7
BR
1840 for (hc = 0; hc < n_hcs; hc++) {
1841 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1842 if (relevant) {
cca3974e 1843 mv_host_intr(host, relevant, hc);
bdd4ddde 1844 handled = 1;
20f733e7
BR
1845 }
1846 }
615ab953 1847
bdd4ddde 1848out_unlock:
cca3974e 1849 spin_unlock(&host->lock);
20f733e7
BR
1850
1851 return IRQ_RETVAL(handled);
1852}
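/*
 * Example of the per-HC masking above (illustrative): HC0_IRQ_PEND is
 * 0x1ff and HC_SHIFT is 9, so HC0's ports report in bits 0..8 of the
 * main cause register and HC1's in bits 9..17; each loop pass isolates
 * one controller's slice:
 *
 *	relevant = irq_stat & (0x1ff << (hc * 9));
 */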
1853
c9d39130
JG
1854static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1855{
1856 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1857 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1858
1859 return hc_mmio + ofs;
1860}
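/*
 * Worked example (illustrative): for port 6, mv_hc_base_from_port()
 * yields HC1's base and mv_hardport_from_port(6) == 2, so the PHY
 * block lands at hc_mmio + (2 + 1) * 0x100 = hc_mmio + 0x300; the
 * "+ 1" skips the HC's own registers at offset 0.
 */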
1861
1862static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1863{
1864 unsigned int ofs;
1865
1866 switch (sc_reg_in) {
1867 case SCR_STATUS:
1868 case SCR_ERROR:
1869 case SCR_CONTROL:
1870 ofs = sc_reg_in * sizeof(u32);
1871 break;
1872 default:
1873 ofs = 0xffffffffU;
1874 break;
1875 }
1876 return ofs;
1877}
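/*
 * Example (illustrative): libata defines SCR_STATUS/SCR_ERROR/
 * SCR_CONTROL as 0/1/2, so the 5xxx PHY block lays them out back to
 * back: mv5_scr_offset(SCR_ERROR) == 1 * sizeof(u32) == 4.  Anything
 * else maps to 0xffffffffU, which the accessors below turn into
 * -EINVAL.
 */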
1878
da3dbb17 1879static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1880{
f351b2d6
SB
1881 struct mv_host_priv *hpriv = ap->host->private_data;
1882 void __iomem *mmio = hpriv->base;
0d5ff566 1883 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1884 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1885
da3dbb17
TH
1886 if (ofs != 0xffffffffU) {
1887 *val = readl(addr + ofs);
1888 return 0;
1889 } else
1890 return -EINVAL;
c9d39130
JG
1891}
1892
da3dbb17 1893static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1894{
f351b2d6
SB
1895 struct mv_host_priv *hpriv = ap->host->private_data;
1896 void __iomem *mmio = hpriv->base;
0d5ff566 1897 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1898 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1899
da3dbb17 1900 if (ofs != 0xffffffffU) {
0d5ff566 1901 writelfl(val, addr + ofs);
da3dbb17
TH
1902 return 0;
1903 } else
1904 return -EINVAL;
c9d39130
JG
1905}
1906
7bb3c529 1907static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1908{
7bb3c529 1909 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1910 int early_5080;
1911
44c10138 1912 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1913
1914 if (!early_5080) {
1915 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1916 tmp |= (1 << 0);
1917 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1918 }
1919
7bb3c529 1920 mv_reset_pci_bus(host, mmio);
522479fb
JG
1921}
1922
1923static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1924{
1925 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1926}
1927
47c2b677 1928static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1929 void __iomem *mmio)
1930{
c9d39130
JG
1931 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1932 u32 tmp;
1933
1934 tmp = readl(phy_mmio + MV5_PHY_MODE);
1935
1936 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1937 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1938}
1939
47c2b677 1940static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1941{
522479fb
JG
1942 u32 tmp;
1943
1944 writel(0, mmio + MV_GPIO_PORT_CTL);
1945
1946 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1947
1948 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1949 tmp |= ~(1 << 0);
1950 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1951}
1952
2a47ce06
JG
1953static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1954 unsigned int port)
bca1c4eb 1955{
c9d39130
JG
1956 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1957 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1958 u32 tmp;
1959 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1960
1961 if (fix_apm_sq) {
1962 tmp = readl(phy_mmio + MV5_LT_MODE);
1963 tmp |= (1 << 19);
1964 writel(tmp, phy_mmio + MV5_LT_MODE);
1965
1966 tmp = readl(phy_mmio + MV5_PHY_CTL);
1967 tmp &= ~0x3;
1968 tmp |= 0x1;
1969 writel(tmp, phy_mmio + MV5_PHY_CTL);
1970 }
1971
1972 tmp = readl(phy_mmio + MV5_PHY_MODE);
1973 tmp &= ~mask;
1974 tmp |= hpriv->signal[port].pre;
1975 tmp |= hpriv->signal[port].amps;
1976 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1977}
1978
c9d39130
JG
1979
1980#undef ZERO
1981#define ZERO(reg) writel(0, port_mmio + (reg))
1982static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1983 unsigned int port)
1984{
1985 void __iomem *port_mmio = mv_port_base(mmio, port);
1986
1987 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1988
1989 mv_channel_reset(hpriv, mmio, port);
1990
1991 ZERO(0x028); /* command */
1992 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1993 ZERO(0x004); /* timer */
1994 ZERO(0x008); /* irq err cause */
1995 ZERO(0x00c); /* irq err mask */
1996 ZERO(0x010); /* rq bah */
1997 ZERO(0x014); /* rq inp */
1998 ZERO(0x018); /* rq outp */
1999 ZERO(0x01c); /* respq bah */
2000 ZERO(0x024); /* respq outp */
2001 ZERO(0x020); /* respq inp */
2002 ZERO(0x02c); /* test control */
2003 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2004}
2005#undef ZERO
2006
2007#define ZERO(reg) writel(0, hc_mmio + (reg))
2008static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2009 unsigned int hc)
47c2b677 2010{
c9d39130
JG
2011 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2012 u32 tmp;
2013
2014 ZERO(0x00c);
2015 ZERO(0x010);
2016 ZERO(0x014);
2017 ZERO(0x018);
2018
2019 tmp = readl(hc_mmio + 0x20);
2020 tmp &= 0x1c1c1c1c;
2021 tmp |= 0x03030303;
2022 writel(tmp, hc_mmio + 0x20);
2023}
2024#undef ZERO
2025
2026static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2027 unsigned int n_hc)
2028{
2029 unsigned int hc, port;
2030
2031 for (hc = 0; hc < n_hc; hc++) {
2032 for (port = 0; port < MV_PORTS_PER_HC; port++)
2033 mv5_reset_hc_port(hpriv, mmio,
2034 (hc * MV_PORTS_PER_HC) + port);
2035
2036 mv5_reset_one_hc(hpriv, mmio, hc);
2037 }
2038
2039 return 0;
47c2b677
JG
2040}
2041
101ffae2
JG
2042#undef ZERO
2043#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2044static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2045{
02a121da 2046 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2047 u32 tmp;
2048
2049 tmp = readl(mmio + MV_PCI_MODE);
2050 tmp &= 0xff00ffff;
2051 writel(tmp, mmio + MV_PCI_MODE);
2052
2053 ZERO(MV_PCI_DISC_TIMER);
2054 ZERO(MV_PCI_MSI_TRIGGER);
2055 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2056 ZERO(HC_MAIN_IRQ_MASK_OFS);
2057 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2058 ZERO(hpriv->irq_cause_ofs);
2059 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2060 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2061 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2062 ZERO(MV_PCI_ERR_ATTRIBUTE);
2063 ZERO(MV_PCI_ERR_COMMAND);
2064}
2065#undef ZERO
2066
2067static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2068{
2069 u32 tmp;
2070
2071 mv5_reset_flash(hpriv, mmio);
2072
2073 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2074 tmp &= 0x3;
2075 tmp |= (1 << 5) | (1 << 6);
2076 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2077}
2078
2079/**
2080 * mv6_reset_hc - Perform the 6xxx global soft reset
2081 * @mmio: base address of the HBA
2082 *
2083 * This routine only applies to 6xxx parts.
2084 *
2085 * LOCKING:
2086 * Inherited from caller.
2087 */
c9d39130
JG
2088static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2089 unsigned int n_hc)
101ffae2
JG
2090{
2091 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2092 int i, rc = 0;
2093 u32 t;
2094
 2095 /* Follow the procedure defined in the PCI "main command and
 2096 * status register" table.
2097 */
2098 t = readl(reg);
2099 writel(t | STOP_PCI_MASTER, reg);
2100
2101 for (i = 0; i < 1000; i++) {
2102 udelay(1);
2103 t = readl(reg);
2dcb407e 2104 if (PCI_MASTER_EMPTY & t)
101ffae2 2105 break;
101ffae2
JG
2106 }
2107 if (!(PCI_MASTER_EMPTY & t)) {
2108 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2109 rc = 1;
2110 goto done;
2111 }
2112
2113 /* set reset */
2114 i = 5;
2115 do {
2116 writel(t | GLOB_SFT_RST, reg);
2117 t = readl(reg);
2118 udelay(1);
2119 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2120
2121 if (!(GLOB_SFT_RST & t)) {
2122 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2123 rc = 1;
2124 goto done;
2125 }
2126
2127 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2128 i = 5;
2129 do {
2130 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2131 t = readl(reg);
2132 udelay(1);
2133 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2134
2135 if (GLOB_SFT_RST & t) {
2136 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2137 rc = 1;
2138 }
2139done:
2140 return rc;
2141}
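/*
 * The reset handshake above follows a common MMIO idiom: flip a bit,
 * poll with a bounded budget, treat timeout as failure.  A generic
 * rendering of that idiom (illustrative only; hypothetical helper, not
 * part of the driver):
 */
#if 0	/* example only, never compiled */
static int mv_poll_bit(void __iomem *reg, u32 bit, bool want_set,
		       int budget_us)
{
	while (budget_us-- > 0) {
		u32 val = readl(reg);

		if (!!(val & bit) == want_set)
			return 0;	/* bit reached the wanted state */
		udelay(1);
	}
	return -ETIMEDOUT;		/* caller decides how to fail */
}
#endif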
2142
47c2b677 2143static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2144 void __iomem *mmio)
2145{
2146 void __iomem *port_mmio;
2147 u32 tmp;
2148
ba3fe8fb
JG
2149 tmp = readl(mmio + MV_RESET_CFG);
2150 if ((tmp & (1 << 0)) == 0) {
47c2b677 2151 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2152 hpriv->signal[idx].pre = 0x1 << 5;
2153 return;
2154 }
2155
2156 port_mmio = mv_port_base(mmio, idx);
2157 tmp = readl(port_mmio + PHY_MODE2);
2158
2159 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2160 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2161}
2162
47c2b677 2163static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2164{
47c2b677 2165 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2166}
2167
c9d39130 2168static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2169 unsigned int port)
bca1c4eb 2170{
c9d39130
JG
2171 void __iomem *port_mmio = mv_port_base(mmio, port);
2172
bca1c4eb 2173 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2174 int fix_phy_mode2 =
2175 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2176 int fix_phy_mode4 =
47c2b677
JG
2177 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2178 u32 m2, tmp;
2179
2180 if (fix_phy_mode2) {
2181 m2 = readl(port_mmio + PHY_MODE2);
2182 m2 &= ~(1 << 16);
2183 m2 |= (1 << 31);
2184 writel(m2, port_mmio + PHY_MODE2);
2185
2186 udelay(200);
2187
2188 m2 = readl(port_mmio + PHY_MODE2);
2189 m2 &= ~((1 << 16) | (1 << 31));
2190 writel(m2, port_mmio + PHY_MODE2);
2191
2192 udelay(200);
2193 }
2194
2195 /* who knows what this magic does */
2196 tmp = readl(port_mmio + PHY_MODE3);
2197 tmp &= ~0x7F800000;
2198 tmp |= 0x2A800000;
2199 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2200
2201 if (fix_phy_mode4) {
47c2b677 2202 u32 m4;
bca1c4eb
JG
2203
2204 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2205
2206 if (hp_flags & MV_HP_ERRATA_60X1B2)
2207 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2208
2209 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2210
2211 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2212
2213 if (hp_flags & MV_HP_ERRATA_60X1B2)
2214 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2215 }
2216
2217 /* Revert values of pre-emphasis and signal amps to the saved ones */
2218 m2 = readl(port_mmio + PHY_MODE2);
2219
2220 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2221 m2 |= hpriv->signal[port].amps;
2222 m2 |= hpriv->signal[port].pre;
47c2b677 2223 m2 &= ~(1 << 16);
bca1c4eb 2224
e4e7b892
JG
2225 /* according to mvSata 3.6.1, some IIE values are fixed */
2226 if (IS_GEN_IIE(hpriv)) {
2227 m2 &= ~0xC30FF01F;
2228 m2 |= 0x0000900F;
2229 }
2230
bca1c4eb
JG
2231 writel(m2, port_mmio + PHY_MODE2);
2232}
2233
f351b2d6
SB
2234/* TODO: use the generic LED interface to configure the SATA Presence */
 2235/* & Activity LEDs on the board */
2236static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2237 void __iomem *mmio)
2238{
2239 return;
2240}
2241
2242static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2243 void __iomem *mmio)
2244{
2245 void __iomem *port_mmio;
2246 u32 tmp;
2247
2248 port_mmio = mv_port_base(mmio, idx);
2249 tmp = readl(port_mmio + PHY_MODE2);
2250
2251 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2252 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2253}
2254
2255#undef ZERO
2256#define ZERO(reg) writel(0, port_mmio + (reg))
2257static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2258 void __iomem *mmio, unsigned int port)
2259{
2260 void __iomem *port_mmio = mv_port_base(mmio, port);
2261
2262 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2263
2264 mv_channel_reset(hpriv, mmio, port);
2265
2266 ZERO(0x028); /* command */
2267 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2268 ZERO(0x004); /* timer */
2269 ZERO(0x008); /* irq err cause */
2270 ZERO(0x00c); /* irq err mask */
2271 ZERO(0x010); /* rq bah */
2272 ZERO(0x014); /* rq inp */
2273 ZERO(0x018); /* rq outp */
2274 ZERO(0x01c); /* respq bah */
2275 ZERO(0x024); /* respq outp */
2276 ZERO(0x020); /* respq inp */
2277 ZERO(0x02c); /* test control */
2278 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2279}
2280
2281#undef ZERO
2282
2283#define ZERO(reg) writel(0, hc_mmio + (reg))
2284static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2285 void __iomem *mmio)
2286{
2287 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2288
2289 ZERO(0x00c);
2290 ZERO(0x010);
2291 ZERO(0x014);
2292
2293}
2294
2295#undef ZERO
2296
2297static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2298 void __iomem *mmio, unsigned int n_hc)
2299{
2300 unsigned int port;
2301
2302 for (port = 0; port < hpriv->n_ports; port++)
2303 mv_soc_reset_hc_port(hpriv, mmio, port);
2304
2305 mv_soc_reset_one_hc(hpriv, mmio);
2306
2307 return 0;
2308}
2309
2310static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2311 void __iomem *mmio)
2312{
2313 return;
2314}
2315
2316static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2317{
2318 return;
2319}
2320
c9d39130
JG
2321static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2322 unsigned int port_no)
2323{
2324 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2325
2326 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2327
ee9ccdf7 2328 if (IS_GEN_II(hpriv)) {
c9d39130 2329 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2330 ifctl |= (1 << 7); /* enable gen2i speed */
2331 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2332 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2333 }
2334
2335 udelay(25); /* allow reset propagation */
2336
2337 /* Spec never mentions clearing the bit. Marvell's driver does
2338 * clear the bit, however.
2339 */
2340 writelfl(0, port_mmio + EDMA_CMD_OFS);
2341
2342 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2343
ee9ccdf7 2344 if (IS_GEN_I(hpriv))
c9d39130
JG
2345 mdelay(1);
2346}
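/*
 * Worked example of the Gen-II interface-control fixup above
 * (illustrative): if SATA_INTERFACE_CTL reads 0x12345678, then
 *
 *	ifctl |= (1 << 7);			-> 0x123456f8
 *	ifctl = (ifctl & 0xfff) | 0x9b1000;	-> 0x009b16f8
 *
 * i.e. only the low 12 bits survive; the rest is forced to the value
 * the chip spec mandates, with bit 7 enabling gen2i speed.
 */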
2347
05b308e1 2348/**
bdd4ddde 2349 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2350 * @ap: ATA channel to manipulate
2351 *
 2352 * Part of this is taken from __sata_phy_reset. Note that, unlike
 2353 * that routine, this version sleeps (via the msleep() calls
 2354 * below), so it must not be called from interrupt context.
 2355 *
 2356 * LOCKING:
 2357 * Inherited from caller.
31961943 2358 */
bdd4ddde
JG
2359static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2360 unsigned long deadline)
20f733e7 2361{
095fec88 2362 struct mv_port_priv *pp = ap->private_data;
cca3974e 2363 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2364 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2365 int retry = 5;
2366 u32 sstatus;
20f733e7
BR
2367
2368 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2369
da3dbb17
TH
2370#ifdef DEBUG
2371 {
2372 u32 sstatus, serror, scontrol;
2373
2374 mv_scr_read(ap, SCR_STATUS, &sstatus);
2375 mv_scr_read(ap, SCR_ERROR, &serror);
2376 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2377 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2378 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2379 }
2380#endif
20f733e7 2381
22374677
JG
2382 /* Issue COMRESET via SControl */
2383comreset_retry:
936fd732 2384 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2385 msleep(1);
22374677 2386
936fd732 2387 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2388 msleep(20);
22374677 2389
31961943 2390 do {
936fd732 2391 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2392 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2393 break;
22374677 2394
bdd4ddde 2395 msleep(1);
c5d3e45a 2396 } while (time_before(jiffies, deadline));
20f733e7 2397
22374677 2398 /* work around errata */
ee9ccdf7 2399 if (IS_GEN_II(hpriv) &&
22374677
JG
2400 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2401 (retry-- > 0))
2402 goto comreset_retry;
095fec88 2403
da3dbb17
TH
2404#ifdef DEBUG
2405 {
2406 u32 sstatus, serror, scontrol;
2407
2408 mv_scr_read(ap, SCR_STATUS, &sstatus);
2409 mv_scr_read(ap, SCR_ERROR, &serror);
2410 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2411 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2412 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2413 }
2414#endif
31961943 2415
936fd732 2416 if (ata_link_offline(&ap->link)) {
bdd4ddde 2417 *class = ATA_DEV_NONE;
20f733e7
BR
2418 return;
2419 }
2420
22374677
JG
 2421 /* even after SStatus reflects that the device is ready,
 2422 * it seems to take a while for the link to be fully
 2423 * established (and thus Status no longer 0x80/0x7F),
 2424 * so we poll a bit for that here.
2425 */
2426 retry = 20;
2427 while (1) {
2428 u8 drv_stat = ata_check_status(ap);
2429 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2430 break;
bdd4ddde 2431 msleep(500);
22374677
JG
2432 if (retry-- <= 0)
2433 break;
bdd4ddde
JG
2434 if (time_after(jiffies, deadline))
2435 break;
22374677
JG
2436 }
2437
bdd4ddde
JG
2438 /* FIXME: if we passed the deadline, the following
2439 * code probably produces an invalid result
2440 */
20f733e7 2441
bdd4ddde 2442 /* finally, read device signature from TF registers */
3f19859e 2443 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2444
2445 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2446
bdd4ddde 2447 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2448
bca1c4eb 2449 VPRINTK("EXIT\n");
20f733e7
BR
2450}
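/*
 * Illustrative breakdown of the COMRESET sequence above: in SControl,
 * DET occupies bits 3:0 and IPM bits 11:8, so
 *
 *	0x301 = IPM:3 (disallow partial/slumber), DET:1 (assert COMRESET)
 *	0x300 = IPM:3, DET:0 (release reset, renegotiate)
 *
 * and the loop then watches SStatus.DET for 0x3 (phy up) or 0x0
 * (nothing attached).
 */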
2451
cc0680a5 2452static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2453{
cc0680a5 2454 struct ata_port *ap = link->ap;
bdd4ddde 2455 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2456 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2457 int rc;
0ea9e179 2458
bdd4ddde
JG
2459 rc = mv_stop_dma(ap);
2460 if (rc)
2461 ehc->i.action |= ATA_EH_HARDRESET;
2462
2463 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2464 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2465 ehc->i.action |= ATA_EH_HARDRESET;
2466 }
2467
2468 /* if we're about to do hardreset, nothing more to do */
2469 if (ehc->i.action & ATA_EH_HARDRESET)
2470 return 0;
2471
cc0680a5 2472 if (ata_link_online(link))
bdd4ddde
JG
2473 rc = ata_wait_ready(ap, deadline);
2474 else
2475 rc = -ENODEV;
2476
2477 return rc;
22374677
JG
2478}
2479
cc0680a5 2480static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2481 unsigned long deadline)
31961943 2482{
cc0680a5 2483 struct ata_port *ap = link->ap;
bdd4ddde 2484 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2485 void __iomem *mmio = hpriv->base;
31961943 2486
bdd4ddde 2487 mv_stop_dma(ap);
31961943 2488
bdd4ddde 2489 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2490
bdd4ddde
JG
2491 mv_phy_reset(ap, class, deadline);
2492
2493 return 0;
2494}
2495
cc0680a5 2496static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2497{
cc0680a5 2498 struct ata_port *ap = link->ap;
bdd4ddde
JG
2499 u32 serr;
2500
2501 /* print link status */
cc0680a5 2502 sata_print_link_status(link);
31961943 2503
bdd4ddde 2504 /* clear SError */
cc0680a5
TH
2505 sata_scr_read(link, SCR_ERROR, &serr);
2506 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2507
2508 /* bail out if no device is present */
2509 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2510 DPRINTK("EXIT, no device\n");
2511 return;
9b358e30 2512 }
bdd4ddde
JG
2513
2514 /* set up device control */
2515 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2516}
2517
2518static void mv_error_handler(struct ata_port *ap)
2519{
2520 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2521 mv_hardreset, mv_postreset);
2522}
2523
bdd4ddde
JG
2524static void mv_eh_freeze(struct ata_port *ap)
2525{
f351b2d6 2526 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde
JG
2527 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2528 u32 tmp, mask;
2529 unsigned int shift;
2530
2531 /* FIXME: handle coalescing completion events properly */
2532
2533 shift = ap->port_no * 2;
2534 if (hc > 0)
2535 shift++;
2536
2537 mask = 0x3 << shift;
2538
2539 /* disable assertion of portN err, done events */
f351b2d6
SB
2540 tmp = readl(hpriv->main_mask_reg_addr);
2541 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2542}
2543
2544static void mv_eh_thaw(struct ata_port *ap)
2545{
f351b2d6
SB
2546 struct mv_host_priv *hpriv = ap->host->private_data;
2547 void __iomem *mmio = hpriv->base;
bdd4ddde
JG
2548 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2549 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2550 void __iomem *port_mmio = mv_ap_base(ap);
2551 u32 tmp, mask, hc_irq_cause;
2552 unsigned int shift, hc_port_no = ap->port_no;
2553
2554 /* FIXME: handle coalescing completion events properly */
2555
2556 shift = ap->port_no * 2;
2557 if (hc > 0) {
2558 shift++;
2559 hc_port_no -= 4;
2560 }
2561
2562 mask = 0x3 << shift;
2563
2564 /* clear EDMA errors on this port */
2565 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2566
2567 /* clear pending irq events */
2568 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2569 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2570 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2571 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2572
2573 /* enable assertion of portN err, done events */
f351b2d6
SB
2574 tmp = readl(hpriv->main_mask_reg_addr);
2575 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
31961943
BR
2576}
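/*
 * Worked example of the freeze/thaw masking above (illustrative): for
 * port 5 (on HC1), shift = 5 * 2 + 1 = 11, so mask = 0x3 << 11 covers
 * exactly that port's err/done pair in the main mask register; freeze
 * clears the pair, thaw restores it.
 */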
2577
05b308e1
BR
2578/**
2579 * mv_port_init - Perform some early initialization on a single port.
2580 * @port: libata data structure storing shadow register addresses
2581 * @port_mmio: base address of the port
2582 *
2583 * Initialize shadow register mmio addresses, clear outstanding
2584 * interrupts on the port, and unmask interrupts for the future
2585 * start of the port.
2586 *
2587 * LOCKING:
2588 * Inherited from caller.
2589 */
31961943 2590static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2591{
0d5ff566 2592 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2593 unsigned serr_ofs;
2594
8b260248 2595 /* PIO related setup
31961943
BR
2596 */
2597 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2598 port->error_addr =
31961943
BR
2599 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2600 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2601 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2602 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2603 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2604 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2605 port->status_addr =
31961943
BR
2606 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2607 /* special case: control/altstatus doesn't have ATA_REG_ address */
2608 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2609
2610 /* unused: */
8d9db2d2 2611 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2612
31961943
BR
2613 /* Clear any currently outstanding port interrupt conditions */
2614 serr_ofs = mv_scr_offset(SCR_ERROR);
2615 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2616 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2617
646a4da5
ML
2618 /* unmask all non-transient EDMA error interrupts */
2619 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2620
8b260248 2621 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2622 readl(port_mmio + EDMA_CFG_OFS),
2623 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2624 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2625}
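/*
 * Example of the shadow-register spacing set up above (illustrative):
 * the taskfile registers are exposed as 32-bit slots at sizeof(u32)
 * stride, so with ATA_REG_NSECT == 2,
 *
 *	port->nsect_addr == shd_base + 4 * 2 == shd_base + 0x08
 *
 * and PIO then readb()/writeb()s these addresses just as it would
 * legacy port I/O.
 */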
2626
4447d351 2627static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2628{
4447d351
TH
2629 struct pci_dev *pdev = to_pci_dev(host->dev);
2630 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2631 u32 hp_flags = hpriv->hp_flags;
2632
5796d1c4 2633 switch (board_idx) {
47c2b677
JG
2634 case chip_5080:
2635 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2636 hp_flags |= MV_HP_GEN_I;
47c2b677 2637
44c10138 2638 switch (pdev->revision) {
47c2b677
JG
2639 case 0x1:
2640 hp_flags |= MV_HP_ERRATA_50XXB0;
2641 break;
2642 case 0x3:
2643 hp_flags |= MV_HP_ERRATA_50XXB2;
2644 break;
2645 default:
2646 dev_printk(KERN_WARNING, &pdev->dev,
2647 "Applying 50XXB2 workarounds to unknown rev\n");
2648 hp_flags |= MV_HP_ERRATA_50XXB2;
2649 break;
2650 }
2651 break;
2652
bca1c4eb
JG
2653 case chip_504x:
2654 case chip_508x:
47c2b677 2655 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2656 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2657
44c10138 2658 switch (pdev->revision) {
47c2b677
JG
2659 case 0x0:
2660 hp_flags |= MV_HP_ERRATA_50XXB0;
2661 break;
2662 case 0x3:
2663 hp_flags |= MV_HP_ERRATA_50XXB2;
2664 break;
2665 default:
2666 dev_printk(KERN_WARNING, &pdev->dev,
2667 "Applying B2 workarounds to unknown rev\n");
2668 hp_flags |= MV_HP_ERRATA_50XXB2;
2669 break;
bca1c4eb
JG
2670 }
2671 break;
2672
2673 case chip_604x:
2674 case chip_608x:
47c2b677 2675 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2676 hp_flags |= MV_HP_GEN_II;
47c2b677 2677
44c10138 2678 switch (pdev->revision) {
47c2b677
JG
2679 case 0x7:
2680 hp_flags |= MV_HP_ERRATA_60X1B2;
2681 break;
2682 case 0x9:
2683 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2684 break;
2685 default:
2686 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2687 "Applying B2 workarounds to unknown rev\n");
2688 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2689 break;
2690 }
2691 break;
2692
e4e7b892 2693 case chip_7042:
02a121da 2694 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2695 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2696 (pdev->device == 0x2300 || pdev->device == 0x2310))
2697 {
4e520033
ML
2698 /*
2699 * Highpoint RocketRAID PCIe 23xx series cards:
2700 *
2701 * Unconfigured drives are treated as "Legacy"
2702 * by the BIOS, and it overwrites sector 8 with
2703 * a "Lgcy" metadata block prior to Linux boot.
2704 *
2705 * Configured drives (RAID or JBOD) leave sector 8
2706 * alone, but instead overwrite a high numbered
2707 * sector for the RAID metadata. This sector can
2708 * be determined exactly, by truncating the physical
2709 * drive capacity to a nice even GB value.
2710 *
2711 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2712 *
2713 * Warn the user, lest they think we're just buggy.
2714 */
2715 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2716 " BIOS CORRUPTS DATA on all attached drives,"
2717 " regardless of if/how they are configured."
2718 " BEWARE!\n");
2719 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2720 " use sectors 8-9 on \"Legacy\" drives,"
2721 " and avoid the final two gigabytes on"
2722 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2723 }
e4e7b892
JG
2724 case chip_6042:
2725 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2726 hp_flags |= MV_HP_GEN_IIE;
2727
44c10138 2728 switch (pdev->revision) {
e4e7b892
JG
2729 case 0x0:
2730 hp_flags |= MV_HP_ERRATA_XX42A0;
2731 break;
2732 case 0x1:
2733 hp_flags |= MV_HP_ERRATA_60X1C0;
2734 break;
2735 default:
2736 dev_printk(KERN_WARNING, &pdev->dev,
2737 "Applying 60X1C0 workarounds to unknown rev\n");
2738 hp_flags |= MV_HP_ERRATA_60X1C0;
2739 break;
2740 }
2741 break;
f351b2d6
SB
2742 case chip_soc:
2743 hpriv->ops = &mv_soc_ops;
2744 hp_flags |= MV_HP_ERRATA_60X1C0;
2745 break;
e4e7b892 2746
bca1c4eb 2747 default:
f351b2d6 2748 dev_printk(KERN_ERR, host->dev,
5796d1c4 2749 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2750 return 1;
2751 }
2752
2753 hpriv->hp_flags = hp_flags;
02a121da
ML
2754 if (hp_flags & MV_HP_PCIE) {
2755 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2756 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2757 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2758 } else {
2759 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2760 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2761 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2762 }
bca1c4eb
JG
2763
2764 return 0;
2765}
2766
05b308e1 2767/**
47c2b677 2768 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2769 * @host: ATA host to initialize
2770 * @board_idx: controller index
05b308e1
BR
2771 *
2772 * If possible, do an early global reset of the host. Then do
2773 * our port init and clear/unmask all/relevant host interrupts.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 */
4447d351 2778static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2779{
2780 int rc = 0, n_hc, port, hc;
4447d351 2781 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2782 void __iomem *mmio = hpriv->base;
47c2b677 2783
4447d351 2784 rc = mv_chip_id(host, board_idx);
bca1c4eb 2785 if (rc)
f351b2d6
SB
2786 goto done;
2787
2788 if (HAS_PCI(host)) {
2789 hpriv->main_cause_reg_addr = hpriv->base +
2790 HC_MAIN_IRQ_CAUSE_OFS;
2791 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2792 } else {
2793 hpriv->main_cause_reg_addr = hpriv->base +
2794 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2795 hpriv->main_mask_reg_addr = hpriv->base +
2796 HC_SOC_MAIN_IRQ_MASK_OFS;
2797 }
2798 /* global interrupt mask */
2799 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2800
4447d351 2801 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2802
4447d351 2803 for (port = 0; port < host->n_ports; port++)
47c2b677 2804 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2805
c9d39130 2806 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2807 if (rc)
20f733e7 2808 goto done;
20f733e7 2809
522479fb 2810 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2811 hpriv->ops->reset_bus(host, mmio);
47c2b677 2812 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2813
4447d351 2814 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2815 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2816 void __iomem *port_mmio = mv_port_base(mmio, port);
2817
2a47ce06 2818 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2819 ifctl |= (1 << 7); /* enable gen2i speed */
2820 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2821 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2822 }
2823
c9d39130 2824 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2825 }
2826
4447d351 2827 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2828 struct ata_port *ap = host->ports[port];
2a47ce06 2829 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2830
2831 mv_port_init(&ap->ioaddr, port_mmio);
2832
7bb3c529 2833#ifdef CONFIG_PCI
f351b2d6
SB
2834 if (HAS_PCI(host)) {
2835 unsigned int offset = port_mmio - mmio;
2836 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2837 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2838 }
7bb3c529 2839#endif
20f733e7
BR
2840 }
2841
2842 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2843 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2844
2845 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2846 "(before clear)=0x%08x\n", hc,
2847 readl(hc_mmio + HC_CFG_OFS),
2848 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2849
2850 /* Clear any currently outstanding hc interrupt conditions */
2851 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2852 }
2853
f351b2d6
SB
2854 if (HAS_PCI(host)) {
2855 /* Clear any currently outstanding host interrupt conditions */
2856 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2857
f351b2d6
SB
2858 /* and unmask interrupt generation for host regs */
2859 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2860 if (IS_GEN_I(hpriv))
2861 writelfl(~HC_MAIN_MASKED_IRQS_5,
2862 hpriv->main_mask_reg_addr);
2863 else
2864 writelfl(~HC_MAIN_MASKED_IRQS,
2865 hpriv->main_mask_reg_addr);
2866
2867 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2868 "PCI int cause/mask=0x%08x/0x%08x\n",
2869 readl(hpriv->main_cause_reg_addr),
2870 readl(hpriv->main_mask_reg_addr),
2871 readl(mmio + hpriv->irq_cause_ofs),
2872 readl(mmio + hpriv->irq_mask_ofs));
2873 } else {
2874 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2875 hpriv->main_mask_reg_addr);
2876 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2877 readl(hpriv->main_cause_reg_addr),
2878 readl(hpriv->main_mask_reg_addr));
2879 }
2880done:
2881 return rc;
2882}
fb621e2f 2883
f351b2d6
SB
2884/**
 2885 * mv_platform_probe - handle a positive probe of an SoC Marvell
2886 * host
2887 * @pdev: platform device found
2888 *
2889 * LOCKING:
2890 * Inherited from caller.
2891 */
2892static int mv_platform_probe(struct platform_device *pdev)
2893{
2894 static int printed_version;
2895 const struct mv_sata_platform_data *mv_platform_data;
2896 const struct ata_port_info *ppi[] =
2897 { &mv_port_info[chip_soc], NULL };
2898 struct ata_host *host;
2899 struct mv_host_priv *hpriv;
2900 struct resource *res;
2901 int n_ports, rc;
20f733e7 2902
f351b2d6
SB
2903 if (!printed_version++)
2904 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2905
f351b2d6
SB
2906 /*
 2907 * Simple resource validation ...
2908 */
2909 if (unlikely(pdev->num_resources != 2)) {
2910 dev_err(&pdev->dev, "invalid number of resources\n");
2911 return -EINVAL;
2912 }
2913
2914 /*
2915 * Get the register base first
2916 */
2917 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2918 if (res == NULL)
2919 return -EINVAL;
2920
2921 /* allocate host */
2922 mv_platform_data = pdev->dev.platform_data;
2923 n_ports = mv_platform_data->n_ports;
2924
2925 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2926 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2927
2928 if (!host || !hpriv)
2929 return -ENOMEM;
2930 host->private_data = hpriv;
2931 hpriv->n_ports = n_ports;
2932
2933 host->iomap = NULL;
2934 hpriv->base = ioremap(res->start, res->end - res->start + 1);
2935 hpriv->base -= MV_SATAHC0_REG_BASE;
2936
2937 /* initialize adapter */
2938 rc = mv_init_host(host, chip_soc);
2939 if (rc)
2940 return rc;
2941
2942 dev_printk(KERN_INFO, &pdev->dev,
2943 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2944 host->n_ports);
2945
2946 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2947 IRQF_SHARED, &mv6_sht);
2948}
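/*
 * Note on the base fixup above (illustrative arithmetic, using offsets
 * defined earlier in this file): the platform resource points at the
 * SATAHC0 block, while the PCI flavour maps the whole BAR, whose
 * SATAHC0 block sits at MV_SATAHC0_REG_BASE (0x20000).  Subtracting
 * that constant lets mv_port_base() and friends use identical offsets
 * on both bus types.
 */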
2949
 2950/**
2952 * mv_platform_remove - unplug a platform interface
2953 * @pdev: platform device
2954 *
2955 * A platform bus SATA device has been unplugged. Perform the needed
2956 * cleanup. Also called on module unload for any active devices.
2957 */
2958static int __devexit mv_platform_remove(struct platform_device *pdev)
2959{
2960 struct device *dev = &pdev->dev;
2961 struct ata_host *host = dev_get_drvdata(dev);
2962 struct mv_host_priv *hpriv = host->private_data;
2963 void __iomem *base = hpriv->base;
2964
2965 ata_host_detach(host);
2966 iounmap(base);
2967 return 0;
20f733e7
BR
2968}
2969
f351b2d6
SB
2970static struct platform_driver mv_platform_driver = {
2971 .probe = mv_platform_probe,
2972 .remove = __devexit_p(mv_platform_remove),
2973 .driver = {
2974 .name = DRV_NAME,
2975 .owner = THIS_MODULE,
2976 },
2977};
2978
2979
7bb3c529 2980#ifdef CONFIG_PCI
f351b2d6
SB
2981static int mv_pci_init_one(struct pci_dev *pdev,
2982 const struct pci_device_id *ent);
2983
7bb3c529
SB
2984
2985static struct pci_driver mv_pci_driver = {
2986 .name = DRV_NAME,
2987 .id_table = mv_pci_tbl,
f351b2d6 2988 .probe = mv_pci_init_one,
7bb3c529
SB
2989 .remove = ata_pci_remove_one,
2990};
2991
2992/*
2993 * module options
2994 */
2995static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2996
2997
2998/* move to PCI layer or libata core? */
2999static int pci_go_64(struct pci_dev *pdev)
3000{
3001 int rc;
3002
3003 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3004 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3005 if (rc) {
3006 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3007 if (rc) {
3008 dev_printk(KERN_ERR, &pdev->dev,
3009 "64-bit DMA enable failed\n");
3010 return rc;
3011 }
3012 }
3013 } else {
3014 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3015 if (rc) {
3016 dev_printk(KERN_ERR, &pdev->dev,
3017 "32-bit DMA enable failed\n");
3018 return rc;
3019 }
3020 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3021 if (rc) {
3022 dev_printk(KERN_ERR, &pdev->dev,
3023 "32-bit consistent DMA enable failed\n");
3024 return rc;
3025 }
3026 }
3027
3028 return rc;
3029}
3030
05b308e1
BR
3031/**
3032 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 3033 * @host: ATA host to print info about
05b308e1
BR
3034 *
3035 * FIXME: complete this.
3036 *
3037 * LOCKING:
3038 * Inherited from caller.
3039 */
4447d351 3040static void mv_print_info(struct ata_host *host)
31961943 3041{
4447d351
TH
3042 struct pci_dev *pdev = to_pci_dev(host->dev);
3043 struct mv_host_priv *hpriv = host->private_data;
44c10138 3044 u8 scc;
c1e4fe71 3045 const char *scc_s, *gen;
31961943
BR
3046
3047 /* Use this to determine the HW stepping of the chip so we know
 3048 * which errata to work around
3049 */
31961943
BR
3050 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3051 if (scc == 0)
3052 scc_s = "SCSI";
3053 else if (scc == 0x01)
3054 scc_s = "RAID";
3055 else
c1e4fe71
JG
3056 scc_s = "?";
3057
3058 if (IS_GEN_I(hpriv))
3059 gen = "I";
3060 else if (IS_GEN_II(hpriv))
3061 gen = "II";
3062 else if (IS_GEN_IIE(hpriv))
3063 gen = "IIE";
3064 else
3065 gen = "?";
31961943 3066
a9524a76 3067 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
3068 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3069 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
3070 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3071}
3072
da2fa9ba
ML
3073static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3074{
3075 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3076 MV_CRQB_Q_SZ, 0);
3077 if (!hpriv->crqb_pool)
3078 return -ENOMEM;
3079
3080 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3081 MV_CRPB_Q_SZ, 0);
3082 if (!hpriv->crpb_pool)
3083 return -ENOMEM;
3084
3085 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3086 MV_SG_TBL_SZ, 0);
3087 if (!hpriv->sg_tbl_pool)
3088 return -ENOMEM;
3089
3090 return 0;
3091}
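/*
 * Illustrative use of the pools created above (a sketch of what the
 * port-start path does with them, not a verbatim copy; the helper name
 * is hypothetical).  The dmam_ variants tie pool lifetime to the
 * device, so no explicit dma_pool_destroy() is needed on teardown.
 */
#if 0	/* example only, never compiled */
static int example_alloc_crqb(struct mv_host_priv *hpriv,
			      struct mv_port_priv *pp)
{
	dma_addr_t dma;
	void *mem = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);

	if (!mem)
		return -ENOMEM;
	pp->crqb = mem;		/* CPU view of the command request queue */
	pp->crqb_dma = dma;	/* bus address handed to the EDMA engine */
	return 0;
}
#endif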
3092
05b308e1 3093/**
f351b2d6 3094 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
05b308e1
BR
3095 * @pdev: PCI device found
3096 * @ent: PCI device ID entry for the matched host
3097 *
3098 * LOCKING:
3099 * Inherited from caller.
3100 */
f351b2d6
SB
3101static int mv_pci_init_one(struct pci_dev *pdev,
3102 const struct pci_device_id *ent)
20f733e7 3103{
2dcb407e 3104 static int printed_version;
20f733e7 3105 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3106 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3107 struct ata_host *host;
3108 struct mv_host_priv *hpriv;
3109 int n_ports, rc;
20f733e7 3110
a9524a76
JG
3111 if (!printed_version++)
3112 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3113
4447d351
TH
3114 /* allocate host */
3115 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3116
3117 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3118 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3119 if (!host || !hpriv)
3120 return -ENOMEM;
3121 host->private_data = hpriv;
f351b2d6 3122 hpriv->n_ports = n_ports;
4447d351
TH
3123
3124 /* acquire resources */
24dc5f33
TH
3125 rc = pcim_enable_device(pdev);
3126 if (rc)
20f733e7 3127 return rc;
20f733e7 3128
0d5ff566
TH
3129 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3130 if (rc == -EBUSY)
24dc5f33 3131 pcim_pin_device(pdev);
0d5ff566 3132 if (rc)
24dc5f33 3133 return rc;
4447d351 3134 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3135 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3136
d88184fb
JG
3137 rc = pci_go_64(pdev);
3138 if (rc)
3139 return rc;
3140
da2fa9ba
ML
3141 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3142 if (rc)
3143 return rc;
3144
20f733e7 3145 /* initialize adapter */
4447d351 3146 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3147 if (rc)
3148 return rc;
20f733e7 3149
31961943 3150 /* Enable interrupts */
6a59dcf8 3151 if (msi && pci_enable_msi(pdev))
31961943 3152 pci_intx(pdev, 1);
20f733e7 3153
31961943 3154 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3155 mv_print_info(host);
20f733e7 3156
4447d351 3157 pci_set_master(pdev);
ea8b4db9 3158 pci_try_set_mwi(pdev);
4447d351 3159 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3160 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3161}
7bb3c529 3162#endif
20f733e7 3163
f351b2d6
SB
3164static int mv_platform_probe(struct platform_device *pdev);
3165static int __devexit mv_platform_remove(struct platform_device *pdev);
3166
20f733e7
BR
3167static int __init mv_init(void)
3168{
7bb3c529
SB
3169 int rc = -ENODEV;
3170#ifdef CONFIG_PCI
3171 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3172 if (rc < 0)
3173 return rc;
3174#endif
3175 rc = platform_driver_register(&mv_platform_driver);
3176
3177#ifdef CONFIG_PCI
3178 if (rc < 0)
3179 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3180#endif
3181 return rc;
20f733e7
BR
3182}
3183
3184static void __exit mv_exit(void)
3185{
7bb3c529 3186#ifdef CONFIG_PCI
20f733e7 3187 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3188#endif
f351b2d6 3189 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3190}
3191
3192MODULE_AUTHOR("Brett Russ");
3193MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3194MODULE_LICENSE("GPL");
3195MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3196MODULE_VERSION(DRV_VERSION);
3197
7bb3c529 3198#ifdef CONFIG_PCI
ddef9bb3
JG
3199module_param(msi, int, 0444);
3200MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
7bb3c529 3201#endif
ddef9bb3 3202
20f733e7
BR
3203module_init(mv_init);
3204module_exit(mv_exit);