1/*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
39
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
44 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
 52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
62*/
63
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
72#include <linux/dmapool.h>
73#include <linux/dma-mapping.h>
74#include <linux/device.h>
75#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
77#include <scsi/scsi_host.h>
78#include <scsi/scsi_cmnd.h>
79#include <scsi/scsi_device.h>
80#include <linux/libata.h>
81
82#define DRV_NAME "sata_mv"
83#define DRV_VERSION "1.20"
84
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
140
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
155 /* PCI interface registers */
156
157 PCI_COMMAND_OFS = 0xc00,
158
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
182
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
260
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
268
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
277
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
312
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
328
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
331
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
343
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
348};
349
350#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
360
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368};
369
370enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
379};
380
381/* Command ReQuest Block: 32B */
382struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
387};
388
389struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
395};
396
397/* Command ResPonse Block: 8B */
398struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
410};
411
412struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
423 u32 pp_flags;
424};
425
426struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429};
430
431struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
450};
451
452struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462};
463
464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473static void mv_error_handler(struct ata_port *ap);
474static void mv_eh_freeze(struct ata_port *ap);
475static void mv_eh_thaw(struct ata_port *ap);
476static void mv6_dev_config(struct ata_device *dev);
477
478static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
480static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
483static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
485static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
486static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
487
488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
490static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
493static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
495static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
496static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
505static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
506static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
508static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510static int __mv_stop_dma(struct ata_port *ap);
511
512/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
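/* A minimal worked example of that worst case, assuming every request is
 * built by mv_fill_sg() below: an ePRD length field is only 16 bits wide, so
 * a single scatterlist segment that crosses a 64K boundary must be split into
 * two ePRDs. Advertising sg_tablesize = MV_MAX_SG_CT / 2 = 128 entries
 * therefore guarantees that even if every segment splits, no more than
 * MV_MAX_SG_CT (256) ePRDs are needed, which is what MV_SG_TBL_SZ can hold.
 */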
516static struct scsi_host_template mv5_sht = {
517 .module = THIS_MODULE,
518 .name = DRV_NAME,
519 .ioctl = ata_scsi_ioctl,
520 .queuecommand = ata_scsi_queuecmd,
521 .can_queue = ATA_DEF_QUEUE,
522 .this_id = ATA_SHT_THIS_ID,
523 .sg_tablesize = MV_MAX_SG_CT / 2,
524 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
525 .emulated = ATA_SHT_EMULATED,
526 .use_clustering = 1,
527 .proc_name = DRV_NAME,
528 .dma_boundary = MV_DMA_BOUNDARY,
529 .slave_configure = ata_scsi_slave_config,
530 .slave_destroy = ata_scsi_slave_destroy,
531 .bios_param = ata_std_bios_param,
532};
533
534static struct scsi_host_template mv6_sht = {
535 .module = THIS_MODULE,
536 .name = DRV_NAME,
537 .ioctl = ata_scsi_ioctl,
538 .queuecommand = ata_scsi_queuecmd,
539 .change_queue_depth = ata_scsi_change_queue_depth,
540 .can_queue = MV_MAX_Q_DEPTH - 1,
541 .this_id = ATA_SHT_THIS_ID,
542 .sg_tablesize = MV_MAX_SG_CT / 2,
543 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
544 .emulated = ATA_SHT_EMULATED,
545 .use_clustering = 1,
546 .proc_name = DRV_NAME,
547 .dma_boundary = MV_DMA_BOUNDARY,
548 .slave_configure = ata_scsi_slave_config,
549 .slave_destroy = ata_scsi_slave_destroy,
550 .bios_param = ata_std_bios_param,
551};
552
553static const struct ata_port_operations mv5_ops = {
554 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read,
556 .check_status = ata_check_status,
557 .exec_command = ata_exec_command,
558 .dev_select = ata_std_dev_select,
559
560 .qc_prep = mv_qc_prep,
561 .qc_issue = mv_qc_issue,
562 .data_xfer = ata_data_xfer,
563
564 .irq_clear = ata_noop_irq_clear,
565 .irq_on = ata_irq_on,
566
567 .error_handler = mv_error_handler,
568 .freeze = mv_eh_freeze,
569 .thaw = mv_eh_thaw,
570
571 .scr_read = mv5_scr_read,
572 .scr_write = mv5_scr_write,
573
574 .port_start = mv_port_start,
575 .port_stop = mv_port_stop,
576};
577
578static const struct ata_port_operations mv6_ops = {
579 .dev_config = mv6_dev_config,
580 .tf_load = ata_tf_load,
581 .tf_read = ata_tf_read,
582 .check_status = ata_check_status,
583 .exec_command = ata_exec_command,
584 .dev_select = ata_std_dev_select,
585
586 .qc_prep = mv_qc_prep,
587 .qc_issue = mv_qc_issue,
588 .data_xfer = ata_data_xfer,
589
590 .irq_clear = ata_noop_irq_clear,
591 .irq_on = ata_irq_on,
592
593 .error_handler = mv_error_handler,
594 .freeze = mv_eh_freeze,
595 .thaw = mv_eh_thaw,
596 .qc_defer = ata_std_qc_defer,
597
598 .scr_read = mv_scr_read,
599 .scr_write = mv_scr_write,
600
601 .port_start = mv_port_start,
602 .port_stop = mv_port_stop,
603};
604
605static const struct ata_port_operations mv_iie_ops = {
606 .tf_load = ata_tf_load,
607 .tf_read = ata_tf_read,
608 .check_status = ata_check_status,
609 .exec_command = ata_exec_command,
610 .dev_select = ata_std_dev_select,
611
612 .qc_prep = mv_qc_prep_iie,
613 .qc_issue = mv_qc_issue,
614 .data_xfer = ata_data_xfer,
615
616 .irq_clear = ata_noop_irq_clear,
617 .irq_on = ata_irq_on,
618
619 .error_handler = mv_error_handler,
620 .freeze = mv_eh_freeze,
621 .thaw = mv_eh_thaw,
622 .qc_defer = ata_std_qc_defer,
623
624 .scr_read = mv_scr_read,
625 .scr_write = mv_scr_write,
626
627 .port_start = mv_port_start,
628 .port_stop = mv_port_stop,
629};
630
631static const struct ata_port_info mv_port_info[] = {
632 { /* chip_504x */
633 .flags = MV_COMMON_FLAGS,
634 .pio_mask = 0x1f, /* pio0-4 */
635 .udma_mask = ATA_UDMA6,
636 .port_ops = &mv5_ops,
637 },
638 { /* chip_508x */
639 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
640 .pio_mask = 0x1f, /* pio0-4 */
641 .udma_mask = ATA_UDMA6,
642 .port_ops = &mv5_ops,
643 },
644 { /* chip_5080 */
645 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
646 .pio_mask = 0x1f, /* pio0-4 */
647 .udma_mask = ATA_UDMA6,
648 .port_ops = &mv5_ops,
649 },
650 { /* chip_604x */
651 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
652 ATA_FLAG_NCQ,
653 .pio_mask = 0x1f, /* pio0-4 */
654 .udma_mask = ATA_UDMA6,
655 .port_ops = &mv6_ops,
656 },
657 { /* chip_608x */
658 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
659 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
660 .pio_mask = 0x1f, /* pio0-4 */
661 .udma_mask = ATA_UDMA6,
662 .port_ops = &mv6_ops,
663 },
664 { /* chip_6042 */
665 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
666 ATA_FLAG_NCQ,
667 .pio_mask = 0x1f, /* pio0-4 */
668 .udma_mask = ATA_UDMA6,
669 .port_ops = &mv_iie_ops,
670 },
671 { /* chip_7042 */
672 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
673 ATA_FLAG_NCQ,
674 .pio_mask = 0x1f, /* pio0-4 */
675 .udma_mask = ATA_UDMA6,
676 .port_ops = &mv_iie_ops,
677 },
678 { /* chip_soc */
679 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
680 .pio_mask = 0x1f, /* pio0-4 */
681 .udma_mask = ATA_UDMA6,
682 .port_ops = &mv_iie_ops,
683 },
684};
685
686static const struct pci_device_id mv_pci_tbl[] = {
687 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
688 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
689 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
690 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
691 /* RocketRAID 1740/174x have different identifiers */
692 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
693 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
694
695 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
696 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
697 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
698 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
699 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
700
701 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
702
703 /* Adaptec 1430SA */
704 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
705
706 /* Marvell 7042 support */
707 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
708
709 /* Highpoint RocketRAID PCIe series */
710 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
711 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
712
713 { } /* terminate list */
714};
715
716static const struct mv_hw_ops mv5xxx_ops = {
717 .phy_errata = mv5_phy_errata,
718 .enable_leds = mv5_enable_leds,
719 .read_preamp = mv5_read_preamp,
720 .reset_hc = mv5_reset_hc,
721 .reset_flash = mv5_reset_flash,
722 .reset_bus = mv5_reset_bus,
723};
724
725static const struct mv_hw_ops mv6xxx_ops = {
726 .phy_errata = mv6_phy_errata,
727 .enable_leds = mv6_enable_leds,
728 .read_preamp = mv6_read_preamp,
729 .reset_hc = mv6_reset_hc,
730 .reset_flash = mv6_reset_flash,
731 .reset_bus = mv_reset_pci_bus,
732};
733
734static const struct mv_hw_ops mv_soc_ops = {
735 .phy_errata = mv6_phy_errata,
736 .enable_leds = mv_soc_enable_leds,
737 .read_preamp = mv_soc_read_preamp,
738 .reset_hc = mv_soc_reset_hc,
739 .reset_flash = mv_soc_reset_flash,
740 .reset_bus = mv_soc_reset_bus,
741};
742
743/*
744 * Functions
745 */
746
747static inline void writelfl(unsigned long data, void __iomem *addr)
748{
749 writel(data, addr);
750 (void) readl(addr); /* flush to avoid PCI posted write */
751}
752
753static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
754{
755 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
756}
757
758static inline unsigned int mv_hc_from_port(unsigned int port)
759{
760 return port >> MV_PORT_HC_SHIFT;
761}
762
763static inline unsigned int mv_hardport_from_port(unsigned int port)
764{
765 return port & MV_PORT_MASK;
766}
767
768static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
769 unsigned int port)
770{
771 return mv_hc_base(base, mv_hc_from_port(port));
772}
773
774static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
775{
776 return mv_hc_base_from_port(base, port) +
777 MV_SATAHC_ARBTR_REG_SZ +
778 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
779}
780
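/* Worked example, using only the register-map constants defined above:
 * port 5 -> hc = 5 >> MV_PORT_HC_SHIFT = 1, hardport = 5 & MV_PORT_MASK = 1,
 * so mv_port_base(base, 5)
 *   = base + 0x20000 + 1 * 0x10000   (mv_hc_base for HC1)
 *     + 0x2000                       (arbiter region)
 *     + 1 * 0x2000                   (hard port 1)
 *   = base + 0x34000.
 */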
781static inline void __iomem *mv_host_base(struct ata_host *host)
782{
783 struct mv_host_priv *hpriv = host->private_data;
784 return hpriv->base;
785}
786
787static inline void __iomem *mv_ap_base(struct ata_port *ap)
788{
789 return mv_port_base(mv_host_base(ap->host), ap->port_no);
790}
791
792static inline int mv_get_hc_count(unsigned long port_flags)
793{
794 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
795}
796
797static void mv_set_edma_ptrs(void __iomem *port_mmio,
798 struct mv_host_priv *hpriv,
799 struct mv_port_priv *pp)
800{
801 u32 index;
802
803 /*
804 * initialize request queue
805 */
806 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
807
808 WARN_ON(pp->crqb_dma & 0x3ff);
809 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
810 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
811 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
812
813 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
814 writelfl((pp->crqb_dma & 0xffffffff) | index,
815 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
816 else
817 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
818
819 /*
820 * initialize response queue
821 */
822 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
823
824 WARN_ON(pp->crpb_dma & 0xff);
825 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
826
827 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
828 writelfl((pp->crpb_dma & 0xffffffff) | index,
829 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
830 else
831 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
832
833 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
834 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
835}
836
837/**
838 * mv_start_dma - Enable eDMA engine
839 * @base: port base address
840 * @pp: port private data
841 *
842 * Verify the local cache of the eDMA state is accurate with a
843 * WARN_ON.
844 *
845 * LOCKING:
846 * Inherited from caller.
847 */
848static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
849 struct mv_port_priv *pp, u8 protocol)
850{
851 int want_ncq = (protocol == ATA_PROT_NCQ);
852
853 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
854 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
855 if (want_ncq != using_ncq)
856 __mv_stop_dma(ap);
857 }
858 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
859 struct mv_host_priv *hpriv = ap->host->private_data;
860 int hard_port = mv_hardport_from_port(ap->port_no);
861 void __iomem *hc_mmio = mv_hc_base_from_port(
862 mv_host_base(ap->host), hard_port);
863 u32 hc_irq_cause, ipending;
864
865 /* clear EDMA event indicators, if any */
866 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
867
868 /* clear EDMA interrupt indicator, if any */
869 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
870 ipending = (DEV_IRQ << hard_port) |
871 (CRPB_DMA_DONE << hard_port);
872 if (hc_irq_cause & ipending) {
873 writelfl(hc_irq_cause & ~ipending,
874 hc_mmio + HC_IRQ_CAUSE_OFS);
875 }
876
877 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
878
879 /* clear FIS IRQ Cause */
880 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
881
882 mv_set_edma_ptrs(port_mmio, hpriv, pp);
883
884 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
885 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
886 }
887 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
888}
889
890/**
891 * __mv_stop_dma - Disable eDMA engine
892 * @ap: ATA channel to manipulate
893 *
894 * Verify the local cache of the eDMA state is accurate with a
895 * WARN_ON.
896 *
897 * LOCKING:
898 * Inherited from caller.
899 */
900static int __mv_stop_dma(struct ata_port *ap)
901{
902 void __iomem *port_mmio = mv_ap_base(ap);
903 struct mv_port_priv *pp = ap->private_data;
904 u32 reg;
905 int i, err = 0;
906
907 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
908 /* Disable EDMA if active. The disable bit auto clears.
909 */
910 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
911 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
912 } else {
913 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
914 }
915
916 /* now properly wait for the eDMA to stop */
917 for (i = 1000; i > 0; i--) {
918 reg = readl(port_mmio + EDMA_CMD_OFS);
919 if (!(reg & EDMA_EN))
920 break;
921
922 udelay(100);
923 }
924
925 if (reg & EDMA_EN) {
926 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
927 err = -EIO;
928 }
929
930 return err;
931}
932
933static int mv_stop_dma(struct ata_port *ap)
934{
935 unsigned long flags;
936 int rc;
937
938 spin_lock_irqsave(&ap->host->lock, flags);
939 rc = __mv_stop_dma(ap);
940 spin_unlock_irqrestore(&ap->host->lock, flags);
941
942 return rc;
943}
944
945#ifdef ATA_DEBUG
946static void mv_dump_mem(void __iomem *start, unsigned bytes)
947{
948 int b, w;
949 for (b = 0; b < bytes; ) {
950 DPRINTK("%p: ", start + b);
951 for (w = 0; b < bytes && w < 4; w++) {
952 printk("%08x ", readl(start + b));
953 b += sizeof(u32);
954 }
955 printk("\n");
956 }
957}
958#endif
959
960static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
961{
962#ifdef ATA_DEBUG
963 int b, w;
964 u32 dw;
965 for (b = 0; b < bytes; ) {
966 DPRINTK("%02x: ", b);
967 for (w = 0; b < bytes && w < 4; w++) {
968 (void) pci_read_config_dword(pdev, b, &dw);
969 printk("%08x ", dw);
970 b += sizeof(u32);
971 }
972 printk("\n");
973 }
974#endif
975}
976static void mv_dump_all_regs(void __iomem *mmio_base, int port,
977 struct pci_dev *pdev)
978{
979#ifdef ATA_DEBUG
980 void __iomem *hc_base = mv_hc_base(mmio_base,
981 port >> MV_PORT_HC_SHIFT);
982 void __iomem *port_base;
983 int start_port, num_ports, p, start_hc, num_hcs, hc;
984
985 if (0 > port) {
986 start_hc = start_port = 0;
987 num_ports = 8; /* shld be benign for 4 port devs */
988 num_hcs = 2;
989 } else {
990 start_hc = port >> MV_PORT_HC_SHIFT;
991 start_port = port;
992 num_ports = num_hcs = 1;
993 }
994 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
995 num_ports > 1 ? num_ports - 1 : start_port);
996
997 if (NULL != pdev) {
998 DPRINTK("PCI config space regs:\n");
999 mv_dump_pci_cfg(pdev, 0x68);
1000 }
1001 DPRINTK("PCI regs:\n");
1002 mv_dump_mem(mmio_base+0xc00, 0x3c);
1003 mv_dump_mem(mmio_base+0xd00, 0x34);
1004 mv_dump_mem(mmio_base+0xf00, 0x4);
1005 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1006 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1007 hc_base = mv_hc_base(mmio_base, hc);
1008 DPRINTK("HC regs (HC %i):\n", hc);
1009 mv_dump_mem(hc_base, 0x1c);
1010 }
1011 for (p = start_port; p < start_port + num_ports; p++) {
1012 port_base = mv_port_base(mmio_base, p);
1013 DPRINTK("EDMA regs (port %i):\n", p);
1014 mv_dump_mem(port_base, 0x54);
1015 DPRINTK("SATA regs (port %i):\n", p);
1016 mv_dump_mem(port_base+0x300, 0x60);
1017 }
1018#endif
1019}
1020
1021static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1022{
1023 unsigned int ofs;
1024
1025 switch (sc_reg_in) {
1026 case SCR_STATUS:
1027 case SCR_CONTROL:
1028 case SCR_ERROR:
1029 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1030 break;
1031 case SCR_ACTIVE:
1032 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1033 break;
1034 default:
1035 ofs = 0xffffffffU;
1036 break;
1037 }
1038 return ofs;
1039}
1040
1041static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1042{
1043 unsigned int ofs = mv_scr_offset(sc_reg_in);
1044
1045 if (ofs != 0xffffffffU) {
1046 *val = readl(mv_ap_base(ap) + ofs);
1047 return 0;
1048 } else
1049 return -EINVAL;
1050}
1051
1052static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1053{
1054 unsigned int ofs = mv_scr_offset(sc_reg_in);
1055
1056 if (ofs != 0xffffffffU) {
1057 writelfl(val, mv_ap_base(ap) + ofs);
1058 return 0;
1059 } else
1060 return -EINVAL;
1061}
1062
1063static void mv6_dev_config(struct ata_device *adev)
1064{
1065 /*
1066 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1067 * See mv_qc_prep() for more info.
1068 */
1069 if (adev->flags & ATA_DFLAG_NCQ)
1070 if (adev->max_sectors > ATA_MAX_SECTORS)
1071 adev->max_sectors = ATA_MAX_SECTORS;
1072}
1073
1074static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1075 void __iomem *port_mmio, int want_ncq)
1076{
1077 u32 cfg;
1078
1079 /* set up non-NCQ EDMA configuration */
1080 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1081
1082 if (IS_GEN_I(hpriv))
1083 cfg |= (1 << 8); /* enab config burst size mask */
1084
1085 else if (IS_GEN_II(hpriv))
1086 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1087
1088 else if (IS_GEN_IIE(hpriv)) {
1089 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1090 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1091 cfg |= (1 << 18); /* enab early completion */
1092 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1093 }
1094
1095 if (want_ncq) {
1096 cfg |= EDMA_CFG_NCQ;
1097 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1098 } else
1099 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1100
1101 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1102}
1103
1104static void mv_port_free_dma_mem(struct ata_port *ap)
1105{
1106 struct mv_host_priv *hpriv = ap->host->private_data;
1107 struct mv_port_priv *pp = ap->private_data;
1108 int tag;
1109
1110 if (pp->crqb) {
1111 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1112 pp->crqb = NULL;
1113 }
1114 if (pp->crpb) {
1115 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1116 pp->crpb = NULL;
1117 }
1118 /*
1119 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1120 * For later hardware, we have one unique sg_tbl per NCQ tag.
1121 */
1122 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1123 if (pp->sg_tbl[tag]) {
1124 if (tag == 0 || !IS_GEN_I(hpriv))
1125 dma_pool_free(hpriv->sg_tbl_pool,
1126 pp->sg_tbl[tag],
1127 pp->sg_tbl_dma[tag]);
1128 pp->sg_tbl[tag] = NULL;
1129 }
1130 }
1131}
1132
1133/**
1134 * mv_port_start - Port specific init/start routine.
1135 * @ap: ATA channel to manipulate
1136 *
1137 * Allocate and point to DMA memory, init port private memory,
1138 * zero indices.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143static int mv_port_start(struct ata_port *ap)
1144{
1145 struct device *dev = ap->host->dev;
1146 struct mv_host_priv *hpriv = ap->host->private_data;
1147 struct mv_port_priv *pp;
1148 void __iomem *port_mmio = mv_ap_base(ap);
1149 unsigned long flags;
1150 int tag;
1151
1152 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1153 if (!pp)
1154 return -ENOMEM;
1155 ap->private_data = pp;
1156
1157 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1158 if (!pp->crqb)
1159 return -ENOMEM;
1160 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1161
1162 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1163 if (!pp->crpb)
1164 goto out_port_free_dma_mem;
1165 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1166
1167 /*
1168 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1169 * For later hardware, we need one unique sg_tbl per NCQ tag.
1170 */
1171 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1172 if (tag == 0 || !IS_GEN_I(hpriv)) {
1173 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1174 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1175 if (!pp->sg_tbl[tag])
1176 goto out_port_free_dma_mem;
1177 } else {
1178 pp->sg_tbl[tag] = pp->sg_tbl[0];
1179 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1180 }
1181 }
1182
1183 spin_lock_irqsave(&ap->host->lock, flags);
1184
1185 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1186 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1187
1188 spin_unlock_irqrestore(&ap->host->lock, flags);
1189
1190 /* Don't turn on EDMA here...do it before DMA commands only. Else
1191 * we'll be unable to send non-data, PIO, etc due to restricted access
1192 * to shadow regs.
1193 */
1194 return 0;
1195
1196out_port_free_dma_mem:
1197 mv_port_free_dma_mem(ap);
1198 return -ENOMEM;
1199}
1200
1201/**
1202 * mv_port_stop - Port specific cleanup/stop routine.
1203 * @ap: ATA channel to manipulate
1204 *
1205 * Stop DMA, cleanup port memory.
1206 *
1207 * LOCKING:
1208 * This routine uses the host lock to protect the DMA stop.
1209 */
1210static void mv_port_stop(struct ata_port *ap)
1211{
1212 mv_stop_dma(ap);
1213 mv_port_free_dma_mem(ap);
1214}
1215
1216/**
1217 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1218 * @qc: queued command whose SG list to source from
1219 *
1220 * Populate the SG list and mark the last entry.
1221 *
1222 * LOCKING:
1223 * Inherited from caller.
1224 */
1225static void mv_fill_sg(struct ata_queued_cmd *qc)
1226{
1227 struct mv_port_priv *pp = qc->ap->private_data;
1228 struct scatterlist *sg;
1229 struct mv_sg *mv_sg, *last_sg = NULL;
1230 unsigned int si;
1231
1232 mv_sg = pp->sg_tbl[qc->tag];
1233 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1234 dma_addr_t addr = sg_dma_address(sg);
1235 u32 sg_len = sg_dma_len(sg);
1236
1237 while (sg_len) {
1238 u32 offset = addr & 0xffff;
1239 u32 len = sg_len;
1240
1241 if ((offset + sg_len > 0x10000))
1242 len = 0x10000 - offset;
1243
1244 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1245 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1246 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1247
1248 sg_len -= len;
1249 addr += len;
1250
1251 last_sg = mv_sg;
1252 mv_sg++;
1253 }
1254 }
1255
1256 if (likely(last_sg))
1257 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1258}
1259
1260static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1261{
1262 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1263 (last ? CRQB_CMD_LAST : 0);
1264 *cmdw = cpu_to_le16(tmp);
1265}
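/* Sketch of the packed halfword, per the CRQB_CMD_* constants above and
 * assuming the standard shadow-register addresses (0-7) passed in below:
 *   bits 7:0   register data/value
 *   bits 10:8  register address (addr << CRQB_CMD_ADDR_SHIFT)
 *   bits 12:11 0b10, i.e. CRQB_CMD_CS
 *   bit  15    CRQB_CMD_LAST, set only on the final ata_cmd[] entry
 */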
1266
1267/**
1268 * mv_qc_prep - Host specific command preparation.
1269 * @qc: queued command to prepare
1270 *
1271 * This routine simply redirects to the general purpose routine
1272 * if command is not DMA. Else, it handles prep of the CRQB
1273 * (command request block), does some sanity checking, and calls
1274 * the SG load routine.
1275 *
1276 * LOCKING:
1277 * Inherited from caller.
1278 */
1279static void mv_qc_prep(struct ata_queued_cmd *qc)
1280{
1281 struct ata_port *ap = qc->ap;
1282 struct mv_port_priv *pp = ap->private_data;
1283 __le16 *cw;
1284 struct ata_taskfile *tf;
1285 u16 flags = 0;
1286 unsigned in_index;
1287
1288 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1289 (qc->tf.protocol != ATA_PROT_NCQ))
1290 return;
1291
1292 /* Fill in command request block
1293 */
1294 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1295 flags |= CRQB_FLAG_READ;
1296 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1297 flags |= qc->tag << CRQB_TAG_SHIFT;
1298
1299 /* get current queue index from software */
1300 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1301
1302 pp->crqb[in_index].sg_addr =
1303 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1304 pp->crqb[in_index].sg_addr_hi =
1305 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1306 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1307
1308 cw = &pp->crqb[in_index].ata_cmd[0];
1309 tf = &qc->tf;
1310
1311 /* Sadly, the CRQB cannot accommodate all registers--there are
1312 * only 11 bytes...so we must pick and choose required
1313 * registers based on the command. So, we drop feature and
1314 * hob_feature for [RW] DMA commands, but they are needed for
1315 * NCQ. NCQ will drop hob_nsect.
1316 */
1317 switch (tf->command) {
1318 case ATA_CMD_READ:
1319 case ATA_CMD_READ_EXT:
1320 case ATA_CMD_WRITE:
1321 case ATA_CMD_WRITE_EXT:
1322 case ATA_CMD_WRITE_FUA_EXT:
1323 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1324 break;
1325 case ATA_CMD_FPDMA_READ:
1326 case ATA_CMD_FPDMA_WRITE:
1327 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1328 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1329 break;
1330 default:
1331 /* The only other commands EDMA supports in non-queued and
1332 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1333 * of which are defined/used by Linux. If we get here, this
1334 * driver needs work.
1335 *
1336 * FIXME: modify libata to give qc_prep a return value and
1337 * return error here.
1338 */
1339 BUG_ON(tf->command);
1340 break;
1341 }
1342 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1343 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1344 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1345 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1346 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1347 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1348 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1349 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1350 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1351
1352 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1353 return;
1354 mv_fill_sg(qc);
1355}
1356
1357/**
1358 * mv_qc_prep_iie - Host specific command preparation.
1359 * @qc: queued command to prepare
1360 *
1361 * This routine simply redirects to the general purpose routine
1362 * if command is not DMA. Else, it handles prep of the CRQB
1363 * (command request block), does some sanity checking, and calls
1364 * the SG load routine.
1365 *
1366 * LOCKING:
1367 * Inherited from caller.
1368 */
1369static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1370{
1371 struct ata_port *ap = qc->ap;
1372 struct mv_port_priv *pp = ap->private_data;
1373 struct mv_crqb_iie *crqb;
1374 struct ata_taskfile *tf;
1375 unsigned in_index;
1376 u32 flags = 0;
1377
1378 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1379 (qc->tf.protocol != ATA_PROT_NCQ))
1380 return;
1381
1382 /* Fill in Gen IIE command request block
1383 */
1384 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1385 flags |= CRQB_FLAG_READ;
1386
1387 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1388 flags |= qc->tag << CRQB_TAG_SHIFT;
1389 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1390
1391 /* get current queue index from software */
1392 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1393
1394 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1395 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1396 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1397 crqb->flags = cpu_to_le32(flags);
1398
1399 tf = &qc->tf;
1400 crqb->ata_cmd[0] = cpu_to_le32(
1401 (tf->command << 16) |
1402 (tf->feature << 24)
1403 );
1404 crqb->ata_cmd[1] = cpu_to_le32(
1405 (tf->lbal << 0) |
1406 (tf->lbam << 8) |
1407 (tf->lbah << 16) |
1408 (tf->device << 24)
1409 );
1410 crqb->ata_cmd[2] = cpu_to_le32(
1411 (tf->hob_lbal << 0) |
1412 (tf->hob_lbam << 8) |
1413 (tf->hob_lbah << 16) |
1414 (tf->hob_feature << 24)
1415 );
1416 crqb->ata_cmd[3] = cpu_to_le32(
1417 (tf->nsect << 0) |
1418 (tf->hob_nsect << 8)
1419 );
1420
1421 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1422 return;
1423 mv_fill_sg(qc);
1424}
1425
1426/**
1427 * mv_qc_issue - Initiate a command to the host
1428 * @qc: queued command to start
1429 *
1430 * This routine simply redirects to the general purpose routine
1431 * if command is not DMA. Else, it sanity checks our local
1432 * caches of the request producer/consumer indices then enables
1433 * DMA and bumps the request producer index.
1434 *
1435 * LOCKING:
1436 * Inherited from caller.
1437 */
1438static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1439{
1440 struct ata_port *ap = qc->ap;
1441 void __iomem *port_mmio = mv_ap_base(ap);
1442 struct mv_port_priv *pp = ap->private_data;
1443 u32 in_index;
1444
1445 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1446 (qc->tf.protocol != ATA_PROT_NCQ)) {
1447 /* We're about to send a non-EDMA capable command to the
1448 * port. Turn off EDMA so there won't be problems accessing
1449 * shadow block, etc registers.
1450 */
1451 __mv_stop_dma(ap);
1452 return ata_qc_issue_prot(qc);
1453 }
1454
1455 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1456
1457 pp->req_idx++;
1458
1459 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1460
1461 /* and write the request in pointer to kick the EDMA to life */
1462 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1463 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1464
1465 return 0;
1466}
1467
1468/**
1469 * mv_err_intr - Handle error interrupts on the port
1470 * @ap: ATA channel to manipulate
1471 * @reset_allowed: bool: 0 == don't trigger from reset here
1472 *
1473 * In most cases, just clear the interrupt and move on. However,
1474 * some cases require an eDMA reset, which is done right before
1475 * the COMRESET in mv_phy_reset(). The SERR case requires a
1476 * clear of pending errors in the SATA SERROR register. Finally,
1477 * if the port disabled DMA, update our cached copy to match.
1478 *
1479 * LOCKING:
1480 * Inherited from caller.
1481 */
1482static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1483{
1484 void __iomem *port_mmio = mv_ap_base(ap);
1485 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1486 struct mv_port_priv *pp = ap->private_data;
1487 struct mv_host_priv *hpriv = ap->host->private_data;
1488 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1489 unsigned int action = 0, err_mask = 0;
1490 struct ata_eh_info *ehi = &ap->link.eh_info;
1491
1492 ata_ehi_clear_desc(ehi);
1493
1494 if (!edma_enabled) {
1495 /* just a guess: do we need to do this? should we
1496 * expand this, and do it in all cases?
1497 */
1498 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1499 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1500 }
1501
1502 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1503
1504 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1505
1506 /*
1507 * all generations share these EDMA error cause bits
1508 */
1509
1510 if (edma_err_cause & EDMA_ERR_DEV)
1511 err_mask |= AC_ERR_DEV;
1512 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1513 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1514 EDMA_ERR_INTRL_PAR)) {
1515 err_mask |= AC_ERR_ATA_BUS;
1516 action |= ATA_EH_RESET;
1517 ata_ehi_push_desc(ehi, "parity error");
1518 }
1519 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1520 ata_ehi_hotplugged(ehi);
1521 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1522 "dev disconnect" : "dev connect");
1523 action |= ATA_EH_RESET;
1524 }
1525
1526 if (IS_GEN_I(hpriv)) {
1527 eh_freeze_mask = EDMA_EH_FREEZE_5;
1528
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1530 pp = ap->private_data;
1531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1532 ata_ehi_push_desc(ehi, "EDMA self-disable");
1533 }
1534 } else {
1535 eh_freeze_mask = EDMA_EH_FREEZE;
1536
1537 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1538 pp = ap->private_data;
1539 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1540 ata_ehi_push_desc(ehi, "EDMA self-disable");
1541 }
1542
1543 if (edma_err_cause & EDMA_ERR_SERR) {
1544 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1545 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1546 err_mask = AC_ERR_ATA_BUS;
1547 action |= ATA_EH_RESET;
1548 }
1549 }
1550
1551 /* Clear EDMA now that SERR cleanup done */
1552 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1553
1554 if (!err_mask) {
1555 err_mask = AC_ERR_OTHER;
1556 action |= ATA_EH_RESET;
1557 }
1558
1559 ehi->serror |= serr;
1560 ehi->action |= action;
1561
1562 if (qc)
1563 qc->err_mask |= err_mask;
1564 else
1565 ehi->err_mask |= err_mask;
1566
1567 if (edma_err_cause & eh_freeze_mask)
1568 ata_port_freeze(ap);
1569 else
1570 ata_port_abort(ap);
1571}
1572
1573static void mv_intr_pio(struct ata_port *ap)
1574{
1575 struct ata_queued_cmd *qc;
1576 u8 ata_status;
1577
1578 /* ignore spurious intr if drive still BUSY */
1579 ata_status = readb(ap->ioaddr.status_addr);
1580 if (unlikely(ata_status & ATA_BUSY))
1581 return;
1582
1583 /* get active ATA command */
1584 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1585 if (unlikely(!qc)) /* no active tag */
1586 return;
1587 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1588 return;
1589
1590 /* and finally, complete the ATA command */
1591 qc->err_mask |= ac_err_mask(ata_status);
1592 ata_qc_complete(qc);
1593}
1594
1595static void mv_intr_edma(struct ata_port *ap)
1596{
1597 void __iomem *port_mmio = mv_ap_base(ap);
1598 struct mv_host_priv *hpriv = ap->host->private_data;
1599 struct mv_port_priv *pp = ap->private_data;
1600 struct ata_queued_cmd *qc;
1601 u32 out_index, in_index;
1602 bool work_done = false;
1603
1604 /* get h/w response queue pointer */
1605 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1606 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1607
1608 while (1) {
1609 u16 status;
1610 unsigned int tag;
1611
1612 /* get s/w response queue last-read pointer, and compare */
1613 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1614 if (in_index == out_index)
1615 break;
1616
1617 /* 50xx: get active ATA command */
1618 if (IS_GEN_I(hpriv))
1619 tag = ap->link.active_tag;
1620
1621 /* Gen II/IIE: get active ATA command via tag, to enable
 1622 * support for queueing. This works transparently for
1623 * queued and non-queued modes.
bdd4ddde 1624 */
1625 else
1626 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
bdd4ddde 1627
6c1153e0 1628 qc = ata_qc_from_tag(ap, tag);
bdd4ddde 1629
cb924419
ML
1630 /* For non-NCQ mode, the lower 8 bits of status
1631 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1632 * which should be zero if all went well.
1633 */
1634 status = le16_to_cpu(pp->crpb[out_index].flags);
cb924419 1635 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1636 mv_err_intr(ap, qc);
1637 return;
1638 }
1639
1640 /* and finally, complete the ATA command */
1641 if (qc) {
1642 qc->err_mask |=
1643 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1644 ata_qc_complete(qc);
1645 }
1646
0ea9e179 1647 /* advance software response queue pointer, to
1648 * indicate (after the loop completes) to hardware
1649 * that we have consumed a response queue entry.
1650 */
1651 work_done = true;
1652 pp->resp_idx++;
1653 }
1654
1655 if (work_done)
1656 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1657 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1658 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
20f733e7
BR
1659}
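
/*
 * Editor's note (illustrative only, not part of the original driver):
 * mv_intr_edma() above consumes a power-of-two circular response queue.
 * The hardware "in" pointer comes from EDMA_RSP_Q_IN_PTR_OFS, while the
 * software "out" index (pp->resp_idx) is kept free-running and masked
 * down to the queue depth on every use.  The sketch below shows the same
 * consume pattern with made-up names (EXAMPLE_Q_DEPTH and friends are
 * assumptions, not driver symbols).
 */
#if 0	/* illustrative sketch, deliberately kept out of the build */
#define EXAMPLE_Q_DEPTH		32			/* must be a power of two */
#define EXAMPLE_Q_MASK		(EXAMPLE_Q_DEPTH - 1)

static void example_consume_responses(unsigned int hw_in_index,
				      unsigned int *sw_resp_idx)
{
	/* hw_in_index is already masked to the queue depth by the caller */
	while ((*sw_resp_idx & EXAMPLE_Q_MASK) != hw_in_index) {
		unsigned int slot = *sw_resp_idx & EXAMPLE_Q_MASK;

		/* ... process response queue entry 'slot' here ... */

		(*sw_resp_idx)++;	/* free-running index; wraps naturally */
	}
	/* caller then writes the new out-pointer back to the hardware */
}
#endif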
1660
05b308e1
BR
1661/**
1662 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1663 * @host: host specific structure
1664 * @relevant: port error bits relevant to this host controller
1665 * @hc: which host controller we're to look at
1666 *
1667 * Read then write clear the HC interrupt status then walk each
1668 * port connected to the HC and see if it needs servicing. Port
1669 * success ints are reported in the HC interrupt status reg, the
1670 * port error ints are reported in the higher level main
1671 * interrupt status register and thus are passed in via the
1672 * 'relevant' argument.
1673 *
1674 * LOCKING:
1675 * Inherited from caller.
1676 */
cca3974e 1677static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1678{
f351b2d6
SB
1679 struct mv_host_priv *hpriv = host->private_data;
1680 void __iomem *mmio = hpriv->base;
20f733e7 1681 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1682 u32 hc_irq_cause;
f351b2d6 1683 int port, port0, last_port;
20f733e7 1684
35177265 1685 if (hc == 0)
20f733e7 1686 port0 = 0;
35177265 1687 else
20f733e7 1688 port0 = MV_PORTS_PER_HC;
20f733e7 1689
f351b2d6
SB
1690 if (HAS_PCI(host))
1691 last_port = port0 + MV_PORTS_PER_HC;
1692 else
1693 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1694 /* we'll need the HC success int register in most cases */
1695 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1696 if (!hc_irq_cause)
1697 return;
1698
1699 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1700
1701 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1702 hc, relevant, hc_irq_cause);
20f733e7 1703
8f71efe2 1704 for (port = port0; port < last_port; port++) {
cca3974e 1705 struct ata_port *ap = host->ports[port];
8f71efe2 1706 struct mv_port_priv *pp;
bdd4ddde 1707 int have_err_bits, hard_port, shift;
55d8ca4f 1708
bdd4ddde 1709 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1710 continue;
1711
8f71efe2
YL
1712 pp = ap->private_data;
1713
31961943 1714 shift = port << 1; /* (port * 2) */
20f733e7
BR
1715 if (port >= MV_PORTS_PER_HC) {
1716 shift++; /* skip bit 8 in the HC Main IRQ reg */
1717 }
bdd4ddde
JG
1718 have_err_bits = ((PORT0_ERR << shift) & relevant);
1719
1720 if (unlikely(have_err_bits)) {
1721 struct ata_queued_cmd *qc;
8b260248 1722
9af5c9c9 1723 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1724 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1725 continue;
1726
1727 mv_err_intr(ap, qc);
1728 continue;
1729 }
1730
1731 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1732
1733 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1734 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1735 mv_intr_edma(ap);
1736 } else {
1737 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1738 mv_intr_pio(ap);
20f733e7
BR
1739 }
1740 }
1741 VPRINTK("EXIT\n");
1742}
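
/*
 * Editor's note (illustrative only): in mv_host_intr() above, each port
 * owns a pair of bits (error/done) in the main cause register, so a
 * port's bit position is port * 2, plus one extra for ports on the
 * second host controller because bit 8 is skipped in the register
 * layout.  A standalone form of that calculation, under the assumption
 * of 4 ports per host controller (matching the "port_no > 3" tests
 * elsewhere in this file), would look like this:
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned int example_main_irq_shift(unsigned int port)
{
	unsigned int shift = port * 2;

	if (port >= 4)		/* ports on the second host controller */
		shift++;	/* skip bit 8 in the HC Main IRQ register */
	return shift;
}
#endif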
1743
bdd4ddde
JG
1744static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1745{
02a121da 1746 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1747 struct ata_port *ap;
1748 struct ata_queued_cmd *qc;
1749 struct ata_eh_info *ehi;
1750 unsigned int i, err_mask, printed = 0;
1751 u32 err_cause;
1752
02a121da 1753 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1754
1755 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1756 err_cause);
1757
1758 DPRINTK("All regs @ PCI error\n");
1759 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1760
02a121da 1761 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1762
1763 for (i = 0; i < host->n_ports; i++) {
1764 ap = host->ports[i];
936fd732 1765 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1766 ehi = &ap->link.eh_info;
bdd4ddde
JG
1767 ata_ehi_clear_desc(ehi);
1768 if (!printed++)
1769 ata_ehi_push_desc(ehi,
1770 "PCI err cause 0x%08x", err_cause);
1771 err_mask = AC_ERR_HOST_BUS;
cf480626 1772 ehi->action = ATA_EH_RESET;
9af5c9c9 1773 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1774 if (qc)
1775 qc->err_mask |= err_mask;
1776 else
1777 ehi->err_mask |= err_mask;
1778
1779 ata_port_freeze(ap);
1780 }
1781 }
1782}
1783
05b308e1 1784/**
c5d3e45a 1785 * mv_interrupt - Main interrupt event handler
1786 * @irq: unused
1787 * @dev_instance: private data; in this case the host structure
1788 *
1789 * Read the read only register to determine if any host
1790 * controllers have pending interrupts. If so, call lower level
1791 * routine to handle. Also check for PCI errors which are only
1792 * reported here.
1793 *
8b260248 1794 * LOCKING:
cca3974e 1795 * This routine holds the host lock while processing pending
1796 * interrupts.
1797 */
7d12e780 1798static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1799{
cca3974e 1800 struct ata_host *host = dev_instance;
f351b2d6 1801 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1802 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1803 void __iomem *mmio = hpriv->base;
646a4da5 1804 u32 irq_stat, irq_mask;
20f733e7 1805
646a4da5 1806 spin_lock(&host->lock);
f351b2d6
SB
1807
1808 irq_stat = readl(hpriv->main_cause_reg_addr);
1809 irq_mask = readl(hpriv->main_mask_reg_addr);
20f733e7
BR
1810
1811 /* check the cases where we either have nothing pending or have read
1812 * a bogus register value which can indicate HW removal or PCI fault
1813 */
1814 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1815 goto out_unlock;
20f733e7 1816
cca3974e 1817 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1818
7bb3c529 1819 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1820 mv_pci_error(host, mmio);
1821 handled = 1;
1822 goto out_unlock; /* skip all other HC irq handling */
1823 }
1824
20f733e7
BR
1825 for (hc = 0; hc < n_hcs; hc++) {
1826 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1827 if (relevant) {
cca3974e 1828 mv_host_intr(host, relevant, hc);
bdd4ddde 1829 handled = 1;
20f733e7
BR
1830 }
1831 }
615ab953 1832
bdd4ddde 1833out_unlock:
cca3974e 1834 spin_unlock(&host->lock);
20f733e7
BR
1835
1836 return IRQ_RETVAL(handled);
1837}
1838
c9d39130
JG
1839static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1840{
1841 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1842 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1843
1844 return hc_mmio + ofs;
1845}
1846
1847static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1848{
1849 unsigned int ofs;
1850
1851 switch (sc_reg_in) {
1852 case SCR_STATUS:
1853 case SCR_ERROR:
1854 case SCR_CONTROL:
1855 ofs = sc_reg_in * sizeof(u32);
1856 break;
1857 default:
1858 ofs = 0xffffffffU;
1859 break;
1860 }
1861 return ofs;
1862}
1863
da3dbb17 1864static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1865{
f351b2d6
SB
1866 struct mv_host_priv *hpriv = ap->host->private_data;
1867 void __iomem *mmio = hpriv->base;
0d5ff566 1868 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1869 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1870
da3dbb17
TH
1871 if (ofs != 0xffffffffU) {
1872 *val = readl(addr + ofs);
1873 return 0;
1874 } else
1875 return -EINVAL;
c9d39130
JG
1876}
1877
da3dbb17 1878static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1879{
f351b2d6
SB
1880 struct mv_host_priv *hpriv = ap->host->private_data;
1881 void __iomem *mmio = hpriv->base;
0d5ff566 1882 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1883 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1884
da3dbb17 1885 if (ofs != 0xffffffffU) {
0d5ff566 1886 writelfl(val, addr + ofs);
da3dbb17
TH
1887 return 0;
1888 } else
1889 return -EINVAL;
c9d39130
JG
1890}
1891
7bb3c529 1892static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1893{
7bb3c529 1894 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1895 int early_5080;
1896
44c10138 1897 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1898
1899 if (!early_5080) {
1900 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1901 tmp |= (1 << 0);
1902 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1903 }
1904
7bb3c529 1905 mv_reset_pci_bus(host, mmio);
522479fb
JG
1906}
1907
1908static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1909{
1910 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1911}
1912
47c2b677 1913static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1914 void __iomem *mmio)
1915{
c9d39130
JG
1916 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1917 u32 tmp;
1918
1919 tmp = readl(phy_mmio + MV5_PHY_MODE);
1920
1921 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1922 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1923}
1924
47c2b677 1925static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1926{
522479fb
JG
1927 u32 tmp;
1928
1929 writel(0, mmio + MV_GPIO_PORT_CTL);
1930
1931 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1932
1933 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1934 tmp |= ~(1 << 0);
1935 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1936}
1937
2a47ce06
JG
1938static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1939 unsigned int port)
bca1c4eb 1940{
c9d39130
JG
1941 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1942 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1943 u32 tmp;
1944 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1945
1946 if (fix_apm_sq) {
1947 tmp = readl(phy_mmio + MV5_LT_MODE);
1948 tmp |= (1 << 19);
1949 writel(tmp, phy_mmio + MV5_LT_MODE);
1950
1951 tmp = readl(phy_mmio + MV5_PHY_CTL);
1952 tmp &= ~0x3;
1953 tmp |= 0x1;
1954 writel(tmp, phy_mmio + MV5_PHY_CTL);
1955 }
1956
1957 tmp = readl(phy_mmio + MV5_PHY_MODE);
1958 tmp &= ~mask;
1959 tmp |= hpriv->signal[port].pre;
1960 tmp |= hpriv->signal[port].amps;
1961 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1962}
1963
c9d39130
JG
1964
1965#undef ZERO
1966#define ZERO(reg) writel(0, port_mmio + (reg))
1967static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1968 unsigned int port)
1969{
1970 void __iomem *port_mmio = mv_port_base(mmio, port);
1971
1972 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1973
1974 mv_channel_reset(hpriv, mmio, port);
1975
1976 ZERO(0x028); /* command */
1977 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1978 ZERO(0x004); /* timer */
1979 ZERO(0x008); /* irq err cause */
1980 ZERO(0x00c); /* irq err mask */
1981 ZERO(0x010); /* rq bah */
1982 ZERO(0x014); /* rq inp */
1983 ZERO(0x018); /* rq outp */
1984 ZERO(0x01c); /* respq bah */
1985 ZERO(0x024); /* respq outp */
1986 ZERO(0x020); /* respq inp */
1987 ZERO(0x02c); /* test control */
1988 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1989}
1990#undef ZERO
1991
1992#define ZERO(reg) writel(0, hc_mmio + (reg))
1993static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1994 unsigned int hc)
47c2b677 1995{
c9d39130
JG
1996 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1997 u32 tmp;
1998
1999 ZERO(0x00c);
2000 ZERO(0x010);
2001 ZERO(0x014);
2002 ZERO(0x018);
2003
2004 tmp = readl(hc_mmio + 0x20);
2005 tmp &= 0x1c1c1c1c;
2006 tmp |= 0x03030303;
2007 writel(tmp, hc_mmio + 0x20);
2008}
2009#undef ZERO
2010
2011static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2012 unsigned int n_hc)
2013{
2014 unsigned int hc, port;
2015
2016 for (hc = 0; hc < n_hc; hc++) {
2017 for (port = 0; port < MV_PORTS_PER_HC; port++)
2018 mv5_reset_hc_port(hpriv, mmio,
2019 (hc * MV_PORTS_PER_HC) + port);
2020
2021 mv5_reset_one_hc(hpriv, mmio, hc);
2022 }
2023
2024 return 0;
47c2b677
JG
2025}
2026
101ffae2
JG
2027#undef ZERO
2028#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2029static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2030{
02a121da 2031 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2032 u32 tmp;
2033
2034 tmp = readl(mmio + MV_PCI_MODE);
2035 tmp &= 0xff00ffff;
2036 writel(tmp, mmio + MV_PCI_MODE);
2037
2038 ZERO(MV_PCI_DISC_TIMER);
2039 ZERO(MV_PCI_MSI_TRIGGER);
2040 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2041 ZERO(HC_MAIN_IRQ_MASK_OFS);
2042 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2043 ZERO(hpriv->irq_cause_ofs);
2044 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2045 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2046 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2047 ZERO(MV_PCI_ERR_ATTRIBUTE);
2048 ZERO(MV_PCI_ERR_COMMAND);
2049}
2050#undef ZERO
2051
2052static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2053{
2054 u32 tmp;
2055
2056 mv5_reset_flash(hpriv, mmio);
2057
2058 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2059 tmp &= 0x3;
2060 tmp |= (1 << 5) | (1 << 6);
2061 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2062}
2063
2064/**
2065 * mv6_reset_hc - Perform the 6xxx global soft reset
2066 * @mmio: base address of the HBA
2067 *
2068 * This routine only applies to 6xxx parts.
2069 *
2070 * LOCKING:
2071 * Inherited from caller.
2072 */
2073static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2074 unsigned int n_hc)
2075{
2076 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2077 int i, rc = 0;
2078 u32 t;
2079
2080 /* Following procedure defined in PCI "main command and status
2081 * register" table.
2082 */
2083 t = readl(reg);
2084 writel(t | STOP_PCI_MASTER, reg);
2085
2086 for (i = 0; i < 1000; i++) {
2087 udelay(1);
2088 t = readl(reg);
2dcb407e 2089 if (PCI_MASTER_EMPTY & t)
101ffae2 2090 break;
101ffae2
JG
2091 }
2092 if (!(PCI_MASTER_EMPTY & t)) {
2093 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2094 rc = 1;
2095 goto done;
2096 }
2097
2098 /* set reset */
2099 i = 5;
2100 do {
2101 writel(t | GLOB_SFT_RST, reg);
2102 t = readl(reg);
2103 udelay(1);
2104 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2105
2106 if (!(GLOB_SFT_RST & t)) {
2107 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2108 rc = 1;
2109 goto done;
2110 }
2111
2112 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2113 i = 5;
2114 do {
2115 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2116 t = readl(reg);
2117 udelay(1);
2118 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2119
2120 if (GLOB_SFT_RST & t) {
2121 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2122 rc = 1;
2123 }
2124done:
2125 return rc;
2126}
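
/*
 * Editor's note (illustrative only): mv6_reset_hc() above uses a simple
 * bounded-poll pattern -- write a control bit, then re-read the register
 * with a short delay until the expected state appears or the retry
 * budget runs out.  A generic form of that pattern (names are the
 * editor's, not driver symbols) looks like this:
 */
#if 0	/* illustrative sketch, not compiled */
static int example_poll_bit_set(void __iomem *reg, u32 bit, int max_tries)
{
	while (max_tries-- > 0) {
		if (readl(reg) & bit)
			return 0;	/* bit observed */
		udelay(1);
	}
	return -ETIMEDOUT;		/* caller decides how to recover */
}
#endif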
2127
47c2b677 2128static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2129 void __iomem *mmio)
2130{
2131 void __iomem *port_mmio;
2132 u32 tmp;
2133
ba3fe8fb
JG
2134 tmp = readl(mmio + MV_RESET_CFG);
2135 if ((tmp & (1 << 0)) == 0) {
47c2b677 2136 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2137 hpriv->signal[idx].pre = 0x1 << 5;
2138 return;
2139 }
2140
2141 port_mmio = mv_port_base(mmio, idx);
2142 tmp = readl(port_mmio + PHY_MODE2);
2143
2144 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2145 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2146}
2147
47c2b677 2148static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2149{
47c2b677 2150 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2151}
2152
c9d39130 2153static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2154 unsigned int port)
bca1c4eb 2155{
c9d39130
JG
2156 void __iomem *port_mmio = mv_port_base(mmio, port);
2157
bca1c4eb 2158 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2159 int fix_phy_mode2 =
2160 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2161 int fix_phy_mode4 =
2162 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2163 u32 m2, tmp;
2164
2165 if (fix_phy_mode2) {
2166 m2 = readl(port_mmio + PHY_MODE2);
2167 m2 &= ~(1 << 16);
2168 m2 |= (1 << 31);
2169 writel(m2, port_mmio + PHY_MODE2);
2170
2171 udelay(200);
2172
2173 m2 = readl(port_mmio + PHY_MODE2);
2174 m2 &= ~((1 << 16) | (1 << 31));
2175 writel(m2, port_mmio + PHY_MODE2);
2176
2177 udelay(200);
2178 }
2179
2180 /* who knows what this magic does */
2181 tmp = readl(port_mmio + PHY_MODE3);
2182 tmp &= ~0x7F800000;
2183 tmp |= 0x2A800000;
2184 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2185
2186 if (fix_phy_mode4) {
47c2b677 2187 u32 m4;
bca1c4eb
JG
2188
2189 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2190
2191 if (hp_flags & MV_HP_ERRATA_60X1B2)
2192 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2193
2194 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2195
2196 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2197
2198 if (hp_flags & MV_HP_ERRATA_60X1B2)
2199 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2200 }
2201
2202 /* Revert values of pre-emphasis and signal amps to the saved ones */
2203 m2 = readl(port_mmio + PHY_MODE2);
2204
2205 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2206 m2 |= hpriv->signal[port].amps;
2207 m2 |= hpriv->signal[port].pre;
47c2b677 2208 m2 &= ~(1 << 16);
bca1c4eb 2209
e4e7b892
JG
2210 /* according to mvSata 3.6.1, some IIE values are fixed */
2211 if (IS_GEN_IIE(hpriv)) {
2212 m2 &= ~0xC30FF01F;
2213 m2 |= 0x0000900F;
2214 }
2215
bca1c4eb
JG
2216 writel(m2, port_mmio + PHY_MODE2);
2217}
2218
f351b2d6
SB
2219/* TODO: use the generic LED interface to configure the SATA Presence */
 2220 /* & Activity LEDs on the board */
2221static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2222 void __iomem *mmio)
2223{
2224 return;
2225}
2226
2227static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2228 void __iomem *mmio)
2229{
2230 void __iomem *port_mmio;
2231 u32 tmp;
2232
2233 port_mmio = mv_port_base(mmio, idx);
2234 tmp = readl(port_mmio + PHY_MODE2);
2235
2236 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2237 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2238}
2239
2240#undef ZERO
2241#define ZERO(reg) writel(0, port_mmio + (reg))
2242static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2243 void __iomem *mmio, unsigned int port)
2244{
2245 void __iomem *port_mmio = mv_port_base(mmio, port);
2246
2247 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2248
2249 mv_channel_reset(hpriv, mmio, port);
2250
2251 ZERO(0x028); /* command */
2252 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2253 ZERO(0x004); /* timer */
2254 ZERO(0x008); /* irq err cause */
2255 ZERO(0x00c); /* irq err mask */
2256 ZERO(0x010); /* rq bah */
2257 ZERO(0x014); /* rq inp */
2258 ZERO(0x018); /* rq outp */
2259 ZERO(0x01c); /* respq bah */
2260 ZERO(0x024); /* respq outp */
2261 ZERO(0x020); /* respq inp */
2262 ZERO(0x02c); /* test control */
2263 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2264}
2265
2266#undef ZERO
2267
2268#define ZERO(reg) writel(0, hc_mmio + (reg))
2269static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2270 void __iomem *mmio)
2271{
2272 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2273
2274 ZERO(0x00c);
2275 ZERO(0x010);
2276 ZERO(0x014);
2277
2278}
2279
2280#undef ZERO
2281
2282static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2283 void __iomem *mmio, unsigned int n_hc)
2284{
2285 unsigned int port;
2286
2287 for (port = 0; port < hpriv->n_ports; port++)
2288 mv_soc_reset_hc_port(hpriv, mmio, port);
2289
2290 mv_soc_reset_one_hc(hpriv, mmio);
2291
2292 return 0;
2293}
2294
2295static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2296 void __iomem *mmio)
2297{
2298 return;
2299}
2300
2301static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2302{
2303 return;
2304}
2305
c9d39130
JG
2306static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2307 unsigned int port_no)
2308{
2309 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2310
2311 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2312
ee9ccdf7 2313 if (IS_GEN_II(hpriv)) {
c9d39130 2314 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2315 ifctl |= (1 << 7); /* enable gen2i speed */
2316 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2317 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2318 }
2319
2320 udelay(25); /* allow reset propagation */
2321
2322 /* Spec never mentions clearing the bit. Marvell's driver does
2323 * clear the bit, however.
2324 */
2325 writelfl(0, port_mmio + EDMA_CMD_OFS);
2326
2327 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2328
ee9ccdf7 2329 if (IS_GEN_I(hpriv))
2330 mdelay(1);
2331}
2332
05b308e1 2333/**
bdd4ddde 2334 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2335 * @ap: ATA channel to manipulate
2336 *
2337 * Part of this is taken from __sata_phy_reset and modified to
2338 * not sleep since this routine gets called from interrupt level.
2339 *
2340 * LOCKING:
 2341 * Inherited from caller. This is coded to be safe to call at
2342 * interrupt level, i.e. it does not sleep.
31961943 2343 */
2344static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2345 unsigned long deadline)
20f733e7 2346{
095fec88 2347 struct mv_port_priv *pp = ap->private_data;
cca3974e 2348 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2349 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2350 int retry = 5;
2351 u32 sstatus;
20f733e7
BR
2352
2353 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2354
da3dbb17
TH
2355#ifdef DEBUG
2356 {
2357 u32 sstatus, serror, scontrol;
2358
2359 mv_scr_read(ap, SCR_STATUS, &sstatus);
2360 mv_scr_read(ap, SCR_ERROR, &serror);
2361 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2362 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2363 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2364 }
2365#endif
20f733e7 2366
22374677
JG
2367 /* Issue COMRESET via SControl */
2368comreset_retry:
936fd732 2369 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2370 msleep(1);
22374677 2371
936fd732 2372 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2373 msleep(20);
22374677 2374
31961943 2375 do {
936fd732 2376 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2377 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2378 break;
22374677 2379
bdd4ddde 2380 msleep(1);
c5d3e45a 2381 } while (time_before(jiffies, deadline));
20f733e7 2382
22374677 2383 /* work around errata */
ee9ccdf7 2384 if (IS_GEN_II(hpriv) &&
2385 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2386 (retry-- > 0))
2387 goto comreset_retry;
095fec88 2388
da3dbb17
TH
2389#ifdef DEBUG
2390 {
2391 u32 sstatus, serror, scontrol;
2392
2393 mv_scr_read(ap, SCR_STATUS, &sstatus);
2394 mv_scr_read(ap, SCR_ERROR, &serror);
2395 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2396 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2397 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2398 }
2399#endif
31961943 2400
936fd732 2401 if (ata_link_offline(&ap->link)) {
bdd4ddde 2402 *class = ATA_DEV_NONE;
20f733e7
BR
2403 return;
2404 }
2405
22374677
JG
2406 /* even after SStatus reflects that device is ready,
2407 * it seems to take a while for link to be fully
2408 * established (and thus Status no longer 0x80/0x7F),
2409 * so we poll a bit for that, here.
2410 */
2411 retry = 20;
2412 while (1) {
2413 u8 drv_stat = ata_check_status(ap);
2414 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2415 break;
bdd4ddde 2416 msleep(500);
22374677
JG
2417 if (retry-- <= 0)
2418 break;
bdd4ddde
JG
2419 if (time_after(jiffies, deadline))
2420 break;
22374677
JG
2421 }
2422
bdd4ddde
JG
2423 /* FIXME: if we passed the deadline, the following
2424 * code probably produces an invalid result
2425 */
20f733e7 2426
bdd4ddde 2427 /* finally, read device signature from TF registers */
3f19859e 2428 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2429
2430 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2431
bdd4ddde 2432 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2433
bca1c4eb 2434 VPRINTK("EXIT\n");
20f733e7
BR
2435}
2436
cc0680a5 2437static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2438{
cc0680a5 2439 struct ata_port *ap = link->ap;
bdd4ddde 2440 struct mv_port_priv *pp = ap->private_data;
0ea9e179 2441
cf480626 2442 mv_stop_dma(ap);
bdd4ddde 2443
cf480626 2444 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
bdd4ddde 2445 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
bdd4ddde 2446
cf480626 2447 return 0;
22374677
JG
2448}
2449
cc0680a5 2450static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2451 unsigned long deadline)
31961943 2452{
cc0680a5 2453 struct ata_port *ap = link->ap;
bdd4ddde 2454 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2455 void __iomem *mmio = hpriv->base;
31961943 2456
bdd4ddde 2457 mv_stop_dma(ap);
31961943 2458
bdd4ddde 2459 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2460
bdd4ddde
JG
2461 mv_phy_reset(ap, class, deadline);
2462
2463 return 0;
2464}
2465
cc0680a5 2466static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2467{
cc0680a5 2468 struct ata_port *ap = link->ap;
bdd4ddde
JG
2469 u32 serr;
2470
2471 /* print link status */
cc0680a5 2472 sata_print_link_status(link);
31961943 2473
bdd4ddde 2474 /* clear SError */
2475 sata_scr_read(link, SCR_ERROR, &serr);
2476 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2477
2478 /* bail out if no device is present */
2479 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2480 DPRINTK("EXIT, no device\n");
2481 return;
9b358e30 2482 }
bdd4ddde
JG
2483
2484 /* set up device control */
2485 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2486}
2487
2488static void mv_error_handler(struct ata_port *ap)
2489{
2490 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2491 mv_hardreset, mv_postreset);
2492}
2493
bdd4ddde
JG
2494static void mv_eh_freeze(struct ata_port *ap)
2495{
f351b2d6 2496 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde
JG
2497 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2498 u32 tmp, mask;
2499 unsigned int shift;
2500
2501 /* FIXME: handle coalescing completion events properly */
2502
2503 shift = ap->port_no * 2;
2504 if (hc > 0)
2505 shift++;
2506
2507 mask = 0x3 << shift;
2508
2509 /* disable assertion of portN err, done events */
2510 tmp = readl(hpriv->main_mask_reg_addr);
2511 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2512}
2513
2514static void mv_eh_thaw(struct ata_port *ap)
2515{
f351b2d6
SB
2516 struct mv_host_priv *hpriv = ap->host->private_data;
2517 void __iomem *mmio = hpriv->base;
bdd4ddde
JG
2518 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2519 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2520 void __iomem *port_mmio = mv_ap_base(ap);
2521 u32 tmp, mask, hc_irq_cause;
2522 unsigned int shift, hc_port_no = ap->port_no;
2523
2524 /* FIXME: handle coalescing completion events properly */
2525
2526 shift = ap->port_no * 2;
2527 if (hc > 0) {
2528 shift++;
2529 hc_port_no -= 4;
2530 }
2531
2532 mask = 0x3 << shift;
2533
2534 /* clear EDMA errors on this port */
2535 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2536
2537 /* clear pending irq events */
2538 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2539 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2540 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2541 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2542
2543 /* enable assertion of portN err, done events */
2544 tmp = readl(hpriv->main_mask_reg_addr);
2545 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
31961943
BR
2546}
2547
05b308e1
BR
2548/**
2549 * mv_port_init - Perform some early initialization on a single port.
2550 * @port: libata data structure storing shadow register addresses
2551 * @port_mmio: base address of the port
2552 *
2553 * Initialize shadow register mmio addresses, clear outstanding
2554 * interrupts on the port, and unmask interrupts for the future
2555 * start of the port.
2556 *
2557 * LOCKING:
2558 * Inherited from caller.
2559 */
31961943 2560static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2561{
0d5ff566 2562 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2563 unsigned serr_ofs;
2564
8b260248 2565 /* PIO related setup
2566 */
2567 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2568 port->error_addr =
2569 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2570 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2571 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2572 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2573 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2574 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2575 port->status_addr =
2576 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2577 /* special case: control/altstatus doesn't have ATA_REG_ address */
2578 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2579
2580 /* unused: */
8d9db2d2 2581 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2582
31961943
BR
2583 /* Clear any currently outstanding port interrupt conditions */
2584 serr_ofs = mv_scr_offset(SCR_ERROR);
2585 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2586 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2587
646a4da5
ML
2588 /* unmask all non-transient EDMA error interrupts */
2589 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2590
8b260248 2591 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2592 readl(port_mmio + EDMA_CFG_OFS),
2593 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2594 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2595}
2596
4447d351 2597static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2598{
4447d351
TH
2599 struct pci_dev *pdev = to_pci_dev(host->dev);
2600 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2601 u32 hp_flags = hpriv->hp_flags;
2602
5796d1c4 2603 switch (board_idx) {
2604 case chip_5080:
2605 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2606 hp_flags |= MV_HP_GEN_I;
47c2b677 2607
44c10138 2608 switch (pdev->revision) {
2609 case 0x1:
2610 hp_flags |= MV_HP_ERRATA_50XXB0;
2611 break;
2612 case 0x3:
2613 hp_flags |= MV_HP_ERRATA_50XXB2;
2614 break;
2615 default:
2616 dev_printk(KERN_WARNING, &pdev->dev,
2617 "Applying 50XXB2 workarounds to unknown rev\n");
2618 hp_flags |= MV_HP_ERRATA_50XXB2;
2619 break;
2620 }
2621 break;
2622
bca1c4eb
JG
2623 case chip_504x:
2624 case chip_508x:
47c2b677 2625 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2626 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2627
44c10138 2628 switch (pdev->revision) {
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_50XXB0;
2631 break;
2632 case 0x3:
2633 hp_flags |= MV_HP_ERRATA_50XXB2;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying B2 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_50XXB2;
2639 break;
bca1c4eb
JG
2640 }
2641 break;
2642
2643 case chip_604x:
2644 case chip_608x:
47c2b677 2645 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2646 hp_flags |= MV_HP_GEN_II;
47c2b677 2647
44c10138 2648 switch (pdev->revision) {
2649 case 0x7:
2650 hp_flags |= MV_HP_ERRATA_60X1B2;
2651 break;
2652 case 0x9:
2653 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2654 break;
2655 default:
2656 dev_printk(KERN_WARNING, &pdev->dev,
2657 "Applying B2 workarounds to unknown rev\n");
2658 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2659 break;
2660 }
2661 break;
2662
e4e7b892 2663 case chip_7042:
02a121da 2664 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2665 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2666 (pdev->device == 0x2300 || pdev->device == 0x2310))
2667 {
4e520033
ML
2668 /*
2669 * Highpoint RocketRAID PCIe 23xx series cards:
2670 *
2671 * Unconfigured drives are treated as "Legacy"
2672 * by the BIOS, and it overwrites sector 8 with
2673 * a "Lgcy" metadata block prior to Linux boot.
2674 *
2675 * Configured drives (RAID or JBOD) leave sector 8
2676 * alone, but instead overwrite a high numbered
2677 * sector for the RAID metadata. This sector can
2678 * be determined exactly, by truncating the physical
2679 * drive capacity to a nice even GB value.
2680 *
2681 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2682 *
2683 * Warn the user, lest they think we're just buggy.
2684 */
2685 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2686 " BIOS CORRUPTS DATA on all attached drives,"
2687 " regardless of if/how they are configured."
2688 " BEWARE!\n");
2689 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2690 " use sectors 8-9 on \"Legacy\" drives,"
2691 " and avoid the final two gigabytes on"
2692 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2693 }
e4e7b892
JG
2694 case chip_6042:
2695 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2696 hp_flags |= MV_HP_GEN_IIE;
2697
44c10138 2698 switch (pdev->revision) {
2699 case 0x0:
2700 hp_flags |= MV_HP_ERRATA_XX42A0;
2701 break;
2702 case 0x1:
2703 hp_flags |= MV_HP_ERRATA_60X1C0;
2704 break;
2705 default:
2706 dev_printk(KERN_WARNING, &pdev->dev,
2707 "Applying 60X1C0 workarounds to unknown rev\n");
2708 hp_flags |= MV_HP_ERRATA_60X1C0;
2709 break;
2710 }
2711 break;
f351b2d6
SB
2712 case chip_soc:
2713 hpriv->ops = &mv_soc_ops;
2714 hp_flags |= MV_HP_ERRATA_60X1C0;
2715 break;
e4e7b892 2716
bca1c4eb 2717 default:
f351b2d6 2718 dev_printk(KERN_ERR, host->dev,
5796d1c4 2719 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2720 return 1;
2721 }
2722
2723 hpriv->hp_flags = hp_flags;
02a121da
ML
2724 if (hp_flags & MV_HP_PCIE) {
2725 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2726 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2727 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2728 } else {
2729 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2730 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2731 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2732 }
bca1c4eb
JG
2733
2734 return 0;
2735}
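
/*
 * Editor's note (illustrative only): the RocketRAID warning inside
 * mv_chip_id() above says the Highpoint BIOS keeps its RAID metadata at
 * (dev->n_sectors & ~0xfffff).  Masking off the low 20 bits rounds the
 * sector count down to a multiple of 0x100000 sectors (1048576 sectors,
 * i.e. 512 MiB with 512-byte sectors).  A worked example with an assumed
 * capacity:
 */
#if 0	/* illustrative sketch, not compiled */
static u64 example_rocketraid_metadata_sector(u64 n_sectors)
{
	return n_sectors & ~0xfffffULL;
}
/*
 * e.g. n_sectors = 976773168 (a nominal 500 GB disk, assumed value)
 * gives metadata sector 976224256 -- inside the final gigabyte that the
 * warning above tells users to avoid on BIOS-initialized drives.
 */
#endif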
2736
05b308e1 2737/**
47c2b677 2738 * mv_init_host - Perform some early initialization of the host.
2739 * @host: ATA host to initialize
2740 * @board_idx: controller index
2741 *
2742 * If possible, do an early global reset of the host. Then do
2743 * our port init and clear/unmask all/relevant host interrupts.
2744 *
2745 * LOCKING:
2746 * Inherited from caller.
2747 */
4447d351 2748static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2749{
2750 int rc = 0, n_hc, port, hc;
4447d351 2751 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2752 void __iomem *mmio = hpriv->base;
47c2b677 2753
4447d351 2754 rc = mv_chip_id(host, board_idx);
bca1c4eb 2755 if (rc)
2756 goto done;
2757
2758 if (HAS_PCI(host)) {
2759 hpriv->main_cause_reg_addr = hpriv->base +
2760 HC_MAIN_IRQ_CAUSE_OFS;
2761 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2762 } else {
2763 hpriv->main_cause_reg_addr = hpriv->base +
2764 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2765 hpriv->main_mask_reg_addr = hpriv->base +
2766 HC_SOC_MAIN_IRQ_MASK_OFS;
2767 }
2768 /* global interrupt mask */
2769 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2770
4447d351 2771 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2772
4447d351 2773 for (port = 0; port < host->n_ports; port++)
47c2b677 2774 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2775
c9d39130 2776 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2777 if (rc)
20f733e7 2778 goto done;
20f733e7 2779
522479fb 2780 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2781 hpriv->ops->reset_bus(host, mmio);
47c2b677 2782 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2783
4447d351 2784 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2785 if (IS_GEN_II(hpriv)) {
2786 void __iomem *port_mmio = mv_port_base(mmio, port);
2787
2a47ce06 2788 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2789 ifctl |= (1 << 7); /* enable gen2i speed */
2790 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2791 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2792 }
2793
c9d39130 2794 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2795 }
2796
4447d351 2797 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2798 struct ata_port *ap = host->ports[port];
2a47ce06 2799 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2800
2801 mv_port_init(&ap->ioaddr, port_mmio);
2802
7bb3c529 2803#ifdef CONFIG_PCI
f351b2d6
SB
2804 if (HAS_PCI(host)) {
2805 unsigned int offset = port_mmio - mmio;
2806 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2807 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2808 }
7bb3c529 2809#endif
20f733e7
BR
2810 }
2811
2812 for (hc = 0; hc < n_hc; hc++) {
2813 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2814
2815 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2816 "(before clear)=0x%08x\n", hc,
2817 readl(hc_mmio + HC_CFG_OFS),
2818 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2819
2820 /* Clear any currently outstanding hc interrupt conditions */
2821 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2822 }
2823
f351b2d6
SB
2824 if (HAS_PCI(host)) {
2825 /* Clear any currently outstanding host interrupt conditions */
2826 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2827
f351b2d6
SB
2828 /* and unmask interrupt generation for host regs */
2829 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2830 if (IS_GEN_I(hpriv))
2831 writelfl(~HC_MAIN_MASKED_IRQS_5,
2832 hpriv->main_mask_reg_addr);
2833 else
2834 writelfl(~HC_MAIN_MASKED_IRQS,
2835 hpriv->main_mask_reg_addr);
2836
2837 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2838 "PCI int cause/mask=0x%08x/0x%08x\n",
2839 readl(hpriv->main_cause_reg_addr),
2840 readl(hpriv->main_mask_reg_addr),
2841 readl(mmio + hpriv->irq_cause_ofs),
2842 readl(mmio + hpriv->irq_mask_ofs));
2843 } else {
2844 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2845 hpriv->main_mask_reg_addr);
2846 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2847 readl(hpriv->main_cause_reg_addr),
2848 readl(hpriv->main_mask_reg_addr));
2849 }
2850done:
2851 return rc;
2852}
fb621e2f 2853
fbf14e2f
BB
2854static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2855{
2856 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2857 MV_CRQB_Q_SZ, 0);
2858 if (!hpriv->crqb_pool)
2859 return -ENOMEM;
2860
2861 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2862 MV_CRPB_Q_SZ, 0);
2863 if (!hpriv->crpb_pool)
2864 return -ENOMEM;
2865
2866 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2867 MV_SG_TBL_SZ, 0);
2868 if (!hpriv->sg_tbl_pool)
2869 return -ENOMEM;
2870
2871 return 0;
2872}
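
/*
 * Editor's note: dmam_pool_create() is the device-managed (devres) form
 * of dma_pool_create(), so the pools created above are released
 * automatically when the device is unbound.  That is why the early
 * -ENOMEM returns in mv_create_dma_pools() need no explicit cleanup of
 * pools that were already created.
 */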
2873
f351b2d6
SB
2874/**
2875 * mv_platform_probe - handle a positive probe of an soc Marvell
2876 * host
2877 * @pdev: platform device found
2878 *
2879 * LOCKING:
2880 * Inherited from caller.
2881 */
2882static int mv_platform_probe(struct platform_device *pdev)
2883{
2884 static int printed_version;
2885 const struct mv_sata_platform_data *mv_platform_data;
2886 const struct ata_port_info *ppi[] =
2887 { &mv_port_info[chip_soc], NULL };
2888 struct ata_host *host;
2889 struct mv_host_priv *hpriv;
2890 struct resource *res;
2891 int n_ports, rc;
20f733e7 2892
f351b2d6
SB
2893 if (!printed_version++)
2894 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2895
f351b2d6
SB
2896 /*
2897 * Simple resource validation ..
2898 */
2899 if (unlikely(pdev->num_resources != 2)) {
2900 dev_err(&pdev->dev, "invalid number of resources\n");
2901 return -EINVAL;
2902 }
2903
2904 /*
2905 * Get the register base first
2906 */
2907 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2908 if (res == NULL)
2909 return -EINVAL;
2910
2911 /* allocate host */
2912 mv_platform_data = pdev->dev.platform_data;
2913 n_ports = mv_platform_data->n_ports;
2914
2915 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2916 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2917
2918 if (!host || !hpriv)
2919 return -ENOMEM;
2920 host->private_data = hpriv;
2921 hpriv->n_ports = n_ports;
2922
2923 host->iomap = NULL;
f1cb0ea1
SB
2924 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2925 res->end - res->start + 1);
f351b2d6
SB
2926 hpriv->base -= MV_SATAHC0_REG_BASE;
2927
fbf14e2f
BB
2928 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2929 if (rc)
2930 return rc;
2931
f351b2d6
SB
2932 /* initialize adapter */
2933 rc = mv_init_host(host, chip_soc);
2934 if (rc)
2935 return rc;
2936
2937 dev_printk(KERN_INFO, &pdev->dev,
2938 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2939 host->n_ports);
2940
2941 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2942 IRQF_SHARED, &mv6_sht);
2943}
2944
2945/*
2946 *
2947 * mv_platform_remove - unplug a platform interface
2948 * @pdev: platform device
2949 *
2950 * A platform bus SATA device has been unplugged. Perform the needed
2951 * cleanup. Also called on module unload for any active devices.
2952 */
2953static int __devexit mv_platform_remove(struct platform_device *pdev)
2954{
2955 struct device *dev = &pdev->dev;
2956 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2957
2958 ata_host_detach(host);
f351b2d6 2959 return 0;
20f733e7
BR
2960}
2961
f351b2d6
SB
2962static struct platform_driver mv_platform_driver = {
2963 .probe = mv_platform_probe,
2964 .remove = __devexit_p(mv_platform_remove),
2965 .driver = {
2966 .name = DRV_NAME,
2967 .owner = THIS_MODULE,
2968 },
2969};
2970
2971
7bb3c529 2972#ifdef CONFIG_PCI
f351b2d6
SB
2973static int mv_pci_init_one(struct pci_dev *pdev,
2974 const struct pci_device_id *ent);
2975
7bb3c529
SB
2976
2977static struct pci_driver mv_pci_driver = {
2978 .name = DRV_NAME,
2979 .id_table = mv_pci_tbl,
f351b2d6 2980 .probe = mv_pci_init_one,
2981 .remove = ata_pci_remove_one,
2982};
2983
2984/*
2985 * module options
2986 */
2987static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2988
2989
2990/* move to PCI layer or libata core? */
2991static int pci_go_64(struct pci_dev *pdev)
2992{
2993 int rc;
2994
2995 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2996 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2997 if (rc) {
2998 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2999 if (rc) {
3000 dev_printk(KERN_ERR, &pdev->dev,
3001 "64-bit DMA enable failed\n");
3002 return rc;
3003 }
3004 }
3005 } else {
3006 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3007 if (rc) {
3008 dev_printk(KERN_ERR, &pdev->dev,
3009 "32-bit DMA enable failed\n");
3010 return rc;
3011 }
3012 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3013 if (rc) {
3014 dev_printk(KERN_ERR, &pdev->dev,
3015 "32-bit consistent DMA enable failed\n");
3016 return rc;
3017 }
3018 }
3019
3020 return rc;
3021}
3022
05b308e1
BR
3023/**
3024 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 3025 * @host: ATA host to print info about
3026 *
3027 * FIXME: complete this.
3028 *
3029 * LOCKING:
3030 * Inherited from caller.
3031 */
4447d351 3032static void mv_print_info(struct ata_host *host)
31961943 3033{
4447d351
TH
3034 struct pci_dev *pdev = to_pci_dev(host->dev);
3035 struct mv_host_priv *hpriv = host->private_data;
44c10138 3036 u8 scc;
c1e4fe71 3037 const char *scc_s, *gen;
31961943
BR
3038
3039 /* Use this to determine the HW stepping of the chip so we know
3040 * what errata to workaround
3041 */
3042 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3043 if (scc == 0)
3044 scc_s = "SCSI";
3045 else if (scc == 0x01)
3046 scc_s = "RAID";
3047 else
3048 scc_s = "?";
3049
3050 if (IS_GEN_I(hpriv))
3051 gen = "I";
3052 else if (IS_GEN_II(hpriv))
3053 gen = "II";
3054 else if (IS_GEN_IIE(hpriv))
3055 gen = "IIE";
3056 else
3057 gen = "?";
31961943 3058
a9524a76 3059 dev_printk(KERN_INFO, &pdev->dev,
3060 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3061 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3062 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3063}
3064
05b308e1 3065/**
f351b2d6 3066 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3067 * @pdev: PCI device found
3068 * @ent: PCI device ID entry for the matched host
3069 *
3070 * LOCKING:
3071 * Inherited from caller.
3072 */
3073static int mv_pci_init_one(struct pci_dev *pdev,
3074 const struct pci_device_id *ent)
20f733e7 3075{
2dcb407e 3076 static int printed_version;
20f733e7 3077 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3078 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3079 struct ata_host *host;
3080 struct mv_host_priv *hpriv;
3081 int n_ports, rc;
20f733e7 3082
a9524a76
JG
3083 if (!printed_version++)
3084 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3085
4447d351
TH
3086 /* allocate host */
3087 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3088
3089 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3090 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3091 if (!host || !hpriv)
3092 return -ENOMEM;
3093 host->private_data = hpriv;
f351b2d6 3094 hpriv->n_ports = n_ports;
4447d351
TH
3095
3096 /* acquire resources */
3097 rc = pcim_enable_device(pdev);
3098 if (rc)
20f733e7 3099 return rc;
20f733e7 3100
0d5ff566
TH
3101 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3102 if (rc == -EBUSY)
24dc5f33 3103 pcim_pin_device(pdev);
0d5ff566 3104 if (rc)
24dc5f33 3105 return rc;
4447d351 3106 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3107 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3108
d88184fb
JG
3109 rc = pci_go_64(pdev);
3110 if (rc)
3111 return rc;
3112
da2fa9ba
ML
3113 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3114 if (rc)
3115 return rc;
3116
20f733e7 3117 /* initialize adapter */
4447d351 3118 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3119 if (rc)
3120 return rc;
20f733e7 3121
31961943 3122 /* Enable interrupts */
6a59dcf8 3123 if (msi && pci_enable_msi(pdev))
31961943 3124 pci_intx(pdev, 1);
20f733e7 3125
31961943 3126 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3127 mv_print_info(host);
20f733e7 3128
4447d351 3129 pci_set_master(pdev);
ea8b4db9 3130 pci_try_set_mwi(pdev);
4447d351 3131 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3132 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3133}
7bb3c529 3134#endif
20f733e7 3135
f351b2d6
SB
3136static int mv_platform_probe(struct platform_device *pdev);
3137static int __devexit mv_platform_remove(struct platform_device *pdev);
3138
20f733e7
BR
3139static int __init mv_init(void)
3140{
7bb3c529
SB
3141 int rc = -ENODEV;
3142#ifdef CONFIG_PCI
3143 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3144 if (rc < 0)
3145 return rc;
3146#endif
3147 rc = platform_driver_register(&mv_platform_driver);
3148
3149#ifdef CONFIG_PCI
3150 if (rc < 0)
3151 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3152#endif
3153 return rc;
20f733e7
BR
3154}
3155
3156static void __exit mv_exit(void)
3157{
7bb3c529 3158#ifdef CONFIG_PCI
20f733e7 3159 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3160#endif
f351b2d6 3161 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3162}
3163
3164MODULE_AUTHOR("Brett Russ");
3165MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3166MODULE_LICENSE("GPL");
3167MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3168MODULE_VERSION(DRV_VERSION);
2e7e1214 3169MODULE_ALIAS("platform:sata_mv");
20f733e7 3170
7bb3c529 3171#ifdef CONFIG_PCI
ddef9bb3
JG
3172module_param(msi, int, 0444);
3173MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
7bb3c529 3174#endif
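
/*
 * Editor's note (illustrative usage): with the PCI build, MSI can be
 * requested at module load time, e.g.
 *
 *	modprobe sata_mv msi=1
 *
 * The 0444 permissions above make the value visible read-only under
 * /sys/module/sata_mv/parameters/msi once the module is loaded.
 */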
ddef9bb3 3175
20f733e7
BR
3176module_init(mv_init);
3177module_exit(mv_exit);