/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "1.01"

enum {
        /* BARs are enumerated in terms of pci_resource_start() terms */
        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */

        MV_PCI_REG_BASE         = 0,
        MV_IRQ_COAL_REG_BASE    = 0x18000,      /* 6xxx part only */
        MV_IRQ_COAL_CAUSE               = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO            = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI            = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD          = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD      = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE     = 0x20000,
        MV_FLASH_CTL            = 0x1046c,
        MV_GPIO_PORT_CTL        = 0x104f0,
        MV_RESET_CFG            = 0x180d8,

        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH          = 32,
        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT            = 176,
        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
        MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

        MV_PORTS_PER_HC         = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT        = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK            = 3,

        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
        MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                                  ATA_FLAG_PIO_POLLING,
        MV_6XXX_FLAGS           = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ          = (1 << 0),
        CRQB_TAG_SHIFT          = 1,
        CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
        CRQB_HOSTQ_SHIFT        = 17,   /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT     = 8,
        CRQB_CMD_CS             = (0x2 << 11),
        CRQB_CMD_LAST           = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT  = 8,
        CRPB_IOID_SHIFT_6       = 5,    /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7       = 7,    /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL    = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS         = 0xc00,

        PCI_MAIN_CMD_STS_OFS    = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
        GLOB_SFT_RST            = (1 << 4),

        MV_PCI_MODE             = 0xd00,
        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
        MV_PCI_DISC_TIMER       = 0xd04,
        MV_PCI_MSI_TRIGGER      = 0xc38,
        MV_PCI_SERR_MASK        = 0xc28,
        MV_PCI_XBAR_TMOUT       = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
        MV_PCI_ERR_COMMAND      = 0x1d50,

        PCI_IRQ_CAUSE_OFS       = 0x1d58,
        PCI_IRQ_MASK_OFS        = 0x1d5c,
        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */

        HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
        PORT0_ERR               = (1 << 0),     /* shift by port # */
        PORT0_DONE              = (1 << 1),     /* shift by port # */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
        PCI_ERR                 = (1 << 18),
        TRAN_LO_DONE            = (1 << 19),    /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE            = (1 << 20),    /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE     = (1 << 8),
        PORTS_4_7_COAL_DONE     = (1 << 17),
        PORTS_0_7_COAL_DONE     = (1 << 21),    /* 6xxx: IRQ coalescing */
        GPIO_INT                = (1 << 22),
        SELF_INT                = (1 << 23),
        TWSI_INT                = (1 << 24),
        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
        HC_MAIN_MASKED_IRQS     = (TRAN_LO_DONE | TRAN_HI_DONE |
                                   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                                   HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5   = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                   HC_MAIN_RSVD_5),

        /* SATAHC registers */
        HC_CFG_OFS              = 0,

        HC_IRQ_CAUSE_OFS        = 0x14,
        CRPB_DMA_DONE           = (1 << 0),     /* shift by port # */
        HC_IRQ_COAL             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS             = 0x100,
        SHD_CTL_AST_OFS         = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS         = 0x350,
        PHY_MODE3               = 0x310,
        PHY_MODE4               = 0x314,
        PHY_MODE2               = 0x330,
        MV5_PHY_MODE            = 0x74,
        MV5_LT_MODE             = 0x30,
        MV5_PHY_CTL             = 0x0C,
        SATA_INTERFACE_CTL      = 0x050,

        MV_M2_PREAMP_MASK       = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS            = 0,
        EDMA_CFG_Q_DEPTH        = 0,            /* queueing disabled */
        EDMA_CFG_NCQ            = (1 << 5),
        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
        EDMA_ERR_IRQ_MASK_OFS   = 0xc,
        EDMA_ERR_D_PAR          = (1 << 0),     /* UDMA data parity err */
        EDMA_ERR_PRD_PAR        = (1 << 1),     /* UDMA PRD parity err */
        EDMA_ERR_DEV            = (1 << 2),     /* device error */
        EDMA_ERR_DEV_DCON       = (1 << 3),     /* device disconnect */
        EDMA_ERR_DEV_CON        = (1 << 4),     /* device connected */
        EDMA_ERR_SERR           = (1 << 5),     /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS       = (1 << 7),     /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5     = (1 << 8),     /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC     = (1 << 8),     /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transprt layer irq */
        EDMA_ERR_CRQB_PAR       = (1 << 9),     /* CRQB parity error */
        EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
        EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
        EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */
        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),
        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */
        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */
        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
        EDMA_ERR_OVERRUN_5      = (1 << 5),
        EDMA_ERR_UNDERRUN_5     = (1 << 6),
        EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_SERR |
                                  EDMA_ERR_SELF_DIS |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY |
                                  EDMA_ERR_LNK_CTRL_RX_2 |
                                  EDMA_ERR_LNK_DATA_RX |
                                  EDMA_ERR_LNK_DATA_TX |
                                  EDMA_ERR_TRANS_PROTO,
        EDMA_EH_FREEZE_5        = EDMA_ERR_D_PAR |
                                  EDMA_ERR_PRD_PAR |
                                  EDMA_ERR_DEV_DCON |
                                  EDMA_ERR_DEV_CON |
                                  EDMA_ERR_OVERRUN_5 |
                                  EDMA_ERR_UNDERRUN_5 |
                                  EDMA_ERR_SELF_DIS_5 |
                                  EDMA_ERR_CRQB_PAR |
                                  EDMA_ERR_CRPB_PAR |
                                  EDMA_ERR_INTRL_PAR |
                                  EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI_OFS  = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS   = 0x14,         /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR_OFS  = 0x18,
        EDMA_REQ_Q_PTR_SHIFT    = 5,

        EDMA_RSP_Q_BASE_HI_OFS  = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS   = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS  = 0x24,         /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT    = 3,

        EDMA_CMD_OFS            = 0x28,         /* EDMA command register */
        EDMA_EN                 = (1 << 0),     /* enable EDMA */
        EDMA_DS                 = (1 << 1),     /* disable EDMA; self-negated */
        ATA_RST                 = (1 << 2),     /* reset trans/link/phy */

        EDMA_IORDY_TMOUT        = 0x34,
        EDMA_ARB_CFG            = 0x38,

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI          = (1 << 0),
        MV_HP_ERRATA_50XXB0     = (1 << 1),
        MV_HP_ERRATA_50XXB2     = (1 << 2),
        MV_HP_ERRATA_60X1B2     = (1 << 3),
        MV_HP_ERRATA_60X1C0     = (1 << 4),
        MV_HP_ERRATA_XX42A0     = (1 << 5),
        MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
        MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
        MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
        MV_PP_FLAG_HAD_A_RESET  = (1 << 2),     /* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY         = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32                  sg_addr;
        __le32                  sg_addr_hi;
        __le16                  ctrl_flags;
        __le16                  ata_cmd[11];
};

struct mv_crqb_iie {
        __le32                  addr;
        __le32                  addr_hi;
        __le32                  flags;
        __le32                  len;
        __le32                  ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16                  id;
        __le16                  flags;
        __le32                  tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32                  addr;
        __le32                  flags_size;
        __le32                  addr_hi;
        __le32                  reserved;
};

struct mv_port_priv {
        struct mv_crqb          *crqb;
        dma_addr_t              crqb_dma;
        struct mv_crpb          *crpb;
        dma_addr_t              crpb_dma;
        struct mv_sg            *sg_tbl;
        dma_addr_t              sg_tbl_dma;

        unsigned int            req_idx;
        unsigned int            resp_idx;

        u32                     pp_flags;
};

struct mv_port_signal {
        u32                     amps;
        u32                     pre;
};

struct mv_host_priv;
struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
        u32                     hp_flags;
        struct mv_port_signal   signal[8];
        const struct mv_hw_ops  *ops;
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_slave_config(struct scsi_device *sdev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);

static struct scsi_host_template mv5_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = mv_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT / 2,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = mv_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv5_scr_read,
        .scr_write              = mv5_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep_iie,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,

        .error_handler          = mv_error_handler,
        .post_internal_cmd      = mv_post_int_cmd,
        .freeze                 = mv_eh_freeze,
        .thaw                   = mv_eh_thaw,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags          = MV_COMMON_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_508x */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags          = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_604x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_608x */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                                  MV_FLAG_DUAL_HC,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1740/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1740), chip_508x },
        { PCI_VDEVICE(TTI, 0x1742), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        /* add Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        { }                     /* terminate list */
};

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_init_one,
        .remove                 = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata             = mv5_phy_errata,
        .enable_leds            = mv5_enable_leds,
        .read_preamp            = mv5_read_preamp,
        .reset_hc               = mv5_reset_hc,
        .reset_flash            = mv5_reset_flash,
        .reset_bus              = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv6_enable_leds,
        .read_preamp            = mv6_read_preamp,
        .reset_hc               = mv6_reset_hc,
        .reset_flash            = mv6_reset_flash,
        .reset_bus              = mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}
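
/* Usage sketch (illustrative only, not a new code path): the probe
 * routine calls this once after enabling the PCI device, e.g.
 *
 *      rc = pci_go_64(pdev);
 *      if (rc)
 *              return rc;
 *
 * preferring a 64-bit DMA mask and falling back to 32-bit for both the
 * streaming and the consistent mask before giving up.
 */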

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}
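
/* Note: the dummy readl() above forces the write out of any PCI
 * write-posting buffers before we continue.  The driver uses
 * writelfl() wherever ordering matters (kicking queue pointers,
 * clearing IRQ cause registers); plain writel() suffices elsewhere.
 */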

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
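
/* Worked example of the address math above, using the enum constants:
 * port 5 is hard port 1 (5 & MV_PORT_MASK) on HC 1 (5 >> MV_PORT_HC_SHIFT),
 * so mv_port_base() yields
 *
 *      base + MV_SATAHC0_REG_BASE (0x20000)
 *           + 1 * MV_SATAHC_REG_SZ (0x10000)      hop to HC 1
 *           + MV_SATAHC_ARBTR_REG_SZ (0x2000)     skip the arbiter block
 *           + 1 * MV_PORT_REG_SZ (0x2000)         hop to hard port 1
 *           = base + 0x34000
 */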

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static int mv_slave_config(struct scsi_device *sdev)
{
        int rc = ata_scsi_slave_config(sdev);
        if (rc)
                return rc;

        blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);

        return 0;       /* scsi layer doesn't check return value, sigh */
}
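
/* The MV_MAX_SG_CT / 2 cap here (mirrored by .sg_tablesize in the SHTs
 * above) is presumably defensive: mv_fill_sg() may emit more than one
 * ePRD per s/g element when splitting on 64KB boundaries, so only half
 * of the 176 ePRD slots are advertised to the SCSI layer.
 */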

static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crqb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        /*
         * initialize response queue
         */
        index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crpb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
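
/* Layout note, as the masks and shifts above imply: each IN/OUT pointer
 * register does double duty.  The high bits (EDMA_REQ_Q_BASE_LO_MASK or
 * EDMA_RSP_Q_BASE_LO_MASK) carry the low half of the queue base address,
 * while the 5-bit slot index sits just below them, starting at
 * EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT.  That is why the index
 * is ORed into the same word as the masked queue base above.
 */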

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
                         struct mv_port_priv *pp)
{
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                /* clear EDMA event indicators, if any */
                writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

                mv_set_edma_ptrs(base, hpriv, pp);

                writelfl(EDMA_EN, base + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i, err = 0;

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                /* Disable EDMA if active.   The disable bit auto clears.
                 */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(reg & EDMA_EN))
                        break;

                udelay(100);
        }

        if (reg & EDMA_EN) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                err = -EIO;
        }

        return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&ap->host->lock, flags);
        rc = __mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (0 > port) {
                start_hc = start_port = 0;
                num_ports = 8;          /* shld be benign for 4 port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base+0xc00, 0x3c);
        mv_dump_mem(mmio_base+0xd00, 0x34);
        mv_dump_mem(mmio_base+0xf00, 0x4);
        mv_dump_mem(mmio_base+0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base+0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}
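
/* With libata's SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2) the arithmetic above lands on consecutive words
 * 0x300, 0x304 and 0x308, matching the "ctrl, err regs follow status"
 * note on SATA_STATUS_OFS; only SCR_ACTIVE sits apart at 0x350.
 */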

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                writelfl(val, mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
                        void __iomem *port_mmio)
{
        u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

        /* set up non-NCQ EDMA configuration */
        cfg &= ~(1 << 9);       /* disable eQue */

        if (IS_GEN_I(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= (1 << 8);        /* enab config burst size mask */
        }

        else if (IS_GEN_II(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
                cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
        }

        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
                cfg &= ~(1 << 19);      /* dis 128-entry queue (for now?) */
                cfg |= (1 << 18);       /* enab early completion */
                cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
                cfg &= ~(1 << 16);      /* dis FIS-based switching (for now) */
                cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
        }

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        void *mem;
        dma_addr_t mem_dma;
        unsigned long flags;
        int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                return rc;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
        pp->crqb_dma = mem_dma;
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
        pp->crpb_dma = mem_dma;
        mem += MV_CRPB_Q_SZ;
        mem_dma += MV_CRPB_Q_SZ;

        /* Third item:
         * Table of scatter-gather descriptors (ePRD), 16 bytes each
         */
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        spin_lock_irqsave(&ap->host->lock, flags);

        mv_edma_cfg(ap, hpriv, port_mmio);

        mv_set_edma_ptrs(port_mmio, hpriv, pp);

        spin_unlock_irqrestore(&ap->host->lock, flags);

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        ap->private_data = pp;
        return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct mv_sg *mv_sg;

        mv_sg = pp->sg_tbl;
        ata_for_each_sg(sg, qc) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                while (sg_len) {
                        u32 offset = addr & 0xffff;
                        u32 len = sg_len;

                        if ((offset + sg_len > 0x10000))
                                len = 0x10000 - offset;

                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                        sg_len -= len;
                        addr += len;

                        if (!sg_len && ata_sg_is_last(sg, qc))
                                mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

                        mv_sg++;
                }
        }
}
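
/* Worked example of the split above: a 0x3000-byte segment at DMA
 * address 0x4f800 straddles a 64KB boundary, so it becomes two ePRDs,
 * 0x800 bytes up to the boundary and then the remaining 0x2800 bytes.
 * With .dma_boundary = MV_DMA_BOUNDARY the block layer should never
 * hand us such a segment, so the loop is defensive; it also keeps each
 * length within the 16-bit field of flags_size.
 */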

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                  (last ? CRQB_CMD_LAST : 0);
        *cmdw = cpu_to_le16(tmp);
}
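
/* Example: mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1) puts the
 * opcode in bits 7:0, the shadow register address just above it
 * (CRQB_CMD_ADDR_SHIFT = 8), ORs in the CRQB_CMD_CS selector, and sets
 * CRQB_CMD_LAST (bit 15) to mark this as the final word, as done for
 * the trailing command write in mv_qc_prep() below.
 */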

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        __le16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
        unsigned in_index;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* 50xx appears to ignore this*/

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 bytes...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif                          /* FIXME: remove this line when NCQ added */
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);    /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
        unsigned in_index;
        u32 flags = 0;

        if (qc->tf.protocol != ATA_PROT_DMA)
                return;

        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;

        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;
        flags |= qc->tag << CRQB_IOID_SHIFT;    /* "I/O Id" is -really-
                                                   what we use as our tag */

        /* get current queue index from software */
        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);

        tf = &qc->tf;
        crqb->ata_cmd[0] = cpu_to_le32(
                        (tf->command << 16) |
                        (tf->feature << 24)
                );
        crqb->ata_cmd[1] = cpu_to_le32(
                        (tf->lbal << 0) |
                        (tf->lbam << 8) |
                        (tf->lbah << 16) |
                        (tf->device << 24)
                );
        crqb->ata_cmd[2] = cpu_to_le32(
                        (tf->hob_lbal << 0) |
                        (tf->hob_lbam << 8) |
                        (tf->hob_lbah << 16) |
                        (tf->hob_feature << 24)
                );
        crqb->ata_cmd[3] = cpu_to_le32(
                        (tf->nsect << 0) |
                        (tf->hob_nsect << 8)
                );

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        u32 in_index;

        if (qc->tf.protocol != ATA_PROT_DMA) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                __mv_stop_dma(ap);
                return ata_qc_issue_prot(qc);
        }

        mv_start_dma(port_mmio, hpriv, pp);

        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

        /* until we do queuing, the queue should be empty at this point */
        WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
                >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        pp->req_idx++;

        in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

        /* and write the request in pointer to kick the EDMA to life */
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}
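
/* Ring handshake note: software owns req_idx as the producer index,
 * the hardware exports its consumer index through
 * EDMA_REQ_Q_OUT_PTR_OFS, and the WARN_ON above checks that the two
 * agree while the effective queue depth is still one.  Writing the
 * bumped producer index (together with the queue base low bits) is
 * what actually starts the EDMA engine on this request.
 */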

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 edma_err_cause, eh_freeze_mask, serr = 0;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
        unsigned int action = 0, err_mask = 0;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        if (!edma_enabled) {
                /* just a guess: do we need to do this? should we
                 * expand this, and do it in all cases?
                 */
                sata_scr_read(&ap->link, SCR_ERROR, &serr);
                sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
        }

        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

        /*
         * all generations share these EDMA error cause bits
         */

        if (edma_err_cause & EDMA_ERR_DEV)
                err_mask |= AC_ERR_DEV;
        if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
                        EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
                        EDMA_ERR_INTRL_PAR)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_HARDRESET;
                ata_ehi_push_desc(ehi, "parity error");
        }
        if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
                ata_ehi_hotplugged(ehi);
                ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
                        "dev disconnect" : "dev connect");
        }

        if (IS_GEN_I(hpriv)) {
                eh_freeze_mask = EDMA_EH_FREEZE_5;

                if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
                        struct mv_port_priv *pp = ap->private_data;
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
                }
        } else {
                eh_freeze_mask = EDMA_EH_FREEZE;

                if (edma_err_cause & EDMA_ERR_SELF_DIS) {
                        struct mv_port_priv *pp = ap->private_data;
                        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
                        ata_ehi_push_desc(ehi, "EDMA self-disable");
                }

                if (edma_err_cause & EDMA_ERR_SERR) {
                        sata_scr_read(&ap->link, SCR_ERROR, &serr);
                        sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
                        err_mask = AC_ERR_ATA_BUS;
                        action |= ATA_EH_HARDRESET;
                }
        }

        /* Clear EDMA now that SERR cleanup done */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        if (!err_mask) {
                err_mask = AC_ERR_OTHER;
                action |= ATA_EH_HARDRESET;
        }

        ehi->serror |= serr;
        ehi->action |= action;

        if (qc)
                qc->err_mask |= err_mask;
        else
                ehi->err_mask |= err_mask;

        if (edma_err_cause & eh_freeze_mask)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        u8 ata_status;

        /* ignore spurious intr if drive still BUSY */
        ata_status = readb(ap->ioaddr.status_addr);
        if (unlikely(ata_status & ATA_BUSY))
                return;

        /* get active ATA command */
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (unlikely(!qc))                      /* no active tag */
                return;
        if (qc->tf.flags & ATA_TFLAG_POLLING)   /* polling; we don't own qc */
                return;

        /* and finally, complete the ATA command */
        qc->err_mask |= ac_err_mask(ata_status);
        ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp = ap->private_data;
        struct ata_queued_cmd *qc;
        u32 out_index, in_index;
        bool work_done = false;

        /* get h/w response queue pointer */
        in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
                        >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        while (1) {
                u16 status;
                unsigned int tag;

                /* get s/w response queue last-read pointer, and compare */
                out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
                if (in_index == out_index)
                        break;

                /* 50xx: get active ATA command */
                if (IS_GEN_I(hpriv))
                        tag = ap->link.active_tag;

                /* Gen II/IIE: get active ATA command via tag, to enable
                 * support for queueing.  this works transparently for
                 * queued and non-queued modes.
                 */
                else if (IS_GEN_II(hpriv))
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
                                >> CRPB_IOID_SHIFT_6) & 0x3f;

                else /* IS_GEN_IIE */
                        tag = (le16_to_cpu(pp->crpb[out_index].id)
                                >> CRPB_IOID_SHIFT_7) & 0x3f;

                qc = ata_qc_from_tag(ap, tag);

                /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
                 * bits (WARNING: might not necessarily be associated
                 * with this command), which -should- be clear
                 * if all is well
                 */
                status = le16_to_cpu(pp->crpb[out_index].flags);
                if (unlikely(status & 0xff)) {
                        mv_err_intr(ap, qc);
                        return;
                }

                /* and finally, complete the ATA command */
                if (qc) {
                        qc->err_mask |=
                                ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
                        ata_qc_complete(qc);
                }

                /* advance software response queue pointer, to
                 * indicate (after the loop completes) to hardware
                 * that we have consumed a response queue entry.
                 */
                work_done = true;
                pp->resp_idx++;
        }

        if (work_done)
                writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
                         (out_index << EDMA_RSP_Q_PTR_SHIFT),
                         port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        u32 hc_irq_cause;
        int port, port0;

        if (hc == 0)
                port0 = 0;
        else
                port0 = MV_PORTS_PER_HC;

        /* we'll need the HC success int register in most cases */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (!hc_irq_cause)
                return;

        writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
                hc, relevant, hc_irq_cause);

        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
                struct ata_port *ap = host->ports[port];
                struct mv_port_priv *pp = ap->private_data;
                int have_err_bits, hard_port, shift;

                if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
                        continue;

                shift = port << 1;              /* (port * 2) */
                if (port >= MV_PORTS_PER_HC) {
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
                have_err_bits = ((PORT0_ERR << shift) & relevant);

                if (unlikely(have_err_bits)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
                        if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
                                continue;

                        mv_err_intr(ap, qc);
                        continue;
                }

                hard_port = mv_hardport_from_port(port); /* range 0..3 */

                if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                        if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
                                mv_intr_edma(ap);
                } else {
                        if ((DEV_IRQ << hard_port) & hc_irq_cause)
                                mv_intr_pio(ap);
                }
        }
        VPRINTK("EXIT\n");
}
1660
bdd4ddde
JG
1661static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1662{
1663 struct ata_port *ap;
1664 struct ata_queued_cmd *qc;
1665 struct ata_eh_info *ehi;
1666 unsigned int i, err_mask, printed = 0;
1667 u32 err_cause;
1668
1669 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1670
1671 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1672 err_cause);
1673
1674 DPRINTK("All regs @ PCI error\n");
1675 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1676
1677 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1678
1679 for (i = 0; i < host->n_ports; i++) {
1680 ap = host->ports[i];
936fd732 1681 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1682 ehi = &ap->link.eh_info;
1683 ata_ehi_clear_desc(ehi);
1684 if (!printed++)
1685 ata_ehi_push_desc(ehi,
1686 "PCI err cause 0x%08x", err_cause);
1687 err_mask = AC_ERR_HOST_BUS;
1688 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1689 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1690 if (qc)
1691 qc->err_mask |= err_mask;
1692 else
1693 ehi->err_mask |= err_mask;
1694
1695 ata_port_freeze(ap);
1696 }
1697 }
1698}
1699
05b308e1 1700/**
c5d3e45a 1701 * mv_interrupt - Main interrupt event handler
1702 * @irq: unused
1703 * @dev_instance: private data; in this case the host structure
1704 *
 1705 * Read the read-only register to determine if any host
 1706 * controllers have pending interrupts. If so, call the lower-level
 1707 * routine to handle them. Also check for PCI errors, which are only
1708 * reported here.
1709 *
8b260248 1710 * LOCKING:
cca3974e 1711 * This routine holds the host lock while processing pending
1712 * interrupts.
1713 */
7d12e780 1714static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1715{
cca3974e 1716 struct ata_host *host = dev_instance;
20f733e7 1717 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1718 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1719 u32 irq_stat;
1720
20f733e7 1721 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1722
1723 /* check the cases where we either have nothing pending or have read
1724 * a bogus register value which can indicate HW removal or PCI fault
1725 */
35177265 1726 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1727 return IRQ_NONE;
20f733e7 1728
1729 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1730 spin_lock(&host->lock);
20f733e7 1731
1732 if (unlikely(irq_stat & PCI_ERR)) {
1733 mv_pci_error(host, mmio);
1734 handled = 1;
1735 goto out_unlock; /* skip all other HC irq handling */
1736 }
1737
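 /* irq_stat packs one HC0_IRQ_PEND-sized block per host controller,
  * each shifted up by HC_SHIFT; any block with pending bits gets
  * handed to mv_host_intr() for per-port dispatch.
  */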
1738 for (hc = 0; hc < n_hcs; hc++) {
1739 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1740 if (relevant) {
cca3974e 1741 mv_host_intr(host, relevant, hc);
bdd4ddde 1742 handled = 1;
1743 }
1744 }
615ab953 1745
bdd4ddde 1746out_unlock:
cca3974e 1747 spin_unlock(&host->lock);
1748
1749 return IRQ_RETVAL(handled);
1750}
1751
1752static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1753{
1754 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1755 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1756
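 /* per-phy blocks sit at 0x100, 0x200, ... within the HC base, so
  * e.g. port 5 (hardport 1 on HC1) resolves to hc_mmio + 0x200
  */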
1757 return hc_mmio + ofs;
1758}
1759
1760static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1761{
1762 unsigned int ofs;
1763
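 /* SStatus, SError and SControl are consecutive u32s in the phy
  * block, in the same order as the libata SCR_* indices, so the
  * offset is simply index * 4; anything else (e.g. SCR_ACTIVE) is
  * unsupported and signalled with an all-ones offset.
  */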
1764 switch (sc_reg_in) {
1765 case SCR_STATUS:
1766 case SCR_ERROR:
1767 case SCR_CONTROL:
1768 ofs = sc_reg_in * sizeof(u32);
1769 break;
1770 default:
1771 ofs = 0xffffffffU;
1772 break;
1773 }
1774 return ofs;
1775}
1776
da3dbb17 1777static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1778{
1779 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1780 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1781 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1782
1783 if (ofs != 0xffffffffU) {
1784 *val = readl(addr + ofs);
1785 return 0;
1786 } else
1787 return -EINVAL;
1788}
1789
da3dbb17 1790static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1791{
1792 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1793 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1794 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1795
da3dbb17 1796 if (ofs != 0xffffffffU) {
0d5ff566 1797 writelfl(val, addr + ofs);
1798 return 0;
1799 } else
1800 return -EINVAL;
1801}
1802
1803static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1804{
1805 int early_5080;
1806
44c10138 1807 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1808
1809 if (!early_5080) {
1810 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1811 tmp |= (1 << 0);
1812 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1813 }
1814
1815 mv_reset_pci_bus(pdev, mmio);
1816}
1817
1818static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1819{
1820 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1821}
1822
47c2b677 1823static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1824 void __iomem *mmio)
1825{
1826 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1827 u32 tmp;
1828
1829 tmp = readl(phy_mmio + MV5_PHY_MODE);
1830
1831 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1832 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1833}
1834
47c2b677 1835static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1836{
1837 u32 tmp;
1838
1839 writel(0, mmio + MV_GPIO_PORT_CTL);
1840
1841 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1842
1843 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1844 tmp |= ~(1 << 0);
1845 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1846}
1847
1848static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1849 unsigned int port)
bca1c4eb 1850{
1851 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1852 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1853 u32 tmp;
1854 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1855
1856 if (fix_apm_sq) {
1857 tmp = readl(phy_mmio + MV5_LT_MODE);
1858 tmp |= (1 << 19);
1859 writel(tmp, phy_mmio + MV5_LT_MODE);
1860
1861 tmp = readl(phy_mmio + MV5_PHY_CTL);
1862 tmp &= ~0x3;
1863 tmp |= 0x1;
1864 writel(tmp, phy_mmio + MV5_PHY_CTL);
1865 }
1866
1867 tmp = readl(phy_mmio + MV5_PHY_MODE);
1868 tmp &= ~mask;
1869 tmp |= hpriv->signal[port].pre;
1870 tmp |= hpriv->signal[port].amps;
1871 writel(tmp, phy_mmio + MV5_PHY_MODE);
1872}
1873
1874
1875#undef ZERO
1876#define ZERO(reg) writel(0, port_mmio + (reg))
1877static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1878 unsigned int port)
1879{
1880 void __iomem *port_mmio = mv_port_base(mmio, port);
1881
1882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1883
1884 mv_channel_reset(hpriv, mmio, port);
1885
1886 ZERO(0x028); /* command */
1887 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1888 ZERO(0x004); /* timer */
1889 ZERO(0x008); /* irq err cause */
1890 ZERO(0x00c); /* irq err mask */
1891 ZERO(0x010); /* rq bah */
1892 ZERO(0x014); /* rq inp */
1893 ZERO(0x018); /* rq outp */
1894 ZERO(0x01c); /* respq bah */
1895 ZERO(0x024); /* respq outp */
1896 ZERO(0x020); /* respq inp */
1897 ZERO(0x02c); /* test control */
1898 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1899}
1900#undef ZERO
1901
1902#define ZERO(reg) writel(0, hc_mmio + (reg))
1903static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 unsigned int hc)
47c2b677 1905{
1906 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1907 u32 tmp;
1908
1909 ZERO(0x00c);
1910 ZERO(0x010);
1911 ZERO(0x014);
1912 ZERO(0x018);
1913
1914 tmp = readl(hc_mmio + 0x20);
1915 tmp &= 0x1c1c1c1c;
1916 tmp |= 0x03030303;
1917 writel(tmp, hc_mmio + 0x20);
1918}
1919#undef ZERO
1920
1921static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1922 unsigned int n_hc)
1923{
1924 unsigned int hc, port;
1925
1926 for (hc = 0; hc < n_hc; hc++) {
1927 for (port = 0; port < MV_PORTS_PER_HC; port++)
1928 mv5_reset_hc_port(hpriv, mmio,
1929 (hc * MV_PORTS_PER_HC) + port);
1930
1931 mv5_reset_one_hc(hpriv, mmio, hc);
1932 }
1933
1934 return 0;
1935}
1936
1937#undef ZERO
1938#define ZERO(reg) writel(0, mmio + (reg))
1939static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1940{
1941 u32 tmp;
1942
1943 tmp = readl(mmio + MV_PCI_MODE);
1944 tmp &= 0xff00ffff;
1945 writel(tmp, mmio + MV_PCI_MODE);
1946
1947 ZERO(MV_PCI_DISC_TIMER);
1948 ZERO(MV_PCI_MSI_TRIGGER);
1949 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1950 ZERO(HC_MAIN_IRQ_MASK_OFS);
1951 ZERO(MV_PCI_SERR_MASK);
1952 ZERO(PCI_IRQ_CAUSE_OFS);
1953 ZERO(PCI_IRQ_MASK_OFS);
1954 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1955 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1956 ZERO(MV_PCI_ERR_ATTRIBUTE);
1957 ZERO(MV_PCI_ERR_COMMAND);
1958}
1959#undef ZERO
1960
1961static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1962{
1963 u32 tmp;
1964
1965 mv5_reset_flash(hpriv, mmio);
1966
1967 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1968 tmp &= 0x3;
1969 tmp |= (1 << 5) | (1 << 6);
1970 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1971}
1972
1973/**
1974 * mv6_reset_hc - Perform the 6xxx global soft reset
1975 * @mmio: base address of the HBA
1976 *
1977 * This routine only applies to 6xxx parts.
1978 *
1979 * LOCKING:
1980 * Inherited from caller.
1981 */
1982static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1983 unsigned int n_hc)
1984{
1985 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1986 int i, rc = 0;
1987 u32 t;
1988
1989 /* Following procedure defined in PCI "main command and status
1990 * register" table.
1991 */
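 /* In outline: stop the PCI master and wait for it to drain, assert
  * GLOB_SFT_RST, then clear both bits so the chip comes out of reset
  * with the master re-enabled.
  */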
1992 t = readl(reg);
1993 writel(t | STOP_PCI_MASTER, reg);
1994
1995 for (i = 0; i < 1000; i++) {
1996 udelay(1);
1997 t = readl(reg);
1998 if (PCI_MASTER_EMPTY & t) {
1999 break;
2000 }
2001 }
2002 if (!(PCI_MASTER_EMPTY & t)) {
2003 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2004 rc = 1;
2005 goto done;
2006 }
2007
2008 /* set reset */
2009 i = 5;
2010 do {
2011 writel(t | GLOB_SFT_RST, reg);
2012 t = readl(reg);
2013 udelay(1);
2014 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2015
2016 if (!(GLOB_SFT_RST & t)) {
2017 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2018 rc = 1;
2019 goto done;
2020 }
2021
2022 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2023 i = 5;
2024 do {
2025 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2026 t = readl(reg);
2027 udelay(1);
2028 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2029
2030 if (GLOB_SFT_RST & t) {
2031 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2032 rc = 1;
2033 }
2034done:
2035 return rc;
2036}
2037
47c2b677 2038static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2039 void __iomem *mmio)
2040{
2041 void __iomem *port_mmio;
2042 u32 tmp;
2043
2044 tmp = readl(mmio + MV_RESET_CFG);
2045 if ((tmp & (1 << 0)) == 0) {
47c2b677 2046 hpriv->signal[idx].amps = 0x7 << 8;
2047 hpriv->signal[idx].pre = 0x1 << 5;
2048 return;
2049 }
2050
2051 port_mmio = mv_port_base(mmio, idx);
2052 tmp = readl(port_mmio + PHY_MODE2);
2053
2054 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2055 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2056}
2057
47c2b677 2058static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2059{
47c2b677 2060 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2061}
2062
c9d39130 2063static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2064 unsigned int port)
bca1c4eb 2065{
2066 void __iomem *port_mmio = mv_port_base(mmio, port);
2067
bca1c4eb 2068 u32 hp_flags = hpriv->hp_flags;
2069 int fix_phy_mode2 =
2070 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2071 int fix_phy_mode4 =
2072 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2073 u32 m2, tmp;
2074
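 /* The workaround below drives PHY_MODE2 bit 16 low and bit 31 high,
  * then clears both again, allowing 200us of settle time after each
  * write.
  */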
2075 if (fix_phy_mode2) {
2076 m2 = readl(port_mmio + PHY_MODE2);
2077 m2 &= ~(1 << 16);
2078 m2 |= (1 << 31);
2079 writel(m2, port_mmio + PHY_MODE2);
2080
2081 udelay(200);
2082
2083 m2 = readl(port_mmio + PHY_MODE2);
2084 m2 &= ~((1 << 16) | (1 << 31));
2085 writel(m2, port_mmio + PHY_MODE2);
2086
2087 udelay(200);
2088 }
2089
2090 /* who knows what this magic does */
2091 tmp = readl(port_mmio + PHY_MODE3);
2092 tmp &= ~0x7F800000;
2093 tmp |= 0x2A800000;
2094 writel(tmp, port_mmio + PHY_MODE3);
2095
2096 if (fix_phy_mode4) {
47c2b677 2097 u32 m4;
2098
2099 m4 = readl(port_mmio + PHY_MODE4);
2100
2101 if (hp_flags & MV_HP_ERRATA_60X1B2)
2102 tmp = readl(port_mmio + 0x310);
2103
2104 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2105
2106 writel(m4, port_mmio + PHY_MODE4);
2107
2108 if (hp_flags & MV_HP_ERRATA_60X1B2)
2109 writel(tmp, port_mmio + 0x310);
2110 }
2111
2112 /* Revert values of pre-emphasis and signal amps to the saved ones */
2113 m2 = readl(port_mmio + PHY_MODE2);
2114
2115 m2 &= ~MV_M2_PREAMP_MASK;
2116 m2 |= hpriv->signal[port].amps;
2117 m2 |= hpriv->signal[port].pre;
47c2b677 2118 m2 &= ~(1 << 16);
bca1c4eb 2119
2120 /* according to mvSata 3.6.1, some IIE values are fixed */
2121 if (IS_GEN_IIE(hpriv)) {
2122 m2 &= ~0xC30FF01F;
2123 m2 |= 0x0000900F;
2124 }
2125
2126 writel(m2, port_mmio + PHY_MODE2);
2127}
2128
2129static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2130 unsigned int port_no)
2131{
2132 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2133
2134 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2135
ee9ccdf7 2136 if (IS_GEN_II(hpriv)) {
c9d39130 2137 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2138 ifctl |= (1 << 7); /* enable gen2i speed */
2139 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2140 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2141 }
2142
2143 udelay(25); /* allow reset propagation */
2144
2145 /* Spec never mentions clearing the bit. Marvell's driver does
2146 * clear the bit, however.
2147 */
2148 writelfl(0, port_mmio + EDMA_CMD_OFS);
2149
2150 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2151
ee9ccdf7 2152 if (IS_GEN_I(hpriv))
2153 mdelay(1);
2154}
2155
05b308e1 2156/**
bdd4ddde 2157 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2158 * @ap: ATA channel to manipulate
2159 *
 2160 * Part of this is taken from __sata_phy_reset, though this
 2161 * version sleeps via msleep() between the reset steps.
2162 *
2163 * LOCKING:
 2164 * Inherited from caller. This routine sleeps, so it must not
 2165 * be called from interrupt context.
31961943 2166 */
2167static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2168 unsigned long deadline)
20f733e7 2169{
095fec88 2170 struct mv_port_priv *pp = ap->private_data;
cca3974e 2171 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2172 void __iomem *port_mmio = mv_ap_base(ap);
2173 int retry = 5;
2174 u32 sstatus;
2175
2176 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2177
2178#ifdef DEBUG
2179 {
2180 u32 sstatus, serror, scontrol;
2181
2182 mv_scr_read(ap, SCR_STATUS, &sstatus);
2183 mv_scr_read(ap, SCR_ERROR, &serror);
2184 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2185 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
 2186 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2187 }
2188#endif
20f733e7 2189
2190 /* Issue COMRESET via SControl */
2191comreset_retry:
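 /* SControl 0x301 = DET 0x1 (begin interface initialization) with
  * IPM 0x3 (partial/slumber power states disabled); writing 0x300
  * afterwards releases DET so the link can renegotiate.
  */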
936fd732 2192 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2193 msleep(1);
22374677 2194
936fd732 2195 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2196 msleep(20);
22374677 2197
31961943 2198 do {
936fd732 2199 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2200 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2201 break;
22374677 2202
bdd4ddde 2203 msleep(1);
c5d3e45a 2204 } while (time_before(jiffies, deadline));
20f733e7 2205
22374677 2206 /* work around errata */
ee9ccdf7 2207 if (IS_GEN_II(hpriv) &&
2208 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2209 (retry-- > 0))
2210 goto comreset_retry;
095fec88 2211
2212#ifdef DEBUG
2213 {
2214 u32 sstatus, serror, scontrol;
2215
2216 mv_scr_read(ap, SCR_STATUS, &sstatus);
2217 mv_scr_read(ap, SCR_ERROR, &serror);
2218 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2219 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2220 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2221 }
2222#endif
31961943 2223
936fd732 2224 if (ata_link_offline(&ap->link)) {
bdd4ddde 2225 *class = ATA_DEV_NONE;
2226 return;
2227 }
2228
2229 /* even after SStatus reflects that device is ready,
 2230 * it seems to take a while for the link to be fully
2231 * established (and thus Status no longer 0x80/0x7F),
2232 * so we poll a bit for that, here.
2233 */
2234 retry = 20;
2235 while (1) {
2236 u8 drv_stat = ata_check_status(ap);
2237 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2238 break;
bdd4ddde 2239 msleep(500);
2240 if (retry-- <= 0)
2241 break;
2242 if (time_after(jiffies, deadline))
2243 break;
2244 }
2245
2246 /* FIXME: if we passed the deadline, the following
2247 * code probably produces an invalid result
2248 */
20f733e7 2249
2250 /* finally, read device signature from TF registers */
2251 *class = ata_dev_try_classify(ap, 0, NULL);
2252
2253 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2254
bdd4ddde 2255 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2256
bca1c4eb 2257 VPRINTK("EXIT\n");
2258}
2259
cc0680a5 2260static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2261{
cc0680a5 2262 struct ata_port *ap = link->ap;
bdd4ddde 2263 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2264 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2265 int rc;
0ea9e179 2266
2267 rc = mv_stop_dma(ap);
2268 if (rc)
2269 ehc->i.action |= ATA_EH_HARDRESET;
2270
2271 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2272 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2273 ehc->i.action |= ATA_EH_HARDRESET;
2274 }
2275
2276 /* if we're about to do hardreset, nothing more to do */
2277 if (ehc->i.action & ATA_EH_HARDRESET)
2278 return 0;
2279
cc0680a5 2280 if (ata_link_online(link))
2281 rc = ata_wait_ready(ap, deadline);
2282 else
2283 rc = -ENODEV;
2284
2285 return rc;
2286}
2287
cc0680a5 2288static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2289 unsigned long deadline)
31961943 2290{
cc0680a5 2291 struct ata_port *ap = link->ap;
bdd4ddde 2292 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2293 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2294
bdd4ddde 2295 mv_stop_dma(ap);
31961943 2296
bdd4ddde 2297 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2298
2299 mv_phy_reset(ap, class, deadline);
2300
2301 return 0;
2302}
2303
cc0680a5 2304static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2305{
cc0680a5 2306 struct ata_port *ap = link->ap;
2307 u32 serr;
2308
2309 /* print link status */
cc0680a5 2310 sata_print_link_status(link);
31961943 2311
bdd4ddde 2312 /* clear SError */
2313 sata_scr_read(link, SCR_ERROR, &serr);
2314 sata_scr_write_flush(link, SCR_ERROR, serr);
2315
2316 /* bail out if no device is present */
2317 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2318 DPRINTK("EXIT, no device\n");
2319 return;
9b358e30 2320 }
2321
2322 /* set up device control */
2323 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2324}
2325
2326static void mv_error_handler(struct ata_port *ap)
2327{
2328 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2329 mv_hardreset, mv_postreset);
2330}
2331
2332static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2333{
2334 mv_stop_dma(qc->ap);
2335}
2336
2337static void mv_eh_freeze(struct ata_port *ap)
2338{
2339 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2340 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2341 u32 tmp, mask;
2342 unsigned int shift;
2343
2344 /* FIXME: handle coalescing completion events properly */
2345
2346 shift = ap->port_no * 2;
2347 if (hc > 0)
2348 shift++;
2349
2350 mask = 0x3 << shift;
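 /* the two mask bits cover this port's err/done pair in the main
  * IRQ mask, e.g. port 5 -> shift 11 -> bits 11 and 12, mirroring
  * the layout decoded by mv_host_intr()
  */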
2351
2352 /* disable assertion of portN err, done events */
2353 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2354 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2355}
2356
2357static void mv_eh_thaw(struct ata_port *ap)
2358{
2359 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2360 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2361 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2362 void __iomem *port_mmio = mv_ap_base(ap);
2363 u32 tmp, mask, hc_irq_cause;
2364 unsigned int shift, hc_port_no = ap->port_no;
2365
2366 /* FIXME: handle coalescing completion events properly */
2367
2368 shift = ap->port_no * 2;
2369 if (hc > 0) {
2370 shift++;
2371 hc_port_no -= 4;
2372 }
2373
2374 mask = 0x3 << shift;
2375
2376 /* clear EDMA errors on this port */
2377 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2378
2379 /* clear pending irq events */
2380 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2381 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2382 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2383 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2384
2385 /* enable assertion of portN err, done events */
2386 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2387 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2388}
2389
2390/**
2391 * mv_port_init - Perform some early initialization on a single port.
2392 * @port: libata data structure storing shadow register addresses
2393 * @port_mmio: base address of the port
2394 *
2395 * Initialize shadow register mmio addresses, clear outstanding
2396 * interrupts on the port, and unmask interrupts for the future
2397 * start of the port.
2398 *
2399 * LOCKING:
2400 * Inherited from caller.
2401 */
31961943 2402static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2403{
0d5ff566 2404 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2405 unsigned serr_ofs;
2406
8b260248 2407 /* PIO related setup
2408 */
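 /* the shadow taskfile registers live in port MMIO space as
  * consecutive u32 slots, hence the sizeof(u32) scaling of each
  * ATA_REG_* index below
  */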
2409 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2410 port->error_addr =
2411 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2412 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2413 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2414 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2415 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2416 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2417 port->status_addr =
2418 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2419 /* special case: control/altstatus doesn't have ATA_REG_ address */
2420 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2421
2422 /* unused: */
8d9db2d2 2423 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2424
2425 /* Clear any currently outstanding port interrupt conditions */
2426 serr_ofs = mv_scr_offset(SCR_ERROR);
2427 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2428 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2429
20f733e7 2430 /* unmask all EDMA error interrupts */
31961943 2431 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2432
8b260248 2433 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2434 readl(port_mmio + EDMA_CFG_OFS),
2435 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2436 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2437}
2438
4447d351 2439static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2440{
2441 struct pci_dev *pdev = to_pci_dev(host->dev);
2442 struct mv_host_priv *hpriv = host->private_data;
2443 u32 hp_flags = hpriv->hp_flags;
2444
 bca1c4eb 2445 switch (board_idx) {
2446 case chip_5080:
2447 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2448 hp_flags |= MV_HP_GEN_I;
47c2b677 2449
44c10138 2450 switch (pdev->revision) {
2451 case 0x1:
2452 hp_flags |= MV_HP_ERRATA_50XXB0;
2453 break;
2454 case 0x3:
2455 hp_flags |= MV_HP_ERRATA_50XXB2;
2456 break;
2457 default:
2458 dev_printk(KERN_WARNING, &pdev->dev,
2459 "Applying 50XXB2 workarounds to unknown rev\n");
2460 hp_flags |= MV_HP_ERRATA_50XXB2;
2461 break;
2462 }
2463 break;
2464
2465 case chip_504x:
2466 case chip_508x:
47c2b677 2467 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2468 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2469
44c10138 2470 switch (pdev->revision) {
2471 case 0x0:
2472 hp_flags |= MV_HP_ERRATA_50XXB0;
2473 break;
2474 case 0x3:
2475 hp_flags |= MV_HP_ERRATA_50XXB2;
2476 break;
2477 default:
2478 dev_printk(KERN_WARNING, &pdev->dev,
2479 "Applying B2 workarounds to unknown rev\n");
2480 hp_flags |= MV_HP_ERRATA_50XXB2;
2481 break;
2482 }
2483 break;
2484
2485 case chip_604x:
2486 case chip_608x:
47c2b677 2487 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2488 hp_flags |= MV_HP_GEN_II;
47c2b677 2489
44c10138 2490 switch (pdev->revision) {
2491 case 0x7:
2492 hp_flags |= MV_HP_ERRATA_60X1B2;
2493 break;
2494 case 0x9:
2495 hp_flags |= MV_HP_ERRATA_60X1C0;
2496 break;
2497 default:
2498 dev_printk(KERN_WARNING, &pdev->dev,
2499 "Applying B2 workarounds to unknown rev\n");
2500 hp_flags |= MV_HP_ERRATA_60X1B2;
2501 break;
2502 }
2503 break;
2504
2505 case chip_7042:
2506 case chip_6042:
2507 hpriv->ops = &mv6xxx_ops;
2508 hp_flags |= MV_HP_GEN_IIE;
2509
44c10138 2510 switch (pdev->revision) {
2511 case 0x0:
2512 hp_flags |= MV_HP_ERRATA_XX42A0;
2513 break;
2514 case 0x1:
2515 hp_flags |= MV_HP_ERRATA_60X1C0;
2516 break;
2517 default:
2518 dev_printk(KERN_WARNING, &pdev->dev,
2519 "Applying 60X1C0 workarounds to unknown rev\n");
2520 hp_flags |= MV_HP_ERRATA_60X1C0;
2521 break;
2522 }
2523 break;
2524
2525 default:
2526 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2527 return 1;
2528 }
2529
2530 hpriv->hp_flags = hp_flags;
2531
2532 return 0;
2533}
2534
05b308e1 2535/**
47c2b677 2536 * mv_init_host - Perform some early initialization of the host.
2537 * @host: ATA host to initialize
2538 * @board_idx: controller index
2539 *
2540 * If possible, do an early global reset of the host. Then do
2541 * our port init and clear/unmask all/relevant host interrupts.
2542 *
2543 * LOCKING:
2544 * Inherited from caller.
2545 */
4447d351 2546static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2547{
2548 int rc = 0, n_hc, port, hc;
2549 struct pci_dev *pdev = to_pci_dev(host->dev);
2550 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2551 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2552
2553 /* global interrupt mask */
2554 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2555
4447d351 2556 rc = mv_chip_id(host, board_idx);
2557 if (rc)
2558 goto done;
2559
4447d351 2560 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2561
4447d351 2562 for (port = 0; port < host->n_ports; port++)
47c2b677 2563 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2564
c9d39130 2565 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2566 if (rc)
20f733e7 2567 goto done;
20f733e7 2568
2569 hpriv->ops->reset_flash(hpriv, mmio);
2570 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2571 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2572
4447d351 2573 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2574 if (IS_GEN_II(hpriv)) {
2575 void __iomem *port_mmio = mv_port_base(mmio, port);
2576
2a47ce06 2577 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2578 ifctl |= (1 << 7); /* enable gen2i speed */
2579 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2580 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2581 }
2582
c9d39130 2583 hpriv->ops->phy_errata(hpriv, mmio, port);
2584 }
2585
4447d351 2586 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2587 struct ata_port *ap = host->ports[port];
2a47ce06 2588 void __iomem *port_mmio = mv_port_base(mmio, port);
2589 unsigned int offset = port_mmio - mmio;
2590
2591 mv_port_init(&ap->ioaddr, port_mmio);
2592
2593 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2594 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2595 }
2596
2597 for (hc = 0; hc < n_hc; hc++) {
2598 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2599
2600 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2601 "(before clear)=0x%08x\n", hc,
2602 readl(hc_mmio + HC_CFG_OFS),
2603 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2604
2605 /* Clear any currently outstanding hc interrupt conditions */
2606 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2607 }
2608
2609 /* Clear any currently outstanding host interrupt conditions */
2610 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2611
2612 /* and unmask interrupt generation for host regs */
2613 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
fb621e2f 2614
ee9ccdf7 2615 if (IS_GEN_I(hpriv))
2616 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2617 else
2618 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2619
2620 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2621 "PCI int cause/mask=0x%08x/0x%08x\n",
2622 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2623 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2624 readl(mmio + PCI_IRQ_CAUSE_OFS),
2625 readl(mmio + PCI_IRQ_MASK_OFS));
bca1c4eb 2626
31961943 2627done:
2628 return rc;
2629}
2630
2631/**
2632 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2633 * @host: ATA host to print info about
2634 *
2635 * FIXME: complete this.
2636 *
2637 * LOCKING:
2638 * Inherited from caller.
2639 */
4447d351 2640static void mv_print_info(struct ata_host *host)
31961943 2641{
2642 struct pci_dev *pdev = to_pci_dev(host->dev);
2643 struct mv_host_priv *hpriv = host->private_data;
44c10138 2644 u8 scc;
c1e4fe71 2645 const char *scc_s, *gen;
2646
 2647 /* Read the device class to report whether the chip is
 2648 * configured in SCSI or RAID mode
 2649 */
2650 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2651 if (scc == 0)
2652 scc_s = "SCSI";
2653 else if (scc == 0x01)
2654 scc_s = "RAID";
2655 else
2656 scc_s = "?";
2657
2658 if (IS_GEN_I(hpriv))
2659 gen = "I";
2660 else if (IS_GEN_II(hpriv))
2661 gen = "II";
2662 else if (IS_GEN_IIE(hpriv))
2663 gen = "IIE";
2664 else
2665 gen = "?";
31961943 2666
a9524a76 2667 dev_printk(KERN_INFO, &pdev->dev,
2668 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2670 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2671}
2672
2673/**
2674 * mv_init_one - handle a positive probe of a Marvell host
2675 * @pdev: PCI device found
2676 * @ent: PCI device ID entry for the matched host
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
2681static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2682{
2683 static int printed_version = 0;
20f733e7 2684 unsigned int board_idx = (unsigned int)ent->driver_data;
2685 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 struct ata_host *host;
2687 struct mv_host_priv *hpriv;
2688 int n_ports, rc;
20f733e7 2689
2690 if (!printed_version++)
2691 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2692
2693 /* allocate host */
2694 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2695
2696 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2698 if (!host || !hpriv)
2699 return -ENOMEM;
2700 host->private_data = hpriv;
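 /* both allocations above are device-managed, so the early error
  * returns below need no explicit cleanup
  */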
2701
2702 /* acquire resources */
2703 rc = pcim_enable_device(pdev);
2704 if (rc)
20f733e7 2705 return rc;
20f733e7 2706
2707 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2708 if (rc == -EBUSY)
24dc5f33 2709 pcim_pin_device(pdev);
0d5ff566 2710 if (rc)
24dc5f33 2711 return rc;
4447d351 2712 host->iomap = pcim_iomap_table(pdev);
20f733e7 2713
2714 rc = pci_go_64(pdev);
2715 if (rc)
2716 return rc;
2717
20f733e7 2718 /* initialize adapter */
4447d351 2719 rc = mv_init_host(host, board_idx);
2720 if (rc)
2721 return rc;
20f733e7 2722
31961943 2723 /* Enable interrupts */
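 /* pci_enable_msi() returns nonzero on failure; in that case make
  * sure legacy INTx is enabled as the fallback
  */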
6a59dcf8 2724 if (msi && pci_enable_msi(pdev))
31961943 2725 pci_intx(pdev, 1);
20f733e7 2726
31961943 2727 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2728 mv_print_info(host);
20f733e7 2729
4447d351 2730 pci_set_master(pdev);
ea8b4db9 2731 pci_try_set_mwi(pdev);
4447d351 2732 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2733 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2734}
2735
2736static int __init mv_init(void)
2737{
b7887196 2738 return pci_register_driver(&mv_pci_driver);
2739}
2740
2741static void __exit mv_exit(void)
2742{
2743 pci_unregister_driver(&mv_pci_driver);
2744}
2745
2746MODULE_AUTHOR("Brett Russ");
2747MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2748MODULE_LICENSE("GPL");
2749MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2750MODULE_VERSION(DRV_VERSION);
2751
2752module_param(msi, int, 0444);
2753MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2754
2755module_init(mv_init);
2756module_exit(mv_exit);