drivers/ata/sata_mv.c
1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related to PCI-X)
30 are still needed.
31
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46   the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58 */
59
60
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <linux/libata.h>
73
74 #define DRV_NAME "sata_mv"
75 #define DRV_VERSION "0.81"
76
77 enum {
78         /* BARs are enumerated in terms of pci_resource_start() */
79 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
80 MV_IO_BAR = 2, /* offset 0x18: IO space */
81 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
82
83 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
84 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
85
86 MV_PCI_REG_BASE = 0,
87 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
88 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
89 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
90 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
91 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
92 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
93
94 MV_SATAHC0_REG_BASE = 0x20000,
95 MV_FLASH_CTL = 0x1046c,
96 MV_GPIO_PORT_CTL = 0x104f0,
97 MV_RESET_CFG = 0x180d8,
98
99 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
100 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
102 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
103
104 MV_MAX_Q_DEPTH = 32,
105 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
106
107 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
108 * CRPB needs alignment on a 256B boundary. Size == 256B
109 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 */
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
114 MV_MAX_SG_CT = 176,
115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
116 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
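	/*
	 * Editorial note, not in the original source: the arithmetic behind
	 * the size comment above works out as
	 *   32 CRQBs  * 32 B = 1024 B (1 KB),
	 *   32 CRPBs  *  8 B =  256 B,
	 *   176 ePRDs * 16 B = 2816 B,
	 * for a total of 4096 B, i.e. MV_PORT_PRIV_DMA_SZ == 4 KB per port.
	 */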
117
118 MV_PORTS_PER_HC = 4,
119 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
120 MV_PORT_HC_SHIFT = 2,
121 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
122 MV_PORT_MASK = 3,
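	/*
	 * Worked example (editorial addition): global port 6 lives on host
	 * controller 6 >> MV_PORT_HC_SHIFT == 1 and is hard port
	 * 6 & MV_PORT_MASK == 2 of that HC; see mv_hc_from_port() and
	 * mv_hardport_from_port() further down.
	 */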
123
124 /* Host Flags */
125 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
126 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
127 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
128 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
129 ATA_FLAG_PIO_POLLING,
130 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
131
132 CRQB_FLAG_READ = (1 << 0),
133 CRQB_TAG_SHIFT = 1,
134 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
135 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
136 CRQB_CMD_ADDR_SHIFT = 8,
137 CRQB_CMD_CS = (0x2 << 11),
138 CRQB_CMD_LAST = (1 << 15),
139
140 CRPB_FLAG_STATUS_SHIFT = 8,
141 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
142 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
143
144 EPRD_FLAG_END_OF_TBL = (1 << 31),
145
146 /* PCI interface registers */
147
148 PCI_COMMAND_OFS = 0xc00,
149
150 PCI_MAIN_CMD_STS_OFS = 0xd30,
151 STOP_PCI_MASTER = (1 << 2),
152 PCI_MASTER_EMPTY = (1 << 3),
153 GLOB_SFT_RST = (1 << 4),
154
155 MV_PCI_MODE = 0xd00,
156 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
157 MV_PCI_DISC_TIMER = 0xd04,
158 MV_PCI_MSI_TRIGGER = 0xc38,
159 MV_PCI_SERR_MASK = 0xc28,
160 MV_PCI_XBAR_TMOUT = 0x1d04,
161 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
162 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
163 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
164 MV_PCI_ERR_COMMAND = 0x1d50,
165
166 PCI_IRQ_CAUSE_OFS = 0x1d58,
167 PCI_IRQ_MASK_OFS = 0x1d5c,
168 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
169
170 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
171 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
172 PORT0_ERR = (1 << 0), /* shift by port # */
173 PORT0_DONE = (1 << 1), /* shift by port # */
174 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
175 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
176 PCI_ERR = (1 << 18),
177 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
178 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
179 PORTS_0_3_COAL_DONE = (1 << 8),
180 PORTS_4_7_COAL_DONE = (1 << 17),
181 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
182 GPIO_INT = (1 << 22),
183 SELF_INT = (1 << 23),
184 TWSI_INT = (1 << 24),
185 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
186 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
187 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
188 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
189 HC_MAIN_RSVD),
190 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
191 HC_MAIN_RSVD_5),
192
193 /* SATAHC registers */
194 HC_CFG_OFS = 0,
195
196 HC_IRQ_CAUSE_OFS = 0x14,
197 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
198 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
199 DEV_IRQ = (1 << 8), /* shift by port # */
200
201 /* Shadow block registers */
202 SHD_BLK_OFS = 0x100,
203 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
204
205 /* SATA registers */
206 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
207 SATA_ACTIVE_OFS = 0x350,
208 PHY_MODE3 = 0x310,
209 PHY_MODE4 = 0x314,
210 PHY_MODE2 = 0x330,
211 MV5_PHY_MODE = 0x74,
212 MV5_LT_MODE = 0x30,
213 MV5_PHY_CTL = 0x0C,
214 SATA_INTERFACE_CTL = 0x050,
215
216 MV_M2_PREAMP_MASK = 0x7e0,
217
218 /* Port registers */
219 EDMA_CFG_OFS = 0,
220 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
221 EDMA_CFG_NCQ = (1 << 5),
222 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
223 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
224 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
225
226 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
227 EDMA_ERR_IRQ_MASK_OFS = 0xc,
228 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
229 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
230 EDMA_ERR_DEV = (1 << 2), /* device error */
231 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
232 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
233 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
234 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
235 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
236 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
237         EDMA_ERR_TRANS_IRQ_7    = (1 << 8),     /* Gen IIE transport layer irq */
238 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
239 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
240 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
241 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
242 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
243 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
244 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
245 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
246 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
247 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
248 EDMA_ERR_OVERRUN_5 = (1 << 5),
249 EDMA_ERR_UNDERRUN_5 = (1 << 6),
250 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
251 EDMA_ERR_PRD_PAR |
252 EDMA_ERR_DEV_DCON |
253 EDMA_ERR_DEV_CON |
254 EDMA_ERR_SERR |
255 EDMA_ERR_SELF_DIS |
256 EDMA_ERR_CRQB_PAR |
257 EDMA_ERR_CRPB_PAR |
258 EDMA_ERR_INTRL_PAR |
259 EDMA_ERR_IORDY |
260 EDMA_ERR_LNK_CTRL_RX_2 |
261 EDMA_ERR_LNK_DATA_RX |
262 EDMA_ERR_LNK_DATA_TX |
263 EDMA_ERR_TRANS_PROTO,
264 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
265 EDMA_ERR_PRD_PAR |
266 EDMA_ERR_DEV_DCON |
267 EDMA_ERR_DEV_CON |
268 EDMA_ERR_OVERRUN_5 |
269 EDMA_ERR_UNDERRUN_5 |
270 EDMA_ERR_SELF_DIS_5 |
271 EDMA_ERR_CRQB_PAR |
272 EDMA_ERR_CRPB_PAR |
273 EDMA_ERR_INTRL_PAR |
274 EDMA_ERR_IORDY,
275
276 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
277 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
278
279 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
280 EDMA_REQ_Q_PTR_SHIFT = 5,
281
282 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
283 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
284 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
285 EDMA_RSP_Q_PTR_SHIFT = 3,
286
287 EDMA_CMD_OFS = 0x28, /* EDMA command register */
288 EDMA_EN = (1 << 0), /* enable EDMA */
289 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
290 ATA_RST = (1 << 2), /* reset trans/link/phy */
291
292 EDMA_IORDY_TMOUT = 0x34,
293 EDMA_ARB_CFG = 0x38,
294
295 /* Host private flags (hp_flags) */
296 MV_HP_FLAG_MSI = (1 << 0),
297 MV_HP_ERRATA_50XXB0 = (1 << 1),
298 MV_HP_ERRATA_50XXB2 = (1 << 2),
299 MV_HP_ERRATA_60X1B2 = (1 << 3),
300 MV_HP_ERRATA_60X1C0 = (1 << 4),
301 MV_HP_ERRATA_XX42A0 = (1 << 5),
302 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
303 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
304 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
305
306 /* Port private flags (pp_flags) */
307 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
308 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
309 };
310
311 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
312 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
313 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
314
315 enum {
316 MV_DMA_BOUNDARY = 0xffffffffU,
317
318 /* mask of register bits containing lower 32 bits
319 * of EDMA request queue DMA address
320 */
321 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
322
323 /* ditto, for response queue */
324 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
325 };
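/*
 * Editorial note, not in the original source: EDMA_REQ_Q_BASE_LO_MASK keeps
 * bits 31:10, so the request queue base must be 1 KB aligned (hence the
 * WARN_ON(pp->crqb_dma & 0x3ff) in mv_set_edma_ptrs() below), leaving the low
 * bits of the same register free to carry the ring index.  Likewise
 * EDMA_RSP_Q_BASE_LO_MASK keeps bits 31:8 for the 256 B aligned response queue.
 */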
326
327 enum chip_type {
328 chip_504x,
329 chip_508x,
330 chip_5080,
331 chip_604x,
332 chip_608x,
333 chip_6042,
334 chip_7042,
335 };
336
337 /* Command ReQuest Block: 32B */
338 struct mv_crqb {
339 __le32 sg_addr;
340 __le32 sg_addr_hi;
341 __le16 ctrl_flags;
342 __le16 ata_cmd[11];
343 };
344
345 struct mv_crqb_iie {
346 __le32 addr;
347 __le32 addr_hi;
348 __le32 flags;
349 __le32 len;
350 __le32 ata_cmd[4];
351 };
352
353 /* Command ResPonse Block: 8B */
354 struct mv_crpb {
355 __le16 id;
356 __le16 flags;
357 __le32 tmstmp;
358 };
359
360 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
361 struct mv_sg {
362 __le32 addr;
363 __le32 flags_size;
364 __le32 addr_hi;
365 __le32 reserved;
366 };
367
368 struct mv_port_priv {
369 struct mv_crqb *crqb;
370 dma_addr_t crqb_dma;
371 struct mv_crpb *crpb;
372 dma_addr_t crpb_dma;
373 struct mv_sg *sg_tbl;
374 dma_addr_t sg_tbl_dma;
375
376 unsigned int req_idx;
377 unsigned int resp_idx;
378
379 u32 pp_flags;
380 };
381
382 struct mv_port_signal {
383 u32 amps;
384 u32 pre;
385 };
386
387 struct mv_host_priv;
388 struct mv_hw_ops {
389 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
390 unsigned int port);
391 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
392 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
393 void __iomem *mmio);
394 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
395 unsigned int n_hc);
396 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
397 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
398 };
399
400 struct mv_host_priv {
401 u32 hp_flags;
402 struct mv_port_signal signal[8];
403 const struct mv_hw_ops *ops;
404 };
405
406 static void mv_irq_clear(struct ata_port *ap);
407 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
408 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
409 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
410 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
411 static int mv_port_start(struct ata_port *ap);
412 static void mv_port_stop(struct ata_port *ap);
413 static void mv_qc_prep(struct ata_queued_cmd *qc);
414 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
415 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
416 static void mv_error_handler(struct ata_port *ap);
417 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
418 static void mv_eh_freeze(struct ata_port *ap);
419 static void mv_eh_thaw(struct ata_port *ap);
420 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
421
422 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
423 unsigned int port);
424 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
425 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
426 void __iomem *mmio);
427 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int n_hc);
429 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
431
432 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int port);
434 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
436 void __iomem *mmio);
437 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int n_hc);
439 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
441 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int port_no);
443
444 static struct scsi_host_template mv5_sht = {
445 .module = THIS_MODULE,
446 .name = DRV_NAME,
447 .ioctl = ata_scsi_ioctl,
448 .queuecommand = ata_scsi_queuecmd,
449 .can_queue = ATA_DEF_QUEUE,
450 .this_id = ATA_SHT_THIS_ID,
451 .sg_tablesize = MV_MAX_SG_CT,
452 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
453 .emulated = ATA_SHT_EMULATED,
454 .use_clustering = 1,
455 .proc_name = DRV_NAME,
456 .dma_boundary = MV_DMA_BOUNDARY,
457 .slave_configure = ata_scsi_slave_config,
458 .slave_destroy = ata_scsi_slave_destroy,
459 .bios_param = ata_std_bios_param,
460 };
461
462 static struct scsi_host_template mv6_sht = {
463 .module = THIS_MODULE,
464 .name = DRV_NAME,
465 .ioctl = ata_scsi_ioctl,
466 .queuecommand = ata_scsi_queuecmd,
467 .can_queue = ATA_DEF_QUEUE,
468 .this_id = ATA_SHT_THIS_ID,
469 .sg_tablesize = MV_MAX_SG_CT,
470 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
471 .emulated = ATA_SHT_EMULATED,
472 .use_clustering = 1,
473 .proc_name = DRV_NAME,
474 .dma_boundary = MV_DMA_BOUNDARY,
475 .slave_configure = ata_scsi_slave_config,
476 .slave_destroy = ata_scsi_slave_destroy,
477 .bios_param = ata_std_bios_param,
478 };
479
480 static const struct ata_port_operations mv5_ops = {
481 .port_disable = ata_port_disable,
482
483 .tf_load = ata_tf_load,
484 .tf_read = ata_tf_read,
485 .check_status = ata_check_status,
486 .exec_command = ata_exec_command,
487 .dev_select = ata_std_dev_select,
488
489 .cable_detect = ata_cable_sata,
490
491 .qc_prep = mv_qc_prep,
492 .qc_issue = mv_qc_issue,
493 .data_xfer = ata_data_xfer,
494
495 .irq_clear = mv_irq_clear,
496 .irq_on = ata_irq_on,
497 .irq_ack = ata_irq_ack,
498
499 .error_handler = mv_error_handler,
500 .post_internal_cmd = mv_post_int_cmd,
501 .freeze = mv_eh_freeze,
502 .thaw = mv_eh_thaw,
503
504 .scr_read = mv5_scr_read,
505 .scr_write = mv5_scr_write,
506
507 .port_start = mv_port_start,
508 .port_stop = mv_port_stop,
509 };
510
511 static const struct ata_port_operations mv6_ops = {
512 .port_disable = ata_port_disable,
513
514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
519
520 .cable_detect = ata_cable_sata,
521
522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
524 .data_xfer = ata_data_xfer,
525
526 .irq_clear = mv_irq_clear,
527 .irq_on = ata_irq_on,
528 .irq_ack = ata_irq_ack,
529
530 .error_handler = mv_error_handler,
531 .post_internal_cmd = mv_post_int_cmd,
532 .freeze = mv_eh_freeze,
533 .thaw = mv_eh_thaw,
534
535 .scr_read = mv_scr_read,
536 .scr_write = mv_scr_write,
537
538 .port_start = mv_port_start,
539 .port_stop = mv_port_stop,
540 };
541
542 static const struct ata_port_operations mv_iie_ops = {
543 .port_disable = ata_port_disable,
544
545 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read,
547 .check_status = ata_check_status,
548 .exec_command = ata_exec_command,
549 .dev_select = ata_std_dev_select,
550
551 .cable_detect = ata_cable_sata,
552
553 .qc_prep = mv_qc_prep_iie,
554 .qc_issue = mv_qc_issue,
555 .data_xfer = ata_data_xfer,
556
557 .irq_clear = mv_irq_clear,
558 .irq_on = ata_irq_on,
559 .irq_ack = ata_irq_ack,
560
561 .error_handler = mv_error_handler,
562 .post_internal_cmd = mv_post_int_cmd,
563 .freeze = mv_eh_freeze,
564 .thaw = mv_eh_thaw,
565
566 .scr_read = mv_scr_read,
567 .scr_write = mv_scr_write,
568
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
571 };
572
573 static const struct ata_port_info mv_port_info[] = {
574 { /* chip_504x */
575 .flags = MV_COMMON_FLAGS,
576 .pio_mask = 0x1f, /* pio0-4 */
577 .udma_mask = ATA_UDMA6,
578 .port_ops = &mv5_ops,
579 },
580 { /* chip_508x */
581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
582 .pio_mask = 0x1f, /* pio0-4 */
583 .udma_mask = ATA_UDMA6,
584 .port_ops = &mv5_ops,
585 },
586 { /* chip_5080 */
587 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
588 .pio_mask = 0x1f, /* pio0-4 */
589 .udma_mask = ATA_UDMA6,
590 .port_ops = &mv5_ops,
591 },
592 { /* chip_604x */
593 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
594 .pio_mask = 0x1f, /* pio0-4 */
595 .udma_mask = ATA_UDMA6,
596 .port_ops = &mv6_ops,
597 },
598 { /* chip_608x */
599 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
600 MV_FLAG_DUAL_HC,
601 .pio_mask = 0x1f, /* pio0-4 */
602 .udma_mask = ATA_UDMA6,
603 .port_ops = &mv6_ops,
604 },
605 { /* chip_6042 */
606 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
607 .pio_mask = 0x1f, /* pio0-4 */
608 .udma_mask = ATA_UDMA6,
609 .port_ops = &mv_iie_ops,
610 },
611 { /* chip_7042 */
612 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
613 .pio_mask = 0x1f, /* pio0-4 */
614 .udma_mask = ATA_UDMA6,
615 .port_ops = &mv_iie_ops,
616 },
617 };
618
619 static const struct pci_device_id mv_pci_tbl[] = {
620 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
621 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
622 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
623 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
624 /* RocketRAID 1740/174x have different identifiers */
625 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
626 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
627
628 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
629 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
630 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
631 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
632 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
633
634 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
635
636 /* Adaptec 1430SA */
637 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
638
639 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
640
641 /* add Marvell 7042 support */
642 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
643
644 { } /* terminate list */
645 };
646
647 static struct pci_driver mv_pci_driver = {
648 .name = DRV_NAME,
649 .id_table = mv_pci_tbl,
650 .probe = mv_init_one,
651 .remove = ata_pci_remove_one,
652 };
653
654 static const struct mv_hw_ops mv5xxx_ops = {
655 .phy_errata = mv5_phy_errata,
656 .enable_leds = mv5_enable_leds,
657 .read_preamp = mv5_read_preamp,
658 .reset_hc = mv5_reset_hc,
659 .reset_flash = mv5_reset_flash,
660 .reset_bus = mv5_reset_bus,
661 };
662
663 static const struct mv_hw_ops mv6xxx_ops = {
664 .phy_errata = mv6_phy_errata,
665 .enable_leds = mv6_enable_leds,
666 .read_preamp = mv6_read_preamp,
667 .reset_hc = mv6_reset_hc,
668 .reset_flash = mv6_reset_flash,
669 .reset_bus = mv_reset_pci_bus,
670 };
671
672 /*
673 * module options
674 */
675 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
676
677
678 /* move to PCI layer or libata core? */
679 static int pci_go_64(struct pci_dev *pdev)
680 {
681 int rc;
682
683 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
684 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
685 if (rc) {
686 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
687 if (rc) {
688 dev_printk(KERN_ERR, &pdev->dev,
689 "64-bit DMA enable failed\n");
690 return rc;
691 }
692 }
693 } else {
694 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
695 if (rc) {
696 dev_printk(KERN_ERR, &pdev->dev,
697 "32-bit DMA enable failed\n");
698 return rc;
699 }
700 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
701 if (rc) {
702 dev_printk(KERN_ERR, &pdev->dev,
703 "32-bit consistent DMA enable failed\n");
704 return rc;
705 }
706 }
707
708 return rc;
709 }
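/*
 * Hypothetical usage sketch (editorial addition, assuming a typical
 * probe-time call site rather than quoting one):
 *
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		return rc;
 *
 * i.e. prefer a 64-bit DMA mask, fall back to 32-bit masks, and fail only
 * when no usable mask can be set.
 */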
710
711 /*
712 * Functions
713 */
714
715 static inline void writelfl(unsigned long data, void __iomem *addr)
716 {
717 writel(data, addr);
718 (void) readl(addr); /* flush to avoid PCI posted write */
719 }
720
721 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
722 {
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
724 }
725
726 static inline unsigned int mv_hc_from_port(unsigned int port)
727 {
728 return port >> MV_PORT_HC_SHIFT;
729 }
730
731 static inline unsigned int mv_hardport_from_port(unsigned int port)
732 {
733 return port & MV_PORT_MASK;
734 }
735
736 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
737 unsigned int port)
738 {
739 return mv_hc_base(base, mv_hc_from_port(port));
740 }
741
742 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
743 {
744 return mv_hc_base_from_port(base, port) +
745 MV_SATAHC_ARBTR_REG_SZ +
746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
747 }
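/*
 * Worked example (editorial addition): for global port 5,
 * mv_hc_from_port(5) == 1 and mv_hardport_from_port(5) == 1, so
 * mv_port_base() resolves to
 *	base + 0x20000 + 1 * 0x10000	(HC 1)
 *	     + 0x2000			(arbiter block)
 *	     + 1 * 0x2000		(hard port 1)
 * == base + 0x34000.
 */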
748
749 static inline void __iomem *mv_ap_base(struct ata_port *ap)
750 {
751 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
752 }
753
754 static inline int mv_get_hc_count(unsigned long port_flags)
755 {
756 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
757 }
758
759 static void mv_irq_clear(struct ata_port *ap)
760 {
761 }
762
763 static void mv_set_edma_ptrs(void __iomem *port_mmio,
764 struct mv_host_priv *hpriv,
765 struct mv_port_priv *pp)
766 {
767 u32 index;
768
769 /*
770 * initialize request queue
771 */
772 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
773
774 WARN_ON(pp->crqb_dma & 0x3ff);
775 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
776 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
777 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
778
779 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
780 writelfl((pp->crqb_dma & 0xffffffff) | index,
781 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
782 else
783 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
784
785 /*
786 * initialize response queue
787 */
788 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
789
790 WARN_ON(pp->crpb_dma & 0xff);
791 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
792
793 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
794 writelfl((pp->crpb_dma & 0xffffffff) | index,
795 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
796 else
797 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
798
799 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
800 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
801 }
802
803 /**
804 * mv_start_dma - Enable eDMA engine
805 * @base: port base address
806 * @pp: port private data
807 *
808 * Verify the local cache of the eDMA state is accurate with a
809 * WARN_ON.
810 *
811 * LOCKING:
812 * Inherited from caller.
813 */
814 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
815 struct mv_port_priv *pp)
816 {
817 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
818 /* clear EDMA event indicators, if any */
819 writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
820
821 mv_set_edma_ptrs(base, hpriv, pp);
822
823 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
824 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
825 }
826 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
827 }
828
829 /**
830 * __mv_stop_dma - Disable eDMA engine
831 * @ap: ATA channel to manipulate
832 *
833 * Verify the local cache of the eDMA state is accurate with a
834 * WARN_ON.
835 *
836 * LOCKING:
837 * Inherited from caller.
838 */
839 static int __mv_stop_dma(struct ata_port *ap)
840 {
841 void __iomem *port_mmio = mv_ap_base(ap);
842 struct mv_port_priv *pp = ap->private_data;
843 u32 reg;
844 int i, err = 0;
845
846 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
847 /* Disable EDMA if active. The disable bit auto clears.
848 */
849 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
850 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
851 } else {
852 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
853 }
854
855 /* now properly wait for the eDMA to stop */
856 for (i = 1000; i > 0; i--) {
857 reg = readl(port_mmio + EDMA_CMD_OFS);
858 if (!(reg & EDMA_EN))
859 break;
860
861 udelay(100);
862 }
863
864 if (reg & EDMA_EN) {
865 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
866 err = -EIO;
867 }
868
869 return err;
870 }
871
872 static int mv_stop_dma(struct ata_port *ap)
873 {
874 unsigned long flags;
875 int rc;
876
877 spin_lock_irqsave(&ap->host->lock, flags);
878 rc = __mv_stop_dma(ap);
879 spin_unlock_irqrestore(&ap->host->lock, flags);
880
881 return rc;
882 }
883
884 #ifdef ATA_DEBUG
885 static void mv_dump_mem(void __iomem *start, unsigned bytes)
886 {
887 int b, w;
888 for (b = 0; b < bytes; ) {
889 DPRINTK("%p: ", start + b);
890 for (w = 0; b < bytes && w < 4; w++) {
891 printk("%08x ",readl(start + b));
892 b += sizeof(u32);
893 }
894 printk("\n");
895 }
896 }
897 #endif
898
899 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
900 {
901 #ifdef ATA_DEBUG
902 int b, w;
903 u32 dw;
904 for (b = 0; b < bytes; ) {
905 DPRINTK("%02x: ", b);
906 for (w = 0; b < bytes && w < 4; w++) {
907 (void) pci_read_config_dword(pdev,b,&dw);
908 printk("%08x ",dw);
909 b += sizeof(u32);
910 }
911 printk("\n");
912 }
913 #endif
914 }
915 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
916 struct pci_dev *pdev)
917 {
918 #ifdef ATA_DEBUG
919 void __iomem *hc_base = mv_hc_base(mmio_base,
920 port >> MV_PORT_HC_SHIFT);
921 void __iomem *port_base;
922 int start_port, num_ports, p, start_hc, num_hcs, hc;
923
924 if (0 > port) {
925 start_hc = start_port = 0;
926                 num_ports = 8;          /* should be benign for 4-port devices */
927 num_hcs = 2;
928 } else {
929 start_hc = port >> MV_PORT_HC_SHIFT;
930 start_port = port;
931 num_ports = num_hcs = 1;
932 }
933 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
934 num_ports > 1 ? num_ports - 1 : start_port);
935
936 if (NULL != pdev) {
937 DPRINTK("PCI config space regs:\n");
938 mv_dump_pci_cfg(pdev, 0x68);
939 }
940 DPRINTK("PCI regs:\n");
941 mv_dump_mem(mmio_base+0xc00, 0x3c);
942 mv_dump_mem(mmio_base+0xd00, 0x34);
943 mv_dump_mem(mmio_base+0xf00, 0x4);
944 mv_dump_mem(mmio_base+0x1d00, 0x6c);
945 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
946 hc_base = mv_hc_base(mmio_base, hc);
947 DPRINTK("HC regs (HC %i):\n", hc);
948 mv_dump_mem(hc_base, 0x1c);
949 }
950 for (p = start_port; p < start_port + num_ports; p++) {
951 port_base = mv_port_base(mmio_base, p);
952 DPRINTK("EDMA regs (port %i):\n",p);
953 mv_dump_mem(port_base, 0x54);
954 DPRINTK("SATA regs (port %i):\n",p);
955 mv_dump_mem(port_base+0x300, 0x60);
956 }
957 #endif
958 }
959
960 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
961 {
962 unsigned int ofs;
963
964 switch (sc_reg_in) {
965 case SCR_STATUS:
966 case SCR_CONTROL:
967 case SCR_ERROR:
968 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
969 break;
970 case SCR_ACTIVE:
971 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
972 break;
973 default:
974 ofs = 0xffffffffU;
975 break;
976 }
977 return ofs;
978 }
979
980 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
981 {
982 unsigned int ofs = mv_scr_offset(sc_reg_in);
983
984 if (ofs != 0xffffffffU) {
985 *val = readl(mv_ap_base(ap) + ofs);
986 return 0;
987 } else
988 return -EINVAL;
989 }
990
991 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
992 {
993 unsigned int ofs = mv_scr_offset(sc_reg_in);
994
995 if (ofs != 0xffffffffU) {
996 writelfl(val, mv_ap_base(ap) + ofs);
997 return 0;
998 } else
999 return -EINVAL;
1000 }
1001
1002 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1003 void __iomem *port_mmio)
1004 {
1005 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1006
1007 /* set up non-NCQ EDMA configuration */
1008 cfg &= ~(1 << 9); /* disable eQue */
1009
1010 if (IS_GEN_I(hpriv)) {
1011 cfg &= ~0x1f; /* clear queue depth */
1012 cfg |= (1 << 8); /* enab config burst size mask */
1013 }
1014
1015 else if (IS_GEN_II(hpriv)) {
1016 cfg &= ~0x1f; /* clear queue depth */
1017 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1018 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1019 }
1020
1021 else if (IS_GEN_IIE(hpriv)) {
1022 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1023 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1024 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
1025 cfg |= (1 << 18); /* enab early completion */
1026 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1027 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
1028 cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
1029 }
1030
1031 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1032 }
1033
1034 /**
1035 * mv_port_start - Port specific init/start routine.
1036 * @ap: ATA channel to manipulate
1037 *
1038 * Allocate and point to DMA memory, init port private memory,
1039 * zero indices.
1040 *
1041 * LOCKING:
1042 * Inherited from caller.
1043 */
1044 static int mv_port_start(struct ata_port *ap)
1045 {
1046 struct device *dev = ap->host->dev;
1047 struct mv_host_priv *hpriv = ap->host->private_data;
1048 struct mv_port_priv *pp;
1049 void __iomem *port_mmio = mv_ap_base(ap);
1050 void *mem;
1051 dma_addr_t mem_dma;
1052 unsigned long flags;
1053 int rc;
1054
1055 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1056 if (!pp)
1057 return -ENOMEM;
1058
1059 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1060 GFP_KERNEL);
1061 if (!mem)
1062 return -ENOMEM;
1063 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1064
1065 rc = ata_pad_alloc(ap, dev);
1066 if (rc)
1067 return rc;
1068
1069 /* First item in chunk of DMA memory:
1070 * 32-slot command request table (CRQB), 32 bytes each in size
1071 */
1072 pp->crqb = mem;
1073 pp->crqb_dma = mem_dma;
1074 mem += MV_CRQB_Q_SZ;
1075 mem_dma += MV_CRQB_Q_SZ;
1076
1077 /* Second item:
1078 * 32-slot command response table (CRPB), 8 bytes each in size
1079 */
1080 pp->crpb = mem;
1081 pp->crpb_dma = mem_dma;
1082 mem += MV_CRPB_Q_SZ;
1083 mem_dma += MV_CRPB_Q_SZ;
1084
1085 /* Third item:
1086 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1087 */
1088 pp->sg_tbl = mem;
1089 pp->sg_tbl_dma = mem_dma;
1090
1091 spin_lock_irqsave(&ap->host->lock, flags);
1092
1093 mv_edma_cfg(ap, hpriv, port_mmio);
1094
1095 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1096
1097 spin_unlock_irqrestore(&ap->host->lock, flags);
1098
1099 /* Don't turn on EDMA here...do it before DMA commands only. Else
1100 * we'll be unable to send non-data, PIO, etc due to restricted access
1101 * to shadow regs.
1102 */
1103 ap->private_data = pp;
1104 return 0;
1105 }
1106
1107 /**
1108 * mv_port_stop - Port specific cleanup/stop routine.
1109 * @ap: ATA channel to manipulate
1110 *
1111 * Stop DMA, cleanup port memory.
1112 *
1113 * LOCKING:
1114 * This routine uses the host lock to protect the DMA stop.
1115 */
1116 static void mv_port_stop(struct ata_port *ap)
1117 {
1118 mv_stop_dma(ap);
1119 }
1120
1121 /**
1122 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1123 * @qc: queued command whose SG list to source from
1124 *
1125 * Populate the SG list and mark the last entry.
1126 *
1127 * LOCKING:
1128 * Inherited from caller.
1129 */
1130 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1131 {
1132 struct mv_port_priv *pp = qc->ap->private_data;
1133 unsigned int n_sg = 0;
1134 struct scatterlist *sg;
1135 struct mv_sg *mv_sg;
1136
1137 mv_sg = pp->sg_tbl;
1138 ata_for_each_sg(sg, qc) {
1139 dma_addr_t addr = sg_dma_address(sg);
1140 u32 sg_len = sg_dma_len(sg);
1141
1142 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1143 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1144 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1145
1146 if (ata_sg_is_last(sg, qc))
1147 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1148
1149 mv_sg++;
1150 n_sg++;
1151 }
1152
1153 return n_sg;
1154 }
1155
1156 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1157 {
1158 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1159 (last ? CRQB_CMD_LAST : 0);
1160 *cmdw = cpu_to_le16(tmp);
1161 }
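/*
 * Editorial sketch of the word packed above, assuming the standard libata
 * shadow-register numbering (e.g. ATA_REG_CMD == 7) rather than anything
 * stated in this file: the data byte sits in bits 7:0, the register address
 * starts at CRQB_CMD_ADDR_SHIFT, CRQB_CMD_CS occupies bits 12:11, and
 * CRQB_CMD_LAST is bit 15.  So the final word for a READ DMA EXT command
 * (0x25) would be
 *	0x25 | (7 << 8) | (0x2 << 11) | (1 << 15) == 0x9725.
 */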
1162
1163 /**
1164 * mv_qc_prep - Host specific command preparation.
1165 * @qc: queued command to prepare
1166 *
1167 * This routine simply redirects to the general purpose routine
1168 * if command is not DMA. Else, it handles prep of the CRQB
1169 * (command request block), does some sanity checking, and calls
1170 * the SG load routine.
1171 *
1172 * LOCKING:
1173 * Inherited from caller.
1174 */
1175 static void mv_qc_prep(struct ata_queued_cmd *qc)
1176 {
1177 struct ata_port *ap = qc->ap;
1178 struct mv_port_priv *pp = ap->private_data;
1179 __le16 *cw;
1180 struct ata_taskfile *tf;
1181 u16 flags = 0;
1182 unsigned in_index;
1183
1184 if (qc->tf.protocol != ATA_PROT_DMA)
1185 return;
1186
1187 /* Fill in command request block
1188 */
1189 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1190 flags |= CRQB_FLAG_READ;
1191 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1192 flags |= qc->tag << CRQB_TAG_SHIFT;
1193 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1194
1195 /* get current queue index from software */
1196 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1197
1198 pp->crqb[in_index].sg_addr =
1199 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1200 pp->crqb[in_index].sg_addr_hi =
1201 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1202 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1203
1204 cw = &pp->crqb[in_index].ata_cmd[0];
1205 tf = &qc->tf;
1206
1207         /* Sadly, the CRQB cannot accommodate all registers--there are
1208 * only 11 bytes...so we must pick and choose required
1209 * registers based on the command. So, we drop feature and
1210 * hob_feature for [RW] DMA commands, but they are needed for
1211 * NCQ. NCQ will drop hob_nsect.
1212 */
1213 switch (tf->command) {
1214 case ATA_CMD_READ:
1215 case ATA_CMD_READ_EXT:
1216 case ATA_CMD_WRITE:
1217 case ATA_CMD_WRITE_EXT:
1218 case ATA_CMD_WRITE_FUA_EXT:
1219 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1220 break;
1221 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1222 case ATA_CMD_FPDMA_READ:
1223 case ATA_CMD_FPDMA_WRITE:
1224 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1225 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1226 break;
1227 #endif /* FIXME: remove this line when NCQ added */
1228 default:
1229 /* The only other commands EDMA supports in non-queued and
1230 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1231 * of which are defined/used by Linux. If we get here, this
1232 * driver needs work.
1233 *
1234 * FIXME: modify libata to give qc_prep a return value and
1235 * return error here.
1236 */
1237 BUG_ON(tf->command);
1238 break;
1239 }
1240 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1241 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1242 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1243 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1244 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1245 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1246 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1247 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1248 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1249
1250 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1251 return;
1252 mv_fill_sg(qc);
1253 }
1254
1255 /**
1256 * mv_qc_prep_iie - Host specific command preparation.
1257 * @qc: queued command to prepare
1258 *
1259 * This routine simply redirects to the general purpose routine
1260 * if command is not DMA. Else, it handles prep of the CRQB
1261 * (command request block), does some sanity checking, and calls
1262 * the SG load routine.
1263 *
1264 * LOCKING:
1265 * Inherited from caller.
1266 */
1267 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1268 {
1269 struct ata_port *ap = qc->ap;
1270 struct mv_port_priv *pp = ap->private_data;
1271 struct mv_crqb_iie *crqb;
1272 struct ata_taskfile *tf;
1273 unsigned in_index;
1274 u32 flags = 0;
1275
1276 if (qc->tf.protocol != ATA_PROT_DMA)
1277 return;
1278
1279 /* Fill in Gen IIE command request block
1280 */
1281 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1282 flags |= CRQB_FLAG_READ;
1283
1284 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1285 flags |= qc->tag << CRQB_TAG_SHIFT;
1286 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1287 what we use as our tag */
1288
1289 /* get current queue index from software */
1290 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1291
1292 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1293 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1294 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1295 crqb->flags = cpu_to_le32(flags);
1296
1297 tf = &qc->tf;
1298 crqb->ata_cmd[0] = cpu_to_le32(
1299 (tf->command << 16) |
1300 (tf->feature << 24)
1301 );
1302 crqb->ata_cmd[1] = cpu_to_le32(
1303 (tf->lbal << 0) |
1304 (tf->lbam << 8) |
1305 (tf->lbah << 16) |
1306 (tf->device << 24)
1307 );
1308 crqb->ata_cmd[2] = cpu_to_le32(
1309 (tf->hob_lbal << 0) |
1310 (tf->hob_lbam << 8) |
1311 (tf->hob_lbah << 16) |
1312 (tf->hob_feature << 24)
1313 );
1314 crqb->ata_cmd[3] = cpu_to_le32(
1315 (tf->nsect << 0) |
1316 (tf->hob_nsect << 8)
1317 );
1318
1319 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1320 return;
1321 mv_fill_sg(qc);
1322 }
1323
1324 /**
1325 * mv_qc_issue - Initiate a command to the host
1326 * @qc: queued command to start
1327 *
1328 * This routine simply redirects to the general purpose routine
1329 * if command is not DMA. Else, it sanity checks our local
1330 * caches of the request producer/consumer indices then enables
1331 * DMA and bumps the request producer index.
1332 *
1333 * LOCKING:
1334 * Inherited from caller.
1335 */
1336 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1337 {
1338 struct ata_port *ap = qc->ap;
1339 void __iomem *port_mmio = mv_ap_base(ap);
1340 struct mv_port_priv *pp = ap->private_data;
1341 struct mv_host_priv *hpriv = ap->host->private_data;
1342 u32 in_index;
1343
1344 if (qc->tf.protocol != ATA_PROT_DMA) {
1345 /* We're about to send a non-EDMA capable command to the
1346 * port. Turn off EDMA so there won't be problems accessing
1347                  * the shadow block and other registers.
1348 */
1349 __mv_stop_dma(ap);
1350 return ata_qc_issue_prot(qc);
1351 }
1352
1353 mv_start_dma(port_mmio, hpriv, pp);
1354
1355 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1356
1357 /* until we do queuing, the queue should be empty at this point */
1358 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1359 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1360
1361 pp->req_idx++;
1362
1363 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1364
1365 /* and write the request in pointer to kick the EDMA to life */
1366 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1367 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1368
1369 return 0;
1370 }
1371
1372 /**
1373 * mv_err_intr - Handle error interrupts on the port
1374 * @ap: ATA channel to manipulate
1375  *      @qc: active queued command, or NULL if none
1376 *
1377 * In most cases, just clear the interrupt and move on. However,
1378 * some cases require an eDMA reset, which is done right before
1379 * the COMRESET in mv_phy_reset(). The SERR case requires a
1380 * clear of pending errors in the SATA SERROR register. Finally,
1381 * if the port disabled DMA, update our cached copy to match.
1382 *
1383 * LOCKING:
1384 * Inherited from caller.
1385 */
1386 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1387 {
1388 void __iomem *port_mmio = mv_ap_base(ap);
1389 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1390 struct mv_port_priv *pp = ap->private_data;
1391 struct mv_host_priv *hpriv = ap->host->private_data;
1392 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1393 unsigned int action = 0, err_mask = 0;
1394 struct ata_eh_info *ehi = &ap->eh_info;
1395
1396 ata_ehi_clear_desc(ehi);
1397
1398 if (!edma_enabled) {
1399 /* just a guess: do we need to do this? should we
1400 * expand this, and do it in all cases?
1401 */
1402 sata_scr_read(ap, SCR_ERROR, &serr);
1403 sata_scr_write_flush(ap, SCR_ERROR, serr);
1404 }
1405
1406 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1407
1408 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1409
1410 /*
1411 * all generations share these EDMA error cause bits
1412 */
1413
1414 if (edma_err_cause & EDMA_ERR_DEV)
1415 err_mask |= AC_ERR_DEV;
1416 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1417 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1418 EDMA_ERR_INTRL_PAR)) {
1419 err_mask |= AC_ERR_ATA_BUS;
1420 action |= ATA_EH_HARDRESET;
1421 ata_ehi_push_desc(ehi, "parity error");
1422 }
1423 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1424 ata_ehi_hotplugged(ehi);
1425 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1426 "dev disconnect" : "dev connect");
1427 }
1428
1429 if (IS_GEN_I(hpriv)) {
1430 eh_freeze_mask = EDMA_EH_FREEZE_5;
1431
1432 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1433 struct mv_port_priv *pp = ap->private_data;
1434 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1435 ata_ehi_push_desc(ehi, "EDMA self-disable");
1436 }
1437 } else {
1438 eh_freeze_mask = EDMA_EH_FREEZE;
1439
1440 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1441 struct mv_port_priv *pp = ap->private_data;
1442 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1443 ata_ehi_push_desc(ehi, "EDMA self-disable");
1444 }
1445
1446 if (edma_err_cause & EDMA_ERR_SERR) {
1447 sata_scr_read(ap, SCR_ERROR, &serr);
1448 sata_scr_write_flush(ap, SCR_ERROR, serr);
1449 err_mask = AC_ERR_ATA_BUS;
1450 action |= ATA_EH_HARDRESET;
1451 }
1452 }
1453
1454 /* Clear EDMA now that SERR cleanup done */
1455 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1456
1457 if (!err_mask) {
1458 err_mask = AC_ERR_OTHER;
1459 action |= ATA_EH_HARDRESET;
1460 }
1461
1462 ehi->serror |= serr;
1463 ehi->action |= action;
1464
1465 if (qc)
1466 qc->err_mask |= err_mask;
1467 else
1468 ehi->err_mask |= err_mask;
1469
1470 if (edma_err_cause & eh_freeze_mask)
1471 ata_port_freeze(ap);
1472 else
1473 ata_port_abort(ap);
1474 }
1475
1476 static void mv_intr_pio(struct ata_port *ap)
1477 {
1478 struct ata_queued_cmd *qc;
1479 u8 ata_status;
1480
1481 /* ignore spurious intr if drive still BUSY */
1482 ata_status = readb(ap->ioaddr.status_addr);
1483 if (unlikely(ata_status & ATA_BUSY))
1484 return;
1485
1486 /* get active ATA command */
1487 qc = ata_qc_from_tag(ap, ap->active_tag);
1488 if (unlikely(!qc)) /* no active tag */
1489 return;
1490 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1491 return;
1492
1493 /* and finally, complete the ATA command */
1494 qc->err_mask |= ac_err_mask(ata_status);
1495 ata_qc_complete(qc);
1496 }
1497
1498 static void mv_intr_edma(struct ata_port *ap)
1499 {
1500 void __iomem *port_mmio = mv_ap_base(ap);
1501 struct mv_host_priv *hpriv = ap->host->private_data;
1502 struct mv_port_priv *pp = ap->private_data;
1503 struct ata_queued_cmd *qc;
1504 u32 out_index, in_index;
1505 bool work_done = false;
1506
1507 /* get h/w response queue pointer */
1508 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1509 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1510
1511 while (1) {
1512 u16 status;
1513 unsigned int tag;
1514
1515 /* get s/w response queue last-read pointer, and compare */
1516 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1517 if (in_index == out_index)
1518 break;
1519
1520 /* 50xx: get active ATA command */
1521 if (IS_GEN_I(hpriv))
1522 tag = ap->active_tag;
1523
1524 /* Gen II/IIE: get active ATA command via tag, to enable
1525                  * support for queueing.  This works transparently for
1526 * queued and non-queued modes.
1527 */
1528 else if (IS_GEN_II(hpriv))
1529 tag = (le16_to_cpu(pp->crpb[out_index].id)
1530 >> CRPB_IOID_SHIFT_6) & 0x3f;
1531
1532 else /* IS_GEN_IIE */
1533 tag = (le16_to_cpu(pp->crpb[out_index].id)
1534 >> CRPB_IOID_SHIFT_7) & 0x3f;
1535
1536 qc = ata_qc_from_tag(ap, tag);
1537
1538 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1539 * bits (WARNING: might not necessarily be associated
1540 * with this command), which -should- be clear
1541 * if all is well
1542 */
1543 status = le16_to_cpu(pp->crpb[out_index].flags);
1544 if (unlikely(status & 0xff)) {
1545 mv_err_intr(ap, qc);
1546 return;
1547 }
1548
1549 /* and finally, complete the ATA command */
1550 if (qc) {
1551 qc->err_mask |=
1552 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1553 ata_qc_complete(qc);
1554 }
1555
1556 /* advance software response queue pointer, to
1557 * indicate (after the loop completes) to hardware
1558 * that we have consumed a response queue entry.
1559 */
1560 work_done = true;
1561 pp->resp_idx++;
1562 }
1563
1564 if (work_done)
1565 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1566 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1567 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1568 }
1569
1570 /**
1571 * mv_host_intr - Handle all interrupts on the given host controller
1572 * @host: host specific structure
1573 * @relevant: port error bits relevant to this host controller
1574 * @hc: which host controller we're to look at
1575 *
1576 * Read then write clear the HC interrupt status then walk each
1577 * port connected to the HC and see if it needs servicing. Port
1578 * success ints are reported in the HC interrupt status reg, the
1579 * port error ints are reported in the higher level main
1580 * interrupt status register and thus are passed in via the
1581 * 'relevant' argument.
1582 *
1583 * LOCKING:
1584 * Inherited from caller.
1585 */
1586 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1587 {
1588 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1589 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1590 u32 hc_irq_cause;
1591 int port, port0;
1592
1593 if (hc == 0)
1594 port0 = 0;
1595 else
1596 port0 = MV_PORTS_PER_HC;
1597
1598 /* we'll need the HC success int register in most cases */
1599 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1600 if (!hc_irq_cause)
1601 return;
1602
1603 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1604
1605 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1606 hc,relevant,hc_irq_cause);
1607
1608 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1609 struct ata_port *ap = host->ports[port];
1610 struct mv_port_priv *pp = ap->private_data;
1611 int have_err_bits, hard_port, shift;
1612
1613 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1614 continue;
1615
1616 shift = port << 1; /* (port * 2) */
1617 if (port >= MV_PORTS_PER_HC) {
1618 shift++; /* skip bit 8 in the HC Main IRQ reg */
1619 }
1620 have_err_bits = ((PORT0_ERR << shift) & relevant);
1621
1622 if (unlikely(have_err_bits)) {
1623 struct ata_queued_cmd *qc;
1624
1625 qc = ata_qc_from_tag(ap, ap->active_tag);
1626 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1627 continue;
1628
1629 mv_err_intr(ap, qc);
1630 continue;
1631 }
1632
1633 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1634
1635 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1636 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1637 mv_intr_edma(ap);
1638 } else {
1639 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1640 mv_intr_pio(ap);
1641 }
1642 }
1643 VPRINTK("EXIT\n");
1644 }
1645
1646 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1647 {
1648 struct ata_port *ap;
1649 struct ata_queued_cmd *qc;
1650 struct ata_eh_info *ehi;
1651 unsigned int i, err_mask, printed = 0;
1652 u32 err_cause;
1653
1654 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1655
1656 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1657 err_cause);
1658
1659 DPRINTK("All regs @ PCI error\n");
1660 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1661
1662 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1663
1664 for (i = 0; i < host->n_ports; i++) {
1665 ap = host->ports[i];
1666 if (!ata_port_offline(ap)) {
1667 ehi = &ap->eh_info;
1668 ata_ehi_clear_desc(ehi);
1669 if (!printed++)
1670 ata_ehi_push_desc(ehi,
1671 "PCI err cause 0x%08x", err_cause);
1672 err_mask = AC_ERR_HOST_BUS;
1673 ehi->action = ATA_EH_HARDRESET;
1674 qc = ata_qc_from_tag(ap, ap->active_tag);
1675 if (qc)
1676 qc->err_mask |= err_mask;
1677 else
1678 ehi->err_mask |= err_mask;
1679
1680 ata_port_freeze(ap);
1681 }
1682 }
1683 }
1684
1685 /**
1686 * mv_interrupt - Main interrupt event handler
1687 * @irq: unused
1688 * @dev_instance: private data; in this case the host structure
1689 *
1690 * Read the read only register to determine if any host
1691 * controllers have pending interrupts. If so, call lower level
1692 * routine to handle. Also check for PCI errors which are only
1693 * reported here.
1694 *
1695 * LOCKING:
1696 * This routine holds the host lock while processing pending
1697 * interrupts.
1698 */
1699 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1700 {
1701 struct ata_host *host = dev_instance;
1702 unsigned int hc, handled = 0, n_hcs;
1703 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1704 u32 irq_stat;
1705
1706 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1707
1708 /* check the cases where we either have nothing pending or have read
1709 * a bogus register value which can indicate HW removal or PCI fault
1710 */
1711 if (!irq_stat || (0xffffffffU == irq_stat))
1712 return IRQ_NONE;
1713
1714 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1715 spin_lock(&host->lock);
1716
1717 if (unlikely(irq_stat & PCI_ERR)) {
1718 mv_pci_error(host, mmio);
1719 handled = 1;
1720 goto out_unlock; /* skip all other HC irq handling */
1721 }
1722
1723 for (hc = 0; hc < n_hcs; hc++) {
1724 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1725 if (relevant) {
1726 mv_host_intr(host, relevant, hc);
1727 handled = 1;
1728 }
1729 }
1730
1731 out_unlock:
1732 spin_unlock(&host->lock);
1733
1734 return IRQ_RETVAL(handled);
1735 }
1736
1737 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1738 {
1739 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1740 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1741
1742 return hc_mmio + ofs;
1743 }
1744
1745 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1746 {
1747 unsigned int ofs;
1748
1749 switch (sc_reg_in) {
1750 case SCR_STATUS:
1751 case SCR_ERROR:
1752 case SCR_CONTROL:
1753 ofs = sc_reg_in * sizeof(u32);
1754 break;
1755 default:
1756 ofs = 0xffffffffU;
1757 break;
1758 }
1759 return ofs;
1760 }
1761
1762 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1763 {
1764 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1765 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1766 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1767
1768 if (ofs != 0xffffffffU) {
1769 *val = readl(addr + ofs);
1770 return 0;
1771 } else
1772 return -EINVAL;
1773 }
1774
1775 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1776 {
1777 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1778 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1779 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1780
1781 if (ofs != 0xffffffffU) {
1782 writelfl(val, addr + ofs);
1783 return 0;
1784 } else
1785 return -EINVAL;
1786 }
1787
1788 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1789 {
1790 int early_5080;
1791
1792 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1793
1794 if (!early_5080) {
1795 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1796 tmp |= (1 << 0);
1797 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1798 }
1799
1800 mv_reset_pci_bus(pdev, mmio);
1801 }
1802
1803 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1804 {
1805 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1806 }
1807
1808 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1809 void __iomem *mmio)
1810 {
1811 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1812 u32 tmp;
1813
1814 tmp = readl(phy_mmio + MV5_PHY_MODE);
1815
1816 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1817 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1818 }
1819
1820 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1821 {
1822 u32 tmp;
1823
1824 writel(0, mmio + MV_GPIO_PORT_CTL);
1825
1826 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1827
1828 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1829 tmp |= ~(1 << 0);
1830 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 }
1832
1833 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1834 unsigned int port)
1835 {
1836 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1837 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1838 u32 tmp;
1839 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1840
1841 if (fix_apm_sq) {
1842 tmp = readl(phy_mmio + MV5_LT_MODE);
1843 tmp |= (1 << 19);
1844 writel(tmp, phy_mmio + MV5_LT_MODE);
1845
1846 tmp = readl(phy_mmio + MV5_PHY_CTL);
1847 tmp &= ~0x3;
1848 tmp |= 0x1;
1849 writel(tmp, phy_mmio + MV5_PHY_CTL);
1850 }
1851
1852 tmp = readl(phy_mmio + MV5_PHY_MODE);
1853 tmp &= ~mask;
1854 tmp |= hpriv->signal[port].pre;
1855 tmp |= hpriv->signal[port].amps;
1856 writel(tmp, phy_mmio + MV5_PHY_MODE);
1857 }
1858
1859
1860 #undef ZERO
1861 #define ZERO(reg) writel(0, port_mmio + (reg))
1862 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1863 unsigned int port)
1864 {
1865 void __iomem *port_mmio = mv_port_base(mmio, port);
1866
1867 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1868
1869 mv_channel_reset(hpriv, mmio, port);
1870
1871 ZERO(0x028); /* command */
1872 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1873 ZERO(0x004); /* timer */
1874 ZERO(0x008); /* irq err cause */
1875 ZERO(0x00c); /* irq err mask */
1876 ZERO(0x010); /* rq bah */
1877 ZERO(0x014); /* rq inp */
1878 ZERO(0x018); /* rq outp */
1879 ZERO(0x01c); /* respq bah */
1880 ZERO(0x024); /* respq outp */
1881 ZERO(0x020); /* respq inp */
1882 ZERO(0x02c); /* test control */
1883 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1884 }
1885 #undef ZERO
1886
1887 #define ZERO(reg) writel(0, hc_mmio + (reg))
1888 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1889 unsigned int hc)
1890 {
1891 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1892 u32 tmp;
1893
1894 ZERO(0x00c);
1895 ZERO(0x010);
1896 ZERO(0x014);
1897 ZERO(0x018);
1898
1899 tmp = readl(hc_mmio + 0x20);
1900 tmp &= 0x1c1c1c1c;
1901 tmp |= 0x03030303;
1902 writel(tmp, hc_mmio + 0x20);
1903 }
1904 #undef ZERO
1905
1906 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1907 unsigned int n_hc)
1908 {
1909 unsigned int hc, port;
1910
1911 for (hc = 0; hc < n_hc; hc++) {
1912 for (port = 0; port < MV_PORTS_PER_HC; port++)
1913 mv5_reset_hc_port(hpriv, mmio,
1914 (hc * MV_PORTS_PER_HC) + port);
1915
1916 mv5_reset_one_hc(hpriv, mmio, hc);
1917 }
1918
1919 return 0;
1920 }
1921
1922 #undef ZERO
1923 #define ZERO(reg) writel(0, mmio + (reg))
1924 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1925 {
1926 u32 tmp;
1927
1928 tmp = readl(mmio + MV_PCI_MODE);
1929 tmp &= 0xff00ffff;
1930 writel(tmp, mmio + MV_PCI_MODE);
1931
1932 ZERO(MV_PCI_DISC_TIMER);
1933 ZERO(MV_PCI_MSI_TRIGGER);
1934 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1935 ZERO(HC_MAIN_IRQ_MASK_OFS);
1936 ZERO(MV_PCI_SERR_MASK);
1937 ZERO(PCI_IRQ_CAUSE_OFS);
1938 ZERO(PCI_IRQ_MASK_OFS);
1939 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1940 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1941 ZERO(MV_PCI_ERR_ATTRIBUTE);
1942 ZERO(MV_PCI_ERR_COMMAND);
1943 }
1944 #undef ZERO
1945
1946 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1947 {
1948 u32 tmp;
1949
1950 mv5_reset_flash(hpriv, mmio);
1951
1952 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1953 tmp &= 0x3;
1954 tmp |= (1 << 5) | (1 << 6);
1955 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1956 }
1957
1958 /**
1959 * mv6_reset_hc - Perform the 6xxx global soft reset
1960 * @mmio: base address of the HBA
1961 *
1962 * This routine only applies to 6xxx parts.
1963 *
1964 * LOCKING:
1965 * Inherited from caller.
1966 */
1967 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1968 unsigned int n_hc)
1969 {
1970 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1971 int i, rc = 0;
1972 u32 t;
1973
1974 	/* Follow the procedure defined in the PCI "main command and
1975 	 * status register" table.
1976 	 */
1977 t = readl(reg);
1978 writel(t | STOP_PCI_MASTER, reg);
1979
1980 for (i = 0; i < 1000; i++) {
1981 udelay(1);
1982 t = readl(reg);
1983 if (PCI_MASTER_EMPTY & t) {
1984 break;
1985 }
1986 }
1987 if (!(PCI_MASTER_EMPTY & t)) {
1988 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1989 rc = 1;
1990 goto done;
1991 }
1992
1993 /* set reset */
1994 i = 5;
1995 do {
1996 writel(t | GLOB_SFT_RST, reg);
1997 t = readl(reg);
1998 udelay(1);
1999 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2000
2001 if (!(GLOB_SFT_RST & t)) {
2002 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2003 rc = 1;
2004 goto done;
2005 }
2006
2007 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2008 i = 5;
2009 do {
2010 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2011 t = readl(reg);
2012 udelay(1);
2013 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2014
2015 if (GLOB_SFT_RST & t) {
2016 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2017 rc = 1;
2018 }
2019 done:
2020 return rc;
2021 }
2022
2023 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2024 void __iomem *mmio)
2025 {
2026 void __iomem *port_mmio;
2027 u32 tmp;
2028
2029 tmp = readl(mmio + MV_RESET_CFG);
2030 if ((tmp & (1 << 0)) == 0) {
2031 hpriv->signal[idx].amps = 0x7 << 8;
2032 hpriv->signal[idx].pre = 0x1 << 5;
2033 return;
2034 }
2035
2036 port_mmio = mv_port_base(mmio, idx);
2037 tmp = readl(port_mmio + PHY_MODE2);
2038
2039 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2040 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2041 }
2042
2043 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2044 {
2045 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2046 }
2047
2048 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2049 unsigned int port)
2050 {
2051 void __iomem *port_mmio = mv_port_base(mmio, port);
2052
2053 u32 hp_flags = hpriv->hp_flags;
2054 int fix_phy_mode2 =
2055 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2056 int fix_phy_mode4 =
2057 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2058 u32 m2, tmp;
2059
2060 if (fix_phy_mode2) {
2061 m2 = readl(port_mmio + PHY_MODE2);
2062 m2 &= ~(1 << 16);
2063 m2 |= (1 << 31);
2064 writel(m2, port_mmio + PHY_MODE2);
2065
2066 udelay(200);
2067
2068 m2 = readl(port_mmio + PHY_MODE2);
2069 m2 &= ~((1 << 16) | (1 << 31));
2070 writel(m2, port_mmio + PHY_MODE2);
2071
2072 udelay(200);
2073 }
2074
2075 /* who knows what this magic does */
2076 tmp = readl(port_mmio + PHY_MODE3);
2077 tmp &= ~0x7F800000;
2078 tmp |= 0x2A800000;
2079 writel(tmp, port_mmio + PHY_MODE3);
2080
2081 if (fix_phy_mode4) {
2082 u32 m4;
2083
2084 m4 = readl(port_mmio + PHY_MODE4);
2085
2086 if (hp_flags & MV_HP_ERRATA_60X1B2)
2087 tmp = readl(port_mmio + 0x310);
2088
2089 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2090
2091 writel(m4, port_mmio + PHY_MODE4);
2092
2093 if (hp_flags & MV_HP_ERRATA_60X1B2)
2094 writel(tmp, port_mmio + 0x310);
2095 }
2096
2097 /* Revert values of pre-emphasis and signal amps to the saved ones */
2098 m2 = readl(port_mmio + PHY_MODE2);
2099
2100 m2 &= ~MV_M2_PREAMP_MASK;
2101 m2 |= hpriv->signal[port].amps;
2102 m2 |= hpriv->signal[port].pre;
2103 m2 &= ~(1 << 16);
2104
2105 /* according to mvSata 3.6.1, some IIE values are fixed */
2106 if (IS_GEN_IIE(hpriv)) {
2107 m2 &= ~0xC30FF01F;
2108 m2 |= 0x0000900F;
2109 }
2110
2111 writel(m2, port_mmio + PHY_MODE2);
2112 }
2113
2114 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2115 unsigned int port_no)
2116 {
2117 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2118
2119 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2120
2121 if (IS_GEN_II(hpriv)) {
2122 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2123 ifctl |= (1 << 7); /* enable gen2i speed */
2124 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2125 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2126 }
2127
2128 udelay(25); /* allow reset propagation */
2129
2130 	/* The spec never mentions clearing ATA_RST here, but Marvell's
2131 	 * own driver does clear it, so we follow suit.
2132 	 */
2133 writelfl(0, port_mmio + EDMA_CMD_OFS);
2134
2135 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2136
2137 if (IS_GEN_I(hpriv))
2138 mdelay(1);
2139 }
2140
2141 /**
2142 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2143 * @ap: ATA channel to manipulate
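 * @class: resulting device class, as determined by ata_dev_try_classify()
 * @deadline: deadline (in jiffies) for the reset polling loops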
2144 *
2145 * Part of this is taken from __sata_phy_reset and modified to
2146 * not sleep since this routine gets called from interrupt level.
2147 *
2148 * LOCKING:
2149  * Inherited from caller. This is coded to be safe to call at
2150 * interrupt level, i.e. it does not sleep.
2151 */
2152 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2153 unsigned long deadline)
2154 {
2155 struct mv_port_priv *pp = ap->private_data;
2156 struct mv_host_priv *hpriv = ap->host->private_data;
2157 void __iomem *port_mmio = mv_ap_base(ap);
2158 int retry = 5;
2159 u32 sstatus;
2160
2161 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2162
2163 #ifdef DEBUG
2164 {
2165 u32 sstatus, serror, scontrol;
2166
2167 mv_scr_read(ap, SCR_STATUS, &sstatus);
2168 mv_scr_read(ap, SCR_ERROR, &serror);
2169 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2170 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2171 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2172 }
2173 #endif
2174
2175 /* Issue COMRESET via SControl */
2176 comreset_retry:
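	/* SControl 0x301 asserts COMRESET (DET=1) with interface power
	 * management transitions disabled (IPM=3); writing 0x300 then
	 * releases DET while leaving IPM alone.
	 */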
2177 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2178 msleep(1);
2179
2180 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2181 msleep(20);
2182
2183 do {
2184 sata_scr_read(ap, SCR_STATUS, &sstatus);
2185 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2186 break;
2187
2188 msleep(1);
2189 } while (time_before(jiffies, deadline));
2190
2191 /* work around errata */
2192 if (IS_GEN_II(hpriv) &&
2193 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2194 (retry-- > 0))
2195 goto comreset_retry;
2196
2197 #ifdef DEBUG
2198 {
2199 u32 sstatus, serror, scontrol;
2200
2201 mv_scr_read(ap, SCR_STATUS, &sstatus);
2202 mv_scr_read(ap, SCR_ERROR, &serror);
2203 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2204 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2205 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2206 }
2207 #endif
2208
2209 if (ata_port_offline(ap)) {
2210 *class = ATA_DEV_NONE;
2211 return;
2212 }
2213
2214 	/* Even after SStatus reports the device as ready, it seems
2215 	 * to take a while for the link to be fully established (and
2216 	 * thus for Status to stop reading 0x80/0x7F), so poll a bit
2217 	 * for that here.
2218 	 */
2219 retry = 20;
2220 while (1) {
2221 u8 drv_stat = ata_check_status(ap);
2222 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2223 break;
2224 msleep(500);
2225 if (retry-- <= 0)
2226 break;
2227 if (time_after(jiffies, deadline))
2228 break;
2229 }
2230
2231 /* FIXME: if we passed the deadline, the following
2232 * code probably produces an invalid result
2233 */
2234
2235 /* finally, read device signature from TF registers */
2236 *class = ata_dev_try_classify(ap, 0, NULL);
2237
2238 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2239
2240 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2241
2242 VPRINTK("EXIT\n");
2243 }
2244
2245 static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2246 {
2247 struct mv_port_priv *pp = ap->private_data;
2248 struct ata_eh_context *ehc = &ap->eh_context;
2249 int rc;
2250
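	/* If eDMA cannot be stopped, or if this is the port's first reset
	 * since probe, escalate to a hardreset so the channel gets a full
	 * COMRESET.
	 */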
2251 rc = mv_stop_dma(ap);
2252 if (rc)
2253 ehc->i.action |= ATA_EH_HARDRESET;
2254
2255 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2256 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2257 ehc->i.action |= ATA_EH_HARDRESET;
2258 }
2259
2260 /* if we're about to do hardreset, nothing more to do */
2261 if (ehc->i.action & ATA_EH_HARDRESET)
2262 return 0;
2263
2264 if (ata_port_online(ap))
2265 rc = ata_wait_ready(ap, deadline);
2266 else
2267 rc = -ENODEV;
2268
2269 return rc;
2270 }
2271
2272 static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2273 unsigned long deadline)
2274 {
2275 struct mv_host_priv *hpriv = ap->host->private_data;
2276 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2277
2278 mv_stop_dma(ap);
2279
2280 mv_channel_reset(hpriv, mmio, ap->port_no);
2281
2282 mv_phy_reset(ap, class, deadline);
2283
2284 return 0;
2285 }
2286
2287 static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2288 {
2289 u32 serr;
2290
2291 /* print link status */
2292 sata_print_link_status(ap);
2293
2294 /* clear SError */
2295 sata_scr_read(ap, SCR_ERROR, &serr);
2296 sata_scr_write_flush(ap, SCR_ERROR, serr);
2297
2298 /* bail out if no device is present */
2299 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2300 DPRINTK("EXIT, no device\n");
2301 return;
2302 }
2303
2304 /* set up device control */
2305 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2306 }
2307
2308 static void mv_error_handler(struct ata_port *ap)
2309 {
2310 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2311 mv_hardreset, mv_postreset);
2312 }
2313
2314 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2315 {
2316 mv_stop_dma(qc->ap);
2317 }
2318
2319 static void mv_eh_freeze(struct ata_port *ap)
2320 {
2321 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2322 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2323 u32 tmp, mask;
2324 unsigned int shift;
2325
2326 /* FIXME: handle coalescing completion events properly */
2327
2328 shift = ap->port_no * 2;
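	/* Each port owns an (err, done) bit pair in the main IRQ mask;
	 * ports on the second HC sit one bit higher because bit 8 is
	 * used for HC0's ports' coalescing-done event.
	 */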
2329 if (hc > 0)
2330 shift++;
2331
2332 mask = 0x3 << shift;
2333
2334 /* disable assertion of portN err, done events */
2335 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2336 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2337 }
2338
2339 static void mv_eh_thaw(struct ata_port *ap)
2340 {
2341 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2342 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2343 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2344 void __iomem *port_mmio = mv_ap_base(ap);
2345 u32 tmp, mask, hc_irq_cause;
2346 unsigned int shift, hc_port_no = ap->port_no;
2347
2348 /* FIXME: handle coalescing completion events properly */
2349
2350 shift = ap->port_no * 2;
2351 if (hc > 0) {
2352 shift++;
2353 hc_port_no -= 4;
2354 }
2355
2356 mask = 0x3 << shift;
2357
2358 /* clear EDMA errors on this port */
2359 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2360
2361 /* clear pending irq events */
2362 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2363 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2364 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2365 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2366
2367 /* enable assertion of portN err, done events */
2368 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2369 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2370 }
2371
2372 /**
2373 * mv_port_init - Perform some early initialization on a single port.
2374 * @port: libata data structure storing shadow register addresses
2375 * @port_mmio: base address of the port
2376 *
2377 * Initialize shadow register mmio addresses, clear outstanding
2378 * interrupts on the port, and unmask interrupts for the future
2379 * start of the port.
2380 *
2381 * LOCKING:
2382 * Inherited from caller.
2383 */
2384 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2385 {
2386 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2387 unsigned serr_ofs;
2388
2389 	/* PIO related setup */
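	/* The taskfile shadow registers are spaced 32 bits apart in the
	 * port's SHD block, hence the sizeof(u32) stride below.
	 */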
2391 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2392 port->error_addr =
2393 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2394 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2395 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2396 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2397 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2398 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2399 port->status_addr =
2400 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2401 /* special case: control/altstatus doesn't have ATA_REG_ address */
2402 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2403
2404 /* unused: */
2405 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2406
2407 /* Clear any currently outstanding port interrupt conditions */
2408 serr_ofs = mv_scr_offset(SCR_ERROR);
2409 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2410 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2411
2412 /* unmask all EDMA error interrupts */
2413 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2414
2415 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2416 readl(port_mmio + EDMA_CFG_OFS),
2417 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2418 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2419 }
2420
2421 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2422 {
2423 struct pci_dev *pdev = to_pci_dev(host->dev);
2424 struct mv_host_priv *hpriv = host->private_data;
2425 u32 hp_flags = hpriv->hp_flags;
2426
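	/* Map the board index plus PCI revision to a chip generation and
	 * the errata flags that the rest of the driver keys off.
	 */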
2427 	switch (board_idx) {
2428 case chip_5080:
2429 hpriv->ops = &mv5xxx_ops;
2430 hp_flags |= MV_HP_GEN_I;
2431
2432 switch (pdev->revision) {
2433 case 0x1:
2434 hp_flags |= MV_HP_ERRATA_50XXB0;
2435 break;
2436 case 0x3:
2437 hp_flags |= MV_HP_ERRATA_50XXB2;
2438 break;
2439 default:
2440 dev_printk(KERN_WARNING, &pdev->dev,
2441 "Applying 50XXB2 workarounds to unknown rev\n");
2442 hp_flags |= MV_HP_ERRATA_50XXB2;
2443 break;
2444 }
2445 break;
2446
2447 case chip_504x:
2448 case chip_508x:
2449 hpriv->ops = &mv5xxx_ops;
2450 hp_flags |= MV_HP_GEN_I;
2451
2452 switch (pdev->revision) {
2453 case 0x0:
2454 hp_flags |= MV_HP_ERRATA_50XXB0;
2455 break;
2456 case 0x3:
2457 hp_flags |= MV_HP_ERRATA_50XXB2;
2458 break;
2459 default:
2460 dev_printk(KERN_WARNING, &pdev->dev,
2461 "Applying B2 workarounds to unknown rev\n");
2462 hp_flags |= MV_HP_ERRATA_50XXB2;
2463 break;
2464 }
2465 break;
2466
2467 case chip_604x:
2468 case chip_608x:
2469 hpriv->ops = &mv6xxx_ops;
2470 hp_flags |= MV_HP_GEN_II;
2471
2472 switch (pdev->revision) {
2473 case 0x7:
2474 hp_flags |= MV_HP_ERRATA_60X1B2;
2475 break;
2476 case 0x9:
2477 hp_flags |= MV_HP_ERRATA_60X1C0;
2478 break;
2479 default:
2480 dev_printk(KERN_WARNING, &pdev->dev,
2481 "Applying B2 workarounds to unknown rev\n");
2482 hp_flags |= MV_HP_ERRATA_60X1B2;
2483 break;
2484 }
2485 break;
2486
2487 case chip_7042:
2488 case chip_6042:
2489 hpriv->ops = &mv6xxx_ops;
2490 hp_flags |= MV_HP_GEN_IIE;
2491
2492 switch (pdev->revision) {
2493 case 0x0:
2494 hp_flags |= MV_HP_ERRATA_XX42A0;
2495 break;
2496 case 0x1:
2497 hp_flags |= MV_HP_ERRATA_60X1C0;
2498 break;
2499 default:
2500 dev_printk(KERN_WARNING, &pdev->dev,
2501 "Applying 60X1C0 workarounds to unknown rev\n");
2502 hp_flags |= MV_HP_ERRATA_60X1C0;
2503 break;
2504 }
2505 break;
2506
2507 default:
2508 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2509 return 1;
2510 }
2511
2512 hpriv->hp_flags = hp_flags;
2513
2514 return 0;
2515 }
2516
2517 /**
2518 * mv_init_host - Perform some early initialization of the host.
2519 * @host: ATA host to initialize
2520 * @board_idx: controller index
2521 *
2522 * If possible, do an early global reset of the host. Then do
2523  * our port init and clear/unmask all relevant host interrupts.
2524 *
2525 * LOCKING:
2526 * Inherited from caller.
2527 */
2528 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2529 {
2530 int rc = 0, n_hc, port, hc;
2531 struct pci_dev *pdev = to_pci_dev(host->dev);
2532 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2533 struct mv_host_priv *hpriv = host->private_data;
2534
2535 /* global interrupt mask */
2536 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2537
2538 rc = mv_chip_id(host, board_idx);
2539 if (rc)
2540 goto done;
2541
2542 n_hc = mv_get_hc_count(host->ports[0]->flags);
2543
2544 for (port = 0; port < host->n_ports; port++)
2545 hpriv->ops->read_preamp(hpriv, port, mmio);
2546
2547 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2548 if (rc)
2549 goto done;
2550
2551 hpriv->ops->reset_flash(hpriv, mmio);
2552 hpriv->ops->reset_bus(pdev, mmio);
2553 hpriv->ops->enable_leds(hpriv, mmio);
2554
2555 for (port = 0; port < host->n_ports; port++) {
2556 if (IS_GEN_II(hpriv)) {
2557 void __iomem *port_mmio = mv_port_base(mmio, port);
2558
2559 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2560 ifctl |= (1 << 7); /* enable gen2i speed */
2561 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2562 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2563 }
2564
2565 hpriv->ops->phy_errata(hpriv, mmio, port);
2566 }
2567
2568 for (port = 0; port < host->n_ports; port++) {
2569 void __iomem *port_mmio = mv_port_base(mmio, port);
2570 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2571 }
2572
2573 for (hc = 0; hc < n_hc; hc++) {
2574 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2575
2576 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2577 "(before clear)=0x%08x\n", hc,
2578 readl(hc_mmio + HC_CFG_OFS),
2579 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2580
2581 /* Clear any currently outstanding hc interrupt conditions */
2582 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2583 }
2584
2585 /* Clear any currently outstanding host interrupt conditions */
2586 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2587
2588 /* and unmask interrupt generation for host regs */
2589 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2590
2591 if (IS_GEN_I(hpriv))
2592 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2593 else
2594 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2595
2596 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2597 "PCI int cause/mask=0x%08x/0x%08x\n",
2598 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2599 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2600 readl(mmio + PCI_IRQ_CAUSE_OFS),
2601 readl(mmio + PCI_IRQ_MASK_OFS));
2602
2603 done:
2604 return rc;
2605 }
2606
2607 /**
2608 * mv_print_info - Dump key info to kernel log for perusal.
2609 * @host: ATA host to print info about
2610 *
2611 * FIXME: complete this.
2612 *
2613 * LOCKING:
2614 * Inherited from caller.
2615 */
2616 static void mv_print_info(struct ata_host *host)
2617 {
2618 struct pci_dev *pdev = to_pci_dev(host->dev);
2619 struct mv_host_priv *hpriv = host->private_data;
2620 u8 scc;
2621 const char *scc_s, *gen;
2622
2623 	/* Read the PCI device class code to see whether the chip presents
2624 	 * itself as a SCSI or a RAID controller, for the report below.
2625 	 */
2626 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2627 if (scc == 0)
2628 scc_s = "SCSI";
2629 else if (scc == 0x01)
2630 scc_s = "RAID";
2631 else
2632 scc_s = "?";
2633
2634 if (IS_GEN_I(hpriv))
2635 gen = "I";
2636 else if (IS_GEN_II(hpriv))
2637 gen = "II";
2638 else if (IS_GEN_IIE(hpriv))
2639 gen = "IIE";
2640 else
2641 gen = "?";
2642
2643 dev_printk(KERN_INFO, &pdev->dev,
2644 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2645 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2646 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2647 }
2648
2649 /**
2650 * mv_init_one - handle a positive probe of a Marvell host
2651 * @pdev: PCI device found
2652 * @ent: PCI device ID entry for the matched host
2653 *
2654 * LOCKING:
2655 * Inherited from caller.
2656 */
2657 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2658 {
2659 static int printed_version = 0;
2660 unsigned int board_idx = (unsigned int)ent->driver_data;
2661 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2662 struct ata_host *host;
2663 struct mv_host_priv *hpriv;
2664 int n_ports, rc;
2665
2666 if (!printed_version++)
2667 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2668
2669 /* allocate host */
2670 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2671
2672 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2673 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2674 if (!host || !hpriv)
2675 return -ENOMEM;
2676 host->private_data = hpriv;
2677
2678 /* acquire resources */
2679 rc = pcim_enable_device(pdev);
2680 if (rc)
2681 return rc;
2682
2683 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2684 if (rc == -EBUSY)
2685 pcim_pin_device(pdev);
2686 if (rc)
2687 return rc;
2688 host->iomap = pcim_iomap_table(pdev);
2689
2690 rc = pci_go_64(pdev);
2691 if (rc)
2692 return rc;
2693
2694 /* initialize adapter */
2695 rc = mv_init_host(host, board_idx);
2696 if (rc)
2697 return rc;
2698
2699 /* Enable interrupts */
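	/* If MSI was requested but cannot be enabled, make sure legacy
	 * INTx is turned back on.
	 */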
2700 if (msi && pci_enable_msi(pdev))
2701 pci_intx(pdev, 1);
2702
2703 mv_dump_pci_cfg(pdev, 0x68);
2704 mv_print_info(host);
2705
2706 pci_set_master(pdev);
2707 pci_try_set_mwi(pdev);
2708 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2709 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2710 }
2711
2712 static int __init mv_init(void)
2713 {
2714 return pci_register_driver(&mv_pci_driver);
2715 }
2716
2717 static void __exit mv_exit(void)
2718 {
2719 pci_unregister_driver(&mv_pci_driver);
2720 }
2721
2722 MODULE_AUTHOR("Brett Russ");
2723 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2724 MODULE_LICENSE("GPL");
2725 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2726 MODULE_VERSION(DRV_VERSION);
2727
2728 module_param(msi, int, 0444);
2729 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2730
2731 module_init(mv_init);
2732 module_exit(mv_exit);