PCI: Change all drivers to use pci_device->revision
drivers/ata/sata_mv.c
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the added
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  12) Verify that the 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_PCI_REG_BASE = 0,
	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH = ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 176,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
			   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	PORT0_ERR = (1 << 0),	/* shift by port # */
	PORT0_DONE = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0,		/* queueing disabled */
	EDMA_CFG_NCQ = (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),
	EDMA_ERR_PRD_PAR = (1 << 1),
	EDMA_ERR_DEV = (1 << 2),
	EDMA_ERR_DEV_DCON = (1 << 3),
	EDMA_ERR_DEV_CON = (1 << 4),
	EDMA_ERR_SERR = (1 << 5),
	EDMA_ERR_SELF_DIS = (1 << 7),
	EDMA_ERR_BIST_ASYNC = (1 << 8),
	EDMA_ERR_CRBQ_PAR = (1 << 9),
	EDMA_ERR_CRPB_PAR = (1 << 10),
	EDMA_ERR_INTRL_PAR = (1 << 11),
	EDMA_ERR_IORDY = (1 << 12),
	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
	EDMA_ERR_LNK_DATA_RX = (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
	EDMA_ERR_TRANS_PROTO = (1 << 31),
	EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			  EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
			  EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
			  EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
			  EDMA_ERR_LNK_DATA_RX |
			  EDMA_ERR_LNK_DATA_TX |
			  EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,
	EDMA_EN = (1 << 0),
	EDMA_DS = (1 << 1),
	ATA_RST = (1 << 2),

	EDMA_IORDY_TMOUT = 0x34,
	EDMA_ARB_CFG = 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_50XX = (1 << 6),
	MV_HP_GEN_IIE = (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
};

#define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY = 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl;
	dma_addr_t sg_tbl_dma;
	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = MV_USE_Q_DEPTH,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.phy_reset = mv_phy_reset,
	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.eng_timeout = mv_eng_timeout,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.phy_reset = mv_phy_reset,
	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.eng_timeout = mv_eng_timeout,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.phy_reset = mv_phy_reset,
	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep_iie,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.eng_timeout = mv_eng_timeout,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			  MV_FLAG_DUAL_HC),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name = DRV_NAME,
	.id_table = mv_pci_tbl,
	.probe = mv_init_one,
	.remove = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */

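/* Usage sketch (hypothetical invocation, assuming the usual
 * module_param(msi, ...) hookup appears later in this file):
 *
 *	modprobe sata_mv msi=1		# request MSI instead of legacy INTx
 */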

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
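/* Aside: on later kernels the fallback ladder in pci_go_64() collapses into
 * the combined DMA-mask helper.  A minimal sketch, assuming
 * dma_set_mask_and_coherent() is available in the target tree:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */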

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
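/* MMIO writes over PCI are posted: writel() may return before the device
 * actually sees the data.  Reading any register back from the same device
 * drains the posting buffers, so writelfl() ("write long, with flush") is
 * used wherever ordering matters, e.g.
 *
 *	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 *
 * guarantees the EDMA engine has seen the enable bit before the next
 * statement runs.
 */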

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
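/* Worked example of the window arithmetic above, for port 5:
 *
 *	hc       = 5 >> MV_PORT_HC_SHIFT = 1
 *	hardport = 5 &  MV_PORT_MASK     = 1
 *	mv_port_base(base, 5)
 *		= base + MV_SATAHC0_REG_BASE + hc * MV_SATAHC_REG_SZ
 *		  + MV_SATAHC_ARBTR_REG_SZ + hardport * MV_PORT_REG_SZ
 *		= base + 0x20000 + 0x10000 + 0x2000 + 0x2000
 *		= base + 0x34000
 */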

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
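/* With libata's SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2, SCR_ACTIVE = 3), the switch above maps to:
 *
 *	SCR_STATUS -> 0x300,  SCR_ERROR -> 0x304,  SCR_CONTROL -> 0x308,
 *	SCR_ACTIVE -> 0x350   (it sits apart from the other three)
 */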

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}

static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR);	/* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
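/* Layout of the coherent DMA chunk carved up above (offsets from mem_dma):
 *
 *	0x0000	CRQB ring:   32 slots * 32 B = 0x400
 *	0x0400	CRPB ring:   32 slots *  8 B = 0x100
 *	0x0500	ePRD table: 176 slots * 16 B = 0xb00
 *
 * Total 0x1000 bytes, i.e. MV_PORT_PRIV_DMA_SZ (4KB).
 */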

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
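/* Note the (addr >> 16) >> 16 idiom above: dma_addr_t may be only 32 bits
 * wide, and shifting a 32-bit value right by 32 is undefined behavior in C.
 * The double shift yields the high dword on 64-bit configurations and 0 on
 * 32-bit ones.
 */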

static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
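/* Since MV_MAX_Q_DEPTH is a power of two, masking with MV_MAX_Q_DEPTH_MASK
 * wraps the ring index without a divide: mv_inc_q_index(31) == 0.
 */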

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
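/* Example of the resulting CRQB command word, for the command register of
 * a READ DMA (0xc8) written as the last word (assuming ATA_REG_CMD == 7,
 * matching its taskfile offset):
 *
 *	0xc8 | (7 << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 *	    = 0xc8 | 0x700 | 0x1000 | 0x8000 = 0x97c8
 */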

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			       >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;	/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_interrupt - Main interrupt handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
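/* e.g. port 5: hardport 1 within HC 1, so its PHY block lives at
 * mv_hc_base_from_port(mmio, 5) + 0x200.
 */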

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of SATA host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1899
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

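/* Halt eDMA on the port, reset the channel, then bring the PHY back up. */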
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

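/* msleep() is not allowed in atomic context; busy-wait there instead. */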
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}

/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	/* SControl DET = 1 starts COMRESET; DET = 0 releases it */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* wait up to 200ms for the PHY to settle on a DET state */
	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* classify the attached device from its signature taskfile */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}

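/* Process-context variant of __mv_phy_reset(); may sleep. */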
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	/* qc may be NULL if the timeout raced with command completion */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
		       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!qc || !(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

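/*
 * Identify the chip generation and stepping from the board index and
 * the PCI revision ID (pdev->revision), then select the matching ops
 * vector and errata flags.  Unknown revisions fall back to a default
 * set of workarounds, with a warning.
 */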
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}


/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class code to report whether the chip
	 * presents itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI was
	 * requested but could not be enabled.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);