[SCSI] mvsas: split driver into multiple files
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / scsi / mvsas / mv_sas.c
CommitLineData
b5762948 1/*
dd4969a8 2 mv_sas.c - Marvell 88SE6440 SAS/SATA support
b5762948
JG
3
4 Copyright 2007 Red Hat, Inc.
8f261aaf 5 Copyright 2008 Marvell. <kewei@marvell.com>
b5762948
JG
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
dd4969a8
JG
31#include "mv_sas.h"
32#include "mv_64xx.h"
33#include "mv_chips.h"
8f261aaf
KW
34
35/* offset for D2H FIS in the Received FIS List Structure */
36#define SATA_RECEIVED_D2H_FIS(reg_set) \
37 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
38#define SATA_RECEIVED_PIO_FIS(reg_set) \
39 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
40#define UNASSOC_D2H_FIS(id) \
41 ((void *) mvi->rx_fis + 0x100 * id)
42
dd4969a8
JG
/*
 * Per-command context passed from mvs_task_exec() to the
 * protocol-specific prep routines (SMP/SSP/ATA).
 */
struct mvs_task_exec_info {
	struct sas_task *task;		/* libsas task being prepared */
	struct mvs_cmd_hdr *hdr;	/* command header for the chosen slot */
	struct mvs_port *port;		/* destination port */
	u32 tag;			/* allocated slot/tag index */
	int n_elem;			/* number of mapped S/G elements */
};
50
dd4969a8
JG
51static void mvs_release_task(struct mvs_info *mvi, int phy_no);
52static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
53static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
54 int get_st);
55static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
56static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
57 u32 slot_idx);
b5762948 58
dd4969a8
JG
59static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
60{
61 if (task->lldd_task) {
62 struct mvs_slot_info *slot;
63 slot = (struct mvs_slot_info *) task->lldd_task;
64 *tag = slot - mvi->slot_info;
65 return 1;
66 }
67 return 0;
68}
8f261aaf 69
dd4969a8
JG
70static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
71{
72 void *bitmap = (void *) &mvi->tags;
73 clear_bit(tag, bitmap);
74}
8f261aaf 75
dd4969a8
JG
/* Release @tag back to the pool (thin alias of mvs_tag_clear). */
static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}
8f261aaf 80
dd4969a8
JG
81static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
82{
83 void *bitmap = (void *) &mvi->tags;
84 set_bit(tag, bitmap);
85}
8f261aaf 86
dd4969a8
JG
87static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
88{
89 unsigned int index, tag;
90 void *bitmap = (void *) &mvi->tags;
b5762948 91
dd4969a8
JG
92 index = find_first_zero_bit(bitmap, MVS_SLOTS);
93 tag = index;
94 if (tag >= MVS_SLOTS)
95 return -SAS_QUEUE_FULL;
96 mvs_tag_set(mvi, tag);
97 *tag_out = tag;
98 return 0;
99}
b5762948 100
dd4969a8
JG
101void mvs_tag_init(struct mvs_info *mvi)
102{
103 int i;
104 for (i = 0; i < MVS_SLOTS; ++i)
105 mvs_tag_clear(mvi, i);
106}
b5762948 107
8f261aaf
KW
/*
 * Dump @size bytes at @data as 16-byte hex + ASCII rows; @baseaddr is
 * only used to label each row (typically a DMA address).
 */
static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;	/* bytes printed in the current row (<= 16) */
	u32 offset;

	offset = 0;
	while (size) {
		printk("%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk("%02X ", (u32)data[i]);
			else
				printk(" ");	/* pad short final row */
		}
		printk(": ");
		/* ASCII column: printable chars as-is, the rest as '.' */
		for (i = 0; i < run; i++)
			printk("%c", isalnum(data[i]) ? data[i] : '.');
		printk("\n");
		data = &data[16];
		offset += run;
	}
	printk("\n");
}
137
#if _MV_DUMP
/*
 * Debug helper: hex-dump the first 32 bytes of slot @tag's status
 * (response) buffer.  Compiled only when _MV_DUMP is enabled.
 */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			   enum sas_protocol proto)
{
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* status buffer follows the command table, OAF and PRD table */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
	    (u32) slot->buf_dma + offset);
}
#endif
8f261aaf
KW
154
/*
 * Debug helper: dump the hardware structures associated with slot @tag
 * (delivery queue entry, command header, command table, open address
 * frame, status buffer and PRD table).  No-op unless _MV_DUMP is set.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = slot->tx;
	/* << 16 << 16 instead of << 32: safe on 32-bit u32 reads */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
203
/*
 * Debug helper: dump the next completion-queue entry and the task it
 * refers to.  Compiled only when _MV_DUMP > 2.
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
225
dd4969a8
JG
/* FIXME: locking? */
/*
 * libsas phy_control hook: apply @func to the phy identified by
 * @sas_phy->id by rewriting its phy-control register.
 * Returns 0 on success, -EOPNOTSUPP for unsupported functions.
 */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata)
{
	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:{
			struct sas_phy_linkrates *rates = funcdata;
			u32 lrmin = 0, lrmax = 0;

			/* min rate field is bits 8-11, max rate bits 12-15 */
			lrmin = (rates->minimum_linkrate << 8);
			lrmax = (rates->maximum_linkrate << 12);

			if (lrmin) {
				tmp &= ~(0xf << 8);
				tmp |= lrmin;
			}
			if (lrmax) {
				tmp &= ~(0xf << 12);
				tmp |= lrmax;
			}
			mvs_write_phy_ctl(mvi, phy_id, tmp);
			break;
		}

	case PHY_FUNC_HARD_RESET:
		/* hard reset already pending: nothing to do */
		if (tmp & PHY_RST_HARD)
			break;
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
		break;

	case PHY_FUNC_LINK_RESET:
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
		break;

	case PHY_FUNC_DISABLE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}
273
/*
 * Push attach information for phy @i up to libsas: copy negotiated and
 * min/max link rates into the transport sas_phy, build the identify
 * frame for SAS phys, then raise a PORTE_BYTES_DMAED port event.
 * No-op if the phy is not attached.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

	if (!phy->phy_attached)
		return;

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		/* 6440-class hardware tops out at 3.0 Gbps */
		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}
	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
306
/*
 * scsi_host slave_configure hook: run the standard libsas
 * configuration, then clamp SATA devices to queue depth 1 (NCQ is
 * deliberately left disabled here).
 */
int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;

	if (dev_is_sata(dev)) {
		/* struct ata_port *ap = dev->sata_dev.ap; */
		/* struct ata_device *adev = ap->link.device; */

		/* clamp at no NCQ for the time being */
		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	}
	return 0;
}
325
dd4969a8 326void mvs_scan_start(struct Scsi_Host *shost)
b5762948 327{
8f261aaf 328 int i;
dd4969a8
JG
329 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
330
331 for (i = 0; i < mvi->chip->n_phy; ++i) {
332 mvs_bytes_dmaed(mvi, i);
333 }
b5762948
JG
334}
335
dd4969a8 336int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
b5762948 337{
dd4969a8
JG
338 /* give the phy enabling interrupt event time to come in (1s
339 * is empirically about all it takes) */
340 if (time < HZ)
341 return 0;
342 /* Wait for discovery to finish */
343 scsi_flush_work(shost);
344 return 1;
b5762948
JG
345}
346
dd4969a8
JG
/*
 * Prepare an SMP command in slot @tei->tag.
 *
 * DMA-maps the SMP request/response scatterlists, carves the slot's
 * MVS_SLOT_BUF_SZ buffer into command table / open address frame /
 * PRD table / status buffer regions, fills the command header and TX
 * ring entry, and builds the open address frame.
 *
 * Returns 0 on success, -ENOMEM on mapping failure, -EINVAL when the
 * request or response length is not dword-aligned.  On error all
 * mappings made here are undone.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* with dumping enabled the request is copied into the slot buffer
	 * so it can be hex-dumped later */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
472
dd4969a8 473static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
8f261aaf 474{
dd4969a8 475 struct ata_queued_cmd *qc = task->uldd_task;
8f261aaf 476
dd4969a8
JG
477 if (qc) {
478 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
479 qc->tf.command == ATA_CMD_FPDMA_READ) {
480 *tag = qc->tag;
481 return 1;
482 }
8f261aaf 483 }
8f261aaf 484
dd4969a8 485 return 0;
8f261aaf
KW
486}
487
dd4969a8
JG
/*
 * Prepare a SATA/STP command in slot @tei->tag.
 *
 * Assigns a SATA register set for the port, fills the TX ring entry
 * and command header (NCQ tag handling included), lays out the slot
 * buffer (command FIS+ATAPI CDB / open address frame / PRD table /
 * status buffer) and copies in the host-to-device FIS.
 *
 * Returns 0 on success, -EBUSY when no register set is available.
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct mvs_port *port = tei->port;
	u32 tag = tei->tag;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
		return -EBUSY;

	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
					(port->taskfileset << TXQ_SRS_SHIFT));

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
		task->ata_task.fis.sector_count |= hdr->tags << 3;
	else
		hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP. unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA. kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
607
/*
 * Prepare an SSP command in slot @tei->tag.
 *
 * Fills the TX ring entry and command header, lays out the slot
 * buffer (SSP frame / open address frame / PRD table / status
 * buffer), then builds the SSP frame header and command IU.
 *
 * Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u8 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port phy map when a wide port has formed */
	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
		task->dev->port->phy_mask;
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
		(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
725
/*
 * libsas execute-task entry point.
 *
 * Prepares up to @num linked sas_tasks under mvi->lock: for each task
 * it maps the data scatterlist (non-ATA), allocates a slot tag,
 * dispatches to the protocol-specific prep routine, and records the
 * slot.  Tasks bound for unattached non-ATA ports are completed
 * immediately with SAS_PHY_DOWN.  The hardware TX producer index is
 * only written once, after all tasks are queued.
 *
 * Returns 0 on success or a negative errno / SAS error code; on error
 * the current task's tag and mappings are released.
 */
int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);
	do {
		dev = t->dev;
		tei.port = &mvi->port[dev->port->id];

		if (!tei.port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				rc = SAS_PHY_DOWN;
				goto out_done;
			} else {
				/* complete the task in place and move on */
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			}
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			/* ATA scatterlists are mapped by libata */
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		slot = &mvi->slot_info[tag];
		t->lldd_task = NULL;
		slot->n_elem = n_elem;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		slot->task = t;
		slot->port = tei.port;
		t->lldd_task = (void *) slot;
		list_add_tail(&slot->list, &slot->port->list);
		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	rc = 0;
	goto out_done;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
out_done:
	/* kick the hardware only if at least one command was queued */
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}
845
846static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
847{
848 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
849 mvs_tag_clear(mvi, slot_idx);
850}
851
/*
 * Undo the DMA mappings made for @task and detach it from @slot.
 * Note: does not release the slot tag itself; callers handle that.
 */
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	/* data scatterlist was mapped in mvs_task_exec() for non-ATA */
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			pci_unmap_sg(mvi->pdev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP req/resp were mapped in mvs_task_prep_smp() */
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	list_del(&slot->list);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
}
880
/*
 * Propagate phy @i's port wide-port membership map into the per-phy
 * WIDE_PORT configuration registers after membership changes.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	/* NOTE(review): iteration variables come from the for_each_phy()
	 * macro; phys selected by the low bit of 'no' receive the full
	 * phymap while the rest are cleared — confirm against the macro
	 * definition in the driver headers. */
	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
897
898static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
899{
900 u32 tmp;
901 struct mvs_phy *phy = &mvi->phy[i];
902 struct mvs_port *port = phy->port;;
903
904 tmp = mvs_read_phy_ctl(mvi, i);
905
906 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
907 if (!port)
908 phy->phy_attached = 1;
909 return tmp;
910 }
911
912 if (port) {
913 if (phy->phy_type & PORT_TYPE_SAS) {
914 port->wide_port_phymap &= ~(1U << i);
915 if (!port->wide_port_phymap)
916 port->port_attached = 0;
917 mvs_update_wideport(mvi, i);
918 } else if (phy->phy_type & PORT_TYPE_SATA)
919 port->port_attached = 0;
920 mvs_free_reg_set(mvi, phy->port);
921 phy->port = NULL;
922 phy->phy_attached = 0;
923 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
924 }
925 return 0;
926}
927
928static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
929{
930 u32 *s = (u32 *) buf;
931
932 if (!s)
933 return NULL;
934
935 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
936 s[3] = mvs_read_port_cfg_data(mvi, i);
937
938 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
939 s[2] = mvs_read_port_cfg_data(mvi, i);
940
941 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
942 s[1] = mvs_read_port_cfg_data(mvi, i);
943
944 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
945 s[0] = mvs_read_port_cfg_data(mvi, i);
946
947 return (void *)s;
948}
949
/* Non-zero iff @irq_status reports receipt of a SATA signature FIS. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
954
/*
 * Refresh cached information for phy @i from the port configuration
 * registers: the phy's own identify data and SAS address, and — when
 * the phy is up — the attached device's address/type (SAS) or the
 * received signature FIS (SATA).  When @get_st is set, the irq status
 * is read first and acknowledged (written back) at the end.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
					int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp;
	u64 tmp64;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* decode negotiated and programmed min/max link rates */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
		phy->minimum_linkrate =
			(phy->phy_status &
				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
		phy->maximum_linkrate =
			(phy->phy_status &
				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* Updated attached_sas_addr */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
			phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
			    phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->att_dev_sas_addr = i;	/* temp */
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				/* no signature FIS yet: not a usable SATA phy */
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
				phy->phy_type &= ~(PORT_TYPE_SATA);
				goto out_done;
			}
		}
		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i,
			(unsigned long long)phy->att_dev_sas_addr,
			(unsigned long long)phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_LATECLK;
		else
			tmp |= PHY_MODE6_LATECLK;
		mvs_write_port_vsr_data(mvi, i, tmp);

	}
out_done:
	/* acknowledge the irq status we consumed */
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
1062
dd4969a8 1063void mvs_port_formed(struct asd_sas_phy *sas_phy)
8f261aaf 1064{
dd4969a8
JG
1065 struct sas_ha_struct *sas_ha = sas_phy->ha;
1066 struct mvs_info *mvi = sas_ha->lldd_ha;
1067 struct asd_sas_port *sas_port = sas_phy->port;
1068 struct mvs_phy *phy = sas_phy->lldd_phy;
1069 struct mvs_port *port = &mvi->port[sas_port->id];
8f261aaf 1070 unsigned long flags;
8f261aaf 1071
dd4969a8
JG
1072 spin_lock_irqsave(&mvi->lock, flags);
1073 port->port_attached = 1;
1074 phy->port = port;
1075 port->taskfileset = MVS_ID_NOT_MAPPED;
1076 if (phy->phy_type & PORT_TYPE_SAS) {
1077 port->wide_port_phymap = sas_port->phy_mask;
1078 mvs_update_wideport(mvi, sas_phy->id);
8f261aaf 1079 }
dd4969a8
JG
1080 spin_unlock_irqrestore(&mvi->lock, flags);
1081}
1082
/*
 * libsas I_T nexus reset handler.
 *
 * Not implemented for this hardware; always report failure so libsas
 * falls back to its other error-recovery escalation steps.
 */
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	return TMF_RESP_FUNC_FAILED;
}
1087
1088static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1089 u32 slot_idx, int err)
1090{
1091 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1092 struct task_status_struct *tstat = &task->task_status;
1093 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1094 int stat = SAM_GOOD;
1095
1096 resp->frame_len = sizeof(struct dev_to_host_fis);
1097 memcpy(&resp->ending_fis[0],
1098 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1099 sizeof(struct dev_to_host_fis));
1100 tstat->buf_valid_size = sizeof(*resp);
1101 if (unlikely(err))
1102 stat = SAS_PROTO_RESPONSE;
1103 return stat;
1104}
1105
/*
 * mvs_slot_err - decode the per-slot error information record
 * @mvi:	controller instance
 * @task:	the failed task
 * @slot_idx:	command slot index
 *
 * Inspects the two error dwords the hardware wrote into the slot's
 * response buffer and maps them to a SAM/SAS status.  A busy-slot error
 * triggers a slot reset and is reported as SAS_QUEUE_FULL so the midlayer
 * retries.  For SATA/STP, a taskfile error pulls in the D2H FIS via
 * mvs_sata_done().  Default status is SAM_CHECK_COND.
 */
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
	int stat = SAM_CHECK_COND;

	if (err_dw1 & SLOT_BSY_ERR) {
		stat = SAS_QUEUE_FULL;
		mvs_slot_reset(mvi, task, slot_idx);
	}
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* no protocol-specific decode for SSP */
		break;
	case SAS_PROTOCOL_SMP:
		/* no protocol-specific decode for SMP */
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (err_dw0 & TFILE_ERR)
			stat = mvs_sata_done(mvi, task, slot_idx, 1);
		break;
	default:
		break;
	}

	/* dump the raw error record for debugging */
	mvs_hexdump(16, (u8 *) slot->response, 0);
	return stat;
}
1136
dd4969a8 1137static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
b5762948 1138{
dd4969a8
JG
1139 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1140 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1141 struct sas_task *task = slot->task;
1142 struct task_status_struct *tstat;
1143 struct mvs_port *port;
1144 bool aborted;
1145 void *to;
b5762948 1146
dd4969a8
JG
1147 if (unlikely(!task || !task->lldd_task))
1148 return -1;
b5762948 1149
dd4969a8 1150 mvs_hba_cq_dump(mvi);
b5762948 1151
dd4969a8
JG
1152 spin_lock(&task->task_state_lock);
1153 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1154 if (!aborted) {
1155 task->task_state_flags &=
1156 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1157 task->task_state_flags |= SAS_TASK_STATE_DONE;
b5762948 1158 }
dd4969a8 1159 spin_unlock(&task->task_state_lock);
b5762948 1160
dd4969a8
JG
1161 if (aborted) {
1162 mvs_slot_task_free(mvi, task, slot, slot_idx);
1163 mvs_slot_free(mvi, rx_desc);
1164 return -1;
1165 }
b5762948 1166
dd4969a8
JG
1167 port = slot->port;
1168 tstat = &task->task_status;
1169 memset(tstat, 0, sizeof(*tstat));
1170 tstat->resp = SAS_TASK_COMPLETE;
b5762948 1171
dd4969a8
JG
1172 if (unlikely(!port->port_attached || flags)) {
1173 mvs_slot_err(mvi, task, slot_idx);
1174 if (!sas_protocol_ata(task->task_proto))
1175 tstat->stat = SAS_PHY_DOWN;
1176 goto out;
1177 }
b5762948 1178
dd4969a8
JG
1179 /* error info record present */
1180 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1181 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1182 goto out;
1183 }
b5762948 1184
dd4969a8
JG
1185 switch (task->task_proto) {
1186 case SAS_PROTOCOL_SSP:
1187 /* hw says status == 0, datapres == 0 */
1188 if (rx_desc & RXQ_GOOD) {
1189 tstat->stat = SAM_GOOD;
1190 tstat->resp = SAS_TASK_COMPLETE;
1191 }
1192 /* response frame present */
1193 else if (rx_desc & RXQ_RSP) {
1194 struct ssp_response_iu *iu =
1195 slot->response + sizeof(struct mvs_err_info);
1196 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1197 }
b5762948 1198
dd4969a8
JG
1199 /* should never happen? */
1200 else
1201 tstat->stat = SAM_CHECK_COND;
1202 break;
1203
1204 case SAS_PROTOCOL_SMP: {
1205 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1206 tstat->stat = SAM_GOOD;
1207 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1208 memcpy(to + sg_resp->offset,
1209 slot->response + sizeof(struct mvs_err_info),
1210 sg_dma_len(sg_resp));
1211 kunmap_atomic(to, KM_IRQ0);
8f261aaf 1212 break;
b5762948 1213 }
b5762948 1214
dd4969a8
JG
1215 case SAS_PROTOCOL_SATA:
1216 case SAS_PROTOCOL_STP:
1217 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1218 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
b5762948 1219 break;
dd4969a8 1220 }
b5762948 1221
b5762948 1222 default:
dd4969a8
JG
1223 tstat->stat = SAM_CHECK_COND;
1224 break;
b5762948
JG
1225 }
1226
dd4969a8
JG
1227out:
1228 mvs_slot_task_free(mvi, task, slot, slot_idx);
1229 if (unlikely(tstat->stat != SAS_QUEUE_FULL))
1230 mvs_slot_free(mvi, rx_desc);
b5762948 1231
dd4969a8
JG
1232 spin_unlock(&mvi->lock);
1233 task->task_done(task);
1234 spin_lock(&mvi->lock);
1235 return tstat->stat;
b5762948
JG
1236}
1237
dd4969a8 1238static void mvs_release_task(struct mvs_info *mvi, int phy_no)
b5762948 1239{
dd4969a8
JG
1240 struct list_head *pos, *n;
1241 struct mvs_slot_info *slot;
1242 struct mvs_phy *phy = &mvi->phy[phy_no];
1243 struct mvs_port *port = phy->port;
1244 u32 rx_desc;
b5762948 1245
dd4969a8
JG
1246 if (!port)
1247 return;
b5762948 1248
dd4969a8
JG
1249 list_for_each_safe(pos, n, &port->list) {
1250 slot = container_of(pos, struct mvs_slot_info, list);
1251 rx_desc = (u32) (slot - mvi->slot_info);
1252 mvs_slot_complete(mvi, rx_desc, 1);
b5762948 1253 }
dd4969a8 1254}
b5762948 1255
dd4969a8
JG
/*
 * mvs_int_port - service per-port phy events
 * @mvi:	controller instance
 * @phy_no:	phy/port index
 * @events:	port-level cause bits from the global interrupt status
 *
 * Reads the per-phy interrupt status and handles: phy loss (release all
 * tasks, notify libsas or retry link reset), SATA COMWAKE (arm the
 * signature-FIS interrupt), signature-FIS/identify-done (bring the phy
 * up and notify OOB done), and broadcast-change (expander rescan).
 * The consumed status bits are written back to acknowledge them.
 */
static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
	/*
	 * events is port event now ,
	 * we need check the interrupt status which belongs to per port.
	 */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		phy_no, phy->irq_status);

	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		/* link dropped (or decode error): abandon in-flight I/O */
		mvs_release_task(mvi, phy_no);
		if (!mvs_is_phy_ready(mvi, phy_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
			dev_printk(KERN_INFO, &pdev->dev,
				"Port %d Unplug Notice\n", phy_no);

		} else
			/* still ready: try to renegotiate the link */
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			/* SATA OOB done: start listening for the sig FIS */
			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
			mvs_write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, phy_no);

				if (phy->phy_type & PORT_TYPE_SATA) {
					/* sig FIS seen; stop its interrupt */
					u32 tmp = mvs_read_port_irq_mask(mvi,
								phy_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								phy_no, tmp);
				}

				mvs_update_phyinfo(mvi, phy_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, phy_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
							NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH) {
			/* expander topology changed: trigger discovery */
			mvs_release_task(mvi, phy_no);
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
		}
	}
	/* acknowledge everything we just handled */
	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
b5762948 1320
dd4969a8 1321static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
b5762948
JG
1322{
1323 void __iomem *regs = mvi->regs;
dd4969a8
JG
1324 u32 rx_prod_idx, rx_desc;
1325 bool attn = false;
1326 struct pci_dev *pdev = mvi->pdev;
8f261aaf 1327
dd4969a8
JG
1328 /* the first dword in the RX ring is special: it contains
1329 * a mirror of the hardware's RX producer index, so that
1330 * we don't have to stall the CPU reading that register.
1331 * The actual RX ring is offset by one dword, due to this.
1332 */
1333 rx_prod_idx = mvi->rx_cons;
1334 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
1335 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
1336 return 0;
8f261aaf 1337
dd4969a8
JG
1338 /* The CMPL_Q may come late, read from register and try again
1339 * note: if coalescing is enabled,
1340 * it will need to read from register every time for sure
1341 */
1342 if (mvi->rx_cons == rx_prod_idx)
1343 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
8f261aaf 1344
dd4969a8
JG
1345 if (mvi->rx_cons == rx_prod_idx)
1346 return 0;
8f261aaf 1347
dd4969a8 1348 while (mvi->rx_cons != rx_prod_idx) {
8f261aaf 1349
dd4969a8
JG
1350 /* increment our internal RX consumer pointer */
1351 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
8f261aaf 1352
dd4969a8 1353 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
8f261aaf 1354
dd4969a8
JG
1355 if (likely(rx_desc & RXQ_DONE))
1356 mvs_slot_complete(mvi, rx_desc, 0);
1357 if (rx_desc & RXQ_ATTN) {
1358 attn = true;
1359 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1360 rx_desc);
1361 } else if (rx_desc & RXQ_ERR) {
1362 if (!(rx_desc & RXQ_DONE))
1363 mvs_slot_complete(mvi, rx_desc, 0);
1364 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1365 rx_desc);
1366 } else if (rx_desc & RXQ_SLOT_RESET) {
1367 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1368 rx_desc);
1369 mvs_slot_free(mvi, rx_desc);
1370 }
1371 }
8f261aaf 1372
dd4969a8
JG
1373 if (attn && self_clear)
1374 mvs_int_full(mvi);
8f261aaf 1375
dd4969a8 1376 return 0;
8f261aaf
KW
1377}
1378
dd4969a8
JG
1379#ifndef MVS_DISABLE_NVRAM
1380static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
8f261aaf 1381{
dd4969a8 1382 int timeout = 1000;
b5762948 1383
dd4969a8
JG
1384 if (addr & ~SPI_ADDR_MASK)
1385 return -EINVAL;
b5762948 1386
dd4969a8
JG
1387 writel(addr, regs + SPI_CMD);
1388 writel(TWSI_RD, regs + SPI_CTL);
b5762948 1389
dd4969a8
JG
1390 while (timeout-- > 0) {
1391 if (readl(regs + SPI_CTL) & TWSI_RDY) {
1392 *data = readl(regs + SPI_DATA);
1393 return 0;
1394 }
b5762948 1395
dd4969a8
JG
1396 udelay(10);
1397 }
b5762948 1398
dd4969a8
JG
1399 return -EBUSY;
1400}
b5762948 1401
dd4969a8
JG
1402static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
1403 void *buf, u32 buflen)
1404{
1405 u32 addr_end, tmp_addr, i, j;
1406 u32 tmp = 0;
1407 int rc;
1408 u8 *tmp8, *buf8 = buf;
b5762948 1409
dd4969a8
JG
1410 addr_end = addr + buflen;
1411 tmp_addr = ALIGN(addr, 4);
1412 if (addr > 0xff)
1413 return -EINVAL;
b5762948 1414
dd4969a8
JG
1415 j = addr & 0x3;
1416 if (j) {
1417 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1418 if (rc)
1419 return rc;
b5762948 1420
dd4969a8
JG
1421 tmp8 = (u8 *)&tmp;
1422 for (i = j; i < 4; i++)
1423 *buf8++ = tmp8[i];
8f261aaf 1424
dd4969a8
JG
1425 tmp_addr += 4;
1426 }
8f261aaf 1427
dd4969a8
JG
1428 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1429 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1430 if (rc)
1431 return rc;
8f261aaf 1432
dd4969a8
JG
1433 memcpy(buf8, &tmp, 4);
1434 buf8 += 4;
1435 }
8f261aaf 1436
dd4969a8
JG
1437 if (tmp_addr < addr_end) {
1438 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1439 if (rc)
1440 return rc;
8f261aaf 1441
dd4969a8
JG
1442 tmp8 = (u8 *)&tmp;
1443 j = addr_end - tmp_addr;
1444 for (i = 0; i < j; i++)
1445 *buf8++ = tmp8[i];
8f261aaf 1446
dd4969a8
JG
1447 tmp_addr += 4;
1448 }
8f261aaf 1449
dd4969a8 1450 return 0;
8f261aaf 1451}
dd4969a8 1452#endif
8f261aaf 1453
dd4969a8 1454int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen)
8f261aaf 1455{
dd4969a8 1456#ifndef MVS_DISABLE_NVRAM
8f261aaf 1457 void __iomem *regs = mvi->regs;
dd4969a8
JG
1458 int rc, i;
1459 u32 sum;
1460 u8 hdr[2], *tmp;
1461 const char *msg;
8f261aaf 1462
dd4969a8
JG
1463 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1464 if (rc) {
1465 msg = "nvram hdr read failed";
1466 goto err_out;
1467 }
1468 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1469 if (rc) {
1470 msg = "nvram read failed";
1471 goto err_out;
1472 }
8f261aaf 1473
dd4969a8
JG
1474 if (hdr[0] != 0x5A) {
1475 /* entry id */
1476 msg = "invalid nvram entry id";
1477 rc = -ENOENT;
1478 goto err_out;
1479 }
8f261aaf 1480
dd4969a8
JG
1481 tmp = buf;
1482 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1483 for (i = 0; i < buflen; i++)
1484 sum += ((u32)tmp[i]);
8f261aaf 1485
dd4969a8
JG
1486 if (sum) {
1487 msg = "nvram checksum failure";
1488 rc = -EILSEQ;
1489 goto err_out;
1490 }
8f261aaf 1491
dd4969a8 1492 return 0;
8f261aaf 1493
dd4969a8
JG
1494err_out:
1495 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1496 return rc;
1497#else
1498 /* FIXME , For SAS target mode */
1499 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1500 return 0;
1501#endif
8f261aaf
KW
1502}
1503
dd4969a8 1504static void mvs_int_sata(struct mvs_info *mvi)
8f261aaf 1505{
dd4969a8
JG
1506 u32 tmp;
1507 void __iomem *regs = mvi->regs;
1508 tmp = mr32(INT_STAT_SRS);
1509 mw32(INT_STAT_SRS, tmp & 0xFFFF);
8f261aaf
KW
1510}
1511
dd4969a8
JG
1512static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1513 u32 slot_idx)
8f261aaf 1514{
dd4969a8
JG
1515 void __iomem *regs = mvi->regs;
1516 struct domain_device *dev = task->dev;
1517 struct asd_sas_port *sas_port = dev->port;
1518 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1519 u32 reg_set, phy_mask;
8f261aaf 1520
dd4969a8
JG
1521 if (!sas_protocol_ata(task->task_proto)) {
1522 reg_set = 0;
1523 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1524 sas_port->phy_mask;
1525 } else {
1526 reg_set = port->taskfileset;
1527 phy_mask = sas_port->phy_mask;
8f261aaf 1528 }
dd4969a8
JG
1529 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1530 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1531 (phy_mask << TXQ_PHY_SHIFT) |
1532 (reg_set << TXQ_SRS_SHIFT));
1533
1534 mw32(TX_PROD_IDX, mvi->tx_prod);
1535 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
8f261aaf
KW
1536}
1537
dd4969a8 1538void mvs_int_full(struct mvs_info *mvi)
8f261aaf 1539{
dd4969a8
JG
1540 void __iomem *regs = mvi->regs;
1541 u32 tmp, stat;
1542 int i;
8f261aaf 1543
dd4969a8 1544 stat = mr32(INT_STAT);
8f261aaf 1545
dd4969a8 1546 mvs_int_rx(mvi, false);
8f261aaf 1547
dd4969a8
JG
1548 for (i = 0; i < MVS_MAX_PORTS; i++) {
1549 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1550 if (tmp)
1551 mvs_int_port(mvi, i, tmp);
8f261aaf
KW
1552 }
1553
dd4969a8
JG
1554 if (stat & CINT_SRS)
1555 mvs_int_sata(mvi);
8f261aaf 1556
dd4969a8
JG
1557 mw32(INT_STAT, stat);
1558}
8f261aaf 1559
dd4969a8
JG
1560#ifndef MVS_DISABLE_MSI
/*
 * mvs_msi_interrupt - MSI interrupt handler
 * @irq:	IRQ number (unused)
 * @opaque:	the mvs_info this vector belongs to
 *
 * MSI delivery implies the RX ring has work, so the full-status read is
 * skipped and only the completion ring is drained (with self_clear so an
 * attention descriptor triggers full handling).  When tasklet mode is
 * compiled in, processing is deferred instead.
 */
static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_rx(mvi, true);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}
dd4969a8 1576#endif
8f261aaf 1577
dd4969a8 1578int mvs_task_abort(struct sas_task *task)
8f261aaf 1579{
dd4969a8 1580 int rc;
8f261aaf 1581 unsigned long flags;
dd4969a8
JG
1582 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1583 struct pci_dev *pdev = mvi->pdev;
1584 int tag;
8f261aaf 1585
dd4969a8
JG
1586 spin_lock_irqsave(&task->task_state_lock, flags);
1587 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1588 rc = TMF_RESP_FUNC_COMPLETE;
1589 spin_unlock_irqrestore(&task->task_state_lock, flags);
1590 goto out_done;
8f261aaf 1591 }
dd4969a8 1592 spin_unlock_irqrestore(&task->task_state_lock, flags);
b5762948 1593
dd4969a8
JG
1594 switch (task->task_proto) {
1595 case SAS_PROTOCOL_SMP:
1596 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
1597 break;
1598 case SAS_PROTOCOL_SSP:
1599 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
1600 break;
1601 case SAS_PROTOCOL_SATA:
1602 case SAS_PROTOCOL_STP:
1603 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1604 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
1605#if _MV_DUMP
1606 dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
1607 mvs_hexdump(sizeof(struct host_to_dev_fis),
1608 (void *)&task->ata_task.fis, 0);
1609 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
1610 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1611#endif
1612 spin_lock_irqsave(&task->task_state_lock, flags);
1613 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
1614 /* TODO */
1615 ;
1616 }
1617 spin_unlock_irqrestore(&task->task_state_lock, flags);
1618 break;
1619 }
1620 default:
1621 break;
1622 }
1623
1624 if (mvs_find_tag(mvi, task, &tag)) {
1625 spin_lock_irqsave(&mvi->lock, flags);
1626 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
1627 spin_unlock_irqrestore(&mvi->lock, flags);
1628 }
1629 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
1630 rc = TMF_RESP_FUNC_COMPLETE;
1631 else
1632 rc = TMF_RESP_FUNC_FAILED;
1633out_done:
1634 return rc;
e9ff91b6
KW
1635}
1636
dd4969a8 1637int __devinit mvs_hw_init(struct mvs_info *mvi)
b5762948
JG
1638{
1639 void __iomem *regs = mvi->regs;
1640 int i;
1641 u32 tmp, cctl;
1642
1643 /* make sure interrupts are masked immediately (paranoia) */
1644 mw32(GBL_CTL, 0);
1645 tmp = mr32(GBL_CTL);
1646
8f261aaf 1647 /* Reset Controller */
b5762948
JG
1648 if (!(tmp & HBA_RST)) {
1649 if (mvi->flags & MVF_PHY_PWR_FIX) {
1650 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1651 tmp &= ~PCTL_PWR_ON;
1652 tmp |= PCTL_OFF;
1653 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1654
1655 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1656 tmp &= ~PCTL_PWR_ON;
1657 tmp |= PCTL_OFF;
1658 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1659 }
1660
1661 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
1662 mw32_f(GBL_CTL, HBA_RST);
1663 }
1664
b5762948
JG
1665 /* wait for reset to finish; timeout is just a guess */
1666 i = 1000;
1667 while (i-- > 0) {
1668 msleep(10);
1669
1670 if (!(mr32(GBL_CTL) & HBA_RST))
1671 break;
1672 }
1673 if (mr32(GBL_CTL) & HBA_RST) {
1674 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
1675 return -EBUSY;
1676 }
1677
8f261aaf 1678 /* Init Chip */
b5762948
JG
1679 /* make sure RST is set; HBA_RST /should/ have done that for us */
1680 cctl = mr32(CTL);
1681 if (cctl & CCTL_RST)
1682 cctl &= ~CCTL_RST;
1683 else
1684 mw32_f(CTL, cctl | CCTL_RST);
1685
8f261aaf
KW
1686 /* write to device control _AND_ device status register? - A.C. */
1687 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
1688 tmp &= ~PRD_REQ_MASK;
1689 tmp |= PRD_REQ_SIZE;
1690 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
1691
b5762948
JG
1692 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
1693 tmp |= PCTL_PWR_ON;
1694 tmp &= ~PCTL_OFF;
1695 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
1696
1697 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
1698 tmp |= PCTL_PWR_ON;
1699 tmp &= ~PCTL_OFF;
1700 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
1701
1702 mw32_f(CTL, cctl);
1703
8f261aaf
KW
1704 /* reset control */
1705 mw32(PCS, 0); /*MVS_PCS */
1706
b5762948
JG
1707 mvs_phy_hacks(mvi);
1708
1709 mw32(CMD_LIST_LO, mvi->slot_dma);
1710 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
1711
1712 mw32(RX_FIS_LO, mvi->rx_fis_dma);
1713 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
1714
8f261aaf 1715 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
b5762948
JG
1716 mw32(TX_LO, mvi->tx_dma);
1717 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
1718
1719 mw32(RX_CFG, MVS_RX_RING_SZ);
1720 mw32(RX_LO, mvi->rx_dma);
1721 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
1722
8f261aaf
KW
1723 /* enable auto port detection */
1724 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
ddccf307 1725 msleep(1100);
b5762948
JG
1726 /* init and reset phys */
1727 for (i = 0; i < mvi->chip->n_phy; i++) {
00da714b
KW
1728 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
1729 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
8f261aaf
KW
1730
1731 mvs_detect_porttype(mvi, i);
b5762948
JG
1732
1733 /* set phy local SAS address */
8f261aaf
KW
1734 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
1735 mvs_write_port_cfg_data(mvi, i, lo);
1736 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
1737 mvs_write_port_cfg_data(mvi, i, hi);
b5762948
JG
1738
1739 /* reset phy */
8f261aaf 1740 tmp = mvs_read_phy_ctl(mvi, i);
b5762948 1741 tmp |= PHY_RST;
8f261aaf 1742 mvs_write_phy_ctl(mvi, i, tmp);
b5762948
JG
1743 }
1744
1745 msleep(100);
1746
1747 for (i = 0; i < mvi->chip->n_phy; i++) {
8f261aaf
KW
1748 /* clear phy int status */
1749 tmp = mvs_read_port_irq_stat(mvi, i);
1750 tmp &= ~PHYEV_SIG_FIS;
1751 mvs_write_port_irq_stat(mvi, i, tmp);
1752
b5762948 1753 /* set phy int mask */
8f261aaf
KW
1754 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
1755 PHYEV_ID_DONE | PHYEV_DEC_ERR;
1756 mvs_write_port_irq_mask(mvi, i, tmp);
b5762948 1757
8f261aaf
KW
1758 msleep(100);
1759 mvs_update_phyinfo(mvi, i, 1);
1760 mvs_enable_xmt(mvi, i);
b5762948
JG
1761 }
1762
1763 /* FIXME: update wide port bitmaps */
1764
8f261aaf
KW
1765 /* little endian for open address and command table, etc. */
1766 /* A.C.
1767 * it seems that ( from the spec ) turning on big-endian won't
1768 * do us any good on big-endian machines, need further confirmation
1769 */
1770 cctl = mr32(CTL);
1771 cctl |= CCTL_ENDIAN_CMD;
1772 cctl |= CCTL_ENDIAN_DATA;
1773 cctl &= ~CCTL_ENDIAN_OPEN;
1774 cctl |= CCTL_ENDIAN_RSP;
1775 mw32_f(CTL, cctl);
1776
1777 /* reset CMD queue */
1778 tmp = mr32(PCS);
1779 tmp |= PCS_CMD_RST;
1780 mw32(PCS, tmp);
1781 /* interrupt coalescing may cause missing HW interrput in some case,
1782 * and the max count is 0x1ff, while our max slot is 0x200,
1783 * it will make count 0.
1784 */
1785 tmp = 0;
1786 mw32(INT_COAL, tmp);
1787
1788 tmp = 0x100;
1789 mw32(INT_COAL_TMOUT, tmp);
1790
b5762948 1791 /* ladies and gentlemen, start your engines */
8f261aaf
KW
1792 mw32(TX_CFG, 0);
1793 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
b5762948 1794 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
8f261aaf
KW
1795 /* enable CMD/CMPL_Q/RESP mode */
1796 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
b5762948 1797
8f261aaf 1798 /* enable completion queue interrupt */
e9ff91b6 1799 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
8f261aaf 1800 mw32(INT_MASK, tmp);
b5762948 1801
e9ff91b6
KW
1802 /* Enable SRS interrupt */
1803 mw32(INT_MASK_SRS, 0xFF);
b5762948
JG
1804 return 0;
1805}
1806
dd4969a8 1807void __devinit mvs_print_info(struct mvs_info *mvi)
b5762948
JG
1808{
1809 struct pci_dev *pdev = mvi->pdev;
1810 static int printed_version;
1811
1812 if (!printed_version++)
1813 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
1814
1815 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
1816 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
1817}
1818