1 /*
2 * File: drivers/ata/pata_bf54x.c
3 * Author: Sonic Zhang <sonic.zhang@analog.com>
4 *
5 * Created:
6 * Description: PATA Driver for blackfin 54x
7 *
8 * Modified:
9 * Copyright 2007 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/init.h>
33 #include <linux/blkdev.h>
34 #include <linux/delay.h>
35 #include <linux/device.h>
36 #include <scsi/scsi_host.h>
37 #include <linux/libata.h>
38 #include <linux/platform_device.h>
39 #include <asm/dma.h>
40 #include <asm/gpio.h>
41 #include <asm/portmux.h>
42
43 #define DRV_NAME "pata-bf54x"
44 #define DRV_VERSION "0.9"
45
46 #define ATA_REG_CTRL 0x0E
47 #define ATA_REG_ALTSTATUS ATA_REG_CTRL
48
 49 /* These are the offsets of the controller's registers */
50 #define ATAPI_OFFSET_CONTROL 0x00
51 #define ATAPI_OFFSET_STATUS 0x04
52 #define ATAPI_OFFSET_DEV_ADDR 0x08
53 #define ATAPI_OFFSET_DEV_TXBUF 0x0c
54 #define ATAPI_OFFSET_DEV_RXBUF 0x10
55 #define ATAPI_OFFSET_INT_MASK 0x14
56 #define ATAPI_OFFSET_INT_STATUS 0x18
57 #define ATAPI_OFFSET_XFER_LEN 0x1c
58 #define ATAPI_OFFSET_LINE_STATUS 0x20
59 #define ATAPI_OFFSET_SM_STATE 0x24
60 #define ATAPI_OFFSET_TERMINATE 0x28
61 #define ATAPI_OFFSET_PIO_TFRCNT 0x2c
62 #define ATAPI_OFFSET_DMA_TFRCNT 0x30
63 #define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
64 #define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
65 #define ATAPI_OFFSET_REG_TIM_0 0x40
66 #define ATAPI_OFFSET_PIO_TIM_0 0x44
67 #define ATAPI_OFFSET_PIO_TIM_1 0x48
68 #define ATAPI_OFFSET_MULTI_TIM_0 0x50
69 #define ATAPI_OFFSET_MULTI_TIM_1 0x54
70 #define ATAPI_OFFSET_MULTI_TIM_2 0x58
71 #define ATAPI_OFFSET_ULTRA_TIM_0 0x60
72 #define ATAPI_OFFSET_ULTRA_TIM_1 0x64
73 #define ATAPI_OFFSET_ULTRA_TIM_2 0x68
74 #define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
75
76
77 #define ATAPI_GET_CONTROL(base)\
78 bfin_read16(base + ATAPI_OFFSET_CONTROL)
79 #define ATAPI_SET_CONTROL(base, val)\
80 bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
81 #define ATAPI_GET_STATUS(base)\
82 bfin_read16(base + ATAPI_OFFSET_STATUS)
83 #define ATAPI_GET_DEV_ADDR(base)\
84 bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
85 #define ATAPI_SET_DEV_ADDR(base, val)\
86 bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
87 #define ATAPI_GET_DEV_TXBUF(base)\
88 bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
89 #define ATAPI_SET_DEV_TXBUF(base, val)\
90 bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
91 #define ATAPI_GET_DEV_RXBUF(base)\
92 bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
93 #define ATAPI_SET_DEV_RXBUF(base, val)\
94 bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
95 #define ATAPI_GET_INT_MASK(base)\
96 bfin_read16(base + ATAPI_OFFSET_INT_MASK)
97 #define ATAPI_SET_INT_MASK(base, val)\
98 bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
99 #define ATAPI_GET_INT_STATUS(base)\
100 bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
101 #define ATAPI_SET_INT_STATUS(base, val)\
102 bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
103 #define ATAPI_GET_XFER_LEN(base)\
104 bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
105 #define ATAPI_SET_XFER_LEN(base, val)\
106 bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
107 #define ATAPI_GET_LINE_STATUS(base)\
108 bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
109 #define ATAPI_GET_SM_STATE(base)\
110 bfin_read16(base + ATAPI_OFFSET_SM_STATE)
111 #define ATAPI_GET_TERMINATE(base)\
112 bfin_read16(base + ATAPI_OFFSET_TERMINATE)
113 #define ATAPI_SET_TERMINATE(base, val)\
114 bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
115 #define ATAPI_GET_PIO_TFRCNT(base)\
116 bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
117 #define ATAPI_GET_DMA_TFRCNT(base)\
118 bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
119 #define ATAPI_GET_UMAIN_TFRCNT(base)\
120 bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
121 #define ATAPI_GET_UDMAOUT_TFRCNT(base)\
122 bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
123 #define ATAPI_GET_REG_TIM_0(base)\
124 bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
125 #define ATAPI_SET_REG_TIM_0(base, val)\
126 bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
127 #define ATAPI_GET_PIO_TIM_0(base)\
128 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
129 #define ATAPI_SET_PIO_TIM_0(base, val)\
130 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
131 #define ATAPI_GET_PIO_TIM_1(base)\
132 bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
133 #define ATAPI_SET_PIO_TIM_1(base, val)\
134 bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
135 #define ATAPI_GET_MULTI_TIM_0(base)\
136 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
137 #define ATAPI_SET_MULTI_TIM_0(base, val)\
138 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
139 #define ATAPI_GET_MULTI_TIM_1(base)\
140 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
141 #define ATAPI_SET_MULTI_TIM_1(base, val)\
142 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
143 #define ATAPI_GET_MULTI_TIM_2(base)\
144 bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
145 #define ATAPI_SET_MULTI_TIM_2(base, val)\
146 bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
147 #define ATAPI_GET_ULTRA_TIM_0(base)\
148 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
149 #define ATAPI_SET_ULTRA_TIM_0(base, val)\
150 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
151 #define ATAPI_GET_ULTRA_TIM_1(base)\
152 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
153 #define ATAPI_SET_ULTRA_TIM_1(base, val)\
154 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
155 #define ATAPI_GET_ULTRA_TIM_2(base)\
156 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
157 #define ATAPI_SET_ULTRA_TIM_2(base, val)\
158 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
159 #define ATAPI_GET_ULTRA_TIM_3(base)\
160 bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
161 #define ATAPI_SET_ULTRA_TIM_3(base, val)\
162 bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
163
164 /**
165 * PIO Mode - Frequency compatibility
166 */
167 /* mode: 0 1 2 3 4 */
168 static const u32 pio_fsclk[] =
169 { 33333333, 33333333, 33333333, 33333333, 33333333 };
170
171 /**
172 * MDMA Mode - Frequency compatibility
173 */
174 /* mode: 0 1 2 */
175 static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
176
177 /**
178 * UDMA Mode - Frequency compatibility
179 *
180 * UDMA5 - 100 MB/s - SCLK = 133 MHz
181 * UDMA4 - 66 MB/s - SCLK >= 80 MHz
182 * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
183 * UDMA2 - 33 MB/s - SCLK >= 40 MHz
184 */
185 /* mode: 0 1 2 3 4 5 */
186 static const u32 udma_fsclk[] =
187 { 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
188
189 /**
190 * Register transfer timing table
191 */
192 /* mode: 0 1 2 3 4 */
193 /* Cycle Time */
194 static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
195 /* DIOR/DIOW to end cycle */
196 static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
197 /* DIOR/DIOW asserted pulse width */
198 static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
199
200 /**
201 * PIO timing table
202 */
203 /* mode: 0 1 2 3 4 */
204 /* Cycle Time */
205 static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
206 /* Address valid to DIOR/DIORW */
207 static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
208 /* DIOR/DIOW to end cycle */
209 static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
210 /* DIOR/DIOW asserted pulse width */
211 static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
212 /* DIOW data hold */
213 static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
214
215 /* ******************************************************************
216 * Multiword DMA timing table
217 * ******************************************************************
218 */
219 /* mode: 0 1 2 */
220 /* Cycle Time */
221 static const u32 mdma_t0min[] = { 480, 150, 120 };
222 /* DIOR/DIOW asserted pulse width */
223 static const u32 mdma_tdmin[] = { 215, 80, 70 };
 224 /* DIOW data hold */
225 static const u32 mdma_thmin[] = { 20, 15, 10 };
226 /* DIOR/DIOW to DMACK hold */
227 static const u32 mdma_tjmin[] = { 20, 5, 5 };
228 /* DIOR negated pulse width */
229 static const u32 mdma_tkrmin[] = { 50, 50, 25 };
 230 /* DIOW negated pulse width */
231 static const u32 mdma_tkwmin[] = { 215, 50, 25 };
232 /* CS[1:0] valid to DIOR/DIOW */
233 static const u32 mdma_tmmin[] = { 50, 30, 25 };
234 /* DMACK to read data released */
235 static const u32 mdma_tzmax[] = { 20, 25, 25 };
236
237 /**
238 * Ultra DMA timing table
239 */
240 /* mode: 0 1 2 3 4 5 */
241 static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
242 static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
243 static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
244 static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
245 static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
246
247
248 static const u32 udma_tmlimin = 20;
249 static const u32 udma_tzahmin = 20;
250 static const u32 udma_tenvmin = 20;
251 static const u32 udma_tackmin = 20;
252 static const u32 udma_tssmin = 50;
253
254 #define BFIN_MAX_SG_SEGMENTS 4
255
256 /**
257 *
258 * Function: num_clocks_min
259 *
260 * Description:
261 * calculate number of SCLK cycles to meet minimum timing
262 */
263 static unsigned short num_clocks_min(unsigned long tmin,
264 unsigned long fsclk)
265 {
 266 	unsigned long tmp;
267 unsigned short result;
268
269 tmp = tmin * (fsclk/1000/1000) / 1000;
270 result = (unsigned short)tmp;
271 if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
272 result++;
273 }
274
275 return result;
276 }
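/*
 * Worked example (a sketch, assuming SCLK = 133 MHz and tmin = 25 ns):
 * 25 * 133 / 1000 = 3, but 3 SCLK periods (~22.5 ns) fall short of
 * 25 ns, so the rounding check bumps the result to 4 cycles (~30 ns).
 * In other words, num_clocks_min() rounds up to the smallest cycle
 * count that still satisfies the timing minimum.
 */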
277
278 /**
279 * bfin_set_piomode - Initialize host controller PATA PIO timings
280 * @ap: Port whose timings we are configuring
 281  * @adev: device whose PIO timings are being configured
282 *
283 * Set PIO mode for device.
284 *
285 * LOCKING:
286 * None (inherited from caller).
287 */
288
289 static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
290 {
291 int mode = adev->pio_mode - XFER_PIO_0;
292 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
293 unsigned int fsclk = get_sclk();
294 unsigned short teoc_reg, t2_reg, teoc_pio;
295 unsigned short t4_reg, t2_pio, t1_reg;
296 unsigned short n0, n6, t6min = 5;
297
 298 	/* The most restrictive timing values are t6 and tc, the DIOW data hold.
299 * If one SCLK pulse is longer than this minimum value then register
300 * transfers cannot be supported at this frequency.
301 */
302 n6 = num_clocks_min(t6min, fsclk);
303 if (mode >= 0 && mode <= 4 && n6 >= 1) {
 304 		dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%u\n", mode, fsclk);
305 /* calculate the timing values for register transfers. */
306 while (mode > 0 && pio_fsclk[mode] > fsclk)
307 mode--;
308
309 /* DIOR/DIOW to end cycle time */
310 t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
311 /* DIOR/DIOW asserted pulse width */
312 teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
313 /* Cycle Time */
314 n0 = num_clocks_min(reg_t0min[mode], fsclk);
315
 316 		/* increase t2 until we meet the minimum cycle length */
317 if (t2_reg + teoc_reg < n0)
318 t2_reg = n0 - teoc_reg;
319
320 /* calculate the timing values for pio transfers. */
321
322 /* DIOR/DIOW to end cycle time */
323 t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
324 /* DIOR/DIOW asserted pulse width */
325 teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
326 /* Cycle Time */
327 n0 = num_clocks_min(pio_t0min[mode], fsclk);
328
 329 		/* increase t2 until we meet the minimum cycle length */
330 if (t2_pio + teoc_pio < n0)
331 t2_pio = n0 - teoc_pio;
332
333 /* Address valid to DIOR/DIORW */
334 t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
335
336 /* DIOW data hold */
337 t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
338
339 ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
340 ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
341 ATAPI_SET_PIO_TIM_1(base, teoc_pio);
342 if (mode > 2) {
343 ATAPI_SET_CONTROL(base,
344 ATAPI_GET_CONTROL(base) | IORDY_EN);
345 } else {
346 ATAPI_SET_CONTROL(base,
347 ATAPI_GET_CONTROL(base) & ~IORDY_EN);
348 }
349
350 /* Disable host ATAPI PIO interrupts */
351 ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
352 & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
353 SSYNC();
354 }
355 }
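/*
 * Worked example (a sketch, assuming SCLK = 133 MHz and PIO mode 4):
 * num_clocks_min() gives t2_pio = 10, teoc_pio = 4 and n0 = 16 cycles,
 * so t2_pio is stretched to 16 - 4 = 12; with t1_reg = 4 and t4_reg = 2
 * the code above would program ATAPI_PIO_TIM_0 with
 * (2 << 12) | (12 << 4) | 4, ATAPI_PIO_TIM_1 with 4, and enable IORDY
 * because mode > 2.
 */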
356
357 /**
358 * bfin_set_dmamode - Initialize host controller PATA DMA timings
359 * @ap: Port whose timings we are configuring
 360  * @adev: device whose DMA timings are being configured
361 *
362 * Set UDMA mode for device.
363 *
364 * LOCKING:
365 * None (inherited from caller).
366 */
367
368 static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
369 {
370 int mode;
371 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
372 unsigned long fsclk = get_sclk();
373 unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
374 unsigned short tm, td, tkr, tkw, teoc, th;
375 unsigned short n0, nf, tfmin = 5;
376 unsigned short nmin, tcyc;
377
378 mode = adev->dma_mode - XFER_UDMA_0;
379 if (mode >= 0 && mode <= 5) {
380 dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
 381 		/* The most restrictive timing values are t6 and tc,
 382 		 * the DIOW data hold. If one SCLK pulse is longer
383 * than this minimum value then register
384 * transfers cannot be supported at this frequency.
385 */
386 while (mode > 0 && udma_fsclk[mode] > fsclk)
387 mode--;
388
389 nmin = num_clocks_min(udma_tmin[mode], fsclk);
390 if (nmin >= 1) {
391 /* calculate the timing values for Ultra DMA. */
392 tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
393 tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
394 tcyc_tdvs = 2;
395
 396 			/* increase tcyc - tdvs (tcyc_tdvs) until we meet
397 * the minimum cycle length
398 */
399 if (tdvs + tcyc_tdvs < tcyc)
400 tcyc_tdvs = tcyc - tdvs;
401
 402 			/* Now assign the values required for the timing
403 * registers
404 */
405 if (tcyc_tdvs < 2)
406 tcyc_tdvs = 2;
407
408 if (tdvs < 2)
409 tdvs = 2;
410
411 tack = num_clocks_min(udma_tackmin, fsclk);
412 tss = num_clocks_min(udma_tssmin, fsclk);
413 tmli = num_clocks_min(udma_tmlimin, fsclk);
414 tzah = num_clocks_min(udma_tzahmin, fsclk);
415 trp = num_clocks_min(udma_trpmin[mode], fsclk);
416 tenv = num_clocks_min(udma_tenvmin, fsclk);
417 if (tenv <= udma_tenvmax[mode]) {
418 ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
419 ATAPI_SET_ULTRA_TIM_1(base,
420 (tcyc_tdvs<<8 | tdvs));
421 ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
422 ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
423 }
424 }
425 }
426
427 mode = adev->dma_mode - XFER_MW_DMA_0;
428 if (mode >= 0 && mode <= 2) {
429 dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
430 /* the most restrictive timing value is tf, the DMACK to
431 * read data released. If one SCLK pulse is longer than
432 * this maximum value then the MDMA mode
433 * cannot be supported at this frequency.
434 */
435 while (mode > 0 && mdma_fsclk[mode] > fsclk)
436 mode--;
437
438 nf = num_clocks_min(tfmin, fsclk);
439 if (nf >= 1) {
440 /* calculate the timing values for Multi-word DMA. */
441
442 /* DIOR/DIOW asserted pulse width */
443 td = num_clocks_min(mdma_tdmin[mode], fsclk);
444
445 /* DIOR negated pulse width */
446 tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
447
448 /* Cycle Time */
449 n0 = num_clocks_min(mdma_t0min[mode], fsclk);
450
 451 			/* increase tkw until we meet the minimum cycle length */
452 if (tkw + td < n0)
453 tkw = n0 - td;
454
455 /* DIOR negated pulse width - read */
456 tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
 457 			/* CS[1:0] valid to DIOR/DIOW */
458 tm = num_clocks_min(mdma_tmmin[mode], fsclk);
459 /* DIOR/DIOW to DMACK hold */
460 teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
461 /* DIOW Data hold */
462 th = num_clocks_min(mdma_thmin[mode], fsclk);
463
464 ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
465 ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
466 ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
467 SSYNC();
468 }
469 }
470 return;
471 }
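/*
 * Worked example (a sketch, assuming SCLK = 133 MHz and UDMA mode 5):
 * num_clocks_min() yields tdvs = 1 and tcyc = 3 cycles; tcyc_tdvs starts
 * at 2 and is not stretched (1 + 2 is not below 3), and tdvs is then
 * clamped up to its minimum of 2 before the ULTRA_TIM registers are
 * written.
 */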
472
473 /**
474 *
475 * Function: wait_complete
476 *
 477  * Description: Waits for an interrupt from the device
478 *
479 */
480 static inline void wait_complete(void __iomem *base, unsigned short mask)
481 {
482 unsigned short status;
483 unsigned int i = 0;
484
485 #define PATA_BF54X_WAIT_TIMEOUT 10000
486
487 for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
488 status = ATAPI_GET_INT_STATUS(base) & mask;
489 if (status)
490 break;
491 }
492
493 ATAPI_SET_INT_STATUS(base, mask);
494 }
495
496 /**
497 *
498 * Function: write_atapi_register
499 *
 500  * Description: Writes to an ATA device register
501 *
502 */
503
504 static void write_atapi_register(void __iomem *base,
505 unsigned long ata_reg, unsigned short value)
506 {
507 /* Program the ATA_DEV_TXBUF register with write data (to be
508 * written into the device).
509 */
510 ATAPI_SET_DEV_TXBUF(base, value);
511
512 /* Program the ATA_DEV_ADDR register with address of the
513 * device register (0x01 to 0x0F).
514 */
515 ATAPI_SET_DEV_ADDR(base, ata_reg);
516
517 /* Program the ATA_CTRL register with dir set to write (1)
518 */
519 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
520
521 /* ensure PIO DMA is not set */
522 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
523
524 /* and start the transfer */
525 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
526
527 /* Wait for the interrupt to indicate the end of the transfer.
 528 	 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
529 */
530 wait_complete(base, PIO_DONE_INT);
531 }
532
533 /**
534 *
535 * Function: read_atapi_register
536 *
 537  * Description: Reads from an ATA device register
538 *
539 */
540
541 static unsigned short read_atapi_register(void __iomem *base,
542 unsigned long ata_reg)
543 {
544 /* Program the ATA_DEV_ADDR register with address of the
545 * device register (0x01 to 0x0F).
546 */
547 ATAPI_SET_DEV_ADDR(base, ata_reg);
548
 549 	/* Program the ATA_CTRL register with dir set to read (0)
550 */
551 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
552
553 /* ensure PIO DMA is not set */
554 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
555
556 /* and start the transfer */
557 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
558
559 /* Wait for the interrupt to indicate the end of the transfer.
560 * (PIO_DONE interrupt is set and it doesn't seem to matter
561 * that we don't clear it)
562 */
563 wait_complete(base, PIO_DONE_INT);
564
 565 	/* Read the data returned by the device from the
 566 	 * ATA_DEV_RXBUF register.
567 */
568 return ATAPI_GET_DEV_RXBUF(base);
569 }
570
571 /**
572 *
 573  * Function: write_atapi_data
 574  *
 575  * Description: Writes a block of data words to the ATA data register
576 *
577 */
578
579 static void write_atapi_data(void __iomem *base,
580 int len, unsigned short *buf)
581 {
582 int i;
583
584 /* Set transfer length to 1 */
585 ATAPI_SET_XFER_LEN(base, 1);
586
587 /* Program the ATA_DEV_ADDR register with address of the
588 * ATA_REG_DATA
589 */
590 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
591
592 /* Program the ATA_CTRL register with dir set to write (1)
593 */
594 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
595
596 /* ensure PIO DMA is not set */
597 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
598
599 for (i = 0; i < len; i++) {
600 /* Program the ATA_DEV_TXBUF register with write data (to be
601 * written into the device).
602 */
603 ATAPI_SET_DEV_TXBUF(base, buf[i]);
604
605 /* and start the transfer */
606 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
607
608 /* Wait for the interrupt to indicate the end of the transfer.
 609 		 * (We need to wait on and clear the ATA_DEV_INT
610 * interrupt status)
611 */
612 wait_complete(base, PIO_DONE_INT);
613 }
614 }
615
616 /**
617 *
 618  * Function: read_atapi_data
 619  *
 620  * Description: Reads a block of data words from the ATA data register
621 *
622 */
623
624 static void read_atapi_data(void __iomem *base,
625 int len, unsigned short *buf)
626 {
627 int i;
628
629 /* Set transfer length to 1 */
630 ATAPI_SET_XFER_LEN(base, 1);
631
632 /* Program the ATA_DEV_ADDR register with address of the
633 * ATA_REG_DATA
634 */
635 ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
636
 637 	/* Program the ATA_CTRL register with dir set to read (0)
638 */
639 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
640
641 /* ensure PIO DMA is not set */
642 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
643
644 for (i = 0; i < len; i++) {
645 /* and start the transfer */
646 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
647
648 /* Wait for the interrupt to indicate the end of the transfer.
649 * (PIO_DONE interrupt is set and it doesn't seem to matter
650 * that we don't clear it)
651 */
652 wait_complete(base, PIO_DONE_INT);
653
 654 		/* Read the data returned by the device from the
 655 		 * ATA_DEV_RXBUF register.
656 */
657 buf[i] = ATAPI_GET_DEV_RXBUF(base);
658 }
659 }
660
661 /**
662 * bfin_tf_load - send taskfile registers to host controller
663 * @ap: Port to which output is sent
664 * @tf: ATA taskfile register set
665 *
666 * Note: Original code is ata_sff_tf_load().
667 */
668
669 static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
670 {
671 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
672 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
673
674 if (tf->ctl != ap->last_ctl) {
675 write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
676 ap->last_ctl = tf->ctl;
677 ata_wait_idle(ap);
678 }
679
680 if (is_addr) {
681 if (tf->flags & ATA_TFLAG_LBA48) {
682 write_atapi_register(base, ATA_REG_FEATURE,
683 tf->hob_feature);
684 write_atapi_register(base, ATA_REG_NSECT,
685 tf->hob_nsect);
686 write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
687 write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
688 write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
689 dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
690 "0x%X 0x%X\n",
691 tf->hob_feature,
692 tf->hob_nsect,
693 tf->hob_lbal,
694 tf->hob_lbam,
695 tf->hob_lbah);
696 }
697
698 write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
699 write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
700 write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
701 write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
702 write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
703 dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
704 tf->feature,
705 tf->nsect,
706 tf->lbal,
707 tf->lbam,
708 tf->lbah);
709 }
710
711 if (tf->flags & ATA_TFLAG_DEVICE) {
712 write_atapi_register(base, ATA_REG_DEVICE, tf->device);
713 dev_dbg(ap->dev, "device 0x%X\n", tf->device);
714 }
715
716 ata_wait_idle(ap);
717 }
718
719 /**
720 * bfin_check_status - Read device status reg & clear interrupt
721 * @ap: port where the device is
722 *
723 * Note: Original code is ata_check_status().
724 */
725
726 static u8 bfin_check_status(struct ata_port *ap)
727 {
728 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
729 return read_atapi_register(base, ATA_REG_STATUS);
730 }
731
732 /**
733 * bfin_tf_read - input device's ATA taskfile shadow registers
734 * @ap: Port from which input is read
735 * @tf: ATA taskfile register set for storing input
736 *
737 * Note: Original code is ata_sff_tf_read().
738 */
739
740 static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
741 {
742 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
743
744 tf->command = bfin_check_status(ap);
745 tf->feature = read_atapi_register(base, ATA_REG_ERR);
746 tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
747 tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
748 tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
749 tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
750 tf->device = read_atapi_register(base, ATA_REG_DEVICE);
751
752 if (tf->flags & ATA_TFLAG_LBA48) {
753 write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
754 tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
755 tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
756 tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
757 tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
758 tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
759 }
760 }
761
762 /**
763 * bfin_exec_command - issue ATA command to host controller
764 * @ap: port to which command is being issued
765 * @tf: ATA taskfile register set
766 *
767 * Note: Original code is ata_sff_exec_command().
768 */
769
770 static void bfin_exec_command(struct ata_port *ap,
771 const struct ata_taskfile *tf)
772 {
773 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
774 dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
775
776 write_atapi_register(base, ATA_REG_CMD, tf->command);
777 ata_sff_pause(ap);
778 }
779
780 /**
781 * bfin_check_altstatus - Read device alternate status reg
782 * @ap: port where the device is
783 */
784
785 static u8 bfin_check_altstatus(struct ata_port *ap)
786 {
787 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
788 return read_atapi_register(base, ATA_REG_ALTSTATUS);
789 }
790
791 /**
792 * bfin_dev_select - Select device 0/1 on ATA bus
793 * @ap: ATA channel to manipulate
794 * @device: ATA device (numbered from zero) to select
795 *
796 * Note: Original code is ata_sff_dev_select().
797 */
798
799 static void bfin_dev_select(struct ata_port *ap, unsigned int device)
800 {
801 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
802 u8 tmp;
803
804 if (device == 0)
805 tmp = ATA_DEVICE_OBS;
806 else
807 tmp = ATA_DEVICE_OBS | ATA_DEV1;
808
809 write_atapi_register(base, ATA_REG_DEVICE, tmp);
810 ata_sff_pause(ap);
811 }
812
813 /**
814 * bfin_set_devctl - Write device control reg
815 * @ap: port where the device is
816 * @ctl: value to write
817 */
818
819 static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
820 {
821 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
822 write_atapi_register(base, ATA_REG_CTRL, ctl);
823 }
824
825 /**
826 * bfin_bmdma_setup - Set up IDE DMA transaction
827 * @qc: Info associated with this ATA transaction.
828 *
829 * Note: Original code is ata_bmdma_setup().
830 */
831
832 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
833 {
834 struct ata_port *ap = qc->ap;
835 struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
836 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
837 unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
838 struct scatterlist *sg;
839 unsigned int si;
840 unsigned int channel;
841 unsigned int dir;
842 unsigned int size = 0;
843
844 dev_dbg(qc->ap->dev, "in atapi dma setup\n");
845 /* Program the ATA_CTRL register with dir */
846 if (qc->tf.flags & ATA_TFLAG_WRITE) {
847 channel = CH_ATAPI_TX;
848 dir = DMA_TO_DEVICE;
849 } else {
850 channel = CH_ATAPI_RX;
851 dir = DMA_FROM_DEVICE;
852 config |= WNR;
853 }
854
855 dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
856
857 /* fill the ATAPI DMA controller */
858 for_each_sg(qc->sg, sg, qc->n_elem, si) {
859 dma_desc_cpu[si].start_addr = sg_dma_address(sg);
860 dma_desc_cpu[si].cfg = config;
861 dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
862 dma_desc_cpu[si].x_modify = 2;
863 size += sg_dma_len(sg);
864 }
865
866 /* Set the last descriptor to stop mode */
867 dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
868
869 flush_dcache_range((unsigned int)dma_desc_cpu,
870 (unsigned int)dma_desc_cpu +
871 qc->n_elem * sizeof(struct dma_desc_array));
872
873 /* Enable ATA DMA operation*/
874 set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
875 set_dma_x_count(channel, 0);
876 set_dma_x_modify(channel, 0);
877 set_dma_config(channel, config);
878
879 SSYNC();
880
881 /* Send ATA DMA command */
882 bfin_exec_command(ap, &qc->tf);
883
884 if (qc->tf.flags & ATA_TFLAG_WRITE) {
885 /* set ATA DMA write direction */
886 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
887 | XFER_DIR));
888 } else {
889 /* set ATA DMA read direction */
890 ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
891 & ~XFER_DIR));
892 }
893
894 /* Reset all transfer count */
895 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
896
 897 	/* Set ATAPI state machine control in terminate sequence */
898 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
899
900 /* Set transfer length to the total size of sg buffers */
901 ATAPI_SET_XFER_LEN(base, size >> 1);
902 }
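/*
 * A note on the setup above (intent only, not a normative description of
 * the BF54x DMA controller): each scatterlist entry becomes one
 * array-mode descriptor (start_addr, cfg, x_count in 16-bit words,
 * x_modify = 2 bytes), and clearing DMAFLOW/NDSIZE in the last
 * descriptor makes the DMA engine stop after the final segment instead
 * of fetching another descriptor.
 */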
903
904 /**
905 * bfin_bmdma_start - Start an IDE DMA transaction
906 * @qc: Info associated with this ATA transaction.
907 *
908 * Note: Original code is ata_bmdma_start().
909 */
910
911 static void bfin_bmdma_start(struct ata_queued_cmd *qc)
912 {
913 struct ata_port *ap = qc->ap;
914 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
915
916 dev_dbg(qc->ap->dev, "in atapi dma start\n");
917
918 if (!(ap->udma_mask || ap->mwdma_mask))
919 return;
920
921 /* start ATAPI transfer*/
922 if (ap->udma_mask)
923 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
924 | ULTRA_START);
925 else
926 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
927 | MULTI_START);
928 }
929
930 /**
931 * bfin_bmdma_stop - Stop IDE DMA transfer
932 * @qc: Command we are ending DMA for
933 */
934
935 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
936 {
937 struct ata_port *ap = qc->ap;
938 unsigned int dir;
939
940 dev_dbg(qc->ap->dev, "in atapi dma stop\n");
941
942 if (!(ap->udma_mask || ap->mwdma_mask))
943 return;
944
945 /* stop ATAPI DMA controller*/
946 if (qc->tf.flags & ATA_TFLAG_WRITE) {
947 dir = DMA_TO_DEVICE;
948 disable_dma(CH_ATAPI_TX);
949 } else {
950 dir = DMA_FROM_DEVICE;
951 disable_dma(CH_ATAPI_RX);
952 }
953
954 dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
955 }
956
957 /**
958 * bfin_devchk - PATA device presence detection
959 * @ap: ATA channel to examine
960 * @device: Device to examine (starting at zero)
961 *
962 * Note: Original code is ata_devchk().
963 */
964
965 static unsigned int bfin_devchk(struct ata_port *ap,
966 unsigned int device)
967 {
968 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
969 u8 nsect, lbal;
970
971 bfin_dev_select(ap, device);
972
973 write_atapi_register(base, ATA_REG_NSECT, 0x55);
974 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
975
976 write_atapi_register(base, ATA_REG_NSECT, 0xaa);
977 write_atapi_register(base, ATA_REG_LBAL, 0x55);
978
979 write_atapi_register(base, ATA_REG_NSECT, 0x55);
980 write_atapi_register(base, ATA_REG_LBAL, 0xaa);
981
982 nsect = read_atapi_register(base, ATA_REG_NSECT);
983 lbal = read_atapi_register(base, ATA_REG_LBAL);
984
985 if ((nsect == 0x55) && (lbal == 0xaa))
986 return 1; /* we found a device */
987
988 return 0; /* nothing found */
989 }
990
991 /**
992 * bfin_bus_post_reset - PATA device post reset
993 *
994 * Note: Original code is ata_bus_post_reset().
995 */
996
997 static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
998 {
999 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1000 unsigned int dev0 = devmask & (1 << 0);
1001 unsigned int dev1 = devmask & (1 << 1);
1002 unsigned long deadline;
1003
1004 /* if device 0 was found in ata_devchk, wait for its
1005 * BSY bit to clear
1006 */
1007 if (dev0)
1008 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1009
1010 /* if device 1 was found in ata_devchk, wait for
1011 * register access, then wait for BSY to clear
1012 */
1013 deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
1014 while (dev1) {
1015 u8 nsect, lbal;
1016
1017 bfin_dev_select(ap, 1);
1018 nsect = read_atapi_register(base, ATA_REG_NSECT);
1019 lbal = read_atapi_register(base, ATA_REG_LBAL);
1020 if ((nsect == 1) && (lbal == 1))
1021 break;
1022 if (time_after(jiffies, deadline)) {
1023 dev1 = 0;
1024 break;
1025 }
1026 ata_msleep(ap, 50); /* give drive a breather */
1027 }
1028 if (dev1)
1029 ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1030
1031 /* is all this really necessary? */
1032 bfin_dev_select(ap, 0);
1033 if (dev1)
1034 bfin_dev_select(ap, 1);
1035 if (dev0)
1036 bfin_dev_select(ap, 0);
1037 }
1038
1039 /**
1040 * bfin_bus_softreset - PATA device software reset
1041 *
1042 * Note: Original code is ata_bus_softreset().
1043 */
1044
1045 static unsigned int bfin_bus_softreset(struct ata_port *ap,
1046 unsigned int devmask)
1047 {
1048 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1049
1050 /* software reset. causes dev0 to be selected */
1051 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1052 udelay(20);
1053 write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
1054 udelay(20);
1055 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1056
1057 /* spec mandates ">= 2ms" before checking status.
1058 * We wait 150ms, because that was the magic delay used for
1059 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1060 * between when the ATA command register is written, and then
1061 * status is checked. Because waiting for "a while" before
1062 * checking status is fine, post SRST, we perform this magic
1063 * delay here as well.
1064 *
1065 * Old drivers/ide uses the 2mS rule and then waits for ready
1066 */
1067 ata_msleep(ap, 150);
1068
1069 /* Before we perform post reset processing we want to see if
1070 * the bus shows 0xFF because the odd clown forgets the D7
1071 * pulldown resistor.
1072 */
1073 if (bfin_check_status(ap) == 0xFF)
1074 return 0;
1075
1076 bfin_bus_post_reset(ap, devmask);
1077
1078 return 0;
1079 }
1080
1081 /**
1082 * bfin_softreset - reset host port via ATA SRST
 1083  * @link: ATA link to reset
1084 * @classes: resulting classes of attached devices
1085 *
1086 * Note: Original code is ata_sff_softreset().
1087 */
1088
1089 static int bfin_softreset(struct ata_link *link, unsigned int *classes,
1090 unsigned long deadline)
1091 {
1092 struct ata_port *ap = link->ap;
1093 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1094 unsigned int devmask = 0, err_mask;
1095 u8 err;
1096
1097 /* determine if device 0/1 are present */
1098 if (bfin_devchk(ap, 0))
1099 devmask |= (1 << 0);
1100 if (slave_possible && bfin_devchk(ap, 1))
1101 devmask |= (1 << 1);
1102
1103 /* select device 0 again */
1104 bfin_dev_select(ap, 0);
1105
1106 /* issue bus reset */
1107 err_mask = bfin_bus_softreset(ap, devmask);
1108 if (err_mask) {
1109 ata_port_err(ap, "SRST failed (err_mask=0x%x)\n",
1110 err_mask);
1111 return -EIO;
1112 }
1113
1114 /* determine by signature whether we have ATA or ATAPI devices */
1115 classes[0] = ata_sff_dev_classify(&ap->link.device[0],
1116 devmask & (1 << 0), &err);
1117 if (slave_possible && err != 0x81)
1118 classes[1] = ata_sff_dev_classify(&ap->link.device[1],
1119 devmask & (1 << 1), &err);
1120
1121 return 0;
1122 }
1123
1124 /**
1125 * bfin_bmdma_status - Read IDE DMA status
1126 * @ap: Port associated with this ATA transaction.
1127 */
1128
1129 static unsigned char bfin_bmdma_status(struct ata_port *ap)
1130 {
1131 unsigned char host_stat = 0;
1132 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1133
1134 if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
1135 host_stat |= ATA_DMA_ACTIVE;
1136 if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
1137 host_stat |= ATA_DMA_INTR;
1138
1139 dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
1140
1141 return host_stat;
1142 }
1143
1144 /**
1145 * bfin_data_xfer - Transfer data by PIO
 1146  * @dev: device for this I/O
 1147  * @buf: data buffer
 1148  * @buflen: buffer length
 1149  * @rw: READ for device-to-host transfers, otherwise a write
1150 *
1151 * Note: Original code is ata_sff_data_xfer().
1152 */
1153
1154 static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
1155 unsigned int buflen, int rw)
1156 {
1157 struct ata_port *ap = dev->link->ap;
1158 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1159 unsigned int words = buflen >> 1;
1160 unsigned short *buf16 = (u16 *)buf;
1161
1162 /* Transfer multiple of 2 bytes */
1163 if (rw == READ)
1164 read_atapi_data(base, words, buf16);
1165 else
1166 write_atapi_data(base, words, buf16);
1167
1168 /* Transfer trailing 1 byte, if any. */
1169 if (unlikely(buflen & 0x01)) {
1170 unsigned short align_buf[1] = { 0 };
1171 unsigned char *trailing_buf = buf + buflen - 1;
1172
1173 if (rw == READ) {
1174 read_atapi_data(base, 1, align_buf);
1175 memcpy(trailing_buf, align_buf, 1);
1176 } else {
1177 memcpy(align_buf, trailing_buf, 1);
1178 write_atapi_data(base, 1, align_buf);
1179 }
1180 words++;
1181 }
1182
1183 return words << 1;
1184 }
1185
1186 /**
1187 * bfin_irq_clear - Clear ATAPI interrupt.
1188 * @ap: Port associated with this ATA transaction.
1189 *
1190 * Note: Original code is ata_bmdma_irq_clear().
1191 */
1192
1193 static void bfin_irq_clear(struct ata_port *ap)
1194 {
1195 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1196
1197 dev_dbg(ap->dev, "in atapi irq clear\n");
1198 ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
1199 | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
1200 | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
1201 }
1202
1203 /**
1204 * bfin_thaw - Thaw DMA controller port
1205 * @ap: port to thaw
1206 *
1207 * Note: Original code is ata_sff_thaw().
1208 */
1209
1210 void bfin_thaw(struct ata_port *ap)
1211 {
1212 dev_dbg(ap->dev, "in atapi dma thaw\n");
1213 bfin_check_status(ap);
1214 ata_sff_irq_on(ap);
1215 }
1216
1217 /**
1218 * bfin_postreset - standard postreset callback
 1219  * @link: the target ata_link
1220 * @classes: classes of attached devices
1221 *
1222 * Note: Original code is ata_sff_postreset().
1223 */
1224
1225 static void bfin_postreset(struct ata_link *link, unsigned int *classes)
1226 {
1227 struct ata_port *ap = link->ap;
1228 void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
1229
1230 /* re-enable interrupts */
1231 ata_sff_irq_on(ap);
1232
1233 /* is double-select really necessary? */
1234 if (classes[0] != ATA_DEV_NONE)
1235 bfin_dev_select(ap, 1);
1236 if (classes[1] != ATA_DEV_NONE)
1237 bfin_dev_select(ap, 0);
1238
1239 /* bail out if no device is present */
1240 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
1241 return;
1242 }
1243
1244 /* set up device control */
1245 write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
1246 }
1247
1248 static void bfin_port_stop(struct ata_port *ap)
1249 {
1250 dev_dbg(ap->dev, "in atapi port stop\n");
1251 if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
1252 dma_free_coherent(ap->dev,
1253 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1254 ap->bmdma_prd,
1255 ap->bmdma_prd_dma);
1256
1257 free_dma(CH_ATAPI_RX);
1258 free_dma(CH_ATAPI_TX);
1259 }
1260 }
1261
1262 static int bfin_port_start(struct ata_port *ap)
1263 {
1264 dev_dbg(ap->dev, "in atapi port start\n");
1265 if (!(ap->udma_mask || ap->mwdma_mask))
1266 return 0;
1267
1268 ap->bmdma_prd = dma_alloc_coherent(ap->dev,
1269 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1270 &ap->bmdma_prd_dma,
1271 GFP_KERNEL);
1272
1273 if (ap->bmdma_prd == NULL) {
1274 dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
1275 goto out;
1276 }
1277
1278 if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
1279 if (request_dma(CH_ATAPI_TX,
1280 "BFIN ATAPI TX DMA") >= 0)
1281 return 0;
1282
1283 free_dma(CH_ATAPI_RX);
1284 dma_free_coherent(ap->dev,
1285 BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
1286 ap->bmdma_prd,
1287 ap->bmdma_prd_dma);
1288 }
1289
1290 out:
1291 ap->udma_mask = 0;
1292 ap->mwdma_mask = 0;
1293 dev_err(ap->dev, "Unable to request ATAPI DMA!"
1294 " Continue in PIO mode.\n");
1295
1296 return 0;
1297 }
1298
1299 static unsigned int bfin_ata_host_intr(struct ata_port *ap,
1300 struct ata_queued_cmd *qc)
1301 {
1302 struct ata_eh_info *ehi = &ap->link.eh_info;
1303 u8 status, host_stat = 0;
1304
1305 VPRINTK("ata%u: protocol %d task_state %d\n",
1306 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1307
1308 /* Check whether we are expecting interrupt in this state */
1309 switch (ap->hsm_task_state) {
1310 case HSM_ST_FIRST:
1311 /* Some pre-ATAPI-4 devices assert INTRQ
1312 * at this state when ready to receive CDB.
1313 */
1314
 1315 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1316 * The flag was turned on only for atapi devices.
1317 * No need to check is_atapi_taskfile(&qc->tf) again.
1318 */
1319 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1320 goto idle_irq;
1321 break;
1322 case HSM_ST_LAST:
1323 if (qc->tf.protocol == ATA_PROT_DMA ||
1324 qc->tf.protocol == ATAPI_PROT_DMA) {
1325 /* check status of DMA engine */
1326 host_stat = ap->ops->bmdma_status(ap);
1327 VPRINTK("ata%u: host_stat 0x%X\n",
1328 ap->print_id, host_stat);
1329
1330 /* if it's not our irq... */
1331 if (!(host_stat & ATA_DMA_INTR))
1332 goto idle_irq;
1333
1334 /* before we do anything else, clear DMA-Start bit */
1335 ap->ops->bmdma_stop(qc);
1336
1337 if (unlikely(host_stat & ATA_DMA_ERR)) {
1338 /* error when transferring data to/from memory */
1339 qc->err_mask |= AC_ERR_HOST_BUS;
1340 ap->hsm_task_state = HSM_ST_ERR;
1341 }
1342 }
1343 break;
1344 case HSM_ST:
1345 break;
1346 default:
1347 goto idle_irq;
1348 }
1349
1350 /* check altstatus */
1351 status = ap->ops->sff_check_altstatus(ap);
1352 if (status & ATA_BUSY)
1353 goto busy_ata;
1354
1355 /* check main status, clearing INTRQ */
1356 status = ap->ops->sff_check_status(ap);
1357 if (unlikely(status & ATA_BUSY))
1358 goto busy_ata;
1359
1360 /* ack bmdma irq events */
1361 ap->ops->sff_irq_clear(ap);
1362
1363 ata_sff_hsm_move(ap, qc, status, 0);
1364
1365 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1366 qc->tf.protocol == ATAPI_PROT_DMA))
1367 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1368
1369 busy_ata:
1370 return 1; /* irq handled */
1371
1372 idle_irq:
1373 ap->stats.idle_irq++;
1374
1375 #ifdef ATA_IRQ_TRAP
1376 if ((ap->stats.idle_irq % 1000) == 0) {
1377 ap->ops->irq_ack(ap, 0); /* debug trap */
1378 ata_port_warn(ap, "irq trap\n");
1379 return 1;
1380 }
1381 #endif
1382 return 0; /* irq not handled */
1383 }
1384
1385 static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
1386 {
1387 struct ata_host *host = dev_instance;
1388 unsigned int i;
1389 unsigned int handled = 0;
1390 unsigned long flags;
1391
1392 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1393 spin_lock_irqsave(&host->lock, flags);
1394
1395 for (i = 0; i < host->n_ports; i++) {
1396 struct ata_port *ap = host->ports[i];
1397 struct ata_queued_cmd *qc;
1398
1399 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1400 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1401 handled |= bfin_ata_host_intr(ap, qc);
1402 }
1403
1404 spin_unlock_irqrestore(&host->lock, flags);
1405
1406 return IRQ_RETVAL(handled);
1407 }
1408
1409
1410 static struct scsi_host_template bfin_sht = {
1411 ATA_BASE_SHT(DRV_NAME),
1412 .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
1413 .dma_boundary = ATA_DMA_BOUNDARY,
1414 };
1415
1416 static struct ata_port_operations bfin_pata_ops = {
1417 .inherits = &ata_bmdma_port_ops,
1418
1419 .set_piomode = bfin_set_piomode,
1420 .set_dmamode = bfin_set_dmamode,
1421
1422 .sff_tf_load = bfin_tf_load,
1423 .sff_tf_read = bfin_tf_read,
1424 .sff_exec_command = bfin_exec_command,
1425 .sff_check_status = bfin_check_status,
1426 .sff_check_altstatus = bfin_check_altstatus,
1427 .sff_dev_select = bfin_dev_select,
1428 .sff_set_devctl = bfin_set_devctl,
1429
1430 .bmdma_setup = bfin_bmdma_setup,
1431 .bmdma_start = bfin_bmdma_start,
1432 .bmdma_stop = bfin_bmdma_stop,
1433 .bmdma_status = bfin_bmdma_status,
1434 .sff_data_xfer = bfin_data_xfer,
1435
1436 .qc_prep = ata_noop_qc_prep,
1437
1438 .thaw = bfin_thaw,
1439 .softreset = bfin_softreset,
1440 .postreset = bfin_postreset,
1441
1442 .sff_irq_clear = bfin_irq_clear,
1443
1444 .port_start = bfin_port_start,
1445 .port_stop = bfin_port_stop,
1446 };
1447
1448 static struct ata_port_info bfin_port_info[] = {
1449 {
1450 .flags = ATA_FLAG_SLAVE_POSS,
1451 .pio_mask = ATA_PIO4,
1452 .mwdma_mask = 0,
1453 .udma_mask = 0,
1454 .port_ops = &bfin_pata_ops,
1455 },
1456 };
1457
1458 /**
1459 * bfin_reset_controller - initialize BF54x ATAPI controller.
1460 */
1461
1462 static int bfin_reset_controller(struct ata_host *host)
1463 {
1464 void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
1465 int count;
1466 unsigned short status;
1467
1468 /* Disable all ATAPI interrupts */
1469 ATAPI_SET_INT_MASK(base, 0);
1470 SSYNC();
1471
 1472 	/* Assert the RESET signal for at least 25us */
1473 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
1474 udelay(30);
1475
1476 /* Negate the RESET signal for 2ms*/
1477 ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
1478 msleep(2);
1479
1480 /* Wait on Busy flag to clear */
1481 count = 10000000;
1482 do {
1483 status = read_atapi_register(base, ATA_REG_STATUS);
1484 } while (--count && (status & ATA_BUSY));
1485
1486 /* Enable only ATAPI Device interrupt */
1487 ATAPI_SET_INT_MASK(base, 1);
1488 SSYNC();
1489
1490 return (!count);
1491 }
1492
1493 /**
1494 * atapi_io_port - define atapi peripheral port pins.
1495 */
1496 static unsigned short atapi_io_port[] = {
1497 P_ATAPI_RESET,
1498 P_ATAPI_DIOR,
1499 P_ATAPI_DIOW,
1500 P_ATAPI_CS0,
1501 P_ATAPI_CS1,
1502 P_ATAPI_DMACK,
1503 P_ATAPI_DMARQ,
1504 P_ATAPI_INTRQ,
1505 P_ATAPI_IORDY,
1506 P_ATAPI_D0A,
1507 P_ATAPI_D1A,
1508 P_ATAPI_D2A,
1509 P_ATAPI_D3A,
1510 P_ATAPI_D4A,
1511 P_ATAPI_D5A,
1512 P_ATAPI_D6A,
1513 P_ATAPI_D7A,
1514 P_ATAPI_D8A,
1515 P_ATAPI_D9A,
1516 P_ATAPI_D10A,
1517 P_ATAPI_D11A,
1518 P_ATAPI_D12A,
1519 P_ATAPI_D13A,
1520 P_ATAPI_D14A,
1521 P_ATAPI_D15A,
1522 P_ATAPI_A0A,
1523 P_ATAPI_A1A,
1524 P_ATAPI_A2A,
1525 0
1526 };
1527
1528 /**
1529 * bfin_atapi_probe - attach a bfin atapi interface
1530 * @pdev: platform device
1531 *
1532 * Register a bfin atapi interface.
1533 *
1534 *
1535 * Platform devices are expected to contain 2 resources per port:
1536 *
1537 * - I/O Base (IORESOURCE_IO)
1538 * - IRQ (IORESOURCE_IRQ)
1539 *
1540 */
1541 static int bfin_atapi_probe(struct platform_device *pdev)
1542 {
1543 int board_idx = 0;
1544 struct resource *res;
1545 struct ata_host *host;
1546 unsigned int fsclk = get_sclk();
1547 int udma_mode = 5;
1548 const struct ata_port_info *ppi[] =
1549 { &bfin_port_info[board_idx], NULL };
1550
1551 /*
1552 * Simple resource validation ..
1553 */
1554 if (unlikely(pdev->num_resources != 2)) {
1555 dev_err(&pdev->dev, "invalid number of resources\n");
1556 return -EINVAL;
1557 }
1558
1559 /*
1560 * Get the register base first
1561 */
1562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1563 if (res == NULL)
1564 return -EINVAL;
1565
1566 while (bfin_port_info[board_idx].udma_mask > 0 &&
1567 udma_fsclk[udma_mode] > fsclk) {
1568 udma_mode--;
1569 bfin_port_info[board_idx].udma_mask >>= 1;
1570 }
1571
1572 /*
1573 * Now that that's out of the way, wire up the port..
1574 */
1575 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1576 if (!host)
1577 return -ENOMEM;
1578
1579 host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
1580
1581 if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
1582 dev_err(&pdev->dev, "Requesting Peripherals failed\n");
1583 return -EFAULT;
1584 }
1585
1586 if (bfin_reset_controller(host)) {
1587 peripheral_free_list(atapi_io_port);
1588 dev_err(&pdev->dev, "Fail to reset ATAPI device\n");
1589 return -EFAULT;
1590 }
1591
1592 if (ata_host_activate(host, platform_get_irq(pdev, 0),
1593 bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
1594 peripheral_free_list(atapi_io_port);
1595 dev_err(&pdev->dev, "Fail to attach ATAPI device\n");
1596 return -ENODEV;
1597 }
1598
1599 dev_set_drvdata(&pdev->dev, host);
1600
1601 return 0;
1602 }
1603
1604 /**
1605 * bfin_atapi_remove - unplug a bfin atapi interface
1606 * @pdev: platform device
1607 *
1608 * A bfin atapi device has been unplugged. Perform the needed
1609 * cleanup. Also called on module unload for any active devices.
1610 */
1611 static int bfin_atapi_remove(struct platform_device *pdev)
1612 {
1613 struct device *dev = &pdev->dev;
1614 struct ata_host *host = dev_get_drvdata(dev);
1615
1616 ata_host_detach(host);
1617 dev_set_drvdata(&pdev->dev, NULL);
1618
1619 peripheral_free_list(atapi_io_port);
1620
1621 return 0;
1622 }
1623
1624 #ifdef CONFIG_PM
1625 static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1626 {
1627 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1628 if (host)
1629 return ata_host_suspend(host, state);
1630 else
1631 return 0;
1632 }
1633
1634 static int bfin_atapi_resume(struct platform_device *pdev)
1635 {
1636 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1637 int ret;
1638
1639 if (host) {
1640 ret = bfin_reset_controller(host);
1641 if (ret) {
1642 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
1643 return ret;
1644 }
1645 ata_host_resume(host);
1646 }
1647
1648 return 0;
1649 }
1650 #else
1651 #define bfin_atapi_suspend NULL
1652 #define bfin_atapi_resume NULL
1653 #endif
1654
1655 static struct platform_driver bfin_atapi_driver = {
1656 .probe = bfin_atapi_probe,
1657 .remove = bfin_atapi_remove,
1658 .suspend = bfin_atapi_suspend,
1659 .resume = bfin_atapi_resume,
1660 .driver = {
1661 .name = DRV_NAME,
1662 .owner = THIS_MODULE,
1663 },
1664 };
1665
1666 #define ATAPI_MODE_SIZE 10
1667 static char bfin_atapi_mode[ATAPI_MODE_SIZE];
1668
1669 static int __init bfin_atapi_init(void)
1670 {
1671 pr_info("register bfin atapi driver\n");
1672
 1673 	switch (bfin_atapi_mode[0]) {
1674 case 'p':
1675 case 'P':
1676 break;
1677 case 'm':
1678 case 'M':
1679 bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
1680 break;
1681 default:
1682 bfin_port_info[0].udma_mask = ATA_UDMA5;
 1683 	}
1684
1685 return platform_driver_register(&bfin_atapi_driver);
1686 }
1687
1688 static void __exit bfin_atapi_exit(void)
1689 {
1690 platform_driver_unregister(&bfin_atapi_driver);
1691 }
1692
1693 module_init(bfin_atapi_init);
1694 module_exit(bfin_atapi_exit);
1695 /*
1696 * ATAPI mode:
1697 * pio/PIO
1698 * udma/UDMA (default)
1699 * mwdma/MWDMA
1700 */
1701 module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
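/*
 * Usage sketch (the exact module-name prefix depends on the build and is
 * an assumption here):
 *   modprobe pata_bf54x bfin_atapi_mode=PIO
 * or, when built in, on the kernel command line:
 *   pata_bf54x.bfin_atapi_mode=MWDMA
 * Only the first character of the string is examined by bfin_atapi_init().
 */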
1702
1703 MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
1704 MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
1705 MODULE_LICENSE("GPL");
1706 MODULE_VERSION(DRV_VERSION);
1707 MODULE_ALIAS("platform:" DRV_NAME);