[PATCH] SPI: busnum == 0 needs to work
drivers/spi/pxa2xx_spi.c
1/*
2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/ioport.h>
23#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/platform_device.h>
26#include <linux/dma-mapping.h>
27#include <linux/spi/spi.h>
28#include <linux/workqueue.h>
29#include <linux/errno.h>
30#include <linux/delay.h>
31
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/hardware.h>
35#include <asm/delay.h>
36#include <asm/dma.h>
37
38#include <asm/arch/hardware.h>
39#include <asm/arch/pxa-regs.h>
40#include <asm/arch/pxa2xx_spi.h>
41
42MODULE_AUTHOR("Stephen Street");
43MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
44MODULE_LICENSE("GPL");
45
46#define MAX_BUSES 3
47
48#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
49#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
50#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
51
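/*
 * Generate trivial MMIO accessors (read_SSCR0(), write_SSCR0(), ...) for
 * each SSP register at a fixed offset from the mapped register base.
 */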
52#define DEFINE_SSP_REG(reg, off) \
53static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
54static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
55
56DEFINE_SSP_REG(SSCR0, 0x00)
57DEFINE_SSP_REG(SSCR1, 0x04)
58DEFINE_SSP_REG(SSSR, 0x08)
59DEFINE_SSP_REG(SSITR, 0x0c)
60DEFINE_SSP_REG(SSDR, 0x10)
61DEFINE_SSP_REG(SSTO, 0x28)
62DEFINE_SSP_REG(SSPSP, 0x2c)
63
64#define START_STATE ((void*)0)
65#define RUNNING_STATE ((void*)1)
66#define DONE_STATE ((void*)2)
67#define ERROR_STATE ((void*)-1)
68
69#define QUEUE_RUNNING 0
70#define QUEUE_STOPPED 1
71
72struct driver_data {
73 /* Driver model hookup */
74 struct platform_device *pdev;
75
76 /* SPI framework hookup */
77 enum pxa_ssp_type ssp_type;
78 struct spi_master *master;
79
80 /* PXA hookup */
81 struct pxa2xx_spi_master *master_info;
82
83 /* DMA setup stuff */
84 int rx_channel;
85 int tx_channel;
86 u32 *null_dma_buf;
87
88 /* SSP register addresses */
89 void *ioaddr;
90 u32 ssdr_physical;
91
92 /* SSP masks*/
93 u32 dma_cr1;
94 u32 int_cr1;
95 u32 clear_sr;
96 u32 mask_sr;
97
98 /* Driver message queue */
99 struct workqueue_struct *workqueue;
100 struct work_struct pump_messages;
101 spinlock_t lock;
102 struct list_head queue;
103 int busy;
104 int run;
105
106 /* Message Transfer pump */
107 struct tasklet_struct pump_transfers;
108
109 /* Current message transfer state info */
110 struct spi_message* cur_msg;
111 struct spi_transfer* cur_transfer;
112 struct chip_data *cur_chip;
113 size_t len;
114 void *tx;
115 void *tx_end;
116 void *rx;
117 void *rx_end;
118 int dma_mapped;
119 dma_addr_t rx_dma;
120 dma_addr_t tx_dma;
121 size_t rx_map_len;
122 size_t tx_map_len;
123 int cs_change;
124 void (*write)(struct driver_data *drv_data);
125 void (*read)(struct driver_data *drv_data);
126 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
127 void (*cs_control)(u32 command);
128};
129
130struct chip_data {
131 u32 cr0;
132 u32 cr1;
133 u32 to;
134 u32 psp;
135 u32 timeout;
136 u8 n_bytes;
137 u32 dma_width;
138 u32 dma_burst_size;
139 u32 threshold;
140 u32 dma_threshold;
141 u8 enable_dma;
142 void (*write)(struct driver_data *drv_data);
143 void (*read)(struct driver_data *drv_data);
144 void (*cs_control)(u32 command);
145};
146
147static void pump_messages(void *data);
148
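/*
 * Drain the receive FIFO and busy-wait for the SSP to go idle, then clear
 * any receive-overrun status.  Callers treat a return of 0 as a timeout.
 */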
149static int flush(struct driver_data *drv_data)
150{
151 unsigned long limit = loops_per_jiffy << 1;
152
153 void *reg = drv_data->ioaddr;
154
155 do {
156 while (read_SSSR(reg) & SSSR_RNE) {
157 read_SSDR(reg);
158 }
159 } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
160 write_SSSR(SSSR_ROR, reg);
161
162 return limit;
163}
164
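/* Reload the SSP control registers from the current chip's saved settings */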
165static void restore_state(struct driver_data *drv_data)
166{
167 void *reg = drv_data->ioaddr;
168
169 /* Clear status and disable clock */
170 write_SSSR(drv_data->clear_sr, reg);
171 write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
172
173 /* Load the registers */
174 write_SSCR1(drv_data->cur_chip->cr1, reg);
175 write_SSCR0(drv_data->cur_chip->cr0, reg);
176 if (drv_data->ssp_type != PXA25x_SSP) {
177 write_SSTO(0, reg);
178 write_SSPSP(drv_data->cur_chip->psp, reg);
179 }
180}
181
182static void null_cs_control(u32 command)
183{
184}
185
186static void null_writer(struct driver_data *drv_data)
187{
188 void *reg = drv_data->ioaddr;
189 u8 n_bytes = drv_data->cur_chip->n_bytes;
190
191 while ((read_SSSR(reg) & SSSR_TNF)
192 && (drv_data->tx < drv_data->tx_end)) {
193 write_SSDR(0, reg);
194 drv_data->tx += n_bytes;
195 }
196}
197
198static void null_reader(struct driver_data *drv_data)
199{
200 void *reg = drv_data->ioaddr;
201 u8 n_bytes = drv_data->cur_chip->n_bytes;
202
203 while ((read_SSSR(reg) & SSSR_RNE)
204 && (drv_data->rx < drv_data->rx_end)) {
205 read_SSDR(reg);
206 drv_data->rx += n_bytes;
207 }
208}
209
210static void u8_writer(struct driver_data *drv_data)
211{
212 void *reg = drv_data->ioaddr;
213
214 while ((read_SSSR(reg) & SSSR_TNF)
215 && (drv_data->tx < drv_data->tx_end)) {
216 write_SSDR(*(u8 *)(drv_data->tx), reg);
217 ++drv_data->tx;
218 }
219}
220
221static void u8_reader(struct driver_data *drv_data)
222{
223 void *reg = drv_data->ioaddr;
224
225 while ((read_SSSR(reg) & SSSR_RNE)
226 && (drv_data->rx < drv_data->rx_end)) {
227 *(u8 *)(drv_data->rx) = read_SSDR(reg);
228 ++drv_data->rx;
229 }
230}
231
232static void u16_writer(struct driver_data *drv_data)
233{
234 void *reg = drv_data->ioaddr;
235
236 while ((read_SSSR(reg) & SSSR_TNF)
237 && (drv_data->tx < drv_data->tx_end)) {
238 write_SSDR(*(u16 *)(drv_data->tx), reg);
239 drv_data->tx += 2;
240 }
241}
242
243static void u16_reader(struct driver_data *drv_data)
244{
245 void *reg = drv_data->ioaddr;
246
247 while ((read_SSSR(reg) & SSSR_RNE)
248 && (drv_data->rx < drv_data->rx_end)) {
249 *(u16 *)(drv_data->rx) = read_SSDR(reg);
250 drv_data->rx += 2;
251 }
252}
253static void u32_writer(struct driver_data *drv_data)
254{
255 void *reg = drv_data->ioaddr;
256
257 while ((read_SSSR(reg) & SSSR_TNF)
258 && (drv_data->tx < drv_data->tx_end)) {
259		write_SSDR(*(u32 *)(drv_data->tx), reg);
260 drv_data->tx += 4;
261 }
262}
263
264static void u32_reader(struct driver_data *drv_data)
265{
266 void *reg = drv_data->ioaddr;
267
268 while ((read_SSSR(reg) & SSSR_RNE)
269 && (drv_data->rx < drv_data->rx_end)) {
270 *(u32 *)(drv_data->rx) = read_SSDR(reg);
271 drv_data->rx += 4;
272 }
273}
274
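/*
 * Advance cur_transfer to the next spi_transfer in the message, or report
 * that the whole message has been handled.
 */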
275static void *next_transfer(struct driver_data *drv_data)
276{
277 struct spi_message *msg = drv_data->cur_msg;
278 struct spi_transfer *trans = drv_data->cur_transfer;
279
280 /* Move to next transfer */
281 if (trans->transfer_list.next != &msg->transfers) {
282 drv_data->cur_transfer =
283 list_entry(trans->transfer_list.next,
284 struct spi_transfer,
285 transfer_list);
286 return RUNNING_STATE;
287 } else
288 return DONE_STATE;
289}
290
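/*
 * Decide whether the current transfer can use DMA and, if so, stream-map
 * the rx/tx buffers (substituting the scratch null_dma_buf for a missing
 * buffer).  Returns 1 when the DMA mapping succeeded, 0 to fall back to PIO.
 */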
291static int map_dma_buffers(struct driver_data *drv_data)
292{
293 struct spi_message *msg = drv_data->cur_msg;
294 struct device *dev = &msg->spi->dev;
295
296 if (!drv_data->cur_chip->enable_dma)
297 return 0;
298
299 if (msg->is_dma_mapped)
300 return drv_data->rx_dma && drv_data->tx_dma;
301
302 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
303 return 0;
304
305 /* Modify setup if rx buffer is null */
306 if (drv_data->rx == NULL) {
307 *drv_data->null_dma_buf = 0;
308 drv_data->rx = drv_data->null_dma_buf;
309 drv_data->rx_map_len = 4;
310 } else
311 drv_data->rx_map_len = drv_data->len;
312
313
314 /* Modify setup if tx buffer is null */
315 if (drv_data->tx == NULL) {
316 *drv_data->null_dma_buf = 0;
317 drv_data->tx = drv_data->null_dma_buf;
318 drv_data->tx_map_len = 4;
319 } else
320 drv_data->tx_map_len = drv_data->len;
321
322 /* Stream map the rx buffer */
323 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
324 drv_data->rx_map_len,
325 DMA_FROM_DEVICE);
326 if (dma_mapping_error(drv_data->rx_dma))
327 return 0;
328
329 /* Stream map the tx buffer */
330 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
331 drv_data->tx_map_len,
332 DMA_TO_DEVICE);
333
334 if (dma_mapping_error(drv_data->tx_dma)) {
335 dma_unmap_single(dev, drv_data->rx_dma,
336 drv_data->rx_map_len, DMA_FROM_DEVICE);
337 return 0;
338 }
339
340 return 1;
341}
342
343static void unmap_dma_buffers(struct driver_data *drv_data)
344{
345 struct device *dev;
346
347 if (!drv_data->dma_mapped)
348 return;
349
350 if (!drv_data->cur_msg->is_dma_mapped) {
351 dev = &drv_data->cur_msg->spi->dev;
352 dma_unmap_single(dev, drv_data->rx_dma,
353 drv_data->rx_map_len, DMA_FROM_DEVICE);
354 dma_unmap_single(dev, drv_data->tx_dma,
355 drv_data->tx_map_len, DMA_TO_DEVICE);
356 }
357
358 drv_data->dma_mapped = 0;
359}
360
361/* caller already set message->status; dma and pio irqs are blocked */
362static void giveback(struct spi_message *message, struct driver_data *drv_data)
363{
364 struct spi_transfer* last_transfer;
365
366 last_transfer = list_entry(message->transfers.prev,
367 struct spi_transfer,
368 transfer_list);
369
370 if (!last_transfer->cs_change)
371 drv_data->cs_control(PXA2XX_CS_DEASSERT);
372
373 message->state = NULL;
374 if (message->complete)
375 message->complete(message->context);
376
377 drv_data->cur_msg = NULL;
378 drv_data->cur_transfer = NULL;
379 drv_data->cur_chip = NULL;
380 queue_work(drv_data->workqueue, &drv_data->pump_messages);
381}
382
383static int wait_ssp_rx_stall(void *ioaddr)
384{
385 unsigned long limit = loops_per_jiffy << 1;
386
387 while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
388 cpu_relax();
389
390 return limit;
391}
392
393static int wait_dma_channel_stop(int channel)
394{
395 unsigned long limit = loops_per_jiffy << 1;
396
397 while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
398 cpu_relax();
399
400 return limit;
401}
402
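/*
 * Per-channel DMA callback.  Handles DMA bus errors and, on PXA25x parts
 * (which lack an SSP timeout interrupt), finishes the transfer once the tx
 * channel raises its end-of-transfer interrupt.
 */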
403static void dma_handler(int channel, void *data, struct pt_regs *regs)
404{
405 struct driver_data *drv_data = data;
406 struct spi_message *msg = drv_data->cur_msg;
407 void *reg = drv_data->ioaddr;
408 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
409 u32 trailing_sssr = 0;
410
411 if (irq_status & DCSR_BUSERR) {
412
413 /* Disable interrupts, clear status and reset DMA */
414 if (drv_data->ssp_type != PXA25x_SSP)
415 write_SSTO(0, reg);
416 write_SSSR(drv_data->clear_sr, reg);
417 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
418 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
419 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
420
421 if (flush(drv_data) == 0)
422 dev_err(&drv_data->pdev->dev,
423 "dma_handler: flush fail\n");
424
425 unmap_dma_buffers(drv_data);
426
427 if (channel == drv_data->tx_channel)
428 dev_err(&drv_data->pdev->dev,
429 "dma_handler: bad bus address on "
430 "tx channel %d, source %x target = %x\n",
431 channel, DSADR(channel), DTADR(channel));
432 else
433 dev_err(&drv_data->pdev->dev,
434 "dma_handler: bad bus address on "
435 "rx channel %d, source %x target = %x\n",
436 channel, DSADR(channel), DTADR(channel));
437
438 msg->state = ERROR_STATE;
439 tasklet_schedule(&drv_data->pump_transfers);
440 }
441
442	/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
443 if ((drv_data->ssp_type == PXA25x_SSP)
444 && (channel == drv_data->tx_channel)
445 && (irq_status & DCSR_ENDINTR)) {
446
447 /* Wait for rx to stall */
448 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
449 dev_err(&drv_data->pdev->dev,
450 "dma_handler: ssp rx stall failed\n");
451
452 /* Clear and disable interrupts on SSP and DMA channels*/
453 write_SSSR(drv_data->clear_sr, reg);
454 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
455 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
456 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
457 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
458 dev_err(&drv_data->pdev->dev,
459 "dma_handler: dma rx channel stop failed\n");
460
461 unmap_dma_buffers(drv_data);
462
463 /* Read trailing bytes */
464 /* Calculate number of trailing bytes, read them */
465 trailing_sssr = read_SSSR(reg);
466 if ((trailing_sssr & 0xf008) != 0xf000) {
467 drv_data->rx = drv_data->rx_end -
468 (((trailing_sssr >> 12) & 0x0f) + 1);
469 drv_data->read(drv_data);
470 }
471 msg->actual_length += drv_data->len;
472
473 /* Release chip select if requested, transfer delays are
474 * handled in pump_transfers */
475 if (drv_data->cs_change)
476 drv_data->cs_control(PXA2XX_CS_DEASSERT);
477
478 /* Move to next transfer */
479 msg->state = next_transfer(drv_data);
480
481 /* Schedule transfer tasklet */
482 tasklet_schedule(&drv_data->pump_transfers);
483 }
484}
485
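/*
 * SSP interrupt handler used while a DMA transfer is in flight: deals with
 * receiver overrun, the timeout interrupt and the trailing bytes left in
 * the receive FIFO after the DMA channels have stopped.
 */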
486static irqreturn_t dma_transfer(struct driver_data *drv_data)
487{
488 u32 irq_status;
489 u32 trailing_sssr = 0;
490 struct spi_message *msg = drv_data->cur_msg;
491 void *reg = drv_data->ioaddr;
492
493 irq_status = read_SSSR(reg) & drv_data->mask_sr;
494 if (irq_status & SSSR_ROR) {
495 /* Clear and disable interrupts on SSP and DMA channels*/
496 if (drv_data->ssp_type != PXA25x_SSP)
497 write_SSTO(0, reg);
498 write_SSSR(drv_data->clear_sr, reg);
499 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
500 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
501 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
502 unmap_dma_buffers(drv_data);
503
504 if (flush(drv_data) == 0)
505 dev_err(&drv_data->pdev->dev,
506 "dma_transfer: flush fail\n");
507
508		dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n");
509
510 drv_data->cur_msg->state = ERROR_STATE;
511 tasklet_schedule(&drv_data->pump_transfers);
512
513 return IRQ_HANDLED;
514 }
515
516 /* Check for false positive timeout */
517 if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
518 write_SSSR(SSSR_TINT, reg);
519 return IRQ_HANDLED;
520 }
521
522 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
523
524 /* Clear and disable interrupts on SSP and DMA channels*/
525 if (drv_data->ssp_type != PXA25x_SSP)
526 write_SSTO(0, reg);
527 write_SSSR(drv_data->clear_sr, reg);
528 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
529 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
530 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
531
532 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
533 dev_err(&drv_data->pdev->dev,
534 "dma_transfer: dma rx channel stop failed\n");
535
536 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
537 dev_err(&drv_data->pdev->dev,
538 "dma_transfer: ssp rx stall failed\n");
539
540 unmap_dma_buffers(drv_data);
541
542 /* Calculate number of trailing bytes, read them */
543 trailing_sssr = read_SSSR(reg);
544 if ((trailing_sssr & 0xf008) != 0xf000) {
545 drv_data->rx = drv_data->rx_end -
546 (((trailing_sssr >> 12) & 0x0f) + 1);
547 drv_data->read(drv_data);
548 }
549 msg->actual_length += drv_data->len;
550
551 /* Release chip select if requested, transfer delays are
552 * handled in pump_transfers */
553 if (drv_data->cs_change)
554 drv_data->cs_control(PXA2XX_CS_DEASSERT);
555
556 /* Move to next transfer */
557 msg->state = next_transfer(drv_data);
558
559 /* Schedule transfer tasklet */
560 tasklet_schedule(&drv_data->pump_transfers);
561
562 return IRQ_HANDLED;
563 }
564
565	/* Oops, problem detected */
566 return IRQ_NONE;
567}
568
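/*
 * SSP interrupt handler for PIO transfers: pumps the FIFOs, handles
 * receiver overrun and timeout, and schedules the next transfer once the
 * current one completes.
 */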
569static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
570{
571 u32 irq_status;
572 struct spi_message *msg = drv_data->cur_msg;
573 void *reg = drv_data->ioaddr;
574 irqreturn_t handled = IRQ_NONE;
575 unsigned long limit = loops_per_jiffy << 1;
576
577 while ((irq_status = (read_SSSR(reg) & drv_data->mask_sr))) {
578
579 if (irq_status & SSSR_ROR) {
580
581 /* Clear and disable interrupts */
582 if (drv_data->ssp_type != PXA25x_SSP)
583 write_SSTO(0, reg);
584 write_SSSR(drv_data->clear_sr, reg);
585 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
586
587 if (flush(drv_data) == 0)
588 dev_err(&drv_data->pdev->dev,
589 "interrupt_transfer: flush fail\n");
590
591 dev_warn(&drv_data->pdev->dev,
592				"interrupt_transfer: fifo overrun\n");
593
594 msg->state = ERROR_STATE;
595 tasklet_schedule(&drv_data->pump_transfers);
596
597 return IRQ_HANDLED;
598 }
599
600 /* Look for false positive timeout */
601 if ((irq_status & SSSR_TINT)
602 && (drv_data->rx < drv_data->rx_end))
603 write_SSSR(SSSR_TINT, reg);
604
605 /* Pump data */
606 drv_data->read(drv_data);
607 drv_data->write(drv_data);
608
609 if (drv_data->tx == drv_data->tx_end) {
610 /* Disable tx interrupt */
611 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
612
613 /* PXA25x_SSP has no timeout, read trailing bytes */
614 if (drv_data->ssp_type == PXA25x_SSP) {
615 while ((read_SSSR(reg) & SSSR_BSY) && limit--)
616 drv_data->read(drv_data);
617
618 if (limit == 0)
619 dev_err(&drv_data->pdev->dev,
620 "interrupt_transfer: "
621 "trailing byte read failed\n");
622 }
623 }
624
625 if ((irq_status & SSSR_TINT)
626 || (drv_data->rx == drv_data->rx_end)) {
627
628 /* Clear timeout */
629 if (drv_data->ssp_type != PXA25x_SSP)
630 write_SSTO(0, reg);
631 write_SSSR(drv_data->clear_sr, reg);
632 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
633
634			/* Update total bytes transferred */
635 msg->actual_length += drv_data->len;
636
637 /* Release chip select if requested, transfer delays are
638 * handled in pump_transfers */
639 if (drv_data->cs_change)
640 drv_data->cs_control(PXA2XX_CS_DEASSERT);
641
642 /* Move to next transfer */
643 msg->state = next_transfer(drv_data);
644
645 /* Schedule transfer tasklet */
646 tasklet_schedule(&drv_data->pump_transfers);
647
648 return IRQ_HANDLED;
649 }
650
651 /* We did something */
652 handled = IRQ_HANDLED;
653 }
654
655 return handled;
656}
657
658static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs)
659{
660 struct driver_data *drv_data = (struct driver_data *)dev_id;
661
662 if (!drv_data->cur_msg) {
663 dev_err(&drv_data->pdev->dev, "bad message state "
664 "in interrupt handler\n");
665 /* Never fail */
666 return IRQ_HANDLED;
667 }
668
669 return drv_data->transfer_handler(drv_data);
670}
671
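/*
 * Tasklet body: finish or abort the current message, or set up the next
 * spi_transfer and start it, using DMA when the buffers could be mapped and
 * falling back to interrupt-driven PIO otherwise.
 */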
672static void pump_transfers(unsigned long data)
673{
674 struct driver_data *drv_data = (struct driver_data *)data;
675 struct spi_message *message = NULL;
676 struct spi_transfer *transfer = NULL;
677 struct spi_transfer *previous = NULL;
678 struct chip_data *chip = NULL;
679 void *reg = drv_data->ioaddr;
680
681 /* Get current state information */
682 message = drv_data->cur_msg;
683 transfer = drv_data->cur_transfer;
684 chip = drv_data->cur_chip;
685
686 /* Handle for abort */
687 if (message->state == ERROR_STATE) {
688 message->status = -EIO;
689 giveback(message, drv_data);
690 return;
691 }
692
693 /* Handle end of message */
694 if (message->state == DONE_STATE) {
695 message->status = 0;
696 giveback(message, drv_data);
697 return;
698 }
699
700 /* Delay if requested at end of transfer*/
701 if (message->state == RUNNING_STATE) {
702 previous = list_entry(transfer->transfer_list.prev,
703 struct spi_transfer,
704 transfer_list);
705 if (previous->delay_usecs)
706 udelay(previous->delay_usecs);
707 }
708
709 /* Setup the transfer state based on the type of transfer */
710 if (flush(drv_data) == 0) {
711 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
712 message->status = -EIO;
713 giveback(message, drv_data);
714 return;
715 }
716 drv_data->cs_control = chip->cs_control;
717 drv_data->tx = (void *)transfer->tx_buf;
718 drv_data->tx_end = drv_data->tx + transfer->len;
719 drv_data->rx = transfer->rx_buf;
720 drv_data->rx_end = drv_data->rx + transfer->len;
721 drv_data->rx_dma = transfer->rx_dma;
722 drv_data->tx_dma = transfer->tx_dma;
723 drv_data->len = transfer->len;
724 drv_data->write = drv_data->tx ? chip->write : null_writer;
725 drv_data->read = drv_data->rx ? chip->read : null_reader;
726 drv_data->cs_change = transfer->cs_change;
727 message->state = RUNNING_STATE;
728
729 /* Try to map dma buffer and do a dma transfer if successful */
730 if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
731
732 /* Ensure we have the correct interrupt handler */
733 drv_data->transfer_handler = dma_transfer;
734
735 /* Setup rx DMA Channel */
736 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
737 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
738 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
739 if (drv_data->rx == drv_data->null_dma_buf)
740 /* No target address increment */
741 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
742 | chip->dma_width
743 | chip->dma_burst_size
744 | drv_data->len;
745 else
746 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
747 | DCMD_FLOWSRC
748 | chip->dma_width
749 | chip->dma_burst_size
750 | drv_data->len;
751
752 /* Setup tx DMA Channel */
753 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
754 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
755 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
756 if (drv_data->tx == drv_data->null_dma_buf)
757 /* No source address increment */
758 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
759 | chip->dma_width
760 | chip->dma_burst_size
761 | drv_data->len;
762 else
763 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
764 | DCMD_FLOWTRG
765 | chip->dma_width
766 | chip->dma_burst_size
767 | drv_data->len;
768
769 /* Enable dma end irqs on SSP to detect end of transfer */
770 if (drv_data->ssp_type == PXA25x_SSP)
771 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
772
773 /* Fix me, need to handle cs polarity */
774 drv_data->cs_control(PXA2XX_CS_ASSERT);
775
776 /* Go baby, go */
777 write_SSSR(drv_data->clear_sr, reg);
778 DCSR(drv_data->rx_channel) |= DCSR_RUN;
779 DCSR(drv_data->tx_channel) |= DCSR_RUN;
780 if (drv_data->ssp_type != PXA25x_SSP)
781 write_SSTO(chip->timeout, reg);
782 write_SSCR1(chip->cr1
783 | chip->dma_threshold
784 | drv_data->dma_cr1,
785 reg);
786 } else {
787 /* Ensure we have the correct interrupt handler */
788 drv_data->transfer_handler = interrupt_transfer;
789
790 /* Fix me, need to handle cs polarity */
791 drv_data->cs_control(PXA2XX_CS_ASSERT);
792
793 /* Go baby, go */
794 write_SSSR(drv_data->clear_sr, reg);
795 if (drv_data->ssp_type != PXA25x_SSP)
796 write_SSTO(chip->timeout, reg);
797 write_SSCR1(chip->cr1
798 | chip->threshold
799 | drv_data->int_cr1,
800 reg);
801 }
802}
803
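/*
 * Workqueue function: pull the next spi_message off the queue, program the
 * SSP with the per-chip settings and kick off the transfer tasklet.
 */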
804static void pump_messages(void *data)
805{
806 struct driver_data *drv_data = data;
807 unsigned long flags;
808
809 /* Lock queue and check for queue work */
810 spin_lock_irqsave(&drv_data->lock, flags);
811 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
812 drv_data->busy = 0;
813 spin_unlock_irqrestore(&drv_data->lock, flags);
814 return;
815 }
816
817 /* Make sure we are not already running a message */
818 if (drv_data->cur_msg) {
819 spin_unlock_irqrestore(&drv_data->lock, flags);
820 return;
821 }
822
823 /* Extract head of queue */
824 drv_data->cur_msg = list_entry(drv_data->queue.next,
825 struct spi_message, queue);
826 list_del_init(&drv_data->cur_msg->queue);
827 drv_data->busy = 1;
828 spin_unlock_irqrestore(&drv_data->lock, flags);
829
830 /* Initial message state*/
831 drv_data->cur_msg->state = START_STATE;
832 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
833 struct spi_transfer,
834 transfer_list);
835
836 /* Setup the SSP using the per chip configuration */
837 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
838 restore_state(drv_data);
839
840 /* Mark as busy and launch transfers */
841 tasklet_schedule(&drv_data->pump_transfers);
842}
843
844static int transfer(struct spi_device *spi, struct spi_message *msg)
845{
846 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
847 unsigned long flags;
848
849 spin_lock_irqsave(&drv_data->lock, flags);
850
851 if (drv_data->run == QUEUE_STOPPED) {
852 spin_unlock_irqrestore(&drv_data->lock, flags);
853 return -ESHUTDOWN;
854 }
855
856 msg->actual_length = 0;
857 msg->status = -EINPROGRESS;
858 msg->state = START_STATE;
859
860 list_add_tail(&msg->queue, &drv_data->queue);
861
862 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
863 queue_work(drv_data->workqueue, &drv_data->pump_messages);
864
865 spin_unlock_irqrestore(&drv_data->lock, flags);
866
867 return 0;
868}
869
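/*
 * Per-device setup: validate the word size, build the chip's SSCR0/SSCR1
 * values, FIFO thresholds and DMA burst size, and select the matching PIO
 * read/write helpers.
 */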
870static int setup(struct spi_device *spi)
871{
872 struct pxa2xx_spi_chip *chip_info = NULL;
873 struct chip_data *chip;
874 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
875 unsigned int clk_div;
876
877 if (!spi->bits_per_word)
878 spi->bits_per_word = 8;
879
880 if (drv_data->ssp_type != PXA25x_SSP
881 && (spi->bits_per_word < 4 || spi->bits_per_word > 32))
882 return -EINVAL;
883	else if (drv_data->ssp_type == PXA25x_SSP
		&& (spi->bits_per_word < 4 || spi->bits_per_word > 16))
884 return -EINVAL;
885
886 /* Only alloc (or use chip_info) on first setup */
887 chip = spi_get_ctldata(spi);
888 if (chip == NULL) {
889 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
890 if (!chip)
891 return -ENOMEM;
892
893 chip->cs_control = null_cs_control;
894 chip->enable_dma = 0;
895 chip->timeout = 5;
896 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
897 chip->dma_burst_size = drv_data->master_info->enable_dma ?
898 DCMD_BURST8 : 0;
899
900 chip_info = spi->controller_data;
901 }
902
903 /* chip_info isn't always needed */
904 if (chip_info) {
905 if (chip_info->cs_control)
906 chip->cs_control = chip_info->cs_control;
907
908 chip->timeout = (chip_info->timeout_microsecs * 10000) / 2712;
909
910 chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
911 | SSCR1_TxTresh(chip_info->tx_threshold);
912
913 chip->enable_dma = chip_info->dma_burst_size != 0
914 && drv_data->master_info->enable_dma;
915 chip->dma_threshold = 0;
916
917 if (chip->enable_dma) {
918 if (chip_info->dma_burst_size <= 8) {
919 chip->dma_threshold = SSCR1_RxTresh(8)
920 | SSCR1_TxTresh(8);
921 chip->dma_burst_size = DCMD_BURST8;
922 } else if (chip_info->dma_burst_size <= 16) {
923 chip->dma_threshold = SSCR1_RxTresh(16)
924 | SSCR1_TxTresh(16);
925 chip->dma_burst_size = DCMD_BURST16;
926 } else {
927 chip->dma_threshold = SSCR1_RxTresh(32)
928 | SSCR1_TxTresh(32);
929 chip->dma_burst_size = DCMD_BURST32;
930 }
931 }
932
933
934 if (chip_info->enable_loopback)
935 chip->cr1 = SSCR1_LBM;
936 }
937
938 if (drv_data->ioaddr == SSP1_VIRT)
939 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
940 else if (drv_data->ioaddr == SSP2_VIRT)
941 clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
942 else if (drv_data->ioaddr == SSP3_VIRT)
943 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
944 else
945 return -ENODEV;
946
947 chip->cr0 = clk_div
948 | SSCR0_Motorola
949 | SSCR0_DataSize(spi->bits_per_word & 0x0f)
950 | SSCR0_SSE
951 | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
952 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4)
953 | (((spi->mode & SPI_CPOL) != 0) << 3);
954
955 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
956 if (drv_data->ssp_type != PXA25x_SSP)
957 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
958 spi->bits_per_word,
959 (CLOCK_SPEED_HZ)
960 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
961 spi->mode & 0x3);
962 else
963 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
964 spi->bits_per_word,
965 (CLOCK_SPEED_HZ/2)
966 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
967 spi->mode & 0x3);
968
969 if (spi->bits_per_word <= 8) {
970 chip->n_bytes = 1;
971 chip->dma_width = DCMD_WIDTH1;
972 chip->read = u8_reader;
973 chip->write = u8_writer;
974 } else if (spi->bits_per_word <= 16) {
975 chip->n_bytes = 2;
976 chip->dma_width = DCMD_WIDTH2;
977 chip->read = u16_reader;
978 chip->write = u16_writer;
979 } else if (spi->bits_per_word <= 32) {
980 chip->cr0 |= SSCR0_EDSS;
981 chip->n_bytes = 4;
982 chip->dma_width = DCMD_WIDTH4;
983 chip->read = u32_reader;
984 chip->write = u32_writer;
985 } else {
986 dev_err(&spi->dev, "invalid wordsize\n");
987 kfree(chip);
988 return -ENODEV;
989 }
990
991 spi_set_ctldata(spi, chip);
992
993 return 0;
994}
995
996static void cleanup(const struct spi_device *spi)
997{
998 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
999
1000 kfree(chip);
1001}
1002
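/*
 * Set up the message queue, the transfer tasklet and the single-threaded
 * workqueue that pumps messages for this controller.
 */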
1003static int init_queue(struct driver_data *drv_data)
1004{
1005 INIT_LIST_HEAD(&drv_data->queue);
1006 spin_lock_init(&drv_data->lock);
1007
1008 drv_data->run = QUEUE_STOPPED;
1009 drv_data->busy = 0;
1010
1011 tasklet_init(&drv_data->pump_transfers,
1012 pump_transfers, (unsigned long)drv_data);
1013
1014 INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
1015 drv_data->workqueue = create_singlethread_workqueue(
1016 drv_data->master->cdev.dev->bus_id);
1017 if (drv_data->workqueue == NULL)
1018 return -EBUSY;
1019
1020 return 0;
1021}
1022
1023static int start_queue(struct driver_data *drv_data)
1024{
1025 unsigned long flags;
1026
1027 spin_lock_irqsave(&drv_data->lock, flags);
1028
1029 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1030 spin_unlock_irqrestore(&drv_data->lock, flags);
1031 return -EBUSY;
1032 }
1033
1034 drv_data->run = QUEUE_RUNNING;
1035 drv_data->cur_msg = NULL;
1036 drv_data->cur_transfer = NULL;
1037 drv_data->cur_chip = NULL;
1038 spin_unlock_irqrestore(&drv_data->lock, flags);
1039
1040 queue_work(drv_data->workqueue, &drv_data->pump_messages);
1041
1042 return 0;
1043}
1044
1045static int stop_queue(struct driver_data *drv_data)
1046{
1047 unsigned long flags;
1048 unsigned limit = 500;
1049 int status = 0;
1050
1051 spin_lock_irqsave(&drv_data->lock, flags);
1052
1053 /* This is a bit lame, but is optimized for the common execution path.
1054 * A wait_queue on the drv_data->busy could be used, but then the common
1055 * execution path (pump_messages) would be required to call wake_up or
1056 * friends on every SPI message. Do this instead */
1057 drv_data->run = QUEUE_STOPPED;
1058 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1059 spin_unlock_irqrestore(&drv_data->lock, flags);
1060 msleep(10);
1061 spin_lock_irqsave(&drv_data->lock, flags);
1062 }
1063
1064 if (!list_empty(&drv_data->queue) || drv_data->busy)
1065 status = -EBUSY;
1066
1067 spin_unlock_irqrestore(&drv_data->lock, flags);
1068
1069 return status;
1070}
1071
1072static int destroy_queue(struct driver_data *drv_data)
1073{
1074 int status;
1075
1076 status = stop_queue(drv_data);
1077 if (status != 0)
1078 return status;
1079
1080 destroy_workqueue(drv_data->workqueue);
1081
1082 return 0;
1083}
1084
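/*
 * Probe: allocate the spi_master, map the SSP registers, grab the IRQ and
 * (when requested) two DMA channels, load a default SSP configuration and
 * register the controller with the SPI core.
 */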
1085static int pxa2xx_spi_probe(struct platform_device *pdev)
1086{
1087 struct device *dev = &pdev->dev;
1088 struct pxa2xx_spi_master *platform_info;
1089 struct spi_master *master;
1090	struct driver_data *drv_data = NULL;
1091 struct resource *memory_resource;
1092 int irq;
1093 int status = 0;
1094
1095 platform_info = dev->platform_data;
1096
1097 if (platform_info->ssp_type == SSP_UNDEFINED) {
1098 dev_err(&pdev->dev, "undefined SSP\n");
1099 return -ENODEV;
1100 }
1101
1102 /* Allocate master with space for drv_data and null dma buffer */
1103 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1104 if (!master) {
1105 dev_err(&pdev->dev, "can not alloc spi_master\n");
1106 return -ENOMEM;
1107 }
1108 drv_data = spi_master_get_devdata(master);
1109 drv_data->master = master;
1110 drv_data->master_info = platform_info;
1111 drv_data->pdev = pdev;
1112
1113 master->bus_num = pdev->id;
1114 master->num_chipselect = platform_info->num_chipselect;
1115 master->cleanup = cleanup;
1116 master->setup = setup;
1117 master->transfer = transfer;
1118
1119 drv_data->ssp_type = platform_info->ssp_type;
1120	drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data +
1121						sizeof(struct driver_data), 8);
1122
1123 /* Setup register addresses */
1124 memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1125 if (!memory_resource) {
1126 dev_err(&pdev->dev, "memory resources not defined\n");
1127 status = -ENODEV;
1128 goto out_error_master_alloc;
1129 }
1130
1131 drv_data->ioaddr = (void *)io_p2v(memory_resource->start);
1132 drv_data->ssdr_physical = memory_resource->start + 0x00000010;
1133 if (platform_info->ssp_type == PXA25x_SSP) {
1134 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1135 drv_data->dma_cr1 = 0;
1136 drv_data->clear_sr = SSSR_ROR;
1137 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1138 } else {
1139 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1140 drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
1141 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1142 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1143 }
1144
1145 /* Attach to IRQ */
1146 irq = platform_get_irq(pdev, 0);
1147 if (irq < 0) {
1148 dev_err(&pdev->dev, "irq resource not defined\n");
1149 status = -ENODEV;
1150 goto out_error_master_alloc;
1151 }
1152
1153 status = request_irq(irq, ssp_int, SA_INTERRUPT, dev->bus_id, drv_data);
1154 if (status < 0) {
1155 dev_err(&pdev->dev, "can not get IRQ\n");
1156 goto out_error_master_alloc;
1157 }
1158
1159 /* Setup DMA if requested */
1160 drv_data->tx_channel = -1;
1161 drv_data->rx_channel = -1;
1162 if (platform_info->enable_dma) {
1163
1164 /* Get two DMA channels (rx and tx) */
1165 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
1166 DMA_PRIO_HIGH,
1167 dma_handler,
1168 drv_data);
1169 if (drv_data->rx_channel < 0) {
1170 dev_err(dev, "problem (%d) requesting rx channel\n",
1171 drv_data->rx_channel);
1172 status = -ENODEV;
1173 goto out_error_irq_alloc;
1174 }
1175 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1176 DMA_PRIO_MEDIUM,
1177 dma_handler,
1178 drv_data);
1179 if (drv_data->tx_channel < 0) {
1180 dev_err(dev, "problem (%d) requesting tx channel\n",
1181 drv_data->tx_channel);
1182 status = -ENODEV;
1183 goto out_error_dma_alloc;
1184 }
1185
1186 if (drv_data->ioaddr == SSP1_VIRT) {
1187 DRCMRRXSSDR = DRCMR_MAPVLD
1188 | drv_data->rx_channel;
1189 DRCMRTXSSDR = DRCMR_MAPVLD
1190 | drv_data->tx_channel;
1191 } else if (drv_data->ioaddr == SSP2_VIRT) {
1192 DRCMRRXSS2DR = DRCMR_MAPVLD
1193 | drv_data->rx_channel;
1194 DRCMRTXSS2DR = DRCMR_MAPVLD
1195 | drv_data->tx_channel;
1196 } else if (drv_data->ioaddr == SSP3_VIRT) {
1197 DRCMRRXSS3DR = DRCMR_MAPVLD
1198 | drv_data->rx_channel;
1199 DRCMRTXSS3DR = DRCMR_MAPVLD
1200 | drv_data->tx_channel;
1201 } else {
1202 dev_err(dev, "bad SSP type\n");
1203 goto out_error_dma_alloc;
1204 }
1205 }
1206
1207 /* Enable SOC clock */
1208 pxa_set_cken(platform_info->clock_enable, 1);
1209
1210 /* Load default SSP configuration */
1211 write_SSCR0(0, drv_data->ioaddr);
1212 write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
1213 write_SSCR0(SSCR0_SerClkDiv(2)
1214 | SSCR0_Motorola
1215 | SSCR0_DataSize(8),
1216 drv_data->ioaddr);
1217 if (drv_data->ssp_type != PXA25x_SSP)
1218 write_SSTO(0, drv_data->ioaddr);
1219 write_SSPSP(0, drv_data->ioaddr);
1220
1221	/* Initialize and start the queue */
1222 status = init_queue(drv_data);
1223 if (status != 0) {
1224 dev_err(&pdev->dev, "problem initializing queue\n");
1225 goto out_error_clock_enabled;
1226 }
1227 status = start_queue(drv_data);
1228 if (status != 0) {
1229 dev_err(&pdev->dev, "problem starting queue\n");
1230 goto out_error_clock_enabled;
1231 }
1232
1233 /* Register with the SPI framework */
1234 platform_set_drvdata(pdev, drv_data);
1235 status = spi_register_master(master);
1236 if (status != 0) {
1237 dev_err(&pdev->dev, "problem registering spi master\n");
1238 goto out_error_queue_alloc;
1239 }
1240
1241 return status;
1242
1243out_error_queue_alloc:
1244 destroy_queue(drv_data);
1245
1246out_error_clock_enabled:
1247 pxa_set_cken(platform_info->clock_enable, 0);
1248
1249out_error_dma_alloc:
1250 if (drv_data->tx_channel != -1)
1251 pxa_free_dma(drv_data->tx_channel);
1252 if (drv_data->rx_channel != -1)
1253 pxa_free_dma(drv_data->rx_channel);
1254
1255out_error_irq_alloc:
1256 free_irq(irq, drv_data);
1257
1258out_error_master_alloc:
1259 spi_master_put(master);
1260 return status;
1261}
1262
1263static int pxa2xx_spi_remove(struct platform_device *pdev)
1264{
1265 struct driver_data *drv_data = platform_get_drvdata(pdev);
1266 int irq;
1267 int status = 0;
1268
1269 if (!drv_data)
1270 return 0;
1271
1272 /* Remove the queue */
1273 status = destroy_queue(drv_data);
1274 if (status != 0)
1275 return status;
1276
1277 /* Disable the SSP at the peripheral and SOC level */
1278 write_SSCR0(0, drv_data->ioaddr);
1279 pxa_set_cken(drv_data->master_info->clock_enable, 0);
1280
1281 /* Release DMA */
1282 if (drv_data->master_info->enable_dma) {
1283 if (drv_data->ioaddr == SSP1_VIRT) {
1284 DRCMRRXSSDR = 0;
1285 DRCMRTXSSDR = 0;
1286 } else if (drv_data->ioaddr == SSP2_VIRT) {
1287 DRCMRRXSS2DR = 0;
1288 DRCMRTXSS2DR = 0;
1289 } else if (drv_data->ioaddr == SSP3_VIRT) {
1290 DRCMRRXSS3DR = 0;
1291 DRCMRTXSS3DR = 0;
1292 }
1293 pxa_free_dma(drv_data->tx_channel);
1294 pxa_free_dma(drv_data->rx_channel);
1295 }
1296
1297 /* Release IRQ */
1298 irq = platform_get_irq(pdev, 0);
1299 if (irq >= 0)
1300 free_irq(irq, drv_data);
1301
1302 /* Disconnect from the SPI framework */
1303 spi_unregister_master(drv_data->master);
1304
1305 /* Prevent double remove */
1306 platform_set_drvdata(pdev, NULL);
1307
1308 return 0;
1309}
1310
1311static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1312{
1313 int status = 0;
1314
1315 if ((status = pxa2xx_spi_remove(pdev)) != 0)
1316 dev_err(&pdev->dev, "shutdown failed with %d\n", status);
1317}
1318
1319#ifdef CONFIG_PM
1320static int suspend_devices(struct device *dev, void *pm_message)
1321{
1322 pm_message_t *state = pm_message;
1323
1324 if (dev->power.power_state.event != state->event) {
1325 dev_warn(dev, "pm state does not match request\n");
1326 return -1;
1327 }
1328
1329 return 0;
1330}
1331
1332static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1333{
1334 struct driver_data *drv_data = platform_get_drvdata(pdev);
1335 int status = 0;
1336
1337	/* Check all children for current power state */
1338 if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
1339 dev_warn(&pdev->dev, "suspend aborted\n");
1340 return -1;
1341 }
1342
1343 status = stop_queue(drv_data);
1344 if (status != 0)
1345 return status;
1346 write_SSCR0(0, drv_data->ioaddr);
1347 pxa_set_cken(drv_data->master_info->clock_enable, 0);
1348
1349 return 0;
1350}
1351
1352static int pxa2xx_spi_resume(struct platform_device *pdev)
1353{
1354 struct driver_data *drv_data = platform_get_drvdata(pdev);
1355 int status = 0;
1356
1357 /* Enable the SSP clock */
1358 pxa_set_cken(drv_data->master_info->clock_enable, 1);
1359
1360 /* Start the queue running */
1361 status = start_queue(drv_data);
1362 if (status != 0) {
1363 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1364 return status;
1365 }
1366
1367 return 0;
1368}
1369#else
1370#define pxa2xx_spi_suspend NULL
1371#define pxa2xx_spi_resume NULL
1372#endif /* CONFIG_PM */
1373
1374static struct platform_driver driver = {
1375 .driver = {
1376 .name = "pxa2xx-spi",
1377 .bus = &platform_bus_type,
1378 .owner = THIS_MODULE,
1379 },
1380 .probe = pxa2xx_spi_probe,
1381 .remove = __devexit_p(pxa2xx_spi_remove),
1382 .shutdown = pxa2xx_spi_shutdown,
1383 .suspend = pxa2xx_spi_suspend,
1384 .resume = pxa2xx_spi_resume,
1385};
1386
1387static int __init pxa2xx_spi_init(void)
1388{
1389 platform_driver_register(&driver);
1390
1391 return 0;
1392}
1393module_init(pxa2xx_spi_init);
1394
1395static void __exit pxa2xx_spi_exit(void)
1396{
1397 platform_driver_unregister(&driver);
1398}
1399module_exit(pxa2xx_spi_exit);