1 /*
2 * drivers/spi/amba-pl022.c
3 *
4 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
5 *
6 * Copyright (C) 2008-2009 ST-Ericsson AB
7 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
8 *
9 * Author: Linus Walleij <linus.walleij@stericsson.com>
10 *
11 * Initial version inspired by:
12 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
13 * Initial adoption to PL022 by:
14 * Sachin Verma <sachin.verma@st.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 */
26
27 /*
28 * TODO:
29 * - add timeout on polled transfers
30 * - add generic DMA framework support
31 */
32
33 #include <linux/init.h>
34 #include <linux/module.h>
35 #include <linux/device.h>
36 #include <linux/ioport.h>
37 #include <linux/errno.h>
38 #include <linux/interrupt.h>
39 #include <linux/spi/spi.h>
40 #include <linux/workqueue.h>
41 #include <linux/delay.h>
42 #include <linux/clk.h>
43 #include <linux/err.h>
44 #include <linux/amba/bus.h>
45 #include <linux/amba/pl022.h>
46 #include <linux/io.h>
47 #include <linux/slab.h>
48
49 /*
50 * This macro is used to define some register default values.
51  * reg is masked with mask, then OR:ed with an (again masked)
52 * val shifted sb steps to the left.
53 */
54 #define SSP_WRITE_BITS(reg, val, mask, sb) \
55 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
56
57 /*
58 * This macro is also used to define some default values.
59 * It will just shift val by sb steps to the left and mask
60 * the result with mask.
61 */
62 #define GEN_MASK_BITS(val, mask, sb) \
63 (((val)<<(sb)) & (mask))
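/*
 * A minimal usage sketch (illustration only, not code the driver runs):
 * given a local register image cr0, the register masks defined further
 * down and values from <linux/amba/pl022.h>, a field can be updated in
 * place or a masked value can be generated for OR:ing in:
 *
 *	u32 cr0 = 0;
 *	SSP_WRITE_BITS(cr0, SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0);
 *	cr0 |= GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6);
 *
 * SSP_WRITE_BITS() read-modify-writes the field, clearing it first, while
 * GEN_MASK_BITS() only produces the shifted and masked value.
 */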
64
65 #define DRIVE_TX 0
66 #define DO_NOT_DRIVE_TX 1
67
68 #define DO_NOT_QUEUE_DMA 0
69 #define QUEUE_DMA 1
70
71 #define RX_TRANSFER 1
72 #define TX_TRANSFER 2
73
74 /*
75 * Macros to access SSP Registers with their offsets
76 */
77 #define SSP_CR0(r) (r + 0x000)
78 #define SSP_CR1(r) (r + 0x004)
79 #define SSP_DR(r) (r + 0x008)
80 #define SSP_SR(r) (r + 0x00C)
81 #define SSP_CPSR(r) (r + 0x010)
82 #define SSP_IMSC(r) (r + 0x014)
83 #define SSP_RIS(r) (r + 0x018)
84 #define SSP_MIS(r) (r + 0x01C)
85 #define SSP_ICR(r) (r + 0x020)
86 #define SSP_DMACR(r) (r + 0x024)
87 #define SSP_ITCR(r) (r + 0x080)
88 #define SSP_ITIP(r) (r + 0x084)
89 #define SSP_ITOP(r) (r + 0x088)
90 #define SSP_TDR(r) (r + 0x08C)
91
92 #define SSP_PID0(r) (r + 0xFE0)
93 #define SSP_PID1(r) (r + 0xFE4)
94 #define SSP_PID2(r) (r + 0xFE8)
95 #define SSP_PID3(r) (r + 0xFEC)
96
97 #define SSP_CID0(r) (r + 0xFF0)
98 #define SSP_CID1(r) (r + 0xFF4)
99 #define SSP_CID2(r) (r + 0xFF8)
100 #define SSP_CID3(r) (r + 0xFFC)
101
102 /*
103 * SSP Control Register 0 - SSP_CR0
104 */
105 #define SSP_CR0_MASK_DSS (0x0FUL << 0)
106 #define SSP_CR0_MASK_FRF (0x3UL << 4)
107 #define SSP_CR0_MASK_SPO (0x1UL << 6)
108 #define SSP_CR0_MASK_SPH (0x1UL << 7)
109 #define SSP_CR0_MASK_SCR (0xFFUL << 8)
110
111 /*
112  * The ST version of this block moves some bits
113 * in SSP_CR0 and extends it to 32 bits
114 */
115 #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
116 #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
117 #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
118 #define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
119
120
121 /*
122  * SSP Control Register 1 - SSP_CR1
123 */
124 #define SSP_CR1_MASK_LBM (0x1UL << 0)
125 #define SSP_CR1_MASK_SSE (0x1UL << 1)
126 #define SSP_CR1_MASK_MS (0x1UL << 2)
127 #define SSP_CR1_MASK_SOD (0x1UL << 3)
128
129 /*
130 * The ST version of this block adds some bits
131 * in SSP_CR1
132 */
133 #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
134 #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
135 #define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
136 #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
137 #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
138 /* This one is only in the PL023 variant */
139 #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
140
141 /*
142 * SSP Status Register - SSP_SR
143 */
144 #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
145 #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
146 #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
147 #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
148 #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
149
150 /*
151 * SSP Clock Prescale Register - SSP_CPSR
152 */
153 #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
154
155 /*
156 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
157 */
158 #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
159 #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
160 #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
161 #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
162
163 /*
164 * SSP Raw Interrupt Status Register - SSP_RIS
165 */
166 /* Receive Overrun Raw Interrupt status */
167 #define SSP_RIS_MASK_RORRIS (0x1UL << 0)
168 /* Receive Timeout Raw Interrupt status */
169 #define SSP_RIS_MASK_RTRIS (0x1UL << 1)
170 /* Receive FIFO Raw Interrupt status */
171 #define SSP_RIS_MASK_RXRIS (0x1UL << 2)
172 /* Transmit FIFO Raw Interrupt status */
173 #define SSP_RIS_MASK_TXRIS (0x1UL << 3)
174
175 /*
176 * SSP Masked Interrupt Status Register - SSP_MIS
177 */
178 /* Receive Overrun Masked Interrupt status */
179 #define SSP_MIS_MASK_RORMIS (0x1UL << 0)
180 /* Receive Timeout Masked Interrupt status */
181 #define SSP_MIS_MASK_RTMIS (0x1UL << 1)
182 /* Receive FIFO Masked Interrupt status */
183 #define SSP_MIS_MASK_RXMIS (0x1UL << 2)
184 /* Transmit FIFO Masked Interrupt status */
185 #define SSP_MIS_MASK_TXMIS (0x1UL << 3)
186
187 /*
188 * SSP Interrupt Clear Register - SSP_ICR
189 */
190 /* Receive Overrun Raw Clear Interrupt bit */
191 #define SSP_ICR_MASK_RORIC (0x1UL << 0)
192 /* Receive Timeout Clear Interrupt bit */
193 #define SSP_ICR_MASK_RTIC (0x1UL << 1)
194
195 /*
196 * SSP DMA Control Register - SSP_DMACR
197 */
198 /* Receive DMA Enable bit */
199 #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
200 /* Transmit DMA Enable bit */
201 #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
202
203 /*
204 * SSP Integration Test control Register - SSP_ITCR
205 */
206 #define SSP_ITCR_MASK_ITEN (0x1UL << 0)
207 #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
208
209 /*
210 * SSP Integration Test Input Register - SSP_ITIP
211 */
212 #define ITIP_MASK_SSPRXD (0x1UL << 0)
213 #define ITIP_MASK_SSPFSSIN (0x1UL << 1)
214 #define ITIP_MASK_SSPCLKIN (0x1UL << 2)
215 #define ITIP_MASK_RXDMAC (0x1UL << 3)
216 #define ITIP_MASK_TXDMAC (0x1UL << 4)
217 #define ITIP_MASK_SSPTXDIN (0x1UL << 5)
218
219 /*
220 * SSP Integration Test output Register - SSP_ITOP
221 */
222 #define ITOP_MASK_SSPTXD (0x1UL << 0)
223 #define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
224 #define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
225 #define ITOP_MASK_SSPOEn (0x1UL << 3)
226 #define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
227 #define ITOP_MASK_RORINTR (0x1UL << 5)
228 #define ITOP_MASK_RTINTR (0x1UL << 6)
229 #define ITOP_MASK_RXINTR (0x1UL << 7)
230 #define ITOP_MASK_TXINTR (0x1UL << 8)
231 #define ITOP_MASK_INTR (0x1UL << 9)
232 #define ITOP_MASK_RXDMABREQ (0x1UL << 10)
233 #define ITOP_MASK_RXDMASREQ (0x1UL << 11)
234 #define ITOP_MASK_TXDMABREQ (0x1UL << 12)
235 #define ITOP_MASK_TXDMASREQ (0x1UL << 13)
236
237 /*
238 * SSP Test Data Register - SSP_TDR
239 */
240 #define TDR_MASK_TESTDATA (0xFFFFFFFF)
241
242 /*
243 * Message State
244 * we use the spi_message.state (void *) pointer to
245 * hold a single state value, that's why all this
246 * (void *) casting is done here.
247 */
248 #define STATE_START ((void *) 0)
249 #define STATE_RUNNING ((void *) 1)
250 #define STATE_DONE ((void *) 2)
251 #define STATE_ERROR ((void *) -1)
252
253 /*
254 * Queue State
255 */
256 #define QUEUE_RUNNING (0)
257 #define QUEUE_STOPPED (1)
258 /*
259 * SSP State - Whether Enabled or Disabled
260 */
261 #define SSP_DISABLED (0)
262 #define SSP_ENABLED (1)
263
264 /*
265 * SSP DMA State - Whether DMA Enabled or Disabled
266 */
267 #define SSP_DMA_DISABLED (0)
268 #define SSP_DMA_ENABLED (1)
269
270 /*
271 * SSP Clock Defaults
272 */
273 #define SSP_DEFAULT_CLKRATE 0x2
274 #define SSP_DEFAULT_PRESCALE 0x40
275
276 /*
277 * SSP Clock Parameter ranges
278 */
279 #define CPSDVR_MIN 0x02
280 #define CPSDVR_MAX 0xFE
281 #define SCR_MIN 0x00
282 #define SCR_MAX 0xFF
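/*
 * The effective SSP bit rate is rate / (CPSDVSR * (1 + SCR)), where rate
 * is what clk_get_rate() returns for the block clock; calculate_effective_freq()
 * further down solves for these divisors. As a worked example, assuming a
 * (board specific) 48 MHz SSPCLK:
 *
 *	48 MHz / (CPSDVR_MIN * (1 + SCR_MIN)) = 48 MHz / 2     = 24 MHz max
 *	48 MHz / (CPSDVR_MAX * (1 + SCR_MAX)) = 48 MHz / 65024 ~= 738 Hz min
 */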
283
284 /*
285 * SSP Interrupt related Macros
286 */
287 #define DEFAULT_SSP_REG_IMSC 0x0UL
288 #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
289 #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
290
291 #define CLEAR_ALL_INTERRUPTS 0x3
292
293
294 /*
295 * The type of reading going on on this chip
296 */
297 enum ssp_reading {
298 READING_NULL,
299 READING_U8,
300 READING_U16,
301 READING_U32
302 };
303
304 /**
305 * The type of writing going on on this chip
306 */
307 enum ssp_writing {
308 WRITING_NULL,
309 WRITING_U8,
310 WRITING_U16,
311 WRITING_U32
312 };
313
314 /**
315 * struct vendor_data - vendor-specific config parameters
316 * for PL022 derivates
317 * @fifodepth: depth of FIFOs (both)
318 * @max_bpw: maximum number of bits per word
319  * @unidir: supports unidirectional transfers
320 * @extended_cr: 32 bit wide control register 0 with extra
321 * features and extra features in CR1 as found in the ST variants
322 * @pl023: supports a subset of the ST extensions called "PL023"
323 */
324 struct vendor_data {
325 int fifodepth;
326 int max_bpw;
327 bool unidir;
328 bool extended_cr;
329 bool pl023;
330 };
331
332 /**
333 * struct pl022 - This is the private SSP driver data structure
334 * @adev: AMBA device model hookup
335 * @vendor: Vendor data for the IP block
336 * @phybase: The physical memory where the SSP device resides
337 * @virtbase: The virtual memory where the SSP is mapped
338 * @master: SPI framework hookup
339 * @master_info: controller-specific data from machine setup
340  * @clk: pointer to the clock that clocks the SSP block
341 * @pump_messages: Work struct for scheduling work to the workqueue
342  * @queue_lock: spinlock to synchronise access to driver data
343 * @workqueue: a workqueue on which any spi_message request is queued
344 * @busy: workqueue is busy
345 * @run: workqueue is running
346 * @pump_transfers: Tasklet used in Interrupt Transfer mode
347 * @cur_msg: Pointer to current spi_message being processed
348 * @cur_transfer: Pointer to current spi_transfer
349  * @cur_chip: pointer to the current client's chip (assigned from controller_state)
350 * @tx: current position in TX buffer to be read
351 * @tx_end: end position in TX buffer to be read
352 * @rx: current position in RX buffer to be written
353 * @rx_end: end position in RX buffer to be written
354  * @read: the type of read currently going on
355  * @write: the type of write currently going on
356 */
357 struct pl022 {
358 struct amba_device *adev;
359 struct vendor_data *vendor;
360 resource_size_t phybase;
361 void __iomem *virtbase;
362 struct clk *clk;
363 struct spi_master *master;
364 struct pl022_ssp_controller *master_info;
365 /* Driver message queue */
366 struct workqueue_struct *workqueue;
367 struct work_struct pump_messages;
368 spinlock_t queue_lock;
369 struct list_head queue;
370 int busy;
371 int run;
372 /* Message transfer pump */
373 struct tasklet_struct pump_transfers;
374 struct spi_message *cur_msg;
375 struct spi_transfer *cur_transfer;
376 struct chip_data *cur_chip;
377 void *tx;
378 void *tx_end;
379 void *rx;
380 void *rx_end;
381 enum ssp_reading read;
382 enum ssp_writing write;
383 u32 exp_fifo_level;
384 };
385
386 /**
387 * struct chip_data - To maintain runtime state of SSP for each client chip
388 * @cr0: Value of control register CR0 of SSP - on later ST variants this
389 * register is 32 bits wide rather than just 16
390 * @cr1: Value of control register CR1 of SSP
391 * @dmacr: Value of DMA control Register of SSP
392 * @cpsr: Value of Clock prescale register
393  * @n_bytes: how many bytes (a power of 2) are needed for the client's data width
394 * @enable_dma: Whether to enable DMA or not
395 * @write: function ptr to be used to write when doing xfer for this chip
396 * @read: function ptr to be used to read when doing xfer for this chip
397 * @cs_control: chip select callback provided by chip
398 * @xfer_type: polling/interrupt/DMA
399 *
400  * Runtime state of the SSP controller, maintained per chip.
401  * It is set up according to the current message being served.
402 */
403 struct chip_data {
404 u32 cr0;
405 u16 cr1;
406 u16 dmacr;
407 u16 cpsr;
408 u8 n_bytes;
409 u8 enable_dma:1;
410 enum ssp_reading read;
411 enum ssp_writing write;
412 void (*cs_control) (u32 command);
413 int xfer_type;
414 };
415
416 /**
417 * null_cs_control - Dummy chip select function
418  * @command: select/deselect the chip
419 *
420 * If no chip select function is provided by client this is used as dummy
421 * chip select
422 */
423 static void null_cs_control(u32 command)
424 {
425 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
426 }
427
428 /**
429 * giveback - current spi_message is over, schedule next message and call
430 * callback of this message. Assumes that caller already
431 * set message->status; dma and pio irqs are blocked
432 * @pl022: SSP driver private data structure
433 */
434 static void giveback(struct pl022 *pl022)
435 {
436 struct spi_transfer *last_transfer;
437 unsigned long flags;
438 struct spi_message *msg;
439 void (*curr_cs_control) (u32 command);
440
441 /*
442 * This local reference to the chip select function
443 * is needed because we set curr_chip to NULL
444  * as a step toward terminating the message.
445 */
446 curr_cs_control = pl022->cur_chip->cs_control;
447 spin_lock_irqsave(&pl022->queue_lock, flags);
448 msg = pl022->cur_msg;
449 pl022->cur_msg = NULL;
450 pl022->cur_transfer = NULL;
451 pl022->cur_chip = NULL;
452 queue_work(pl022->workqueue, &pl022->pump_messages);
453 spin_unlock_irqrestore(&pl022->queue_lock, flags);
454
455 last_transfer = list_entry(msg->transfers.prev,
456 struct spi_transfer,
457 transfer_list);
458
459 /* Delay if requested before any change in chip select */
460 if (last_transfer->delay_usecs)
461 /*
462 * FIXME: This runs in interrupt context.
463 * Is this really smart?
464 */
465 udelay(last_transfer->delay_usecs);
466
467 /*
468 * Drop chip select UNLESS cs_change is true or we are returning
469 * a message with an error, or next message is for another chip
470 */
471 if (!last_transfer->cs_change)
472 curr_cs_control(SSP_CHIP_DESELECT);
473 else {
474 struct spi_message *next_msg;
475
476 /* Holding of cs was hinted, but we need to make sure
477 * the next message is for the same chip. Don't waste
478 * time with the following tests unless this was hinted.
479 *
480 * We cannot postpone this until pump_messages, because
481 * after calling msg->complete (below) the driver that
482 * sent the current message could be unloaded, which
483 * could invalidate the cs_control() callback...
484 */
485
486 /* get a pointer to the next message, if any */
487 spin_lock_irqsave(&pl022->queue_lock, flags);
488 if (list_empty(&pl022->queue))
489 next_msg = NULL;
490 else
491 next_msg = list_entry(pl022->queue.next,
492 struct spi_message, queue);
493 spin_unlock_irqrestore(&pl022->queue_lock, flags);
494
495 /* see if the next and current messages point
496 * to the same chip
497 */
498 if (next_msg && next_msg->spi != msg->spi)
499 next_msg = NULL;
500 if (!next_msg || msg->state == STATE_ERROR)
501 curr_cs_control(SSP_CHIP_DESELECT);
502 }
503 msg->state = NULL;
504 if (msg->complete)
505 msg->complete(msg->context);
506 /* This message is completed, so let's turn off the clock! */
507 clk_disable(pl022->clk);
508 }
509
510 /**
511 * flush - flush the FIFO to reach a clean state
512 * @pl022: SSP driver private data structure
513 */
514 static int flush(struct pl022 *pl022)
515 {
516 unsigned long limit = loops_per_jiffy << 1;
517
518 dev_dbg(&pl022->adev->dev, "flush\n");
519 do {
520 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
521 readw(SSP_DR(pl022->virtbase));
522 } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
523
524 pl022->exp_fifo_level = 0;
525
526 return limit;
527 }
528
529 /**
530 * restore_state - Load configuration of current chip
531 * @pl022: SSP driver private data structure
532 */
533 static void restore_state(struct pl022 *pl022)
534 {
535 struct chip_data *chip = pl022->cur_chip;
536
537 if (pl022->vendor->extended_cr)
538 writel(chip->cr0, SSP_CR0(pl022->virtbase));
539 else
540 writew(chip->cr0, SSP_CR0(pl022->virtbase));
541 writew(chip->cr1, SSP_CR1(pl022->virtbase));
542 writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
543 writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
544 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
545 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
546 }
547
548 /*
549 * Default SSP Register Values
550 */
551 #define DEFAULT_SSP_REG_CR0 ( \
552 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
553 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
554 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
555 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
556 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
557 )
558
559 /* ST versions have slightly different bit layout */
560 #define DEFAULT_SSP_REG_CR0_ST ( \
561 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
562 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
563 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
564 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
565 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
566 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
567 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
568 )
569
570 /* The PL023 version is slightly different again */
571 #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
572 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
573 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
574 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
575 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
576 )
577
578 #define DEFAULT_SSP_REG_CR1 ( \
579 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
580 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
581 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
582 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
583 )
584
585 /* ST versions extend this register to use all 16 bits */
586 #define DEFAULT_SSP_REG_CR1_ST ( \
587 DEFAULT_SSP_REG_CR1 | \
588 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
589 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
590 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
591 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
592 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
593 )
594
595 /*
596 * The PL023 variant has further differences: no loopback mode, no microwire
597 * support, and a new clock feedback delay setting.
598 */
599 #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
600 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
601 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
602 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
603 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
604 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
605 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
606 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
607 GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
608 )
609
610 #define DEFAULT_SSP_REG_CPSR ( \
611 GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
612 )
613
614 #define DEFAULT_SSP_REG_DMACR (\
615 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
616 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
617 )
618
619 /**
620 * load_ssp_default_config - Load default configuration for SSP
621 * @pl022: SSP driver private data structure
622 */
623 static void load_ssp_default_config(struct pl022 *pl022)
624 {
625 if (pl022->vendor->pl023) {
626 writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
627 writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
628 } else if (pl022->vendor->extended_cr) {
629 writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
630 writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
631 } else {
632 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
633 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
634 }
635 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
636 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
637 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
638 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
639 }
640
641 /**
642  * readwriter - write to the TX FIFO and read from the RX FIFO according
643  * to the parameters set in pl022.
644 */
645 static void readwriter(struct pl022 *pl022)
646 {
647
648 /*
649  * The FIFO depth differs between primecell variants.
650  * Filling the FIFO too far seems to cause errors, at least
651  * in 8bit wide transfers on the ARM variant (just an 8 word
652  * FIFO, meaning only 8x8 = 64 bits in the FIFO).
653 *
654 * To prevent this issue, the TX FIFO is only filled to the
655 * unused RX FIFO fill length, regardless of what the TX
656 * FIFO status flag indicates.
657 */
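/*
 * Worked example with made-up numbers: on the ARM variant the FIFOs are
 * 8 entries deep. If exp_fifo_level is 5 after the read loop below, the
 * outer write loop will only push words until exp_fifo_level reaches 8
 * again (at most 3 more, unless the inner read loop drains some first),
 * so the RX FIFO is never asked to hold more than its 8 entries.
 */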
658 dev_dbg(&pl022->adev->dev,
659 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
660 __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
661
662 /* Read as much as you can */
663 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
664 && (pl022->rx < pl022->rx_end)) {
665 switch (pl022->read) {
666 case READING_NULL:
667 readw(SSP_DR(pl022->virtbase));
668 break;
669 case READING_U8:
670 *(u8 *) (pl022->rx) =
671 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
672 break;
673 case READING_U16:
674 *(u16 *) (pl022->rx) =
675 (u16) readw(SSP_DR(pl022->virtbase));
676 break;
677 case READING_U32:
678 *(u32 *) (pl022->rx) =
679 readl(SSP_DR(pl022->virtbase));
680 break;
681 }
682 pl022->rx += (pl022->cur_chip->n_bytes);
683 pl022->exp_fifo_level--;
684 }
685 /*
686 * Write as much as possible up to the RX FIFO size
687 */
688 while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
689 && (pl022->tx < pl022->tx_end)) {
690 switch (pl022->write) {
691 case WRITING_NULL:
692 writew(0x0, SSP_DR(pl022->virtbase));
693 break;
694 case WRITING_U8:
695 writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
696 break;
697 case WRITING_U16:
698 writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
699 break;
700 case WRITING_U32:
701 writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
702 break;
703 }
704 pl022->tx += (pl022->cur_chip->n_bytes);
705 pl022->exp_fifo_level++;
706 /*
707 * This inner reader takes care of things appearing in the RX
708 * FIFO as we're transmitting. This will happen a lot since the
709 * clock starts running when you put things into the TX FIFO,
710  * and then things are continuously clocked into the RX FIFO.
711 */
712 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
713 && (pl022->rx < pl022->rx_end)) {
714 switch (pl022->read) {
715 case READING_NULL:
716 readw(SSP_DR(pl022->virtbase));
717 break;
718 case READING_U8:
719 *(u8 *) (pl022->rx) =
720 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
721 break;
722 case READING_U16:
723 *(u16 *) (pl022->rx) =
724 (u16) readw(SSP_DR(pl022->virtbase));
725 break;
726 case READING_U32:
727 *(u32 *) (pl022->rx) =
728 readl(SSP_DR(pl022->virtbase));
729 break;
730 }
731 pl022->rx += (pl022->cur_chip->n_bytes);
732 pl022->exp_fifo_level--;
733 }
734 }
735 /*
736 * When we exit here the TX FIFO should be full and the RX FIFO
737 * should be empty
738 */
739 }
740
741
742 /**
743 * next_transfer - Move to the Next transfer in the current spi message
744 * @pl022: SSP driver private data structure
745 *
746  * This function moves through the linked list of spi transfers in the
747  * current spi message and returns with the state of the current spi
748  * message, i.e. whether its last transfer is done (STATE_DONE) or the
749  * next transfer is ready (STATE_RUNNING)
750 */
751 static void *next_transfer(struct pl022 *pl022)
752 {
753 struct spi_message *msg = pl022->cur_msg;
754 struct spi_transfer *trans = pl022->cur_transfer;
755
756 /* Move to next transfer */
757 if (trans->transfer_list.next != &msg->transfers) {
758 pl022->cur_transfer =
759 list_entry(trans->transfer_list.next,
760 struct spi_transfer, transfer_list);
761 return STATE_RUNNING;
762 }
763 return STATE_DONE;
764 }
765 /**
766 * pl022_interrupt_handler - Interrupt handler for SSP controller
767 *
768 * This function handles interrupts generated for an interrupt based transfer.
769  * If a receive overrun (ROR) interrupt occurs then we disable the SSP, flag the
770 * current message's state as STATE_ERROR and schedule the tasklet
771 * pump_transfers which will do the postprocessing of the current message by
772  * calling giveback(). Otherwise it reads data from the RX FIFO until it is
773  * empty, and writes data to the TX FIFO until it is full. If we complete
774 * the transfer we move to the next transfer and schedule the tasklet.
775 */
776 static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
777 {
778 struct pl022 *pl022 = dev_id;
779 struct spi_message *msg = pl022->cur_msg;
780 u16 irq_status = 0;
781 u16 flag = 0;
782
783 if (unlikely(!msg)) {
784 dev_err(&pl022->adev->dev,
785 "bad message state in interrupt handler");
786 /* Never fail */
787 return IRQ_HANDLED;
788 }
789
790 /* Read the Interrupt Status Register */
791 irq_status = readw(SSP_MIS(pl022->virtbase));
792
793 if (unlikely(!irq_status))
794 return IRQ_NONE;
795
796 /* This handles the error code interrupts */
797 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
798 /*
799 * Overrun interrupt - bail out since our Data has been
800 * corrupted
801 */
802 dev_err(&pl022->adev->dev,
803 "FIFO overrun\n");
804 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
805 dev_err(&pl022->adev->dev,
806 "RXFIFO is full\n");
807 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
808 dev_err(&pl022->adev->dev,
809 "TXFIFO is full\n");
810
811 /*
812 * Disable and clear interrupts, disable SSP,
813 * mark message with bad status so it can be
814 * retried.
815 */
816 writew(DISABLE_ALL_INTERRUPTS,
817 SSP_IMSC(pl022->virtbase));
818 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
819 writew((readw(SSP_CR1(pl022->virtbase)) &
820 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
821 msg->state = STATE_ERROR;
822
823 /* Schedule message queue handler */
824 tasklet_schedule(&pl022->pump_transfers);
825 return IRQ_HANDLED;
826 }
827
828 readwriter(pl022);
829
830 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
831 flag = 1;
832 /* Disable Transmit interrupt */
833 writew(readw(SSP_IMSC(pl022->virtbase)) &
834 (~SSP_IMSC_MASK_TXIM),
835 SSP_IMSC(pl022->virtbase));
836 }
837
838 /*
839 * Since all transactions must write as much as shall be read,
840 * we can conclude the entire transaction once RX is complete.
841 * At this point, all TX will always be finished.
842 */
843 if (pl022->rx >= pl022->rx_end) {
844 writew(DISABLE_ALL_INTERRUPTS,
845 SSP_IMSC(pl022->virtbase));
846 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
847 if (unlikely(pl022->rx > pl022->rx_end)) {
848 dev_warn(&pl022->adev->dev, "read %u surplus "
849 "bytes (did you request an odd "
850 "number of bytes on a 16bit bus?)\n",
851 (u32) (pl022->rx - pl022->rx_end));
852 }
853  /* Update total bytes transferred */
854 msg->actual_length += pl022->cur_transfer->len;
855 if (pl022->cur_transfer->cs_change)
856 pl022->cur_chip->
857 cs_control(SSP_CHIP_DESELECT);
858 /* Move to next transfer */
859 msg->state = next_transfer(pl022);
860 tasklet_schedule(&pl022->pump_transfers);
861 return IRQ_HANDLED;
862 }
863
864 return IRQ_HANDLED;
865 }
866
867 /**
868  * set_up_next_transfer - set up the buffer pointers for the next transfer
869  * to send out on the SPI bus.
870 */
871 static int set_up_next_transfer(struct pl022 *pl022,
872 struct spi_transfer *transfer)
873 {
874 int residue;
875
876 /* Sanity check the message for this bus width */
877 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
878 if (unlikely(residue != 0)) {
879 dev_err(&pl022->adev->dev,
880 "message of %u bytes to transmit but the current "
881 "chip bus has a data width of %u bytes!\n",
882 pl022->cur_transfer->len,
883 pl022->cur_chip->n_bytes);
884 dev_err(&pl022->adev->dev, "skipping this message\n");
885 return -EIO;
886 }
887 pl022->tx = (void *)transfer->tx_buf;
888 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
889 pl022->rx = (void *)transfer->rx_buf;
890 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
891 pl022->write =
892 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
893 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
894 return 0;
895 }
896
897 /**
898 * pump_transfers - Tasklet function which schedules next interrupt transfer
899 * when running in interrupt transfer mode.
900 * @data: SSP driver private data structure
901 *
902 */
903 static void pump_transfers(unsigned long data)
904 {
905 struct pl022 *pl022 = (struct pl022 *) data;
906 struct spi_message *message = NULL;
907 struct spi_transfer *transfer = NULL;
908 struct spi_transfer *previous = NULL;
909
910 /* Get current state information */
911 message = pl022->cur_msg;
912 transfer = pl022->cur_transfer;
913
914 /* Handle for abort */
915 if (message->state == STATE_ERROR) {
916 message->status = -EIO;
917 giveback(pl022);
918 return;
919 }
920
921 /* Handle end of message */
922 if (message->state == STATE_DONE) {
923 message->status = 0;
924 giveback(pl022);
925 return;
926 }
927
928 /* Delay if requested at end of transfer before CS change */
929 if (message->state == STATE_RUNNING) {
930 previous = list_entry(transfer->transfer_list.prev,
931 struct spi_transfer,
932 transfer_list);
933 if (previous->delay_usecs)
934 /*
935 * FIXME: This runs in interrupt context.
936 * Is this really smart?
937 */
938 udelay(previous->delay_usecs);
939
940 /* Drop chip select only if cs_change is requested */
941 if (previous->cs_change)
942 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
943 } else {
944 /* STATE_START */
945 message->state = STATE_RUNNING;
946 }
947
948 if (set_up_next_transfer(pl022, transfer)) {
949 message->state = STATE_ERROR;
950 message->status = -EIO;
951 giveback(pl022);
952 return;
953 }
954 /* Flush the FIFOs and let's go! */
955 flush(pl022);
956 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
957 }
958
959 /**
960 * NOT IMPLEMENTED
961 * configure_dma - It configures the DMA pipes for DMA transfers
962 * @data: SSP driver's private data structure
963 *
964 */
965 static int configure_dma(void *data)
966 {
967 struct pl022 *pl022 = data;
968 dev_dbg(&pl022->adev->dev, "configure DMA\n");
969 return -ENOTSUPP;
970 }
971
972 /**
973 * do_dma_transfer - It handles transfers of the current message
974 * if it is DMA xfer.
975 * NOT FULLY IMPLEMENTED
976 * @data: SSP driver's private data structure
977 */
978 static void do_dma_transfer(void *data)
979 {
980 struct pl022 *pl022 = data;
981
982 if (configure_dma(data)) {
983 dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
984 goto err_config_dma;
985 }
986
987  /* TODO: Implement DMA setup of pipes here */
988
989 /* Enable target chip, set up transfer */
990 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
991 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
992 /* Error path */
993 pl022->cur_msg->state = STATE_ERROR;
994 pl022->cur_msg->status = -EIO;
995 giveback(pl022);
996 return;
997 }
998 /* Enable SSP */
999 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1000 SSP_CR1(pl022->virtbase));
1001
1002 /* TODO: Enable the DMA transfer here */
1003 return;
1004
1005 err_config_dma:
1006 pl022->cur_msg->state = STATE_ERROR;
1007 pl022->cur_msg->status = -EIO;
1008 giveback(pl022);
1009 return;
1010 }
1011
1012 static void do_interrupt_transfer(void *data)
1013 {
1014 struct pl022 *pl022 = data;
1015
1016 /* Enable target chip */
1017 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
1018 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
1019 /* Error path */
1020 pl022->cur_msg->state = STATE_ERROR;
1021 pl022->cur_msg->status = -EIO;
1022 giveback(pl022);
1023 return;
1024 }
1025 /* Enable SSP, turn on interrupts */
1026 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1027 SSP_CR1(pl022->virtbase));
1028 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
1029 }
1030
1031 static void do_polling_transfer(void *data)
1032 {
1033 struct pl022 *pl022 = data;
1034 struct spi_message *message = NULL;
1035 struct spi_transfer *transfer = NULL;
1036 struct spi_transfer *previous = NULL;
1037 struct chip_data *chip;
1038
1039 chip = pl022->cur_chip;
1040 message = pl022->cur_msg;
1041
1042 while (message->state != STATE_DONE) {
1043 /* Handle for abort */
1044 if (message->state == STATE_ERROR)
1045 break;
1046 transfer = pl022->cur_transfer;
1047
1048 /* Delay if requested at end of transfer */
1049 if (message->state == STATE_RUNNING) {
1050 previous =
1051 list_entry(transfer->transfer_list.prev,
1052 struct spi_transfer, transfer_list);
1053 if (previous->delay_usecs)
1054 udelay(previous->delay_usecs);
1055 if (previous->cs_change)
1056 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
1057 } else {
1058 /* STATE_START */
1059 message->state = STATE_RUNNING;
1060 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
1061 }
1062
1063 /* Configuration Changing Per Transfer */
1064 if (set_up_next_transfer(pl022, transfer)) {
1065 /* Error path */
1066 message->state = STATE_ERROR;
1067 break;
1068 }
1069 /* Flush FIFOs and enable SSP */
1070 flush(pl022);
1071 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1072 SSP_CR1(pl022->virtbase));
1073
1074 dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
1075  /* FIXME: insert a timeout so we don't hang here indefinitely */
1076 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
1077 readwriter(pl022);
1078
1079  /* Update total bytes transferred */
1080 message->actual_length += pl022->cur_transfer->len;
1081 if (pl022->cur_transfer->cs_change)
1082 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
1083 /* Move to next transfer */
1084 message->state = next_transfer(pl022);
1085 }
1086
1087 /* Handle end of message */
1088 if (message->state == STATE_DONE)
1089 message->status = 0;
1090 else
1091 message->status = -EIO;
1092
1093 giveback(pl022);
1094 return;
1095 }
1096
1097 /**
1098 * pump_messages - Workqueue function which processes spi message queue
1099 * @data: pointer to private data of SSP driver
1100 *
1101 * This function checks if there is any spi message in the queue that
1102  * needs processing and delegates control to the appropriate function
1103  * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
1104  * based on the kind of transfer
1105 *
1106 */
1107 static void pump_messages(struct work_struct *work)
1108 {
1109 struct pl022 *pl022 =
1110 container_of(work, struct pl022, pump_messages);
1111 unsigned long flags;
1112
1113 /* Lock queue and check for queue work */
1114 spin_lock_irqsave(&pl022->queue_lock, flags);
1115 if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
1116 pl022->busy = 0;
1117 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1118 return;
1119 }
1120 /* Make sure we are not already running a message */
1121 if (pl022->cur_msg) {
1122 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1123 return;
1124 }
1125 /* Extract head of queue */
1126 pl022->cur_msg =
1127 list_entry(pl022->queue.next, struct spi_message, queue);
1128
1129 list_del_init(&pl022->cur_msg->queue);
1130 pl022->busy = 1;
1131 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1132
1133 /* Initial message state */
1134 pl022->cur_msg->state = STATE_START;
1135 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
1136 struct spi_transfer,
1137 transfer_list);
1138
1139 /* Setup the SPI using the per chip configuration */
1140 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1141 /*
1142 * We enable the clock here, then the clock will be disabled when
1143 * giveback() is called in each method (poll/interrupt/DMA)
1144 */
1145 clk_enable(pl022->clk);
1146 restore_state(pl022);
1147 flush(pl022);
1148
1149 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1150 do_polling_transfer(pl022);
1151 else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
1152 do_interrupt_transfer(pl022);
1153 else
1154 do_dma_transfer(pl022);
1155 }
1156
1157
1158 static int __init init_queue(struct pl022 *pl022)
1159 {
1160 INIT_LIST_HEAD(&pl022->queue);
1161 spin_lock_init(&pl022->queue_lock);
1162
1163 pl022->run = QUEUE_STOPPED;
1164 pl022->busy = 0;
1165
1166 tasklet_init(&pl022->pump_transfers,
1167 pump_transfers, (unsigned long)pl022);
1168
1169 INIT_WORK(&pl022->pump_messages, pump_messages);
1170 pl022->workqueue = create_singlethread_workqueue(
1171 dev_name(pl022->master->dev.parent));
1172 if (pl022->workqueue == NULL)
1173 return -EBUSY;
1174
1175 return 0;
1176 }
1177
1178
1179 static int start_queue(struct pl022 *pl022)
1180 {
1181 unsigned long flags;
1182
1183 spin_lock_irqsave(&pl022->queue_lock, flags);
1184
1185 if (pl022->run == QUEUE_RUNNING || pl022->busy) {
1186 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1187 return -EBUSY;
1188 }
1189
1190 pl022->run = QUEUE_RUNNING;
1191 pl022->cur_msg = NULL;
1192 pl022->cur_transfer = NULL;
1193 pl022->cur_chip = NULL;
1194 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1195
1196 queue_work(pl022->workqueue, &pl022->pump_messages);
1197
1198 return 0;
1199 }
1200
1201
1202 static int stop_queue(struct pl022 *pl022)
1203 {
1204 unsigned long flags;
1205 unsigned limit = 500;
1206 int status = 0;
1207
1208 spin_lock_irqsave(&pl022->queue_lock, flags);
1209
1210 /* This is a bit lame, but is optimized for the common execution path.
1211 * A wait_queue on the pl022->busy could be used, but then the common
1212 * execution path (pump_messages) would be required to call wake_up or
1213 * friends on every SPI message. Do this instead */
1214 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1215 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1216 msleep(10);
1217 spin_lock_irqsave(&pl022->queue_lock, flags);
1218 }
1219
1220 if (!list_empty(&pl022->queue) || pl022->busy)
1221 status = -EBUSY;
1222 else pl022->run = QUEUE_STOPPED;
1223
1224 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1225
1226 return status;
1227 }
1228
1229 static int destroy_queue(struct pl022 *pl022)
1230 {
1231 int status;
1232
1233 status = stop_queue(pl022);
1234 /* we are unloading the module or failing to load (only two calls
1235 * to this routine), and neither call can handle a return value.
1236 * However, destroy_workqueue calls flush_workqueue, and that will
1237 * block until all work is done. If the reason that stop_queue
1238 * timed out is that the work will never finish, then it does no
1239 * good to call destroy_workqueue, so return anyway. */
1240 if (status != 0)
1241 return status;
1242
1243 destroy_workqueue(pl022->workqueue);
1244
1245 return 0;
1246 }
1247
1248 static int verify_controller_parameters(struct pl022 *pl022,
1249 struct pl022_config_chip *chip_info)
1250 {
1251 if ((chip_info->lbm != LOOPBACK_ENABLED)
1252 && (chip_info->lbm != LOOPBACK_DISABLED)) {
1253 dev_err(chip_info->dev,
1254 "loopback Mode is configured incorrectly\n");
1255 return -EINVAL;
1256 }
1257 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1258 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1259 dev_err(chip_info->dev,
1260 "interface is configured incorrectly\n");
1261 return -EINVAL;
1262 }
1263 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1264 (!pl022->vendor->unidir)) {
1265 dev_err(chip_info->dev,
1266 "unidirectional mode not supported in this "
1267 "hardware version\n");
1268 return -EINVAL;
1269 }
1270 if ((chip_info->hierarchy != SSP_MASTER)
1271 && (chip_info->hierarchy != SSP_SLAVE)) {
1272 dev_err(chip_info->dev,
1273 "hierarchy is configured incorrectly\n");
1274 return -EINVAL;
1275 }
1276 if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
1277 || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
1278 dev_err(chip_info->dev,
1279 "cpsdvsr is configured incorrectly\n");
1280 return -EINVAL;
1281 }
1282 if ((chip_info->endian_rx != SSP_RX_MSB)
1283 && (chip_info->endian_rx != SSP_RX_LSB)) {
1284 dev_err(chip_info->dev,
1285  "RX FIFO endianness is configured incorrectly\n");
1286 return -EINVAL;
1287 }
1288 if ((chip_info->endian_tx != SSP_TX_MSB)
1289 && (chip_info->endian_tx != SSP_TX_LSB)) {
1290 dev_err(chip_info->dev,
1291  "TX FIFO endianness is configured incorrectly\n");
1292 return -EINVAL;
1293 }
1294 if ((chip_info->data_size < SSP_DATA_BITS_4)
1295 || (chip_info->data_size > SSP_DATA_BITS_32)) {
1296 dev_err(chip_info->dev,
1297 "DATA Size is configured incorrectly\n");
1298 return -EINVAL;
1299 }
1300 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1301 && (chip_info->com_mode != DMA_TRANSFER)
1302 && (chip_info->com_mode != POLLING_TRANSFER)) {
1303 dev_err(chip_info->dev,
1304 "Communication mode is configured incorrectly\n");
1305 return -EINVAL;
1306 }
1307 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1308 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1309 dev_err(chip_info->dev,
1310 "RX FIFO Trigger Level is configured incorrectly\n");
1311 return -EINVAL;
1312 }
1313 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1314 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1315 dev_err(chip_info->dev,
1316 "TX FIFO Trigger Level is configured incorrectly\n");
1317 return -EINVAL;
1318 }
1319 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1320 if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
1321 && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
1322 dev_err(chip_info->dev,
1323 "Clock Phase is configured incorrectly\n");
1324 return -EINVAL;
1325 }
1326 if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
1327 && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
1328 dev_err(chip_info->dev,
1329 "Clock Polarity is configured incorrectly\n");
1330 return -EINVAL;
1331 }
1332 }
1333 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1334 if ((chip_info->ctrl_len < SSP_BITS_4)
1335 || (chip_info->ctrl_len > SSP_BITS_32)) {
1336 dev_err(chip_info->dev,
1337 "CTRL LEN is configured incorrectly\n");
1338 return -EINVAL;
1339 }
1340 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1341 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1342 dev_err(chip_info->dev,
1343 "Wait State is configured incorrectly\n");
1344 return -EINVAL;
1345 }
1346 /* Half duplex is only available in the ST Micro version */
1347  if (pl022->vendor->extended_cr) {
1348  if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1349  && (chip_info->duplex != SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1350  dev_err(chip_info->dev,
1351  "Microwire duplex mode is configured incorrectly\n");
1352  return -EINVAL;
1353  }
1354  } else {
1355  if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
1356  dev_err(chip_info->dev,
1357  "Microwire half duplex mode requested,"
1358  " but this is only available in the"
1359  " ST version of PL022\n");
1360  return -EINVAL;
1361  }
1362  }
1363 }
1364 if (chip_info->cs_control == NULL) {
1365 dev_warn(chip_info->dev,
1366 "Chip Select Function is NULL for this chip\n");
1367 chip_info->cs_control = null_cs_control;
1368 }
1369 return 0;
1370 }
1371
1372 /**
1373 * pl022_transfer - transfer function registered to SPI master framework
1374 * @spi: spi device which is requesting transfer
1375 * @msg: spi message which is to handled is queued to driver queue
1376 *
1377 * This function is registered to the SPI framework for this SPI master
1378 * controller. It will queue the spi_message in the queue of driver if
1379 * the queue is not stopped and return.
1380 */
1381 static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1382 {
1383 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1384 unsigned long flags;
1385
1386 spin_lock_irqsave(&pl022->queue_lock, flags);
1387
1388 if (pl022->run == QUEUE_STOPPED) {
1389 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1390 return -ESHUTDOWN;
1391 }
1392 msg->actual_length = 0;
1393 msg->status = -EINPROGRESS;
1394 msg->state = STATE_START;
1395
1396 list_add_tail(&msg->queue, &pl022->queue);
1397 if (pl022->run == QUEUE_RUNNING && !pl022->busy)
1398 queue_work(pl022->workqueue, &pl022->pump_messages);
1399
1400 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1401 return 0;
1402 }
1403
1404 static int calculate_effective_freq(struct pl022 *pl022,
1405 int freq,
1406 struct ssp_clock_params *clk_freq)
1407 {
1408  /* Let's calculate the frequency parameters */
1409 u16 cpsdvsr = 2;
1410 u16 scr = 0;
1411 bool freq_found = false;
1412 u32 rate;
1413 u32 max_tclk;
1414 u32 min_tclk;
1415
1416 rate = clk_get_rate(pl022->clk);
1417  /* cpsdvsr = 2 & scr = 0 */
1418 max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
1419 /* cpsdvsr = 254 & scr = 255 */
1420 min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
1421
1422 if ((freq <= max_tclk) && (freq >= min_tclk)) {
1423 while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
1424 while (scr <= SCR_MAX && !freq_found) {
1425 if ((rate /
1426 (cpsdvsr * (1 + scr))) > freq)
1427 scr += 1;
1428 else {
1429 /*
1430  * Set once suitable divisors are found; scr is then
1431  * stepped back so that the effective frequency ends
1432  * up >= the target frequency whenever possible
1433 */
1434 freq_found = true;
1435 if ((rate /
1436 (cpsdvsr * (1 + scr))) != freq) {
1437 if (scr == SCR_MIN) {
1438 cpsdvsr -= 2;
1439 scr = SCR_MAX;
1440 } else
1441 scr -= 1;
1442 }
1443 }
1444 }
1445 if (!freq_found) {
1446 cpsdvsr += 2;
1447 scr = SCR_MIN;
1448 }
1449 }
1450 if (cpsdvsr != 0) {
1451 dev_dbg(&pl022->adev->dev,
1452 "SSP Effective Frequency is %u\n",
1453 (rate / (cpsdvsr * (1 + scr))));
1454 clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
1455 clk_freq->scr = (u8) (scr & 0xFF);
1456 dev_dbg(&pl022->adev->dev,
1457 "SSP cpsdvsr = %d, scr = %d\n",
1458 clk_freq->cpsdvsr, clk_freq->scr);
1459 }
1460 } else {
1461 dev_err(&pl022->adev->dev,
1462 "controller data is incorrect: out of range frequency");
1463 return -EINVAL;
1464 }
1465 return 0;
1466 }
1467
1468 /**
1469 * NOT IMPLEMENTED
1470 * process_dma_info - Processes the DMA info provided by client drivers
1471 * @chip_info: chip info provided by client device
1472 * @chip: Runtime state maintained by the SSP controller for each spi device
1473 *
1474 * This function processes and stores DMA config provided by client driver
1475 * into the runtime state maintained by the SSP controller driver
1476 */
1477 static int process_dma_info(struct pl022_config_chip *chip_info,
1478 struct chip_data *chip)
1479 {
1480 dev_err(chip_info->dev,
1481 "cannot process DMA info, DMA not implemented!\n");
1482 return -ENOTSUPP;
1483 }
1484
1485 /**
1486 * pl022_setup - setup function registered to SPI master framework
1487 * @spi: spi device which is requesting setup
1488 *
1489 * This function is registered to the SPI framework for this SPI master
1490  * controller. If this is the first time setup is called for this device,
1491  * this function will initialize the runtime state for this chip and save
1492  * it in the device structure. Otherwise it will update the runtime info
1493  * with the updated chip info. Nothing is actually written to the
1494  * controller hardware here; that is not done until the actual transfer
1495  * commences.
1496 */
1497
1498 /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
1499 #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1500 | SPI_LSB_FIRST | SPI_LOOP)
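/*
 * A board-level sketch (illustration only, not part of this driver) of how
 * a client device typically provides this configuration; the GPIO number
 * and the chip select helper are made up for the example:
 *
 *	static void board_cs_control(u32 command)
 *	{
 *		gpio_set_value(BOARD_CS_GPIO,
 *			       command == SSP_CHIP_SELECT ? 0 : 1);
 *	}
 *
 *	static struct pl022_config_chip board_chip_info = {
 *		.com_mode = INTERRUPT_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.slave_tx_disable = DO_NOT_DRIVE_TX,
 *		.endian_rx = SSP_RX_MSB,
 *		.endian_tx = SSP_TX_MSB,
 *		.data_size = SSP_DATA_BITS_12,
 *		.clk_phase = SSP_CLK_SECOND_EDGE,
 *		.clk_pol = SSP_CLK_POL_IDLE_LOW,
 *		.cs_control = board_cs_control,
 *	};
 *
 * The struct is then passed as spi_board_info.controller_data so that it
 * arrives here as spi->controller_data; clk_freq is left at zero so that
 * the divisors are calculated from spi->max_speed_hz, and the whole thing
 * is checked by verify_controller_parameters() before use.
 */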
1501
1502 static int pl022_setup(struct spi_device *spi)
1503 {
1504 struct pl022_config_chip *chip_info;
1505 struct chip_data *chip;
1506 int status = 0;
1507 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1508
1509 if (spi->mode & ~MODEBITS) {
1510 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1511 spi->mode & ~MODEBITS);
1512 return -EINVAL;
1513 }
1514
1515 if (!spi->max_speed_hz)
1516 return -EINVAL;
1517
1518 /* Get controller_state if one is supplied */
1519 chip = spi_get_ctldata(spi);
1520
1521 if (chip == NULL) {
1522 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1523 if (!chip) {
1524 dev_err(&spi->dev,
1525 "cannot allocate controller state\n");
1526 return -ENOMEM;
1527 }
1528 dev_dbg(&spi->dev,
1529 "allocated memory for controller's runtime state\n");
1530 }
1531
1532 /* Get controller data if one is supplied */
1533 chip_info = spi->controller_data;
1534
1535 if (chip_info == NULL) {
1536  /* spi_board_info.controller_data is not supplied */
1537 dev_dbg(&spi->dev,
1538 "using default controller_data settings\n");
1539
1540 chip_info =
1541 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1542
1543 if (!chip_info) {
1544 dev_err(&spi->dev,
1545 "cannot allocate controller data\n");
1546 status = -ENOMEM;
1547 goto err_first_setup;
1548 }
1549
1550 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1551
1552 /* Pointer back to the SPI device */
1553 chip_info->dev = &spi->dev;
1554 /*
1555 * Set controller data default values:
1556 * Polling is supported by default
1557 */
1558 chip_info->lbm = LOOPBACK_DISABLED;
1559 chip_info->com_mode = POLLING_TRANSFER;
1560 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1561 chip_info->hierarchy = SSP_SLAVE;
1562 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1563 chip_info->endian_tx = SSP_TX_LSB;
1564 chip_info->endian_rx = SSP_RX_LSB;
1565 chip_info->data_size = SSP_DATA_BITS_12;
1566 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1567 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1568 chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
1569 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1570 chip_info->ctrl_len = SSP_BITS_8;
1571 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1572 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1573 chip_info->cs_control = null_cs_control;
1574 } else {
1575 dev_dbg(&spi->dev,
1576 "using user supplied controller_data settings\n");
1577 }
1578
1579 /*
1580 * We can override with custom divisors, else we use the board
1581 * frequency setting
1582 */
1583 if ((0 == chip_info->clk_freq.cpsdvsr)
1584 && (0 == chip_info->clk_freq.scr)) {
1585 status = calculate_effective_freq(pl022,
1586 spi->max_speed_hz,
1587 &chip_info->clk_freq);
1588 if (status < 0)
1589 goto err_config_params;
1590 } else {
1591 if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
1592 chip_info->clk_freq.cpsdvsr =
1593 chip_info->clk_freq.cpsdvsr - 1;
1594 }
1595 status = verify_controller_parameters(pl022, chip_info);
1596 if (status) {
1597 dev_err(&spi->dev, "controller data is incorrect");
1598 goto err_config_params;
1599 }
1600 /* Now set controller state based on controller data */
1601 chip->xfer_type = chip_info->com_mode;
1602 chip->cs_control = chip_info->cs_control;
1603
1604 if (chip_info->data_size <= 8) {
1605  dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
1606 chip->n_bytes = 1;
1607 chip->read = READING_U8;
1608 chip->write = WRITING_U8;
1609 } else if (chip_info->data_size <= 16) {
1610 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1611 chip->n_bytes = 2;
1612 chip->read = READING_U16;
1613 chip->write = WRITING_U16;
1614 } else {
1615 if (pl022->vendor->max_bpw >= 32) {
1616 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1617 chip->n_bytes = 4;
1618 chip->read = READING_U32;
1619 chip->write = WRITING_U32;
1620 } else {
1621 dev_err(&spi->dev,
1622 "illegal data size for this controller!\n");
1623 dev_err(&spi->dev,
1624 "a standard pl022 can only handle "
1625 "1 <= n <= 16 bit words\n");
1626 goto err_config_params;
1627 }
1628 }
1629
1630 /* Now Initialize all register settings required for this chip */
1631 chip->cr0 = 0;
1632 chip->cr1 = 0;
1633 chip->dmacr = 0;
1634 chip->cpsr = 0;
1635 if ((chip_info->com_mode == DMA_TRANSFER)
1636 && ((pl022->master_info)->enable_dma)) {
1637 chip->enable_dma = 1;
1638 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1639 status = process_dma_info(chip_info, chip);
1640 if (status < 0)
1641 goto err_config_params;
1642 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1643 SSP_DMACR_MASK_RXDMAE, 0);
1644 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1645 SSP_DMACR_MASK_TXDMAE, 1);
1646 } else {
1647 chip->enable_dma = 0;
1648 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1649 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1650 SSP_DMACR_MASK_RXDMAE, 0);
1651 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1652 SSP_DMACR_MASK_TXDMAE, 1);
1653 }
1654
1655 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1656
1657 /* Special setup for the ST micro extended control registers */
1658 if (pl022->vendor->extended_cr) {
1659 if (pl022->vendor->pl023) {
1660 /* These bits are only in the PL023 */
1661 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
1662 SSP_CR1_MASK_FBCLKDEL_ST, 13);
1663 } else {
1664 /* These bits are in the PL022 but not PL023 */
1665 SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
1666 SSP_CR0_MASK_HALFDUP_ST, 5);
1667 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
1668 SSP_CR0_MASK_CSS_ST, 16);
1669 SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1670 SSP_CR0_MASK_FRF_ST, 21);
1671 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
1672 SSP_CR1_MASK_MWAIT_ST, 6);
1673 }
1674 SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
1675 SSP_CR0_MASK_DSS_ST, 0);
1676 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx,
1677 SSP_CR1_MASK_RENDN_ST, 4);
1678 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx,
1679 SSP_CR1_MASK_TENDN_ST, 5);
1680 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
1681 SSP_CR1_MASK_RXIFLSEL_ST, 7);
1682 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
1683 SSP_CR1_MASK_TXIFLSEL_ST, 10);
1684 } else {
1685 SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
1686 SSP_CR0_MASK_DSS, 0);
1687 SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1688 SSP_CR0_MASK_FRF, 4);
1689 }
1690 /* Stuff that is common for all versions */
1691 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1692 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1693 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1694 /* Loopback is available on all versions except PL023 */
1695 if (!pl022->vendor->pl023)
1696 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1697 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1698 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1699 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1700
1701 /* Save controller_state */
1702 spi_set_ctldata(spi, chip);
1703 return status;
1704 err_config_params:
1705 err_first_setup:
1706 kfree(chip);
1707 return status;
1708 }
1709
1710 /**
1711 * pl022_cleanup - cleanup function registered to SPI master framework
1712 * @spi: spi device which is requesting cleanup
1713 *
1714 * This function is registered to the SPI framework for this SPI master
1715 * controller. It will free the runtime state of chip.
1716 */
1717 static void pl022_cleanup(struct spi_device *spi)
1718 {
1719 struct chip_data *chip = spi_get_ctldata(spi);
1720
1721 spi_set_ctldata(spi, NULL);
1722 kfree(chip);
1723 }
1724
1725
1726 static int __init
1727 pl022_probe(struct amba_device *adev, struct amba_id *id)
1728 {
1729 struct device *dev = &adev->dev;
1730 struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
1731 struct spi_master *master;
1732 struct pl022 *pl022 = NULL; /*Data for this driver */
1733 int status = 0;
1734
1735 dev_info(&adev->dev,
1736 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
1737 if (platform_info == NULL) {
1738 dev_err(&adev->dev, "probe - no platform data supplied\n");
1739 status = -ENODEV;
1740 goto err_no_pdata;
1741 }
1742
1743 /* Allocate master with space for data */
1744 master = spi_alloc_master(dev, sizeof(struct pl022));
1745 if (master == NULL) {
1746 dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
1747 status = -ENOMEM;
1748 goto err_no_master;
1749 }
1750
1751 pl022 = spi_master_get_devdata(master);
1752 pl022->master = master;
1753 pl022->master_info = platform_info;
1754 pl022->adev = adev;
1755 pl022->vendor = id->data;
1756
1757 /*
1758  * Bus number which has been assigned to this SSP controller
1759 * on this board
1760 */
1761 master->bus_num = platform_info->bus_id;
1762 master->num_chipselect = platform_info->num_chipselect;
1763 master->cleanup = pl022_cleanup;
1764 master->setup = pl022_setup;
1765 master->transfer = pl022_transfer;
1766
1767 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1768
1769 status = amba_request_regions(adev, NULL);
1770 if (status)
1771 goto err_no_ioregion;
1772
1773 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1774 if (pl022->virtbase == NULL) {
1775 status = -ENOMEM;
1776 goto err_no_ioremap;
1777 }
1778 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
1779 adev->res.start, pl022->virtbase);
1780
1781 pl022->clk = clk_get(&adev->dev, NULL);
1782 if (IS_ERR(pl022->clk)) {
1783 status = PTR_ERR(pl022->clk);
1784 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
1785 goto err_no_clk;
1786 }
1787
1788 /* Disable SSP */
1789 clk_enable(pl022->clk);
1790 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1791 SSP_CR1(pl022->virtbase));
1792 load_ssp_default_config(pl022);
1793 clk_disable(pl022->clk);
1794
1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1796 pl022);
1797 if (status < 0) {
1798 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1799 goto err_no_irq;
1800 }
1801 /* Initialize and start queue */
1802 status = init_queue(pl022);
1803 if (status != 0) {
1804 dev_err(&adev->dev, "probe - problem initializing queue\n");
1805 goto err_init_queue;
1806 }
1807 status = start_queue(pl022);
1808 if (status != 0) {
1809 dev_err(&adev->dev, "probe - problem starting queue\n");
1810 goto err_start_queue;
1811 }
1812 /* Register with the SPI framework */
1813 amba_set_drvdata(adev, pl022);
1814 status = spi_register_master(master);
1815 if (status != 0) {
1816 dev_err(&adev->dev,
1817 "probe - problem registering spi master\n");
1818 goto err_spi_register;
1819 }
1820  dev_dbg(dev, "probe succeeded\n");
1821 return 0;
1822
1823 err_spi_register:
1824 err_start_queue:
1825 err_init_queue:
1826 destroy_queue(pl022);
1827 free_irq(adev->irq[0], pl022);
1828 err_no_irq:
1829 clk_put(pl022->clk);
1830 err_no_clk:
1831 iounmap(pl022->virtbase);
1832 err_no_ioremap:
1833 amba_release_regions(adev);
1834 err_no_ioregion:
1835 spi_master_put(master);
1836 err_no_master:
1837 err_no_pdata:
1838 return status;
1839 }
1840
1841 static int __exit
1842 pl022_remove(struct amba_device *adev)
1843 {
1844 struct pl022 *pl022 = amba_get_drvdata(adev);
1845 int status = 0;
1846 if (!pl022)
1847 return 0;
1848
1849 /* Remove the queue */
1850 status = destroy_queue(pl022);
1851 if (status != 0) {
1852 dev_err(&adev->dev,
1853 "queue remove failed (%d)\n", status);
1854 return status;
1855 }
1856 load_ssp_default_config(pl022);
1857 free_irq(adev->irq[0], pl022);
1858 clk_disable(pl022->clk);
1859 clk_put(pl022->clk);
1860 iounmap(pl022->virtbase);
1861 amba_release_regions(adev);
1862 tasklet_disable(&pl022->pump_transfers);
1863 spi_unregister_master(pl022->master);
1864 spi_master_put(pl022->master);
1865 amba_set_drvdata(adev, NULL);
1866  dev_dbg(&adev->dev, "remove succeeded\n");
1867 return 0;
1868 }
1869
1870 #ifdef CONFIG_PM
1871 static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1872 {
1873 struct pl022 *pl022 = amba_get_drvdata(adev);
1874 int status = 0;
1875
1876 status = stop_queue(pl022);
1877 if (status) {
1878 dev_warn(&adev->dev, "suspend cannot stop queue\n");
1879 return status;
1880 }
1881
1882 clk_enable(pl022->clk);
1883 load_ssp_default_config(pl022);
1884 clk_disable(pl022->clk);
1885 dev_dbg(&adev->dev, "suspended\n");
1886 return 0;
1887 }
1888
1889 static int pl022_resume(struct amba_device *adev)
1890 {
1891 struct pl022 *pl022 = amba_get_drvdata(adev);
1892 int status = 0;
1893
1894 /* Start the queue running */
1895 status = start_queue(pl022);
1896 if (status)
1897 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
1898 else
1899 dev_dbg(&adev->dev, "resumed\n");
1900
1901 return status;
1902 }
1903 #else
1904 #define pl022_suspend NULL
1905 #define pl022_resume NULL
1906 #endif /* CONFIG_PM */
1907
1908 static struct vendor_data vendor_arm = {
1909 .fifodepth = 8,
1910 .max_bpw = 16,
1911 .unidir = false,
1912 .extended_cr = false,
1913 .pl023 = false,
1914 };
1915
1916
1917 static struct vendor_data vendor_st = {
1918 .fifodepth = 32,
1919 .max_bpw = 32,
1920 .unidir = false,
1921 .extended_cr = true,
1922 .pl023 = false,
1923 };
1924
1925 static struct vendor_data vendor_st_pl023 = {
1926 .fifodepth = 32,
1927 .max_bpw = 32,
1928 .unidir = false,
1929 .extended_cr = true,
1930 .pl023 = true,
1931 };
1932
1933 static struct amba_id pl022_ids[] = {
1934 {
1935 /*
1936 * ARM PL022 variant, this has a 16bit wide
1937 * and 8 locations deep TX/RX FIFO
1938 */
1939 .id = 0x00041022,
1940 .mask = 0x000fffff,
1941 .data = &vendor_arm,
1942 },
1943 {
1944 /*
1945 * ST Micro derivative, this has 32bit wide
1946 * and 32 locations deep TX/RX FIFO
1947 */
1948 .id = 0x01080022,
1949 .mask = 0xffffffff,
1950 .data = &vendor_st,
1951 },
1952 {
1953 /*
1954 * ST-Ericsson derivative "PL023" (this is not
1955 * an official ARM number), this is a PL022 SSP block
1956 * stripped to SPI mode only, it has 32bit wide
1957 * and 32 locations deep TX/RX FIFO but no extended
1958 * CR0/CR1 register
1959 */
1960 .id = 0x00080023,
1961 .mask = 0xffffffff,
1962 .data = &vendor_st_pl023,
1963 },
1964 { 0, 0 },
1965 };
1966
1967 static struct amba_driver pl022_driver = {
1968 .drv = {
1969 .name = "ssp-pl022",
1970 },
1971 .id_table = pl022_ids,
1972 .probe = pl022_probe,
1973 .remove = __exit_p(pl022_remove),
1974 .suspend = pl022_suspend,
1975 .resume = pl022_resume,
1976 };
1977
1978
1979 static int __init pl022_init(void)
1980 {
1981 return amba_driver_register(&pl022_driver);
1982 }
1983
1984 module_init(pl022_init);
1985
1986 static void __exit pl022_exit(void)
1987 {
1988 amba_driver_unregister(&pl022_driver);
1989 }
1990
1991 module_exit(pl022_exit);
1992
1993 MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
1994 MODULE_DESCRIPTION("PL022 SSP Controller Driver");
1995 MODULE_LICENSE("GPL");