usb: gadget: f_mtp: Avoid race between mtp_read and mtp_function_disable
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / spi / spi-s3c64xx.c
... / ...
CommitLineData
1/*
2 * Copyright (C) 2009 Samsung Electronics Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/workqueue.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/clk.h>
22#include <linux/clk-provider.h>
23#include <linux/dma-mapping.h>
24#include <linux/dmaengine.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/spi/spi.h>
28#include <linux/gpio.h>
29#include <linux/of.h>
30#include <linux/of_gpio.h>
31#include <soc/samsung/exynos-powermode.h>
32
33#include <linux/platform_data/spi-s3c64xx.h>
34
35#include <linux/dma/dma-pl330.h>
36
37#ifdef CONFIG_CPU_IDLE
38#include <soc/samsung/exynos-pm.h>
39#endif
40
41#include "../pinctrl/core.h"
42
/* All SPI controller instances probed by this driver (walked by spi_dbg_store) */
static LIST_HEAD(drvdata_list);

#define MAX_SPI_PORTS		16
#define SPI_AUTOSUSPEND_TIMEOUT		(100)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

/* CH_CFG: channel on/off, reset, clock polarity/phase, high-speed mode */
#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

/* CLK_CFG: clock source select, enable and prescaler */
#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

/* MODE_CFG: channel/bus transfer widths and DMA enables */
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

/* SLAVE_SEL: chip-select control (manual signal vs automatic toggling) */
#define S3C64XX_SPI_SLAVE_NSC_CNT_2		(2<<4)
#define S3C64XX_SPI_SLAVE_NSC_CNT_1		(1<<4)
#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

/* INT_EN: interrupt enables (driver mostly polls / uses DMA completion) */
#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

/* STATUS: error and FIFO-ready flags */
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

/* PENDING_CLR: write-1-to-clear pending interrupt sources */
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

/* SWAP_CFG: byte/bit/half-word swapping for >8bpw transfers */
#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK		(3<<0)

/* Helpers to extract per-port FIFO levels from STATUS (layout varies per SoC) */
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define S3C64XX_SPI_DMA_4BURST_LEN	0x4
#define S3C64XX_SPI_DMA_1BURST_LEN	0x1

/* Rough busy-wait loop count for t milliseconds (calibrated by loops_per_jiffy) */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* sdd->state flags: a DMA channel is still running */
#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

/* sci->dbg_mode flag set via the spi_dbg sysfs attribute */
#define SPI_DBG_MODE (0x1 << 0)
148
/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register,
 *	indexed by port id (per-port FIFO depths differ).
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit (the bus clock is then set via clk_set_rate()).
 *
 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
 * differ in some aspects such as the size of the fifo and spi bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	bool	high_speed;
	bool	clk_from_cmu;
};
170
171static ssize_t
172spi_dbg_show(struct device *dev, struct device_attribute *attr, char *buf)
173{
174 ssize_t ret = 0;
175
176 ret += snprintf(buf + ret, PAGE_SIZE - ret,
177 "SPI Debug Mode Configuration.\n");
178 ret += snprintf(buf + ret, PAGE_SIZE - ret,
179 "0 : Change DBG mode.\n");
180 ret += snprintf(buf + ret, PAGE_SIZE - ret,
181 "1 : Change Normal mode.\n");
182
183 if (ret < PAGE_SIZE - 1) {
184 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
185 } else {
186 buf[PAGE_SIZE-2] = '\n';
187 buf[PAGE_SIZE-1] = '\0';
188 ret = PAGE_SIZE-1;
189 }
190
191 return ret;
192}
193
194static ssize_t
195spi_dbg_store(struct device *dev, struct device_attribute *attr,
196 const char *buf, size_t count)
197{
198 struct spi_master *master = dev_get_drvdata(dev);
199 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
200 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
201 struct s3c64xx_spi_info *check_sci;
202 int ret, input_cmd;
203
204 ret = sscanf(buf, "%d", &input_cmd);
205
206 list_for_each_entry(check_sci, &drvdata_list, node) {
207 if (check_sci != sci)
208 continue;
209
210 switch(input_cmd) {
211 case 0:
212 printk(KERN_ERR "Change SPI%d to Loopback(DBG) mode\n",
213 sdd->port_id);
214 sci->dbg_mode = SPI_DBG_MODE;
215 break;
216 case 1:
217 printk(KERN_ERR "Change SPI%d to normal mode\n",
218 sdd->port_id);
219 sci->dbg_mode = 0;
220 break;
221 default:
222 printk(KERN_ERR "Wrong Command!(0/1)\n");
223 }
224 }
225
226 return count;
227}
228
/* /sys/devices/.../spi_dbg — read for usage help, write 0/1 (see spi_dbg_store) */
static DEVICE_ATTR(spi_dbg, 0640, spi_dbg_show, spi_dbg_store);
230
/*
 * s3c64xx_spi_dump_reg - dump the main controller registers to the log.
 *
 * Used on I/O errors to capture channel, mode, chip-select, status and
 * packet-count state for post-mortem debugging. Reads only; no side
 * effects on the hardware state.
 */
static void s3c64xx_spi_dump_reg(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	struct device *dev = &sdd->pdev->dev;

	dev_err(dev, "Register dump for SPI\n"
		"	CH_CFG       0x%08x\n"
		"	MODE_CFG     0x%08x\n"
		"	CS_REG       0x%08x\n"
		"	STATUS       0x%08x\n"
		"	PACKET_CNT   0x%08x\n"
		, readl(regs + S3C64XX_SPI_CH_CFG)
		, readl(regs + S3C64XX_SPI_MODE_CFG)
		, readl(regs + S3C64XX_SPI_SLAVE_SEL)
		, readl(regs + S3C64XX_SPI_STATUS)
		, readl(regs + S3C64XX_SPI_PACKET_CNT)
	);

}
250static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
251{
252 void __iomem *regs = sdd->regs;
253 unsigned long loops;
254 u32 val;
255
256 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
257
258 val = readl(regs + S3C64XX_SPI_CH_CFG);
259 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
260 writel(val, regs + S3C64XX_SPI_CH_CFG);
261
262 val = readl(regs + S3C64XX_SPI_CH_CFG);
263 val |= S3C64XX_SPI_CH_SW_RST;
264 val &= ~S3C64XX_SPI_CH_HS_EN;
265 writel(val, regs + S3C64XX_SPI_CH_CFG);
266
267 /* Flush TxFIFO*/
268 loops = msecs_to_loops(1);
269 do {
270 val = readl(regs + S3C64XX_SPI_STATUS);
271 } while (TX_FIFO_LVL(val, sdd) && loops--);
272
273 if (loops == 0)
274 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
275
276 /* Flush RxFIFO*/
277 loops = msecs_to_loops(1);
278 do {
279 val = readl(regs + S3C64XX_SPI_STATUS);
280 if (RX_FIFO_LVL(val, sdd))
281 readl(regs + S3C64XX_SPI_RX_DATA);
282 else
283 break;
284 } while (loops--);
285
286 if (loops == 0)
287 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
288
289 val = readl(regs + S3C64XX_SPI_CH_CFG);
290 val &= ~S3C64XX_SPI_CH_SW_RST;
291 writel(val, regs + S3C64XX_SPI_CH_CFG);
292
293 val = readl(regs + S3C64XX_SPI_MODE_CFG);
294 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
295 writel(val, regs + S3C64XX_SPI_MODE_CFG);
296}
297
298static void s3c64xx_spi_dmacb(void *data)
299{
300 struct s3c64xx_spi_driver_data *sdd;
301 struct s3c64xx_spi_dma_data *dma = data;
302 unsigned long flags;
303
304 if (dma->direction == DMA_DEV_TO_MEM)
305 sdd = container_of(data,
306 struct s3c64xx_spi_driver_data, rx_dma);
307 else
308 sdd = container_of(data,
309 struct s3c64xx_spi_driver_data, tx_dma);
310
311 spin_lock_irqsave(&sdd->lock, flags);
312
313 if (dma->direction == DMA_DEV_TO_MEM) {
314 sdd->state &= ~RXBUSY;
315 if (!(sdd->state & TXBUSY))
316 complete(&sdd->xfer_completion);
317 } else {
318 sdd->state &= ~TXBUSY;
319 if (!(sdd->state & RXBUSY))
320 complete(&sdd->xfer_completion);
321 }
322
323 spin_unlock_irqrestore(&sdd->lock, flags);
324}
325
/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */

/* Client identity handed to the legacy Samsung DMA wrapper on request/release */
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
	.name = "samsung-spi-dma",
};
331
332static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
333 unsigned len, dma_addr_t buf)
334{
335 struct s3c64xx_spi_driver_data *sdd;
336 struct samsung_dma_prep info;
337 struct samsung_dma_config config;
338 u32 modecfg;
339
340 if (dma->direction == DMA_DEV_TO_MEM) {
341 sdd = container_of((void *)dma,
342 struct s3c64xx_spi_driver_data, rx_dma);
343 config.direction = sdd->rx_dma.direction;
344 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
345 config.width = sdd->cur_bpw / 8;
346 modecfg = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
347 config.maxburst = modecfg & S3C64XX_SPI_MODE_4BURST ?
348 S3C64XX_SPI_DMA_4BURST_LEN :
349 S3C64XX_SPI_DMA_1BURST_LEN;
350
351 #ifdef CONFIG_ARM64
352 sdd->ops->config((unsigned long)sdd->rx_dma.ch, &config);
353 #else
354 sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
355 #endif
356 } else {
357 sdd = container_of((void *)dma,
358 struct s3c64xx_spi_driver_data, tx_dma);
359 config.direction = sdd->tx_dma.direction;
360 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
361 config.width = sdd->cur_bpw / 8;
362 modecfg = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
363 config.maxburst = modecfg & S3C64XX_SPI_MODE_4BURST ?
364 S3C64XX_SPI_DMA_4BURST_LEN :
365 S3C64XX_SPI_DMA_1BURST_LEN;
366
367 #ifdef CONFIG_ARM64
368 sdd->ops->config((unsigned long)sdd->tx_dma.ch, &config);
369 #else
370 sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
371 #endif
372 }
373
374 info.cap = DMA_SLAVE;
375 info.len = len;
376 info.fp = s3c64xx_spi_dmacb;
377 info.fp_param = dma;
378 info.direction = dma->direction;
379 info.buf = buf;
380
381#ifdef CONFIG_ARM64
382 sdd->ops->prepare((unsigned long)dma->ch, &info);
383 sdd->ops->trigger((unsigned long)dma->ch);
384#else
385 sdd->ops->prepare((enum dma_ch)dma->ch, &info);
386 sdd->ops->trigger((enum dma_ch)dma->ch);
387#endif
388
389}
390
391static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
392{
393 struct samsung_dma_req req;
394 struct device *dev = &sdd->pdev->dev;
395
396 sdd->ops = samsung_dma_get_ops();
397
398 req.cap = DMA_SLAVE;
399 req.client = &s3c64xx_spi_dma_client;
400
401 if (sdd->rx_dma.ch == NULL)
402 sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach,
403 &req, dev, "rx");
404 if (sdd->tx_dma.ch == NULL)
405 sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach,
406 &req, dev, "tx");
407
408 return 1;
409}
410
411static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel);
412
413static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
414{
415 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
416 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
417#ifdef CONFIG_PM
418 int ret;
419#endif
420
421#ifndef CONFIG_PM
422 if (sci->dma_mode == DMA_MODE) {
423 /* Acquire DMA channels */
424 while (!acquire_dma(sdd))
425 usleep_range(10000, 11000);
426 }
427#endif
428
429#ifdef CONFIG_PM
430 ret = pm_runtime_get_sync(&sdd->pdev->dev);
431 if(ret < 0)
432 return ret;
433#endif
434
435 if (sci->need_hw_init)
436 s3c64xx_spi_hwinit(sdd, sdd->port_id);
437
438 return 0;
439}
440
/*
 * s3c64xx_spi_unprepare_transfer - spi_master->unprepare_transfer_hardware hook.
 *
 * Mirror of prepare_transfer: without CONFIG_PM the DMA channels are
 * released here; with CONFIG_PM the device is handed back to runtime PM
 * (autosuspend), which releases resources in the runtime-suspend path.
 */
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
#ifdef CONFIG_PM
	int ret;
#endif

#ifndef CONFIG_PM
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	/* Free DMA channels */
	if (sci->dma_mode == DMA_MODE) {
	#ifdef CONFIG_ARM64
		sdd->ops->release((unsigned long)sdd->rx_dma.ch,
					&s3c64xx_spi_dma_client);
		sdd->ops->release((unsigned long)sdd->tx_dma.ch,
					&s3c64xx_spi_dma_client);
	#else
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
					&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
					&s3c64xx_spi_dma_client);
	#endif
		/* NULL marks the channels as needing re-acquisition */
		sdd->rx_dma.ch = NULL;
		sdd->tx_dma.ch = NULL;
	}
#endif

#ifdef CONFIG_PM
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	ret = pm_runtime_put_autosuspend(&sdd->pdev->dev);
	if(ret < 0)
		return ret;
#endif

	return 0;
}
478
/*
 * s3c64xx_spi_dma_stop - abort an in-flight DMA transfer on one channel.
 * Used from the error path when a transfer timed out. The cast differs
 * per-arch because the legacy DMA wrapper API takes an enum on ARM32.
 */
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
#ifdef CONFIG_ARM64
	sdd->ops->stop((unsigned long)dma->ch);
#else
	sdd->ops->stop((enum dma_ch)dma->ch);
#endif
}
488
/*
 * s3c64xx_dma_debug - ask the DMA driver to dump channel state.
 * Diagnostic-only helper invoked before stopping a stuck channel.
 */
static void s3c64xx_dma_debug(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
#ifdef CONFIG_ARM64
	sdd->ops->debug((unsigned long)dma->ch);
#else
	sdd->ops->debug((enum dma_ch)dma->ch);
#endif
}
498
/*
 * enable_datapath - program the controller for one transfer.
 * @sdd: driver state
 * @spi: target slave device
 * @xfer: the transfer to run (tx_buf/rx_buf/len already validated)
 * @dma_mode: non-zero to use DMA, zero for PIO (FIFO) mode
 *
 * Sets up MODE_CFG/CH_CFG/PACKET_CNT and, in PIO mode, pushes the Tx
 * data into the FIFO directly. In DMA mode the channels are prepared
 * and triggered here; completion is signalled via s3c64xx_spi_dmacb.
 * The register write order (MODE_CFG before CH_CFG at the end) matters:
 * the channel is switched on only after the mode is fully configured.
 */
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg, dma_burst_len;

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~S3C64XX_SPI_MODE_4BURST;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;

		/* 4-burst only when the length divides evenly into bursts */
		dma_burst_len = (sdd->cur_bpw / 8) * S3C64XX_SPI_DMA_4BURST_LEN;
		if (!(xfer->len % dma_burst_len))
			modecfg |= S3C64XX_SPI_MODE_4BURST;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
		} else {
			/* PIO: feed the Tx FIFO with the word size in use */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed Rx path only without CPHA (hardware restriction) */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
577
/*
 * enable_cs - assert chip-select for @spi before a transfer.
 *
 * If a different device was left selected from the previous message
 * (sdd->tgl_spi), it is deselected first — both its GPIO line and the
 * controller's slave-select signal. Then this device's CS GPIO is
 * driven active and the SLAVE_SEL register is set up either for
 * hardware-controlled (AUTO_CS_MODE) or manual chip selection.
 * A cs->line of 0 means "no GPIO chip-select for this device".
 */
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
						struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;

	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
			/* Deselect the last toggled device */
			cs = sdd->tgl_spi->controller_data;
			if(cs->line != 0)
				gpio_set_value(cs->line,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			/* Quiese the signals */
			writel(spi->mode & SPI_CS_HIGH ?
				0 : S3C64XX_SPI_SLAVE_SIG_INACT,
				sdd->regs + S3C64XX_SPI_SLAVE_SEL);
		}
		sdd->tgl_spi = NULL;
	}

	cs = spi->controller_data;
	if(cs->line != 0)
		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);

	if (cs->cs_mode == AUTO_CS_MODE) {
		/* Set auto chip selection */
		writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
			| S3C64XX_SPI_SLAVE_AUTO
			| S3C64XX_SPI_SLAVE_NSC_CNT_2,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	} else {
		/* Start the signals */
		writel(spi->mode & SPI_CS_HIGH ?
			S3C64XX_SPI_SLAVE_SIG_INACT : 0,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	}
}
615
/*
 * wait_for_xfer - wait for one transfer to finish and drain Rx in PIO mode.
 * @sdd: driver state
 * @xfer: the transfer being waited on
 * @dma_mode: non-zero if the transfer runs via DMA
 *
 * Timeout budget is derived from the byte count and bus speed, scaled
 * 10x plus 30ms tolerance, minimum 100ms. In DMA mode waits on the
 * completion signalled by s3c64xx_spi_dmacb; in PIO mode busy-polls the
 * Rx FIFO level. Returns 0 on success, -EIO on timeout.
 */
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms = (ms * 10) + 30; /* some tolerance */
	ms = max(ms, 100); /* minimum timeout */

	if (dma_mode) {
		val = msecs_to_jiffies(ms) + 10;
		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
	} else {
		u32 status;
		val = msecs_to_loops(ms);
		do {
			status = readl(regs + S3C64XX_SPI_STATUS);
		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
	}

	if (!val)
		return -EIO;

	if (dma_mode) {
		u32 status;

		/*
		 * DmaTx returns after simply writing data in the FIFO,
		 * w/o waiting for real transmission on the bus to finish.
		 * DmaRx returns only after Dma read data from FIFO which
		 * needs bus transmission to finish, so we don't worry if
		 * Xfer involved Rx(with or without Tx).
		 */
		if (xfer->rx_buf == NULL) {
			/* Tx-only: additionally poll until the FIFO drains */
			val = msecs_to_loops(10);
			status = readl(regs + S3C64XX_SPI_STATUS);
			while ((TX_FIFO_LVL(status, sdd)
				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
			       && --val) {
				cpu_relax();
				status = readl(regs + S3C64XX_SPI_STATUS);
			}

			if (!val)
				return -EIO;
		}
	} else {
		/* If it was only Tx */
		if (xfer->rx_buf == NULL) {
			sdd->state &= ~TXBUSY;
			return 0;
		}

		/* PIO: pull the received words out of the Rx FIFO */
		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len);
			break;
		}
		sdd->state &= ~RXBUSY;
	}

	return 0;
}
691
692static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
693 struct spi_device *spi)
694{
695 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
696
697 if (sdd->tgl_spi == spi)
698 sdd->tgl_spi = NULL;
699
700 if(cs->line != 0)
701 gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
702
703 if (cs->cs_mode != AUTO_CS_MODE) {
704 /* Quiese the signals */
705 writel(spi->mode & SPI_CS_HIGH
706 ? 0 : S3C64XX_SPI_SLAVE_SIG_INACT,
707 sdd->regs + S3C64XX_SPI_SLAVE_SEL);
708 }
709}
710
/*
 * s3c64xx_spi_config - apply sdd->cur_{mode,bpw,speed} to the hardware.
 *
 * Sequence: gate the SPI clock, program polarity/phase (CPOL/CPHA) and
 * channel/bus transfer widths (with optional byte/half-word swapping
 * when sci->swap_mode is set), then reprogram and re-enable the clock.
 * On clk_from_cmu parts the rate is set on the source clock directly
 * (2x, as a half-divider sits before the IP); otherwise the internal
 * prescaler (PSR) is computed from the source clock rate.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		if (sci->swap_mode == SWAP_MODE) {
			writel(S3C64XX_SPI_SWAP_TX_EN |
				S3C64XX_SPI_SWAP_TX_BYTE |
				S3C64XX_SPI_SWAP_TX_HALF_WORD |
				S3C64XX_SPI_SWAP_RX_EN |
				S3C64XX_SPI_SWAP_RX_BYTE |
				S3C64XX_SPI_SWAP_RX_HALF_WORD,
				regs + S3C64XX_SPI_SWAP_CFG);
		}
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		if (sci->swap_mode == SWAP_MODE) {
			writel(S3C64XX_SPI_SWAP_TX_EN |
				S3C64XX_SPI_SWAP_TX_BYTE |
				S3C64XX_SPI_SWAP_RX_EN |
				S3C64XX_SPI_SWAP_RX_BYTE,
				regs + S3C64XX_SPI_SWAP_CFG);
		}
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		if (sci->swap_mode == SWAP_MODE)
			writel(0, regs + S3C64XX_SPI_SWAP_CFG);
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	if (sci->dbg_mode & SPI_DBG_MODE) {
		dev_err(&sdd->pdev->dev, "SPI_MODE_%d", sdd->cur_mode & 0x3);
		dev_err(&sdd->pdev->dev, "BTS : %d", sdd->cur_bpw);
	}
}
806
807#define XFER_DMAADDR_INVALID DMA_BIT_MASK(36)
808
809static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
810 struct spi_message *msg)
811{
812 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
813 struct device *dev = &sdd->pdev->dev;
814 struct spi_transfer *xfer;
815
816 if ((msg->is_dma_mapped) || (sci->dma_mode != DMA_MODE))
817 return 0;
818
819 /* First mark all xfer unmapped */
820 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
821 xfer->rx_dma = XFER_DMAADDR_INVALID;
822 xfer->tx_dma = XFER_DMAADDR_INVALID;
823 }
824
825 /* Map until end or first fail */
826 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
827
828 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
829 continue;
830
831 if (xfer->tx_buf != NULL) {
832 xfer->tx_dma = dma_map_single(dev,
833 (void *)xfer->tx_buf, xfer->len,
834 DMA_TO_DEVICE);
835 if (dma_mapping_error(dev, xfer->tx_dma)) {
836 dev_err(dev, "dma_map_single Tx failed\n");
837 xfer->tx_dma = XFER_DMAADDR_INVALID;
838 return -ENOMEM;
839 }
840 }
841
842 if (xfer->rx_buf != NULL) {
843 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
844 xfer->len, DMA_FROM_DEVICE);
845 if (dma_mapping_error(dev, xfer->rx_dma)) {
846 dev_err(dev, "dma_map_single Rx failed\n");
847 dma_unmap_single(dev, xfer->tx_dma,
848 xfer->len, DMA_TO_DEVICE);
849 xfer->tx_dma = XFER_DMAADDR_INVALID;
850 xfer->rx_dma = XFER_DMAADDR_INVALID;
851 return -ENOMEM;
852 }
853 }
854 }
855
856 return 0;
857}
858
859static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
860 struct spi_message *msg)
861{
862 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
863 struct device *dev = &sdd->pdev->dev;
864 struct spi_transfer *xfer;
865
866 if ((msg->is_dma_mapped) || (sci->dma_mode != DMA_MODE))
867 return;
868
869 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
870
871 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
872 continue;
873
874 if (xfer->rx_buf != NULL
875 && xfer->rx_dma != XFER_DMAADDR_INVALID)
876 dma_unmap_single(dev, xfer->rx_dma,
877 xfer->len, DMA_FROM_DEVICE);
878
879 if (xfer->tx_buf != NULL
880 && xfer->tx_dma != XFER_DMAADDR_INVALID)
881 dma_unmap_single(dev, xfer->tx_dma,
882 xfer->len, DMA_TO_DEVICE);
883 }
884}
885
/*
 * s3c64xx_spi_transfer_one_message - spi_master->transfer_one_message hook.
 *
 * Runs every transfer of @msg: reconfigures the controller when speed,
 * mode or word size change, DMA-maps large transfers, then for each
 * transfer selects PIO vs DMA, asserts CS, starts the datapath and
 * waits for completion. In pure-CPU mode (sci->dma_mode != DMA_MODE) a
 * transfer larger than the FIFO is run as multiple FIFO-sized chunks by
 * temporarily advancing tx_buf/rx_buf/len and looping via try_transfer;
 * the original pointers/length are restored afterwards.
 * Always returns 0; the per-message result is reported via msg->status
 * before spi_finalize_current_message().
 */
static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
					    struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct spi_transfer *xfer;
	int status = 0, cs_toggle = 0;
	const void *origin_tx_buf = NULL;
	void *origin_rx_buf = NULL;
	unsigned target_len = 0, origin_len = 0;
	/* Chunk size for CPU-mode transfers: half the FIFO depth */
	unsigned fifo_lvl = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	u32 speed;
	u8 bpw;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Map all the transfers if needed */
	if (s3c64xx_spi_map_mssg(sdd, msg)) {
		dev_err(&spi->dev,
			"Xfer: Unable to map message buffers!\n");
		status = -ENOMEM;
		goto out;
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		unsigned long flags;
		int use_dma;

		reinit_completion(&sdd->xfer_completion);

		/* Only BPW and Speed may change across transfers */
		bpw = xfer->bits_per_word;
		speed = xfer->speed_hz ? : spi->max_speed_hz;

		if (xfer->len % (bpw / 8)) {
			dev_err(&spi->dev,
				"Xfer length(%u) not a multiple of word size(%u)\n",
				xfer->len, bpw / 8);
			status = -EIO;
			goto out;
		}

		if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
			sdd->cur_bpw = bpw;
			sdd->cur_speed = speed;
			s3c64xx_spi_config(sdd);
		}

		/* verify cpu mode */
		if (sci->dma_mode != DMA_MODE) {
			use_dma = 0;

			/* backup original tx, rx buf ptr & xfer length */
			origin_tx_buf = xfer->tx_buf;
			origin_rx_buf = xfer->rx_buf;
			origin_len = xfer->len;

			target_len = xfer->len;
			if (xfer->len > fifo_lvl)
				xfer->len = fifo_lvl;
		} else {
			/* Polling method for xfers not bigger than FIFO capacity */
			if (xfer->len <= fifo_lvl) {
				use_dma = 0;
			} else {
				use_dma = 1;
			}
		}
try_transfer:
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* CS-vs-datapath ordering differs for auto and manual CS */
		if (cs->cs_mode == AUTO_CS_MODE) {
			/* Slave Select */
			enable_cs(sdd, spi);

			enable_datapath(sdd, spi, xfer, use_dma);
		} else {
			enable_datapath(sdd, spi, xfer, use_dma);

			/* Slave Select */
			enable_cs(sdd, spi);
		}

		spin_unlock_irqrestore(&sdd->lock, flags);

		status = wait_for_xfer(sdd, xfer, use_dma);

		if (status) {
			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len);

			/* Stop any DMA channel that is still running */
			if (use_dma) {
				if (xfer->tx_buf != NULL
						&& (sdd->state & TXBUSY)) {
					s3c64xx_dma_debug(sdd, &sdd->tx_dma);
					s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
				}
				if (xfer->rx_buf != NULL
						&& (sdd->state & RXBUSY)) {
					s3c64xx_dma_debug(sdd, &sdd->rx_dma);
					s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
				}
			}

			s3c64xx_spi_dump_reg(sdd);
			flush_fifo(sdd);

			goto out;
		}

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			/* Hint that the next mssg is gonna be
			   for the same device */
			if (list_is_last(&xfer->transfer_list,
						&msg->transfers))
				cs_toggle = 1;
		}

		msg->actual_length += xfer->len;

		flush_fifo(sdd);

		/* CPU mode: advance through the remaining chunks */
		if (sci->dma_mode != DMA_MODE) {
			target_len -= xfer->len;

			if (xfer->tx_buf != NULL)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf != NULL)
				xfer->rx_buf += xfer->len;

			if (target_len > 0) {
				if (target_len > fifo_lvl)
					xfer->len = fifo_lvl;
				else
					xfer->len = target_len;
				goto try_transfer;
			}

			/* restore original tx, rx buf_ptr & xfer length */
			xfer->tx_buf = origin_tx_buf;
			xfer->rx_buf = origin_rx_buf;
			xfer->len = origin_len;
		}
	}

out:
	if (!cs_toggle || status)
		disable_cs(sdd, spi);
	else
		sdd->tgl_spi = spi;

	s3c64xx_spi_unmap_mssg(sdd, msg);

	msg->status = status;

	spi_finalize_current_message(master);

	return 0;
}
1070
1071static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
1072 struct spi_device *spi)
1073{
1074 struct s3c64xx_spi_csinfo *cs;
1075 struct device_node *slave_np, *data_np = NULL;
1076 u32 fb_delay = 0;
1077 u32 cs_mode = 0;
1078
1079 slave_np = spi->dev.of_node;
1080 if (!slave_np) {
1081 dev_err(&spi->dev, "device node not found\n");
1082 return ERR_PTR(-EINVAL);
1083 }
1084
1085 data_np = of_get_child_by_name(slave_np, "controller-data");
1086 if (!data_np) {
1087 dev_err(&spi->dev, "child node 'controller-data' not found\n");
1088 return ERR_PTR(-EINVAL);
1089 }
1090
1091 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1092 if (!cs) {
1093 dev_err(&spi->dev, "could not allocate memory for controller data\n");
1094 of_node_put(data_np);
1095 return ERR_PTR(-ENOMEM);
1096 }
1097
1098 if (of_get_property(data_np, "cs-gpio", NULL)) {
1099 cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
1100 if (!gpio_is_valid(cs->line))
1101 cs->line = 0;
1102 } else {
1103 cs->line = 0;
1104 }
1105
1106 of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
1107 cs->fb_delay = fb_delay;
1108
1109 if (of_property_read_u32(data_np,
1110 "samsung,spi-chip-select-mode", &cs_mode)) {
1111 cs->cs_mode = AUTO_CS_MODE;
1112 } else {
1113 if (cs_mode)
1114 cs->cs_mode = AUTO_CS_MODE;
1115 else
1116 cs->cs_mode = MANUAL_CS_MODE;
1117 }
1118
1119 of_node_put(data_np);
1120 return cs;
1121}
1122
1123/*
1124 * Here we only check the validity of requested configuration
1125 * and save the configuration in a local data-structure.
1126 * The controller is actually configured only just before we
1127 * get a message to transfer.
1128 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	struct spi_message *msg;
	unsigned long flags;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (!cs && spi->dev.of_node) {
		/* No board-file data: build chip-select info from the DT node */
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	/* NULL (no data at all) or an ERR_PTR from DT parsing both land here */
	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}
#ifdef ENABLE_SENSORS_FPRINT_SECURE
	/* Port owned by the secure world: skip normal-world setup */
	if (sdd->port_id == CONFIG_SENSORS_FP_SPI_NUMBER)
		return 0;
#endif
#ifdef CONFIG_ESE_SECURE
	/* eSE port is likewise configured from the secure side */
	if (sdd->port_id == CONFIG_ESE_SECURE_SPI_PORT) {
		dev_err(&spi->dev, "%s.....(%d)\n",__func__, sdd->port_id);
		return 0;
	}
#endif

	if (!spi_get_ctldata(spi)) {
		/* cs->line == 0 means "no /CS gpio in use" */
		if(cs->line != 0) {
			err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					cs->line, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	spin_lock_irqsave(&sdd->lock, flags);

	list_for_each_entry(msg, &sdd->queue, queue) {
		/* Is some mssg is already queued for this device */
		if (msg->spi == spi) {
			dev_err(&spi->dev,
				"setup: attempt while mssg in queue!\n");
			spin_unlock_irqrestore(&sdd->lock, flags);
			err = -EBUSY;
			goto err_msgq;
		}
	}

	spin_unlock_irqrestore(&sdd->lock, flags);

	/* Controller handles only 8/16/32 bits-per-word */
	if (spi->bits_per_word != 8
			&& spi->bits_per_word != 16
			&& spi->bits_per_word != 32) {
		dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
			spi->bits_per_word);
		err = -EINVAL;
		goto setup_exit;
	}

#ifdef CONFIG_PM
	/* Clock rates are queried below; make sure the block is powered */
	pm_runtime_get_sync(&sdd->pdev->dev);
#endif

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
		if (!speed) {
			dev_err(&spi->dev, "clock rate of speed is 0\n");
			err = -EINVAL;
			goto setup_exit;
		}

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		/* Prescaler closest to (ideally not above) the request */
		psr = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			/* Still too fast: slow down one more notch if possible */
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	disable_cs(sdd, spi);

#ifdef CONFIG_PM
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
#endif

	if (sci->dbg_mode & SPI_DBG_MODE) {
		dev_err(&spi->dev, "SPI feedback-delay : %d\n", cs->fb_delay);
		dev_err(&spi->dev, "SPI clock : %u(%lu)\n",
			sdd->cur_speed, clk_get_rate(sdd->src_clk));
		dev_err(&spi->dev, "SPI %s CS mode", cs->cs_mode ? "AUTO" : "MANUAL");
	}

	return 0;

setup_exit:
	/* setup() returns with device de-selected */
	disable_cs(sdd, spi);

err_msgq:
	/*
	 * NOTE(review): gpio_free() runs even when cs->line == 0, i.e. when
	 * no gpio was requested above - looks suspicious; confirm against
	 * s3c64xx_spi_cleanup(), which guards on cs being present.
	 */
	gpio_free(cs->line);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}
1275
1276static void s3c64xx_spi_cleanup(struct spi_device *spi)
1277{
1278 struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
1279
1280 if (cs) {
1281 gpio_free(cs->line);
1282 if (spi->dev.of_node)
1283 kfree(cs);
1284 }
1285 spi_set_ctldata(spi, NULL);
1286}
1287
1288static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
1289{
1290 struct s3c64xx_spi_driver_data *sdd = data;
1291 struct spi_master *spi = sdd->master;
1292 unsigned int val, clr = 0;
1293
1294 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
1295
1296 if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
1297 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
1298 dev_err(&spi->dev, "RX overrun\n");
1299 }
1300 if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
1301 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
1302 dev_err(&spi->dev, "RX underrun\n");
1303 }
1304 if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
1305 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
1306 dev_err(&spi->dev, "TX overrun\n");
1307 }
1308 if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
1309 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
1310 dev_err(&spi->dev, "TX underrun\n");
1311 }
1312
1313 /* Clear the pending irq by setting and then clearing it */
1314 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1315 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1316
1317 return IRQ_HANDLED;
1318}
1319
/*
 * Reset the SPI block to a known default state: /CS inactive,
 * interrupts masked, clock source selected, mode/packet registers
 * zeroed, pending irqs acked and FIFOs flushed.
 * Ports owned by the secure world are skipped entirely.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

#ifdef ENABLE_SENSORS_FPRINT_SECURE
	if (channel == CONFIG_SENSORS_FP_SPI_NUMBER)
		return;
#endif
#ifdef CONFIG_ESE_SECURE
	if (channel == CONFIG_ESE_SECURE_SPI_PORT)
		return;
#endif
	/* Force the clock to be reprogrammed on the next transfer */
	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	/* No 4-burst mode; program the default trailing-byte count */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);

	/* Hardware state is valid again (cleared on LPA exit, see notifier) */
	sci->need_hw_init = 0;
}
1367
1368#ifdef CONFIG_OF
1369static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1370{
1371 struct s3c64xx_spi_info *sci;
1372 u32 temp;
1373 const char *domain;
1374
1375 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1376 if (!sci) {
1377 dev_err(dev, "memory allocation for spi_info failed\n");
1378 return ERR_PTR(-ENOMEM);
1379 }
1380
1381 if (of_get_property(dev->of_node, "dma-mode", NULL))
1382 sci->dma_mode = DMA_MODE;
1383 else
1384 sci->dma_mode = CPU_MODE;
1385
1386 if (of_get_property(dev->of_node, "swap-mode", NULL))
1387 sci->swap_mode = SWAP_MODE;
1388 else
1389 sci->swap_mode = NO_SWAP_MODE;
1390
1391 if (of_get_property(dev->of_node, "secure-mode", NULL))
1392 sci->secure_mode = SECURE_MODE;
1393 else
1394 sci->secure_mode = NONSECURE_MODE;
1395
1396 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1397 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1398 sci->src_clk_nr = 0;
1399 } else {
1400 sci->src_clk_nr = temp;
1401 }
1402
1403 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1404 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1405 sci->num_cs = 1;
1406 } else {
1407 sci->num_cs = temp;
1408 }
1409
1410 sci->domain = DOMAIN_TOP;
1411 if (!of_property_read_string(dev->of_node, "domain", &domain)) {
1412 if (strncmp(domain, "isp", 3) == 0)
1413 sci->domain = DOMAIN_ISP;
1414 else if (strncmp(domain, "cam1", 4) == 0)
1415 sci->domain = DOMAIN_CAM1;
1416 }
1417
1418 return sci;
1419}
1420#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	/* Non-DT build: controller info comes straight from board platform data */
	return dev_get_platdata(dev);
}
1425#endif
1426
1427static const struct of_device_id s3c64xx_spi_dt_match[];
1428
/*
 * Pick the per-SoC port configuration: from the OF match table when
 * probed via DT, otherwise from the platform_device_id table.
 */
static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
				struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			platform_get_device_id(pdev)->driver_data;
}
1442
1443#ifdef CONFIG_CPU_IDLE
1444static int s3c64xx_spi_notifier(struct notifier_block *self,
1445 unsigned long cmd, void *v)
1446{
1447 struct s3c64xx_spi_info *sci;
1448
1449 switch (cmd) {
1450 case LPA_EXIT:
1451 list_for_each_entry(sci, &drvdata_list, node)
1452 sci->need_hw_init = 1;
1453 break;
1454 }
1455
1456 return NOTIFY_OK;
1457}
1458
/* Registered with exynos_pm in s3c64xx_spi_init() */
static struct notifier_block s3c64xx_spi_notifier_block = {
	.notifier_call = s3c64xx_spi_notifier,
};
1462#endif /* CONFIG_CPU_IDLE */
1463
1464static int s3c64xx_spi_probe(struct platform_device *pdev)
1465{
1466 struct resource *mem_res;
1467 struct resource *res;
1468 struct s3c64xx_spi_driver_data *sdd;
1469 struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1470 struct spi_master *master;
1471 int ret, irq;
1472 char clk_name[16];
1473 int fifosize;
1474
1475 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
1476 if (ret)
1477 return ret;
1478
1479 if (!sci && pdev->dev.of_node) {
1480 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1481 if (IS_ERR(sci))
1482 return PTR_ERR(sci);
1483 }
1484
1485 if (!sci) {
1486 dev_err(&pdev->dev, "platform_data missing!\n");
1487 return -ENODEV;
1488 }
1489
1490#if !defined(CONFIG_VIDEO_EXYNOS_FIMC_IS) && !defined(CONFIG_VIDEO_EXYNOS_FIMC_IS2)
1491 if (sci->domain != DOMAIN_TOP)
1492 return -ENODEV;
1493#endif
1494
1495 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1496 if (mem_res == NULL) {
1497 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1498 return -ENXIO;
1499 }
1500
1501 irq = platform_get_irq(pdev, 0);
1502 if (irq < 0) {
1503 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1504 return irq;
1505 }
1506
1507 master = spi_alloc_master(&pdev->dev,
1508 sizeof(struct s3c64xx_spi_driver_data));
1509 if (master == NULL) {
1510 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1511 return -ENOMEM;
1512 }
1513
1514 platform_set_drvdata(pdev, master);
1515
1516 sdd = spi_master_get_devdata(master);
1517 sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1518 sdd->master = master;
1519 sdd->cntrlr_info = sci;
1520 sdd->pdev = pdev;
1521 sdd->sfr_start = mem_res->start;
1522 sdd->is_probed = 0;
1523 sdd->ops = NULL;
1524
1525 sdd->idle_ip_index = exynos_get_idle_ip_index(dev_name(&pdev->dev));
1526
1527 if (pdev->dev.of_node) {
1528 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1529 if (ret < 0) {
1530 dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1531 ret);
1532 goto err0;
1533 }
1534 pdev->id = sdd->port_id = ret;
1535 } else {
1536 sdd->port_id = pdev->id;
1537 }
1538
1539 if(sdd->port_id >= MAX_SPI_PORTS) {
1540 dev_err(&pdev->dev, "the port %d exceeded MAX_SPI_PORTS(%d)\n"
1541 , sdd->port_id, MAX_SPI_PORTS);
1542 goto err0;
1543 }
1544
1545 sdd->cur_bpw = 8;
1546
1547 if (sci->dma_mode == DMA_MODE) {
1548 if (!sdd->pdev->dev.of_node) {
1549 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1550 if (!res) {
1551 dev_err(&pdev->dev,
1552 "Unable to get SPI tx dma resource\n");
1553 return -ENXIO;
1554 }
1555 sdd->tx_dma.dmach = res->start;
1556
1557 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1558 if (!res) {
1559 dev_err(&pdev->dev,
1560 "Unable to get SPI rx dma resource\n");
1561 return -ENXIO;
1562 }
1563 sdd->rx_dma.dmach = res->start;
1564 }
1565
1566 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1567 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1568 }
1569
1570 master->dev.of_node = pdev->dev.of_node;
1571 master->bus_num = sdd->port_id;
1572 master->setup = s3c64xx_spi_setup;
1573 master->cleanup = s3c64xx_spi_cleanup;
1574 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1575 master->transfer_one_message = s3c64xx_spi_transfer_one_message;
1576 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1577 master->num_chipselect = sci->num_cs;
1578 master->dma_alignment = 8;
1579 master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
1580 /* the spi->mode bits understood by this driver: */
1581 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1582
1583 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1584 if (IS_ERR(sdd->regs)) {
1585 ret = PTR_ERR(sdd->regs);
1586 goto err0;
1587 }
1588
1589 if (sci->cfg_gpio && sci->cfg_gpio()) {
1590 dev_err(&pdev->dev, "Unable to config gpio\n");
1591 ret = -EBUSY;
1592 goto err0;
1593 }
1594
1595 /* Setup clocks */
1596 sdd->clk = devm_clk_get(&pdev->dev, "spi");
1597 if (IS_ERR(sdd->clk)) {
1598 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1599 ret = PTR_ERR(sdd->clk);
1600 goto err0;
1601 }
1602
1603 snprintf(clk_name, sizeof(clk_name), "spi_busclk%d", sci->src_clk_nr);
1604 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1605 if (IS_ERR(sdd->src_clk)) {
1606 dev_err(&pdev->dev,
1607 "Unable to acquire clock '%s'\n", clk_name);
1608 ret = PTR_ERR(sdd->src_clk);
1609 goto err2;
1610 }
1611#ifdef CONFIG_PM
1612 pm_runtime_use_autosuspend(&pdev->dev);
1613 pm_runtime_enable(&pdev->dev);
1614 pm_runtime_get_sync(&pdev->dev);
1615
1616 sdd->pinctrl = devm_pinctrl_get(&pdev->dev);
1617 if (IS_ERR(sdd->pinctrl)) {
1618 dev_warn(&pdev->dev, "Couldn't get pinctrl.\n");
1619 sdd->pinctrl = NULL;
1620 }
1621
1622 if (sdd->pinctrl) {
1623 sdd->pin_def = pinctrl_lookup_state(sdd->pinctrl, PINCTRL_STATE_DEFAULT);
1624 if (IS_ERR(sdd->pin_def)) {
1625 dev_warn(&pdev->dev, "Not define default state.\n");
1626 sdd->pin_def = NULL;
1627 }
1628
1629 sdd->pin_idle = pinctrl_lookup_state(sdd->pinctrl, PINCTRL_STATE_IDLE);
1630 if (IS_ERR(sdd->pin_idle)) {
1631 dev_info(&pdev->dev, "Not use idle state.\n");
1632 sdd->pin_idle = NULL;
1633 }
1634 }
1635#else
1636
1637 exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
1638
1639 if (clk_prepare_enable(sdd->clk)) {
1640 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1641 ret = -EBUSY;
1642 goto err0;
1643 }
1644
1645 if (clk_prepare_enable(sdd->src_clk)) {
1646 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1647 ret = -EBUSY;
1648 goto err2;
1649 }
1650#endif
1651
1652 if (of_property_read_u32(pdev->dev.of_node, "spi-clkoff-time",
1653 (int *)&(sdd->spi_clkoff_time))) {
1654 dev_err(&pdev->dev, "spi clkoff-time is empty(Default: 0ms)\n");
1655 sdd->spi_clkoff_time = 0;
1656 } else {
1657 dev_err(&pdev->dev, "spi clkoff-time %d\n", sdd->spi_clkoff_time);
1658 }
1659
1660 if (of_property_read_u32(pdev->dev.of_node,
1661 "samsung,spi-fifosize", &fifosize)) {
1662 dev_err(&pdev->dev, "PORT %d fifosize is not specified\n",
1663 sdd->port_id);
1664 ret = -EINVAL;
1665 goto err3;
1666 } else {
1667 sdd->port_conf->fifo_lvl_mask[sdd->port_id] = (fifosize << 1) - 1;
1668 dev_info(&pdev->dev, "PORT %d fifo_lvl_mask = 0x%x\n",
1669 sdd->port_id, sdd->port_conf->fifo_lvl_mask[sdd->port_id]);
1670 }
1671
1672 /* Setup Deufult Mode */
1673 s3c64xx_spi_hwinit(sdd, sdd->port_id);
1674
1675 spin_lock_init(&sdd->lock);
1676 init_completion(&sdd->xfer_completion);
1677 INIT_LIST_HEAD(&sdd->queue);
1678
1679 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1680 "spi-s3c64xx", sdd);
1681 if (ret != 0) {
1682 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1683 irq, ret);
1684 goto err3;
1685 }
1686
1687 if (1
1688#ifdef ENABLE_SENSORS_FPRINT_SECURE
1689 && (sdd->port_id != CONFIG_SENSORS_FP_SPI_NUMBER)
1690#endif
1691#ifdef CONFIG_ESE_SECURE
1692 && (sdd->port_id != CONFIG_ESE_SECURE_SPI_PORT)
1693#endif
1694 ) {
1695 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1696 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1697 sdd->regs + S3C64XX_SPI_INT_EN);
1698 }
1699
1700#ifdef CONFIG_PM
1701 pm_runtime_mark_last_busy(&pdev->dev);
1702 pm_runtime_put_sync(&pdev->dev);
1703#endif
1704
1705 if (spi_register_master(master)) {
1706 dev_err(&pdev->dev, "cannot register SPI master\n");
1707 ret = -EBUSY;
1708 goto err3;
1709 }
1710
1711 list_add_tail(&sci->node, &drvdata_list);
1712
1713 sdd->is_probed = 1;
1714#ifdef CONFIG_PM
1715 if (sci->domain == DOMAIN_TOP)
1716 pm_runtime_set_autosuspend_delay(&pdev->dev,
1717 sdd->spi_clkoff_time);
1718 else
1719 pm_runtime_set_autosuspend_delay(&pdev->dev,
1720 SPI_AUTOSUSPEND_TIMEOUT);
1721#endif
1722
1723 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1724 sdd->port_id, master->num_chipselect);
1725 dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%ld, Tx-%ld]\n",
1726 mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
1727 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1728
1729 ret = device_create_file(&pdev->dev, &dev_attr_spi_dbg);
1730 if (ret < 0)
1731 dev_err(&pdev->dev, "failed to create sysfs file.\n");
1732 sci->dbg_mode = 0;
1733
1734 return 0;
1735
1736err3:
1737#ifdef CONFIG_PM
1738 pm_runtime_disable(&pdev->dev);
1739#endif
1740 clk_disable_unprepare(sdd->src_clk);
1741err2:
1742 clk_disable_unprepare(sdd->clk);
1743err0:
1744 platform_set_drvdata(pdev, NULL);
1745 spi_master_put(master);
1746
1747 return ret;
1748}
1749
/*
 * Tear down one controller instance: disable runtime PM, unregister
 * from the SPI core, mask the error interrupts, drop the clocks and
 * mark the IP idle for the Exynos power-mode layer.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

#ifdef CONFIG_PM
	pm_runtime_disable(&pdev->dev);
#endif

	spi_unregister_master(master);

	/* Mask all interrupts before the clocks go away */
	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	exynos_update_ip_idle_status(sdd->idle_ip_index, 1);

	/* Drop the extra reference taken by spi_master_get() above */
	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);

	return 0;
}
1774
1775#ifdef CONFIG_PM
1776static void s3c64xx_spi_pin_ctrl(struct device *dev, int en)
1777{
1778 struct spi_master *master = dev_get_drvdata(dev);
1779 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1780 struct pinctrl_state *pin_stat;
1781
1782 if (!sdd->pin_idle)
1783 return;
1784
1785 pin_stat = en ? sdd->pin_def : sdd->pin_idle;
1786 if (!IS_ERR(pin_stat)) {
1787 sdd->pinctrl->state = NULL;
1788 if (pinctrl_select_state(sdd->pinctrl, pin_stat))
1789 dev_err(dev, "could not set pinctrl.\n");
1790 } else {
1791 dev_warn(dev, "pinctrl stat is null pointer.\n");
1792 }
1793}
1794
/*
 * Runtime-suspend: gate the clocks, flag the IP idle and release the
 * DMA channels so the DMA controller may power down too, then move
 * pins to their idle state.
 */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	/* Only disable clocks that are actually enabled (paths differ by domain) */
	if (__clk_get_enable_count(sdd->clk))
		clk_disable_unprepare(sdd->clk);
	if (__clk_get_enable_count(sdd->src_clk))
		clk_disable_unprepare(sdd->src_clk);

	exynos_update_ip_idle_status(sdd->idle_ip_index, 1);

	/* Free DMA channels */
	if (sci->dma_mode == DMA_MODE && sdd->is_probed && sdd->ops != NULL) {
	#ifdef CONFIG_ARM64
		/* 64-bit: the pl330 API takes the channel as unsigned long */
		sdd->ops->release((unsigned long)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((unsigned long)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	#else
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	#endif
		/* Mark channels gone so resume re-acquires them */
		sdd->rx_dma.ch = NULL;
		sdd->tx_dma.ch = NULL;
	}

	s3c64xx_spi_pin_ctrl(dev, 0);

	return 0;
}
1829
/*
 * Runtime-resume: restore the default pin state, re-acquire DMA
 * channels and ungate the clocks. Note the CAM1/ISP branch below is
 * spliced in by the preprocessor as an 'else if' of the DOMAIN_TOP
 * check - keep that structure intact when editing.
 */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	s3c64xx_spi_pin_ctrl(dev, 1);

	if (sci->dma_mode == DMA_MODE && sdd->is_probed) {
		/* Acquire DMA channels (retry until the DMA driver is up) */
		while (!acquire_dma(sdd))
			usleep_range(10000, 11000);
	}

	if (sci->domain == DOMAIN_TOP) {
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

#if defined(CONFIG_VIDEO_EXYNOS_FIMC_IS) || defined(CONFIG_VIDEO_EXYNOS_FIMC_IS2)
	/* Camera-domain ports additionally need a full hw re-init */
	else if (sci->domain == DOMAIN_CAM1 || sci->domain == DOMAIN_ISP) {
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);

		s3c64xx_spi_hwinit(sdd, sdd->port_id);
	}
#endif

	return 0;
}
1862#endif /* CONFIG_PM */
1863
1864#ifdef CONFIG_PM_SLEEP
/*
 * Common system-sleep suspend body, shared by the normal and noirq
 * suspend callbacks. Stops the SPI core queue, drops clocks (directly
 * when runtime PM is compiled out, otherwise via the runtime-suspend
 * callback if it has not already run).
 */
static int s3c64xx_spi_suspend_operation(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
#ifndef CONFIG_PM
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
#endif

	int ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

#ifndef CONFIG_PM
	if (sci->domain == DOMAIN_TOP) {
		/* Disable the clock */
		clk_disable_unprepare(sdd->src_clk);
		clk_disable_unprepare(sdd->clk);
		exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
	}
#endif
	/* If runtime PM has not already suspended us, do it by hand now */
	if (!pm_runtime_status_suspended(dev))
		s3c64xx_spi_runtime_suspend(dev);

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}
1894
/*
 * Common system-sleep resume body, shared by the normal and noirq
 * resume callbacks: undo runtime suspend if needed, re-init the
 * hardware for TOP-domain ports, then restart the SPI core queue.
 */
static int s3c64xx_spi_resume_operation(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	int ret;

	if (!pm_runtime_status_suspended(dev))
		s3c64xx_spi_runtime_resume(dev);

	if (sci->domain == DOMAIN_TOP) {
		/* Enable the clock */
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);

		if (sci->cfg_gpio)
			sci->cfg_gpio();

		/* Secure ports defer hw re-init to the next transfer */
		if (sci->secure_mode)
			sci->need_hw_init = 1;
		else
			s3c64xx_spi_hwinit(sdd, sdd->port_id);

#ifdef CONFIG_PM
		/* Disable the clock - runtime PM re-enables it on demand */
		clk_disable_unprepare(sdd->src_clk);
		clk_disable_unprepare(sdd->clk);
		exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
#endif
	}

	/* Start the queue running */
	ret = spi_master_resume(master);
	if (ret)
		dev_err(dev, "problem starting queue (%d)\n", ret);
	else
		dev_dbg(dev, "resumed\n");

	return ret;
}
1936
1937static int s3c64xx_spi_suspend(struct device *dev)
1938{
1939 struct spi_master *master = dev_get_drvdata(dev);
1940 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1941 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1942
1943 if (sci->dma_mode != DMA_MODE)
1944 return 0;
1945
1946 dev_dbg(dev, "spi suspend is handled in device suspend, dma mode = %d\n",
1947 sci->dma_mode);
1948 return s3c64xx_spi_suspend_operation(dev);
1949}
1950
1951static int s3c64xx_spi_suspend_noirq(struct device *dev)
1952{
1953 struct spi_master *master = dev_get_drvdata(dev);
1954 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1955 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1956
1957 if (sci->dma_mode == DMA_MODE)
1958 return 0;
1959
1960 dev_dbg(dev, "spi suspend is handled in suspend_noirq, dma mode = %d\n",
1961 sci->dma_mode);
1962 return s3c64xx_spi_suspend_operation(dev);
1963}
1964
1965static int s3c64xx_spi_resume(struct device *dev)
1966{
1967 struct spi_master *master = dev_get_drvdata(dev);
1968 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1969 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1970
1971 if (sci->dma_mode != DMA_MODE)
1972 return 0;
1973
1974 dev_dbg(dev, "spi resume is handled in device resume, dma mode = %d\n",
1975 sci->dma_mode);
1976 return s3c64xx_spi_resume_operation(dev);
1977}
1978
1979static int s3c64xx_spi_resume_noirq(struct device *dev)
1980{
1981 struct spi_master *master = dev_get_drvdata(dev);
1982 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1983 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1984
1985 if (sci->dma_mode == DMA_MODE)
1986 return 0;
1987
1988 dev_dbg(dev, "spi resume is handled in resume_noirq, dma mode = %d\n",
1989 sci->dma_mode);
1990 return s3c64xx_spi_resume_operation(dev);
1991}
1992#else
/* CONFIG_PM_SLEEP disabled: system-sleep suspend is a no-op */
static int s3c64xx_spi_suspend(struct device *dev)
{
	return 0;
}
1997
/* CONFIG_PM_SLEEP disabled: system-sleep resume is a no-op */
static int s3c64xx_spi_resume(struct device *dev)
{
	return 0;
}
2002#endif /* CONFIG_PM_SLEEP */
2003
/* System-sleep (normal + noirq phase) and runtime PM callbacks */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend_noirq, s3c64xx_spi_resume_noirq)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
2010
/*
 * Per-SoC port configurations: per-port FIFO level masks, status
 * register bit positions and clocking/high-speed capabilities.
 */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos5_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos543x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos742x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos758x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

/*
 * Generic "exynos-spi" config: fifo_lvl_mask entries start at 0 and
 * are filled in at probe time from the DT "samsung,spi-fifosize"
 * property (see s3c64xx_spi_probe()).
 */
static struct s3c64xx_spi_port_config exynos_spi_port_config = {
	.fifo_lvl_mask	= { 0, },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2078
/* Non-DT (board file) device name to port-config mapping */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	}, {
		.name		= "exynos5410-spi",
		.driver_data	= (kernel_ulong_t)&exynos5_spi_port_config,
	}, {
		.name		= "exynos543x-spi",
		.driver_data	= (kernel_ulong_t)&exynos543x_spi_port_config,
	}, {
		.name		= "exynos742x-spi",
		.driver_data	= (kernel_ulong_t)&exynos742x_spi_port_config,
	}, {
		.name		= "exynos758x-spi",
		.driver_data	= (kernel_ulong_t)&exynos758x_spi_port_config,
	}, {
		.name		= "exynos-spi",
		.driver_data	= (kernel_ulong_t)&exynos_spi_port_config,
	},
	{ },
};
2110
2111#ifdef CONFIG_OF
/* DT compatible string to port-config mapping (see get_port_config()) */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5410-spi",
			.data = (void *)&exynos5_spi_port_config,
	},
	{ .compatible = "samsung,exynos543x-spi",
			.data = (void *)&exynos543x_spi_port_config,
	},
	{ .compatible = "samsung,exynos742x-spi",
			.data = (void *)&exynos742x_spi_port_config,
	},
	{ .compatible = "samsung,exynos758x-spi",
			.data = (void *)&exynos758x_spi_port_config,
	},
	{ .compatible = "samsung,exynos-spi",
			.data = (void *)&exynos_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
2134#endif /* CONFIG_OF */
2135
/*
 * .probe is intentionally absent: it is passed to
 * platform_driver_probe() in s3c64xx_spi_init() instead, so the
 * driver only binds to devices present at init time.
 */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
2146
/*
 * Registered at subsys_initcall time so SPI buses exist before
 * dependent device drivers probe. Also hooks the CPU-idle notifier
 * that flags controllers for re-init after LPA exit.
 */
static int __init s3c64xx_spi_init(void)
{
#ifdef CONFIG_CPU_IDLE
	exynos_pm_register_notifier(&s3c64xx_spi_notifier_block);
#endif
	return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
subsys_initcall(s3c64xx_spi_init);
2155
/* Module unload: unregister the platform driver (remove() runs per device) */
static void __exit s3c64xx_spi_exit(void)
{
	platform_driver_unregister(&s3c64xx_spi_driver);
}
module_exit(s3c64xx_spi_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");