usb: gadget: f_mtp: Avoid race between mtp_read and mtp_function_disable
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / spi / spi-s3c64xx.c
CommitLineData
ca632f55 1/*
230d42d4
JB
2 * Copyright (C) 2009 Samsung Electronics Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
230d42d4
JB
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
1cac41cb 18#include <linux/workqueue.h>
c2573128 19#include <linux/interrupt.h>
230d42d4
JB
20#include <linux/delay.h>
21#include <linux/clk.h>
1cac41cb 22#include <linux/clk-provider.h>
230d42d4 23#include <linux/dma-mapping.h>
78843727 24#include <linux/dmaengine.h>
230d42d4 25#include <linux/platform_device.h>
b97b6621 26#include <linux/pm_runtime.h>
230d42d4 27#include <linux/spi/spi.h>
1c20c200 28#include <linux/gpio.h>
2b908075
TA
29#include <linux/of.h>
30#include <linux/of_gpio.h>
1cac41cb 31#include <soc/samsung/exynos-powermode.h>
230d42d4 32
436d42c6 33#include <linux/platform_data/spi-s3c64xx.h>
230d42d4 34
1cac41cb
MB
35#include <linux/dma/dma-pl330.h>
36
37#ifdef CONFIG_CPU_IDLE
38#include <soc/samsung/exynos-pm.h>
39#endif
40
41#include "../pinctrl/core.h"
42
43static LIST_HEAD(drvdata_list);
44
45#define MAX_SPI_PORTS 16
46#define SPI_AUTOSUSPEND_TIMEOUT (100)
a5238e36 47
230d42d4
JB
48/* Registers and bit-fields */
49
50#define S3C64XX_SPI_CH_CFG 0x00
51#define S3C64XX_SPI_CLK_CFG 0x04
52#define S3C64XX_SPI_MODE_CFG 0x08
53#define S3C64XX_SPI_SLAVE_SEL 0x0C
54#define S3C64XX_SPI_INT_EN 0x10
55#define S3C64XX_SPI_STATUS 0x14
56#define S3C64XX_SPI_TX_DATA 0x18
57#define S3C64XX_SPI_RX_DATA 0x1C
58#define S3C64XX_SPI_PACKET_CNT 0x20
59#define S3C64XX_SPI_PENDING_CLR 0x24
60#define S3C64XX_SPI_SWAP_CFG 0x28
61#define S3C64XX_SPI_FB_CLK 0x2C
62
63#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
64#define S3C64XX_SPI_CH_SW_RST (1<<5)
65#define S3C64XX_SPI_CH_SLAVE (1<<4)
66#define S3C64XX_SPI_CPOL_L (1<<3)
67#define S3C64XX_SPI_CPHA_B (1<<2)
68#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
69#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
70
71#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
72#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
73#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
75bf3361 74#define S3C64XX_SPI_PSR_MASK 0xff
230d42d4
JB
75
76#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
77#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
78#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
79#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
80#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
81#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
82#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
83#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
84#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
85#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
86#define S3C64XX_SPI_MODE_4BURST (1<<0)
87
1cac41cb
MB
88#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
89#define S3C64XX_SPI_SLAVE_NSC_CNT_1 (1<<4)
230d42d4
JB
90#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
91#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
92
230d42d4
JB
93#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
94#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
95#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
96#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
97#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
98#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
99#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
100
101#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
102#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
103#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
104#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
105#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
106#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
107
108#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
109
110#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
111#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
112#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
113#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
114#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
115
116#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
117#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
118#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
119#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
120#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
121#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
122#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
123#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
124
125#define S3C64XX_SPI_FBCLK_MSK (3<<0)
126
a5238e36
TA
127#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
128#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
129 (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
130#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
131#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
132 FIFO_LVL_MASK(i))
230d42d4
JB
133
134#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
135#define S3C64XX_SPI_TRAILCNT_OFF 19
136
137#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
138
1cac41cb
MB
139#define S3C64XX_SPI_DMA_4BURST_LEN 0x4
140#define S3C64XX_SPI_DMA_1BURST_LEN 0x1
141
230d42d4
JB
142#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
143
230d42d4
JB
144#define RXBUSY (1<<2)
145#define TXBUSY (1<<3)
146
1cac41cb 147#define SPI_DBG_MODE (0x1 << 0)
82ab8cd7 148
a5238e36
TA
149/**
150 * struct s3c64xx_spi_info - SPI Controller hardware info
151 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
152 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
153 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter.
154 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
155 * @clk_from_cmu: True, if the controller does not include a clock mux and
156 * prescaler unit.
157 *
158 * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
159 * differ in some aspects such as the size of the fifo and spi bus clock
160 * setup. Such differences are specified to the driver using this structure
161 * which is provided as driver data to the driver.
162 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];	/* per-port mask for the {TX|RX}_FIFO_LVL field */
	int	rx_lvl_offset;			/* bit offset of RX_FIFO_LVL in SPI_STATUS */
	int	tx_st_done;			/* bit position of TX_DONE in SPI_STATUS */
	bool	high_speed;			/* controller supports HIGH_SPEED_EN */
	bool	clk_from_cmu;			/* no internal mux/prescaler; rate set via CMU clock */
};
170
1cac41cb
MB
171static ssize_t
172spi_dbg_show(struct device *dev, struct device_attribute *attr, char *buf)
173{
174 ssize_t ret = 0;
175
176 ret += snprintf(buf + ret, PAGE_SIZE - ret,
177 "SPI Debug Mode Configuration.\n");
178 ret += snprintf(buf + ret, PAGE_SIZE - ret,
179 "0 : Change DBG mode.\n");
180 ret += snprintf(buf + ret, PAGE_SIZE - ret,
181 "1 : Change Normal mode.\n");
182
183 if (ret < PAGE_SIZE - 1) {
184 ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
185 } else {
186 buf[PAGE_SIZE-2] = '\n';
187 buf[PAGE_SIZE-1] = '\0';
188 ret = PAGE_SIZE-1;
189 }
190
191 return ret;
192}
193
194static ssize_t
195spi_dbg_store(struct device *dev, struct device_attribute *attr,
196 const char *buf, size_t count)
197{
198 struct spi_master *master = dev_get_drvdata(dev);
199 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
200 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
201 struct s3c64xx_spi_info *check_sci;
202 int ret, input_cmd;
203
204 ret = sscanf(buf, "%d", &input_cmd);
205
206 list_for_each_entry(check_sci, &drvdata_list, node) {
207 if (check_sci != sci)
208 continue;
209
210 switch(input_cmd) {
211 case 0:
212 printk(KERN_ERR "Change SPI%d to Loopback(DBG) mode\n",
213 sdd->port_id);
214 sci->dbg_mode = SPI_DBG_MODE;
215 break;
216 case 1:
217 printk(KERN_ERR "Change SPI%d to normal mode\n",
218 sdd->port_id);
219 sci->dbg_mode = 0;
220 break;
221 default:
222 printk(KERN_ERR "Wrong Command!(0/1)\n");
223 }
224 }
225
226 return count;
227}
228
229static DEVICE_ATTR(spi_dbg, 0640, spi_dbg_show, spi_dbg_store);
230
/*
 * Dump the core SPI controller registers; called from the transfer error
 * path to aid post-mortem debugging of failed transfers.
 */
static void s3c64xx_spi_dump_reg(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	struct device *dev = &sdd->pdev->dev;

	dev_err(dev, "Register dump for SPI\n"
		"	CH_CFG	0x%08x\n"
		"	MODE_CFG	0x%08x\n"
		"	CS_REG	0x%08x\n"
		"	STATUS	0x%08x\n"
		"	PACKET_CNT	0x%08x\n"
		, readl(regs + S3C64XX_SPI_CH_CFG)
		, readl(regs + S3C64XX_SPI_MODE_CFG)
		, readl(regs + S3C64XX_SPI_SLAVE_SEL)
		, readl(regs + S3C64XX_SPI_STATUS)
		, readl(regs + S3C64XX_SPI_PACKET_CNT)
	);

}
230d42d4
JB
250static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
251{
230d42d4
JB
252 void __iomem *regs = sdd->regs;
253 unsigned long loops;
254 u32 val;
255
256 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
257
7d859ff4
KK
258 val = readl(regs + S3C64XX_SPI_CH_CFG);
259 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
260 writel(val, regs + S3C64XX_SPI_CH_CFG);
261
230d42d4
JB
262 val = readl(regs + S3C64XX_SPI_CH_CFG);
263 val |= S3C64XX_SPI_CH_SW_RST;
264 val &= ~S3C64XX_SPI_CH_HS_EN;
265 writel(val, regs + S3C64XX_SPI_CH_CFG);
266
267 /* Flush TxFIFO*/
268 loops = msecs_to_loops(1);
269 do {
270 val = readl(regs + S3C64XX_SPI_STATUS);
a5238e36 271 } while (TX_FIFO_LVL(val, sdd) && loops--);
230d42d4 272
be7852a8
MB
273 if (loops == 0)
274 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
275
230d42d4
JB
276 /* Flush RxFIFO*/
277 loops = msecs_to_loops(1);
278 do {
279 val = readl(regs + S3C64XX_SPI_STATUS);
a5238e36 280 if (RX_FIFO_LVL(val, sdd))
230d42d4
JB
281 readl(regs + S3C64XX_SPI_RX_DATA);
282 else
283 break;
284 } while (loops--);
285
be7852a8
MB
286 if (loops == 0)
287 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
288
230d42d4
JB
289 val = readl(regs + S3C64XX_SPI_CH_CFG);
290 val &= ~S3C64XX_SPI_CH_SW_RST;
291 writel(val, regs + S3C64XX_SPI_CH_CFG);
292
293 val = readl(regs + S3C64XX_SPI_MODE_CFG);
294 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
295 writel(val, regs + S3C64XX_SPI_MODE_CFG);
230d42d4
JB
296}
297
82ab8cd7 298static void s3c64xx_spi_dmacb(void *data)
39d3e807 299{
82ab8cd7
BK
300 struct s3c64xx_spi_driver_data *sdd;
301 struct s3c64xx_spi_dma_data *dma = data;
39d3e807
BK
302 unsigned long flags;
303
054ebcc4 304 if (dma->direction == DMA_DEV_TO_MEM)
82ab8cd7
BK
305 sdd = container_of(data,
306 struct s3c64xx_spi_driver_data, rx_dma);
307 else
308 sdd = container_of(data,
309 struct s3c64xx_spi_driver_data, tx_dma);
310
39d3e807
BK
311 spin_lock_irqsave(&sdd->lock, flags);
312
054ebcc4 313 if (dma->direction == DMA_DEV_TO_MEM) {
82ab8cd7
BK
314 sdd->state &= ~RXBUSY;
315 if (!(sdd->state & TXBUSY))
316 complete(&sdd->xfer_completion);
317 } else {
318 sdd->state &= ~TXBUSY;
319 if (!(sdd->state & RXBUSY))
320 complete(&sdd->xfer_completion);
321 }
39d3e807
BK
322
323 spin_unlock_irqrestore(&sdd->lock, flags);
324}
325
1cac41cb
MB
/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */

/* Client identity passed to the legacy Samsung DMA API when requesting
 * and releasing channels. */
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
	.name = "samsung-spi-dma",
};
331
78843727 332static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
1cac41cb 333 unsigned len, dma_addr_t buf)
78843727
AB
334{
335 struct s3c64xx_spi_driver_data *sdd;
1cac41cb
MB
336 struct samsung_dma_prep info;
337 struct samsung_dma_config config;
338 u32 modecfg;
b1a8e78d 339
78843727
AB
340 if (dma->direction == DMA_DEV_TO_MEM) {
341 sdd = container_of((void *)dma,
342 struct s3c64xx_spi_driver_data, rx_dma);
1cac41cb
MB
343 config.direction = sdd->rx_dma.direction;
344 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
345 config.width = sdd->cur_bpw / 8;
346 modecfg = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
347 config.maxburst = modecfg & S3C64XX_SPI_MODE_4BURST ?
348 S3C64XX_SPI_DMA_4BURST_LEN :
349 S3C64XX_SPI_DMA_1BURST_LEN;
350
351 #ifdef CONFIG_ARM64
352 sdd->ops->config((unsigned long)sdd->rx_dma.ch, &config);
353 #else
354 sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
355 #endif
78843727
AB
356 } else {
357 sdd = container_of((void *)dma,
358 struct s3c64xx_spi_driver_data, tx_dma);
1cac41cb
MB
359 config.direction = sdd->tx_dma.direction;
360 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
361 config.width = sdd->cur_bpw / 8;
362 modecfg = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
363 config.maxburst = modecfg & S3C64XX_SPI_MODE_4BURST ?
364 S3C64XX_SPI_DMA_4BURST_LEN :
365 S3C64XX_SPI_DMA_1BURST_LEN;
366
367 #ifdef CONFIG_ARM64
368 sdd->ops->config((unsigned long)sdd->tx_dma.ch, &config);
369 #else
370 sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
371 #endif
78843727
AB
372 }
373
1cac41cb
MB
374 info.cap = DMA_SLAVE;
375 info.len = len;
376 info.fp = s3c64xx_spi_dmacb;
377 info.fp_param = dma;
378 info.direction = dma->direction;
379 info.buf = buf;
380
381#ifdef CONFIG_ARM64
382 sdd->ops->prepare((unsigned long)dma->ch, &info);
383 sdd->ops->trigger((unsigned long)dma->ch);
384#else
385 sdd->ops->prepare((enum dma_ch)dma->ch, &info);
386 sdd->ops->trigger((enum dma_ch)dma->ch);
387#endif
388
389}
390
391static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
392{
393 struct samsung_dma_req req;
394 struct device *dev = &sdd->pdev->dev;
395
396 sdd->ops = samsung_dma_get_ops();
397
398 req.cap = DMA_SLAVE;
399 req.client = &s3c64xx_spi_dma_client;
78843727 400
1cac41cb
MB
401 if (sdd->rx_dma.ch == NULL)
402 sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach,
403 &req, dev, "rx");
404 if (sdd->tx_dma.ch == NULL)
405 sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach,
406 &req, dev, "tx");
78843727 407
1cac41cb 408 return 1;
78843727
AB
409}
410
1cac41cb
MB
411static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel);
412
78843727
AB
413static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
414{
415 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
1cac41cb
MB
416 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
417#ifdef CONFIG_PM
fb9d044e 418 int ret;
1cac41cb 419#endif
78843727 420
1cac41cb
MB
421#ifndef CONFIG_PM
422 if (sci->dma_mode == DMA_MODE) {
c12f9643 423 /* Acquire DMA channels */
1cac41cb
MB
424 while (!acquire_dma(sdd))
425 usleep_range(10000, 11000);
fb9d044e 426 }
1cac41cb 427#endif
fb9d044e 428
1cac41cb
MB
429#ifdef CONFIG_PM
430 ret = pm_runtime_get_sync(&sdd->pdev->dev);
431 if(ret < 0)
432 return ret;
433#endif
fb9d044e 434
1cac41cb
MB
435 if (sci->need_hw_init)
436 s3c64xx_spi_hwinit(sdd, sdd->port_id);
437
438 return 0;
78843727
AB
439}
440
/*
 * spi_master->unprepare_transfer_hardware hook: inverse of
 * s3c64xx_spi_prepare_transfer(). Without CONFIG_PM the DMA channels are
 * released here; with CONFIG_PM the runtime-PM reference is dropped so
 * autosuspend can power the controller down after the timeout.
 */
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
#ifdef CONFIG_PM
	int ret;
#endif

#ifndef CONFIG_PM
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	/* Free DMA channels */
	if (sci->dma_mode == DMA_MODE) {
	#ifdef CONFIG_ARM64
		sdd->ops->release((unsigned long)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((unsigned long)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	#else
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	#endif
		/* Mark channels free so acquire_dma() re-requests them */
		sdd->rx_dma.ch = NULL;
		sdd->tx_dma.ch = NULL;
	}
#endif

#ifdef CONFIG_PM
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	ret = pm_runtime_put_autosuspend(&sdd->pdev->dev);
	if (ret < 0)
		return ret;
#endif

	return 0;
}
478
1cac41cb
MB
/* Abort an in-flight DMA transfer on the given channel (error path only). */
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				struct s3c64xx_spi_dma_data *dma)
{
#ifdef CONFIG_ARM64
	sdd->ops->stop((unsigned long)dma->ch);
#else
	sdd->ops->stop((enum dma_ch)dma->ch);
#endif
}
3f295887 488
1cac41cb
MB
/* Ask the DMA driver to dump state for the given channel (error path only). */
static void s3c64xx_dma_debug(struct s3c64xx_spi_driver_data *sdd,
				struct s3c64xx_spi_dma_data *dma)
{
#ifdef CONFIG_ARM64
	sdd->ops->debug((unsigned long)dma->ch);
#else
	sdd->ops->debug((enum dma_ch)dma->ch);
#endif
}
498
230d42d4
JB
/*
 * Program MODE_CFG/CH_CFG (and the packet counter) for one transfer and,
 * in DMA mode, kick off the DMA descriptors. The exact register write
 * ordering here is hardware-mandated; do not reorder.
 */
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg, dma_burst_len;

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~S3C64XX_SPI_MODE_4BURST;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;

		/* Use 4-beat bursts only when the length divides evenly */
		dma_burst_len = (sdd->cur_bpw / 8) * S3C64XX_SPI_DMA_4BURST_LEN;
		if (!(xfer->len % dma_burst_len))
			modecfg |= S3C64XX_SPI_MODE_4BURST;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	/* Commit burst setting first, then clear the DMA-request bits so
	 * they can be re-enabled selectively below */
	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
		} else {
			/* PIO: stuff the whole transfer into the TX FIFO
			 * with the access width matching bits-per-word */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed RX path only valid for CPHA=0 at >= 30 MHz */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
577
1cac41cb
MB
/*
 * Assert chip-select for 'spi', first deselecting any device whose CS was
 * deliberately left asserted after a previous message (sdd->tgl_spi).
 * GPIO CS (cs->line) and controller CS (SLAVE_SEL) are handled separately.
 */
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
					struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;

	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
			/* Deselect the last toggled device */
			cs = sdd->tgl_spi->controller_data;
			if (cs->line != 0)
				gpio_set_value(cs->line,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			/* Quiese the signals */
			writel(spi->mode & SPI_CS_HIGH ?
				0 : S3C64XX_SPI_SLAVE_SIG_INACT,
				sdd->regs + S3C64XX_SPI_SLAVE_SEL);
		}
		sdd->tgl_spi = NULL;
	}

	/* Assert this device's GPIO chip-select (0 means no GPIO CS) */
	cs = spi->controller_data;
	if (cs->line != 0)
		gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);

	if (cs->cs_mode == AUTO_CS_MODE) {
		/* Set auto chip selection */
		writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
			| S3C64XX_SPI_SLAVE_AUTO
			| S3C64XX_SPI_SLAVE_NSC_CNT_2,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	} else {
		/* Start the signals */
		writel(spi->mode & SPI_CS_HIGH ?
			S3C64XX_SPI_SLAVE_SIG_INACT : 0,
			sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	}
}
615
1cac41cb
MB
/*
 * Wait for one transfer to finish. In DMA mode this blocks on the
 * completion signalled by s3c64xx_spi_dmacb(); in PIO mode it busy-waits
 * for the RX FIFO to fill, then drains it into xfer->rx_buf.
 * Returns 0 on success, -EIO on timeout.
 */
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	/* NOTE(review): xfer->len * 8 * 1000 can overflow int for
	 * transfers >= ~256 KiB — confirm max transfer size upstream. */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms = (ms * 10) + 30; /* some tolerance */
	ms = max(ms, 100); /* minimum timeout */

	if (dma_mode) {
		val = msecs_to_jiffies(ms) + 10;
		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
	} else {
		u32 status;
		val = msecs_to_loops(ms);
		do {
			status = readl(regs + S3C64XX_SPI_STATUS);
		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
	}

	/* val == 0 means the completion/poll deadline expired */
	if (!val)
		return -EIO;

	if (dma_mode) {
		u32 status;

		/*
		 * DmaTx returns after simply writing data in the FIFO,
		 * w/o waiting for real transmission on the bus to finish.
		 * DmaRx returns only after Dma read data from FIFO which
		 * needs bus transmission to finish, so we don't worry if
		 * Xfer involved Rx(with or without Tx).
		 */
		if (xfer->rx_buf == NULL) {
			val = msecs_to_loops(10);
			status = readl(regs + S3C64XX_SPI_STATUS);
			while ((TX_FIFO_LVL(status, sdd)
				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
					&& --val) {
				cpu_relax();
				status = readl(regs + S3C64XX_SPI_STATUS);
			}

			if (!val)
				return -EIO;
		}
	} else {
		/* If it was only Tx */
		if (xfer->rx_buf == NULL) {
			sdd->state &= ~TXBUSY;
			return 0;
		}

		/* Drain the RX FIFO with the access width matching bpw */
		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				xfer->rx_buf, xfer->len);
			break;
		}
		sdd->state &= ~RXBUSY;
	}

	return 0;
}
691
1cac41cb
MB
692static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
693 struct spi_device *spi)
694{
695 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
696
697 if (sdd->tgl_spi == spi)
698 sdd->tgl_spi = NULL;
699
700 if(cs->line != 0)
701 gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
702
703 if (cs->cs_mode != AUTO_CS_MODE) {
704 /* Quiese the signals */
705 writel(spi->mode & SPI_CS_HIGH
706 ? 0 : S3C64XX_SPI_SLAVE_SIG_INACT,
707 sdd->regs + S3C64XX_SPI_SLAVE_SEL);
708 }
709}
710
230d42d4
JB
/*
 * Reprogram the controller for the current mode/bpw/speed held in 'sdd'.
 * The clock is gated during reconfiguration and re-enabled afterwards.
 * When the clock comes straight from the CMU the rate is set on the
 * source clock instead of the internal prescaler.
 */
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	/* Transfer size selects matching byte/half-word swap settings when
	 * swap mode is enabled for this board */
	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		if (sci->swap_mode == SWAP_MODE) {
			writel(S3C64XX_SPI_SWAP_TX_EN |
				S3C64XX_SPI_SWAP_TX_BYTE |
				S3C64XX_SPI_SWAP_TX_HALF_WORD |
				S3C64XX_SPI_SWAP_RX_EN |
				S3C64XX_SPI_SWAP_RX_BYTE |
				S3C64XX_SPI_SWAP_RX_HALF_WORD,
				regs + S3C64XX_SPI_SWAP_CFG);
		}
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		if (sci->swap_mode == SWAP_MODE) {
			writel(S3C64XX_SPI_SWAP_TX_EN |
				S3C64XX_SPI_SWAP_TX_BYTE |
				S3C64XX_SPI_SWAP_RX_EN |
				S3C64XX_SPI_SWAP_RX_BYTE,
				regs + S3C64XX_SPI_SWAP_CFG);
		}
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		if (sci->swap_mode == SWAP_MODE)
			writel(0, regs + S3C64XX_SPI_SWAP_CFG);
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Trace the active configuration when debug mode is on */
	if (sci->dbg_mode & SPI_DBG_MODE) {
		dev_err(&sdd->pdev->dev, "SPI_MODE_%d", sdd->cur_mode & 0x3);
		dev_err(&sdd->pdev->dev, "BTS : %d", sdd->cur_bpw);
	}
}
806
1cac41cb 807#define XFER_DMAADDR_INVALID DMA_BIT_MASK(36)
230d42d4 808
1cac41cb
MB
809static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
810 struct spi_message *msg)
811{
812 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
813 struct device *dev = &sdd->pdev->dev;
814 struct spi_transfer *xfer;
815
816 if ((msg->is_dma_mapped) || (sci->dma_mode != DMA_MODE))
817 return 0;
818
819 /* First mark all xfer unmapped */
820 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
821 xfer->rx_dma = XFER_DMAADDR_INVALID;
822 xfer->tx_dma = XFER_DMAADDR_INVALID;
823 }
824
825 /* Map until end or first fail */
826 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
827
828 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
829 continue;
830
831 if (xfer->tx_buf != NULL) {
832 xfer->tx_dma = dma_map_single(dev,
833 (void *)xfer->tx_buf, xfer->len,
834 DMA_TO_DEVICE);
835 if (dma_mapping_error(dev, xfer->tx_dma)) {
836 dev_err(dev, "dma_map_single Tx failed\n");
837 xfer->tx_dma = XFER_DMAADDR_INVALID;
838 return -ENOMEM;
839 }
840 }
841
842 if (xfer->rx_buf != NULL) {
843 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
844 xfer->len, DMA_FROM_DEVICE);
845 if (dma_mapping_error(dev, xfer->rx_dma)) {
846 dev_err(dev, "dma_map_single Rx failed\n");
847 dma_unmap_single(dev, xfer->tx_dma,
848 xfer->len, DMA_TO_DEVICE);
849 xfer->tx_dma = XFER_DMAADDR_INVALID;
850 xfer->rx_dma = XFER_DMAADDR_INVALID;
851 return -ENOMEM;
852 }
853 }
854 }
855
856 return 0;
857}
858
859static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
860 struct spi_message *msg)
861{
862 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
863 struct device *dev = &sdd->pdev->dev;
864 struct spi_transfer *xfer;
865
866 if ((msg->is_dma_mapped) || (sci->dma_mode != DMA_MODE))
867 return;
868
869 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
870
871 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
872 continue;
873
874 if (xfer->rx_buf != NULL
875 && xfer->rx_dma != XFER_DMAADDR_INVALID)
876 dma_unmap_single(dev, xfer->rx_dma,
877 xfer->len, DMA_FROM_DEVICE);
878
879 if (xfer->tx_buf != NULL
880 && xfer->tx_dma != XFER_DMAADDR_INVALID)
881 dma_unmap_single(dev, xfer->tx_dma,
882 xfer->len, DMA_TO_DEVICE);
883 }
884}
885
/*
 * spi_master->transfer_one_message hook: run every transfer in 'msg'.
 * DMA is used for transfers larger than half the FIFO (when the board is
 * in DMA mode); otherwise transfers are chopped into FIFO-sized PIO
 * chunks via the try_transfer retry label. Always returns 0 — the
 * per-message result is reported through msg->status.
 */
static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
					    struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct spi_transfer *xfer;
	int status = 0, cs_toggle = 0;
	/* Backups for the PIO chunking path, which temporarily rewrites
	 * the xfer's buffer pointers and length */
	const void *origin_tx_buf = NULL;
	void *origin_rx_buf = NULL;
	unsigned target_len = 0, origin_len = 0;
	unsigned fifo_lvl = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	u32 speed;
	u8 bpw;

	/* If Master's(controller) state differs from that needed by Slave */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Map all the transfers if needed */
	if (s3c64xx_spi_map_mssg(sdd, msg)) {
		dev_err(&spi->dev,
			"Xfer: Unable to map message buffers!\n");
		status = -ENOMEM;
		goto out;
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		unsigned long flags;
		int use_dma;

		reinit_completion(&sdd->xfer_completion);

		/* Only BPW and Speed may change across transfers */
		bpw = xfer->bits_per_word;
		speed = xfer->speed_hz ? : spi->max_speed_hz;

		if (xfer->len % (bpw / 8)) {
			dev_err(&spi->dev,
				"Xfer length(%u) not a multiple of word size(%u)\n",
				xfer->len, bpw / 8);
			status = -EIO;
			goto out;
		}

		if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
			sdd->cur_bpw = bpw;
			sdd->cur_speed = speed;
			s3c64xx_spi_config(sdd);
		}

		/* verify cpu mode */
		if (sci->dma_mode != DMA_MODE) {
			use_dma = 0;

			/* backup original tx, rx buf ptr & xfer length */
			origin_tx_buf = xfer->tx_buf;
			origin_rx_buf = xfer->rx_buf;
			origin_len = xfer->len;

			/* PIO only: clamp each chunk to the FIFO capacity */
			target_len = xfer->len;
			if (xfer->len > fifo_lvl)
				xfer->len = fifo_lvl;
		} else {
			/* Polling method for xfers not bigger than FIFO capacity */
			if (xfer->len <= fifo_lvl) {
				use_dma = 0;
			} else {
				use_dma = 1;
			}
		}
try_transfer:
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* CS/datapath enable order depends on the CS mode */
		if (cs->cs_mode == AUTO_CS_MODE) {
			/* Slave Select */
			enable_cs(sdd, spi);

			enable_datapath(sdd, spi, xfer, use_dma);
		} else {
			enable_datapath(sdd, spi, xfer, use_dma);

			/* Slave Select */
			enable_cs(sdd, spi);
		}

		spin_unlock_irqrestore(&sdd->lock, flags);

		status = wait_for_xfer(sdd, xfer, use_dma);

		if (status) {
			dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len);

			/* Stop any DMA that is still marked busy */
			if (use_dma) {
				if (xfer->tx_buf != NULL
						&& (sdd->state & TXBUSY)) {
					s3c64xx_dma_debug(sdd, &sdd->tx_dma);
					s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
				}
				if (xfer->rx_buf != NULL
						&& (sdd->state & RXBUSY)) {
					s3c64xx_dma_debug(sdd, &sdd->rx_dma);
					s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
				}
			}

			s3c64xx_spi_dump_reg(sdd);
			flush_fifo(sdd);

			goto out;
		}

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			/* Hint that the next mssg is gonna be
			   for the same device */
			if (list_is_last(&xfer->transfer_list,
						&msg->transfers))
				cs_toggle = 1;
		}

		msg->actual_length += xfer->len;

		flush_fifo(sdd);

		/* PIO chunking: advance pointers and retry until the whole
		 * original transfer has been shifted */
		if (sci->dma_mode != DMA_MODE) {
			target_len -= xfer->len;

			if (xfer->tx_buf != NULL)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf != NULL)
				xfer->rx_buf += xfer->len;

			if (target_len > 0) {
				if (target_len > fifo_lvl)
					xfer->len = fifo_lvl;
				else
					xfer->len = target_len;
				goto try_transfer;
			}

			/* restore original tx, rx buf_ptr & xfer length */
			xfer->tx_buf = origin_tx_buf;
			xfer->rx_buf = origin_rx_buf;
			xfer->len = origin_len;
		}
	}

out:
	/* Keep CS asserted only on a clean cs_change hint */
	if (!cs_toggle || status)
		disable_cs(sdd, spi);
	else
		sdd->tgl_spi = spi;

	s3c64xx_spi_unmap_mssg(sdd, msg);

	msg->status = status;

	spi_finalize_current_message(master);

	return 0;
}
230d42d4 1070
2b908075 1071static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
2b908075
TA
1072 struct spi_device *spi)
1073{
1074 struct s3c64xx_spi_csinfo *cs;
4732cc63 1075 struct device_node *slave_np, *data_np = NULL;
2b908075 1076 u32 fb_delay = 0;
1cac41cb 1077 u32 cs_mode = 0;
2b908075
TA
1078
1079 slave_np = spi->dev.of_node;
1080 if (!slave_np) {
1081 dev_err(&spi->dev, "device node not found\n");
1082 return ERR_PTR(-EINVAL);
1083 }
1084
06455bbc 1085 data_np = of_get_child_by_name(slave_np, "controller-data");
2b908075
TA
1086 if (!data_np) {
1087 dev_err(&spi->dev, "child node 'controller-data' not found\n");
1088 return ERR_PTR(-EINVAL);
1089 }
1090
1091 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1092 if (!cs) {
1cac41cb 1093 dev_err(&spi->dev, "could not allocate memory for controller data\n");
06455bbc 1094 of_node_put(data_np);
2b908075
TA
1095 return ERR_PTR(-ENOMEM);
1096 }
1097
1cac41cb
MB
1098 if (of_get_property(data_np, "cs-gpio", NULL)) {
1099 cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
1100 if (!gpio_is_valid(cs->line))
1101 cs->line = 0;
1102 } else {
1103 cs->line = 0;
1104 }
1105
2b908075
TA
1106 of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
1107 cs->fb_delay = fb_delay;
1cac41cb
MB
1108
1109 if (of_property_read_u32(data_np,
1110 "samsung,spi-chip-select-mode", &cs_mode)) {
1111 cs->cs_mode = AUTO_CS_MODE;
1112 } else {
1113 if (cs_mode)
1114 cs->cs_mode = AUTO_CS_MODE;
1115 else
1116 cs->cs_mode = MANUAL_CS_MODE;
1117 }
1118
06455bbc 1119 of_node_put(data_np);
2b908075
TA
1120 return cs;
1121}
1122
230d42d4
JB
1123/*
1124 * Here we only check the validity of requested configuration
1125 * and save the configuration in a local data-structure.
1126 * The controller is actually configured only just before we
1127 * get a message to transfer.
1128 */
1129static int s3c64xx_spi_setup(struct spi_device *spi)
1130{
1131 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
1132 struct s3c64xx_spi_driver_data *sdd;
ad7de729 1133 struct s3c64xx_spi_info *sci;
1cac41cb
MB
1134 struct spi_message *msg;
1135 unsigned long flags;
2b908075 1136 int err;
230d42d4 1137
2b908075 1138 sdd = spi_master_get_devdata(spi->master);
1cac41cb 1139 if (!cs && spi->dev.of_node) {
5c725b34 1140 cs = s3c64xx_get_slave_ctrldata(spi);
2b908075
TA
1141 spi->controller_data = cs;
1142 }
1143
1144 if (IS_ERR_OR_NULL(cs)) {
230d42d4
JB
1145 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
1146 return -ENODEV;
1147 }
1cac41cb
MB
1148#ifdef ENABLE_SENSORS_FPRINT_SECURE
1149 if (sdd->port_id == CONFIG_SENSORS_FP_SPI_NUMBER)
1150 return 0;
1151#endif
1152#ifdef CONFIG_ESE_SECURE
1153 if (sdd->port_id == CONFIG_ESE_SECURE_SPI_PORT) {
1154 dev_err(&spi->dev, "%s.....(%d)\n",__func__, sdd->port_id);
1155 return 0;
1156 }
1157#endif
230d42d4 1158
0149871c 1159 if (!spi_get_ctldata(spi)) {
1cac41cb
MB
1160 if(cs->line != 0) {
1161 err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
306972ce
NKC
1162 dev_name(&spi->dev));
1163 if (err) {
1164 dev_err(&spi->dev,
1165 "Failed to get /CS gpio [%d]: %d\n",
1cac41cb 1166 cs->line, err);
306972ce
NKC
1167 goto err_gpio_req;
1168 }
1c20c200 1169 }
1c20c200 1170
3146beec 1171 spi_set_ctldata(spi, cs);
230d42d4
JB
1172 }
1173
230d42d4 1174 sci = sdd->cntrlr_info;
230d42d4 1175
1cac41cb
MB
1176 spin_lock_irqsave(&sdd->lock, flags);
1177
1178 list_for_each_entry(msg, &sdd->queue, queue) {
1179 /* Is some mssg is already queued for this device */
1180 if (msg->spi == spi) {
1181 dev_err(&spi->dev,
1182 "setup: attempt while mssg in queue!\n");
1183 spin_unlock_irqrestore(&sdd->lock, flags);
1184 err = -EBUSY;
1185 goto err_msgq;
1186 }
1187 }
1188
1189 spin_unlock_irqrestore(&sdd->lock, flags);
1190
1191 if (spi->bits_per_word != 8
1192 && spi->bits_per_word != 16
1193 && spi->bits_per_word != 32) {
1194 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
1195 spi->bits_per_word);
1196 err = -EINVAL;
1197 goto setup_exit;
1198 }
1199
1200#ifdef CONFIG_PM
b97b6621 1201 pm_runtime_get_sync(&sdd->pdev->dev);
1cac41cb 1202#endif
b97b6621 1203
230d42d4 1204 /* Check if we can provide the requested rate */
a5238e36 1205 if (!sdd->port_conf->clk_from_cmu) {
b42a81ca
JB
1206 u32 psr, speed;
1207
1208 /* Max possible */
1cac41cb
MB
1209 speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
1210 if (!speed) {
1211 dev_err(&spi->dev, "clock rate of speed is 0\n");
1212 err = -EINVAL;
1213 goto setup_exit;
1214 }
b42a81ca
JB
1215
1216 if (spi->max_speed_hz > speed)
1217 spi->max_speed_hz = speed;
1218
1cac41cb 1219 psr = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
b42a81ca
JB
1220 psr &= S3C64XX_SPI_PSR_MASK;
1221 if (psr == S3C64XX_SPI_PSR_MASK)
1222 psr--;
1223
1cac41cb 1224 speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
b42a81ca
JB
1225 if (spi->max_speed_hz < speed) {
1226 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
1227 psr++;
1228 } else {
1229 err = -EINVAL;
1230 goto setup_exit;
1231 }
1232 }
230d42d4 1233
1cac41cb 1234 speed = (unsigned int)clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
2b908075 1235 if (spi->max_speed_hz >= speed) {
b42a81ca 1236 spi->max_speed_hz = speed;
2b908075 1237 } else {
e1b0f0df
MB
1238 dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
1239 spi->max_speed_hz);
230d42d4 1240 err = -EINVAL;
2b908075
TA
1241 goto setup_exit;
1242 }
230d42d4
JB
1243 }
1244
1cac41cb
MB
1245 disable_cs(sdd, spi);
1246
1247#ifdef CONFIG_PM
483867ee
HK
1248 pm_runtime_mark_last_busy(&sdd->pdev->dev);
1249 pm_runtime_put_autosuspend(&sdd->pdev->dev);
1cac41cb
MB
1250#endif
1251
1252 if (sci->dbg_mode & SPI_DBG_MODE) {
1253 dev_err(&spi->dev, "SPI feedback-delay : %d\n", cs->fb_delay);
1254 dev_err(&spi->dev, "SPI clock : %u(%lu)\n",
1255 sdd->cur_speed, clk_get_rate(sdd->src_clk));
1256 dev_err(&spi->dev, "SPI %s CS mode", cs->cs_mode ? "AUTO" : "MANUAL");
1257 }
1258
2b908075 1259 return 0;
b97b6621 1260
230d42d4 1261setup_exit:
230d42d4 1262 /* setup() returns with device de-selected */
1cac41cb 1263 disable_cs(sdd, spi);
230d42d4 1264
1cac41cb
MB
1265err_msgq:
1266 gpio_free(cs->line);
2b908075
TA
1267 spi_set_ctldata(spi, NULL);
1268
1269err_gpio_req:
5bee3b94
SN
1270 if (spi->dev.of_node)
1271 kfree(cs);
2b908075 1272
230d42d4
JB
1273 return err;
1274}
1275
1c20c200
TA
1276static void s3c64xx_spi_cleanup(struct spi_device *spi)
1277{
1278 struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
1279
1cac41cb
MB
1280 if (cs) {
1281 gpio_free(cs->line);
2b908075
TA
1282 if (spi->dev.of_node)
1283 kfree(cs);
1284 }
1c20c200
TA
1285 spi_set_ctldata(spi, NULL);
1286}
1287
c2573128
MB
1288static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
1289{
1290 struct s3c64xx_spi_driver_data *sdd = data;
1291 struct spi_master *spi = sdd->master;
375981f2 1292 unsigned int val, clr = 0;
c2573128 1293
375981f2 1294 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
c2573128 1295
375981f2
G
1296 if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
1297 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
c2573128 1298 dev_err(&spi->dev, "RX overrun\n");
375981f2
G
1299 }
1300 if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
1301 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
c2573128 1302 dev_err(&spi->dev, "RX underrun\n");
375981f2
G
1303 }
1304 if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
1305 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
c2573128 1306 dev_err(&spi->dev, "TX overrun\n");
375981f2
G
1307 }
1308 if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
1309 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
c2573128 1310 dev_err(&spi->dev, "TX underrun\n");
375981f2
G
1311 }
1312
1313 /* Clear the pending irq by setting and then clearing it */
1314 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
1315 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
c2573128
MB
1316
1317 return IRQ_HANDLED;
1318}
1319
230d42d4
JB
/*
 * Reset the controller to a known default state: CS inactive, interrupts
 * off, clock source selected, pending irqs cleared, swap/burst disabled,
 * trailing count set, FIFOs flushed. Called from probe and after power
 * domain/LPA transitions. The register write order follows the original
 * sequence and should not be rearranged.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

#ifdef ENABLE_SENSORS_FPRINT_SECURE
	/* Port owned by the secure world - do not touch its registers */
	if (channel == CONFIG_SENSORS_FP_SPI_NUMBER)
		return;
#endif
#ifdef CONFIG_ESE_SECURE
	if (channel == CONFIG_ESE_SECURE_SPI_PORT)
		return;
#endif
	/* Force reconfiguration on the next transfer */
	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	/* Clock source select only exists when the clock is not from CMU */
	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	/* Single-word transfers with the default trailing byte count */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);

	/* Hardware is in its default state again */
	sci->need_hw_init = 0;
}
1367
2b908075 1368#ifdef CONFIG_OF
75bf3361 1369static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
2b908075
TA
1370{
1371 struct s3c64xx_spi_info *sci;
1372 u32 temp;
1cac41cb 1373 const char *domain;
2b908075
TA
1374
1375 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1cac41cb
MB
1376 if (!sci) {
1377 dev_err(dev, "memory allocation for spi_info failed\n");
2b908075 1378 return ERR_PTR(-ENOMEM);
1cac41cb
MB
1379 }
1380
1381 if (of_get_property(dev->of_node, "dma-mode", NULL))
1382 sci->dma_mode = DMA_MODE;
1383 else
1384 sci->dma_mode = CPU_MODE;
1385
1386 if (of_get_property(dev->of_node, "swap-mode", NULL))
1387 sci->swap_mode = SWAP_MODE;
1388 else
1389 sci->swap_mode = NO_SWAP_MODE;
1390
1391 if (of_get_property(dev->of_node, "secure-mode", NULL))
1392 sci->secure_mode = SECURE_MODE;
1393 else
1394 sci->secure_mode = NONSECURE_MODE;
2b908075
TA
1395
1396 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
75bf3361 1397 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
2b908075
TA
1398 sci->src_clk_nr = 0;
1399 } else {
1400 sci->src_clk_nr = temp;
1401 }
1402
1403 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
75bf3361 1404 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
2b908075
TA
1405 sci->num_cs = 1;
1406 } else {
1407 sci->num_cs = temp;
1408 }
1409
1cac41cb
MB
1410 sci->domain = DOMAIN_TOP;
1411 if (!of_property_read_string(dev->of_node, "domain", &domain)) {
1412 if (strncmp(domain, "isp", 3) == 0)
1413 sci->domain = DOMAIN_ISP;
1414 else if (strncmp(domain, "cam1", 4) == 0)
1415 sci->domain = DOMAIN_CAM1;
1416 }
1417
2b908075
TA
1418 return sci;
1419}
1420#else
/* !CONFIG_OF: configuration comes from board-file platform data instead */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
2b908075
TA
1425#endif
1426
1427static const struct of_device_id s3c64xx_spi_dt_match[];
1428
a5238e36
TA
1429static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
1430 struct platform_device *pdev)
1431{
2b908075
TA
1432#ifdef CONFIG_OF
1433 if (pdev->dev.of_node) {
1434 const struct of_device_id *match;
1435 match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
1436 return (struct s3c64xx_spi_port_config *)match->data;
1437 }
1438#endif
a5238e36
TA
1439 return (struct s3c64xx_spi_port_config *)
1440 platform_get_device_id(pdev)->driver_data;
1441}
1442
1cac41cb
MB
1443#ifdef CONFIG_CPU_IDLE
/*
 * CPU-idle notifier: after leaving low-power audio (LPA) mode the SPI
 * blocks may have lost state, so flag every registered controller for
 * re-initialisation on its next use.
 */
static int s3c64xx_spi_notifier(struct notifier_block *self,
				unsigned long cmd, void *v)
{
	struct s3c64xx_spi_info *sci;

	switch (cmd) {
	case LPA_EXIT:
		/* Walk all probed controllers (see drvdata_list in probe) */
		list_for_each_entry(sci, &drvdata_list, node)
			sci->need_hw_init = 1;
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block s3c64xx_spi_notifier_block = {
	.notifier_call = s3c64xx_spi_notifier,
};
1462#endif /* CONFIG_CPU_IDLE */
1463
2deff8d6 1464static int s3c64xx_spi_probe(struct platform_device *pdev)
230d42d4 1465{
2b908075 1466 struct resource *mem_res;
b5be04d3 1467 struct resource *res;
230d42d4 1468 struct s3c64xx_spi_driver_data *sdd;
8074cf06 1469 struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
230d42d4 1470 struct spi_master *master;
c2573128 1471 int ret, irq;
a24d850b 1472 char clk_name[16];
1cac41cb
MB
1473 int fifosize;
1474
1475 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
1476 if (ret)
1477 return ret;
230d42d4 1478
2b908075
TA
1479 if (!sci && pdev->dev.of_node) {
1480 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1481 if (IS_ERR(sci))
1482 return PTR_ERR(sci);
230d42d4
JB
1483 }
1484
2b908075 1485 if (!sci) {
230d42d4
JB
1486 dev_err(&pdev->dev, "platform_data missing!\n");
1487 return -ENODEV;
1488 }
1489
1cac41cb
MB
1490#if !defined(CONFIG_VIDEO_EXYNOS_FIMC_IS) && !defined(CONFIG_VIDEO_EXYNOS_FIMC_IS2)
1491 if (sci->domain != DOMAIN_TOP)
1492 return -ENODEV;
1493#endif
1494
230d42d4
JB
1495 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1496 if (mem_res == NULL) {
1497 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1498 return -ENXIO;
1499 }
1500
c2573128
MB
1501 irq = platform_get_irq(pdev, 0);
1502 if (irq < 0) {
1503 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1504 return irq;
1505 }
1506
230d42d4
JB
1507 master = spi_alloc_master(&pdev->dev,
1508 sizeof(struct s3c64xx_spi_driver_data));
1509 if (master == NULL) {
1510 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1511 return -ENOMEM;
1512 }
1513
230d42d4
JB
1514 platform_set_drvdata(pdev, master);
1515
1516 sdd = spi_master_get_devdata(master);
a5238e36 1517 sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
230d42d4
JB
1518 sdd->master = master;
1519 sdd->cntrlr_info = sci;
1520 sdd->pdev = pdev;
1521 sdd->sfr_start = mem_res->start;
1cac41cb
MB
1522 sdd->is_probed = 0;
1523 sdd->ops = NULL;
1524
1525 sdd->idle_ip_index = exynos_get_idle_ip_index(dev_name(&pdev->dev));
1526
2b908075
TA
1527 if (pdev->dev.of_node) {
1528 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1529 if (ret < 0) {
75bf3361
JH
1530 dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1531 ret);
2b908075
TA
1532 goto err0;
1533 }
1cac41cb 1534 pdev->id = sdd->port_id = ret;
2b908075
TA
1535 } else {
1536 sdd->port_id = pdev->id;
1537 }
230d42d4 1538
1cac41cb
MB
1539 if(sdd->port_id >= MAX_SPI_PORTS) {
1540 dev_err(&pdev->dev, "the port %d exceeded MAX_SPI_PORTS(%d)\n"
1541 , sdd->port_id, MAX_SPI_PORTS);
1542 goto err0;
1543 }
1544
230d42d4
JB
1545 sdd->cur_bpw = 8;
1546
1cac41cb
MB
1547 if (sci->dma_mode == DMA_MODE) {
1548 if (!sdd->pdev->dev.of_node) {
1549 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1550 if (!res) {
1551 dev_err(&pdev->dev,
1552 "Unable to get SPI tx dma resource\n");
1553 return -ENXIO;
1554 }
7e995556 1555 sdd->tx_dma.dmach = res->start;
b5be04d3 1556
1cac41cb
MB
1557 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1558 if (!res) {
1559 dev_err(&pdev->dev,
1560 "Unable to get SPI rx dma resource\n");
1561 return -ENXIO;
1562 }
7e995556 1563 sdd->rx_dma.dmach = res->start;
1cac41cb 1564 }
2b908075 1565
1cac41cb
MB
1566 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1567 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1568 }
2b908075
TA
1569
1570 master->dev.of_node = pdev->dev.of_node;
a5238e36 1571 master->bus_num = sdd->port_id;
230d42d4 1572 master->setup = s3c64xx_spi_setup;
1c20c200 1573 master->cleanup = s3c64xx_spi_cleanup;
ad2a99af 1574 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1cac41cb 1575 master->transfer_one_message = s3c64xx_spi_transfer_one_message;
ad2a99af 1576 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
230d42d4
JB
1577 master->num_chipselect = sci->num_cs;
1578 master->dma_alignment = 8;
1cac41cb 1579 master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
230d42d4
JB
1580 /* the spi->mode bits understood by this driver: */
1581 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1582
b0ee5605
TR
1583 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1584 if (IS_ERR(sdd->regs)) {
1585 ret = PTR_ERR(sdd->regs);
4eb77006 1586 goto err0;
230d42d4
JB
1587 }
1588
00ab5392 1589 if (sci->cfg_gpio && sci->cfg_gpio()) {
230d42d4
JB
1590 dev_err(&pdev->dev, "Unable to config gpio\n");
1591 ret = -EBUSY;
4eb77006 1592 goto err0;
230d42d4
JB
1593 }
1594
1595 /* Setup clocks */
4eb77006 1596 sdd->clk = devm_clk_get(&pdev->dev, "spi");
230d42d4
JB
1597 if (IS_ERR(sdd->clk)) {
1598 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1599 ret = PTR_ERR(sdd->clk);
00ab5392 1600 goto err0;
230d42d4
JB
1601 }
1602
1cac41cb 1603 snprintf(clk_name, sizeof(clk_name), "spi_busclk%d", sci->src_clk_nr);
4eb77006 1604 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
b0d5d6e5 1605 if (IS_ERR(sdd->src_clk)) {
230d42d4 1606 dev_err(&pdev->dev,
a24d850b 1607 "Unable to acquire clock '%s'\n", clk_name);
b0d5d6e5 1608 ret = PTR_ERR(sdd->src_clk);
4eb77006 1609 goto err2;
230d42d4 1610 }
1cac41cb
MB
1611#ifdef CONFIG_PM
1612 pm_runtime_use_autosuspend(&pdev->dev);
1613 pm_runtime_enable(&pdev->dev);
1614 pm_runtime_get_sync(&pdev->dev);
1615
1616 sdd->pinctrl = devm_pinctrl_get(&pdev->dev);
1617 if (IS_ERR(sdd->pinctrl)) {
1618 dev_warn(&pdev->dev, "Couldn't get pinctrl.\n");
1619 sdd->pinctrl = NULL;
1620 }
1621
1622 if (sdd->pinctrl) {
1623 sdd->pin_def = pinctrl_lookup_state(sdd->pinctrl, PINCTRL_STATE_DEFAULT);
1624 if (IS_ERR(sdd->pin_def)) {
1625 dev_warn(&pdev->dev, "Not define default state.\n");
1626 sdd->pin_def = NULL;
1627 }
1628
1629 sdd->pin_idle = pinctrl_lookup_state(sdd->pinctrl, PINCTRL_STATE_IDLE);
1630 if (IS_ERR(sdd->pin_idle)) {
1631 dev_info(&pdev->dev, "Not use idle state.\n");
1632 sdd->pin_idle = NULL;
1633 }
1634 }
1635#else
1636
1637 exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
1638
1639 if (clk_prepare_enable(sdd->clk)) {
1640 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1641 ret = -EBUSY;
1642 goto err0;
1643 }
230d42d4 1644
9f667bff 1645 if (clk_prepare_enable(sdd->src_clk)) {
a24d850b 1646 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
230d42d4 1647 ret = -EBUSY;
4eb77006 1648 goto err2;
230d42d4 1649 }
1cac41cb 1650#endif
230d42d4 1651
1cac41cb
MB
1652 if (of_property_read_u32(pdev->dev.of_node, "spi-clkoff-time",
1653 (int *)&(sdd->spi_clkoff_time))) {
1654 dev_err(&pdev->dev, "spi clkoff-time is empty(Default: 0ms)\n");
1655 sdd->spi_clkoff_time = 0;
1656 } else {
1657 dev_err(&pdev->dev, "spi clkoff-time %d\n", sdd->spi_clkoff_time);
1658 }
1659
1660 if (of_property_read_u32(pdev->dev.of_node,
1661 "samsung,spi-fifosize", &fifosize)) {
1662 dev_err(&pdev->dev, "PORT %d fifosize is not specified\n",
1663 sdd->port_id);
1664 ret = -EINVAL;
1665 goto err3;
1666 } else {
1667 sdd->port_conf->fifo_lvl_mask[sdd->port_id] = (fifosize << 1) - 1;
1668 dev_info(&pdev->dev, "PORT %d fifo_lvl_mask = 0x%x\n",
1669 sdd->port_id, sdd->port_conf->fifo_lvl_mask[sdd->port_id]);
1670 }
483867ee 1671
230d42d4 1672 /* Setup Deufult Mode */
a5238e36 1673 s3c64xx_spi_hwinit(sdd, sdd->port_id);
230d42d4
JB
1674
1675 spin_lock_init(&sdd->lock);
1676 init_completion(&sdd->xfer_completion);
1cac41cb 1677 INIT_LIST_HEAD(&sdd->queue);
230d42d4 1678
4eb77006
JH
1679 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1680 "spi-s3c64xx", sdd);
c2573128
MB
1681 if (ret != 0) {
1682 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1683 irq, ret);
4eb77006 1684 goto err3;
c2573128
MB
1685 }
1686
1cac41cb
MB
1687 if (1
1688#ifdef ENABLE_SENSORS_FPRINT_SECURE
1689 && (sdd->port_id != CONFIG_SENSORS_FP_SPI_NUMBER)
1690#endif
1691#ifdef CONFIG_ESE_SECURE
1692 && (sdd->port_id != CONFIG_ESE_SECURE_SPI_PORT)
1693#endif
1694 ) {
1695 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1696 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1697 sdd->regs + S3C64XX_SPI_INT_EN);
1698 }
c2573128 1699
1cac41cb
MB
1700#ifdef CONFIG_PM
1701 pm_runtime_mark_last_busy(&pdev->dev);
1702 pm_runtime_put_sync(&pdev->dev);
1703#endif
1704
1705 if (spi_register_master(master)) {
1706 dev_err(&pdev->dev, "cannot register SPI master\n");
1707 ret = -EBUSY;
483867ee 1708 goto err3;
230d42d4
JB
1709 }
1710
1cac41cb
MB
1711 list_add_tail(&sci->node, &drvdata_list);
1712
1713 sdd->is_probed = 1;
1714#ifdef CONFIG_PM
1715 if (sci->domain == DOMAIN_TOP)
1716 pm_runtime_set_autosuspend_delay(&pdev->dev,
1717 sdd->spi_clkoff_time);
1718 else
1719 pm_runtime_set_autosuspend_delay(&pdev->dev,
1720 SPI_AUTOSUSPEND_TIMEOUT);
1721#endif
1722
75bf3361 1723 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
a5238e36 1724 sdd->port_id, master->num_chipselect);
1cac41cb 1725 dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%ld, Tx-%ld]\n",
ed425dcf 1726 mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
82ab8cd7 1727 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
230d42d4 1728
1cac41cb
MB
1729 ret = device_create_file(&pdev->dev, &dev_attr_spi_dbg);
1730 if (ret < 0)
1731 dev_err(&pdev->dev, "failed to create sysfs file.\n");
1732 sci->dbg_mode = 0;
483867ee 1733
230d42d4
JB
1734 return 0;
1735
483867ee 1736err3:
1cac41cb 1737#ifdef CONFIG_PM
3c863792 1738 pm_runtime_disable(&pdev->dev);
1cac41cb 1739#endif
9f667bff 1740 clk_disable_unprepare(sdd->src_clk);
4eb77006 1741err2:
9f667bff 1742 clk_disable_unprepare(sdd->clk);
230d42d4 1743err0:
1cac41cb 1744 platform_set_drvdata(pdev, NULL);
230d42d4
JB
1745 spi_master_put(master);
1746
1747 return ret;
1748}
1749
/*
 * Tear down one SPI port: unregister the master first (stops new traffic),
 * then mask interrupts, drop the clocks and mark the IP idle. The extra
 * spi_master_get() reference keeps `master` valid until the final put.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

#ifdef CONFIG_PM
	pm_runtime_disable(&pdev->dev);
#endif

	spi_unregister_master(master);

	/* Mask all controller interrupts before the clocks go away */
	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	/* Allow the power framework to idle this IP */
	exynos_update_ip_idle_status(sdd->idle_ip_index, 1);

	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);

	return 0;
}
1774
1775#ifdef CONFIG_PM
1776static void s3c64xx_spi_pin_ctrl(struct device *dev, int en)
1777{
1778 struct spi_master *master = dev_get_drvdata(dev);
1779 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1780 struct pinctrl_state *pin_stat;
1781
1782 if (!sdd->pin_idle)
1783 return;
1784
1785 pin_stat = en ? sdd->pin_def : sdd->pin_idle;
1786 if (!IS_ERR(pin_stat)) {
1787 sdd->pinctrl->state = NULL;
1788 if (pinctrl_select_state(sdd->pinctrl, pin_stat))
1789 dev_err(dev, "could not set pinctrl.\n");
1790 } else {
1791 dev_warn(dev, "pinctrl stat is null pointer.\n");
1792 }
1793}
1794
/*
 * Runtime suspend: gate the clocks (only if still enabled), mark the IP
 * idle, release the pl330 DMA channels and park the pins in their idle
 * state.
 */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	/* Guard against double-disable: only gate clocks still enabled */
	if (__clk_get_enable_count(sdd->clk))
		clk_disable_unprepare(sdd->clk);
	if (__clk_get_enable_count(sdd->src_clk))
		clk_disable_unprepare(sdd->src_clk);

	exynos_update_ip_idle_status(sdd->idle_ip_index, 1);

	/* Free DMA channels */
	if (sci->dma_mode == DMA_MODE && sdd->is_probed && sdd->ops != NULL) {
	/* Channel handle width differs between ARM and ARM64 pl330 APIs */
	#ifdef CONFIG_ARM64
		sdd->ops->release((unsigned long)sdd->rx_dma.ch,
					&s3c64xx_spi_dma_client);
		sdd->ops->release((unsigned long)sdd->tx_dma.ch,
					&s3c64xx_spi_dma_client);
	#else
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
					&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
					&s3c64xx_spi_dma_client);
	#endif
		sdd->rx_dma.ch = NULL;
		sdd->tx_dma.ch = NULL;
	}

	/* Move pins to the pinctrl "idle" state, if one exists */
	s3c64xx_spi_pin_ctrl(dev, 0);

	return 0;
}
1829
/*
 * Runtime resume: restore the pins, re-acquire DMA channels (retrying
 * until pl330 grants them) and re-enable the clocks for the TOP domain.
 * Note the `#if`-guarded `else if`: for FIMC-IS builds the camera/ISP
 * domains also get their clocks and a full hw re-init here.
 */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	/* Back to the pinctrl "default" state */
	s3c64xx_spi_pin_ctrl(dev, 1);

	if (sci->dma_mode == DMA_MODE && sdd->is_probed) {
		/* Acquire DMA channels */
		while (!acquire_dma(sdd))
			usleep_range(10000, 11000);
	}

	if (sci->domain == DOMAIN_TOP) {
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

#if defined(CONFIG_VIDEO_EXYNOS_FIMC_IS) || defined(CONFIG_VIDEO_EXYNOS_FIMC_IS2)
	/* Camera/ISP-domain ports lose register state with their domain */
	else if (sci->domain == DOMAIN_CAM1 || sci->domain == DOMAIN_ISP) {
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);

		s3c64xx_spi_hwinit(sdd, sdd->port_id);
	}
#endif

	return 0;
}
1cac41cb 1862#endif /* CONFIG_PM */
230d42d4 1863
997230d0 1864#ifdef CONFIG_PM_SLEEP
/*
 * Common system-sleep suspend body, shared by the normal and noirq
 * callbacks: quiesce the SPI core queue, gate clocks (directly when
 * !CONFIG_PM, via the runtime-suspend path otherwise) and force a clock
 * reconfiguration on the next transfer.
 */
static int s3c64xx_spi_suspend_operation(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
#ifndef CONFIG_PM
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
#endif

	/* Stop the message queue; bail out if transfers are still in flight */
	int ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

#ifndef CONFIG_PM
	if (sci->domain == DOMAIN_TOP) {
		/* Disable the clock */
		clk_disable_unprepare(sdd->src_clk);
		clk_disable_unprepare(sdd->clk);
		exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
	}
#endif
	/* If runtime PM has not already suspended us, do it by hand */
	if (!pm_runtime_status_suspended(dev))
		s3c64xx_spi_runtime_suspend(dev);

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}
1894
/*
 * Common system-sleep resume body, shared by the normal and noirq
 * callbacks: undo the runtime suspend if needed, re-initialise TOP-domain
 * hardware (deferred via need_hw_init for secure ports) and restart the
 * SPI core queue.
 */
static int s3c64xx_spi_resume_operation(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	int ret;

	if (!pm_runtime_status_suspended(dev))
		s3c64xx_spi_runtime_resume(dev);

	if (sci->domain == DOMAIN_TOP) {
		/* Enable the clock */
		exynos_update_ip_idle_status(sdd->idle_ip_index, 0);
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);

		if (sci->cfg_gpio)
			sci->cfg_gpio();

		/* Secure ports cannot be touched here; re-init lazily */
		if (sci->secure_mode)
			sci->need_hw_init = 1;
		else
			s3c64xx_spi_hwinit(sdd, sdd->port_id);

#ifdef CONFIG_PM
		/* Runtime PM owns the clocks again - drop them until use.
		 * Disable the clock */
		clk_disable_unprepare(sdd->src_clk);
		clk_disable_unprepare(sdd->clk);
		exynos_update_ip_idle_status(sdd->idle_ip_index, 1);
#endif
	}

	/* Start the queue running */
	ret = spi_master_resume(master);
	if (ret)
		dev_err(dev, "problem starting queue (%d)\n", ret);
	else
		dev_dbg(dev, "resumed\n");

	return ret;
}
1936
1937static int s3c64xx_spi_suspend(struct device *dev)
b97b6621 1938{
9a2a5245 1939 struct spi_master *master = dev_get_drvdata(dev);
b97b6621 1940 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1cac41cb 1941 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
b97b6621 1942
1cac41cb
MB
1943 if (sci->dma_mode != DMA_MODE)
1944 return 0;
b97b6621 1945
1cac41cb
MB
1946 dev_dbg(dev, "spi suspend is handled in device suspend, dma mode = %d\n",
1947 sci->dma_mode);
1948 return s3c64xx_spi_suspend_operation(dev);
b97b6621
MB
1949}
1950
1cac41cb 1951static int s3c64xx_spi_suspend_noirq(struct device *dev)
b97b6621 1952{
9a2a5245 1953 struct spi_master *master = dev_get_drvdata(dev);
b97b6621 1954 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1cac41cb 1955 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
b97b6621 1956
1cac41cb
MB
1957 if (sci->dma_mode == DMA_MODE)
1958 return 0;
8b06d5b8 1959
1cac41cb
MB
1960 dev_dbg(dev, "spi suspend is handled in suspend_noirq, dma mode = %d\n",
1961 sci->dma_mode);
1962 return s3c64xx_spi_suspend_operation(dev);
1963}
1964
1965static int s3c64xx_spi_resume(struct device *dev)
1966{
1967 struct spi_master *master = dev_get_drvdata(dev);
1968 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1969 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1970
1971 if (sci->dma_mode != DMA_MODE)
1972 return 0;
1973
1974 dev_dbg(dev, "spi resume is handled in device resume, dma mode = %d\n",
1975 sci->dma_mode);
1976 return s3c64xx_spi_resume_operation(dev);
1977}
b97b6621 1978
1cac41cb
MB
1979static int s3c64xx_spi_resume_noirq(struct device *dev)
1980{
1981 struct spi_master *master = dev_get_drvdata(dev);
1982 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1983 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1984
1985 if (sci->dma_mode == DMA_MODE)
1986 return 0;
1987
1988 dev_dbg(dev, "spi resume is handled in resume_noirq, dma mode = %d\n",
1989 sci->dma_mode);
1990 return s3c64xx_spi_resume_operation(dev);
1991}
1992#else
/* !CONFIG_PM_SLEEP: the system-sleep hooks compile to no-ops */
static int s3c64xx_spi_suspend(struct device *dev)
{
	return 0;
}

static int s3c64xx_spi_resume(struct device *dev)
{
	return 0;
}
2002#endif /* CONFIG_PM_SLEEP */
b97b6621 2003
e25d0bf9
MB
/*
 * PM callbacks: normal sleep ops handle DMA-mode ports, the noirq ops
 * handle CPU-mode ports, and runtime PM gates clocks/pins/DMA channels
 * between transfers.
 */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend_noirq, s3c64xx_spi_resume_noirq)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
2010
/* S3C2443: one port, 7-bit FIFO level field, high-speed capable. */
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};
2017
/* S3C6410: two ports, 7-bit FIFO level fields. */
static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};
2023
/* S5PV210: two ports; port 0 has a wider (9-bit) FIFO level field. */
static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};
2030
/* Exynos4: three ports; SPI clock is sourced from the CMU. */
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2038
1cac41cb
MB
/* Exynos5410: five ports; clock from the CMU. */
static struct s3c64xx_spi_port_config exynos5_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2046
/* Exynos543x: seven ports; clock from the CMU. */
static struct s3c64xx_spi_port_config exynos543x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2054
/* Exynos742x: eight ports; clock from the CMU. */
static struct s3c64xx_spi_port_config exynos742x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2062
1cac41cb
MB
/* Exynos758x: five ports; clock from the CMU. */
static struct s3c64xx_spi_port_config exynos758x_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x1ff, 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2070
/*
 * Generic "exynos-spi" fallback: empty fifo_lvl_mask — presumably the
 * per-port masks are discovered elsewhere (e.g. from DT) for this
 * compatible; confirm against the probe path.
 */
static struct s3c64xx_spi_port_config exynos_spi_port_config = {
	.fifo_lvl_mask	= { 0, },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};
2078
/*
 * Non-DT (board-file) device ID table. driver_data carries a pointer to
 * the matching port-config table, recovered in probe via the id's
 * kernel_ulong_t value.
 */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	}, {
		.name		= "exynos5410-spi",
		.driver_data	= (kernel_ulong_t)&exynos5_spi_port_config,
	}, {
		.name		= "exynos543x-spi",
		.driver_data	= (kernel_ulong_t)&exynos543x_spi_port_config,
	}, {
		.name		= "exynos742x-spi",
		.driver_data	= (kernel_ulong_t)&exynos742x_spi_port_config,
	}, {
		.name		= "exynos758x-spi",
		.driver_data	= (kernel_ulong_t)&exynos758x_spi_port_config,
	}, {
		.name		= "exynos-spi",
		.driver_data	= (kernel_ulong_t)&exynos_spi_port_config,
	},
	{ },
};
2110
1cac41cb 2111#ifdef CONFIG_OF
/*
 * Device-tree match table; .data points at the port-config table for
 * each compatible, mirroring the platform_device_id table above.
 */
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5410-spi",
			.data = (void *)&exynos5_spi_port_config,
	},
	{ .compatible = "samsung,exynos543x-spi",
			.data = (void *)&exynos543x_spi_port_config,
	},
	{ .compatible = "samsung,exynos742x-spi",
			.data = (void *)&exynos742x_spi_port_config,
	},
	{ .compatible = "samsung,exynos758x-spi",
			.data = (void *)&exynos758x_spi_port_config,
	},
	{ .compatible = "samsung,exynos-spi",
			.data = (void *)&exynos_spi_port_config,
	},
	{ },
};
2133MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1cac41cb 2134#endif /* CONFIG_OF */
2b908075 2135
230d42d4
JB
/*
 * Platform driver glue. Note there is no .probe member here: the probe
 * routine is supplied separately through platform_driver_probe() in
 * s3c64xx_spi_init(), so the driver only binds to devices present at
 * registration time.
 */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
2145MODULE_ALIAS("platform:s3c64xx-spi");
2146
1cac41cb
MB
/*
 * Driver entry point, run at subsys_initcall time so the SPI bus comes
 * up before dependent device drivers probe.
 */
static int __init s3c64xx_spi_init(void)
{
#ifdef CONFIG_CPU_IDLE
	/*
	 * NOTE(review): this notifier stays registered even if
	 * platform_driver_probe() fails below, and s3c64xx_spi_exit()
	 * never unregisters it — confirm whether an unregister call is
	 * needed on the failure/exit paths in this tree.
	 */
	exynos_pm_register_notifier(&s3c64xx_spi_notifier_block);
#endif
	return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
subsys_initcall(s3c64xx_spi_init);
2155
/* Driver exit point: unregister the platform driver. */
static void __exit s3c64xx_spi_exit(void)
{
	/*
	 * NOTE(review): the exynos PM notifier registered in
	 * s3c64xx_spi_init() (under CONFIG_CPU_IDLE) is not unregistered
	 * here — verify this asymmetry is acceptable (e.g. built-in only).
	 */
	platform_driver_unregister(&s3c64xx_spi_driver);
}
module_exit(s3c64xx_spi_exit);
230d42d4
JB
2161
2162MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
2163MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
2164MODULE_LICENSE("GPL");