i2c: at91: manage unexpected RXRDY flag when starting a transfer
drivers/i2c/busses/i2c-at91.c
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>

#define TWI_CLK_HZ		100000			/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8	/* enable DMA if transfer size is bigger than this threshold */

/* AT91 TWI register definitions */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		0x0001	/* Send a Start Condition */
#define	AT91_TWI_STOP		0x0002	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		0x0004	/* Master Transfer Enable */
#define	AT91_TWI_SVDIS		0x0020	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		0x0040	/* SMBus quick command */
#define	AT91_TWI_SWRST		0x0080	/* Software Reset */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		0x1000	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		0x0001	/* Transmission Complete */
#define	AT91_TWI_RXRDY		0x0002	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		0x0004	/* Transmit Holding Register Ready */

#define	AT91_TWI_OVRE		0x0040	/* Overrun Error */
#define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
#define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */

#define	AT91_TWI_INT_MASK \
	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
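/*
 * The mask above evaluates to 0x0107: every interrupt source this driver
 * ever enables or disables in one go.
 */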

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */

struct at91_twi_pdata {
	unsigned clk_max_div;
	unsigned clk_offset;
	bool has_unre_flag;
	bool has_dma_support;
	struct at_dma_slave dma_slave;
};

struct at91_twi_dma {
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct scatterlist sg;
	struct dma_async_tx_descriptor *data_desc;
	enum dma_data_direction direction;
	bool buf_mapped;
	bool xfer_in_progress;
};

struct at91_twi_dev {
	struct device *dev;
	void __iomem *base;
	struct completion cmd_complete;
	struct clk *clk;
	u8 *buf;
	size_t buf_len;
	struct i2c_msg *msg;
	int irq;
	unsigned imr;
	unsigned transfer_status;
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;
	struct at91_twi_pdata *pdata;
	bool use_dma;
	bool recv_len_abort;
	struct at91_twi_dma dma;
};

static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
{
	return readl_relaxed(dev->base + reg);
}

static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
{
	writel_relaxed(val, dev->base + reg);
}

static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
}

static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
	at91_disable_twi_interrupts(dev);
}

static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}

static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
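/*
 * Worked example (hypothetical rates): with a 66 MHz peripheral clock,
 * twi_clk = 100 kHz and offset = 4:
 *   div   = DIV_ROUND_UP(66000000, 200000) - 4 = 326
 *   ckdiv = fls(326 >> 8) = 1
 *   cdiv  = 326 >> 1 = 163
 * which gives 66000000 / (2 * (163 * 2 + 4)) = 100 kHz exactly.
 */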
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	at91_twi_write(dev, AT91_TWI_THR, *dev->buf);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for the TXCOMP or NACK bits to be set in the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is completed. The NACK interrupt has already been enabled,
	 * we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;

	if (dev->buf_len <= 0)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);
	sg_dma_len(&dma->sg) = dev->buf_len;
	sg_dma_address(&dma->sg) = dma_addr;

	txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we get here with no bytes left to read, it means there is
	 * garbage data in RHR, so discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %d\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}
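	/*
	 * Example with hypothetical values: an SMBus block read starts with
	 * buf_len == 1. If the slave returns a length byte of 5, buf_len
	 * becomes 5 after the update above and msg->len is set to 6 (the
	 * length byte plus five data bytes).
	 */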

	/* send stop if the second to last byte has been read */
	if (dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
			 dev->buf_len, DMA_FROM_DEVICE);

	/* The last two bytes have to be read without using dma */
	dev->buf += dev->buf_len - 2;
	dev->buf_len = 2;
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;

	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);
	dma->sg.dma_address = dma_addr;
	sg_dma_len(&dma->sg) = dev->buf_len - 2;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a data byte
	 * has been almost received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is why we ask to send the
	 * stop command not on the last data byte but on the second to last
	 * one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data byte. It might
	 * happen when the i2c slave device sends data too quickly after
	 * receiving the ack from the master: the data byte has been almost
	 * received before we have had a chance to order the stop. In this
	 * case, sending the stop command could cause an RXRDY interrupt
	 * along with a TXCOMP one. It is better to manage the RXRDY interrupt
	 * first in order to not keep garbage data in the Receive Holding
	 * Register for the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interrupts, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus, hence a second NACK is
	 * not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than
	 * TXCOMP to detect transmission failure.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the
	 * TXCOMP interrupt. If the TXCOMP interrupt were enabled before
	 * writing into THR, the interrupt handler would be called immediately
	 * and the i2c command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is
	 * enabled before or after the DMA has started to write into THR. So
	 * the TXCOMP interrupt is enabled later by
	 * at91_twi_write_data_dma_callback(). Immediately after, in that DMA
	 * callback, we still need to send the STOP condition manually by
	 * writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	INIT_COMPLETION(dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_timeout(&dev->cmd_complete,
					  dev->adapter.timeout);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	/*
	 * The hardware can handle at most two messages concatenated by a
	 * repeated start via its internal address feature.
	 */
	if (num > 2) {
		dev_err(dev->dev,
			"cannot handle more than two concatenated messages.\n");
		return 0;
	} else if (num == 2) {
		int internal_address = 0;
		int i;

		if (msg->flags & I2C_M_RD) {
			dev_err(dev->dev, "first transfer must be write.\n");
			return -EINVAL;
		}
		if (msg->len > 3) {
			dev_err(dev->dev, "first message size must be <= 3.\n");
			return -EINVAL;
		}

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
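		/*
		 * Example with hypothetical values: a two-byte register
		 * address msg->buf[] = {0x12, 0x34} yields
		 * internal_address == 0x1234 and int_addr_flag ==
		 * 2 * AT91_TWI_IADRSZ_1, i.e. a two-byte IADRSZ field.
		 */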
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

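	/*
	 * Example with hypothetical values: a plain read from 7-bit slave
	 * address 0x50 with no internal address programs MMR with
	 * (0x50 << 16) | AT91_TWI_MREAD == 0x00501000.
	 */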
	at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
		       | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	return (ret < 0) ? ret : num;
}

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer	= at91_twi_xfer,
	.functionality	= at91_twi_func,
};

static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};

#if defined(CONFIG_OF)
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = true,
};

static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	}, {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	}, {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	}, {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif

static bool filter(struct dma_chan *chan, void *pdata)
{
	struct at91_twi_pdata *sl_pdata = pdata;
	struct at_dma_slave *sl;

	if (!sl_pdata)
		return false;

	sl = &sl_pdata->dma_slave;
	if (sl && (sl->dma_dev == chan->device->dev)) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	dma_cap_mask_t mask;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dma->chan_tx = dma_request_slave_channel_compat(mask, filter, pdata,
							dev->dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev->dev, "can't get a DMA channel for tx\n");
		ret = -EBUSY;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_compat(mask, filter, pdata,
							dev->dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev->dev, "can't get a DMA channel for rx\n");
		ret = -EBUSY;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(&dma->sg, 1);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	dev_info(dev->dev, "can't use DMA\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static struct at91_twi_pdata *at91_twi_get_driver_data(
					struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
		if (!match)
			return NULL;
		return (struct at91_twi_pdata *)match->data;
	}
	return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
}

static int at91_twi_probe(struct platform_device *pdev)
{
	struct at91_twi_dev *dev;
	struct resource *mem;
	int rc;
	u32 phy_addr;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	init_completion(&dev->cmd_complete);
	dev->dev = &pdev->dev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;
	phy_addr = mem->start;

	dev->pdata = at91_twi_get_driver_data(pdev);
	if (!dev->pdata)
		return -ENODEV;

	dev->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dev->base))
		return PTR_ERR(dev->base);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0)
		return dev->irq;

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	platform_set_drvdata(pdev, dev);

	dev->clk = devm_clk_get(dev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(dev->dev, "no clock defined\n");
		return -ENODEV;
	}
	clk_prepare_enable(dev->clk);

	if (dev->pdata->has_dma_support) {
		if (at91_twi_configure_dma(dev, phy_addr) == 0)
			dev->use_dma = true;
	}

	at91_calc_twi_clock(dev, TWI_CLK_HZ);
	at91_init_twi_bus(dev);

	snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
	i2c_set_adapdata(&dev->adapter, dev);
	dev->adapter.owner = THIS_MODULE;
	dev->adapter.class = I2C_CLASS_HWMON;
	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.dev.parent = dev->dev;
	dev->adapter.nr = pdev->id;
	dev->adapter.timeout = AT91_I2C_TIMEOUT;
	dev->adapter.dev.of_node = pdev->dev.of_node;

	rc = i2c_add_numbered_adapter(&dev->adapter);
	if (rc) {
		dev_err(dev->dev, "Adapter %s registration failed\n",
			dev->adapter.name);
		clk_disable_unprepare(dev->clk);
		return rc;
	}

	of_i2c_register_devices(&dev->adapter);

	dev_info(dev->dev, "AT91 i2c bus driver.\n");
	return 0;
}

static int at91_twi_remove(struct platform_device *pdev)
{
	struct at91_twi_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	clk_disable_unprepare(dev->clk);

	return 0;
}

#ifdef CONFIG_PM

static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	clk_disable(twi_dev->clk);

	return 0;
}

static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	return clk_enable(twi_dev->clk);
}

static const struct dev_pm_ops at91_twi_pm = {
	.runtime_suspend	= at91_twi_runtime_suspend,
	.runtime_resume		= at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif

static struct platform_driver at91_twi_driver = {
	.probe		= at91_twi_probe,
	.remove		= at91_twi_remove,
	.id_table	= at91_twi_devtypes,
	.driver		= {
		.name	= "at91_i2c",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm	= at91_twi_pm_ops,
	},
};

static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}

static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");