/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
20 #include <linux/clk.h>
21 #include <linux/completion.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/err.h>
25 #include <linux/i2c.h>
26 #include <linux/interrupt.h>
28 #include <linux/module.h>
30 #include <linux/of_device.h>
31 #include <linux/of_i2c.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/platform_data/dma-atmel.h>
#define TWI_CLK_HZ		100000			/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8			/* enable DMA if transfer size is bigger than this threshold */

/* AT91 TWI register definitions */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		0x0001	/* Send a Start Condition */
#define	AT91_TWI_STOP		0x0002	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		0x0004	/* Master Transfer Enable */
#define	AT91_TWI_SVDIS		0x0020	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		0x0040	/* SMBus quick command */
#define	AT91_TWI_SWRST		0x0080	/* Software Reset */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		0x1000	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		0x0001	/* Transmission Complete */
#define	AT91_TWI_RXRDY		0x0002	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		0x0004	/* Transmit Holding Register Ready */

#define	AT91_TWI_OVRE		0x0040	/* Overrun Error */
#define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
#define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */

/* All interrupt sources the driver ever enables/inspects */
#define	AT91_TWI_INT_MASK \
	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */
75 struct at91_twi_pdata
{
80 struct at_dma_slave dma_slave
;
84 struct dma_chan
*chan_rx
;
85 struct dma_chan
*chan_tx
;
86 struct scatterlist sg
;
87 struct dma_async_tx_descriptor
*data_desc
;
88 enum dma_data_direction direction
;
90 bool xfer_in_progress
;
96 struct completion cmd_complete
;
103 unsigned transfer_status
;
104 struct i2c_adapter adapter
;
105 unsigned twi_cwgr_reg
;
106 struct at91_twi_pdata
*pdata
;
109 struct at91_twi_dma dma
;
112 static unsigned at91_twi_read(struct at91_twi_dev
*dev
, unsigned reg
)
114 return readl_relaxed(dev
->base
+ reg
);
117 static void at91_twi_write(struct at91_twi_dev
*dev
, unsigned reg
, unsigned val
)
119 writel_relaxed(val
, dev
->base
+ reg
);
122 static void at91_disable_twi_interrupts(struct at91_twi_dev
*dev
)
124 at91_twi_write(dev
, AT91_TWI_IDR
, AT91_TWI_INT_MASK
);
127 static void at91_twi_irq_save(struct at91_twi_dev
*dev
)
129 dev
->imr
= at91_twi_read(dev
, AT91_TWI_IMR
) & AT91_TWI_INT_MASK
;
130 at91_disable_twi_interrupts(dev
);
133 static void at91_twi_irq_restore(struct at91_twi_dev
*dev
)
135 at91_twi_write(dev
, AT91_TWI_IER
, dev
->imr
);
138 static void at91_init_twi_bus(struct at91_twi_dev
*dev
)
140 at91_disable_twi_interrupts(dev
);
141 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_SWRST
);
142 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_MSEN
);
143 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_SVDIS
);
144 at91_twi_write(dev
, AT91_TWI_CWGR
, dev
->twi_cwgr_reg
);
148 * Calculate symmetric clock as stated in datasheet:
149 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
151 static void at91_calc_twi_clock(struct at91_twi_dev
*dev
, int twi_clk
)
153 int ckdiv
, cdiv
, div
;
154 struct at91_twi_pdata
*pdata
= dev
->pdata
;
155 int offset
= pdata
->clk_offset
;
156 int max_ckdiv
= pdata
->clk_max_div
;
158 div
= max(0, (int)DIV_ROUND_UP(clk_get_rate(dev
->clk
),
159 2 * twi_clk
) - offset
);
160 ckdiv
= fls(div
>> 8);
163 if (ckdiv
> max_ckdiv
) {
164 dev_warn(dev
->dev
, "%d exceeds ckdiv max value which is %d.\n",
170 dev
->twi_cwgr_reg
= (ckdiv
<< 16) | (cdiv
<< 8) | cdiv
;
171 dev_dbg(dev
->dev
, "cdiv %d ckdiv %d\n", cdiv
, ckdiv
);
174 static void at91_twi_dma_cleanup(struct at91_twi_dev
*dev
)
176 struct at91_twi_dma
*dma
= &dev
->dma
;
178 at91_twi_irq_save(dev
);
180 if (dma
->xfer_in_progress
) {
181 if (dma
->direction
== DMA_FROM_DEVICE
)
182 dmaengine_terminate_all(dma
->chan_rx
);
184 dmaengine_terminate_all(dma
->chan_tx
);
185 dma
->xfer_in_progress
= false;
187 if (dma
->buf_mapped
) {
188 dma_unmap_single(dev
->dev
, sg_dma_address(&dma
->sg
),
189 dev
->buf_len
, dma
->direction
);
190 dma
->buf_mapped
= false;
193 at91_twi_irq_restore(dev
);
196 static void at91_twi_write_next_byte(struct at91_twi_dev
*dev
)
198 if (dev
->buf_len
<= 0)
201 at91_twi_write(dev
, AT91_TWI_THR
, *dev
->buf
);
203 /* send stop when last byte has been written */
204 if (--dev
->buf_len
== 0)
205 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_STOP
);
207 dev_dbg(dev
->dev
, "wrote 0x%x, to go %d\n", *dev
->buf
, dev
->buf_len
);
212 static void at91_twi_write_data_dma_callback(void *data
)
214 struct at91_twi_dev
*dev
= (struct at91_twi_dev
*)data
;
216 dma_unmap_single(dev
->dev
, sg_dma_address(&dev
->dma
.sg
),
217 dev
->buf_len
, DMA_TO_DEVICE
);
220 * When this callback is called, THR/TX FIFO is likely not to be empty
221 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
222 * Status Register to be sure that the STOP bit has been sent and the
223 * transfer is completed. The NACK interrupt has already been enabled,
224 * we just have to enable TXCOMP one.
226 at91_twi_write(dev
, AT91_TWI_IER
, AT91_TWI_TXCOMP
);
227 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_STOP
);
230 static void at91_twi_write_data_dma(struct at91_twi_dev
*dev
)
233 struct dma_async_tx_descriptor
*txdesc
;
234 struct at91_twi_dma
*dma
= &dev
->dma
;
235 struct dma_chan
*chan_tx
= dma
->chan_tx
;
237 if (dev
->buf_len
<= 0)
240 dma
->direction
= DMA_TO_DEVICE
;
242 at91_twi_irq_save(dev
);
243 dma_addr
= dma_map_single(dev
->dev
, dev
->buf
, dev
->buf_len
,
245 if (dma_mapping_error(dev
->dev
, dma_addr
)) {
246 dev_err(dev
->dev
, "dma map failed\n");
249 dma
->buf_mapped
= true;
250 at91_twi_irq_restore(dev
);
251 sg_dma_len(&dma
->sg
) = dev
->buf_len
;
252 sg_dma_address(&dma
->sg
) = dma_addr
;
254 txdesc
= dmaengine_prep_slave_sg(chan_tx
, &dma
->sg
, 1, DMA_MEM_TO_DEV
,
255 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
257 dev_err(dev
->dev
, "dma prep slave sg failed\n");
261 txdesc
->callback
= at91_twi_write_data_dma_callback
;
262 txdesc
->callback_param
= dev
;
264 dma
->xfer_in_progress
= true;
265 dmaengine_submit(txdesc
);
266 dma_async_issue_pending(chan_tx
);
271 at91_twi_dma_cleanup(dev
);
274 static void at91_twi_read_next_byte(struct at91_twi_dev
*dev
)
277 * If we are in this case, it means there is garbage data in RHR, so
281 at91_twi_read(dev
, AT91_TWI_RHR
);
285 *dev
->buf
= at91_twi_read(dev
, AT91_TWI_RHR
) & 0xff;
288 /* return if aborting, we only needed to read RHR to clear RXRDY*/
289 if (dev
->recv_len_abort
)
292 /* handle I2C_SMBUS_BLOCK_DATA */
293 if (unlikely(dev
->msg
->flags
& I2C_M_RECV_LEN
)) {
294 /* ensure length byte is a valid value */
295 if (*dev
->buf
<= I2C_SMBUS_BLOCK_MAX
&& *dev
->buf
> 0) {
296 dev
->msg
->flags
&= ~I2C_M_RECV_LEN
;
297 dev
->buf_len
+= *dev
->buf
;
298 dev
->msg
->len
= dev
->buf_len
+ 1;
299 dev_dbg(dev
->dev
, "received block length %d\n",
302 /* abort and send the stop by reading one more byte */
303 dev
->recv_len_abort
= true;
308 /* send stop if second but last byte has been read */
309 if (dev
->buf_len
== 1)
310 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_STOP
);
312 dev_dbg(dev
->dev
, "read 0x%x, to go %d\n", *dev
->buf
, dev
->buf_len
);
317 static void at91_twi_read_data_dma_callback(void *data
)
319 struct at91_twi_dev
*dev
= (struct at91_twi_dev
*)data
;
321 dma_unmap_single(dev
->dev
, sg_dma_address(&dev
->dma
.sg
),
322 dev
->buf_len
, DMA_FROM_DEVICE
);
324 /* The last two bytes have to be read without using dma */
325 dev
->buf
+= dev
->buf_len
- 2;
327 at91_twi_write(dev
, AT91_TWI_IER
, AT91_TWI_RXRDY
| AT91_TWI_TXCOMP
);
330 static void at91_twi_read_data_dma(struct at91_twi_dev
*dev
)
333 struct dma_async_tx_descriptor
*rxdesc
;
334 struct at91_twi_dma
*dma
= &dev
->dma
;
335 struct dma_chan
*chan_rx
= dma
->chan_rx
;
337 dma
->direction
= DMA_FROM_DEVICE
;
339 /* Keep in mind that we won't use dma to read the last two bytes */
340 at91_twi_irq_save(dev
);
341 dma_addr
= dma_map_single(dev
->dev
, dev
->buf
, dev
->buf_len
- 2,
343 if (dma_mapping_error(dev
->dev
, dma_addr
)) {
344 dev_err(dev
->dev
, "dma map failed\n");
347 dma
->buf_mapped
= true;
348 at91_twi_irq_restore(dev
);
349 dma
->sg
.dma_address
= dma_addr
;
350 sg_dma_len(&dma
->sg
) = dev
->buf_len
- 2;
352 rxdesc
= dmaengine_prep_slave_sg(chan_rx
, &dma
->sg
, 1, DMA_DEV_TO_MEM
,
353 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
355 dev_err(dev
->dev
, "dma prep slave sg failed\n");
359 rxdesc
->callback
= at91_twi_read_data_dma_callback
;
360 rxdesc
->callback_param
= dev
;
362 dma
->xfer_in_progress
= true;
363 dmaengine_submit(rxdesc
);
364 dma_async_issue_pending(dma
->chan_rx
);
369 at91_twi_dma_cleanup(dev
);
372 static irqreturn_t
atmel_twi_interrupt(int irq
, void *dev_id
)
374 struct at91_twi_dev
*dev
= dev_id
;
375 const unsigned status
= at91_twi_read(dev
, AT91_TWI_SR
);
376 const unsigned irqstatus
= status
& at91_twi_read(dev
, AT91_TWI_IMR
);
381 * In reception, the behavior of the twi device (before sama5d2) is
382 * weird. There is some magic about RXRDY flag! When a data has been
383 * almost received, the reception of a new one is anticipated if there
384 * is no stop command to send. That is the reason why ask for sending
385 * the stop command not on the last data but on the second last one.
387 * Unfortunately, we could still have the RXRDY flag set even if the
388 * transfer is done and we have read the last data. It might happen
389 * when the i2c slave device sends too quickly data after receiving the
390 * ack from the master. The data has been almost received before having
391 * the order to send stop. In this case, sending the stop command could
392 * cause a RXRDY interrupt with a TXCOMP one. It is better to manage
393 * the RXRDY interrupt first in order to not keep garbage data in the
394 * Receive Holding Register for the next transfer.
396 if (irqstatus
& AT91_TWI_RXRDY
)
397 at91_twi_read_next_byte(dev
);
400 * When a NACK condition is detected, the I2C controller sets the NACK,
401 * TXCOMP and TXRDY bits all together in the Status Register (SR).
403 * 1 - Handling NACK errors with CPU write transfer.
405 * In such case, we should not write the next byte into the Transmit
406 * Holding Register (THR) otherwise the I2C controller would start a new
407 * transfer and the I2C slave is likely to reply by another NACK.
409 * 2 - Handling NACK errors with DMA write transfer.
411 * By setting the TXRDY bit in the SR, the I2C controller also triggers
412 * the DMA controller to write the next data into the THR. Then the
413 * result depends on the hardware version of the I2C controller.
415 * 2a - Without support of the Alternative Command mode.
417 * This is the worst case: the DMA controller is triggered to write the
418 * next data into the THR, hence starting a new transfer: the I2C slave
419 * is likely to reply by another NACK.
420 * Concurrently, this interrupt handler is likely to be called to manage
421 * the first NACK before the I2C controller detects the second NACK and
422 * sets once again the NACK bit into the SR.
423 * When handling the first NACK, this interrupt handler disables the I2C
424 * controller interruptions, especially the NACK interrupt.
425 * Hence, the NACK bit is pending into the SR. This is why we should
426 * read the SR to clear all pending interrupts at the beginning of
427 * at91_do_twi_transfer() before actually starting a new transfer.
429 * 2b - With support of the Alternative Command mode.
431 * When a NACK condition is detected, the I2C controller also locks the
432 * THR (and sets the LOCK bit in the SR): even though the DMA controller
433 * is triggered by the TXRDY bit to write the next data into the THR,
434 * this data actually won't go on the I2C bus hence a second NACK is not
437 if (irqstatus
& (AT91_TWI_TXCOMP
| AT91_TWI_NACK
)) {
438 at91_disable_twi_interrupts(dev
);
439 complete(&dev
->cmd_complete
);
440 } else if (irqstatus
& AT91_TWI_TXRDY
) {
441 at91_twi_write_next_byte(dev
);
444 /* catch error flags */
445 dev
->transfer_status
|= status
;
450 static int at91_do_twi_transfer(struct at91_twi_dev
*dev
)
453 bool has_unre_flag
= dev
->pdata
->has_unre_flag
;
456 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
457 * read flag but shows the state of the transmission at the time the
458 * Status Register is read. According to the programmer datasheet,
459 * TXCOMP is set when both holding register and internal shifter are
460 * empty and STOP condition has been sent.
461 * Consequently, we should enable NACK interrupt rather than TXCOMP to
462 * detect transmission failure.
464 * Besides, the TXCOMP bit is already set before the i2c transaction
465 * has been started. For read transactions, this bit is cleared when
466 * writing the START bit into the Control Register. So the
467 * corresponding interrupt can safely be enabled just after.
468 * However for write transactions managed by the CPU, we first write
469 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
470 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
471 * the interrupt handler would be called immediately and the i2c command
472 * would be reported as completed.
473 * Also when a write transaction is managed by the DMA controller,
474 * enabling the TXCOMP interrupt in this function may lead to a race
475 * condition since we don't know whether the TXCOMP interrupt is enabled
476 * before or after the DMA has started to write into THR. So the TXCOMP
477 * interrupt is enabled later by at91_twi_write_data_dma_callback().
478 * Immediately after in that DMA callback, we still need to send the
479 * STOP condition manually writing the corresponding bit into the
483 dev_dbg(dev
->dev
, "transfer: %s %d bytes.\n",
484 (dev
->msg
->flags
& I2C_M_RD
) ? "read" : "write", dev
->buf_len
);
486 INIT_COMPLETION(dev
->cmd_complete
);
487 dev
->transfer_status
= 0;
489 /* Clear pending interrupts, such as NACK. */
490 at91_twi_read(dev
, AT91_TWI_SR
);
493 at91_twi_write(dev
, AT91_TWI_CR
, AT91_TWI_QUICK
);
494 at91_twi_write(dev
, AT91_TWI_IER
, AT91_TWI_TXCOMP
);
495 } else if (dev
->msg
->flags
& I2C_M_RD
) {
496 unsigned start_flags
= AT91_TWI_START
;
498 /* if only one byte is to be read, immediately stop transfer */
499 if (dev
->buf_len
<= 1 && !(dev
->msg
->flags
& I2C_M_RECV_LEN
))
500 start_flags
|= AT91_TWI_STOP
;
501 at91_twi_write(dev
, AT91_TWI_CR
, start_flags
);
503 * When using dma, the last byte has to be read manually in
504 * order to not send the stop command too late and then
505 * to receive extra data. In practice, there are some issues
506 * if you use the dma to read n-1 bytes because of latency.
507 * Reading n-2 bytes with dma and the two last ones manually
508 * seems to be the best solution.
510 if (dev
->use_dma
&& (dev
->buf_len
> AT91_I2C_DMA_THRESHOLD
)) {
511 at91_twi_write(dev
, AT91_TWI_IER
, AT91_TWI_NACK
);
512 at91_twi_read_data_dma(dev
);
514 at91_twi_write(dev
, AT91_TWI_IER
,
520 if (dev
->use_dma
&& (dev
->buf_len
> AT91_I2C_DMA_THRESHOLD
)) {
521 at91_twi_write(dev
, AT91_TWI_IER
, AT91_TWI_NACK
);
522 at91_twi_write_data_dma(dev
);
524 at91_twi_write_next_byte(dev
);
525 at91_twi_write(dev
, AT91_TWI_IER
,
532 ret
= wait_for_completion_timeout(&dev
->cmd_complete
,
533 dev
->adapter
.timeout
);
535 dev_err(dev
->dev
, "controller timed out\n");
536 at91_init_twi_bus(dev
);
540 if (dev
->transfer_status
& AT91_TWI_NACK
) {
541 dev_dbg(dev
->dev
, "received nack\n");
545 if (dev
->transfer_status
& AT91_TWI_OVRE
) {
546 dev_err(dev
->dev
, "overrun while reading\n");
550 if (has_unre_flag
&& dev
->transfer_status
& AT91_TWI_UNRE
) {
551 dev_err(dev
->dev
, "underrun while writing\n");
555 if (dev
->recv_len_abort
) {
556 dev_err(dev
->dev
, "invalid smbus block length recvd\n");
561 dev_dbg(dev
->dev
, "transfer complete\n");
566 at91_twi_dma_cleanup(dev
);
570 static int at91_twi_xfer(struct i2c_adapter
*adap
, struct i2c_msg
*msg
, int num
)
572 struct at91_twi_dev
*dev
= i2c_get_adapdata(adap
);
574 unsigned int_addr_flag
= 0;
575 struct i2c_msg
*m_start
= msg
;
577 dev_dbg(&adap
->dev
, "at91_xfer: processing %d messages:\n", num
);
580 * The hardware can handle at most two messages concatenated by a
581 * repeated start via it's internal address feature.
585 "cannot handle more than two concatenated messages.\n");
587 } else if (num
== 2) {
588 int internal_address
= 0;
591 if (msg
->flags
& I2C_M_RD
) {
592 dev_err(dev
->dev
, "first transfer must be write.\n");
596 dev_err(dev
->dev
, "first message size must be <= 3.\n");
600 /* 1st msg is put into the internal address, start with 2nd */
602 for (i
= 0; i
< msg
->len
; ++i
) {
603 const unsigned addr
= msg
->buf
[msg
->len
- 1 - i
];
605 internal_address
|= addr
<< (8 * i
);
606 int_addr_flag
+= AT91_TWI_IADRSZ_1
;
608 at91_twi_write(dev
, AT91_TWI_IADR
, internal_address
);
611 at91_twi_write(dev
, AT91_TWI_MMR
, (m_start
->addr
<< 16) | int_addr_flag
612 | ((m_start
->flags
& I2C_M_RD
) ? AT91_TWI_MREAD
: 0));
614 dev
->buf_len
= m_start
->len
;
615 dev
->buf
= m_start
->buf
;
617 dev
->recv_len_abort
= false;
619 ret
= at91_do_twi_transfer(dev
);
621 return (ret
< 0) ? ret
: num
;
624 static u32
at91_twi_func(struct i2c_adapter
*adapter
)
626 return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_EMUL
627 | I2C_FUNC_SMBUS_READ_BLOCK_DATA
;
630 static struct i2c_algorithm at91_twi_algorithm
= {
631 .master_xfer
= at91_twi_xfer
,
632 .functionality
= at91_twi_func
,
635 static struct at91_twi_pdata at91rm9200_config
= {
638 .has_unre_flag
= true,
639 .has_dma_support
= false,
642 static struct at91_twi_pdata at91sam9261_config
= {
645 .has_unre_flag
= false,
646 .has_dma_support
= false,
649 static struct at91_twi_pdata at91sam9260_config
= {
652 .has_unre_flag
= false,
653 .has_dma_support
= false,
656 static struct at91_twi_pdata at91sam9g20_config
= {
659 .has_unre_flag
= false,
660 .has_dma_support
= false,
663 static struct at91_twi_pdata at91sam9g10_config
= {
666 .has_unre_flag
= false,
667 .has_dma_support
= false,
670 static const struct platform_device_id at91_twi_devtypes
[] = {
672 .name
= "i2c-at91rm9200",
673 .driver_data
= (unsigned long) &at91rm9200_config
,
675 .name
= "i2c-at91sam9261",
676 .driver_data
= (unsigned long) &at91sam9261_config
,
678 .name
= "i2c-at91sam9260",
679 .driver_data
= (unsigned long) &at91sam9260_config
,
681 .name
= "i2c-at91sam9g20",
682 .driver_data
= (unsigned long) &at91sam9g20_config
,
684 .name
= "i2c-at91sam9g10",
685 .driver_data
= (unsigned long) &at91sam9g10_config
,
#if defined(CONFIG_OF)
/*
 * NOTE(review): sam9x5 divider values restored from the upstream
 * i2c-at91.c table — confirm against the datasheet.
 */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = true,
};

/* Device-tree match table. */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	}, {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	}, {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	}, {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
722 static bool filter(struct dma_chan
*chan
, void *pdata
)
724 struct at91_twi_pdata
*sl_pdata
= pdata
;
725 struct at_dma_slave
*sl
;
730 sl
= &sl_pdata
->dma_slave
;
731 if (sl
&& (sl
->dma_dev
== chan
->device
->dev
)) {
739 static int at91_twi_configure_dma(struct at91_twi_dev
*dev
, u32 phy_addr
)
742 struct at91_twi_pdata
*pdata
= dev
->pdata
;
743 struct dma_slave_config slave_config
;
744 struct at91_twi_dma
*dma
= &dev
->dma
;
747 memset(&slave_config
, 0, sizeof(slave_config
));
748 slave_config
.src_addr
= (dma_addr_t
)phy_addr
+ AT91_TWI_RHR
;
749 slave_config
.src_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
750 slave_config
.src_maxburst
= 1;
751 slave_config
.dst_addr
= (dma_addr_t
)phy_addr
+ AT91_TWI_THR
;
752 slave_config
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
753 slave_config
.dst_maxburst
= 1;
754 slave_config
.device_fc
= false;
757 dma_cap_set(DMA_SLAVE
, mask
);
759 dma
->chan_tx
= dma_request_slave_channel_compat(mask
, filter
, pdata
,
762 dev_err(dev
->dev
, "can't get a DMA channel for tx\n");
767 dma
->chan_rx
= dma_request_slave_channel_compat(mask
, filter
, pdata
,
770 dev_err(dev
->dev
, "can't get a DMA channel for rx\n");
775 slave_config
.direction
= DMA_MEM_TO_DEV
;
776 if (dmaengine_slave_config(dma
->chan_tx
, &slave_config
)) {
777 dev_err(dev
->dev
, "failed to configure tx channel\n");
782 slave_config
.direction
= DMA_DEV_TO_MEM
;
783 if (dmaengine_slave_config(dma
->chan_rx
, &slave_config
)) {
784 dev_err(dev
->dev
, "failed to configure rx channel\n");
789 sg_init_table(&dma
->sg
, 1);
790 dma
->buf_mapped
= false;
791 dma
->xfer_in_progress
= false;
793 dev_info(dev
->dev
, "using %s (tx) and %s (rx) for DMA transfers\n",
794 dma_chan_name(dma
->chan_tx
), dma_chan_name(dma
->chan_rx
));
799 dev_info(dev
->dev
, "can't use DMA\n");
801 dma_release_channel(dma
->chan_rx
);
803 dma_release_channel(dma
->chan_tx
);
807 static struct at91_twi_pdata
*at91_twi_get_driver_data(
808 struct platform_device
*pdev
)
810 if (pdev
->dev
.of_node
) {
811 const struct of_device_id
*match
;
812 match
= of_match_node(atmel_twi_dt_ids
, pdev
->dev
.of_node
);
815 return (struct at91_twi_pdata
*)match
->data
;
817 return (struct at91_twi_pdata
*) platform_get_device_id(pdev
)->driver_data
;
820 static int at91_twi_probe(struct platform_device
*pdev
)
822 struct at91_twi_dev
*dev
;
823 struct resource
*mem
;
827 dev
= devm_kzalloc(&pdev
->dev
, sizeof(*dev
), GFP_KERNEL
);
830 init_completion(&dev
->cmd_complete
);
831 dev
->dev
= &pdev
->dev
;
833 mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
836 phy_addr
= mem
->start
;
838 dev
->pdata
= at91_twi_get_driver_data(pdev
);
842 dev
->base
= devm_ioremap_resource(&pdev
->dev
, mem
);
843 if (IS_ERR(dev
->base
))
844 return PTR_ERR(dev
->base
);
846 dev
->irq
= platform_get_irq(pdev
, 0);
850 rc
= devm_request_irq(&pdev
->dev
, dev
->irq
, atmel_twi_interrupt
, 0,
851 dev_name(dev
->dev
), dev
);
853 dev_err(dev
->dev
, "Cannot get irq %d: %d\n", dev
->irq
, rc
);
857 platform_set_drvdata(pdev
, dev
);
859 dev
->clk
= devm_clk_get(dev
->dev
, NULL
);
860 if (IS_ERR(dev
->clk
)) {
861 dev_err(dev
->dev
, "no clock defined\n");
864 clk_prepare_enable(dev
->clk
);
866 if (dev
->pdata
->has_dma_support
) {
867 if (at91_twi_configure_dma(dev
, phy_addr
) == 0)
871 at91_calc_twi_clock(dev
, TWI_CLK_HZ
);
872 at91_init_twi_bus(dev
);
874 snprintf(dev
->adapter
.name
, sizeof(dev
->adapter
.name
), "AT91");
875 i2c_set_adapdata(&dev
->adapter
, dev
);
876 dev
->adapter
.owner
= THIS_MODULE
;
877 dev
->adapter
.class = I2C_CLASS_HWMON
;
878 dev
->adapter
.algo
= &at91_twi_algorithm
;
879 dev
->adapter
.dev
.parent
= dev
->dev
;
880 dev
->adapter
.nr
= pdev
->id
;
881 dev
->adapter
.timeout
= AT91_I2C_TIMEOUT
;
882 dev
->adapter
.dev
.of_node
= pdev
->dev
.of_node
;
884 rc
= i2c_add_numbered_adapter(&dev
->adapter
);
886 dev_err(dev
->dev
, "Adapter %s registration failed\n",
888 clk_disable_unprepare(dev
->clk
);
892 of_i2c_register_devices(&dev
->adapter
);
894 dev_info(dev
->dev
, "AT91 i2c bus driver.\n");
898 static int at91_twi_remove(struct platform_device
*pdev
)
900 struct at91_twi_dev
*dev
= platform_get_drvdata(pdev
);
902 i2c_del_adapter(&dev
->adapter
);
903 clk_disable_unprepare(dev
->clk
);
#ifdef CONFIG_PM

/* Runtime PM: gate the peripheral clock while the bus is idle. */
static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	clk_disable(twi_dev->clk);

	return 0;
}

static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	return clk_enable(twi_dev->clk);
}

static const struct dev_pm_ops at91_twi_pm = {
	.runtime_suspend	= at91_twi_runtime_suspend,
	.runtime_resume		= at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif
936 static struct platform_driver at91_twi_driver
= {
937 .probe
= at91_twi_probe
,
938 .remove
= at91_twi_remove
,
939 .id_table
= at91_twi_devtypes
,
942 .owner
= THIS_MODULE
,
943 .of_match_table
= of_match_ptr(atmel_twi_dt_ids
),
944 .pm
= at91_twi_pm_ops
,
948 static int __init
at91_twi_init(void)
950 return platform_driver_register(&at91_twi_driver
);
953 static void __exit
at91_twi_exit(void)
955 platform_driver_unregister(&at91_twi_driver
);
958 subsys_initcall(at91_twi_init
);
959 module_exit(at91_twi_exit
);
961 MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
962 MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
963 MODULE_LICENSE("GPL");
964 MODULE_ALIAS("platform:at91_i2c");