/* Broadcom FlexRM Mailbox Driver
 *
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings where each mailbox channel represents a separate FlexRM ring.
 */
#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
/* ====== FlexRM register defines ===== */

/* FlexRM configuration */
#define RING_REGS_SIZE			0x10000
#define RING_DESC_SIZE			8
#define RING_DESC_INDEX(offset)		\
	((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index)		\
	((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT		1024
#define RING_BD_ALIGN_ORDER		12
#define RING_BD_ALIGN_CHECK(addr)	\
	(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset)	\
	(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset)	\
	(!RING_BD_TOGGLE_INVALID(offset))
#define RING_BD_DESC_PER_REQ		32
#define RING_BD_DESC_COUNT		\
	(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE			\
	(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER		13
#define RING_CMPL_DESC_COUNT		RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE			\
	(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC			0x76303031
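
/*
 * Worked example of the ring geometry implied by the defines above
 * (illustrative only; every number follows directly from the macros):
 *
 *   RING_BD_DESC_COUNT   = 1024 requests x 32 descs = 32768 descriptors
 *   RING_BD_SIZE         = 32768 x 8 bytes          = 256 KiB of BD memory
 *   BD memory alignment  = 1 << 12                  = 4 KiB
 *   RING_CMPL_DESC_COUNT = 1024 descriptors
 *   RING_CMPL_SIZE       = 1024 x 8 bytes           = 8 KiB of completion memory
 *   Completion alignment = 1 << 13                  = 8 KiB
 */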
/* Per-Ring register offsets */
#define RING_VER			0x000
#define RING_BD_START_ADDR		0x004
#define RING_BD_READ_PTR		0x008
#define RING_BD_WRITE_PTR		0x00c
#define RING_BD_READ_PTR_DDR_LS		0x010
#define RING_BD_READ_PTR_DDR_MS		0x014
#define RING_CMPL_START_ADDR		0x018
#define RING_CMPL_WRITE_PTR		0x01c
#define RING_NUM_REQ_RECV_LS		0x020
#define RING_NUM_REQ_RECV_MS		0x024
#define RING_NUM_REQ_TRANS_LS		0x028
#define RING_NUM_REQ_TRANS_MS		0x02c
#define RING_NUM_REQ_OUTSTAND		0x030
#define RING_CONTROL			0x034
#define RING_FLUSH_DONE			0x038
#define RING_MSI_ADDR_LS		0x03c
#define RING_MSI_ADDR_MS		0x040
#define RING_MSI_CONTROL		0x048
#define RING_BD_READ_PTR_DDR_CONTROL	0x04c
#define RING_MSI_DATA_VALUE		0x064
/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT		28
#define BD_LAST_UPDATE_HW_MASK		0x1
#define BD_START_ADDR_VALUE(pa)		\
	((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)	\
	((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)	\
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL	12
#define CONTROL_FLUSH_SHIFT		5
#define CONTROL_ACTIVE_SHIFT		4
#define CONTROL_RATE_ADAPT_MASK		0xf
#define CONTROL_RATE_DYNAMIC		0x0
#define CONTROL_RATE_FAST		0x8
#define CONTROL_RATE_MEDIUM		0x9
#define CONTROL_RATE_SLOW		0xa
#define CONTROL_RATE_IDLE		0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK			0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT		16
#define MSI_TIMER_VAL_MASK		0xffff
#define MSI_ENABLE_SHIFT		15
#define MSI_ENABLE_MASK			0x1
#define MSI_COUNT_SHIFT			0
#define MSI_COUNT_MASK			0x3ff
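
/*
 * Example RING_MSI_CONTROL value (illustrative): with the defaults
 * programmed in flexrm_mbox_probe() (msi_timer_val = 0xffff,
 * msi_count_threshold = 0x1) the register is written as
 *
 *   (0xffff << MSI_TIMER_VAL_SHIFT) | BIT(MSI_ENABLE_SHIFT) |
 *   (0x1 << MSI_COUNT_SHIFT) = 0xffff8001
 *
 * i.e. MSI generation is enabled with a completion-count threshold of one;
 * the exact interaction with the MSI timer field is a hardware detail not
 * spelled out in this file.
 */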
/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT	16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK	0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT	15
#define BD_READ_PTR_DDR_ENABLE_MASK	0x1
/* ====== FlexRM ring descriptor defines ===== */

/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT		0
#define CMPL_OPAQUE_MASK		0xffff
#define CMPL_ENGINE_STATUS_SHIFT	16
#define CMPL_ENGINE_STATUS_MASK		0xffff
#define CMPL_DME_STATUS_SHIFT		32
#define CMPL_DME_STATUS_MASK		0xffff
#define CMPL_RM_STATUS_SHIFT		48
#define CMPL_RM_STATUS_MASK		0xffff

/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR		BIT(0)
#define DME_STATUS_MEM_UCOR_ERR		BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW	BIT(2)
#define DME_STATUS_FIFO_OVERFLOW	BIT(3)
#define DME_STATUS_RRESP_ERR		BIT(4)
#define DME_STATUS_BRESP_ERR		BIT(5)
#define DME_STATUS_ERROR_MASK		(DME_STATUS_MEM_COR_ERR | \
					 DME_STATUS_MEM_UCOR_ERR | \
					 DME_STATUS_FIFO_UNDERFLOW | \
					 DME_STATUS_FIFO_OVERFLOW | \
					 DME_STATUS_RRESP_ERR | \
					 DME_STATUS_BRESP_ERR)

/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT		0
#define RM_STATUS_CODE_MASK		0x3ff
#define RM_STATUS_CODE_GOOD		0x0
#define RM_STATUS_CODE_AE_TIMEOUT	0x3ff

/* General descriptor format */
#define DESC_TYPE_SHIFT			60
#define DESC_TYPE_MASK			0xf
#define DESC_PAYLOAD_SHIFT		0
#define DESC_PAYLOAD_MASK		0x0fffffffffffffff
/* Null descriptor format */
#define NULL_TYPE			0
#define NULL_TOGGLE_SHIFT		58
#define NULL_TOGGLE_MASK		0x1

/* Header descriptor format */
#define HEADER_TYPE			1
#define HEADER_TOGGLE_SHIFT		58
#define HEADER_TOGGLE_MASK		0x1
#define HEADER_ENDPKT_SHIFT		57
#define HEADER_ENDPKT_MASK		0x1
#define HEADER_STARTPKT_SHIFT		56
#define HEADER_STARTPKT_MASK		0x1
#define HEADER_BDCOUNT_SHIFT		36
#define HEADER_BDCOUNT_MASK		0x1f
#define HEADER_BDCOUNT_MAX		HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT		16
#define HEADER_FLAGS_MASK		0xffff
#define HEADER_OPAQUE_SHIFT		0
#define HEADER_OPAQUE_MASK		0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE			2
#define SRC_LENGTH_SHIFT		44
#define SRC_LENGTH_MASK			0xffff
#define SRC_ADDR_SHIFT			0
#define SRC_ADDR_MASK			0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE			3
#define DST_LENGTH_SHIFT		44
#define DST_LENGTH_MASK			0xffff
#define DST_ADDR_SHIFT			0
#define DST_ADDR_MASK			0x00000fffffffffff

/* Immediate (IMM) descriptor format */
#define IMM_TYPE			4
#define IMM_DATA_SHIFT			0
#define IMM_DATA_MASK			0x0fffffffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE			5
#define NPTR_TOGGLE_SHIFT		58
#define NPTR_TOGGLE_MASK		0x1
#define NPTR_ADDR_SHIFT			0
#define NPTR_ADDR_MASK			0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE			6
#define MSRC_LENGTH_SHIFT		44
#define MSRC_LENGTH_MASK		0xffff
#define MSRC_ADDR_SHIFT			0
#define MSRC_ADDR_MASK			0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE			7
#define MDST_LENGTH_SHIFT		44
#define MDST_LENGTH_MASK		0xffff
#define MDST_ADDR_SHIFT			0
#define MDST_ADDR_MASK			0x00000fffffffffff

/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE			8
#define SRCT_LENGTH_SHIFT		44
#define SRCT_LENGTH_MASK		0xffff
#define SRCT_ADDR_SHIFT			0
#define SRCT_ADDR_MASK			0x00000fffffffffff

/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE			9
#define DSTT_LENGTH_SHIFT		44
#define DSTT_LENGTH_MASK		0xffff
#define DSTT_ADDR_SHIFT			0
#define DSTT_ADDR_MASK			0x00000fffffffffff

/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE			10
#define IMMT_DATA_SHIFT			0
#define IMMT_DATA_MASK			0x0fffffffffffffff
/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m)		(((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m)	\
do {					\
	(_d) &= ~((u64)(_m) << (_s));	\
	(_d) |= (((u64)(_v) & (_m)) << (_s)); \
} while (0)
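
/*
 * Illustrative use of the helpers above: packing a request id into the
 * OPAQUE field of a header descriptor and reading it back.
 *
 *	u64 desc = 0;
 *	DESC_ENC(desc, 0x123, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
 *	DESC_DEC(desc, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK); // == 0x123
 */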
/* ====== FlexRM data structures ===== */

struct flexrm_ring {
	/* Unprotected members */
	int num;
	struct flexrm_mbox *mbox;
	void __iomem *regs;
	bool irq_requested;
	unsigned int irq;
	cpumask_t irq_aff_hint;
	unsigned int msi_timer_val;
	unsigned int msi_count_threshold;
	struct brcm_message *requests[RING_MAX_REQ_COUNT];
	void *bd_base;
	dma_addr_t bd_dma_base;
	u32 bd_write_offset;
	void *cmpl_base;
	dma_addr_t cmpl_dma_base;
	atomic_t msg_send_count;
	atomic_t msg_cmpl_count;
	/* Protected members */
	spinlock_t lock;
	DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
	u32 cmpl_read_offset;
};

struct flexrm_mbox {
	struct device *dev;
	void __iomem *regs;
	u32 num_rings;
	struct flexrm_ring *rings;
	struct dma_pool *bd_pool;
	struct dma_pool *cmpl_pool;
	struct dentry *root;
	struct dentry *config;
	struct dentry *stats;
	struct mbox_controller controller;
};
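
/*
 * Note on request tracking: each in-flight brcm_message is parked in
 * ring->requests[] at an index ("reqid") allocated from requests_bmap.
 * That reqid is carried to the hardware in the OPAQUE field of the
 * HEADER descriptor and echoed back in the OPAQUE field of the
 * completion descriptor, which is how completions are matched back to
 * requests in flexrm_process_completions().
 */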
/* ====== FlexRM ring descriptor helper routines ===== */

static u64 flexrm_read_desc(void *desc_ptr)
{
	return le64_to_cpu(*((u64 *)desc_ptr));
}

static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
	*((u64 *)desc_ptr) = cpu_to_le64(desc);
}

static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
	return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}
static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
	u32 status;

	status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
			  CMPL_DME_STATUS_MASK);
	if (status & DME_STATUS_ERROR_MASK)
		return -EIO;

	status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
			  CMPL_RM_STATUS_MASK);
	status &= RM_STATUS_CODE_MASK;
	if (status == RM_STATUS_CODE_AE_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}
static bool flexrm_is_next_table_desc(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);
	u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}
static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
	u64 desc = 0;

	DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
	DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);

	return desc;
}

static u64 flexrm_null_desc(u32 toggle)
{
	u64 desc = 0;

	DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);

	return desc;
}

static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (!(nhcnt % HEADER_BDCOUNT_MAX))
		return hcnt;

	return hcnt + 1;
}
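
/*
 * Examples for the estimate above (HEADER_BDCOUNT_MAX = 31):
 *   nhcnt = 31 -> 31/31 = 1, no remainder -> 1 HEADER descriptor
 *   nhcnt = 32 -> 32/31 = 1, remainder 1  -> 2 HEADER descriptors
 *   nhcnt = 70 -> 70/31 = 2, remainder 8  -> 3 HEADER descriptors
 */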
static void flexrm_flip_header_toogle(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);

	if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
		desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
	else
		desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);

	flexrm_write_desc(desc_ptr, desc);
}
static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
			      u32 bdcount, u32 flags, u32 opaque)
{
	u64 desc = 0;

	DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
	DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
	DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
	DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
	DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
	DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);

	return desc;
}
static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
				u64 desc, void **desc_ptr, u32 *toggle,
				void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;

	/* Sanity check */
	if (nhcnt <= nhpos)
		return;

	/*
	 * Each request or packet starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is represented by BDCOUNT field
	 * of HEADER descriptor. The max value of BDCOUNT field is 31 which
	 * means we can only have 31 non-HEADER descriptors following one
	 * HEADER descriptor.
	 *
	 * In general use, number of non-HEADER descriptors can easily go
	 * beyond 31. To tackle this situation, we have packet (or request)
	 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
	 *
	 * To use packet extension, the first HEADER descriptor of request
	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
	 * TOGGLE bit of the first HEADER will be set to invalid state to
	 * ensure that FlexRM does not start fetching descriptors till all
	 * descriptors are enqueued. The user of this function will flip
	 * the TOGGLE bit of first HEADER after all descriptors are
	 * enqueued.
	 */
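
	/*
	 * Example (illustrative): for nhcnt = 70 non-HEADER descriptors,
	 * HEADER descriptors are emitted at nhpos 0, 31 and 62:
	 *   nhpos 0:  STARTPKT=1, ENDPKT=0, BDCOUNT=31, TOGGLE=invalid
	 *   nhpos 31: STARTPKT=0, ENDPKT=0, BDCOUNT=31
	 *   nhpos 62: STARTPKT=0, ENDPKT=1, BDCOUNT=8
	 * The caller later flips the first HEADER's TOGGLE bit via
	 * flexrm_flip_header_toogle() to hand the request to hardware.
	 */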
	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
				nhavail : HEADER_BDCOUNT_MAX;
		if (nhavail <= HEADER_BDCOUNT_MAX)
			_bdcount = nhavail;
		else
			_bdcount = HEADER_BDCOUNT_MAX;
		d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
					_bdcount, 0x0, reqid);

		/* Write header descriptor */
		flexrm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (flexrm_is_next_table_desc(*desc_ptr)) {
			*toggle = (*toggle) ? 0 : 1;
			*desc_ptr += sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	flexrm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr += sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (flexrm_is_next_table_desc(*desc_ptr)) {
		*toggle = (*toggle) ? 0 : 1;
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}
}
static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
	DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
	DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
	DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);

	return desc;
}

static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
	DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);

	return desc;
}

static u64 flexrm_imm_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);

	return desc;
}

static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
	DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);

	return desc;
}

static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
	DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);

	return desc;
}

static u64 flexrm_immt_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);

	return desc;
}
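
/*
 * Note on SRC/DST vs MSRC/MDST: the SRC and DST descriptors carry the
 * fragment length in bytes (at most SRC_LENGTH_MASK = 0xffff), while the
 * "mega" MSRC and MDST descriptors carry the length in 16-byte units.
 * flexrm_spu_write_descs() below picks the mega variants whenever a
 * scatterlist entry length is a multiple of 16, which is also what the
 * size limits in flexrm_spu_sanity_check() assume.
 */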
static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}
static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}
static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (rc < 0) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return rc;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}
static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						    sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						    sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
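
/*
 * Both flexrm_spu_write_descs() above and flexrm_sba_write_descs() below
 * finish a request the same way: a NULL descriptor with an *invalid*
 * toggle bit terminates the chain, wmb() makes every descriptor visible
 * in memory, and only then is the first HEADER's toggle bit flipped to
 * valid so that hardware cannot start fetching a half-written request.
 */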
static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}
static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt;

	cnt = 0;
	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;
	}

	return cnt;
}
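
/*
 * Example (illustrative): a BRCM_SBA_CMD_TYPE_B command with
 * BRCM_SBA_CMD_HAS_RESP set counts as 3 non-HEADER descriptors above,
 * which is exactly what flexrm_sba_write_descs() below emits for it:
 *   1. DSTT descriptor for the response buffer (HAS_RESP)
 *   2. IMM descriptor carrying the command word (TYPE_B)
 *   3. SRCT descriptor for the data buffer (TYPE_B)
 */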
static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 i, nhpos = 0;
	struct brcm_sba_command *c;
	void *orig_desc_ptr = desc_ptr;

	/* Convert SBA commands into descriptors */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			/* Destination response descriptor */
			d = flexrm_dst_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			/* Destination response with tlast descriptor */
			d = flexrm_dstt_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			/* Destination with tlast descriptor */
			d = flexrm_dstt_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			/* Command as immediate descriptor */
			d = flexrm_imm_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else {
			/* Command as immediate descriptor with tlast */
			d = flexrm_immt_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			/* Source with tlast descriptor */
			d = flexrm_srct_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;
	}
}
static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	if (!msg)
		return 0;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_estimate_nonheader_desc_count(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_estimate_nonheader_desc_count(msg);
	default:
		return 0;
	}
}
static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;
	}

	return 0;
}
static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}
static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
				u32 reqid, void *desc_ptr, u32 toggle,
				void *start_desc, void *end_desc)
{
	if (!msg || !desc_ptr || !start_desc || !end_desc)
		return ERR_PTR(-ENOTSUPP);

	if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
		return ERR_PTR(-ERANGE);

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	default:
		return ERR_PTR(-ENOTSUPP);
	}
}
/* ====== FlexRM driver helper routines ===== */

static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
					   struct seq_file *file)
{
	int i;
	const char *state;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
		   "Ring#", "State", "BD_Addr", "BD_Size",
		   "Cmpl_Addr", "Cmpl_Size");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		if (readl(ring->regs + RING_CONTROL) &
		    BIT(CONTROL_ACTIVE_SHIFT))
			state = "active";
		else
			state = "inactive";
		seq_printf(file,
			   "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
			   ring->num, state,
			   (unsigned long long)ring->bd_dma_base,
			   (u32)RING_BD_SIZE,
			   (unsigned long long)ring->cmpl_dma_base,
			   (u32)RING_CMPL_SIZE);
	}
}
static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
					  struct seq_file *file)
{
	int i;
	u32 val, bd_read_offset;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
		   "Ring#", "BD_Read", "BD_Write",
		   "Cmpl_Read", "Submitted", "Completed");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
		bd_read_offset *= RING_DESC_SIZE;
		bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
					ring->bd_dma_base);
		seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
			   ring->num,
			   (u32)bd_read_offset,
			   (u32)ring->bd_write_offset,
			   (u32)ring->cmpl_read_offset,
			   (u32)atomic_read(&ring->msg_send_count),
			   (u32)atomic_read(&ring->msg_cmpl_count));
	}
}
static int flexrm_new_request(struct flexrm_ring *ring,
			      struct brcm_message *batch_msg,
			      struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* If no requests possible then save data pointer and goto done. */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number required descriptors = number of non-header descriptors +
	 *				 number of header descriptors +
	 *				 1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
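
/*
 * Completion path overview: each ring's MSI fires flexrm_irq_event(),
 * which wakes flexrm_irq_thread(), which calls
 * flexrm_process_completions() below. The same routine is also reachable
 * through the mailbox framework's peek_data callback, so it must be safe
 * to call from both contexts; the ring lock only guards the completion
 * read-offset snapshot and the reqid bitmap.
 */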
static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read completion write pointer atleast once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "got completion desc=0x%lx with error %d",
				 (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
				 "null msg pointer for completion desc=0x%lx",
				 (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}
/* ====== FlexRM Debugfs callbacks ====== */

static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
{
	struct platform_device *pdev = to_platform_device(file->private);
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	/* Write config in file */
	flexrm_write_config_in_seqfile(mbox, file);

	return 0;
}
static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct platform_device *pdev = to_platform_device(file->private);
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	/* Write stats in file */
	flexrm_write_stats_in_seqfile(mbox, file);

	return 0;
}
/* ====== FlexRM interrupt handler ===== */

static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
{
	/* We only have MSI for completions so just wakeup IRQ thread */
	/* Ring related errors will be informed via completion descriptors */

	return IRQ_WAKE_THREAD;
}
static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
{
	flexrm_process_completions(dev_id);

	return IRQ_HANDLED;
}
/* ====== FlexRM mailbox callbacks ===== */

static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
	int i, rc;
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg,
						&msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}
static bool flexrm_peek_data(struct mbox_chan *chan)
{
	int cnt = flexrm_process_completions(chan->con_priv);

	return (cnt > 0) ? true : false;
}
static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev, "can't allocate BD memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						   next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}
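
	/*
	 * At this point the BD area is pre-populated: the last 8-byte slot
	 * of every 4 KiB page holds a next-table (NPTR) descriptor chaining
	 * to the following page (wrapping back to the start of the ring at
	 * the end), and every other slot holds a NULL descriptor. The
	 * toggle value alternates per 4 KiB page, which is what the
	 * RING_BD_TOGGLE_VALID()/RING_BD_TOGGLE_INVALID() macros encode.
	 */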
	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
					 GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev, "can't allocate completion memory\n");
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}
	memset(ring->cmpl_base, 0, RING_CMPL_SIZE);

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev, "ring IRQ not available\n");
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq,
				   flexrm_irq_event,
				   flexrm_irq_thread,
				   0, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Set IRQ affinity hint */
	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
	if (ret) {
		dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
		goto fail_free_irq;
	}

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write pointer will be same as HW write pointer */
	ring->bd_write_offset =
			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Completion read pointer will be same as HW write pointer */
	ring->cmpl_read_offset =
			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	/* Reset stats to zero */
	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Set ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
			ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timedout\n", ring->num);

	/* Clear ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timedout\n", ring->num);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Clear requests bitmap */
	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	/* Release IRQ */
	if (ring->irq_requested) {
		irq_set_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}
static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
	.send_data	= flexrm_send_data,
	.startup	= flexrm_startup,
	.shutdown	= flexrm_shutdown,
	.peek_data	= flexrm_peek_data,
};
static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
					const struct of_phandle_args *pa)
{
	struct mbox_chan *chan;
	struct flexrm_ring *ring;

	if (pa->args_count < 3)
		return ERR_PTR(-EINVAL);

	if (pa->args[0] >= cntlr->num_chans)
		return ERR_PTR(-ENOENT);

	if (pa->args[1] > MSI_COUNT_MASK)
		return ERR_PTR(-EINVAL);

	if (pa->args[2] > MSI_TIMER_VAL_MASK)
		return ERR_PTR(-EINVAL);

	chan = &cntlr->chans[pa->args[0]];
	ring = chan->con_priv;
	ring->msi_count_threshold = pa->args[1];
	ring->msi_timer_val = pa->args[2];

	return chan;
}
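
/*
 * Device tree usage (illustrative, following the three cells checked
 * above): a client references a ring as
 *
 *	mboxes = <&flexrm_mbox RING_INDEX MSI_COUNT_THRESHOLD MSI_TIMER_VAL>;
 *
 * e.g. <&flexrm_mbox 0 0x1 0xffff> selects ring 0, an MSI count threshold
 * of one completion and the maximum MSI timer value. See the
 * brcm,iproc-flexrm-mbox DT binding document for the authoritative format.
 */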
/* ====== FlexRM platform driver ===== */

static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];

	/* Configure per-Ring MSI registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}
static int flexrm_mbox_probe(struct platform_device *pdev)
{
	int index, ret = 0;
	void __iomem *regs;
	void __iomem *regs_end;
	struct msi_desc *desc;
	struct resource *iomem;
	struct flexrm_ring *ring;
	struct flexrm_mbox *mbox;
	struct device *dev = &pdev->dev;

	/* Allocate driver mailbox struct */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Get resource for registers */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
		ret = -ENODEV;
		goto fail;
	}

	/* Map registers of all rings */
	mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
	if (IS_ERR(mbox->regs)) {
		ret = PTR_ERR(mbox->regs);
		dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
		goto fail;
	}
	regs_end = mbox->regs + resource_size(iomem);

	/* Scan and count available rings */
	mbox->num_rings = 0;
	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
		if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
			mbox->num_rings++;
	}
	if (!mbox->num_rings) {
		ret = -ENODEV;
		goto fail;
	}

	/* Allocate driver ring structs */
	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->rings = ring;

	/* Initialize members of driver ring structs */
	regs = mbox->regs;
	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ring->num = index;
		ring->mbox = mbox;
		while ((regs < regs_end) &&
		       (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
			regs += RING_REGS_SIZE;
		if (regs_end <= regs) {
			ret = -ENODEV;
			goto fail;
		}
		ring->regs = regs;
		regs += RING_REGS_SIZE;
		ring->irq = UINT_MAX;
		ring->irq_requested = false;
		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
		ring->msi_count_threshold = 0x1;
		memset(ring->requests, 0, sizeof(ring->requests));
		ring->bd_base = NULL;
		ring->bd_dma_base = 0;
		ring->cmpl_base = NULL;
		ring->cmpl_dma_base = 0;
		atomic_set(&ring->msg_send_count, 0);
		atomic_set(&ring->msg_cmpl_count, 0);
		spin_lock_init(&ring->lock);
		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
		ring->cmpl_read_offset = 0;
	}

	/* FlexRM is capable of 40-bit physical addresses only */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	/* Create DMA pool for ring BD memory */
	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
					1 << RING_BD_ALIGN_ORDER, 0);
	if (!mbox->bd_pool) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create DMA pool for ring completion memory */
	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
					  1 << RING_CMPL_ALIGN_ORDER, 0);
	if (!mbox->cmpl_pool) {
		ret = -ENOMEM;
		goto fail_destroy_bd_pool;
	}

	/* Allocate platform MSIs for each ring */
	ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
					     flexrm_mbox_msi_write);
	if (ret)
		goto fail_destroy_cmpl_pool;

	/* Save alloced IRQ numbers for each ring */
	for_each_msi_entry(desc, dev) {
		ring = &mbox->rings[desc->platform.msi_index];
		ring->irq = desc->irq;
	}

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
	if (IS_ERR_OR_NULL(mbox->root)) {
		ret = PTR_ERR_OR_ZERO(mbox->root);
		goto fail_free_msis;
	}

	/* Create debugfs config entry */
	mbox->config = debugfs_create_devm_seqfile(mbox->dev,
						   "config", mbox->root,
						   flexrm_debugfs_conf_show);
	if (IS_ERR_OR_NULL(mbox->config)) {
		ret = PTR_ERR_OR_ZERO(mbox->config);
		goto fail_free_debugfs_root;
	}

	/* Create debugfs stats entry */
	mbox->stats = debugfs_create_devm_seqfile(mbox->dev,
						  "stats", mbox->root,
						  flexrm_debugfs_stats_show);
	if (IS_ERR_OR_NULL(mbox->stats)) {
		ret = PTR_ERR_OR_ZERO(mbox->stats);
		goto fail_free_debugfs_root;
	}
skip_debugfs:

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &flexrm_mbox_chan_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->num_rings;
	mbox->controller.of_xlate = flexrm_mbox_of_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
				sizeof(*mbox->controller.chans), GFP_KERNEL);
	if (!mbox->controller.chans) {
		ret = -ENOMEM;
		goto fail_free_debugfs_root;
	}
	for (index = 0; index < mbox->num_rings; index++)
		mbox->controller.chans[index].con_priv = &mbox->rings[index];

	/* Register mailbox controller */
	ret = mbox_controller_register(&mbox->controller);
	if (ret)
		goto fail_free_debugfs_root;

	dev_info(dev, "registered flexrm mailbox with %d channels\n",
			mbox->controller.num_chans);

	return 0;

fail_free_debugfs_root:
	debugfs_remove_recursive(mbox->root);
fail_free_msis:
	platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
	dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
	dma_pool_destroy(mbox->bd_pool);
fail:
	return ret;
}
static int flexrm_mbox_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	mbox_controller_unregister(&mbox->controller);

	debugfs_remove_recursive(mbox->root);

	platform_msi_domain_free_irqs(dev);

	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);

	return 0;
}
static const struct of_device_id flexrm_mbox_of_match[] = {
	{ .compatible = "brcm,iproc-flexrm-mbox", },
	{},
};
MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
static struct platform_driver flexrm_mbox_driver = {
	.driver = {
		.name		= "brcm-flexrm-mbox",
		.of_match_table	= flexrm_mbox_of_match,
	},
	.probe		= flexrm_mbox_probe,
	.remove		= flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
MODULE_LICENSE("GPL v2");