[9610] wlbt: SCSC Driver version 10.9.1.0
[GitHub/MotorolaMobilityLLC/kernel-slsi.git] / drivers / misc / samsung / scsc_bt / scsc_ant.c
1 /****************************************************************************
2 *
3 * Copyright (c) 2017 Samsung Electronics Co., Ltd
4 *
5 ****************************************************************************/
6
7 /* MX BT shared memory interface */
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/fs.h>
13 #include <linux/module.h>
14 #include <linux/firmware.h>
15 #include <linux/poll.h>
16 #include <linux/slab.h>
17 #include <linux/cdev.h>
18 #include <linux/delay.h>
19 #include <linux/interrupt.h>
20 #include <linux/uaccess.h>
21 #include <linux/wait.h>
22 #include <linux/kthread.h>
23 #include <asm/io.h>
24 #include <linux/wakelock.h>
25
26 #include <scsc/scsc_mx.h>
27 #include <scsc/scsc_mifram.h>
28 #include <scsc/api/bsmhcp.h>
29 #include <scsc/scsc_logring.h>
30
31 #include "scsc_bt_priv.h"
32 #include "scsc_shm.h"
33 #include "scsc_bt_hci.h"
34
35 static u8 ant_write_buffer[ASMHCP_BUFFER_SIZE];
36 static u16 ant_irq_mask;
37
38 static void scsc_ant_shm_irq_handler(int irqbit, void *data)
39 {
40 /* Clear interrupt */
41 scsc_service_mifintrbit_bit_clear(ant_service.service, irqbit);
42
43 ant_service.interrupt_count++;
44
45 /* Wake the reader operation */
46 if (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
47 ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
48 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
49 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read ||
50 atomic_read(&ant_service.error_count) != 0 ||
51 ant_service.asmhcp_protocol->header.panic_deathbed_confession) {
52 ant_service.interrupt_read_count++;
53
54 wake_lock_timeout(&ant_service.read_wake_lock, HZ);
55 wake_up(&ant_service.read_wait);
56 }
57
58 if (ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write ==
59 ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_read &&
60 ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write ==
61 ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_read) {
62 ant_service.interrupt_write_count++;
63
64 if (wake_lock_active(&ant_service.write_wake_lock)) {
65 ant_service.write_wake_unlock_count++;
66 wake_unlock(&ant_service.write_wake_lock);
67 }
68 }
69 }
70
71 /* Assign firmware/host interrupts */
72 static int scsc_ant_shm_init_interrupt(void)
73 {
74 int irq_ret = 0;
75 u16 irq_num = 0;
76
77 /* To-host f/w IRQ allocations and ISR registrations */
78 irq_ret = scsc_service_mifintrbit_register_tohost(
79 ant_service.service, scsc_ant_shm_irq_handler, NULL);
80 if (irq_ret < 0)
81 return irq_ret;
82
83 ant_service.asmhcp_protocol->header.bg_to_ap_int_src = irq_ret;
84 ant_irq_mask |= 1 << irq_num++;
85
86 /* From-host f/w IRQ allocations */
87 irq_ret = scsc_service_mifintrbit_alloc_fromhost(
88 ant_service.service, SCSC_MIFINTR_TARGET_R4);
89 if (irq_ret < 0)
90 return irq_ret;
91
92 ant_service.asmhcp_protocol->header.ap_to_bg_int_src = irq_ret;
93 ant_irq_mask |= 1 << irq_num++;
94
95 SCSC_TAG_DEBUG(BT_COMMON, "Registered to-host IRQ bit %d, from-host IRQ bit %d\n",
96 ant_service.asmhcp_protocol->header.bg_to_ap_int_src,
97 ant_service.asmhcp_protocol->header.ap_to_bg_int_src);
98
99 return 0;
100 }
101
/* Queue a single ANT command packet on the driver->controller cmd ring.
 *
 * @data:  packet payload (first two message bytes already stripped by
 *         the caller, see scsc_shm_ant_write()).
 * @count: payload length in bytes.
 *
 * Returns @count on success, 0 if the transfer ring is full, or -EIO
 * after flagging a fatal protocol error.
 */
static ssize_t scsc_shm_ant_cmd_write(const unsigned char *data, size_t count)
{
	/* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
	uint32_t tr_read = ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_read;
	uint32_t tr_write = ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write;

	struct ASMHCP_TD_CONTROL *td = &ant_service.asmhcp_protocol->cmd_driver_controller_transfer_ring[tr_write];

	SCSC_TAG_DEBUG(BT_H4, "ANT_COMMAND_PKT (len=%zu, read=%u, write=%u)\n",
		       count, tr_read, tr_write);

	/* Index out of bounds check - a corrupt ring index means the shared
	 * memory is trashed, which is treated as a fatal error.
	 */
	if (tr_read >= ASMHCP_TRANSFER_RING_CMD_SIZE || tr_write >= ASMHCP_TRANSFER_RING_CMD_SIZE) {
		SCSC_TAG_ERR(BT_H4,
			     "ANT_COMMAND_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n",
			     tr_read, tr_write);
		atomic_inc(&ant_service.error_count);
		return -EIO;
	}

	/* Does the transfer ring have room for an entry */
	if (BSMHCP_HAS_ROOM(tr_write, tr_read, ASMHCP_TRANSFER_RING_CMD_SIZE)) {
		/* Fill the transfer descriptor with the ANT command data.
		 * NOTE(review): count is not checked against the capacity of
		 * td->data here - presumably the caller's packet-length
		 * parsing bounds it; confirm against sizeof(td->data).
		 */
		memcpy(td->data, data, count);
		td->length = (u16)count;

		/* Ensure the wake lock is acquired so the system stays awake
		 * until the firmware drains the ring (released in the ISR).
		 */
		if (!wake_lock_active(&ant_service.write_wake_lock)) {
			ant_service.write_wake_lock_count++;
			wake_lock(&ant_service.write_wake_lock);
		}

		/* Increase the write pointer */
		BSMHCP_INCREASE_INDEX(tr_write, ASMHCP_TRANSFER_RING_CMD_SIZE);
		ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write = tr_write;

		/* Memory barrier to ensure out-of-order execution is completed */
		mmiowb();

		/* Trigger the interrupt in the mailbox */
		scsc_service_mifintrbit_bit_set(
			ant_service.service,
			ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
			SCSC_MIFINTR_TARGET_R4);
	} else {
		/* Transfer ring full. Only happens if the user attempt to send more ANT command packets than
		 * available credits
		 */
		count = 0;
	}

	return count;
}
155
156 static ssize_t scsc_shm_ant_data_write(const unsigned char *data, size_t count)
157 {
158 /* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
159 uint32_t tr_read = ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_read;
160 uint32_t tr_write = ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write;
161
162 /* Temp vars */
163 struct ASMHCP_TD_CONTROL *td = &ant_service.asmhcp_protocol->data_driver_controller_transfer_ring[tr_write];
164
165 SCSC_TAG_DEBUG(BT_H4, "ANT_DATA_PKT (len=%zu, read=%u, write=%u)\n",
166 count, tr_read, tr_write);
167
168 /* Index out of bounds check */
169 if (tr_read >= ASMHCP_TRANSFER_RING_DATA_SIZE || tr_write >= ASMHCP_TRANSFER_RING_DATA_SIZE) {
170 SCSC_TAG_ERR(
171 BT_H4,
172 "ANT_DATA_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n",
173 tr_read, tr_write);
174 atomic_inc(&ant_service.error_count);
175 return -EIO;
176 }
177
178 /* Does the transfer ring have room for an entry */
179 if (BSMHCP_HAS_ROOM(tr_write, tr_read, ASMHCP_TRANSFER_RING_DATA_SIZE)) {
180 /* Fill the transfer descriptor with the ANT command data */
181 memcpy(td->data, data, count);
182 td->length = (u16)count;
183
184 /* Ensure the wake lock is acquired */
185 if (!wake_lock_active(&ant_service.write_wake_lock)) {
186 ant_service.write_wake_lock_count++;
187 wake_lock(&ant_service.write_wake_lock);
188 }
189
190 /* Increase the write pointer */
191 BSMHCP_INCREASE_INDEX(tr_write, ASMHCP_TRANSFER_RING_DATA_SIZE);
192 ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write = tr_write;
193
194 /* Memory barrier to ensure out-of-order execution is completed */
195 mmiowb();
196
197 /* Trigger the interrupt in the mailbox */
198 scsc_service_mifintrbit_bit_set(
199 ant_service.service,
200 ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
201 SCSC_MIFINTR_TARGET_R4);
202 }
203 else
204 /* Transfer ring full */
205 count = 0;
206
207 return count;
208 }
209
210 static ssize_t scsc_ant_copy_td_to_buffer(char __user *buf, size_t len, struct ASMHCP_TD_CONTROL *td)
211 {
212 ssize_t ret = 0;
213 ssize_t consumed = 0;
214 size_t copy_len = 0;
215
216 SCSC_TAG_DEBUG(BT_H4, "td (length=%u), len=%zu, read_offset=%zu\n",
217 td->length, len, ant_service.read_offset);
218
219 /* Has the header been copied to userspace (aka is this the start of the copy operation) */
220 if (ant_service.read_offset < ANT_HEADER_LENGTH) {
221 /* Calculate the amount of data that can be transferred */
222 copy_len = min(ANT_HEADER_LENGTH - ant_service.read_offset, len);
223
224 if (td->data[1] + ANT_HEADER_LENGTH + 1 != td->length) {
225 SCSC_TAG_ERR(BT_H4, "Firmware sent invalid ANT cmd/data\n");
226 atomic_inc(&ant_service.error_count);
227 ret = -EFAULT;
228 }
229 /* Copy the ANT header to the userspace buffer */
230 ret = copy_to_user(buf, &td->data[ant_service.read_offset], copy_len);
231 if (ret == 0) {
232 /* All good - Update our consumed information */
233 consumed = copy_len;
234 ant_service.read_offset += copy_len;
235 SCSC_TAG_DEBUG(BT_H4,
236 "copied header: read_offset=%zu, consumed=%zu, ret=%zd, len=%zu, copy_len=%zu\n",
237 ant_service.read_offset, consumed, ret, len, copy_len);
238 } else {
239 SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
240 ret = -EACCES;
241 }
242 }
243
244 /* Can more data be put into the userspace buffer */
245 if (ret == 0 && ant_service.read_offset >= ANT_HEADER_LENGTH && (len - consumed)) {
246 /* Calculate the amount of data that can be transferred */
247 copy_len = min((td->length - ant_service.read_offset), (len - consumed));
248
249 /* Copy the data to the user buffer */
250 ret = copy_to_user(&buf[consumed], &td->data[ant_service.read_offset], copy_len);
251 if (ret == 0) {
252 /* All good - Update our consumed information */
253 ant_service.read_offset += copy_len;
254 consumed += copy_len;
255
256 /* Have all data been copied to the userspace buffer */
257 if (ant_service.read_offset == td->length) {
258 /* All good - read operation is completed */
259 ant_service.read_offset = 0;
260 ant_service.read_operation = ANT_READ_OP_NONE;
261 }
262 } else {
263 SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
264 ret = -EACCES;
265 }
266 }
267
268 SCSC_TAG_DEBUG(BT_H4, "read_offset=%zu, consumed=%zu, ret=%zd, len=%zu, copy_len=%zu\n",
269 ant_service.read_offset, consumed, ret, len, copy_len);
270
271 return ret == 0 ? consumed : ret;
272 }
273
274 static ssize_t scsc_ant_cmd_read(char __user *buf, size_t len)
275 {
276 ssize_t ret = 0;
277
278 /* Temp vars */
279 if (ant_service.mailbox_cmd_ctr_driv_read != ant_service.mailbox_cmd_ctr_driv_write) {
280 struct ASMHCP_PROTOCOL *ap = ant_service.asmhcp_protocol;
281 struct ASMHCP_TD_CONTROL *td = &ap->cmd_controller_driver_transfer_ring
282 [ant_service.mailbox_cmd_ctr_driv_read];
283
284 ret = scsc_ant_copy_td_to_buffer(buf, len, td);
285 }
286
287 return ret;
288 }
289
290 static ssize_t scsc_ant_data_read(char __user *buf, size_t len)
291 {
292 ssize_t ret = 0;
293
294 if (ant_service.mailbox_data_ctr_driv_read != ant_service.mailbox_data_ctr_driv_write) {
295 struct ASMHCP_PROTOCOL *ap = ant_service.asmhcp_protocol;
296 struct ASMHCP_TD_CONTROL *td = &ap->data_controller_driver_transfer_ring
297 [ant_service.mailbox_data_ctr_driv_read];
298
299 ret = scsc_ant_copy_td_to_buffer(buf, len, td);
300 }
301
302 return ret;
303 }
304
305 static ssize_t scsc_bt_shm_ant_read_data(char __user *buf, size_t len)
306 {
307 ssize_t ret = 0;
308 ssize_t consumed = 0;
309
310 while (ant_service.read_operation == ANT_READ_OP_NONE &&
311 ret == 0 &&
312 ant_service.mailbox_data_ctr_driv_read != ant_service.mailbox_data_ctr_driv_write) {
313 /* Start a data copy to userspace */
314 ant_service.read_operation = ANT_READ_OP_DATA;
315 ant_service.read_index = ant_service.mailbox_data_ctr_driv_read;
316 ret = scsc_ant_data_read(&buf[consumed], len - consumed);
317 if (ret > 0) {
318 /* All good - Update our consumed information */
319 consumed += ret;
320 ret = 0;
321
322 /* Update the index if all the data could be copied to the userspace buffer
323 * otherwise stop processing the data
324 */
325 if (ant_service.read_operation == ANT_READ_OP_NONE)
326 BSMHCP_INCREASE_INDEX(ant_service.mailbox_data_ctr_driv_read,
327 ASMHCP_TRANSFER_RING_DATA_SIZE);
328 else
329 break;
330 }
331 }
332
333 return ret == 0 ? consumed : ret;
334 }
335
336 static ssize_t scsc_bt_shm_ant_read_cmd(char __user *buf, size_t len)
337 {
338 ssize_t ret = 0;
339 ssize_t consumed = 0;
340
341 while (ant_service.read_operation == ANT_READ_OP_NONE &&
342 ret == 0 &&
343 ant_service.mailbox_cmd_ctr_driv_read != ant_service.mailbox_cmd_ctr_driv_write) {
344 /* Start a cmd copy to userspace */
345 ant_service.read_operation = ANT_READ_OP_CMD;
346 ant_service.read_index = ant_service.mailbox_cmd_ctr_driv_read;
347 ret = scsc_ant_cmd_read(&buf[consumed], len - consumed);
348 if (ret > 0) {
349 /* All good - Update our consumed information */
350 consumed += ret;
351 ret = 0;
352
353 /* Update the index if all the data could be copied to the userspace buffer
354 * otherwise stop processing the cmds
355 */
356 if (ant_service.read_operation == ANT_READ_OP_NONE)
357 BSMHCP_INCREASE_INDEX(ant_service.mailbox_cmd_ctr_driv_read,
358 ASMHCP_TRANSFER_RING_CMD_SIZE);
359 else
360 break;
361 }
362 }
363
364 return ret == 0 ? consumed : ret;
365 }
366
367 static ssize_t scsc_shm_ant_read_continue(char __user *buf, size_t len)
368 {
369 ssize_t ret = 0;
370
371 /* Is a cmd read operation ongoing */
372 if (ant_service.read_operation == ANT_READ_OP_CMD) {
373 SCSC_TAG_DEBUG(BT_H4, "ANT_READ_OP_CMD\n");
374
375 /* Copy data into the userspace buffer */
376 ret = scsc_ant_cmd_read(buf, len);
377 if (ant_service.read_operation == ANT_READ_OP_NONE)
378 /* All done - increase the read pointer and continue */
379 if (ant_service.read_operation == ANT_READ_OP_NONE)
380 BSMHCP_INCREASE_INDEX(ant_service.mailbox_cmd_ctr_driv_read,
381 ASMHCP_TRANSFER_RING_CMD_SIZE);
382 /* Is a data read operation ongoing */
383 } else if (ant_service.read_operation == ANT_READ_OP_DATA) {
384 SCSC_TAG_DEBUG(BT_H4, "ANT_READ_OP_DATA\n");
385
386 /* Copy data into the userspace buffer */
387 ret = scsc_ant_data_read(buf, len);
388 if (ant_service.read_operation == ANT_READ_OP_NONE)
389 /* All done - increase the read pointer and continue */
390 BSMHCP_INCREASE_INDEX(ant_service.mailbox_data_ctr_driv_read, ASMHCP_TRANSFER_RING_DATA_SIZE);
391 }
392
393 return ret;
394 }
395
396 ssize_t scsc_shm_ant_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
397 {
398 ssize_t consumed = 0;
399 ssize_t ret = 0;
400 ssize_t res;
401 bool gen_bg_int = false;
402
403 /* Only 1 reader is allowed */
404 if (atomic_inc_return(&ant_service.ant_readers) != 1) {
405 atomic_dec(&ant_service.ant_readers);
406 return -EIO;
407 }
408
409 /* Has en error been detect then just return with an error */
410 if (atomic_read(&ant_service.error_count) != 0) {
411 atomic_dec(&ant_service.ant_readers);
412 return -EIO;
413 }
414
415 /* Update the cached variables with the non-cached variables */
416 ant_service.mailbox_cmd_ctr_driv_write = ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write;
417 ant_service.mailbox_data_ctr_driv_write = ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write;
418
419 /* put the remaining data from the transfer ring into the available userspace buffer */
420 if (ant_service.read_operation != ANT_READ_OP_NONE) {
421 ret = scsc_shm_ant_read_continue(buf, len);
422 /* Update the consumed variable in case a operation was ongoing */
423 if (ret > 0) {
424 consumed = ret;
425 ret = 0;
426 }
427 }
428
429 /* Main loop - Can only be entered when no operation is present on entering this function
430 * or no hardware error has been detected. It loops until data has been placed in the
431 * userspace buffer or an error has been detected
432 */
433 while (atomic_read(&ant_service.error_count) == 0 && consumed == 0) {
434 /* Does any of the read/write pairs differs */
435 if (ant_service.mailbox_data_ctr_driv_read == ant_service.mailbox_data_ctr_driv_write &&
436 ant_service.mailbox_cmd_ctr_driv_read == ant_service.mailbox_cmd_ctr_driv_write &&
437 atomic_read(&ant_service.error_count) == 0 &&
438 ant_service.asmhcp_protocol->header.panic_deathbed_confession == 0) {
439 /* Don't wait if in NONBLOCK mode */
440 if (file->f_flags & O_NONBLOCK) {
441 ret = -EAGAIN;
442 break;
443 }
444
445 /* All read/write pairs are identical - wait for the firmware. The conditional
446 * check is used to verify that a read/write pair has actually changed
447 */
448 ret = wait_event_interruptible(bt_service.read_wait,
449 (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
450 ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
451 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
452 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read ||
453 atomic_read(&ant_service.error_count) != 0 ||
454 ant_service.asmhcp_protocol->header.panic_deathbed_confession));
455
456 /* Has an error been detected elsewhere in the driver then just return from this function */
457 if (atomic_read(&ant_service.error_count) != 0)
458 break;
459
460 /* Any failures is handled by the userspace application */
461 if (ret)
462 break;
463
464 /* Refresh our write indexes before starting to process the protocol */
465 ant_service.mailbox_cmd_ctr_driv_write =
466 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write;
467 ant_service.mailbox_data_ctr_driv_write =
468 ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write;
469 }
470
471 /* First: process any pending cmd that needs to be sent to userspace */
472 res = scsc_bt_shm_ant_read_cmd(&buf[consumed], len - consumed);
473 if (res > 0)
474 consumed += res;
475 else
476 ret = res;
477
478 /* Second: process any pending data that needs to be sent to userspace */
479 res = scsc_bt_shm_ant_read_data(&buf[consumed], len - consumed);
480 if (res > 0)
481 consumed += res;
482 else
483 ret = res;
484 }
485
486 /* If anything was read, generate the appropriate interrupt(s) */
487 if (ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read != ant_service.mailbox_cmd_ctr_driv_read ||
488 ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read != ant_service.mailbox_data_ctr_driv_read)
489 gen_bg_int = true;
490
491 /* Update the read index for all transfer rings */
492 ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read = ant_service.mailbox_cmd_ctr_driv_read;
493 ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read = ant_service.mailbox_data_ctr_driv_read;
494
495 /* Ensure the data is updating correctly in memory */
496 mmiowb();
497
498 if (gen_bg_int)
499 scsc_service_mifintrbit_bit_set(ant_service.service,
500 ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
501 SCSC_MIFINTR_TARGET_R4);
502
503 /* Decrease the ant readers counter */
504 atomic_dec(&ant_service.ant_readers);
505
506 return ret == 0 ? consumed : ret;
507 }
508
509 ssize_t scsc_shm_ant_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
510 {
511 size_t length;
512 size_t ant_pkt_len;
513 ssize_t written = 0;
514 ssize_t ret = 0;
515 size_t pkt_count = 0;
516
517 SCSC_TAG_DEBUG(BT_H4, "enter\n");
518
519 UNUSED(file);
520 UNUSED(offset);
521
522 /* Only 1 writer is allowed */
523 if (atomic_inc_return(&ant_service.ant_writers) != 1) {
524 SCSC_TAG_DEBUG(BT_H4, "only one reader allowed\n");
525 atomic_dec(&ant_service.ant_writers);
526 return -EIO;
527 }
528
529 /* Has en error been detect then just return with an error */
530 if (atomic_read(&ant_service.error_count) != 0) {
531 SCSC_TAG_DEBUG(BT_H4, "error has occured\n");
532 atomic_dec(&ant_service.ant_writers);
533 return -EIO;
534 }
535
536 while (written != count && ret == 0) {
537 length = min(count - written, sizeof(ant_write_buffer) - ant_service.ant_write_offset);
538 SCSC_TAG_DEBUG(BT_H4, "count: %zu, length: %zu, ant_write_offset: %zu, written:%zu, size:%zu\n",
539 count, length, ant_service.ant_write_offset,
540 written - (pkt_count * 2), sizeof(ant_write_buffer));
541
542 /* Is there room in the temp buffer */
543 if (length == 0) {
544 SCSC_TAG_ERR(BT_H4, "no room in the buffer\n");
545 atomic_inc(&ant_service.error_count);
546 ret = -EIO;
547 break;
548 }
549
550 /* Copy the userspace data to the target buffer */
551 ret = copy_from_user(&ant_write_buffer[ant_service.ant_write_offset], &buf[written], length);
552
553 if (ret == 0) {
554 /* Is the message a data message? */
555 if (ant_write_buffer[0] == ANT_DATA_MSG) {
556 /* Extract the data packet length */
557 ant_pkt_len = ant_write_buffer[1] + ANT_HEADER_LENGTH + 1;
558
559 /* Is it a complete packet available */
560 if (ant_pkt_len <= (length + ant_service.ant_write_offset)) {
561 /* Transfer the packet to the ANT data transfer ring */
562 ret = scsc_shm_ant_data_write(&ant_write_buffer[2], ant_pkt_len - 2);
563 if (ret >= 0) {
564 written += (ant_pkt_len - ant_service.ant_write_offset);
565 pkt_count += 1;
566 ant_service.ant_write_offset = 0;
567 ret = 0;
568 }
569 } else {
570 /* Still needing data to have the complete packet */
571 SCSC_TAG_WARNING(BT_H4,
572 "missing data (need=%zu, got=%zu)\n",
573 ant_pkt_len, (length - ant_service.ant_write_offset));
574 written += length;
575 ant_service.ant_write_offset += (u32) length;
576 }
577 /* Is the message a command message? */
578 } else if (ant_write_buffer[0] == ANT_COMMAND_MSG) {
579 /* Extract the ANT command packet length */
580 ant_pkt_len = ant_write_buffer[1] + ANT_HEADER_LENGTH + 1;
581
582 /* Is it a complete packet available */
583 if ((ant_pkt_len) <= (length + ant_service.ant_write_offset)) {
584 /* Transfer the packet to the ANT command transfer ring */
585 ret = scsc_shm_ant_cmd_write(&ant_write_buffer[2], ant_pkt_len - 2);
586 if (ret >= 0) {
587 written += (ant_pkt_len - ant_service.ant_write_offset);
588 pkt_count += 1;
589 ant_service.ant_write_offset = 0;
590 ret = 0;
591 }
592 } else {
593 /* Still needing data to have the complete packet */
594 SCSC_TAG_WARNING(BT_H4,
595 "missing data (need=%zu, got=%zu)\n",
596 (ant_pkt_len), (length + ant_service.ant_write_offset));
597 written += length;
598 ant_service.ant_write_offset += (u32) length;
599 }
600 /* Is there less data than a header then just wait for more */
601 } else if (length <= ANT_HEADER_LENGTH) {
602 ant_service.ant_write_offset += length;
603 written += length;
604 /* Header is unknown - unable to proceed */
605 } else {
606 atomic_inc(&ant_service.error_count);
607 ret = -EIO;
608 }
609 } else {
610 SCSC_TAG_WARNING(BT_H4, "copy_from_user returned: %zu\n", ret);
611 ret = -EACCES;
612 }
613 }
614
615 SCSC_TAG_DEBUG(BT_H4, "ant_write_offset=%zu, ret=%zu, written=%zu\n",
616 ant_service.ant_write_offset, ret, written - (pkt_count * 2));
617
618 /* Decrease the ant readers counter */
619 atomic_dec(&ant_service.ant_writers);
620
621 return ret == 0 ? written : ret;
622 }
623
/* Poll entry point: reports POLLERR when the driver has flagged a fatal
 * error, readability when either controller->driver ring has pending
 * entries, and POLLOUT otherwise.
 */
unsigned int scsc_shm_ant_poll(struct file *file, poll_table *wait)
{
	/* Add the wait queue to the polling queue */
	poll_wait(file, &ant_service.read_wait, wait);

	/* Has an error been detected then just return with an error */
	if (atomic_read(&ant_service.error_count) != 0)
		return POLLERR;

	/* Any unread entries on the cmd or data ring means readable */
	if (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
	    ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
	    ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
	    ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read) {
		SCSC_TAG_DEBUG(BT_H4, "queue(s) changed\n");
		return POLLIN | POLLRDNORM; /* readable */
	}

	SCSC_TAG_DEBUG(BT_H4, "no change\n");

	/* Re-check the error count - it may have been bumped since above */
	return (atomic_read(&ant_service.error_count) != 0) ? POLLERR : POLLOUT;
}
645
646 /* Initialise the shared memory interface for ANT */
647 int scsc_ant_shm_init(void)
648 {
649 /* Get kmem pointer to the shared memory ref */
650 ant_service.asmhcp_protocol = scsc_mx_service_mif_addr_to_ptr(ant_service.service, ant_service.asmhcp_ref);
651 if (ant_service.asmhcp_protocol == NULL) {
652 SCSC_TAG_ERR(BT_COMMON, "couldn't map kmem to shm_ref 0x%08x\n", (u32)ant_service.asmhcp_ref);
653 return -ENOMEM;
654 }
655
656 /* Clear the protocol shared memory area */
657 memset(ant_service.asmhcp_protocol, 0, sizeof(*ant_service.asmhcp_protocol));
658 ant_service.asmhcp_protocol->header.magic_value = ASMHCP_PROTOCOL_MAGICVALUE;
659 ant_service.mailbox_data_ctr_driv_read = 0;
660 ant_service.mailbox_data_ctr_driv_write = 0;
661 ant_service.mailbox_cmd_ctr_driv_read = 0;
662 ant_service.mailbox_cmd_ctr_driv_write = 0;
663 ant_service.read_index = 0;
664 ant_irq_mask = 0;
665
666 /* Initialise the interrupt handlers */
667 if (scsc_ant_shm_init_interrupt() < 0) {
668 SCSC_TAG_ERR(BT_COMMON, "Failed to register IRQ bits\n");
669 return -EIO;
670 }
671
672 return 0;
673 }
674
675 /* Terminate the shared memory interface for ANT, stopping its thread.
676 *
677 * Note: The service must be stopped prior to calling this function.
678 * The shared memory can only be released after calling this function.
679 */
680 void scsc_ant_shm_exit(void)
681 {
682 u16 irq_num = 0;
683
684 /* Release IRQs */
685 if (ant_service.asmhcp_protocol != NULL) {
686 if (ant_irq_mask & 1 << irq_num++) {
687 scsc_service_mifintrbit_unregister_tohost(
688 ant_service.service,
689 ant_service.asmhcp_protocol->header.bg_to_ap_int_src);
690 }
691
692 if (ant_irq_mask & 1 << irq_num++) {
693 scsc_service_mifintrbit_free_fromhost(
694 ant_service.service,
695 ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
696 SCSC_MIFINTR_TARGET_R4);
697 }
698 }
699
700 /* Clear all control structures */
701 ant_service.asmhcp_protocol = NULL;
702 }