#include <scsc/scsc_log_collector.h>
#include "scsc_log_collector_proc.h"
+#include "scsc_log_collector_mmap.h"
#include <scsc/scsc_mx.h>
#include "mxlogger.h"
#define SCSC_NUM_CHUNKS_SUPPORTED 12
+#define TO_RAM 0
+#define TO_FILE 1
/* Add-remove supported chunks on this kernel */
static u8 chunk_supported_sbl[SCSC_NUM_CHUNKS_SUPPORTED] = {
	SCSC_LOG_CHUNK_SYNC,
	/* NOTE: only the SYNC chunk is listed so far; the remaining slots of
	 * this SCSC_NUM_CHUNKS_SUPPORTED-sized array are zero-initialized. */
};
/* Collect logs in an intermediate buffer to be collected at later time (mmap or wq) */
-static bool collect_to_ram;
+static bool collect_to_ram = true;
module_param(collect_to_ram, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(collect_to_ram, "Collect buffer in ram");
bool in_collection;
char fapi_ver[SCSC_LOG_FAPI_VERSION_SIZE];
+ unsigned char *buf;
struct workqueue_struct *collection_workq;
struct work_struct collect_work;
enum scsc_log_reason collect_reason;
pr_info("Sable Log Collection is now %sABLED.\n",
sable_collection_off ? "DIS" : "EN");
mxlogger_set_enabled_status(!sable_collection_off);
+
+ /* Create the buffer on the constructor */
+ log_status.buf = vzalloc(SCSC_LOG_COLLECT_MAX_SIZE);
+ if (IS_ERR_OR_NULL(log_status.buf)) {
+ pr_err("open allocating memmory err = %ld\n", PTR_ERR(log_status.buf));
+ log_status.buf = NULL;
+ }
+
scsc_log_collect_proc_create();
+ scsc_log_collector_mmap_create();
return 0;
}
void __exit scsc_log_collector_exit(void)
{
+ if (log_status.buf)
+ vfree(log_status.buf);
+
scsc_log_collect_proc_remove();
if (log_status.collection_workq) {
cancel_work_sync(&log_status.collect_work);
EXPORT_SYMBOL(scsc_log_collector_unregister_client);
+/* Return the intermediate RAM collection buffer (vzalloc'd at module
+ * init), or NULL if that allocation failed. Exposed so the mmap chardev
+ * can map the buffer into userspace. */
+unsigned char *scsc_log_collector_get_buffer(void)
+{
+	return log_status.buf;
+}
+
static inline int __scsc_log_collector_write_to_ram(char __user *buf, size_t count, u8 align)
{
+	/* Refuse writes outside an active collection or without a RAM buffer */
+	if (!log_status.in_collection || !log_status.buf)
+		return -EIO;
+
+	/* Round the write position up to 'align' FIRST, so that the bounds
+	 * check below accounts for the padding bytes the alignment adds.
+	 * Checking before aligning could let the memcpy run past
+	 * SCSC_LOG_COLLECT_MAX_SIZE. */
+	log_status.pos = (log_status.pos + align - 1) & ~(align - 1);
+
+	if (log_status.pos + count > SCSC_LOG_COLLECT_MAX_SIZE) {
+		pr_err("Write will exceed SCSC_LOG_COLLECT_MAX_SIZE. Abort write\n");
+		return -ENOMEM;
+	}
+
+	/* Write buf to RAM */
+	memcpy(log_status.buf + log_status.pos, buf, count);
+
+	log_status.pos += count;
+
	return 0;
}
if (!log_status.in_collection)
return -EIO;
+ if (log_status.pos + count > SCSC_LOG_COLLECT_MAX_SIZE) {
+ pr_err("Write will exceed SCSC_LOG_COLLECT_MAX_SIZE. Abort write\n");
+ return -ENOMEM;
+ }
+
log_status.pos = (log_status.pos + align - 1) & ~(align - 1);
/* Write buf to file */
ret = vfs_write(log_status.fp, buf, count, &log_status.pos);
"scsc_log_wlan_trig", "scsc_log_bt_trig",
"scsc_log_common_trig"/* Add others */};
-static inline int __scsc_log_collector_collect_to_file(enum scsc_log_reason reason)
+/* Collect logs from all registered clients either into the RAM buffer
+ * (buffer == TO_RAM) or into a <reason>.sbl file (buffer == TO_FILE). */
+static inline int __scsc_log_collector_collect(enum scsc_log_reason reason, u8 buffer)
{
	struct scsc_log_client *lc, *next;
-	struct timeval t;
-	struct tm tm_n;
	mm_segment_t old_fs;
	char memdump_path[128];
	int ret = 0;
	struct scsc_log_sbl_header sbl_header;
	struct scsc_log_chunk_header chk_header;
	u8 j;
+	/* Set only after the SBL has been fully written out; gates the
+	 * wlbtd call at the end of the function. */
+	bool sbl_is_valid = false;
	mutex_lock(&log_mutex);
-	pr_info("Log collection to file triggered\n");
+	pr_info("Log collection triggered\n");
	start = ktime_get();
-	do_gettimeofday(&t);
-	time_to_tm(t.tv_sec, 0, &tm_n);
-	snprintf(memdump_path, sizeof(memdump_path), "%s/%s.sbl",
-		 collection_dir_buf, scsc_loc_reason_str[reason]);
+	if (buffer == TO_FILE) {
+		snprintf(memdump_path, sizeof(memdump_path), "%s/%s.sbl",
+			 collection_dir_buf, scsc_loc_reason_str[reason]);
-	/* change to KERNEL_DS address limit */
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
+		/* change to KERNEL_DS address limit */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
-	log_status.fp = filp_open(memdump_path, O_CREAT | O_WRONLY | O_SYNC | O_TRUNC, 0664);
-	if (IS_ERR(log_status.fp)) {
-		pr_err("open file error, err = %ld\n", PTR_ERR(log_status.fp));
-		goto exit;
+		log_status.fp = filp_open(memdump_path, O_CREAT | O_WRONLY | O_SYNC | O_TRUNC, 0664);
+		if (IS_ERR(log_status.fp)) {
+			pr_err("open file error, err = %ld\n", PTR_ERR(log_status.fp));
+			mutex_unlock(&log_mutex);
+			return PTR_ERR(log_status.fp);
+		}
+	} else if (!log_status.buf) {
+		/* TO_RAM requires the intermediate buffer allocated at init */
+		pr_err("RAM buffer not created. Aborting dump\n");
+		mutex_unlock(&log_mutex);
+		return -ENOMEM;
	}
	log_status.in_collection = true;
	scsc_log_collector_write((char *)&sbl_header, sizeof(struct scsc_log_sbl_header), 1);
-	/* Sync file from filesystem to physical media */
-	ret = vfs_fsync(log_status.fp, 0);
-	if (ret < 0) {
-		pr_err("sync file error, error = %d\n", ret);
-		goto exit;
+	if (buffer == TO_FILE) {
+		/* Sync file from filesystem to physical media */
+		ret = vfs_fsync(log_status.fp, 0);
+		if (ret < 0) {
+			pr_err("sync file error, error = %d\n", ret);
+			goto exit;
+		}
	}
+	sbl_is_valid = true;
exit:
-	/* close file before return */
-	if (!IS_ERR(log_status.fp))
-		filp_close(log_status.fp, current->files);
+	if (buffer == TO_FILE) {
+		/* close file before return */
+		if (!IS_ERR(log_status.fp))
+			filp_close(log_status.fp, current->files);
-	/* restore previous address limit */
-	set_fs(old_fs);
+		/* restore previous address limit */
+		set_fs(old_fs);
+	}
	log_status.in_collection = false;
	lc->collect_client->collect_end(lc->collect_client);
	}
-	pr_info("File %s collection end. Took: %lld\n", memdump_path, ktime_to_ns(ktime_sub(ktime_get(), start)));
+	pr_info("Log collection end. Took: %lld\n", ktime_to_ns(ktime_sub(ktime_get(), start)));
#ifdef CONFIG_SCSC_WLBTD
-	call_wlbtd_sable(scsc_loc_reason_str[reason]);
+	if (sbl_is_valid)
+		call_wlbtd_sable(scsc_loc_reason_str[reason]);
#endif
	mutex_unlock(&log_mutex);
}
if (collect_to_ram)
- ret = __scsc_log_collector_collect_to_ram(reason);
+ ret = __scsc_log_collector_collect(reason, TO_RAM);
else
- ret = __scsc_log_collector_collect_to_file(reason);
+ ret = __scsc_log_collector_collect(reason, TO_FILE);
return ret;
}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <scsc/scsc_log_collector.h>
+#include "scsc_log_collector_mmap.h"
+
+#define DEVICE_NAME "scsc_log_collector"
+#define N_MINORS 1
+
+/* Chardev bookkeeping shared by the create/destroy functions below.
+ * NOTE(review): these have external linkage; consider making them
+ * 'static' if no other translation unit references them — confirm. */
+struct class *scsc_log_collector_class;
+struct cdev scsc_log_collector_dev[N_MINORS];
+dev_t dev_num;
+
+/* open() handler for the chardev; keeps no per-open state, only logs. */
+static int scsc_log_collector_mmap_open(struct inode *inode, struct file *filp)
+{
+	pr_info("scsc_log_collector_mmap_open\n");
+	return 0;
+}
+
+/* release() handler for the chardev; nothing to tear down, only logs. */
+static int scsc_log_collector_release(struct inode *inode, struct file *filp)
+{
+	pr_info("scsc_log_collector_release\n");
+	return 0;
+}
+
+/* mmap() handler: map the vmalloc'd RAM log buffer into userspace.
+ * The requested window (offset + size) must lie entirely within
+ * SCSC_LOG_COLLECT_MAX_SIZE; both are page-granular as guaranteed by
+ * the mmap syscall. Returns 0 on success or a negative errno. */
+static int scsc_log_collector_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long page, pos;
+	unsigned char *buf;
+
+	if (size > SCSC_LOG_COLLECT_MAX_SIZE)
+		return -EINVAL;
+	/* Written as a subtraction (size <= MAX holds here) so the check
+	 * cannot wrap around for large offsets. */
+	if (offset > SCSC_LOG_COLLECT_MAX_SIZE - size)
+		return -EINVAL;
+
+	buf = scsc_log_collector_get_buffer();
+	if (!buf) {
+		pr_err("No buffer mapped\n");
+		return -ENOMEM;
+	}
+
+	pos = (unsigned long)buf + offset;
+
+	/* offset is unsigned long: use %lu (was %ld, a format mismatch) */
+	pr_info("scsc_log_collector_mmap size:%lu offset %lu\n", size, offset);
+
+	/* The buffer is vmalloc'd and therefore not physically contiguous;
+	 * translate and remap it one page at a time. */
+	while (size > 0) {
+		page = vmalloc_to_pfn((void *)pos);
+		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+			return -EAGAIN;
+
+		start += PAGE_SIZE;
+		pos += PAGE_SIZE;
+		if (size > PAGE_SIZE)
+			size -= PAGE_SIZE;
+		else
+			size = 0;
+	}
+
+	return 0;
+}
+
+/* File operations for the scsc_log_collector chardev: open, mmap and
+ * release only — no read/write path, userspace consumes via mmap. */
+static const struct file_operations scsc_log_collector_mmap_fops = {
+	.owner = THIS_MODULE,
+	.open = scsc_log_collector_mmap_open,
+	.mmap = scsc_log_collector_mmap,
+	.release = scsc_log_collector_release,
+};
+
+/* Create the scsc_log_collector chardev(s): reserve a dev_t region,
+ * create the sysfs class, then register one cdev + device node per
+ * minor. Returns 0 on success or a negative errno. */
+int scsc_log_collector_mmap_create(void)
+{
+	struct device *dev;
+	int i;
+	int ret;
+	dev_t curr_dev;
+
+	/* Request the kernel for N_MINOR devices */
+	ret = alloc_chrdev_region(&dev_num, 0, N_MINORS, "scsc_log_collector");
+	if (ret) {
+		pr_err("alloc_chrdev_region failed");
+		goto error;
+	}
+
+	/* Create a class : appears at /sys/class */
+	scsc_log_collector_class = class_create(THIS_MODULE, "scsc_log_collector_class");
+	if (IS_ERR(scsc_log_collector_class)) {
+		ret = PTR_ERR(scsc_log_collector_class);
+		goto error_class;
+	}
+
+	/* Initialize and create each of the device(cdev) */
+	for (i = 0; i < N_MINORS; i++) {
+		/* Build up the current device number before registering, so
+		 * both cdev_add() and device_create() use the same minor
+		 * (previously device_create() always used dev_num). */
+		curr_dev = MKDEV(MAJOR(dev_num), MINOR(dev_num) + i);
+
+		/* Associate the cdev with a set of file_operations */
+		cdev_init(&scsc_log_collector_dev[i], &scsc_log_collector_mmap_fops);
+		scsc_log_collector_dev[i].owner = THIS_MODULE;
+
+		ret = cdev_add(&scsc_log_collector_dev[i], curr_dev, 1);
+		if (ret) {
+			/* Skip device_create for a cdev that failed to register */
+			pr_err("cdev_add failed");
+			continue;
+		}
+
+		dev = device_create(scsc_log_collector_class, NULL, curr_dev, NULL, "scsc_log_collector_%d", i);
+		if (IS_ERR(dev)) {
+			pr_err("device_create failed");
+			ret = PTR_ERR(dev);
+			cdev_del(&scsc_log_collector_dev[i]);
+			continue;
+		}
+	}
+
+	return 0;
+
+error_class:
+	unregister_chrdev_region(dev_num, N_MINORS);
+error:
+	/* Propagate the failure (previously returned 0 on error) */
+	return ret;
+}
+
+/* Tear down the chardev: destroy the device node, delete every cdev,
+ * destroy the class and release the reserved dev_t region. Mirrors
+ * scsc_log_collector_mmap_create(). Always returns 0. */
+int scsc_log_collector_mmap_destroy(void)
+{
+	int i;
+
+	/* NOTE(review): only the first minor (dev_num) gets device_destroy;
+	 * fine while N_MINORS == 1 — revisit if more minors are added. */
+	device_destroy(scsc_log_collector_class, dev_num);
+	for (i = 0; i < N_MINORS; i++)
+		cdev_del(&scsc_log_collector_dev[i]);
+	class_destroy(scsc_log_collector_class);
+	unregister_chrdev_region(dev_num, N_MINORS);
+	return 0;
+}