--- /dev/null
+source "drivers/misc/samsung/gpio_debug/Kconfig"
+source "drivers/misc/samsung/scsc/Kconfig"
+source "drivers/misc/samsung/scsc_bt/Kconfig"
+source "drivers/misc/samsung/kic/Kconfig"
--- /dev/null
+# Needed since this subdir is symlinked in the main Kernel tree
+# without this our samsung subdir is NOT cleaned.
+clean-files := built-in.o
+
+obj-$(CONFIG_GPIO_DEBUG) += gpio_debug/
+obj-$(CONFIG_SAMSUNG_KIC) += kic/
+obj-$(CONFIG_SCSC_CORE) += scsc/
+obj-$(CONFIG_SCSC_BT) += scsc_bt/
--- /dev/null
+config GPIO_DEBUG
+ tristate "GPIO debug functionality"
+
--- /dev/null
+obj-$(CONFIG_GPIO_DEBUG) += gpio_debug.o
--- /dev/null
+/* Copyright (c) 2014 Samsung Electronics Co., Ltd */
+
+#include <linux/gpio_debug.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+
+struct gpio_debug_data;
+
+/* One named trace point that can be routed to a physical GPIO line. */
+struct gpio_debug_event {
+ int gpio; /* resolved GPIO number, -1 while unmapped */
+ int gpio_idx; /* index into gpio_debug_data.gpios, -1 while unmapped */
+ struct gpio_debug_event_def def; /* name/description supplied by registrant */
+ struct gpio_debug_data *data; /* owning driver instance */
+ struct dentry *file; /* per-event debugfs control file */
+};
+
+/* Driver-wide state; exactly one instance (debug_data) exists. */
+struct gpio_debug_data {
+ int gpio_count; /* number of GPIOs parsed from the device tree */
+ int *gpios; /* GPIO numbers, gpio_count entries */
+ struct dentry *gpio_debug_dir; /* debugfs root: gpio_debug/ */
+ struct dentry *gpio_debug_events_dir; /* debugfs: gpio_debug/events/ */
+ struct platform_device *pdev;
+ struct gpio_debug_event *events; /* krealloc'ed array of registered events */
+ int event_count;
+ int event_base; /* base index of the built-in self-test events */
+};
+
+static struct gpio_debug_data debug_data;
+
+/* Serialises access to debug_data and the per-event debugfs files. */
+DEFINE_MUTEX(debug_lock);
+
+/* Offsets (relative to data->event_base) of the built-in self-test events. */
+enum {
+ GPIO_DEBUG_TOGGLE_100,
+ GPIO_DEBUG_TOGGLE_200,
+};
+
+static struct gpio_debug_event_def debug_events_table[] = {
+ [GPIO_DEBUG_TOGGLE_100] = {
+ .name = "toggle100",
+ .description = "Toggle the GPIO 100 times at initialisation",
+ },
+ [GPIO_DEBUG_TOGGLE_200] = {
+ .name = "toggle200",
+ .description = "Toggle the GPIO 200 times at initialisation",
+ },
+};
+
+/* Drive @gpio to @state; silently ignores unmapped (-1) GPIOs. */
+static void gpio_debug_event(int gpio, int state)
+{
+ if (gpio >= 0)
+ gpio_set_value(gpio, state);
+}
+
+/* Bounds-checked dispatch from a global event id to its mapped GPIO. */
+static void gpio_debug_event_exec(int event_id, int state)
+{
+ if ((event_id >= 0) && (event_id < debug_data.event_count) && debug_data.events)
+ gpio_debug_event(debug_data.events[event_id].gpio, state);
+}
+
+/* Mark entry into an instrumented section: drives the event GPIO low. */
+void gpio_debug_event_enter(int base, int id)
+{
+ gpio_debug_event_exec(base + id, 0);
+}
+
+/* Mark exit from an instrumented section: drives the event GPIO high. */
+void gpio_debug_event_exit(int base, int id)
+{
+ gpio_debug_event_exec(base + id, 1);
+}
+
+/* Return 1 when event (base + id) exists and is mapped to a GPIO, else 0. */
+int gpio_debug_event_enabled(int base, int id)
+{
+ int event_id = base + id;
+
+ if ((event_id >= 0) &&
+ (event_id < debug_data.event_count) &&
+ debug_data.events &&
+ debug_data.events[event_id].gpio >= 0)
+ return 1;
+ else
+ return 0;
+}
+
+/* Point @event at the GPIO stored at @gpio_index; a negative index
+ * detaches the event. Returns -ERANGE when the index exceeds the
+ * probed GPIO count.
+ * NOTE(review): event_write() calls this under debug_lock, but the
+ * probe self-test path calls it unlocked - confirm intended.
+ */
+static int gpio_debug_event_link(struct gpio_debug_event *event, int gpio_index)
+{
+ struct gpio_debug_data *data = event->data;
+
+ if (gpio_index >= data->gpio_count)
+ return -ERANGE;
+
+ if (gpio_index >= 0)
+ event->gpio = data->gpios[gpio_index];
+ else
+ event->gpio = -1;
+
+ event->gpio_idx = gpio_index;
+
+ return 0;
+}
+
+/* debugfs read handler: reports the event description and its current
+ * GPIO index/number mapping.
+ */
+static ssize_t event_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ ssize_t ret = 0;
+ struct gpio_debug_event *event = file->f_inode->i_private;
+ char buf[256];
+ int pos;
+
+ mutex_lock(&debug_lock);
+
+ pos = snprintf(buf, sizeof(buf), "Description:\n%s\n\nEvent is mapped to GPIO index %d with number %d\n", event->def.description, event->gpio_idx, event->gpio);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ mutex_unlock(&debug_lock);
+
+ return ret;
+}
+
+/* debugfs write handler: accepts a decimal GPIO index (or -1 to unmap)
+ * and links this event to that GPIO. Returns the number of characters
+ * consumed, or a negative errno.
+ *
+ * Fixes vs. original: kmalloc() checked; user_buf copied with
+ * copy_from_user() (it is a __user pointer, memory_read_from_buffer()
+ * reads kernel memory); the kstrtou32-failure path no longer leaks
+ * user_string and debug_lock (the original was also missing the
+ * closing brace of that if-block); kstrtoint matches the signed index.
+ */
+static ssize_t event_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *user_string;
+ ssize_t ret;
+ struct gpio_debug_event *event = file->f_inode->i_private;
+ int new_index = -1;
+
+ mutex_lock(&debug_lock);
+
+ user_string = kmalloc(count + 1, GFP_KERNEL);
+ if (!user_string) {
+ mutex_unlock(&debug_lock);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(user_string, user_buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ user_string[count] = '\0';
+ ret = (ssize_t)strnlen(user_string, count + 1);
+
+ if (kstrtoint(user_string, 10, &new_index)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ gpio_debug_event_link(event, new_index);
+
+out:
+ kfree(user_string);
+ mutex_unlock(&debug_lock);
+
+ return ret;
+}
+
+static const struct file_operations event_ops = {
+ .read = event_read,
+ .write = event_write,
+};
+
+/* Create the per-event debugfs control file under gpio_debug/events/. */
+static void create_event_file(struct gpio_debug_event *event)
+{
+ struct gpio_debug_data *data = event->data;
+
+ if (data && data->gpio_debug_events_dir) {
+ event->file = debugfs_create_file(event->def.name, 0660, data->gpio_debug_events_dir, event, &event_ops);
+ if (IS_ERR_OR_NULL(event->file)) {
+ event->file = NULL;
+ pr_warn("%s: Could not create debugfs file for %s\n", __func__, event->def.name);
+ }
+ }
+}
+
+/* Remove the debugfs file created by create_event_file(), if any. */
+static void remove_event_file(struct gpio_debug_event *event)
+{
+ if (event && event->file) {
+ debugfs_remove(event->file);
+ event->file = NULL;
+ }
+}
+
+/* Initialise @event_save from the registrant's template (unmapped) and
+ * expose its debugfs file.
+ */
+static void gpio_debug_init_event(struct gpio_debug_data *data, struct gpio_debug_event_def *event, struct gpio_debug_event *event_save)
+{
+ event_save->def.description = event->description;
+ event_save->def.name = event->name;
+ event_save->gpio = -1;
+ event_save->gpio_idx = -1;
+ event_save->data = data;
+
+ create_event_file(event_save);
+}
+
+/* Tear down @event and reset it to the unmapped/empty state. */
+static void gpio_debug_destroy_event(struct gpio_debug_event *event)
+{
+ remove_event_file(event);
+ event->def.description = NULL;
+ event->def.name = NULL;
+ event->gpio = -1;
+ event->gpio_idx = -1;
+ event->data = NULL;
+}
+
+/* Register @event_count new events and (re)create the debugfs files for
+ * all events. Returns the base index of the newly added events, or
+ * -ENOMEM.
+ *
+ * Fix vs. original: the krealloc-failure path returned while still
+ * holding debug_lock (deadlocking every later caller) and left the
+ * previously-removed debugfs files missing; both are restored now.
+ */
+int gpio_debug_event_list_register(struct gpio_debug_event_def *events, int event_count)
+{
+ struct gpio_debug_data *data = &debug_data;
+ int start_index = data->event_count;
+ struct gpio_debug_event *new_events;
+ int new_event_count = data->event_count + event_count;
+ int i, j;
+
+ mutex_lock(&debug_lock);
+
+ /* The events array may move during krealloc, so the debugfs files
+  * (whose i_private points into it) must be removed first. */
+ if (data->events)
+ for (i = 0; i < data->event_count; i++)
+ remove_event_file(&data->events[i]);
+
+ new_events = krealloc(data->events, new_event_count * sizeof(struct gpio_debug_event), GFP_KERNEL);
+ if (!new_events) {
+ pr_warn("%s: Could not expand for extra events\n", __func__);
+ /* If krealloc fails, data->events is unchanged: re-create the
+  * files we removed above and release the lock before exiting. */
+ if (data->events)
+ for (i = 0; i < data->event_count; i++)
+ create_event_file(&data->events[i]);
+ mutex_unlock(&debug_lock);
+ return -ENOMEM;
+ }
+ data->events = new_events;
+ for (i = 0; i < data->event_count; i++)
+ create_event_file(&data->events[i]);
+
+ data->event_count = new_event_count;
+
+ for (i = 0, j = start_index; (i < event_count) && (j < data->event_count); i++, j++)
+ gpio_debug_init_event(data, &events[i], &data->events[j]);
+
+ mutex_unlock(&debug_lock);
+ return start_index;
+}
+
+/* Destroy @event_count events starting at @base (clamped to the
+ * registered range). The array slots are cleared, not freed.
+ */
+void gpio_debug_event_list_unregister(int base, int event_count)
+{
+ int i;
+ struct gpio_debug_data *data = &debug_data;
+
+ mutex_lock(&debug_lock);
+
+ for (i = base; (i < (event_count + base)) && (i < data->event_count); i++)
+ gpio_debug_destroy_event(&data->events[i]);
+
+ mutex_unlock(&debug_lock);
+}
+
+/* debugfs read handler listing every registered event and the GPIO
+ * index it is currently mapped to.
+ *
+ * Fixes vs. original: devm_kzalloc() checked; the listing is built
+ * incrementally with scnprintf() - the original snprintf(buf, len,
+ * "%s...", buf, ...) passed overlapping source and destination, which
+ * is undefined behaviour; the read length is the bytes actually
+ * written rather than the allocation size.
+ */
+static ssize_t event_list_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ int i;
+ ssize_t ret = 0;
+ int length = 0;
+ int pos = 0;
+ char *buf;
+ struct gpio_debug_data *data = file->f_inode->i_private;
+ struct device *dev = &data->pdev->dev;
+ char headline[] = " gpio event\n";
+
+ mutex_lock(&debug_lock);
+
+ length += strlen(headline);
+
+ /* 7 covers up to 5 index digits, the separating space and '\n' */
+ for (i = 0; i < data->event_count; i++)
+ if (data->events[i].def.name)
+ length += strlen(data->events[i].def.name) + 7;
+ length++; /* Reserve space for NULL termination */
+
+ buf = devm_kzalloc(dev, length, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&debug_lock);
+ return -ENOMEM;
+ }
+ pos = scnprintf(buf, length, "%s", headline);
+ for (i = 0; i < data->event_count; i++)
+ if (data->events[i].data) {
+ if (data->events[i].gpio_idx >= 0)
+ pos += scnprintf(buf + pos, length - pos, "%5d %s\n", data->events[i].gpio_idx, data->events[i].def.name);
+ else
+ pos += scnprintf(buf + pos, length - pos, " %s\n", data->events[i].def.name);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ devm_kfree(dev, buf);
+
+ mutex_unlock(&debug_lock);
+
+ return ret;
+}
+
+static const struct file_operations event_list_ops = {
+ .read = event_list_read,
+};
+
+/* debugfs read handler: reports the number of probed debug GPIOs. */
+static ssize_t num_gpios_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ ssize_t ret = 0;
+ struct gpio_debug_data *data = file->f_inode->i_private;
+ char buf[256];
+ int pos;
+
+ pos = snprintf(buf, sizeof(buf), "%d\n", data->gpio_count);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static const struct file_operations num_gpios_ops = {
+ .read = num_gpios_read,
+};
+
+/* Platform probe: parse the DT GPIO list, request each line, create the
+ * debugfs hierarchy, then run the built-in toggle self-test on every
+ * probed GPIO.
+ */
+static int gpio_debug_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int count;
+ struct gpio_debug_data *data = &debug_data;
+ int i, j;
+
+ mutex_lock(&debug_lock);
+
+ count = of_gpio_count(np);
+ if (count < 0)
+ count = 0; /* Errors register as no GPIOs available */
+
+ data->gpio_count = count;
+ data->gpios = NULL;
+ data->pdev = pdev;
+
+ if (count) {
+ /* NOTE(review): devm_kzalloc() result is used unchecked below */
+ data->gpios = devm_kzalloc(dev, count * sizeof(int), GFP_KERNEL);
+ for (i = 0; i < count; i++) {
+ data->gpios[i] = of_get_gpio(np, i);
+ dev_info(dev, "GPIO at index %d has number %d\n", i, data->gpios[i]);
+ if (gpio_is_valid(data->gpios[i])) {
+ char label[256];
+
+ /* NOTE(review): gpiolib keeps the label pointer; confirm a
+  * stack buffer is safe here or request with a static label. */
+ sprintf(label, "debug-gpio-%d", i);
+ dev_info(dev, "Requesting GPIO %d index %d with label %s\n", data->gpios[i], i, label);
+ if (devm_gpio_request(dev, data->gpios[i], label))
+ dev_err(dev, "GPIO [%d] request failed\n", data->gpios[i]);
+ gpio_set_value(data->gpios[i], 1);
+ } else
+ dev_warn(dev, "GPIO at index %d is invalid\n", i);
+ }
+ }
+
+ data->gpio_debug_dir = debugfs_create_dir("gpio_debug", NULL);
+ if (!IS_ERR_OR_NULL(data->gpio_debug_dir)) {
+ data->gpio_debug_events_dir = debugfs_create_dir("events", data->gpio_debug_dir);
+ if (IS_ERR_OR_NULL(data->gpio_debug_events_dir)) {
+ data->gpio_debug_events_dir = NULL;
+ dev_err(dev, "Debugfs cannot create subdir\n");
+ }
+ debugfs_create_file("event_list", 0440, data->gpio_debug_dir, data, &event_list_ops);
+ debugfs_create_file("num_gpios", 0440, data->gpio_debug_dir, data, &num_gpios_ops);
+ } else {
+ data->gpio_debug_dir = NULL;
+ dev_warn(dev, "Debugfs is not available, configuration of GPIO debug is not possible\n");
+ }
+
+ /* Re-create files for any events registered before probe ran. */
+ for (i = 0; i < data->event_count; i++)
+ create_event_file(&data->events[i]);
+
+ /* Drop the lock: gpio_debug_event_list_register() takes it again. */
+ mutex_unlock(&debug_lock);
+
+ data->event_base = gpio_debug_event_list_register(debug_events_table, ARRAY_SIZE(debug_events_table));
+
+ /* Self-test: toggle each probed GPIO 100 times... */
+ for (i = 0; i < count; i++) {
+ gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_100], i);
+ for (j = 0; j < 100; j++) {
+ gpio_debug_event_enter(data->event_base, GPIO_DEBUG_TOGGLE_100);
+ gpio_debug_event_exit(data->event_base, GPIO_DEBUG_TOGGLE_100);
+ }
+ }
+ gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_100], -1);
+
+ /* ...and 200 times. */
+ for (i = 0; i < count; i++) {
+ gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_200], i);
+ for (j = 0; j < 200; j++) {
+ gpio_debug_event_enter(data->event_base, GPIO_DEBUG_TOGGLE_200);
+ gpio_debug_event_exit(data->event_base, GPIO_DEBUG_TOGGLE_200);
+ }
+ }
+ gpio_debug_event_link(&data->events[data->event_base + GPIO_DEBUG_TOGGLE_200], -1);
+
+ return 0;
+}
+
+/* Platform remove: tear down debugfs, release GPIOs and free the
+ * events array. debug_data itself is static and merely reset.
+ */
+static int gpio_debug_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_debug_data *data = &debug_data;
+
+ mutex_lock(&debug_lock);
+ debugfs_remove_recursive(data->gpio_debug_dir);
+ data->gpio_debug_dir = NULL;
+ data->gpio_debug_events_dir = NULL;
+
+ if (data->gpios) {
+ int i;
+
+ for (i = 0; i < data->gpio_count; i++)
+ if (gpio_is_valid(data->gpios[i]))
+ devm_gpio_free(dev, data->gpios[i]);
+ devm_kfree(dev, data->gpios);
+ data->gpios = NULL;
+ data->gpio_count = 0;
+ }
+ data->pdev = NULL;
+ kfree(data->events);
+ data->events = NULL;
+ data->event_count = 0;
+ mutex_unlock(&debug_lock);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_debug_match[] = {
+ { .compatible = "samsung,gpio-debug", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_debug_match);
+
+static struct platform_driver gpio_debug_driver = {
+ .probe = gpio_debug_probe,
+ .remove = gpio_debug_remove,
+ .driver = {
+ .name = "gpio_debug",
+ .of_match_table = gpio_debug_match,
+ }
+};
+module_platform_driver(gpio_debug_driver);
+
+MODULE_DESCRIPTION("GPIO Debug framework");
+MODULE_AUTHOR("Samsung Electronics Co., Ltd");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+config SAMSUNG_KIC
+ tristate "Kernel Information and Control (KIC) interface for Samsung Wi-Fi and Bluetooth chips"
+
+if SAMSUNG_KIC != n
+
+config SLSI_KIC_API_ENABLED
+ bool "Enable the KIC kernel API"
+ depends on SAMSUNG_KIC
+endif
--- /dev/null
+obj-$(CONFIG_SAMSUNG_KIC) += samsung_kic.o
+samsung_kic-y += \
+ slsi_kic_core.o \
+ slsi_kic_filtering.o \
+ slsi_kic_sap_wifi.o \
+ slsi_kic_sap_cm.o \
+ slsi_kic_sap_bt.o \
+ slsi_kic_sap_ant.o
+
+ccflags-y += $(CONFIG_SAMSUNG_KIC_EXTRA)
+
+# ----------------------------------------------------------------------------
+# KIC configuration
+# ----------------------------------------------------------------------------
+
+ccflags-$(CONFIG_SLSI_KIC_API_ENABLED) += -DCONFIG_SLSI_KIC_API_ENABLED
+
+ccflags-$(CONFIG_SCSC_LOGRING) += -DCONFIG_SCSC_PRINTK
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include "slsi_kic_internal.h"
+
+/* Protects module init/exit and the global context pointer below. */
+static DEFINE_MUTEX(kic_lock);
+/* Global module context; allocated in slsi_kic_init(). */
+static struct slsi_kic_pdata *pdata;
+
+#define SLSI_MAX_NUM_KIC_OPS 7
+#define SLSI_MAX_NUM_MULTICAST_GROUP 1
+
+/* Defined at the bottom of this file; referenced by slsi_kic_fam. */
+static struct genl_ops slsi_kic_ops[SLSI_MAX_NUM_KIC_OPS];
+
+/* genetlink pre-command hook: currently only logs; always allows. */
+static int slsi_kic_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ SCSC_TAG_ERR(KIC_COMMON, "%s Handle CMD %d, seq %d\n",
+ __func__, ops->cmd, info->snd_seq);
+
+ OS_UNUSED_PARAMETER(skb);
+
+ /* Called BEFORE the command cb - do filtering here */
+
+ /* Consider doing some check for "test_mode" primitives here:
+ * It could be a way to prevent test primitives (which can be
+ * powerful) to run unless test_mode has been configured. */
+
+ return 0;
+}
+
+/* genetlink post-command hook: intentionally a no-op. */
+static void slsi_kic_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ OS_UNUSED_PARAMETER(ops);
+ OS_UNUSED_PARAMETER(skb);
+ OS_UNUSED_PARAMETER(info);
+
+ /* Called AFTER the command cb - could do something here */
+}
+
+/* Single multicast group used for all KIC indications. */
+static const struct genl_multicast_group slsi_kic_general_system_mcgrp[SLSI_MAX_NUM_MULTICAST_GROUP] = {
+ { .name = "general_system", },
+};
+
+
+/* The netlink family */
+static struct genl_family slsi_kic_fam = {
+ .name = "slsi_kic", /* Have users key off the name instead */
+ .hdrsize = 0, /* No private header */
+ .version = 2,
+ .netnsok = true,
+ .maxattr = SLSI_KIC_ATTR_MAX,
+ .pre_doit = slsi_kic_pre_doit,
+ .post_doit = slsi_kic_post_doit,
+ .ops = slsi_kic_ops,
+ .n_ops = SLSI_MAX_NUM_KIC_OPS,
+ .mcgrps = slsi_kic_general_system_mcgrp,
+ .n_mcgrps = SLSI_MAX_NUM_MULTICAST_GROUP,
+};
+
+/**
+ * Message building helpers
+ */
+static inline void *kic_hdr_put(struct sk_buff *skb, uint32_t portid, uint32_t seq,
+ int flags, u8 cmd)
+{
+ /* Since there is no private header just add the generic one */
+ return genlmsg_put(skb, portid, seq, &slsi_kic_fam, flags, cmd);
+}
+
+static int kic_build_u32_msg(struct sk_buff *msg, uint32_t portid, uint32_t seq, int flags,
+ enum slsi_kic_commands cmd, int attrtype, uint32_t payload)
+{
+ void *hdr;
+
+ hdr = kic_hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr)
+ return -EFAULT;
+
+ if (nla_put_u32(msg, attrtype, payload))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int kic_add_timestamp_attrs(struct sk_buff *msg)
+{
+ struct timespec ts;
+
+ /**
+ * Use getrawmonotonic instead of getnstimeofday to avoid problems with
+ * NTP updating things, which can make things look weird.
+ */
+ getrawmonotonic(&ts);
+
+ if (nla_put_u64_64bit(msg, SLSI_KIC_ATTR_TIMESTAMP_TV_SEC, ts.tv_sec, IFLA_BR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, SLSI_KIC_ATTR_TIMESTAMP_TV_NSEC, ts.tv_nsec, IFLA_BR_PAD))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+/* Encode a SYSTEM_EVENT_IND message (timestamp + category + event).
+ * A failed timestamp nest is dropped rather than failing the message.
+ * On failure the message is cancelled; @msg ownership stays with the
+ * caller.
+ */
+static int kic_build_system_event_msg(struct sk_buff *msg, uint32_t portid,
+ uint32_t seq, int flags,
+ uint32_t event_cat, uint32_t event)
+{
+ void *hdr;
+ struct nlattr *nla;
+
+ hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_SYSTEM_EVENT_IND);
+ if (!hdr)
+ return -EFAULT;
+
+ nla = nla_nest_start(msg, SLSI_KIC_ATTR_TIMESTAMP);
+ if (kic_add_timestamp_attrs(msg) < 0)
+ nla_nest_cancel(msg, nla);
+ else
+ nla_nest_end(msg, nla);
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_SYSTEM_EVENT_CATEGORY, event_cat))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_SYSTEM_EVENT, event))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+
+/* Encode a FIRMWARE_EVENT_IND message from a ccp_host event record.
+ *
+ * Fix vs. original: the !hdr path freed @msg while every other
+ * kic_build_* helper leaves ownership with the caller; that asymmetry
+ * invites a double free as soon as a caller frees on error. Ownership
+ * of @msg now stays with the caller on every path.
+ */
+static int kic_build_firmware_event_msg(struct sk_buff *msg, uint32_t portid,
+ uint32_t seq, int flags,
+ uint16_t firmware_event_type,
+ enum slsi_kic_technology_type tech_type,
+ uint32_t contain_type,
+ struct slsi_kic_firmware_event_ccp_host *event)
+{
+ void *hdr;
+ struct nlattr *nla;
+
+ hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_FIRMWARE_EVENT_IND);
+ if (!hdr)
+ return -EFAULT;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_TYPE, firmware_event_type))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_TECHNOLOGY_TYPE, tech_type))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_CONTAINER_TYPE, contain_type))
+ goto nla_put_failure;
+
+ /* Nested container with the decoded ccp_host payload */
+ nla = nla_nest_start(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CONTAINER_CCP_HOST);
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ID, event->id))
+ goto nla_put_failure_cancel;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL, event->level))
+ goto nla_put_failure_cancel;
+
+ if (nla_put_string(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL_STRING, event->level_string))
+ goto nla_put_failure_cancel;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_TIMESTAMP, event->timestamp))
+ goto nla_put_failure_cancel;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_CPU, event->cpu))
+ goto nla_put_failure_cancel;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_OCCURENCES, event->occurences))
+ goto nla_put_failure_cancel;
+
+ if (nla_put(msg, SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ARG, event->arg_length, event->arg))
+ goto nla_put_failure_cancel;
+ nla_nest_end(msg, nla);
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure_cancel:
+ nla_nest_cancel(msg, nla);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+
+/* Append one technology's service-information block (technology type
+ * plus a nested SERVICE_INFO attribute set) to @msg.
+ */
+static int kic_build_service_info_msg_add_service(struct sk_buff *msg,
+ enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info)
+{
+ struct nlattr *nla = NULL;
+
+ if (!msg || !info)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_TECHNOLOGY_TYPE, tech))
+ goto nla_put_failure;
+
+ nla = nla_nest_start(msg, SLSI_KIC_ATTR_SERVICE_INFO);
+ if (nla_put_string(msg, SLSI_KIC_ATTR_SERVICE_INFO_VER_STR, info->ver_str))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MAJOR, info->fw_api_major))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MINOR, info->fw_api_minor))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_RELEASE_PRODUCT, info->release_product))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_ITERATION, info->host_release_iteration))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_CANDIDATE, info->host_release_candidate))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nla);
+
+ return 0;
+
+nla_put_failure:
+ if (nla)
+ nla_nest_cancel(msg, nla);
+
+ return -EMSGSIZE;
+}
+
+/* Build a complete SERVICE_INFORMATION_IND message for one technology.
+ * On failure the message is cancelled; @msg ownership stays with the
+ * caller.
+ */
+static int kic_build_service_info_msg(struct sk_buff *msg, uint32_t portid,
+ uint32_t seq, int flags,
+ enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info)
+{
+ void *hdr;
+
+ hdr = kic_hdr_put(msg, portid, seq, flags, SLSI_KIC_CMD_SERVICE_INFORMATION_IND);
+ if (!hdr)
+ return -EFAULT;
+
+ if (kic_build_service_info_msg_add_service(msg, tech, info) < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+
+/* Return the sender's port id, papering over the snd_pid -> snd_portid
+ * rename that happened in kernel 3.7.
+ */
+static int get_snd_pid(struct genl_info *info)
+{
+ uint32_t snd_pid = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
+ snd_pid = info->snd_pid;
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 6, 0)
+ snd_pid = info->snd_portid;
+#endif
+
+ return snd_pid;
+}
+
+/* Accessor for the module-global context allocated in slsi_kic_init(). */
+struct slsi_kic_pdata *slsi_kic_core_get_context(void)
+{
+ return pdata;
+}
+
+/**
+ * Set the record to NULL to free and delete all stored records.
+ */
+static int service_info_delete_record(struct slsi_kic_service_details *record)
+{
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
+ SCSC_TAG_ERR(KIC_COMMON, "Failed to lock service info mutex - continue anyway\n");
+
+ if (record == NULL) {
+ struct slsi_kic_service_details *service, *tmp_node;
+
+ list_for_each_entry_safe(service, tmp_node, &pdata->chip_details.proxy_service_list, proxy_q) {
+ list_del(&service->proxy_q);
+ kfree(service);
+ }
+ } else {
+ list_del(&record->proxy_q);
+ kfree(record);
+ }
+ up(&pdata->chip_details.proxy_service_list_mutex);
+
+ return 0;
+}
+
+static struct slsi_kic_service_details *
+service_info_find_entry(enum slsi_kic_technology_type tech)
+{
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+ struct slsi_kic_service_details *service, *tmp_node;
+
+ if (!pdata)
+ return NULL;
+
+ list_for_each_entry_safe(service, tmp_node, &pdata->chip_details.proxy_service_list, proxy_q) {
+ if (service->tech == tech)
+ return service;
+ }
+
+ return NULL;
+}
+
+/* Overwrite the stored service-information record for @tech with @info.
+ * Returns 0 on success, -EINVAL when no context exists, -EFAULT when
+ * the lock cannot be taken or no record for @tech is stored.
+ *
+ * Fix vs. original: `record` was declared `static`, silently sharing
+ * one pointer between all calls (and threads) for no reason; it is a
+ * plain local now.
+ */
+static int service_info_update_record(enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info)
+{
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+ struct slsi_kic_service_details *record;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
+ goto err_out;
+
+ record = service_info_find_entry(tech);
+ if (record == NULL) {
+ up(&pdata->chip_details.proxy_service_list_mutex);
+ goto err_out;
+ }
+
+ record->tech = tech;
+ memcpy(&record->info, info, sizeof(struct slsi_kic_service_info));
+ up(&pdata->chip_details.proxy_service_list_mutex);
+
+ return 0;
+
+err_out:
+ SCSC_TAG_ERR(KIC_COMMON, "Failed to update service info record\n");
+ return -EFAULT;
+}
+
+/* Allocate and append a new service-information record for @tech.
+ * Returns 0 on success, -EINVAL/-ENOMEM/-EFAULT on failure; the new
+ * entry is freed again if the list lock cannot be taken.
+ */
+static int service_info_add(enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info)
+{
+ struct slsi_kic_service_details *new_entry;
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+
+ if (!pdata)
+ return -EINVAL;
+
+ new_entry = kmalloc(sizeof(struct slsi_kic_service_details), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+
+ new_entry->tech = tech;
+ memcpy(&new_entry->info, info, sizeof(struct slsi_kic_service_info));
+
+ if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
+ goto err_out;
+
+ list_add_tail(&new_entry->proxy_q, &pdata->chip_details.proxy_service_list);
+ up(&pdata->chip_details.proxy_service_list_mutex);
+
+ return 0;
+
+err_out:
+ SCSC_TAG_ERR(KIC_COMMON, "Failed to add service info record to list\n");
+ kfree(new_entry);
+ return -EFAULT;
+}
+
+
+/**
+ * Command callbacks
+ */
+
+/* This function shall not do anything since the direction is
+ * kernel->user space for this primitive. We should look into if it's
+ * possible to handle this better than having an empty stub function. */
+static int slsi_kic_wrong_direction(struct sk_buff *skb, struct genl_info *info)
+{
+ OS_UNUSED_PARAMETER(skb);
+
+ /* Fix vs. original: arguments were swapped relative to the format
+  * string (seq was logged as pid and vice versa). */
+ SCSC_TAG_ERR(KIC_COMMON, "%s Received CMD from pid %u seq %u: Wrong direction only supports kernel->user space\n",
+ __func__, get_snd_pid(info), info->snd_seq);
+ return -EINVAL;
+}
+
+/* Reply to a VERSION_NUMBER_REQ with the compiled-in interface
+ * major/minor version attributes.
+ */
+static int slsi_kic_interface_version_number_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ OS_UNUSED_PARAMETER(skb);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = kic_hdr_put(msg, 0, info->snd_seq, 0, SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ);
+ if (!hdr)
+ goto nl_hdr_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_KIC_VERSION_MAJOR, SLSI_KIC_INTERFACE_VERSION_MAJOR))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_KIC_VERSION_MINOR, SLSI_KIC_INTERFACE_VERSION_MINOR))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+
+nl_hdr_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+/* Echo the optional SLSI_KIC_ATTR_ECHO u32 back to the sender
+ * (defaults to 0 when the attribute is absent).
+ */
+static int slsi_kic_echo_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ uint32_t payload = 0;
+
+ OS_UNUSED_PARAMETER(skb);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (info->attrs[SLSI_KIC_ATTR_ECHO])
+ payload = nla_get_u32(info->attrs[SLSI_KIC_ATTR_ECHO]);
+
+ if (kic_build_u32_msg(msg, get_snd_pid(info), info->snd_seq, 0,
+ SLSI_KIC_CMD_ECHO_REQ, SLSI_KIC_ATTR_ECHO, payload) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+/* Reply to a SERVICE_INFORMATION_REQ: encode the stored record for
+ * every known technology plus a count of how many were encoded.
+ */
+static int slsi_kic_service_information_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+ int counter = 0, i;
+ struct sk_buff *msg;
+ struct slsi_kic_service_details *sr;
+ void *hdr;
+
+ OS_UNUSED_PARAMETER(skb);
+
+ if (!pdata)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = kic_hdr_put(msg, 0, info->snd_seq, 0, SLSI_KIC_CMD_SERVICE_INFORMATION_REQ);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (down_interruptible(&pdata->chip_details.proxy_service_list_mutex))
+ goto nla_put_failure;
+
+ /* The request doesn't carry attributes, so no validation required.
+ * Query the list for information for each technology and encode. */
+ for (i = 0; i < slsi_kic_technology_type__after_last; i++) {
+ sr = service_info_find_entry(i);
+ if (sr) {
+ counter++;
+ if (kic_build_service_info_msg_add_service(msg, i, &sr->info) < 0) {
+ up(&pdata->chip_details.proxy_service_list_mutex);
+ goto nla_put_failure;
+ }
+ }
+ }
+ up(&pdata->chip_details.proxy_service_list_mutex);
+
+ if (nla_put_u32(msg, SLSI_KIC_ATTR_NUMBER_OF_ENCODED_SERVICES, counter))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+/* Dispatch a TEST_TRIGGER_RECOVERY_REQ to the registered ops of the
+ * requested technology (wifi/curator/bt/ant) and reply with a status
+ * attribute describing the outcome.
+ */
+static int slsi_kic_test_trigger_recovery_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ uint32_t technology = 0, recovery_type = 0;
+ struct slsi_kic_pdata *pdata = slsi_kic_core_get_context();
+ enum slsi_kic_test_recovery_status status = slsi_kic_test_recovery_status_ok;
+
+ OS_UNUSED_PARAMETER(skb);
+
+ if (info->attrs[SLSI_KIC_ATTR_TECHNOLOGY_TYPE])
+ technology = nla_get_u32(info->attrs[SLSI_KIC_ATTR_TECHNOLOGY_TYPE]);
+
+ if (info->attrs[SLSI_KIC_ATTR_TEST_RECOVERY_TYPE])
+ recovery_type = nla_get_u32(info->attrs[SLSI_KIC_ATTR_TEST_RECOVERY_TYPE]);
+
+ if (pdata) {
+ /* err stays -EFAULT when no technology matches or no trigger
+  * callback is registered, which maps to an error status below. */
+ int err = -EFAULT;
+
+ if (technology == slsi_kic_technology_type_wifi) {
+ struct slsi_kic_wifi_ops_tuple *wifi_ops = NULL;
+
+ wifi_ops = &pdata->wifi_ops_tuple;
+
+ mutex_lock(&wifi_ops->ops_mutex);
+ if (wifi_ops->wifi_ops.trigger_recovery)
+ err = wifi_ops->wifi_ops.trigger_recovery(wifi_ops->priv,
+ (enum slsi_kic_test_recovery_type)recovery_type);
+ mutex_unlock(&wifi_ops->ops_mutex);
+ } else if (technology == slsi_kic_technology_type_curator) {
+ struct slsi_kic_cm_ops_tuple *cm_ops = NULL;
+
+ cm_ops = &pdata->cm_ops_tuple;
+
+ mutex_lock(&cm_ops->ops_mutex);
+ if (cm_ops->cm_ops.trigger_recovery)
+ err = cm_ops->cm_ops.trigger_recovery(cm_ops->priv,
+ (enum slsi_kic_test_recovery_type)recovery_type);
+ mutex_unlock(&cm_ops->ops_mutex);
+ } else if (technology == slsi_kic_technology_type_bt) {
+ struct slsi_kic_bt_ops_tuple *bt_ops = NULL;
+
+ bt_ops = &pdata->bt_ops_tuple;
+
+ mutex_lock(&bt_ops->ops_mutex);
+ if (bt_ops->bt_ops.trigger_recovery)
+ err = bt_ops->bt_ops.trigger_recovery(bt_ops->priv,
+ (enum slsi_kic_test_recovery_type)recovery_type);
+ mutex_unlock(&bt_ops->ops_mutex);
+ } else if (technology == slsi_kic_technology_type_ant) {
+ struct slsi_kic_ant_ops_tuple *ant_ops = NULL;
+
+ ant_ops = &pdata->ant_ops_tuple;
+
+ mutex_lock(&ant_ops->ops_mutex);
+ if (ant_ops->ant_ops.trigger_recovery)
+ err = ant_ops->ant_ops.trigger_recovery(ant_ops->priv,
+ (enum slsi_kic_test_recovery_type)recovery_type);
+ mutex_unlock(&ant_ops->ops_mutex);
+ }
+
+ if (err < 0)
+ status = slsi_kic_test_recovery_status_error_send_msg;
+ } else
+ status = slsi_kic_test_recovery_status_error_invald_param;
+
+ /* Prepare reply */
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (kic_build_u32_msg(msg, get_snd_pid(info), info->snd_seq, 0,
+ SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ, SLSI_KIC_ATTR_TRIGGER_RECOVERY_STATUS, status) < 0)
+ goto nl_hdr_failure;
+
+ return genlmsg_reply(msg, info);
+
+nl_hdr_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+
+/* Store/refresh the service info record for @tech and multicast a
+ * SERVICE_INFORMATION_IND to listeners.
+ * NOTE(review): service_info_find_entry() is called here without the
+ * proxy_service_list_mutex held - confirm this cannot race with
+ * concurrent list updates.
+ */
+int slsi_kic_service_information_ind(enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (service_info_find_entry(tech) == NULL) {
+ if (service_info_add(tech, info) < 0)
+ SCSC_TAG_ERR(KIC_COMMON, "%s Failed to add record\n", __func__);
+ } else if (service_info_update_record(tech, info) < 0)
+ SCSC_TAG_ERR(KIC_COMMON, "%s Failed to update record\n", __func__);
+
+ if (kic_build_service_info_msg(msg, 0, 0, 0, tech, info) < 0)
+ goto err;
+
+ return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, GFP_KERNEL);
+
+err:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(slsi_kic_service_information_ind);
+
+
+/* Multicast a SYSTEM_EVENT_IND with the given category/event pair. */
+int slsi_kic_system_event_ind(enum slsi_kic_system_event_category event_cat,
+ enum slsi_kic_system_events event, gfp_t flags)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, flags);
+ if (!msg)
+ return -ENOMEM;
+
+ if (kic_build_system_event_msg(msg, 0, 0, 0, event_cat, event) < 0)
+ goto err;
+
+ return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, flags);
+
+err:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(slsi_kic_system_event_ind);
+
+
+/* Multicast a FIRMWARE_EVENT_IND built from @event.
+ * NOTE(review): the failure path returns without freeing msg - this
+ * looks like a leak, but kic_build_firmware_event_msg() historically
+ * freed msg itself on one of its failure paths, so freeing here risks
+ * a double free; fix both sides together.
+ */
+int slsi_kic_firmware_event_ind(uint16_t firmware_event_type, enum slsi_kic_technology_type tech_type,
+ uint32_t contain_type, struct slsi_kic_firmware_event_ccp_host *event)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (kic_build_firmware_event_msg(msg, 0, 0, 0, firmware_event_type, tech_type, contain_type, event) < 0)
+ return -ENOBUFS;
+
+ return genlmsg_multicast(&slsi_kic_fam, msg, 0, 0, GFP_KERNEL);
+}
+EXPORT_SYMBOL(slsi_kic_firmware_event_ind);
+
+
+/* Command table for slsi_kic_fam. Indication commands (kernel->user
+ * direction) are wired to slsi_kic_wrong_direction so userspace cannot
+ * invoke them. All commands require GENL_ADMIN_PERM.
+ */
+static struct genl_ops slsi_kic_ops[SLSI_MAX_NUM_KIC_OPS] = {
+ {
+ .cmd = SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ,
+ .doit = slsi_kic_interface_version_number_req,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_SYSTEM_EVENT_IND,
+ .doit = slsi_kic_wrong_direction,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_SERVICE_INFORMATION_REQ,
+ .doit = slsi_kic_service_information_req,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_SERVICE_INFORMATION_IND,
+ .doit = slsi_kic_wrong_direction,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_FIRMWARE_EVENT_IND,
+ .doit = slsi_kic_wrong_direction,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_ECHO_REQ,
+ .doit = slsi_kic_echo_req,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ,
+ .doit = slsi_kic_test_trigger_recovery_req,
+ .policy = slsi_kic_attr_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static int __init slsi_kic_init(void)
+{
+ int err;
+
+ SCSC_TAG_DEBUG(KIC_COMMON, "%s Enter\n", __func__);
+
+ mutex_lock(&kic_lock);
+
+ pdata = kzalloc(sizeof(struct slsi_kic_pdata), GFP_KERNEL);
+ if (!pdata) {
+ SCSC_TAG_ERR(KIC_COMMON, "%s Exit - no mem\n", __func__);
+ mutex_unlock(&kic_lock);
+ return -ENOMEM;
+ }
+
+ mutex_init(&pdata->wifi_ops_tuple.ops_mutex);
+ mutex_init(&pdata->cm_ops_tuple.ops_mutex);
+ mutex_init(&pdata->bt_ops_tuple.ops_mutex);
+ mutex_init(&pdata->ant_ops_tuple.ops_mutex);
+
+ /* Init chip information proxy list */
+ INIT_LIST_HEAD(&pdata->chip_details.proxy_service_list);
+ sema_init(&pdata->chip_details.proxy_service_list_mutex, 1);
+ pdata->state = idle;
+
+ err = genl_register_family(&slsi_kic_fam);
+ if (err != 0)
+ goto err_out;
+
+ mutex_unlock(&kic_lock);
+ SCSC_TAG_DEBUG(KIC_COMMON, "%s Exit\n", __func__);
+ return 0;
+
+err_out:
+ genl_unregister_family(&slsi_kic_fam);
+ mutex_unlock(&kic_lock);
+ SCSC_TAG_ERR(KIC_COMMON, "%s Exit - err %d\n", __func__, err);
+ return err;
+}
+
+static void __exit slsi_kic_exit(void)
+{
+ int err;
+
+ SCSC_TAG_DEBUG(KIC_COMMON, "%s Enter\n", __func__);
+
+ BUG_ON(!pdata);
+ if (!pdata) {
+ SCSC_TAG_ERR(KIC_COMMON, "%s Exit - invalid pdata\n", __func__);
+ return;
+ }
+
+ mutex_lock(&kic_lock);
+ err = genl_unregister_family(&slsi_kic_fam);
+ if (err < 0)
+ SCSC_TAG_ERR(KIC_COMMON, "%s Failed to unregister family\n", __func__);
+
+ if (service_info_delete_record(NULL) < 0)
+ SCSC_TAG_ERR(KIC_COMMON, "%s Deleting service info liste failed\n", __func__);
+
+ mutex_destroy(&pdata->wifi_ops_tuple.ops_mutex);
+ mutex_destroy(&pdata->cm_ops_tuple.ops_mutex);
+ mutex_destroy(&pdata->bt_ops_tuple.ops_mutex);
+ mutex_destroy(&pdata->ant_ops_tuple.ops_mutex);
+
+ kfree(pdata);
+ pdata = NULL;
+ mutex_unlock(&kic_lock);
+
+ SCSC_TAG_DEBUG(KIC_COMMON, "%s Exit\n", __func__);
+}
+
+module_init(slsi_kic_init);
+module_exit(slsi_kic_exit);
+
+MODULE_DESCRIPTION("SCSC Kernel Information and Control (KIC) interface");
+MODULE_AUTHOR("Samsung Electronics Co., Ltd");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Implement subscriber and filtering here. This won't be essential for
+ * first draft of the kernel KIC implementation */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_INTERNAL_H
+#define __SLSI_KIC_INTERNAL_H
+
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/semaphore.h>
+#include <net/genetlink.h>
+#include <linux/time.h>
+#include <linux/module.h>
+
+#include <scsc/scsc_logring.h>
+
+#include <scsc/kic/slsi_kic_prim.h>
+#include <scsc/kic/slsi_kic_wifi.h>
+#include <scsc/kic/slsi_kic_cm.h>
+#include <scsc/kic/slsi_kic_bt.h>
+#include <scsc/kic/slsi_kic_ant.h>
+
+#define OS_UNUSED_PARAMETER(x) ((void)(x))
+
+/**
+ * Core instance
+ */
+/* Lifecycle states of the KIC core instance. */
+enum slsi_kic_state {
+ idle,
+ initialised,
+ ready
+};
+
+/* One record on the chip-details proxy service list. */
+struct slsi_kic_service_details {
+ struct list_head proxy_q;
+ enum slsi_kic_technology_type tech;
+ struct slsi_kic_service_info info;
+};
+
+struct slsi_kic_chip_details {
+ struct semaphore proxy_service_list_mutex; /* binary semaphore (sema_init(.., 1)) guarding the list */
+ struct list_head proxy_service_list;
+};
+
+/* Per-technology ops table plus owner context; ops_mutex guards both fields. */
+struct slsi_kic_wifi_ops_tuple {
+ void *priv;
+ struct slsi_kic_wifi_ops wifi_ops;
+ struct mutex ops_mutex;
+};
+
+struct slsi_kic_bt_ops_tuple {
+ void *priv;
+ struct slsi_kic_bt_ops bt_ops;
+ struct mutex ops_mutex;
+};
+
+struct slsi_kic_ant_ops_tuple {
+ void *priv;
+ struct slsi_kic_ant_ops ant_ops;
+ struct mutex ops_mutex;
+};
+
+struct slsi_kic_cm_ops_tuple {
+ void *priv;
+ struct slsi_kic_cm_ops cm_ops;
+ struct mutex ops_mutex;
+};
+
+/* Singleton KIC instance, allocated in slsi_kic_init(). */
+struct slsi_kic_pdata {
+ enum slsi_kic_state state;
+ struct slsi_kic_chip_details chip_details;
+ struct slsi_kic_wifi_ops_tuple wifi_ops_tuple;
+ struct slsi_kic_cm_ops_tuple cm_ops_tuple;
+ struct slsi_kic_bt_ops_tuple bt_ops_tuple;
+ struct slsi_kic_ant_ops_tuple ant_ops_tuple;
+ uint32_t seq; /* This should *perhaps* be moved to a record struct for
+ * each subscription - will look into that during the
+ * filtering work. */
+};
+
+/* Returns the singleton instance, or NULL before init / after exit. */
+struct slsi_kic_pdata *slsi_kic_core_get_context(void);
+
+#endif /* #ifndef __SLSI_KIC_INTERNAL_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "slsi_kic_internal.h"
+
+#if defined(CONFIG_SCSC_ANT) && defined(CONFIG_SAMSUNG_KIC)
+int slsi_kic_ant_ops_register(void *priv, struct slsi_kic_ant_ops *ant_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ if (!kic_inst)
+ return -EFAULT;
+
+ mutex_lock(&kic_inst->ant_ops_tuple.ops_mutex);
+ memcpy(&kic_inst->ant_ops_tuple.ant_ops, ant_ops, sizeof(struct slsi_kic_ant_ops));
+ kic_inst->ant_ops_tuple.priv = priv;
+ mutex_unlock(&kic_inst->ant_ops_tuple.ops_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(slsi_kic_ant_ops_register);
+
+void slsi_kic_ant_ops_unregister(struct slsi_kic_ant_ops *ant_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ OS_UNUSED_PARAMETER(ant_ops);
+
+ if (!kic_inst)
+ return;
+
+ mutex_lock(&kic_inst->ant_ops_tuple.ops_mutex);
+ memset(&kic_inst->ant_ops_tuple.ant_ops, 0, sizeof(struct slsi_kic_ant_ops));
+ kic_inst->ant_ops_tuple.priv = NULL;
+ mutex_unlock(&kic_inst->ant_ops_tuple.ops_mutex);
+}
+EXPORT_SYMBOL(slsi_kic_ant_ops_unregister);
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "slsi_kic_internal.h"
+
+int slsi_kic_bt_ops_register(void *priv, struct slsi_kic_bt_ops *bt_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ if (!kic_inst)
+ return -EFAULT;
+
+ mutex_lock(&kic_inst->bt_ops_tuple.ops_mutex);
+ memcpy(&kic_inst->bt_ops_tuple.bt_ops, bt_ops, sizeof(struct slsi_kic_bt_ops));
+ kic_inst->bt_ops_tuple.priv = priv;
+ mutex_unlock(&kic_inst->bt_ops_tuple.ops_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(slsi_kic_bt_ops_register);
+
+void slsi_kic_bt_ops_unregister(struct slsi_kic_bt_ops *bt_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ OS_UNUSED_PARAMETER(bt_ops);
+
+ if (!kic_inst)
+ return;
+
+ mutex_lock(&kic_inst->bt_ops_tuple.ops_mutex);
+ memset(&kic_inst->bt_ops_tuple.bt_ops, 0, sizeof(struct slsi_kic_bt_ops));
+ kic_inst->bt_ops_tuple.priv = NULL;
+ mutex_unlock(&kic_inst->bt_ops_tuple.ops_mutex);
+}
+EXPORT_SYMBOL(slsi_kic_bt_ops_unregister);
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "slsi_kic_internal.h"
+
+int slsi_kic_cm_ops_register(void *priv, struct slsi_kic_cm_ops *cm_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ if (!kic_inst)
+ return -EFAULT;
+
+ mutex_lock(&kic_inst->cm_ops_tuple.ops_mutex);
+ memcpy(&kic_inst->cm_ops_tuple.cm_ops, cm_ops, sizeof(struct slsi_kic_cm_ops));
+ kic_inst->cm_ops_tuple.priv = priv;
+ mutex_unlock(&kic_inst->cm_ops_tuple.ops_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(slsi_kic_cm_ops_register);
+
+void slsi_kic_cm_ops_unregister(struct slsi_kic_cm_ops *cm_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ OS_UNUSED_PARAMETER(cm_ops);
+
+ if (!kic_inst)
+ return;
+
+ mutex_lock(&kic_inst->cm_ops_tuple.ops_mutex);
+ memset(&kic_inst->cm_ops_tuple.cm_ops, 0, sizeof(struct slsi_kic_cm_ops));
+ kic_inst->cm_ops_tuple.priv = NULL;
+ mutex_unlock(&kic_inst->cm_ops_tuple.ops_mutex);
+}
+EXPORT_SYMBOL(slsi_kic_cm_ops_unregister);
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "slsi_kic_internal.h"
+
+int slsi_kic_wifi_ops_register(void *priv, struct slsi_kic_wifi_ops *wifi_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ if (!kic_inst)
+ return -EFAULT;
+
+ mutex_lock(&kic_inst->wifi_ops_tuple.ops_mutex);
+ memcpy(&kic_inst->wifi_ops_tuple.wifi_ops, wifi_ops, sizeof(struct slsi_kic_wifi_ops));
+ kic_inst->wifi_ops_tuple.priv = priv;
+ mutex_unlock(&kic_inst->wifi_ops_tuple.ops_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(slsi_kic_wifi_ops_register);
+
+void slsi_kic_wifi_ops_unregister(struct slsi_kic_wifi_ops *wifi_ops)
+{
+ struct slsi_kic_pdata *kic_inst = slsi_kic_core_get_context();
+
+ OS_UNUSED_PARAMETER(wifi_ops);
+
+ if (!kic_inst)
+ return;
+
+ mutex_lock(&kic_inst->wifi_ops_tuple.ops_mutex);
+ memset(&kic_inst->wifi_ops_tuple.wifi_ops, 0, sizeof(struct slsi_kic_wifi_ops));
+ kic_inst->wifi_ops_tuple.priv = NULL;
+ mutex_unlock(&kic_inst->wifi_ops_tuple.ops_mutex);
+}
+EXPORT_SYMBOL(slsi_kic_wifi_ops_unregister);
--- /dev/null
+config SCSC_CORE_CM
+ tristate "Samsung SCSC Maxwell driver Core Module"
+ depends on SCSC_CORE
+
+config SCSC_CORE
+ tristate "Samsung SCSC Maxwell driver"
+ select SCSC_CORE_CM
+
+config SCSC_WLBT_CONFIG_PLATFORM
+ string "SCSC WLBT PLATFORM"
+ default ""
+ ---help---
+ Select the platform, e.g. lassen_smdk
+
+config SCSC_CORE_FW_LOCATION
+ string "SCSC FW location"
+ default "/system/etc/wifi"
+ ---help---
+ Select the FW location, when CONFIG_SCSC_CORE_FW_LOCATION_AUTO=n
+
config SCSC_CORE_FW_LOCATION_AUTO
	bool "SCSC FW location auto select"
	default y
	---help---
	  Select the FW location (Android N/O) automatically.
	  When disabled, CONFIG_SCSC_CORE_FW_LOCATION and
	  CONFIG_SCSC_CORE_TOOL_LOCATION are used instead.
+
+config SCSC_CORE_TOOL_LOCATION
+ string "SCSC tool location"
+ default "/system/bin"
+ ---help---
+ Select the moredump tool location, when CONFIG_SCSC_CORE_FW_LOCATION_AUTO=n
+
+config SCSC_PCIE
+ tristate "Samsung SCSC MIF PCIE implementation"
+ depends on SCSC_CORE
+ depends on PCI
+
+config SCSC_MXLOGGER
+ bool "Samsung SCSC MXLOGGER"
+ depends on SCSC_CORE
+ default y
+ ---help---
+ MXLOGGER provides FW level logging in DRAM
+
+config SCSC_SMAPPER
+ bool "Samsung SCSC WLAN Smapper support"
+ default y
+ depends on SCSC_CORE
+ ---help---
+ SMAPPER provides direct access to Linux SKBs
+
+config SCSC_WLBTD
+ bool "Samsung SCSC Daemon support for Android O"
+ depends on SCSC_CORE
+ default n
+ ---help---
+ Uses generic netlink messages to communicate events to userspace daemon which takes necessary actions e.g. taking moredump
+
+config SCSC_WRITE_INFO_FILE_WLBTD
+ bool "SCSC flag to decide whether to write file via wlbtd or not"
+ depends on SCSC_WLBTD
+ default n
+
+config SCSC_QOS
+ bool "Samsung SCSC kernel PM QoS support"
+ default y
+ depends on SCSC_CORE
+ ---help---
+ SCSC_QOS provides support to configure kernel PM QoS to register configuration as per performance expectations
+
+config SCSC_PLATFORM
+ tristate "Samsung SCSC MIF Platform driver implementation"
+ depends on SCSC_CORE
+
+config SCSC_CM_MX_CLIENT_TEST
+ tristate "Samsung SCSC Test Client"
+ depends on SCSC_CORE
+
+config SCSC_MX_ALWAYS_ON
+ tristate "Samsung SCSC MX140 always booted"
+ depends on SCSC_CM_MX_CLIENT_TEST
+
+config SCSC_CLK20MHZ
+ tristate "Samsung SCSC USB 20MHz Clock"
+ depends on SCSC_CORE
+
+config SCSC_CLK20MHZ_TEST
+ tristate "Samsung SCSC USB 20MHz Clock Test"
+ depends on SCSC_CLK20MHZ
+
+config SCSC_FM
+ tristate "Samsung SCSC MX250 enables LDOs used for FM"
+ depends on SCSC_CORE
+
+config SCSC_FM_TEST
+ tristate "Samsung SCSC MX250 Test for enabling LDOs used for FM"
+ depends on SCSC_FM
+
+config SCSC_MMAP
+ tristate "Samsung SCSC MMAP/GDB User space interface"
+ depends on SCSC_CORE
+
+config SCSC_DBG_SAMPLER
+ tristate "Samsung SCSC Debug Sampler Service"
+ depends on SCSC_CORE
+
+config SCSC_DEBUG
+ tristate "Samsung SCSC Logging"
+ depends on SCSC_CORE && DEBUG_FS
+ default n
+
+config SCSC_DEBUG_COMPATIBILITY
+ bool "Samsung SCSC Logging mode"
+ default y
+
+config SCSC_LOGRING
+ tristate "Samsung SCSC Kernel Logging"
+ default y
+ ---help---
+ SCSC Drivers logging mechanism
+
+config SCSC_STATIC_RING
+ tristate "Samsung SCSC Logging use static ring"
+ depends on SCSC_LOGRING
+ default y
+
+config SCSC_STATIC_RING_SIZE
+ int "Size of the static ring"
+ depends on SCSC_STATIC_RING
+ default "1048576"
+ range 1024 16777216
+
+config SCSC_CHV_SUPPORT
+ bool "Samsung CHV f/w support"
+ depends on SCSC_CORE
+ default n
+
+config SCSC_GPR4_CON_DEBUG
+ bool "GPR4 PIO muxes switching to the Maxwell Subsystem"
+ depends on SCSC_PLATFORM
+ default n
+
+config SCSC_BUILD_TYPE
+ string "Type of Kernel Build"
+ ---help---
+ Type of Kernel Build: User, User Debug, Engineering
+
+config SCSC_WIFILOGGER
+ tristate "Samsung SCSC Android Wi-Fi Logger"
+ depends on SCSC_CORE
+ default n
+
+config SCSC_WIFILOGGER_DEBUGFS
+ bool "Samsung SCSC Android Wi-Fi Logger DebugFS"
+ depends on SCSC_WIFILOGGER && DEBUG_FS
+ default y
+
+config SCSC_WIFILOGGER_TEST
+ bool "Samsung SCSC Android Wi-Fi Logger Test Harness"
+ depends on SCSC_WIFILOGGER && SCSC_WIFILOGGER_DEBUGFS
+ default n
+
+config SCSC_LOG_COLLECTION
+ bool "Samsung SCSC Log Collection"
+ depends on SCSC_CORE
+ default y
+ ---help---
+ Enable LOG collection to collect Chunks (host and FW) and generate a SBL file
+
+config SCSC_COMMON_HCF
+ bool "Enable Common HCF loader"
+ depends on SCSC_CORE
+ default n
+ ---help---
+ Enable Common HCF loader
+
+config SCSC_WLBT_AUTORECOVERY_PERMANENT_DISABLE
+ bool "Permanently disable WLBT autorecovery, ignoring module parameter"
+ depends on SCSC_CORE
+ default n
+ ---help---
+ Override module parameter, and disable recovery
+
+config SCSC_MX450_GDB_SUPPORT
+ bool "Enable GDB Channels for MX450"
+ depends on SCSC_CORE
+ default n
+ ---help---
+ Enable GDB Channels for MX450
--- /dev/null
+#############################################################################
+#
+# Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+#
+#############################################################################
+
+# Needed since this subdir is symlinked in the main Kernel tree
+# without this our samsung subdir is NOT cleaned.
+clean-files := *.o *.ko
+
+ifneq ($(CONFIG_SCSC_CORE),n)
+
+ccflags-y += -Iinclude/scsc
+ccflags-y += $(CONFIG_SAMSUNG_MAXWELL_EXTRA)
+
+obj-$(CONFIG_SCSC_LOGRING) += scsc_logring.o
+scsc_logring-$(CONFIG_SCSC_LOGRING) += scsc_logring_main.o scsc_logring_ring.o scsc_logring_debugfs.o
+
+obj-$(CONFIG_SCSC_LOG_COLLECTION) += scsc_log_collection.o
+scsc_log_collection-y += scsc_log_collector.o scsc_log_collector_proc.o scsc_log_collector_mmap.o
+##
+## The following cumbersome ifeq/else is the only way to make
+## this CONFIG_SCSC_PRINTK propagate correctly in any scenario (y,m,n)
+## Otherwise using ONLY the else branch when CONFIG_SCSC_DEBUG evaluates
+## to m ==>> ccflags-m does NOT work correctly in every build scenario:
+## - kernel debug modules : builtin with scsc_logring support
+## - kernel nodebug modules : builtins without scsc_logring support
+## - kernel debug|nodebug nomodules : no builtin / no scsclogring
+##
+ifeq ($(CONFIG_SCSC_DEBUG), m)
+ccflags-y += -DCONFIG_SCSC_PRINTK
+else
+ccflags-$(CONFIG_SCSC_LOGRING) += -DCONFIG_SCSC_PRINTK
+endif
+
+ifneq ($(CONFIG_SCSC_DEBUG),n)
+ccflags-$(CONFIG_SCSC_STATIC_RING) += -DCONFIG_SCSC_STATIC_RING_SIZE=$(CONFIG_SCSC_STATIC_RING_SIZE)
+endif
+
+obj-$(CONFIG_SCSC_WIFILOGGER) += scsc_wifilogger.o
+scsc_wifilogger-y += scsc_wifilogger_api.o \
+ scsc_wifilogger_internal.o \
+ scsc_wifilogger_core.o \
+ scsc_wifilogger_module.o \
+ scsc_wifilogger_ring_connectivity.o \
+ scsc_wifilogger_ring_wakelock.o \
+ scsc_wifilogger_ring_pktfate.o
+
+scsc_wifilogger-$(CONFIG_SCSC_WIFILOGGER_DEBUGFS) += scsc_wifilogger_debugfs.o
+scsc_wifilogger-$(CONFIG_SCSC_WIFILOGGER_TEST) += scsc_wifilogger_ring_test.o
+
+# GDB channels for MoreDump
+ccflags-$(CONFIG_SCSC_MX450_GDB_SUPPORT) += -DCONFIG_SCSC_MX450_GDB_SUPPORT
+
+# PCI-E MIF
+obj-$(CONFIG_SCSC_PCIE) += scsc_pcie_mif.o
+scsc_pcie_mif-y += pcie_mif_module.o pcie_mif.o pcie_proc.o pcie_mbox.o pcie_mbox_intgen.o
+
+obj-$(CONFIG_SCSC_PLATFORM) += scsc_platform_mif.o
+
+scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += \
+ platform_mif_module.o
+
+ifeq ($(CONFIG_SOC_EXYNOS9610),y)
+ scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += platform_mif_9610.o
+endif
+ifeq ($(CONFIG_SOC_EXYNOS9630),y)
+ scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += platform_mif_9630.o
+endif
+
+ifeq ($(CONFIG_SOC_EXYNOS7570),y)
+ scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += platform_mif.o
+endif
+ifeq ($(CONFIG_SOC_EXYNOS7872),y)
+ scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += platform_mif.o
+endif
+ifeq ($(CONFIG_SOC_EXYNOS7885),y)
+ scsc_platform_mif-$(CONFIG_SCSC_PLATFORM) += platform_mif.o
+endif
+
+#core_module for static functions and registration client module (all the service driver modules -wlan, bt,...-)
+#core for instance
+obj-$(CONFIG_SCSC_CORE_CM) += scsc_mx.o
+scsc_mx-y += \
+ mxlog_transport.o \
+ fw_panic_record.o \
+ panicmon.o \
+ suspendmon.o \
+ mxlog.o \
+ mxproc.o \
+ scsc_service.o \
+ scsc_mx_module.o \
+ scsc_mx_impl.o \
+ mxman.o \
+ miframman.o \
+ mifmboxman.o \
+ mifproc.o \
+ mifintrbit.o \
+ fwhdr.o \
+ fwimage.o \
+ cpacket_buffer.o \
+ mifstream.o \
+ mxmgmt_transport.o \
+ gdb_transport.o \
+ scsc_lerna.o \
+ mxsyserr.o \
+ mxfwconfig.o \
+ mx140_file.o
+
+scsc_mx-$(CONFIG_SCSC_MXLOGGER) += mxlogger.o
+
+scsc_mx-$(CONFIG_SCSC_SMAPPER) += mifsmapper.o
+
+scsc_mx-$(CONFIG_SCSC_WLBTD) += scsc_wlbtd.o
+
+scsc_mx-$(CONFIG_SCSC_QOS) += mifqos.o
+
+ifneq ($(CONFIG_SCSC_CM_MX_CLIENT_TEST),n)
+obj-$(CONFIG_SCSC_CM_MX_CLIENT_TEST) += mx_client_test.o
+mx_client_test-y += client_test.o
+endif
+
+ifneq ($(CONFIG_SCSC_MMAP),n)
+obj-$(CONFIG_SCSC_MMAP) += scsc_mmap.o
+scsc_mmap-y += mx_mmap.o
+endif
+
+ifneq ($(CONFIG_SCSC_CLK20MHZ),n)
+obj-$(CONFIG_SCSC_CLK20MHZ) += scsc_mx140_clk.o
+scsc_mx140_clk-y += mx140_clk.o
+ifeq ($(CONFIG_SCSC_CLK20MHZ_TEST),y)
+scsc_mx140_clk-y += mx140_clk_test.o
+endif
+endif
+
+ifneq ($(CONFIG_SCSC_FM),n)
+obj-$(CONFIG_SCSC_FM) += scsc_mx250_fm.o
+scsc_mx250_fm-y += mx250_fm.o
+ifeq ($(CONFIG_SCSC_FM_TEST),y)
+scsc_mx250_fm-y += mx250_fm_test.o
+endif
+endif
+
+obj-$(CONFIG_SCSC_DBG_SAMPLER) += scsc_dbg_sampler.o
+scsc_dbg_sampler-y += mx_dbg_sampler.o
+
+endif
+
+# The below section is only for DTE purpose. It is controlled by the flag SCSC_DTE_BUILD which is set only by the
+# DTE top level Makefile. Hence, no risk.
+# In the below include dir the KERNEL_DIR comes from the DTE top level makefile.
+ifeq ($(SCSC_DTE_BUILD),y)
+obj-m := scsc_mx.o
+obj-m += mx_client_test.o
+mx_client_test-y += client_test.o
+
+ccflags-y += -I$(KERNEL_DIR)/include/scsc/
+ccflags-y += -I$(KERNEL_DIR)/include
+ccflags-y += -I$(KERNEL_DIR)/drivers/misc/samsung/scsc
+ccflags-y += -I$(KERNEL_DIR)/drivers/net/wireless/scsc
+
+endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+
+/* Per-probe state for the mx140 null-service test client. */
+struct scsc_mx_test {
+ /* scsc_service_client has to be the first */
+ struct scsc_service_client test_service_client;
+ struct scsc_service *primary_service; /* opened with service_id */
+ struct scsc_service *secondary_service; /* optional, opened with service_id_2 */
+ struct scsc_mx *mx; /* core instance handed over at probe */
+ bool started; /* service(s) running; guarded by ss_lock */
+};
+
+static struct scsc_mx_test *test;
+
+/* First service to start */
+static int service_id = SCSC_SERVICE_ID_NULL;
+module_param(service_id, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(service_id, "ID of service to start, Default 0:NULL, 1:WLAN, 2:BT, 3:ANT, 5:ECHO");
+
+/* Second service to start if != -1 */
+static int service_id_2 = -1;
+module_param(service_id_2, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(service_id_2, "ID of optional second service to start: Default -1:None, 0:NULL, 1:WLAN, 2:BT, 3:ANT, 5:ECHO");
+
+#ifdef CONFIG_SCSC_MX_ALWAYS_ON
+static int auto_start = 2;
+#else
+static int auto_start;
+#endif
+module_param(auto_start, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_start, "Start service automatically: 0: disabled, 1: Enabled, 2: Deferred");
+
+/* Delay after probe before starting mx140 when auto_start=2 */
+#define SCSC_MX_BOOT_DELAY_MS 30000
+
+static DEFINE_MUTEX(ss_lock);
+
+/* char device entry declarations */
+static dev_t client_test_dev_t;
+static struct class *client_test_class;
+static struct cdev *client_test_cdev;
+
+/* Service-client callback invoked on FW failure; test client only logs. */
+static void test_stop_on_failure(struct scsc_service_client *client)
+{
+ SCSC_TAG_DEBUG(MXMAN_TEST, "OK\n");
+}
+
+/* Service-client callback invoked after subsystem reset; logs only. */
+static void test_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
+{
+ (void)scsc_panic_code;
+ SCSC_TAG_ERR(MXMAN_TEST, "OK\n");
+}
+
+
+/*
+ * Stop and close the primary and (if open) secondary service.
+ * Idempotent: returns early if nothing was started. Guarded by ss_lock.
+ * Stop/close failures are logged but not propagated.
+ */
+static void stop_close_services(void)
+{
+ int r;
+
+ mutex_lock(&ss_lock);
+
+ if (!test->started) {
+ pr_info("mx140: already stopped\n");
+ goto done;
+ }
+
+ if (test->primary_service) {
+ r = scsc_mx_service_stop(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(primary_service) failed err: %d\n", r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(primary_service) OK\n");
+ r = scsc_mx_service_close(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id);
+ test->primary_service = NULL;
+ }
+
+ if (test->secondary_service) {
+ r = scsc_mx_service_stop(test->secondary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(secondary_service) failed err: %d\n", r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(secondary_service) OK\n");
+ r = scsc_mx_service_close(test->secondary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) failed err: %d\n", service_id_2, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id_2);
+ test->secondary_service = NULL;
+ }
+ test->started = false;
+done:
+ mutex_unlock(&ss_lock);
+}
+
+/*
+ * Open and start the configured service(s) on @mx.
+ * On any failure everything opened so far is unwound (stop + close).
+ * Idempotent: returns true immediately if already started.
+ * Guarded by ss_lock. Returns true on success, false on failure.
+ */
+static bool open_start_services(struct scsc_mx *mx)
+{
+ struct scsc_service *primary_service;
+ struct scsc_service *secondary_service;
+ int r;
+ bool ok;
+
+ mutex_lock(&ss_lock);
+
+ if (test->started) {
+ pr_info("mx140: already started\n");
+ ok = true;
+ goto done;
+ }
+
+ primary_service = scsc_mx_service_open(mx, service_id, &test->test_service_client, &r);
+ if (!primary_service) {
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_open for primary_service failed %d\n", r);
+ ok = false;
+ goto done;
+ }
+
+ r = scsc_mx_service_start(primary_service, 0);
+ if (r) {
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_start for primary_service failed\n");
+ r = scsc_mx_service_close(primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close for primary_service %d failed\n", r);
+ ok = false;
+ goto done;
+ }
+
+ test->primary_service = primary_service;
+
+ if (service_id_2 != -1) {
+ secondary_service = scsc_mx_service_open(mx, service_id_2, &test->test_service_client, &r);
+ if (!secondary_service) {
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_open for secondary_service failed %d\n", r);
+ r = scsc_mx_service_stop(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(%d) OK\n", service_id);
+ r = scsc_mx_service_close(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id);
+ ok = false;
+ goto done;
+ }
+ r = scsc_mx_service_start(secondary_service, 0);
+ if (r) {
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_start for secondary_service failed\n");
+ /* NOTE(review): the two logs below report service_id, but this
+ * close is for the secondary service (service_id_2) - looks
+ * like copy/paste; confirm and fix the logged id. */
+ r = scsc_mx_service_close(secondary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id);
+
+ r = scsc_mx_service_stop(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_stop(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_stop(%d) OK\n", service_id);
+ r = scsc_mx_service_close(test->primary_service);
+ if (r)
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_service_close(%d) failed err: %d\n", service_id, r);
+ else
+ SCSC_TAG_DEBUG(MXMAN_TEST, "scsc_mx_service_close(%d) OK\n", service_id);
+ ok = false;
+ goto done;
+ }
+ test->secondary_service = secondary_service;
+ }
+ test->started = true;
+ ok = true;
+done:
+ mutex_unlock(&ss_lock);
+ return ok;
+}
+
+static void delay_start_func(struct work_struct *work)
+{
+ (void)work;
+
+ pr_info("mx140: Start wlbt null service\n");
+
+ if (!test->mx)
+ return;
+
+ if (!open_start_services(test->mx))
+ pr_err("mx140: Error starting delayed service\n");
+}
+
+/* Deferred-start work item used when auto_start == 2. */
+DECLARE_DELAYED_WORK(delay_start, delay_start_func);
+
+/* Start the null service after a delay */
+static void delay_open_start_services(void)
+{
+ schedule_delayed_work(&delay_start, msecs_to_jiffies(SCSC_MX_BOOT_DELAY_MS));
+}
+
+/* Start service(s) and leave running until module unload */
+void client_module_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ /* Avoid unused error */
+ (void)module_client;
+
+ SCSC_TAG_ERR(MXMAN_TEST, "mx140:\n");
+
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return;
+
+ test->test_service_client.stop_on_failure = test_stop_on_failure;
+ test->test_service_client.failure_reset = test_failure_reset;
+ test->mx = mx;
+
+ switch (auto_start) {
+ case 1:
+ if (!open_start_services(test->mx)) {
+ SCSC_TAG_ERR(MXMAN_TEST, "Error starting service/s\n");
+ kfree(test);
+ return;
+ }
+ break;
+ case 2:
+ pr_info("mx140: delayed auto-start\n");
+ delay_open_start_services();
+ break;
+ default:
+ break;
+ }
+
+ SCSC_TAG_ERR(MXMAN_TEST, "OK\n");
+}
+
+void client_module_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ /* Avoid unused error */
+ (void)module_client;
+
+ pr_info("mx140: %s\n", __func__);
+
+ if (!test)
+ return;
+ if (test->mx != mx) {
+ SCSC_TAG_ERR(MXMAN_TEST, "test->mx != mx\n");
+ return;
+ }
+
+ /* Cancel any delayed start attempt */
+ cancel_delayed_work_sync(&delay_start);
+
+ stop_close_services();
+
+ /* de-allocate test structure */
+ kfree(test);
+ SCSC_TAG_DEBUG(MXMAN_TEST, "OK\n");
+}
+
+
+/* Test client driver registration */
+struct scsc_mx_module_client client_test_driver = {
+ .name = "MX client test driver",
+ .probe = client_module_probe,
+ .remove = client_module_remove,
+};
+
+
+/* Char-device open: nothing to set up, just log. */
+static int client_test_dev_open(struct inode *inode, struct file *file)
+{
+ SCSC_TAG_ERR(MXMAN_TEST, "open client test\n");
+ return 0;
+}
+
+static ssize_t client_test_dev_write(struct file *file, const char *data, size_t len, loff_t *offset)
+{
+ unsigned long count;
+ char str[2]; /* One value and carry return */
+ long int val = 0;
+ bool ok = true;
+
+ if (len > 2) {
+ SCSC_TAG_ERR(MXMAN_TEST, "Incorrect value len %zd\n", len);
+ goto error;
+ }
+
+ count = copy_from_user(str, data, len);
+
+ str[1] = 0;
+
+ if (kstrtol(str, 10, &val)) {
+ SCSC_TAG_ERR(MXMAN_TEST, "Invalid value\n");
+ goto error;
+ }
+
+ if (test) {
+ if (val) {
+ SCSC_TAG_INFO(MXMAN_TEST, "Start services\n");
+ ok = open_start_services(test->mx);
+ } else {
+ SCSC_TAG_INFO(MXMAN_TEST, "Stop services\n");
+ stop_close_services();
+ }
+ } else {
+ SCSC_TAG_ERR(MXMAN_TEST, "Test not created\n");
+ goto error;
+ }
+error:
+ SCSC_TAG_ERR(MXMAN_TEST, "%s\n", ok ? "OK" : "FAIL");
+ return ok ? len : -EIO;
+}
+
+/* Char-device read: produces no data.
+ * NOTE(review): returns the full requested length without writing the
+ * user buffer, so readers see stale data and never hit EOF - consider
+ * returning 0; confirm intended behaviour. */
+static ssize_t client_test_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
+{
+ return length;
+}
+
+/* Char-device release: nothing to tear down, just log. */
+static int client_test_dev_release(struct inode *inode, struct file *file)
+{
+ SCSC_TAG_DEBUG(MXMAN_TEST, "close client test\n");
+ return 0;
+}
+
+/* File operations for the /dev/mx_client_test_N control node. */
+static const struct file_operations client_test_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = client_test_dev_open,
+ .read = client_test_dev_read,
+ .write = client_test_dev_write,
+ .release = client_test_dev_release,
+};
+
+static int __init scsc_client_test_module_init(void)
+{
+ int r;
+
+ SCSC_TAG_DEBUG(MXMAN_TEST, "mx140:\n");
+
+ r = scsc_mx_module_register_client_module(&client_test_driver);
+ if (r) {
+ SCSC_TAG_ERR(MXMAN_TEST, "scsc_mx_module_register_client_module failed: r=%d\n", r);
+ return r;
+ }
+
+ r = alloc_chrdev_region(&client_test_dev_t, 0, 1, "wlbt-null-service");
+ if (r < 0) {
+ SCSC_TAG_ERR(MXMAN_TEST, "failed to alloc chrdev region\n");
+ goto fail_alloc_chrdev_region;
+ }
+
+ client_test_cdev = cdev_alloc();
+ if (!client_test_cdev) {
+ r = -ENOMEM;
+ SCSC_TAG_ERR(MXMAN_TEST, "failed to alloc cdev\n");
+ goto fail_alloc_cdev;
+ }
+
+ cdev_init(client_test_cdev, &client_test_dev_fops);
+ r = cdev_add(client_test_cdev, client_test_dev_t, 1);
+ if (r < 0) {
+ SCSC_TAG_ERR(MXMAN_TEST, "failed to add cdev\n");
+ goto fail_add_cdev;
+ }
+
+ client_test_class = class_create(THIS_MODULE, "sample");
+ if (!client_test_class) {
+ r = -EEXIST;
+ SCSC_TAG_ERR(MXMAN_TEST, "failed to create class\n");
+ goto fail_create_class;
+ }
+
+ if (!device_create(client_test_class, NULL, client_test_dev_t, NULL, "mx_client_test_%d", MINOR(client_test_dev_t))) {
+ r = -EINVAL;
+ SCSC_TAG_ERR(MXMAN_TEST, "failed to create device\n");
+ goto fail_create_device;
+ }
+
+ return 0;
+fail_create_device:
+ class_destroy(client_test_class);
+fail_create_class:
+ cdev_del(client_test_cdev);
+fail_add_cdev:
+fail_alloc_cdev:
+ unregister_chrdev_region(client_test_dev_t, 1);
+fail_alloc_chrdev_region:
+ return r;
+}
+
+/* Module exit: unregister from the core, then tear down the char device. */
+static void __exit scsc_client_test_module_exit(void)
+{
+ SCSC_TAG_DEBUG(MXMAN_TEST, "mx140:\n");
+ scsc_mx_module_unregister_client_module(&client_test_driver);
+ SCSC_TAG_DEBUG(MXMAN_TEST, "exit\n");
+
+ device_destroy(client_test_class, client_test_dev_t);
+ class_destroy(client_test_class);
+ cdev_del(client_test_cdev);
+ unregister_chrdev_region(client_test_dev_t, 1);
+}
+
+late_initcall(scsc_client_test_module_init);
+module_exit(scsc_client_test_module_exit);
+
+MODULE_DESCRIPTION("mx140 Client Test Driver");
+MODULE_AUTHOR("SCSC");
+MODULE_LICENSE("GPL");
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Software Mailbox Emulation shared data definitions.
+*
+* Ref: SC-506707-DD - Structure version 2
+*
+****************************************************************************/
+/**
+ * Circular buffer backed packet stream (Implementation)
+ *
+ * To allow easy distinction between full and empty buffers, there
+ * is one slot reserved in the available space. This means that the following
+ * conditions can be used to easily test the buffer's status without tracking
+ * the used size explicitly:
+ * - read_index == write_index : Buffer is empty
+ * - write_index + 1 == read_index : Buffer is full
+ * Without this reserved slot, read_index == write_index would be
+ * ambiguous: the buffer could be either empty or full.
+ */
+/* Implements */
+#include "cpacket_buffer.h"
+
+/* Uses */
+#include <scsc/scsc_logring.h>
+#include <linux/module.h>
+#include "miframman.h"
+
+
+/**
+ * Move a read/write index forward by @amount slots, wrapping modulo
+ * @buffer_size so the result always stays inside the ring.
+ */
+static inline void cpacketbuffer_advance_index(uint32_t *idx, uint32_t amount, uint32_t buffer_size)
+{
+	uint32_t next = *idx + amount;
+
+	*idx = next % buffer_size;
+}
+
+/**
+ * Map a packet-aligned address inside the ring storage to its packet
+ * index.
+ *
+ * The address must point at the start of a packet.
+ */
+static inline uint32_t cpacketbuffer_address_to_index(struct cpacketbuffer *buffer, const uint8_t *address)
+{
+	ptrdiff_t byte_offset = address - (uint8_t *)buffer->buffer;
+	uint32_t packet_index = byte_offset / buffer->packet_size;
+
+	return packet_index % buffer->num_packets;
+}
+
+/**
+ * Map a packet index (taken modulo the ring size) to the address of that
+ * packet's first byte.
+ */
+static inline uint8_t *cpacketbuffer_index_to_address(struct cpacketbuffer *buffer, uint32_t *idx)
+{
+	uint32_t slot = *idx % buffer->num_packets;
+
+	return (uint8_t *)buffer->buffer + slot * buffer->packet_size;
+}
+
+/** Returns the current read index (index of the next packet to read). */
+static inline uint32_t cpacketbuffer_read_index(const struct cpacketbuffer *buffer)
+{
+ return *buffer->read_index;
+}
+
+/** Returns the current write index (slot after the last packet written). */
+static inline uint32_t cpacketbuffer_write_index(const struct cpacketbuffer *buffer)
+{
+ return *buffer->write_index;
+}
+
+/** Writes a set of whole packets to the buffer.
+ *
+ * @num_bytes is rounded up to a whole number of packets; the trailing
+ * bytes of the last packet are left unwritten (stale ring contents).
+ * The shared write index is only published (after a write barrier) once
+ * all the payload bytes have been copied in, so a concurrent reader
+ * never sees a partially written packet.
+ *
+ * Returns false without writing anything if the ring lacks space.
+ */
+static bool cpacketbuffer_write_block(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes)
+{
+ uint32_t num_packets = (num_bytes + buffer->packet_size - 1) / buffer->packet_size;
+ const uint8_t *source_data;
+ uint32_t start_write_index;
+ uint32_t end_write_index;
+
+ if (num_packets > cpacketbuffer_free_space(buffer))
+ /* Not enough free packets to write this block */
+ return false;
+
+ source_data = (const uint8_t *)buf;
+ start_write_index = cpacketbuffer_write_index(buffer);
+ /* Index of the LAST packet of this block (hence num_packets - 1):
+ * if it wrapped below the start index, the copy must be split. */
+ end_write_index = start_write_index;
+ cpacketbuffer_advance_index(&end_write_index, num_packets - 1, buffer->num_packets);
+
+ if (end_write_index < start_write_index) {
+ /* Writes wrap around the buffer, split the write in two */
+ uint32_t initial_write_size = (buffer->num_packets - start_write_index) * buffer->packet_size;
+
+ memcpy(cpacketbuffer_index_to_address(buffer, buffer->write_index), source_data, initial_write_size);
+ memcpy(buffer->buffer, source_data + initial_write_size, num_bytes - initial_write_size);
+ } else
+ memcpy(cpacketbuffer_index_to_address(buffer, buffer->write_index), source_data, num_bytes);
+
+ /* CPU memory barrier: payload must be visible before the index update */
+ wmb();
+ cpacketbuffer_advance_index(buffer->write_index, num_packets, buffer->num_packets);
+
+ return true;
+}
+
+/** Log the serialised buffer configuration at debug level 4 (DBG4). */
+static void mxcbufconf_print(const struct mxcbufconf *buf_conf)
+{
+ SCSC_TAG_DBG4(CPKTBUFF, "mxcbufconf\n\tbuffer_loc: 0x%x\n\tnum_packets: %d\n\tpacket_size: %d\n\treadix: 0x%x\n\twriteix: 0x%x\n",
+ buf_conf->buffer_loc,
+ buf_conf->num_packets,
+ buf_conf->packet_size,
+ buf_conf->read_index_loc,
+ buf_conf->write_index_loc
+ );
+}
+
+/** Externally visible functions */
+
+/**
+ * Allocate and initialise a packet ring in shared MIF RAM.
+ *
+ * Three allocations are made from the common RAM manager: the packet
+ * storage itself plus one uint32_t each for the shared read and write
+ * indexes. Both indexes start at 0 (empty ring).
+ *
+ * Returns 0 on success, -ENOMEM if any allocation fails (earlier
+ * allocations are released on failure).
+ */
+int cpacketbuffer_init(struct cpacketbuffer *buffer, uint32_t num_packets, uint32_t packet_size, struct scsc_mx *mx)
+{
+	struct miframman *ramman;
+	uint32_t *ridx;
+	uint32_t *widx;
+	void *storage;
+
+	buffer->mx = mx;
+	ramman = scsc_mx_get_ramman(mx);
+
+	storage = miframman_alloc(ramman, num_packets * packet_size, 4, MIFRAMMAN_OWNER_COMMON);
+	if (!storage)
+		return -ENOMEM;
+
+	ridx = miframman_alloc(ramman, sizeof(uint32_t), 4, MIFRAMMAN_OWNER_COMMON);
+	if (!ridx)
+		goto fail_free_storage;
+
+	widx = miframman_alloc(ramman, sizeof(uint32_t), 4, MIFRAMMAN_OWNER_COMMON);
+	if (!widx)
+		goto fail_free_ridx;
+
+	buffer->buffer = storage;
+	buffer->num_packets = num_packets;
+	buffer->packet_size = packet_size;
+	buffer->read_index = ridx;
+	buffer->write_index = widx;
+	*ridx = 0;
+	*widx = 0;
+	return 0;
+
+fail_free_ridx:
+	miframman_free(ramman, ridx);
+fail_free_storage:
+	miframman_free(ramman, storage);
+	return -ENOMEM;
+}
+
+/**
+ * Return the shared index words and the packet storage to the RAM
+ * manager they were allocated from in cpacketbuffer_init().
+ */
+void cpacketbuffer_release(struct cpacketbuffer *buffer)
+{
+	struct miframman *ramman = scsc_mx_get_ramman(buffer->mx);
+
+	miframman_free(ramman, buffer->read_index);
+	miframman_free(ramman, buffer->write_index);
+	miframman_free(ramman, buffer->buffer);
+}
+
+/**
+ * Writes @num_bytes of data to the buffer, always consuming whole
+ * packets even when @num_bytes is not a multiple of the packet size.
+ *
+ * Returns true if the data was written; false for a NULL/empty request
+ * or if there is not enough free space in the ring.
+ */
+bool cpacketbuffer_write(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes)
+{
+	if (buf == NULL || num_bytes == 0)
+		return false;
+
+	SCSC_TAG_DBG4(CPKTBUFF, "Before: *buffer->read_index=0x%x *buffer->write_index=0x%x\n",
+		      *buffer->read_index, *buffer->write_index);
+
+	/* NOTE(review): the previous implementation cached the write index
+	 * in a local here but never read it; the dead variable has been
+	 * removed. cpacketbuffer_write_block() does all the work. */
+	if (!cpacketbuffer_write_block(buffer, buf, num_bytes))
+		return false;
+
+	/* CPU memory barrier */
+	wmb();
+
+	SCSC_TAG_DBG4(CPKTBUFF, "After: *buffer->read_index=0x%x *buffer->write_index=0x%x\n",
+		      *buffer->read_index, *buffer->write_index);
+
+	return true;
+}
+
+/**
+ * Writes a set of non-contiguous data blocks to the buffer as one
+ * contiguous packet stream, always consuming whole packets.
+ *
+ * A block whose length is not a multiple of the packet size leaves a
+ * partial packet which is topped up with bytes from the following
+ * block(s). NOTE: bufs[] pointers and num_bytes[] counts are advanced
+ * in place while coalescing, so the caller's arrays are modified.
+ *
+ * NOTE(review): only the whole-packet writes go through
+ * cpacketbuffer_write_block() and therefore check free space; the
+ * partial-packet path writes one extra packet unconditionally - confirm
+ * callers guarantee headroom. The previous implementation also cached
+ * the write index in an unused local, removed here.
+ *
+ * Returns true on success, false on bad arguments or insufficient free
+ * space for a whole-packet write.
+ */
+bool cpacketbuffer_write_gather(struct cpacketbuffer *buffer, const void **bufs, uint32_t *num_bytes, uint32_t num_bufs)
+{
+	uint32_t i;
+
+	if (bufs == NULL || num_bytes == NULL || num_bufs == 0)
+		return false;
+
+	for (i = 0; i < num_bufs; ++i) {
+		/* Write all the whole packets from this buffer */
+		uint32_t partial_packet_len = num_bytes[i] % buffer->packet_size;
+		uint32_t whole_packet_len = num_bytes[i] - partial_packet_len;
+
+		if (whole_packet_len > 0 &&
+		    !cpacketbuffer_write_block(buffer, bufs[i], whole_packet_len))
+			return false;
+
+		if (partial_packet_len != 0) {
+			/* Partial packet present - write this and enough from the next data block(s) to fill this packet
+			 * before continuing */
+			uint32_t needed_bytes;
+			uint8_t *write_ptr = cpacketbuffer_index_to_address(buffer, buffer->write_index);
+
+			memcpy(write_ptr, (const uint8_t *)bufs[i] + whole_packet_len, partial_packet_len);
+			write_ptr += partial_packet_len;
+			needed_bytes = buffer->packet_size - partial_packet_len;
+			while (i + 1 < num_bufs && needed_bytes > 0) {
+				uint32_t num_bytes_to_take = num_bytes[i + 1] >= needed_bytes ? needed_bytes : num_bytes[i + 1];
+
+				memcpy(write_ptr, bufs[i + 1], num_bytes_to_take);
+				bufs[i + 1] = (const uint8_t *)bufs[i + 1] + num_bytes_to_take;
+				num_bytes[i + 1] -= num_bytes_to_take;
+				write_ptr += num_bytes_to_take;
+				needed_bytes -= num_bytes_to_take;
+
+				if (num_bytes[i + 1] == 0)
+					/* This buffer has been consumed entirely, move to the next */
+					++i;
+			}
+
+			/* CPU memory barrier: payload before index publication */
+			wmb();
+			cpacketbuffer_advance_index(buffer->write_index, 1, buffer->num_packets);
+		}
+	}
+
+	/* CPU memory barrier */
+	wmb();
+
+	return true;
+}
+
+/**
+ * Reads up to @num_bytes from the buffer into @buf, removing whole
+ * packets from the ring (a trailing partial-packet request still
+ * removes the whole packet). The request is truncated to the data
+ * actually available.
+ *
+ * Returns the number of bytes copied, or 0 if @buf is NULL or the ring
+ * is empty.
+ */
+uint32_t cpacketbuffer_read(struct cpacketbuffer *buffer, void *buf, uint32_t num_bytes)
+{
+ uint8_t *read_start;
+ uint32_t num_packets;
+ uint32_t num_available_packets;
+
+ if (buf == NULL || cpacketbuffer_is_empty(buffer))
+ return 0;
+
+ /* Work out where we're reading from */
+ read_start = cpacketbuffer_index_to_address(buffer, buffer->read_index);
+ num_packets = num_bytes / buffer->packet_size;
+ if (num_bytes % buffer->packet_size != 0)
+ /* Partial data packet read requested, this means we remove the whole thing */
+ ++num_packets;
+
+ /* Ensure we have enough actual data to satisfy the read request, otherwise
+ * truncate the read request to the amount of data available.
+ * used = num_packets - (free + 1): the +1 accounts for the reserved
+ * full/empty disambiguation slot. */
+ num_available_packets = buffer->num_packets - (cpacketbuffer_free_space(buffer) + 1);
+ if (num_packets > num_available_packets) {
+ num_packets = num_available_packets;
+ num_bytes = num_packets * buffer->packet_size;
+ }
+
+ if (cpacketbuffer_read_index(buffer) + num_packets > buffer->num_packets) {
+ /* The read wraps around the end of the buffer, do it in two parts */
+ uint32_t initial_read_size = (buffer->num_packets - cpacketbuffer_read_index(buffer)) * buffer->packet_size;
+
+ memcpy(buf, read_start, initial_read_size);
+ memcpy((uint8_t *)buf + initial_read_size, buffer->buffer, num_bytes - initial_read_size);
+ } else
+ memcpy(buf, read_start, num_bytes);
+
+ /* CPU memory barrier */
+ wmb();
+ /* Update the read index with how many packets we pulled out of the stream */
+ cpacketbuffer_advance_index(buffer->read_index, num_packets, buffer->num_packets);
+ /* CPU memory barrier */
+ wmb();
+
+ return num_bytes;
+}
+
+/**
+ * Returns a pointer to the next unread packet without removing it:
+ * the packet at the read index when @current_packet is NULL, otherwise
+ * the packet after @current_packet. Returns NULL once the write index
+ * is reached (no more packets). Pair with cpacketbuffer_peek_complete()
+ * to consume the peeked packets.
+ */
+const void *cpacketbuffer_peek(struct cpacketbuffer *buffer, const void *current_packet)
+{
+ uint32_t next_packet_index;
+
+ SCSC_TAG_DBG4(CPKTBUFF, "*buffer->read_index=0x%x *buffer->write_index=0x%x\n",
+ *buffer->read_index, *buffer->write_index);
+ if (current_packet == NULL)
+ /* Reading the first available packet */
+ next_packet_index = cpacketbuffer_read_index(buffer);
+ else
+ /* Reading the next available packet past the current value of current_packet */
+ next_packet_index = cpacketbuffer_address_to_index(buffer,
+ (const uint8_t *)current_packet + buffer->packet_size);
+
+ if (next_packet_index == cpacketbuffer_write_index(buffer))
+ /* No more packets available */
+ return NULL;
+
+ return cpacketbuffer_index_to_address(buffer, &next_packet_index);
+}
+
+/**
+ * Removes all packets up to and including @current_packet from the ring
+ * by publishing a new shared read index. A NULL packet is a no-op.
+ */
+void cpacketbuffer_peek_complete(struct cpacketbuffer *buffer, const void *current_packet)
+{
+	uint32_t new_read_index;
+
+	if (!current_packet)
+		return;
+
+	/* current_packet is the last packet consumed, so reading resumes
+	 * at the slot immediately after it */
+	new_read_index = cpacketbuffer_address_to_index(buffer,
+			(const uint8_t *)current_packet + buffer->packet_size);
+	*buffer->read_index = new_read_index;
+	/* CPU memory barrier */
+	wmb();
+}
+
+/** The ring is empty exactly when the two shared indexes coincide. */
+bool cpacketbuffer_is_empty(const struct cpacketbuffer *buffer)
+{
+	uint32_t rd = cpacketbuffer_read_index(buffer);
+	uint32_t wr = cpacketbuffer_write_index(buffer);
+
+	return rd == wr;
+}
+
+/** Full when one more write would land the write index on the read index. */
+bool cpacketbuffer_is_full(const struct cpacketbuffer *buffer)
+{
+	uint32_t next_write = cpacketbuffer_write_index(buffer) + 1;
+
+	return next_write % buffer->num_packets == cpacketbuffer_read_index(buffer);
+}
+
+/**
+ * Number of packets that can still be written. One slot is always kept
+ * unused to distinguish a full ring from an empty one, so the raw gap
+ * between the indexes is reduced by 1.
+ */
+uint32_t cpacketbuffer_free_space(const struct cpacketbuffer *buffer)
+{
+	uint32_t rd = cpacketbuffer_read_index(buffer);
+	uint32_t wr = cpacketbuffer_write_index(buffer);
+	uint32_t gap;
+
+	if (wr >= rd)
+		gap = rd + buffer->num_packets - wr;
+	else
+		gap = rd - wr;
+
+	/* Subtract the full/empty identification reserved slot */
+	return gap - 1;
+}
+
+/** Returns the per-packet size in bytes the ring was initialised with. */
+uint32_t cpacketbuffer_packet_size(const struct cpacketbuffer *buffer)
+{
+ return buffer->packet_size;
+}
+
+/**
+ * Fill @buf_conf with MIF-relative references to the ring storage and
+ * both index words, plus the ring geometry, then log the result.
+ */
+void cpacketbuffer_config_serialise(const struct cpacketbuffer *buffer, struct mxcbufconf *buf_conf)
+{
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(buffer->mx);
+	scsc_mifram_ref ref;
+
+	mif->get_mifram_ref(mif, buffer->buffer, &ref);
+	buf_conf->buffer_loc = ref;
+	buf_conf->num_packets = buffer->num_packets;
+	buf_conf->packet_size = buffer->packet_size;
+
+	mif->get_mifram_ref(mif, buffer->read_index, &ref);
+	buf_conf->read_index_loc = ref;
+
+	mif->get_mifram_ref(mif, buffer->write_index, &ref);
+	buf_conf->write_index_loc = ref;
+
+	mxcbufconf_print(buf_conf);
+}
+
+/* Dump the ring indexes and the first 32-bit word of the packet at the
+ * read index at the requested log level. The const cast is needed only
+ * because cpacketbuffer_index_to_address() takes a non-const buffer; the
+ * ring is not modified. NOTE(review): read_start is dereferenced even
+ * when the ring is empty, so the logged word may be stale data.
+ */
+void cpacketbuffer_log(const struct cpacketbuffer *buffer, enum scsc_log_level log_level)
+{
+ const uint8_t *read_start = cpacketbuffer_index_to_address((struct cpacketbuffer *)buffer, buffer->read_index);
+
+ SCSC_TAG_LVL((CPKTBUFF), log_level,
+ "read_index=0x%x write_index=0x%x, read_start[0]=0x%08x\n",
+ *buffer->read_index, *buffer->write_index,
+ *(uint32_t *)read_start);
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * Circular buffer backed packet stream (Interface)
+ */
+
+#ifndef CPACKET_BUFFER_H__
+#define CPACKET_BUFFER_H__
+
+/* Uses */
+#include <linux/types.h>
+#include "scsc_mx_impl.h"
+#include "mxconf.h"
+#include "scsc_logring_common.h"
+
+struct cpacketbuffer;
+
+/**
+ * Initialises the circular buffer.
+ * The memory buffer length must be a multiple of the packet size.
+ */
+int cpacketbuffer_init(struct cpacketbuffer *buffer, uint32_t num_packets, uint32_t packet_size, struct scsc_mx *mx);
+void cpacketbuffer_release(struct cpacketbuffer *buffer);
+
+/**
+ * Reads the given amount of data from the buffer, copying it to the provided address.
+ * This automatically removes the read data from the buffer.
+ *
+ * If the amount of data requested is not a multiple of the packet size
+ * only the requested number of bytes will be read, but the partially read packet
+ * will still be removed from the buffer.
+ *
+ * Returns the number of bytes read, or 0 if the buffer was empty.
+ */
+uint32_t cpacketbuffer_read(struct cpacketbuffer *buffer, void *buf, uint32_t num_bytes);
+
+/**
+ * Returns a pointer to the next packet of data within the buffer, without
+ * removing it. This can be used to process data in place without needing to
+ * copy it first.
+ *
+ * If multiple packets are present these can be read in turn by setting the value
+ * of current_packet to the returned value from the previous call to cpacketbuffer_peek.
+ *
+ * cpacketbuffer_peek_complete must be called to remove the packet(s) from the buffer.
+ *
+ * Returns a pointer to the beginning of the packet to read, or NULL if there is no
+ * packet to process.
+ *
+ * Example use:
+ * // Get the first data packet
+ * void *current_packet = cpacketbuffer_peek( buffer, NULL );
+ * void *last_packet = NULL;
+ * while( current_packet != NULL )
+ * {
+ * // Process data packet
+ * ...
+ *
+ * // Get the next data packet
+ * last_packet = current_packet;
+ * current_packet = cpacketbuffer_peek( buffer, current_packet );
+ * }
+ *
+ * // Remove all processed packets from the buffer
+ * if( last_packet != NULL )
+ * {
+ * cpacketbuffer_peek_complete( buffer, last_packet );
+ * }
+ */
+const void *cpacketbuffer_peek(struct cpacketbuffer *buffer, const void *current_packet);
+
+/**
+ * Removes all packets from the buffer up to and including the given
+ * packet.
+ *
+ * This must be called after using cpacketbuffer_peek to indicate that packet(s)
+ * can be removed from the buffer.
+ */
+void cpacketbuffer_peek_complete(struct cpacketbuffer *buffer, const void *packet);
+
+/**
+ * Writes a number of bytes to the buffer. This will always use up whole packets in the buffer
+ * even if the amount of data written is not an exact multiple of the packet size.
+ *
+ * Returns true if the data was written, false if there is not enough free space in the buffer.
+ */
+bool cpacketbuffer_write(struct cpacketbuffer *buffer, const void *buf, uint32_t num_bytes);
+
+/**
+ * Writes a set of non-contiguous data blocks to the buffer as a contiguous set.
+ * This will always use up whole packets even if the
+ * amount of data written is not an exact multiple of the packet size.
+ *
+ * Returns true if the blocks were written, false if there is not enough
+ * free space in the buffer for all the blocks.
+ */
+bool cpacketbuffer_write_gather(struct cpacketbuffer *buffer, const void **bufs, uint32_t *num_bytes, uint32_t num_bufs);
+
+/**
+ * Returns the number of free packets in the buffer.
+ */
+uint32_t cpacketbuffer_free_space(const struct cpacketbuffer *buffer);
+
+/**
+ * Returns true if the buffer is empty.
+ */
+bool cpacketbuffer_is_empty(const struct cpacketbuffer *buffer);
+
+/**
+ * Returns true if the buffer is full.
+ */
+bool cpacketbuffer_is_full(const struct cpacketbuffer *buffer);
+
+/**
+ * Returns the packet size the buffer was initialised with. This is the same value
+ * as the packet_size argument passed to cpacketbuffer_init().
+ */
+uint32_t cpacketbuffer_packet_size(const struct cpacketbuffer *buffer);
+
+void cpacketbuffer_config_serialise(const struct cpacketbuffer *buffer, struct mxcbufconf *buf_conf);
+
+/**
+ * Log the state of this packet buffer at the specified log_level.
+ */
+void cpacketbuffer_log(const struct cpacketbuffer *buffer, enum scsc_log_level log_level);
+
+/**
+ * Buffer context object.
+ *
+ * The packet storage and both index words are allocated from shared MIF
+ * RAM by cpacketbuffer_init() so the ring state can be exported to the
+ * other end of the transport (see cpacketbuffer_config_serialise()).
+ * One slot is always kept unused to distinguish a full ring from an
+ * empty one.
+ */
+struct cpacketbuffer {
+ struct scsc_mx *mx;
+ void *buffer; /* Buffer location */
+ uint32_t num_packets; /* Total number of packets that can be stored in the buffer */
+ uint32_t packet_size; /* Size of each individual packet within the buffer */
+
+ /** Pointers to 32bit R/W indexes - these should point to uint32_ts */
+ uint32_t *read_index; /* Pointer to the location of the read index, which
+ * contains the index of the next packet to read. */
+ uint32_t *write_index; /* Pointer to the location of the write index, which
+ * contains the index after the last packet written. */
+};
+
+#endif /* CPACKET_BUFFER_H__ */
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+****************************************************************************/
+
+#ifndef __FUNCTOR_H
+#define __FUNCTOR_H
+
+/**
+ * Minimal Functor (no returns, no args other than self).
+ */
+struct functor {
+ /**
+ * The callback invoked by functor_call().
+ *
+ * A pointer to the functor itself is passed to the call.
+ *
+ * Typically the implementation will upcast this (container_of)
+ * to access a container context.
+ */
+ void (*call)(struct functor *f);
+};
+
+/**
+ * Initialise this functor, binding @call as the callback that
+ * functor_call() will invoke.
+ */
+static inline void functor_init(struct functor *f, void (*call)(struct functor *f))
+{
+ f->call = call;
+}
+
+/**
+ * Invoke this functor: calls the bound callback with the functor itself
+ * as the sole argument.
+ */
+static inline void functor_call(struct functor *f)
+{
+ f->call(f);
+}
+
+#endif /* __FUNCTOR_H */
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <scsc/scsc_logring.h>
+#include "panic_record_r4_defs.h"
+
+#define PANIC_RECORD_CKSUM_SEED 0xa5a5a5a5
+/*
+ * version 2 r4 panic record defs
+ */
+#define R4_PANIC_RECORD_VERSION_2 2
+#define R4_PANIC_RECORD_LENGTH_INDEX_V2 1
+#define R4_PANIC_RECORD_MAX_LENGTH_V2 256
+
+/*
+ * version 1 mr4 panic record defs
+ */
+#define M4_PANIC_RECORD_VERSION_1 1
+#define M4_PANIC_RECORD_VERSION_INDEX 0
+#define M4_PANIC_RECORD_LENGTH_INDEX 1
+#define M4_PANIC_RECORD_MAX_LENGTH 256
+
+#define R4_PANIC_RECORD_V2_SYMPATHETIC_PANIC_FLAG_INDEX 51
+#define M4_PANIC_RECORD_SYMPATHETIC_PANIC_FLAG_INDEX 39
+
+/**
+ * Compute the 32-bit XOR of a seed value and an array of words.
+ *
+ * @param seed Initial seed value.
+ * @param data Array of uint32s to be xored.
+ * @param len  Number of uint32s to be xored.
+ *
+ * @return seed ^ data[0] ^ ... ^ data[len - 1].
+ */
+static u32 xor32(uint32_t seed, const u32 data[], size_t len)
+{
+	u32 acc = seed;
+	size_t n;
+
+	for (n = 0; n < len; n++)
+		acc ^= data[n];
+	return acc;
+}
+
+/* Dump every word of a panic record via the FW_PANIC log tag.
+ * @r4 selects the R4 or M4 labels used in the log lines.
+ */
+static void panic_record_dump(u32 *panic_record, u32 panic_record_length, bool r4)
+{
+	u32 i; /* u32, not int: avoids signed/unsigned comparison with the length */
+
+	SCSC_TAG_INFO(FW_PANIC, "%s panic record dump(length=%d):\n",
+		      r4 ? "R4" : "M4", panic_record_length);
+	for (i = 0; i < panic_record_length; i++)
+		SCSC_TAG_INFO(FW_PANIC, "%s_panic_record[%d] = %08x\n",
+			      r4 ? "r4" : "m4", i, panic_record[i]);
+}
+
+/**
+ * Validate and dump a version-2 R4 panic record.
+ *
+ * The record's length field is in bytes (converted here to words); the
+ * last word is an XOR checksum (seeded with PANIC_RECORD_CKSUM_SEED) of
+ * all preceding words. On success the record is dumped and its length
+ * in words is stored in *r4_panic_record_length.
+ *
+ * Returns true if both the length and the checksum are valid.
+ */
+static bool fw_parse_r4_panic_record_v2(u32 *r4_panic_record, u32 *r4_panic_record_length)
+{
+	u32 panic_record_cksum;
+	u32 calculated_cksum;
+	u32 panic_record_length = *(r4_panic_record + R4_PANIC_RECORD_LENGTH_INDEX_V2) / 4;
+
+	SCSC_TAG_INFO(FW_PANIC, "panic_record_length: %d\n", panic_record_length);
+
+	/* Reject a zero length up front: the checksum lives at
+	 * record[length - 1], which would otherwise read out of bounds. */
+	if (panic_record_length > 0 && panic_record_length < R4_PANIC_RECORD_MAX_LENGTH_V2) {
+		panic_record_cksum = *(r4_panic_record + panic_record_length - 1);
+		calculated_cksum = xor32(PANIC_RECORD_CKSUM_SEED, r4_panic_record, panic_record_length - 1);
+		if (calculated_cksum == panic_record_cksum) {
+			SCSC_TAG_INFO(FW_PANIC, "panic_record_cksum OK: %08x\n",
+				      calculated_cksum);
+			panic_record_dump(r4_panic_record, panic_record_length, true);
+			*r4_panic_record_length = panic_record_length;
+			return true;
+		} else {
+			SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_cksum: 0x%x calculated_cksum: 0x%x\n",
+				     panic_record_cksum, calculated_cksum);
+		}
+	} else {
+		SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_length: %d\n",
+			     panic_record_length);
+	}
+	return false;
+}
+
+/**
+ * Validate and dump a version-1 M4 panic record.
+ *
+ * Same layout rules as the R4 v2 record: byte length field converted to
+ * words, last word is the XOR checksum of all preceding words. On
+ * success stores the word length in *m4_panic_record_length.
+ *
+ * Returns true if both the length and the checksum are valid.
+ */
+static bool fw_parse_m4_panic_record_v1(u32 *m4_panic_record, u32 *m4_panic_record_length)
+{
+	u32 panic_record_cksum;
+	u32 calculated_cksum;
+	u32 panic_record_length = *(m4_panic_record + M4_PANIC_RECORD_LENGTH_INDEX) / 4;
+
+	SCSC_TAG_INFO(FW_PANIC, "panic_record_length: %d\n", panic_record_length);
+
+	/* Reject a zero length up front: the checksum lives at
+	 * record[length - 1], which would otherwise read out of bounds. */
+	if (panic_record_length > 0 && panic_record_length < M4_PANIC_RECORD_MAX_LENGTH) {
+		panic_record_cksum = *(m4_panic_record + panic_record_length - 1);
+		calculated_cksum = xor32(PANIC_RECORD_CKSUM_SEED, m4_panic_record, panic_record_length - 1);
+		if (calculated_cksum == panic_record_cksum) {
+			SCSC_TAG_INFO(FW_PANIC, "panic_record_cksum OK: %08x\n",
+				      calculated_cksum);
+			panic_record_dump(m4_panic_record, panic_record_length, false);
+			*m4_panic_record_length = panic_record_length;
+			return true;
+		} else {
+			SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_cksum: 0x%x calculated_cksum: 0x%x\n",
+				     panic_record_cksum, calculated_cksum);
+		}
+	} else {
+		SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_length: %d\n",
+			     panic_record_length);
+	}
+	return false;
+}
+
+/* Dispatch an R4 panic record to the parser matching its version field.
+ * Returns false (after logging) for unknown versions. */
+bool fw_parse_r4_panic_record(u32 *r4_panic_record, u32 *r4_panic_record_length)
+{
+	u32 panic_record_version = *(r4_panic_record + PANIC_RECORD_R4_VERSION_INDEX);
+
+	SCSC_TAG_INFO(FW_PANIC, "panic_record_version: %d\n", panic_record_version);
+
+	if (panic_record_version == R4_PANIC_RECORD_VERSION_2)
+		return fw_parse_r4_panic_record_v2(r4_panic_record, r4_panic_record_length);
+
+	SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_version: %d\n",
+		     panic_record_version);
+	return false;
+}
+
+/* Dispatch an M4 panic record to the parser matching its version field.
+ * Returns false (after logging) for unknown versions. */
+bool fw_parse_m4_panic_record(u32 *m4_panic_record, u32 *m4_panic_record_length)
+{
+	u32 panic_record_version = *(m4_panic_record + M4_PANIC_RECORD_VERSION_INDEX);
+
+	SCSC_TAG_INFO(FW_PANIC, "panic_record_version: %d\n", panic_record_version);
+
+	if (panic_record_version == M4_PANIC_RECORD_VERSION_1)
+		return fw_parse_m4_panic_record_v1(m4_panic_record, m4_panic_record_length);
+
+	SCSC_TAG_ERR(FW_PANIC, "BAD panic_record_version: %d\n",
+		     panic_record_version);
+	return false;
+}
+
+/* Read the sympathetic-panic flag word from a v2 R4 panic record;
+ * any non-zero word counts as true. */
+bool fw_parse_get_r4_sympathetic_panic_flag(u32 *r4_panic_record)
+{
+	return r4_panic_record[R4_PANIC_RECORD_V2_SYMPATHETIC_PANIC_FLAG_INDEX] != 0;
+}
+
+/* Read the sympathetic-panic flag word from a v1 M4 panic record;
+ * any non-zero word counts as true. */
+bool fw_parse_get_m4_sympathetic_panic_flag(u32 *m4_panic_record)
+{
+	return m4_panic_record[M4_PANIC_RECORD_SYMPATHETIC_PANIC_FLAG_INDEX] != 0;
+}
+
+/**
+ * Format a panic record dump into a caller-supplied buffer.
+ *
+ * @param processor           Label used in each line; NULL defaults to "WLBT".
+ * @param panic_record        Record words to dump.
+ * @param panic_record_length Number of words to dump.
+ * @param buffer              Output buffer (always NUL-terminated when blen > 0).
+ * @param blen                Output buffer size in bytes.
+ *
+ * @return Number of bytes written to @buffer, never more than blen - 1.
+ *         (The previous implementation accumulated snprintf's
+ *         "would-have-written" totals, so on truncation it could report
+ *         more bytes than the buffer actually holds.)
+ */
+int panic_record_dump_buffer(char *processor, u32 *panic_record,
+			     u32 panic_record_length, char *buffer, size_t blen)
+{
+	size_t used;
+	int ret;
+	u32 i;
+
+	if (!buffer || blen == 0)
+		return 0;
+
+	if (!processor)
+		processor = "WLBT";
+
+	ret = snprintf(buffer, blen, "%s panic record dump(length=%d):\n",
+		       processor, panic_record_length);
+	if (ret < 0)
+		return 0;
+	/* snprintf returns the untruncated length - clamp to what fits */
+	used = min_t(size_t, (size_t)ret, blen - 1);
+
+	for (i = 0; i < panic_record_length && used < blen - 1; i++) {
+		ret = snprintf(buffer + used, blen - used, "%s_panic_record[%d] = %08x\n",
+			       processor, i, panic_record[i]);
+		if (ret < 0)
+			break;
+		used += min_t(size_t, (size_t)ret, blen - 1 - used);
+	}
+
+	return used;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef FW_PANIC_RECORD_H__
+#define FW_PANIC_RECORD_H__
+
+bool fw_parse_r4_panic_record(u32 *r4_panic_record, u32 *r4_panic_record_length);
+bool fw_parse_m4_panic_record(u32 *m4_panic_record, u32 *m4_panic_record_length);
+
+bool fw_parse_get_r4_sympathetic_panic_flag(u32 *r4_panic_record);
+bool fw_parse_get_m4_sympathetic_panic_flag(u32 *m4_panic_record);
+
+int panic_record_dump_buffer(char *processor, u32 *panic_record,
+ u32 panic_record_length, char *buffer, size_t blen);
+#endif /* FW_PANIC_RECORD_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <scsc/scsc_logring.h>
+#include "fwhdr.h"
+
+/*
+ * The Maxwell Firmware Header Format is defined in SC-505846-SW
+ */
+
+#define FWHDR_02_TRAMPOLINE_OFFSET 0
+#define FWHDR_02_MAGIC_OFFSET 8
+#define FWHDR_02_VERSION_MINOR_OFFSET 12
+#define FWHDR_02_VERSION_MAJOR_OFFSET 14
+#define FWHDR_02_LENGTH_OFFSET 16
+#define FWHDR_02_FIRMWARE_API_VERSION_MINOR_OFFSET 20
+#define FWHDR_02_FIRMWARE_API_VERSION_MAJOR_OFFSET 22
+#define FWHDR_02_FIRMWARE_CRC_OFFSET 24
+#define FWHDR_02_CONST_FW_LENGTH_OFFSET 28
+#define FWHDR_02_CONST_CRC_OFFSET 32
+#define FWHDR_02_FIRMWARE_RUNTIME_LENGTH_OFFSET 36
+#define FWHDR_02_FIRMWARE_ENTRY_POINT_OFFSET 40
+#define FWHDR_02_BUILD_ID_OFFSET 48
+#define FWHDR_02_R4_PANIC_RECORD_OFFSET_OFFSET 176
+#define FWHDR_02_M4_PANIC_RECORD_OFFSET_OFFSET 180
+#define FWHDR_02_TTID_OFFSET 184
+
+/*
+ * Firmware header format for version 1.0 is same as version for 0.2
+ */
+#define FWHDR_02_TRAMPOLINE(__fw) (*((u32 *)(__fw + FWHDR_02_TRAMPOLINE_OFFSET)))
+#define FWHDR_02_HEADER_FIRMWARE_ENTRY_POINT(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_ENTRY_POINT_OFFSET)))
+#define FWHDR_02_HEADER_FIRMWARE_RUNTIME_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_RUNTIME_LENGTH_OFFSET)))
+#define FWHDR_02_HEADER_VERSION_MAJOR(__fw) (*((u16 *)(__fw + FWHDR_02_VERSION_MAJOR_OFFSET)))
+#define FWHDR_02_HEADER_VERSION_MINOR(__fw) (*((u16 *)(__fw + FWHDR_02_VERSION_MINOR_OFFSET)))
+#define FWHDR_02_HEADER_FIRMWARE_API_VERSION_MINOR(__fw) (*((u16 *)(__fw + FWHDR_02_FIRMWARE_API_VERSION_MINOR_OFFSET)))
+#define FWHDR_02_HEADER_FIRMWARE_API_VERSION_MAJOR(__fw) (*((u16 *)(__fw + FWHDR_02_FIRMWARE_API_VERSION_MAJOR_OFFSET)))
+#define FWHDR_02_FW_CRC32(__fw) (*((u32 *)(__fw + FWHDR_02_FIRMWARE_CRC_OFFSET)))
+#define FWHDR_02_HDR_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_LENGTH_OFFSET)))
+#define FWHDR_02_HEADER_CRC32(__fw) (*((u32 *)(__fw + (FWHDR_02_HDR_LENGTH(__fw)) - sizeof(u32))))
+#define FWHDR_02_CONST_CRC32(__fw) (*((u32 *)(__fw + FWHDR_02_CONST_CRC_OFFSET)))
+#define FWHDR_02_CONST_FW_LENGTH(__fw) (*((u32 *)(__fw + FWHDR_02_CONST_FW_LENGTH_OFFSET)))
+#define FWHDR_02_R4_PANIC_RECORD_OFFSET(__fw) (*((u32 *)(__fw + FWHDR_02_R4_PANIC_RECORD_OFFSET_OFFSET)))
+#define FWHDR_02_M4_PANIC_RECORD_OFFSET(__fw) (*((u32 *)(__fw + FWHDR_02_M4_PANIC_RECORD_OFFSET_OFFSET)))
+
+/* firmware header has a panic record if the firmware header length is at least 192 bytes long */
+#define MIN_HEADER_LENGTH_WITH_PANIC_RECORD 188
+
+#define FWHDR_MAGIC_STRING "smxf"
+
+/* Parse a version-0.2/1.0 firmware header into *fwhdr.
+ *
+ * Validates the "smxf" magic, then copies the version, CRC, length and
+ * panic-record-offset fields out of the raw header bytes via the
+ * FWHDR_02_* accessor macros.
+ *
+ * NOTE(review): the accessors read u16/u32 values straight from the byte
+ * stream - presumably fw is at least 4-byte aligned; confirm against the
+ * loader.
+ *
+ * Returns true if the magic matched and *fwhdr was filled in.
+ */
+static bool fwhdr_parse_v02(char *fw, struct fwhdr *fwhdr)
+{
+ if (!memcmp(fw + FWHDR_02_MAGIC_OFFSET, FWHDR_MAGIC_STRING, sizeof(FWHDR_MAGIC_STRING) - 1)) {
+ fwhdr->firmware_entry_point = FWHDR_02_HEADER_FIRMWARE_ENTRY_POINT(fw);
+ fwhdr->hdr_major = FWHDR_02_HEADER_VERSION_MAJOR(fw);
+ fwhdr->hdr_minor = FWHDR_02_HEADER_VERSION_MINOR(fw);
+ fwhdr->fwapi_major = FWHDR_02_HEADER_FIRMWARE_API_VERSION_MAJOR(fw);
+ fwhdr->fwapi_minor = FWHDR_02_HEADER_FIRMWARE_API_VERSION_MINOR(fw);
+ fwhdr->fw_crc32 = FWHDR_02_FW_CRC32(fw);
+ fwhdr->const_crc32 = FWHDR_02_CONST_CRC32(fw);
+ fwhdr->header_crc32 = FWHDR_02_HEADER_CRC32(fw);
+ fwhdr->const_fw_length = FWHDR_02_CONST_FW_LENGTH(fw);
+ fwhdr->hdr_length = FWHDR_02_HDR_LENGTH(fw);
+ fwhdr->fw_runtime_length = FWHDR_02_HEADER_FIRMWARE_RUNTIME_LENGTH(fw);
+ SCSC_TAG_INFO(FW_LOAD, "hdr_length=%d\n", fwhdr->hdr_length);
+ fwhdr->r4_panic_record_offset = FWHDR_02_R4_PANIC_RECORD_OFFSET(fw);
+ fwhdr->m4_panic_record_offset = FWHDR_02_M4_PANIC_RECORD_OFFSET(fw);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ /* NOTE(review): second M4 reuses the same offset field - verify intended */
+ fwhdr->m4_1_panic_record_offset = FWHDR_02_M4_PANIC_RECORD_OFFSET(fw);
+#endif
+ return true;
+ }
+ return false;
+}
+
+/* Return a pointer to the build-id string inside a v0.2 header, or NULL
+ * if the "smxf" magic does not match. */
+static char *fwhdr_get_build_id_v02(char *fw, struct fwhdr *fwhdr)
+{
+	if (memcmp(fw + FWHDR_02_MAGIC_OFFSET, FWHDR_MAGIC_STRING, sizeof(FWHDR_MAGIC_STRING) - 1) != 0)
+		return NULL;
+	return fw + FWHDR_02_BUILD_ID_OFFSET;
+}
+
+/* Return a pointer to the TTID string inside a v0.2 header, or NULL if
+ * the header is too short to contain it or the magic does not match. */
+static char *fwhdr_get_ttid_v02(char *fw, struct fwhdr *fwhdr)
+{
+	if (fwhdr->hdr_length < FWHDR_02_TTID_OFFSET)
+		return NULL;
+	if (memcmp(fw + FWHDR_02_MAGIC_OFFSET, FWHDR_MAGIC_STRING, sizeof(FWHDR_MAGIC_STRING) - 1) != 0)
+		return NULL;
+	return fw + FWHDR_02_TTID_OFFSET;
+}
+
+/* Public entry point: currently only the v0.2/1.0 header format exists. */
+bool fwhdr_parse(char *fw, struct fwhdr *fwhdr)
+{
+ return fwhdr_parse_v02(fw, fwhdr);
+}
+
+/* Public entry point: build-id lookup, v0.2/1.0 format only. */
+char *fwhdr_get_build_id(char *fw, struct fwhdr *fwhdr)
+{
+ return fwhdr_get_build_id_v02(fw, fwhdr);
+}
+
+/* Public entry point: TTID lookup, v0.2/1.0 format only. */
+char *fwhdr_get_ttid(char *fw, struct fwhdr *fwhdr)
+{
+ return fwhdr_get_ttid_v02(fw, fwhdr);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef FWHDR_H
+#define FWHDR_H
+
+#define FW_BUILD_ID_SZ 128
+#define FW_TTID_SZ 32
+
+/* Parsed Maxwell firmware header (format defined in SC-505846-SW).
+ * Filled in from the raw header bytes by fwhdr_parse(). */
+struct fwhdr {
+ /* Header format version */
+ u16 hdr_major;
+ u16 hdr_minor;
+
+ /* Firmware API version */
+ u16 fwapi_major;
+ u16 fwapi_minor;
+
+ u32 firmware_entry_point;
+ u32 fw_runtime_length;
+
+ /* CRC32s of the firmware image, const section and header */
+ u32 fw_crc32;
+ u32 const_crc32;
+ u32 header_crc32;
+
+ u32 const_fw_length;
+ u32 hdr_length;
+ /* Byte offsets of the R4/M4 panic records within the image */
+ u32 r4_panic_record_offset;
+ u32 m4_panic_record_offset;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ u32 m4_1_panic_record_offset;
+#endif
+};
+
+bool fwhdr_parse(char *fw, struct fwhdr *fwhdr);
+char *fwhdr_get_build_id(char *fw, struct fwhdr *fwhdr);
+char *fwhdr_get_ttid(char *fw, struct fwhdr *fwhdr);
+
+#endif /* FWHDR_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <scsc/scsc_logring.h>
+#include "fwimage.h"
+
+/**
+ * Check the firmware header CRC.
+ *
+ * The last 4 bytes of the header hold the expected CRC of the first
+ * hdr_length - 4 bytes.
+ *
+ * Returns 0 if the CRC matches, -EINVAL otherwise.
+ */
+int fwimage_check_fw_header_crc(char *fw, u32 hdr_length, u32 header_crc32)
+{
+	u32 header_crc32_calculated;
+
+	header_crc32_calculated = ether_crc(hdr_length - sizeof(u32), fw);
+	if (header_crc32_calculated != header_crc32) {
+		/* CRCs are unsigned 32-bit values: log in hex rather than
+		 * with %d, which misprints values >= 0x80000000 */
+		SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: header_crc32_calculated=0x%08x header_crc32=0x%08x\n",
+			     header_crc32_calculated, header_crc32);
+		return -EINVAL;
+	}
+	SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: header_crc32_calculated=0x%08x header_crc32=0x%08x\n",
+		       header_crc32_calculated, header_crc32);
+	return 0;
+}
+
+/**
+ * Check the CRC of the firmware const section (the bytes between the end
+ * of the header and const_fw_length).
+ *
+ * Returns 0 if the CRC matches, -EINVAL otherwise.
+ */
+int fwimage_check_fw_const_section_crc(char *fw, u32 const_crc32, u32 const_fw_length, u32 hdr_length)
+{
+	u32 const_crc32_calculated;
+
+	const_crc32_calculated = ether_crc(const_fw_length - hdr_length, fw + hdr_length);
+	if (const_crc32_calculated != const_crc32) {
+		/* Log unsigned CRCs in hex, not %d */
+		SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: const_crc32_calculated=0x%08x const_crc32=0x%08x\n",
+			     const_crc32_calculated, const_crc32);
+		return -EINVAL;
+	}
+	SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: const_crc32_calculated=0x%08x const_crc32=0x%08x\n",
+		       const_crc32_calculated, const_crc32);
+	return 0;
+}
+
+/**
+ * fwimage_check_fw_crc() - validate the CRC of the whole runtime image.
+ * @fw:              pointer to the start of the firmware image.
+ * @fw_image_length: end offset of the runtime image within @fw.
+ * @hdr_length:      header length; the checked region starts after the header.
+ * @fw_crc32:        expected CRC value taken from the firmware header.
+ *
+ * Return: 0 on match, -EINVAL on mismatch.
+ */
+int fwimage_check_fw_crc(char *fw, u32 fw_image_length, u32 hdr_length, u32 fw_crc32)
+{
+	u32 fw_crc32_calculated;
+
+	fw_crc32_calculated = ether_crc(fw_image_length - hdr_length, fw + hdr_length);
+	if (fw_crc32_calculated != fw_crc32) {
+		/* u32 values: print with %u, not %d */
+		SCSC_TAG_ERR(FW_LOAD, "CRC32 doesn't match: fw_crc32_calculated=%u fw_crc32=%u\n",
+			     fw_crc32_calculated, fw_crc32);
+		return -EINVAL;
+	}
+	SCSC_TAG_DEBUG(FW_LOAD, "CRC32 OK: fw_crc32_calculated=%u fw_crc32=%u\n",
+		       fw_crc32_calculated, fw_crc32);
+	return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef FWIMAGE_H
+#define FWIMAGE_H
+
+/*
+ * CRC validation helpers for the Maxwell firmware image.
+ * Each returns 0 on success or -EINVAL on CRC mismatch.
+ */
+int fwimage_check_fw_header_crc(char *fw, u32 hdr_length, u32 header_crc32);
+int fwimage_check_fw_const_section_crc(char *fw, u32 const_crc32, u32 const_fw_length, u32 hdr_length);
+/* Parameter renamed from fw_runtime_length to match the definition in fwimage.c */
+int fwimage_check_fw_crc(char *fw, u32 fw_image_length, u32 hdr_length, u32 fw_crc32);
+
+#endif /* FWIMAGE_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/** Implements */
+#include "gdb_transport.h"
+
+/** Uses */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <scsc/scsc_logring.h>
+#include "mifintrbit.h"
+
+struct clients_node {
+ struct list_head list;
+ struct gdb_transport_client *gdb_client;
+};
+
+struct gdb_transport_node {
+ struct list_head list;
+ struct gdb_transport *gdb_transport;
+};
+
+static struct gdb_transport_module {
+ struct list_head clients_list;
+ struct list_head gdb_transport_list;
+} gdb_transport_module = {
+ .clients_list = LIST_HEAD_INIT(gdb_transport_module.clients_list),
+ .gdb_transport_list = LIST_HEAD_INIT(gdb_transport_module.gdb_transport_list)
+};
+
+/** Handle incoming packets and pass to handler */
+/** Handle incoming packets and pass them to the registered channel handler.
+ * Runs in atomic (interrupt) context. Each message on the input stream is a
+ * 4-byte length header followed by the payload. */
+static void gdb_input_irq_handler(int irq, void *data)
+{
+	struct gdb_transport *gdb_transport = (struct gdb_transport *)data;
+	struct scsc_mif_abs *mif_abs;
+	u32 num_bytes;
+	u32 alloc_bytes;
+	char *buf;
+
+	SCSC_TAG_DEBUG(GDB_TRANS, "Handling write signal.\n");
+
+	/* 1st length */
+	/* Clear the interrupt first to ensure we can't possibly miss one */
+	mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
+	mif_abs->irq_bit_clear(mif_abs, irq);
+
+	while (mif_stream_read(&gdb_transport->mif_istream, &num_bytes, sizeof(uint32_t))) {
+		SCSC_TAG_DEBUG(GDB_TRANS, "Transferring %d byte payload to handler.\n", num_bytes);
+		if (num_bytes > 0 && num_bytes
+		    < (GDB_TRANSPORT_BUF_LENGTH - sizeof(uint32_t))) {
+			alloc_bytes = sizeof(char) * num_bytes;
+			/* This is called in atomic context so must use kmalloc with GFP_ATOMIC flag */
+			buf = kmalloc(alloc_bytes, GFP_ATOMIC);
+			if (!buf) {
+				/* Fix: original dereferenced buf unchecked. Without
+				 * reading the payload the stream is out of sync, so
+				 * stop processing this interrupt. */
+				SCSC_TAG_ERR(GDB_TRANS, "kmalloc(%u) failed, dropping payload\n", num_bytes);
+				return;
+			}
+			/* 2nd payload (msg) */
+			mif_stream_read(&gdb_transport->mif_istream, buf, num_bytes);
+			/* Fix: handler is NULL until a client registers one */
+			if (gdb_transport->channel_handler_fn)
+				gdb_transport->channel_handler_fn(buf, num_bytes, gdb_transport->channel_handler_data);
+			else
+				SCSC_TAG_ERR(GDB_TRANS, "No channel handler, dropping %u bytes\n", num_bytes);
+			kfree(buf);
+		} else {
+			SCSC_TAG_ERR(GDB_TRANS, "Incorrect num_bytes: 0x%08x\n", num_bytes);
+			mif_stream_log(&gdb_transport->mif_istream, SCSC_ERR);
+		}
+	}
+}
+
+
+/** MIF Interrupt handler for acknowledging reads made by the AP */
+/* MIF interrupt handler: the firmware consumed data from our output
+ * stream. Nothing to do beyond acknowledging the interrupt. */
+static void gdb_output_irq_handler(int irq, void *data)
+{
+	struct gdb_transport *gdb_transport = (struct gdb_transport *)data;
+	struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
+
+	SCSC_TAG_DEBUG(GDB_TRANS, "Ignoring read signal.\n");
+
+	/* Acknowledge immediately so no interrupt can be missed */
+	mif_abs->irq_bit_clear(mif_abs, irq);
+}
+
+/* Invoke probe() of every registered client for a newly initialised
+ * transport. The list is only walked, never modified, here. */
+static void gdb_transport_probe_registered_clients(struct gdb_transport *gdb_transport)
+{
+	struct clients_node *node;
+	unsigned int probed = 0;
+
+	list_for_each_entry(node, &gdb_transport_module.clients_list, list) {
+		struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(gdb_transport->mx);
+		char *dev_uid = mif_abs->get_uid(mif_abs);
+
+		node->gdb_client->probe(node->gdb_client, gdb_transport, dev_uid);
+		probed++;
+	}
+
+	if (probed == 0)
+		SCSC_TAG_INFO(GDB_TRANS, "No clients registered\n");
+}
+
+/* Tear down a transport: notify every registered client via remove(),
+ * unlink the transport from the module list, then release both MIF
+ * streams. Safe to call even if the transport was never listed. */
+void gdb_transport_release(struct gdb_transport *gdb_transport)
+{
+ struct clients_node *gdb_client_node, *gdb_client_next;
+ struct gdb_transport_node *gdb_transport_node, *gdb_transport_node_next;
+ bool match = false;
+
+ list_for_each_entry_safe(gdb_transport_node, gdb_transport_node_next, &gdb_transport_module.gdb_transport_list, list) {
+ if (gdb_transport_node->gdb_transport == gdb_transport) {
+ match = true;
+ SCSC_TAG_INFO(GDB_TRANS, "release client\n");
+ /* Wait for client to close */
+ mutex_lock(&gdb_transport->channel_open_mutex);
+ /* Need to notify clients using the transport has been released */
+ list_for_each_entry_safe(gdb_client_node, gdb_client_next, &gdb_transport_module.clients_list, list) {
+ gdb_client_node->gdb_client->remove(gdb_client_node->gdb_client, gdb_transport);
+ }
+ mutex_unlock(&gdb_transport->channel_open_mutex);
+ list_del(&gdb_transport_node->list);
+ kfree(gdb_transport_node);
+ }
+ }
+ if (match == false)
+ SCSC_TAG_INFO(GDB_TRANS, "No match for given scsc_mif_abs\n");
+
+ /* Streams are released even when the transport was not found above */
+ mif_stream_release(&gdb_transport->mif_istream);
+ mif_stream_release(&gdb_transport->mif_ostream);
+}
+
+/* Serialise the transport's two stream configurations into trans_conf */
+void gdb_transport_config_serialise(struct gdb_transport *gdb_transport,
+				    struct mxtransconf *trans_conf)
+{
+	struct mif_stream *istream = &gdb_transport->mif_istream;
+	struct mif_stream *ostream = &gdb_transport->mif_ostream;
+
+	mif_stream_config_serialise(istream, &trans_conf->to_ap_stream_conf);
+	mif_stream_config_serialise(ostream, &trans_conf->from_ap_stream_conf);
+}
+
+
+/** Public functions */
+/**
+ * Initialise the gdb transport for the given processor type, set up the
+ * input/output MIF streams with their interrupt handlers, publish the
+ * transport on the module list and probe all registered clients.
+ * Called once during boot.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int gdb_transport_init(struct gdb_transport *gdb_transport, struct scsc_mx *mx, enum gdb_transport_enum type)
+{
+	int r;
+	uint32_t mem_length = GDB_TRANSPORT_BUF_LENGTH;
+	uint32_t packet_size = 4;
+	uint32_t num_packets;
+	struct gdb_transport_node *gdb_transport_node;
+
+	gdb_transport_node = kzalloc(sizeof(*gdb_transport_node), GFP_KERNEL);
+	if (!gdb_transport_node)
+		return -ENOMEM;	/* fix: was -EIO; allocation failure is -ENOMEM */
+
+	memset(gdb_transport, 0, sizeof(struct gdb_transport));
+	/* Buffer length is a multiple of the packet size (see header) */
+	num_packets = mem_length / packet_size;
+	mutex_init(&gdb_transport->channel_handler_mutex);
+	mutex_init(&gdb_transport->channel_open_mutex);
+	gdb_transport->mx = mx;
+
+	/* Input stream, interrupt bit allocated dynamically */
+	if (type == GDB_TRANSPORT_M4)
+		r = mif_stream_init(&gdb_transport->mif_istream, SCSC_MIF_ABS_TARGET_M4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, gdb_input_irq_handler, gdb_transport);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	else if (type == GDB_TRANSPORT_M4_1)
+		r = mif_stream_init(&gdb_transport->mif_istream, SCSC_MIF_ABS_TARGET_M4_1, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, gdb_input_irq_handler, gdb_transport);
+#endif
+	else
+		r = mif_stream_init(&gdb_transport->mif_istream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, gdb_input_irq_handler, gdb_transport);
+	if (r) {
+		kfree(gdb_transport_node);
+		return r;
+	}
+
+	/* Output stream, reserved interrupt bit */
+	if (type == GDB_TRANSPORT_M4)
+		r = mif_stream_init(&gdb_transport->mif_ostream, SCSC_MIF_ABS_TARGET_M4, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_RESERVED, gdb_output_irq_handler, gdb_transport);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	else if (type == GDB_TRANSPORT_M4_1)
+		r = mif_stream_init(&gdb_transport->mif_ostream, SCSC_MIF_ABS_TARGET_M4_1, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_RESERVED, gdb_output_irq_handler, gdb_transport);
+#endif
+	else
+		r = mif_stream_init(&gdb_transport->mif_ostream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_OUT, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_RESERVED, gdb_output_irq_handler, gdb_transport);
+	if (r) {
+		mif_stream_release(&gdb_transport->mif_istream);
+		kfree(gdb_transport_node);
+		return r;
+	}
+
+	/* No handler until a client registers one */
+	gdb_transport->channel_handler_fn = NULL;
+	gdb_transport->channel_handler_data = NULL;
+
+	gdb_transport_node->gdb_transport = gdb_transport;
+	/* Add gdb_transport node */
+	list_add_tail(&gdb_transport_node->list, &gdb_transport_module.gdb_transport_list);
+	gdb_transport->type = type;
+	gdb_transport_probe_registered_clients(gdb_transport);
+	return 0;
+}
+
+/**
+ * Send a message over the output stream as a 4-byte length header
+ * followed by the payload. The mutex serialises writers so the two
+ * stream writes cannot interleave.
+ *
+ * Fix: the original copied @message into a 300-byte stack buffer that
+ * was never used (the stream writes used @message directly) — the dead
+ * copy is removed. The 300-byte cap is kept, but oversized messages are
+ * now logged instead of being dropped silently.
+ */
+void gdb_transport_send(struct gdb_transport *gdb_transport, void *message, uint32_t message_length)
+{
+	if (message_length > 300) {
+		SCSC_TAG_ERR(GDB_TRANS, "message_length %u exceeds limit, dropped\n", message_length);
+		return;
+	}
+
+	mutex_lock(&gdb_transport->channel_handler_mutex);
+	/* 1st length */
+	mif_stream_write(&gdb_transport->mif_ostream, &message_length, sizeof(uint32_t));
+	/* 2nd payload (msg) */
+	mif_stream_write(&gdb_transport->mif_ostream, message, message_length);
+	mutex_unlock(&gdb_transport->channel_handler_mutex);
+}
+EXPORT_SYMBOL(gdb_transport_send);
+EXPORT_SYMBOL(gdb_transport_send);
+
+/* Install (or clear, with handler == NULL) the channel message handler.
+ * Serialised against message delivery by channel_handler_mutex. */
+void gdb_transport_register_channel_handler(struct gdb_transport *gdb_transport,
+					    gdb_channel_handler handler, void *data)
+{
+	mutex_lock(&gdb_transport->channel_handler_mutex);
+	gdb_transport->channel_handler_data = data;
+	gdb_transport->channel_handler_fn = handler;
+	mutex_unlock(&gdb_transport->channel_handler_mutex);
+}
+EXPORT_SYMBOL(gdb_transport_register_channel_handler);
+EXPORT_SYMBOL(gdb_transport_register_channel_handler);
+
+/* Register a client. Its probe() callback is invoked immediately for
+ * every transport that already exists; transports created later probe
+ * the client from gdb_transport_init(). Returns 0 or -ENOMEM. */
+int gdb_transport_register_client(struct gdb_transport_client *gdb_client)
+{
+	struct gdb_transport_node *transport_node;
+	struct clients_node *client_node;
+
+	/* Record the client on the module-wide list */
+	client_node = kzalloc(sizeof(*client_node), GFP_KERNEL);
+	if (!client_node)
+		return -ENOMEM;
+
+	client_node->gdb_client = gdb_client;
+	list_add_tail(&client_node->list, &gdb_transport_module.clients_list);
+
+	/* Probe the new client against every existing transport */
+	list_for_each_entry(transport_node, &gdb_transport_module.gdb_transport_list, list) {
+		struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(transport_node->gdb_transport->mx);
+		char *dev_uid = mif_abs->get_uid(mif_abs);
+
+		gdb_client->probe(gdb_client, transport_node->gdb_transport, dev_uid);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(gdb_transport_register_client);
+EXPORT_SYMBOL(gdb_transport_register_client);
+
+/* Remove every registration entry belonging to gdb_client */
+void gdb_transport_unregister_client(struct gdb_transport_client *gdb_client)
+{
+	struct clients_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, &gdb_transport_module.clients_list, list) {
+		if (node->gdb_client != gdb_client)
+			continue;
+		list_del(&node->list);
+		kfree(node);
+	}
+}
+EXPORT_SYMBOL(gdb_transport_unregister_client);
+EXPORT_SYMBOL(gdb_transport_unregister_client);
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * Maxwell gdb transport (Interface)
+ *
+ * Provides bi-directional communication between the firmware and the
+ * host.
+ *
+ * This interface also provides a utility method for sending messages across
+ * the stream.
+ */
+
+#ifndef GDB_TRANSPORT_H__
+#define GDB_TRANSPORT_H__
+
+/** Uses */
+#include <linux/kthread.h>
+#include "mifstream.h"
+
+/*
+ * Initialising a buffer of 1 byte is never legitimate, do not allow it.
+ * The memory buffer length must be a multiple of the packet size.
+ */
+#define GDB_TRANSPORT_BUF_LENGTH (2 * 1024)
+
+struct gdb_transport;
+
+/* Processor the transport is attached to */
+enum gdb_transport_enum {
+ GDB_TRANSPORT_R4 = 0,
+ GDB_TRANSPORT_M4,
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ GDB_TRANSPORT_M4_1,
+#endif
+};
+/**
+ * Transport channel callback handler. This will be invoked each time a message on a channel is
+ * received. Handlers may perform work within
+ * their callback implementation, but should not block.
+ *
+ * Note that the message pointer passed is only valid for the duration of the function call.
+ */
+typedef void (*gdb_channel_handler)(const void *message, size_t length, void *data);
+
+/**
+ * Sends a message over the transport's output stream.
+ *
+ * NOTE(review): this comment previously said "to the AP", but the output
+ * stream carries AP-to-firmware traffic in gdb_transport.c — confirm the
+ * intended direction. Messages larger than the internal limit are dropped.
+ */
+void gdb_transport_send(struct gdb_transport *gdb_transport,
+ void *message, uint32_t message_length);
+
+/**
+ * Initialises the maxwell management transport and configures the necessary
+ * interrupt handlers. Called once during boot.
+ */
+int gdb_transport_init(struct gdb_transport *gdb_transport, struct scsc_mx *mx, enum gdb_transport_enum type);
+void gdb_transport_release(struct gdb_transport *gdb_transport);
+
+/*
+ * Initialises the configuration area incl. Maxwell Infrastructure Configuration,
+ * MIF Management Transport Configuration and MIF Management Stream Configuration.
+ */
+void gdb_transport_config_serialise(struct gdb_transport *gdb_transport, struct mxtransconf *trans_conf);
+void gdb_transport_set_error(struct gdb_transport *gdb_transport);
+
+struct gdb_transport {
+ struct scsc_mx *mx;
+ struct mif_stream mif_istream;
+ struct mif_stream mif_ostream;
+ /** Registered channel handlers for messages coming from the AP for each channel */
+ gdb_channel_handler channel_handler_fn;
+ void *channel_handler_data;
+ struct mutex channel_handler_mutex;
+ struct mutex channel_open_mutex;
+ /* Transport processor type */
+ enum gdb_transport_enum type;
+};
+
+/* Client interface: probe() is called for each (client, transport) pair,
+ * remove() when a transport is released. */
+struct gdb_transport_client {
+ char *name;
+ void (*probe)(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport, char *dev_uid);
+ void (*remove)(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport);
+};
+
+int gdb_transport_register_client(struct gdb_transport_client *gdb_client);
+void gdb_transport_unregister_client(struct gdb_transport_client *gdb_client);
+void gdb_transport_register_channel_handler(struct gdb_transport *gdb_transport, gdb_channel_handler handler, void *data);
+void gdb_transport_register_char_device(struct scsc_mx *mx, struct gdb_transport **gdb_transport_handler);
+#endif /* GDB_TRANSPORT_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_H
+#define __MIF_REG_H
+
+/*********************************/
+/* PLATFORM register definitions */
+/*********************************/
+#define NUM_MBOX_PLAT 8
+#define NUM_SEMAPHORE 12
+
+#define MAILBOX_WLBT_BASE 0x0000
+#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
+#define MCUCTRL 0x000 /* MCU Controller Register */
+/* R0 [31:16] - Int FROM R4/M4 */
+#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
+#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
+#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
+#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
+#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
+/* R1 [15:0] - Int TO R4 */
+#define INTGR1 0x01c /* Interrupt Generation Register 1 */
+#define INTCR1 0x020 /* Interrupt Clear Register 1 */
+#define INTMR1 0x024 /* Interrupt Mask Register 1 */
+#define INTSR1 0x028 /* Interrupt Status Register 1 */
+#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
+/* R2 [15:0] - Int TO M4 */
+#define INTGR2 0x030 /* Interrupt Generation Register 2 */
+#define INTCR2 0x034 /* Interrupt Clear Register 2 */
+#define INTMR2 0x038 /* Interrupt Mask Register 2 */
+#define INTSR2 0x03c /* Interrupt Status Register 2 */
+#define INTMSR2 0x040 /* Interrupt Mask Status Register 2 */
+#define MIF_INIT 0x04c /* MIF_init */
+#define IS_VERSION 0x050 /* Version Information Register */
+#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
+#define ISSR(r) (ISSR_BASE + (4 * (r)))
+#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
+#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
+#define SEMA0CON 0x1c0
+#define SEMA0STATE 0x1c8
+
+
+/* POWER */
+/* Page 594 datasheet */
+/* Base Address - 0x11C8_0000 */
+#define WIFI_CTRL_NS 0x0140 /* WIFI Control SFR non-secure */
+#define WIFI_PWRON BIT(1)
+#define WIFI_RESET_SET BIT(2)
+#define WIFI_ACTIVE_EN BIT(5) /* Enable of WIFI_ACTIVE_REQ */
+#define WIFI_ACTIVE_CLR BIT(6) /* WIFI_ACTIVE_REQ is clear internally on WAKEUP */
+#define WIFI_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WIFI_RESET_REQ */
+#define WIFI_RESET_REQ_CLR BIT(8) /* WIFI_RESET_REQ is clear internally on WAKEUP */
+#define MASK_WIFI_PWRDN_DONE BIT(9) /* 1:mask, 0 : pass RTC clock out enable to WIFI
+ * This mask WIFI_PWRDN_DONE come in from WIFI.
+ * If MASK_WIFI_PWRDN_DONE = 1, WIFI enter to DOWN
+ * state without checking WIFI_PWRDN_DONE*/
+
+
+
+#define WIFI_CTRL_S 0x0144 /* WIFI Control SFR secure */
+#define WIFI_START BIT(3) /* WIFI Reset release control If WIFI_START = 1,
+ * WIFI exit from DOWN state and go to UP state.
+ * If this field is set to high (WIFI_START = 1)
+ * WIFI state can go to UP state. This signal can be
+ * auto-clear by DIRECTWR at UP */
+
+#define WIFI_STAT 0x0148 /* Indicate whether WIFI uses MIF domain */
+#define WIFI_DEBUG 0x014c /* MIF sleep, wakeup debugging control */
+/* Page 1574 datasheet */
+#define PMU_ALIVE_BASE 0x0000
+#define PMU_ALIVE_REG(r) (PMU_ALIVE_BASE + (r))
+#define WIFI2AP_MEM_CONFIG0 0x0150 /* Control WLBT_MEM_SIZE. */
+#define WLBT2AP_MIF_ACCESS_WIN0 0x0154 /* ACCESS_CONTROL_PERI_IP */
+#define WLBT2AP_MIF_ACCESS_WIN1 0x0158 /* ACCESS_CONTROL_PERI_IP */
+#define WLBT2AP_MIF_ACCESS_WIN2 0x015a /* ACCESS_CONTROL_PERI_IP */
+#define WLBT2AP_MIF_ACCESS_WIN3 0x0160 /* ACCESS_CONTROL_PERI_IP */
+#define WIFI2AP_MEM_CONFIG1 0x0164 /* Control WLBT_MEM_BA0 */
+#define WLBT_BOOT_TEST_RST_CFG 0x0168 /* WLBT_IRAM_BOOT_OFFSET */
+ /* WLBT_IRAM_BOOT_TEST */
+ /* WLBT2AP_PERI_PROT2 */
+#define WLBT2AP_PERI_ACCESS_WIN 0x016c /* WLBT2AP_PERI_ACCESS_END - WLBT2AP_PERI_ACCESS_START */
+#define WIFI2AP_MODAPIF_CONFIG 0x0170 /* WLBT2AP_PERI_ACCESS_END - WLBT2AP_PERI_ACCESS_START */
+#define WIFI2AP_QOS 0x0170 /* RT */
+#define WIFI2AP_MEM_CONFIG2 0x017c /* Control WLBT_MEM_BA1 */
+#define WIFI2AP_MEM_CONFIG3 0x0184 /* Control WLBT_ADDR_RNG */
+
+/* Power down registers */
+#define RESET_ASB_WIFI_SYS_PWR_REG 0x11f4 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define TCXO_GATE_WIFI_SYS_PWR_REG 0x11f0 /* Control power state in LOWPWR mode 1 - on, 0 */
+#define LOGIC_RESET_WIFI_SYS_PWR_REG 0x11f8 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CLEANY_BUS_WIFI_SYS_PWR_REG 0x11fc /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CENTRAL_SEQ_WIFI_CONFIGURATION 0x0380 /* bit 16. Decides whether system-level low-power mode
+ * is used HIGH: System-level Low-Power mode
+ * disabled. LOW: System-level Low-Power mode
+ * enabled. When system enters low-power mode,
+ * this field is automatically cleared to HIGH. */
+
+#define CENTRAL_SEQ_WIFI_STATUS 0x0384 /* 23:16 Check statemachine status */
+#define STATES 0xff0000
+
+#define SYS_PWR_CFG BIT(0)
+#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
+#define SYS_PWR_CFG_16 BIT(16)
+
+
+/* CMU registers to request PLL for USB Clock */
+#define USBPLL_CON0 0x1000
+#define AP2WIFI_USBPLL_REQ BIT(0) /* 1: Request PLL, 0: Release PLL */
+
+#define USBPLL_CON1 0x1004 /* */
+#define AP2WLBT_USBPLL_WPLL_SEL BIT(0) /* 1: WLBT, 0: AP */
+#define AP2WLBT_USBPLL_WPLL_EN BIT(1) /* 1: Enable, 0: Disable */
+
+/***** Interrupts ********
+ *
+ * - MBOX
+ * - WIFI_ACTIVE (page 553)
+ * Comes from BLK_WIFI. The initial value is low and goes high after WIFI boots. If
+ * a problem occurs within WIFI, WIFI_ACTIVE can be driven low again by the WIFI CPU. The AP CPU
+ * detects this high-to-low transition of WIFI_ACTIVE, and the event is routed to the GIC as an
+ * interrupt source. In the ISR, the AP CPU clears the wake source and the interrupt by setting
+ * WIFI_CTRL__WIFI_ACTIVE_CLR. WIFI_ACTIVE_CLR is auto-cleared by the direct-write function.
+ *
+ * - WIFI_RESET_REQ (page 554)
+ * WIFI can request a WIFI reset only via WIFI_RESET_REQ. If WIFI_RESET_REQ is asserted, the AP PMU
+ * detects it as both a wakeup source and an interrupt source. In the ISR, the AP CPU clears the
+ * wakeup source by setting WIFI_CTRL__CP_RESET_REQ_CLR. The interrupt may not be cleared this way,
+ * because it goes to the GIC directly from WIFI (using the mask function within the GIC).
+ * WIFI_RESET_REQ_CLR is auto-cleared by the direct-write function.
+ */
+
+#endif /* __MIF_REG_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_7872_H
+#define __MIF_REG_7872_H
+
+/*********************************/
+/* PLATFORM register definitions */
+/*********************************/
+#define NUM_MBOX_PLAT 8
+#define NUM_SEMAPHORE 12
+
+#define MAILBOX_WLBT_BASE 0x0000
+#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
+#define MCUCTRL 0x000 /* MCU Controller Register */
+/* R0 [31:16] - Int FROM R4/M4 */
+#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
+#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
+#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
+#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
+#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
+/* R1 [15:0] - Int TO R4/M4 */
+#define INTGR1 0x01c /* Interrupt Generation Register 1 */
+#define INTCR1 0x020 /* Interrupt Clear Register 1 */
+#define INTMR1 0x024 /* Interrupt Mask Register 1 */
+#define INTSR1 0x028 /* Interrupt Status Register 1 */
+#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
+#define MIF_INIT 0x04c /* MIF_init */
+#define IS_VERSION 0x050 /* Version Information Register */
+#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
+#define ISSR(r) (ISSR_BASE + (4 * (r)))
+#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
+#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
+#define SEMA0CON 0x1c0
+#define SEMA0STATE 0x1c8
+#define SEMA1CON 0x1e0
+#define SEMA1STATE 0x1e8
+
+
+/* POWER */
+/* Page 594 datasheet */
+/* Base Address - 0x11C8_0000 */
+#define WIFI_CTRL_NS 0x0140 /* WIFI Control SFR non-secure */
+#define WIFI_PWRON BIT(1)
+#define WIFI_RESET_SET BIT(2)
+#define WIFI_ACTIVE_EN BIT(5) /* Enable of WIFI_ACTIVE_REQ */
+#define WIFI_ACTIVE_CLR BIT(6) /* WIFI_ACTIVE_REQ is clear internally on WAKEUP */
+#define WIFI_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WIFI_RESET_REQ */
+#define WIFI_RESET_REQ_CLR BIT(8) /* WIFI_RESET_REQ is clear internally on WAKEUP */
+#define MASK_WIFI_PWRDN_DONE BIT(9) /* 1:mask, 0 : pass RTC clock out enable to WIFI
+ * This mask WIFI_PWRDN_DONE come in from WIFI.
+ * If MASK_WIFI_PWRDN_DONE = 1, WIFI enter to DOWN
+ * state without checking WIFI_PWRDN_DONE*/
+#define RTC_OUT_EN BIT(10) /* 1:enable, 0 : disable This is enable signal on RTC
+ * CLK(32KHz). This clock can be used as WIFI PMU
+ * clock when WIFI is internal power-down and
+ * TCXO(26MHz) is disable at WIFI side.*/
+#define TCXO_ENABLE_SW BIT(11) /* 1:enable, 0 : disable This is enable signal on TCXO
+ * clock of WIFI. This signal can decide whether TCXO
+ * clock is active by software when WIFI is internal
+ * power-down or WIFI is in reset state at WIFI side. if
+ * this value is HIGH, TCXO is active regardless of
+ * hardware control */
+#define MASK_MIF_REQ BIT(12) /* 1:mask MIF_REQ coming from WIFI, 0 : disable */
+#define SET_SW_MIF_REQ BIT(13) /* MIF SLEEP control by SW 1: if MASK_MIF_REQ is
+ set to HIGH, MIF enters into down state by
+ SET_SW_MIF_REQ. */
+#define SWEEPER_BYPASS_DATA_EN BIT(16) /* CLEANY bypass mode control(WIFI2AP MEM path)
+ If this bit is set to 1, CLEANY in MIF block starts
+ operation. If this bit is set to 0, CLEANY is bypass
+ mode.*/
+#define SFR_SERIALIZER_DUR_DATA2REQ (BIT(20) | BIT(21)) /* Duration between DATA and REQUEST on
+ SFR_SERIALIZER */
+
+
+#define WIFI_CTRL_S 0x0144 /* WIFI Control SFR secure */
+#define WIFI_START BIT(3) /* WIFI Reset release control If WIFI_START = 1,
+ * WIFI exit from DOWN state and go to UP state.
+ * If this field is set to high (WIFI_START = 1)
+ * WIFI state can go to UP state. This signal can be
+ * auto-clear by DIRECTWR at UP */
+
+#define WIFI_STAT 0x0148
+#define WIFI_PWRDN_DONE BIT(0) /* Check WIFI power-down status.*/
+#define WIFI_ACCESS_MIF BIT(4) /* Check whether WIFI accesses MIF domain */
+
+#define WIFI_DEBUG 0x014c /* MIF sleep, wakeup debugging control */
+#define EN_MIF_REQ BIT(0) /* Control MIF_REQ through GPIO_ALIVE. */
+#define EN_WIFI_ACTIVE BIT(2) /* Control WIFI_ACTIVE through GPIO_ALIVE. */
+#define EN_MIF_RESET_REQ BIT(3) /* Control WIFI_RESET_REQ through GPIO_ALIVE. */
+#define MASK_CLKREQ_WIFI BIT(8) /* When this field is set to HIGH, ALIVE ignores
+ * CLKREQ from WIFI.*/
+
+/* TODO: Might be 0x10480000 */
+#define PMU_ALIVE_BASE 0x0000
+#define PMU_ALIVE_REG(r) (PMU_ALIVE_BASE + (r))
+#define WIFI2AP_MEM_CONFIG0 0x7300 /* MEM_SIZE SECTION_0 */
+#define WIFI2AP_MEM_CONFIG1 0x7304 /* BASE ADDRESS SECTION 0*/
+#define WIFI2AP_MEM_CONFIG2 0x7300 /* MEM_SIZE SECTION_0 */
+#define WIFI2AP_MEM_CONFIG3 0x7304 /* BASE ADDRESS SECTION 1*/
+#define WIFI2AP_MEM_CONFIG4 0x7300 /* MEM_SIZE SECTION_1 */
+#define WIFI2AP_MEM_CONFIG5 0x7304 /* BASE ADDRESS SECTION 0*/
+#define WIFI2AP_MIF_ACCESS_WIN0 0x7318 /* ACCESS_CONTROL SFR*/
+#define WIFI2AP_MIF_ACCESS_WIN1 0x731c /* ACCESS_CONTROL SFR*/
+#define WIFI2AP_PERI0_ACCESS_WIN0 0x7320 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN1 0x7324 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN2 0x7328 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN3 0x732c /* ACCESS WINDOW PERI */
+#define WLBT_BOOT_TEST_RST_CFG 0x7330 /* WLBT_IRAM_BOOT_OFFSET */
+ /* WLBT_IRAM_BOOT_TEST */
+ /* WLBT2AP_PERI_PROT2 */
+
+/* Power down registers */
+#define RESET_AHEAD_WIFI_PWR_REG 0x1360 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CLEANY_BUS_WIFI_SYS_PWR_REG 0x1364 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define LOGIC_RESET_WIFI_SYS_PWR_REG 0x1368 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define TCXO_GATE_WIFI_SYS_PWR_REG 0x136c /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WIFI_DISABLE_ISO_SYS_PWR_REG 0x1370 /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WIFI_RESET_ISO_SYS_PWR_REG 0x1374 /* Control power state in LOWPWR mode 1 - on, 0 */
+
+#define CENTRAL_SEQ_WIFI_CONFIGURATION 0x0380 /* bit 16. Decides whether system-level low-power mode
+ * is used HIGH: System-level Low-Power mode
+ * disabled. LOW: System-level Low-Power mode
+ * enabled. When system enters low-power mode,
+ * this field is automatically cleared to HIGH. */
+
+#define CENTRAL_SEQ_WIFI_STATUS 0x0384 /* 23:16 Check statemachine status */
+#define STATES 0xff0000
+
+#define SYS_PWR_CFG BIT(0)
+#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
+#define SYS_PWR_CFG_16 BIT(16)
+
+
+/* CMU registers to request PLL for USB Clock */
+#define USBPLL_CON0 0x0200
+#define AP2WIFI_USBPLL_REQ BIT(0) /* 1: Request PLL, 0: Release PLL */
+
+#define USBPLL_CON1 0x0204
+#define AP2WLBT_USBPLL_WPLL_SEL BIT(0) /* 1: WLBT, 0: AP */
+#define AP2WLBT_USBPLL_WPLL_EN BIT(1) /* 1: Enable, 0: Disable */
+
+/* TZASC configuration for Katmai onward */
+#define WLBT_TZASC 0
+#define EXYNOS_SMC_WLBT_TZASC_CMD 0x82000710
+#endif /* __MIF_REG_7872_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_7885_H
+#define __MIF_REG_7885_H
+
+/*********************************/
+/* PLATFORM register definitions */
+/*********************************/
+#define NUM_MBOX_PLAT 8
+#define NUM_SEMAPHORE 12
+
+#define MAILBOX_WLBT_BASE 0x0000
+#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
+#define MCUCTRL 0x000 /* MCU Controller Register */
+/* R0 [31:16] - Int FROM R4/M4 */
+#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
+#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
+#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
+#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
+#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
+/* R1 [15:0] - Int TO R4/M4 */
+#define INTGR1 0x01c /* Interrupt Generation Register 1 */
+#define INTCR1 0x020 /* Interrupt Clear Register 1 */
+#define INTMR1 0x024 /* Interrupt Mask Register 1 */
+#define INTSR1 0x028 /* Interrupt Status Register 1 */
+#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
+#define MIF_INIT 0x04c /* MIF_init */
+#define IS_VERSION 0x050 /* Version Information Register */
+#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
+#define ISSR(r) (ISSR_BASE + (4 * (r)))
+#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
+#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
+#define SEMA0CON 0x1c0
+#define SEMA0STATE 0x1c8
+#define SEMA1CON 0x1e0
+#define SEMA1STATE 0x1e8
+
+
+/* POWER */
+/* Page 1173 datasheet */
+/* Base Address - 0x11C8_0000 */
+#define WIFI_CTRL_NS 0x0140 /* WIFI Control SFR non-secure */
+#define WIFI_PWRON BIT(1)
+#define WIFI_RESET_SET BIT(2)
+#define WIFI_ACTIVE_EN BIT(5) /* Enable of WIFI_ACTIVE_REQ */
+#define WIFI_ACTIVE_CLR BIT(6) /* WIFI_ACTIVE_REQ is clear internally on WAKEUP */
+#define WIFI_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WIFI_RESET_REQ */
+#define WIFI_RESET_REQ_CLR BIT(8) /* WIFI_RESET_REQ is clear internally on WAKEUP */
+#define MASK_WIFI_PWRDN_DONE BIT(9) /* 1:mask, 0 : pass RTC clock out enable to WIFI
+ * This mask WIFI_PWRDN_DONE come in from WIFI.
+ * If MASK_WIFI_PWRDN_DONE = 1, WIFI enter to DOWN
+ * state without checking WIFI_PWRDN_DONE*/
+#define RTC_OUT_EN BIT(10) /* 1:enable, 0 : disable This is enable signal on RTC
+ * CLK(32KHz). This clock can be used as WIFI PMU
+ * clock when WIFI is internal power-down and
+ * TCXO(26MHz) is disable at WIFI side.*/
+#define TCXO_ENABLE_SW BIT(11) /* 1:enable, 0 : disable This is enable signal on TCXO
+ * clock of WIFI. This signal can decide whether TCXO
+ * clock is active by software when WIFI is internal
+ * power-down or WIFI is in reset state at WIFI side. if
+ * this value is HIGH, TCXO is active regardless of
+ * hardware control */
+#define MASK_MIF_REQ BIT(12) /* 1:mask MIF_REQ coming from WIFI, 0 : disable */
+#define SET_SW_MIF_REQ BIT(13) /* MIF SLEEP control by SW 1: if MASK_MIF_REQ is
+ set to HIGH, MIF enters into down state by
+ SET_SW_MIF_REQ. */
+#define SWEEPER_BYPASS_DATA_EN BIT(16) /* CLEANY bypass mode control(WIFI2AP MEM path)
+ If this bit is set to 1, CLEANY in MIF block starts
+ operation. If this bit is set to 0, CLEANY is bypass
+ mode.*/
+#define SFR_SERIALIZER_DUR_DATA2REQ (BIT(20) | BIT(21)) /* Duration between DATA and REQUEST on
+ SFR_SERIALIZER */
+
+
+#define WIFI_CTRL_S 0x0144 /* WIFI Control SFR secure */
+#define WIFI_START BIT(3) /* WIFI Reset release control If WIFI_START = 1,
+ * WIFI exit from DOWN state and go to UP state.
+ * If this field is set to high (WIFI_START = 1)
+ * WIFI state can go to UP state. This signal can be
+ * auto-clear by DIRECTWR at UP */
+
+#define WIFI_STAT 0x0148
+#define WIFI_PWRDN_DONE BIT(0) /* Check WIFI power-down status.*/
+#define WIFI_ACCESS_MIF BIT(4) /* Check whether WIFI accesses MIF domain */
+
+#define WIFI_DEBUG 0x014c /* MIF sleep, wakeup debugging control */
+#define EN_MIF_REQ BIT(0) /* Control MIF_REQ through GPIO_ALIVE. */
+#define EN_WIFI_ACTIVE BIT(2) /* Control WIFI_ACTIVE through GPIO_ALIVE. */
+#define EN_MIF_RESET_REQ BIT(3) /* Control WIFI_RESET_REQ through GPIO_ALIVE. */
+#define MASK_CLKREQ_WIFI BIT(8) /* When this field is set to HIGH, ALIVE ignores
+ * CLKREQ from WIFI.*/
+
+/* TODO: Might be 0x10480000 */
+#define PMU_ALIVE_BASE 0x0000
+#define PMU_ALIVE_REG(r) (PMU_ALIVE_BASE + (r))
/* NOTE(review): CONFIG0/2/4 all map to offset 0x7300 and CONFIG1/3/5 all
 * map to 0x7304, and the SECTION numbers in the trailing comments do not
 * line up with the macro names. This looks like a copy-paste left-over;
 * verify the per-section offsets against the PMU datasheet before relying
 * on CONFIG2..CONFIG5.
 */
#define WIFI2AP_MEM_CONFIG0 0x7300 /* MEM_SIZE SECTION_0 */
#define WIFI2AP_MEM_CONFIG1 0x7304 /* BASE ADDRESS SECTION 0*/
#define WIFI2AP_MEM_CONFIG2 0x7300 /* MEM_SIZE SECTION_0 */
#define WIFI2AP_MEM_CONFIG3 0x7304 /* BASE ADDRESS SECTION 1*/
#define WIFI2AP_MEM_CONFIG4 0x7300 /* MEM_SIZE SECTION_1 */
#define WIFI2AP_MEM_CONFIG5 0x7304 /* BASE ADDRESS SECTION 0*/
+#define WIFI2AP_MIF_ACCESS_WIN0 0x7318 /* ACCESS_CONTROL SFR*/
+#define WIFI2AP_MIF_ACCESS_WIN1 0x731c /* ACCESS_CONTROL SFR*/
+#define WIFI2AP_PERI0_ACCESS_WIN0 0x7320 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN1 0x7324 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN2 0x7328 /* ACCESS WINDOW PERI */
+#define WIFI2AP_PERI0_ACCESS_WIN3 0x732c /* ACCESS WINDOW PERI */
+#define WLBT_BOOT_TEST_RST_CFG 0x7330 /* WLBT_IRAM_BOOT_OFFSET */
+ /* WLBT_IRAM_BOOT_TEST */
+ /* WLBT2AP_PERI_PROT2 */
+
+/* Power down registers */
+#define RESET_AHEAD_WIFI_PWR_REG 0x1360 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CLEANY_BUS_WIFI_SYS_PWR_REG 0x1364 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define LOGIC_RESET_WIFI_SYS_PWR_REG 0x1368 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define TCXO_GATE_WIFI_SYS_PWR_REG 0x136c /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WIFI_DISABLE_ISO_SYS_PWR_REG 0x1370 /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WIFI_RESET_ISO_SYS_PWR_REG 0x1374 /* Control power state in LOWPWR mode 1 - on, 0 */
+
+#define CENTRAL_SEQ_WIFI_CONFIGURATION 0x0380 /* bit 16. Decides whether system-level low-power mode
+ * is used HIGH: System-level Low-Power mode
+ * disabled. LOW: System-level Low-Power mode
+ * enabled. When system enters low-power mode,
+ * this field is automatically cleared to HIGH. */
+
+#define CENTRAL_SEQ_WIFI_STATUS 0x0384 /* 23:16 Check statemachine status */
+#define STATES 0xff0000
+
+#define SYS_PWR_CFG BIT(0)
+#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
+#define SYS_PWR_CFG_16 BIT(16)
+
+/* TZASC configuration for Katmai onward */
+#define WLBT_TZASC 0
+#define EXYNOS_SMC_WLBT_TZASC_CMD 0x82000710
+#endif /* __MIF_REG_7885_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_9610_H
+#define __MIF_REG_9610_H
+
+/*********************************/
+/* PLATFORM register definitions */
+/*********************************/
+#define NUM_MBOX_PLAT 8
+#define NUM_SEMAPHORE 12
+
+#define MAILBOX_WLBT_BASE 0x0000
+#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
+#define MCUCTRL 0x000 /* MCU Controller Register */
+/* R0 [31:16] - Int FROM R4/M4 */
+#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
+#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
+#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
+#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
+#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
+/* R1 [15:0] - Int TO R4/M4 */
+#define INTGR1 0x01c /* Interrupt Generation Register 1 */
+#define INTCR1 0x020 /* Interrupt Clear Register 1 */
+#define INTMR1 0x024 /* Interrupt Mask Register 1 */
+#define INTSR1 0x028 /* Interrupt Status Register 1 */
+#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
+#define MIF_INIT 0x04c /* MIF_init */
+#define IS_VERSION 0x050 /* Version Information Register */
+#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
+#define ISSR(r) (ISSR_BASE + (4 * (r)))
+#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
+#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
+#define SEMA0CON 0x1c0
+#define SEMA0STATE 0x1c8
+#define SEMA1CON 0x1e0
+#define SEMA1STATE 0x1e8
+
+/* POWER */
+/* Exynos 9610 UM - 9.9.1.16 */
+#define WLBT_CTRL_NS 0x0050 /* WLBT Control SFR non-secure */
+
+#define WLBT_PWRON BIT(1)
+#define WLBT_RESET_SET BIT(0) /* WLBT reset assertion control by using
+ * PMU_ALIVE_WLBT.
+ * 0x1: Reset Assertion,
+ * 0x0: Reset Release
+ */
+#define WLBT_ACTIVE_EN BIT(5) /* Enable of WIFI_ACTIVE_REQ */
+#define WLBT_ACTIVE_CLR BIT(6) /* WLBT_ACTIVE_REQ is clear internally on WAKEUP */
+#define WLBT_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WLBT_RESET_REQ */
+#define WLBT_RESET_REQ_CLR BIT(8) /* WLBT_RESET_REQ is clear internally on WAKEUP */
+#define MASK_PWR_REQ BIT(18) /* 1:mask PWR_REQ coming from WLBT, 0 : disable */
+#define MASK_TCXO_REQ BIT(20) /* 1:mask TCXO_REQ coming from CP,
+ * 0:enable request source
+ */
+
+#define RTC_OUT_EN BIT(10) /* 1:enable, 0 : disable This is enable signal on RTC
+ * CLK(32KHz). This clock can be used as WLBT PMU
+ * clock when WLBT is internal power-down and
+ * TCXO(26MHz) is disable at WLBT side.
+ */
+#define TCXO_ENABLE_SW BIT(11) /* 1:enable, 0 : disable This is enable signal on TCXO
+ * clock of WLBT. This signal can decide whether TCXO
+ * clock is active by software when WLBT is internal
+ * power-down or WLBT is in reset state at WLBT side. if
+ * this value is HIGH, TCXO is active regardless of
+ * hardware control
+ */
+#define MASK_MIF_REQ BIT(12) /* 1:mask MIF_REQ coming from WLBT, 0 : disable */
+#define SET_SW_MIF_REQ BIT(13) /* MIF SLEEP control by SW 1: if MASK_MIF_REQ is
+ * set to HIGH, MIF enters into down state by
+ * SET_SW_MIF_REQ.
+ */
+#define SWEEPER_BYPASS_DATA_EN BIT(16) /* SWEEPER bypass mode control(WLBT2AP path) If
+ * this bit is set to 1, SWEEPER is bypass mode.
+ */
+
+#define WLBT_CTRL_S 0x0054 /* WLBT Control SFR secure */
+#define WLBT_START BIT(0) /* WLBT initial Reset release control
+ * If CP_START = 1, PMU_RESET_SEQUENCER_CP
+ * starts initial reset release sequence
+ * and goes to UP state.
+ */
+
+#define WLBT_STAT 0x0058
+#define WLBT_PWRDN_DONE BIT(0) /* Check WLBT power-down status.*/
#define WLBT_ACCESS_MIF BIT(4) /* Check whether WLBT accesses MIF domain */
+
+#define WLBT_DEBUG 0x005c /* MIF sleep, wakeup debugging control */
+#define EN_MIF_REQ BIT(0) /* Control MIF_REQ through GPIO_ALIVE. */
+#define EN_WLBT_ACTIVE BIT(2) /* Control WLBT_ACTIVE through GPIO_ALIVE. */
+#define EN_WLBT_RESET_REQ BIT(3) /* Control WLBT_RESET_REQ through GPIO_ALIVE. */
+#define MASK_CLKREQ_WLBT BIT(8) /* When this field is set to HIGH, ALIVE ignores
+ * CLKREQ from WLBT.
+ */
+
+/* New WLBT SFRs for MEM config */
+
+/* end address is exclusive so the ENDx register should be set to the first
+ * address that is not accessible through that BAAW.
+ *
+ * Another very important point to note here is we are using BAAW0 to expose
+ * 16MB region, so other BAAWs can be used for other purposes
+ */
+#define WLBT_DBUS_BAAW_0_START 0x80000000
+#define WLBT_DBUS_BAAW_0_END WLBT_DBUS_BAAW_4_START
+#define WLBT_DBUS_BAAW_1_START 0x80400000
+#define WLBT_DBUS_BAAW_1_END WLBT_DBUS_BAAW_2_START
+#define WLBT_DBUS_BAAW_2_START 0x80800000
+#define WLBT_DBUS_BAAW_2_END WLBT_DBUS_BAAW_3_START
+#define WLBT_DBUS_BAAW_3_START 0x80C00000
+#define WLBT_DBUS_BAAW_3_END WLBT_DBUS_BAAW_4_START
+#define WLBT_DBUS_BAAW_4_START 0x81000000
+#define WLBT_DBUS_BAAW_4_END 0x81400000
+
+#define WLBT_BAAW_CON_INIT_DONE (1 << 31)
+#define WLBT_BAAW_CON_EN_WRITE (1 << 1)
+#define WLBT_BAAW_CON_EN_READ (1 << 0)
+#define WLBT_BAAW_ACCESS_CTRL (WLBT_BAAW_CON_INIT_DONE | WLBT_BAAW_CON_EN_WRITE | WLBT_BAAW_CON_EN_READ)
+
+#define WLBT_PBUS_BAAW_0_START 0xA0000000
+#define WLBT_PBUS_BAAW_0_END WLBT_PBUS_BAAW_1_START
+#define WLBT_PBUS_BAAW_1_START 0xA0010000
+#define WLBT_PBUS_BAAW_1_END WLBT_PBUS_BAAW_2_START
+#define WLBT_PBUS_BAAW_2_START 0xA0060000
+#define WLBT_PBUS_BAAW_2_END WLBT_PBUS_BAAW_3_START
+#define WLBT_PBUS_BAAW_3_START 0xA0100000
+#define WLBT_PBUS_BAAW_3_END WLBT_PBUS_BAAW_4_START
+#define WLBT_PBUS_BAAW_4_START 0xA0110000
+#define WLBT_PBUS_BAAW_4_END WLBT_PBUS_BAAW_5_START
+#define WLBT_PBUS_BAAW_5_START 0xA0120000
+#define WLBT_PBUS_BAAW_5_END 0xA0160000
+
+#define WLBT_PBUS_MBOX_CP2WLBT_BASE 0x11950000//0xA0000000
+#define WLBT_PBUS_MBOX_SHUB2WLBT_BASE 0x119A0000//0xA0010000
+#define WLBT_PBUS_USI_CMG00_BASE 0x11D00000//0xA0060000
+#define WLBT_PBUS_SYSREG_CMGP2WLBT_BASE 0x11C80000//0xA0100000
+#define WLBT_PBUS_GPIO_CMGP_BASE 0x11C20000//0xA0110000
+#define WLBT_PBUS_SHUB_BASE 0x11200000//0xA0120000
+
+/* EMA settings overloaded onto CHIP_VERSION_ID SFR
+ * (remap block)
+ */
+#define CHIP_VERSION_ID_VER_MASK 0xffc00000 /* [22:32] Version ID */
+#define CHIP_VERSION_ID_EMA_MASK 0x003fffff /* [0:21] EMA params */
+#define CHIP_VERSION_ID_EMA_VALUE (BIT(20) | \
+ BIT(18) | \
+ BIT(13) | \
+ BIT(11) | \
+ BIT(5) | \
+ BIT(2) )
+
+/* Power down registers */
+#define RESET_AHEAD_WLBT_SYS_PWR_REG 0x1360 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CLEANY_BUS_WLBT_SYS_PWR_REG 0x1364 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define LOGIC_RESET_WLBT_SYS_PWR_REG 0x1368 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define TCXO_GATE_WLBT_SYS_PWR_REG 0x136C /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WLBT_DISABLE_ISO_SYS_PWR_REG 0x1370 /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WLBT_RESET_ISO_SYS_PWR_REG 0x1374 /* Control power state in LOWPWR mode 1 - on, 0 */
+
+#define CENTRAL_SEQ_WLBT_CONFIGURATION 0x0180 /* bit 16. Decides whether system-level low-power mode
+ * is used HIGH: System-level Low-Power mode
+ * disabled. LOW: System-level Low-Power mode
+ * enabled. When system enters low-power mode,
+ * this field is automatically cleared to HIGH.
+ */
+
+#define CENTRAL_SEQ_WLBT_STATUS 0x0184 /* 23:16 Check statemachine status */
+#define STATES 0xff0000
+
+#define SYS_PWR_CFG BIT(0)
+#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
+#define SYS_PWR_CFG_16 BIT(16)
+
+/* TZASC (TrustZone Address Space Controller) configuration for Katmai onwards */
+#define EXYNOS_SET_CONN_TZPC 0
+//#define SMC_CMD_CONN_IF 0x82000710
+#endif /* __MIF_REG_9610_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_9630_H
+#define __MIF_REG_9630_H
+
+/*********************************/
+/* PLATFORM register definitions */
+/*********************************/
+#define NUM_MBOX_PLAT 8
+#define NUM_SEMAPHORE 12
+
+#define MAILBOX_WLBT_BASE 0x0000
+#define MAILBOX_WLBT_REG(r) (MAILBOX_WLBT_BASE + (r))
+#define MCUCTRL 0x000 /* MCU Controller Register */
+/* R0 [31:16] - Int FROM R4/M4 */
+#define INTGR0 0x008 /* Interrupt Generation Register 0 (r/w) */
+#define INTCR0 0x00C /* Interrupt Clear Register 0 (w) */
+#define INTMR0 0x010 /* Interrupt Mask Register 0 (r/w) */
+#define INTSR0 0x014 /* Interrupt Status Register 0 (r) */
+#define INTMSR0 0x018 /* Interrupt Mask Status Register 0 (r) */
+/* R1 [15:0] - Int TO R4/M4 */
+#define INTGR1 0x01c /* Interrupt Generation Register 1 */
+#define INTCR1 0x020 /* Interrupt Clear Register 1 */
+#define INTMR1 0x024 /* Interrupt Mask Register 1 */
+#define INTSR1 0x028 /* Interrupt Status Register 1 */
+#define INTMSR1 0x02c /* Interrupt Mask Status Register 1 */
+#define MIF_INIT 0x04c /* MIF_init */
+#define IS_VERSION 0x050 /* Version Information Register */
+#define ISSR_BASE 0x080 /* IS_Shared_Register Base address */
+#define ISSR(r) (ISSR_BASE + (4 * (r)))
+#define SEMAPHORE_BASE 0x180 /* IS_Shared_Register Base address */
+#define SEMAPHORE(r) (SEMAPHORE_BASE + (4 * (r)))
+#define SEMA0CON 0x1c0
+#define SEMA0STATE 0x1c8
+#define SEMA1CON 0x1e0
+#define SEMA1STATE 0x1e8
+
+#define WLBT_PBUS_BASE 0x14400000
+
+// CBUS : APM_BUS
+// PBUS : CFG_BUS
+
+/* New WLBT SFRs for MEM config */
+#define WLBT_PBUS_D_TZPC_SFR (WLBT_PBUS_BASE + 0x10000)
+#define WLBT_PBUS_BAAW_DBUS (WLBT_PBUS_BASE + 0x20000)
+#define WLBT_PBUS_BAAW_CBUS (WLBT_PBUS_BASE + 0x30000)
+#define WLBT_PBUS_SMAPPER (WLBT_PBUS_BASE + 0x40000)
+#define WLBT_PBUS_SYSREG (WLBT_PBUS_BASE + 0x50000)
+#define WLBT_PBUS_BOOT (WLBT_PBUS_BASE + 0x60000)
+
+#define TZPC_PROT0STAT 0x14410200
+#define TZPC_PROT0SET 0x14410204
+
+/* POWER */
+
+/* Exynos 9630 UM - 9.8.719 */
+#define WLBT_CONFIGURATION 0x3300
+#define LOCAL_PWR_CFG BIT(0) /* Control power state 0: Power down 1: Power on */
+
+/* Exynos 9630 UM - 9.8.720 */
+#define WLBT_STATUS 0x3304
+#define STATUS BIT(0) /* Status 0 : Power down 1 : Power on */
+
+/* Exynos 9630 UM - 9.8.721 */
+#define WLBT_STATES 0x3308 /* STATES [7:0] States index for debugging
+ * 0x00 : Reset
+ * 0x10 : Power up
+ * 0x80 : Power down
+ * */
+
+/* Exynos 9630 UM - 9.8.723 */
+#define WLBT_CTRL_NS 0x3310 /* WLBT Control SFR non-secure */
+#define WLBT_ACTIVE_CLR BIT(8) /* WLBT_ACTIVE_REQ is clear internally on WAKEUP */
+#define WLBT_ACTIVE_EN BIT(7) /* Enable of WIFI_ACTIVE_REQ */
+#define SW_TCXO_REQ BIT(6) /* SW TCXO Request register, if MASK_TCXO_REQ
                                * field value is 1, this register value controls TCXO Request */
+#define MASK_TCXO_REQ BIT(5) /* 1:mask TCXO_REQ coming from CP,
+ * 0:enable request source
+ */
+#define TCXO_GATE BIT(4) /* TCXO gate control 0: TCXO enabled 1: TCXO gated */
+/*#define SET_SW_MIF_REQ BIT(13)*/ /* MIF SLEEP control by SW 1: if MASK_MIF_REQ is
+ * set to HIGH, MIF enters into down state by
+ * SET_SW_MIF_REQ.
+ */
+/*#define MASK_MIF_REQ BIT(12)*/ /* 1:mask MIF_REQ coming from WLBT, 0 : disable */
+/*#define RTC_OUT_EN BIT(10)*/ /* 1:enable, 0 : disable This is enable signal on RTC
+ * CLK(32KHz). This clock can be used as WLBT PMU
+ * clock when WLBT is internal power-down and
+ * TCXO(26MHz) is disable at WLBT side.
+ */
+
+
+/*------------------------------------*/
+
+//??????#define WLBT_PWRON BIT(1)
+#define WLBT_RESET_SET BIT(0) /* WLBT reset assertion control by using
+ * PMU_ALIVE_WLBT.
+ * 0x1: Reset Assertion,
+ * 0x0: Reset Release
+ */
#define WLBT_RESET_REQ_EN BIT(7) /* 1:enable, 0:disable Enable of WLBT_RESET_REQ */
#define WLBT_RESET_REQ_CLR BIT(8) /* WLBT_RESET_REQ is clear internally on WAKEUP */
/* NOTE(review): MASK_PWR_REQ and TCXO_ENABLE_SW below both expand to BIT(1),
 * which is also the (commented-out) WLBT_PWRON position above. At most one of
 * these can be the correct field for this SFR -- verify both bit positions
 * against the Exynos 9630 PMU documentation.
 */
#define MASK_PWR_REQ BIT(1) /* 1:mask PWR_REQ coming from WLBT, 0 : disable */
#define TCXO_ENABLE_SW BIT(1) /* 1:enable, 0 : disable This is enable signal on TCXO
                                * clock of WLBT. This signal can decide whether TCXO
                                * clock is active by software when WLBT is internal
                                * power-down or WLBT is in reset state at WLBT side. if
                                * this value is HIGH, TCXO is active regardless of
                                * hardware control
                                */
+/* from wlbt_if_S5E7920.h excite code */
+
+/* PMU_ALIVE Bit Field */
+/* WLBT_CTRL_NS */
+//#define CLEANY_BYPASS_DATA_EN BIT(16)
+//#define SET_SW_MIF_REQ BIT(13)
+//#define MASK_MIF_REQ BIT(12)
+//#define RTC_OUT_EN BIT(10)
+//#define MASK_WLBT_PWRDN_DONE BIT(9)
+//#define WLBT_RESET_REQ_CLR BIT(8)
+//#define WLBT_RESET_REQ_EN BIT(7)
+//#define WLBT_ACTIVE_CLR BIT(6)
+//#define WLBT_ACTIVE_EN BIT(5)
+//#define WLBT_RESET_SET BIT(0)
+//#define WLBT_PWRON BIT(1)
+
+/* WLBT_CTRL_S */
+//#define WLBT_START BIT(0)
+
+/* WLBT_STAT */
+//#define WLBT_ACCESS_MIF BIT(4)
+//#define WLBT_PWRDN_DONE BIT(0)
+
+/* WLBT_DEBUG */
+//#define MASK_CLKREQ_WLBT BIT(8)
+//#define EN_PWR_REQ BIT(5)
+//#define EN_WLBT_WAKEUP_REQ BIT(4)
+//#define EN_WLBT_RESET_REQ BIT(3)
+//#define EN_WLBT_ACTIVE BIT(2)
+//#define EN_MIF_REQ BIT(0)
+
+/* WLBT_BOOT_TEST_RST_CONFIG */
+#define WLBT_IRAM_BOOT_OFFSET (BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define WLBT_IRAM_BOOT_TEST BIT(5)
+#define WLBT_NOREMAP_BOOT_TEST BIT(4)
+#define WLBT2AP_PERI_PROT2 BIT(2)
+
+/* WLBT_QOS */
+#define WLBT_AWQOS (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define WLBT_ARQOS (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define WLBT_QOS_OVERRIDE BIT(0)
+
+/*------------------------------------*/
+
+/* Exynos 9630 UM - 9.8.724 */
+#define WLBT_CTRL_S 0x3314 /* WLBT Control SFR secure */
+#define WLBT_START BIT(3) /* CP control enable 0: Disable 1: Enable */
+
+/* Exynos 9630 UM - 9.8.725 */
+#define WLBT_OUT 0x3320
+#define SWEEPER_BYPASS BIT(13) /* SWEEPER bypass mode control(WLBT2AP path) If
+ * this bit is set to 1, SWEEPER is bypass mode.
+ */
+#define SWEEPER_PND_CLR_REQ BIT(7) /* SWEEPER_CLEAN Request. SWPPER is the IP
+ * that can clean up hung transaction in the Long hop
+ * async Bus Interface, when <SUBSYS> get hung
+ * state. 0: Normal 1: SWEEPER CLEAN Requested
+ */
+
+/* Exynos 9630 UM - 9.8.726 */
+#define WLBT_IN 0x3324
+#define BUS_READY BIT(4) /* BUS ready indication signal when reset released. 0:
+ * Normal 1: BUS ready state */
+#define PWRDOWN_IND BIT(2) /* PWRDOWN state indication 0: Normal 1: In the
+ * power down state */
+#define SWEEPER_PND_CLR_ACK BIT(0) /* SWEEPER_CLEAN ACK signal. SWPPER is the IP
+ * that can clean up hung transaction in the Long hop
+ * async Bus Interface, when <SUBSYS> get hung
+ * state. 0: Normal 1: SWEEPER CLEAN
+ * Acknowledged */
+/* Exynos 9630 UM - 9.8.728 */
+#define WLBT_INT_EN 0x3344
+#define PWR_REQ_F BIT(3)
+#define TCXO_REQ_F BIT(5)
+
+/* Exynos 9630 UM - 9.8.10 */
+#define WLBT_STAT 0x0058
+#define WLBT_PWRDN_DONE BIT(0) /* Check WLBT power-down status.*/
+#define WLBT_ACCESS_MIF BIT(4) /* Check whether WLBT accesses MIF domain */
+
+/* Exynos 9630 UM - 9.8.11 */
+#define WLBT_DEBUG 0x005c /* MIF sleep, wakeup debugging control */
+/* need to find where have they moved */
+//#define EN_MIF_REQ BIT(0) /* Control MIF_REQ through GPIO_ALIVE. */
+//#define EN_WLBT_ACTIVE BIT(2) /* Control WLBT_ACTIVE through GPIO_ALIVE. */
+//#define EN_WLBT_RESET_REQ BIT(3) /* Control WLBT_RESET_REQ through GPIO_ALIVE. */
+#define MASK_CLKREQ_WLBT BIT(8) /* When this field is set to HIGH, ALIVE ignores
+ * CLKREQ from WLBT.
+ */
+
+#define RESET_SEQUENCER_STATUS 0x0504
+#define RESET_STATUS_MASK (BIT(10)|BIT(9)|BIT(8))
+#define RESET_STATUS (5 << 8)
+
+#define PMU_SHARED_PWR_REQ_WLBT_CONTROL_STATUS 0x8008
+#define CTRL_STATUS_MASK 0x1
+
+#define CLEANY_BUS_WLBT_CONFIGURATION 0x3b20
+#define CLEANY_CFG_MASK 0x1
+
+#define CLEANY_BUS_WLBT_STATUS 0x3b24
+#define CLEANY_STATUS_MASK (BIT(17)|BIT(16))
+
+/* Exynos 9630 UM - 9.8.763 */
+#define SYSTEM_OUT 0x3a20
+#define PWRRGTON_CON BIT(9) /* XPWRRTON_CON control 0: Disable 1: Enable */
+
+/* Exynos 9630 UM - 9.8.812 */
+#define TCXO_BUF_CTRL 0x3c10
+#define TCXO_BUF_BIAS_EN_WLBT BIT(2)
+#define TCXO_BUF_EN_WLBT BIT(3)
+
+/* New WLBT SFRs for MEM config */
+
+/* end address is exclusive so the ENDx register should be set to the first
+ * address that is not accessible through that BAAW.
+ *
+ * Another very important point to note here is we are using BAAW0 to expose
+ * 16MB region, so other BAAWs can be used for other purposes
+ */
+#define WLBT_DBUS_BAAW_0_START 0x80000000 // Start of DRAM for WLBT R7
+#define WLBT_DBUS_BAAW_0_END WLBT_DBUS_BAAW_4_START // 16 MB
+#define WLBT_DBUS_BAAW_1_START 0x80400000
+#define WLBT_DBUS_BAAW_1_END WLBT_DBUS_BAAW_2_START
+#define WLBT_DBUS_BAAW_2_START 0x80800000
+#define WLBT_DBUS_BAAW_2_END WLBT_DBUS_BAAW_3_START
+#define WLBT_DBUS_BAAW_3_START 0x80C00000
+#define WLBT_DBUS_BAAW_3_END WLBT_DBUS_BAAW_4_START
+#define WLBT_DBUS_BAAW_4_START 0x81000000
+#define WLBT_DBUS_BAAW_4_END 0x813FFFFF
+
+#define WLBT_BAAW_CON_INIT_DONE (1 << 31)
+#define WLBT_BAAW_CON_EN_WRITE (1 << 1)
+#define WLBT_BAAW_CON_EN_READ (1 << 0)
+#define WLBT_BAAW_ACCESS_CTRL (WLBT_BAAW_CON_INIT_DONE | WLBT_BAAW_CON_EN_WRITE | WLBT_BAAW_CON_EN_READ)
+
+/* ref Confluence Maxwell450+Memory+Map */
+#define WLBT_CBUS_BAAW_0_START 0xA0000000 // CP2WLBT MBOX
+#define WLBT_CBUS_BAAW_0_END 0xA000FFFF
+
+#define WLBT_CBUS_BAAW_1_START 0xA0010000 // GNSS,APM,AP,ABOX,CHUB2WLBT MBOX
+#define WLBT_CBUS_BAAW_1_END 0xA005FFFF
+
+#define WLBT_CBUS_BAAW_2_START 0xA0060000 // CMGP SFR GPIO_CMGP_BASE
+#define WLBT_CBUS_BAAW_2_END 0xA009FFFF
+
+#define WLBT_CBUS_BAAW_3_START 0xA00A0000 // CMGP SFR SYSREG_CMGP2WLBT_BASE
+#define WLBT_CBUS_BAAW_3_END 0xA00CFFFF
+
+#define WLBT_CBUS_BAAW_4_START 0xA00D0000 // CMGP SFR USI_CMG00_BASE
+#define WLBT_CBUS_BAAW_4_END 0xA015FFFF
+
+#define WLBT_CBUS_BAAW_5_START 0xA0160000 // CHUB SFR CHUB_USICHUB0_BASE
+#define WLBT_CBUS_BAAW_5_END 0xA01BFFFF
+
+#define WLBT_CBUS_BAAW_6_START 0xA01C0000 // CHUB SFR CHUB_BASE
+#define WLBT_CBUS_BAAW_6_END 0xA01EFFFF
+
+#define WLBT_PBUS_MBOX_CP2WLBT_BASE 0x10F50000
+#define WLBT_PBUS_MBOX_GNSS2WLBT_BASE 0x10FA0000
+#define WLBT_PBUS_MBOX_SHUB2WLBT_BASE 0x119A0000
+#define WLBT_PBUS_USI_CMG00_BASE 0x11500000
+#define WLBT_PBUS_SYSREG_CMGP2WLBT_BASE 0x11490000
+#define WLBT_PBUS_GPIO_CMGP_BASE 0x11430000
+#define WLBT_PBUS_CHUB_USICHUB0_BASE 0x11B70000
+#define WLBT_PBUS_CHUB_BASE 0x11A00000
+
+
+
+/* EMA settings overloaded onto CHIP_VERSION_ID SFR
+ * (remap block)
+ */
+#define CHIP_VERSION_ID_VER_MASK 0xffc00000 /* [22:32] Version ID */
+#define CHIP_VERSION_ID_EMA_MASK 0x003fffff /* [0:21] EMA params */
+#define CHIP_VERSION_ID_EMA_VALUE (BIT(20) | \
+ BIT(18) | \
+ BIT(13) | \
+ BIT(11) | \
+ BIT(5) | \
+ BIT(2) )
+/* Power down registers */
+#define RESET_AHEAD_WLBT_SYS_PWR_REG 0x1360 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define CLEANY_BUS_WLBT_SYS_PWR_REG 0x1364 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define LOGIC_RESET_WLBT_SYS_PWR_REG 0x1368 /* Control power state in LOWPWR mode 1 - on, 0 - down*/
+#define TCXO_GATE_WLBT_SYS_PWR_REG 0x136C /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WLBT_DISABLE_ISO_SYS_PWR_REG 0x1370 /* Control power state in LOWPWR mode 1 - on, 0 */
+#define WLBT_RESET_ISO_SYS_PWR_REG 0x1374 /* Control power state in LOWPWR mode 1 - on, 0 */
+
+#define CENTRAL_SEQ_WLBT_CONFIGURATION 0x0180 /* bit 16. Decides whether system-level low-power mode
+ * is used HIGH: System-level Low-Power mode
+ * disabled. LOW: System-level Low-Power mode
+ * enabled. When system enters low-power mode,
+ * this field is automatically cleared to HIGH.
+ */
+
+//#define CENTRAL_SEQ_WLBT_STATUS 0x0184 /* 23:16 Check statemachine status */
+//#define STATES 0xff0000
+
+#define SYS_PWR_CFG BIT(0)
+#define SYS_PWR_CFG_2 (BIT(0) | BIT(1))
+#define SYS_PWR_CFG_16 BIT(16)
+
+/* TZASC (TrustZone Address Space Controller) configuration for Katmai onwards */
+#define EXYNOS_SET_CONN_TZPC 0
+#define SMC_CMD_CONN_IF (0x82000710)
+#endif /* __MIF_REG_9630_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIF_REG_SMAPPER_H
+#define __MIF_REG_SMAPPER_H
+
+/***************************************/
+/* SMAPPER v2.0.1 register definitions */
+/***************************************/
+#define NUM_BANKS_160 4
+#define NUM_BANKS_64 7
+#define NUM_BANKS (NUM_BANKS_160 + NUM_BANKS_64)
+
+/* It enables CP_ADDR_MAP */
+#define ADDR_MAP_EN_BASE 0x000
+#define ADDR_MAP_EN(b) (ADDR_MAP_EN_BASE + (0x10 * (b)))
+
/* SRAM write control. Set this register before CP initializes SRAM.
 * If this bit is disabled, CP cannot access SRAM via the APB I/F. You
 * need to disable ADDR_MAP_CTRL before you set this bit. 1'b1 means
 * SRAM write, 1'b0 means SRAM read.
 */
+#define SRAM_WRITE_CTRL_BASE 0x004
+#define SRAM_WRITE_CTRL(b) (SRAM_WRITE_CTRL_BASE + (0x10 * (b)))
+
/* It defines the start address of the CP virtual address for 0-31. You
 * need to disable ADDR_MAP_EN before you set this bit.
 */
+#define START_ADDR_BASE 0x008
+#define START_ADDR(b) (START_ADDR_BASE + (0x10 * (b)))
+
+/* For CP_ADDR_GRANULARITY between 0-31. You
+ * need to disable ADDR_MAP_EN before you set this bit
+ */
+#define ADDR_GRANULARITY_BASE 0x00c
+#define ADDR_GRANULARITY(b) (ADDR_GRANULARITY_BASE + (0x10 * (b)))
+
+/* It defines the MSB part of 36-bit AP phys address [35:0]. It is starting
+ * point of access permission.
+ */
+#define AW_START_ADDR 0x100
+/* It defines the MSB part of 36-bit AP phys address [35:0]. It is end
+ * point of access permission.
+ */
+#define AW_END_ADDR 0x104
+/* It defines out-of-bound of access windows when it is set to 1'b1 by
+ * Access Window
+ */
+#define AW_ADDR_MAP_STATUS 0x200
+#define ORIGIN_ADDR_AR 0x204
+#define ORIGIN_ADDR_AW 0x208
+/* Read APB bus errors */
+#define APB_STATUS_0 0x300
+#define APB_STATUS_1 0x304
+
+/* The Q-channel interfaces enable communication to an external
+ * power controller
+ */
+#define SMAPPER_QCH_DISABLE 0x400
+
/* SRAM r/w address.
 * PWDATA[24:0] is used for 25-bit SRAM read/write.
 * Access is only possible while ADDR_MAP_EN is disabled.
 */
#define SRAM_BANK_BASE 0x1000

/* Address of register @r in bank @b.
 * Fix: parenthesise (b) so that expression arguments (e.g. i + 1) expand
 * correctly -- previously `b * 0x400` bound the multiplication to only the
 * last operand of the argument expression. */
#define SRAM_BANK_INDEX(b, r) ((SRAM_BANK_BASE + ((b) * 0x400)) + (4 * (r)))
+
+#define ADDR_MAP_EN_BIT BIT(0)
+
#endif /* __MIF_REG_SMAPPER_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Uses */
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mif_abs.h"
+
+/* Implements */
+#include "mifintrbit.h"
+
+/* default handler just logs a warning and clears the bit */
+static void mifintrbit_default_handler(int irq, void *data)
+{
+ struct mifintrbit *intr = (struct mifintrbit *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+ intr->mif->irq_bit_clear(intr->mif, irq);
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+}
+
+static void print_bitmaps(struct mifintrbit *intr)
+{
+ unsigned long dst1, dst2, dst3;
+
+ bitmap_copy_le(&dst1, intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
+ bitmap_copy_le(&dst2, intr->bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
+ bitmap_copy_le(&dst3, intr->bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
+}
+
+static void mifiintrman_isr(int irq, void *data)
+{
+ struct mifintrbit *intr = (struct mifintrbit *)data;
+ unsigned long flags;
+ unsigned long int irq_reg = 0;
+ int bit;
+
+ /* Avoid unused parameter error */
+ (void)irq;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+ irq_reg = intr->mif->irq_get(intr->mif);
+
+ print_bitmaps(intr);
+ for_each_set_bit(bit, &irq_reg, MIFINTRBIT_NUM_INT) {
+ if (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler)
+ intr->mifintrbit_irq_handler[bit](bit, intr->irq_data[bit]);
+ }
+
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+}
+
+/* Public functions */
+int mifintrbit_alloc_tohost(struct mifintrbit *intr, mifintrbit_handler handler, void *data)
+{
+ struct scsc_mif_abs *mif;
+ unsigned long flags;
+ int which_bit = 0;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+
+ /* Search for free slots */
+ which_bit = find_first_zero_bit(intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
+
+ if (which_bit >= MIFINTRBIT_NUM_INT)
+ goto error;
+
+ if (intr->mifintrbit_irq_handler[which_bit] != mifintrbit_default_handler) {
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+ goto error;
+ }
+
+ /* Get abs implementation */
+ mif = intr->mif;
+
+ /* Mask to prevent spurious incoming interrupts */
+ mif->irq_bit_mask(mif, which_bit);
+ /* Clear the interrupt */
+ mif->irq_bit_clear(mif, which_bit);
+
+ /* Register the handler */
+ intr->mifintrbit_irq_handler[which_bit] = handler;
+ intr->irq_data[which_bit] = data;
+
+ /* Once registration is set, and IRQ has been cleared, unmask the interrupt */
+ mif->irq_bit_unmask(mif, which_bit);
+
+ /* Update bit mask */
+ set_bit(which_bit, intr->bitmap_tohost);
+
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+
+ return which_bit;
+
+error:
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+ SCSC_TAG_ERR(MIF, "Error registering irq\n");
+ return -EIO;
+}
+
+int mifintrbit_free_tohost(struct mifintrbit *intr, int which_bit)
+{
+ struct scsc_mif_abs *mif;
+ unsigned long flags;
+
+ if (which_bit >= MIFINTRBIT_NUM_INT)
+ goto error;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+ /* Get abs implementation */
+ mif = intr->mif;
+
+ /* Mask to prevent spurious incoming interrupts */
+ mif->irq_bit_mask(mif, which_bit);
+ /* Set the handler with default */
+ intr->mifintrbit_irq_handler[which_bit] = mifintrbit_default_handler;
+ intr->irq_data[which_bit] = NULL;
+ /* Clear the interrupt for hygiene */
+ mif->irq_bit_clear(mif, which_bit);
+ /* Update bit mask */
+ clear_bit(which_bit, intr->bitmap_tohost);
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+
+ return 0;
+
+error:
+ SCSC_TAG_ERR(MIF, "Error unregistering irq\n");
+ return -EIO;
+}
+
+int mifintrbit_alloc_fromhost(struct mifintrbit *intr, enum scsc_mif_abs_target target)
+{
+ unsigned long flags;
+ int which_bit = 0;
+ unsigned long *p;
+
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+
+ if (target == SCSC_MIF_ABS_TARGET_R4)
+ p = intr->bitmap_fromhost_r4;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ else if (target == SCSC_MIF_ABS_TARGET_M4)
+ p = intr->bitmap_fromhost_r4;
+ else if (target == SCSC_MIF_ABS_TARGET_M4_1)
+ p = intr->bitmap_fromhost_r4;
+#else
+ else if (target == SCSC_MIF_ABS_TARGET_M4)
+ p = intr->bitmap_fromhost_m4;
+#endif
+ else
+ goto error;
+
+ /* Search for free slots */
+ which_bit = find_first_zero_bit(p, MIFINTRBIT_NUM_INT);
+
+ if (which_bit == MIFINTRBIT_NUM_INT)
+ goto error;
+
+ /* Update bit mask */
+ set_bit(which_bit, p);
+
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+
+ return which_bit;
+error:
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+ SCSC_TAG_ERR(MIF, "Error allocating bit %d on %s\n",
+ which_bit, target ? "M4" : "R4");
+ return -EIO;
+}
+
+int mifintrbit_free_fromhost(struct mifintrbit *intr, int which_bit, enum scsc_mif_abs_target target)
+{
+ unsigned long flags;
+ unsigned long *p;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+
+ if (which_bit >= MIFINTRBIT_NUM_INT)
+ goto error;
+
+ if (target == SCSC_MIF_ABS_TARGET_R4)
+ p = intr->bitmap_fromhost_r4;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ else if (target == SCSC_MIF_ABS_TARGET_M4)
+ p = intr->bitmap_fromhost_r4;
+ else if (target == SCSC_MIF_ABS_TARGET_M4_1)
+ p = intr->bitmap_fromhost_r4;
+#else
+ else if (target == SCSC_MIF_ABS_TARGET_M4)
+ p = intr->bitmap_fromhost_m4;
+#endif
+ else
+ goto error;
+
+ /* Clear bit mask */
+ clear_bit(which_bit, p);
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+
+ return 0;
+error:
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+ SCSC_TAG_ERR(MIF, "Error freeing bit %d on %s\n",
+ which_bit, target ? "M4" : "R4");
+ return -EIO;
+}
+
+/* core API */
+void mifintrbit_deinit(struct mifintrbit *intr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&intr->spinlock, flags);
+ /* Set all handlers to default before unregistering the handler */
+ for (i = 0; i < MIFINTRBIT_NUM_INT; i++)
+ intr->mifintrbit_irq_handler[i] = mifintrbit_default_handler;
+ intr->mif->irq_unreg_handler(intr->mif);
+ spin_unlock_irqrestore(&intr->spinlock, flags);
+}
+
+/**
+ * mifintrbit_init - Initialise the interrupt bit manager and hook the MIF ISR.
+ * @intr: interrupt bit manager to initialise
+ * @mif:  MIF abstraction to register the ISR with
+ */
+void mifintrbit_init(struct mifintrbit *intr, struct scsc_mif_abs *mif)
+{
+	int i;
+
+	spin_lock_init(&intr->spinlock);
+	/* Set all handlers to default before hooking the hardware interrupt */
+	for (i = 0; i < MIFINTRBIT_NUM_INT; i++)
+		intr->mifintrbit_irq_handler[i] = mifintrbit_default_handler;
+
+	/* reset bitmaps */
+	bitmap_zero(intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
+	bitmap_zero(intr->bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
+	bitmap_zero(intr->bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	/* Fix: this bitmap was never zeroed, so the set_bit() below
+	 * operated on uninitialised memory. */
+	bitmap_zero(intr->bitmap_fromhost_m4_1, MIFINTRBIT_NUM_INT);
+#endif
+
+	/**
+	 * Pre-allocate/reserve MIF interrupt bits 0 in both
+	 * .._fromhost_r4 and .._fromhost_m4 interrupt bits.
+	 *
+	 * These bits are used for purpose of forcing Panics from
+	 * either MX manager or GDB monitor channels.
+	 */
+	set_bit(MIFINTRBIT_RESERVED_PANIC_R4, intr->bitmap_fromhost_r4);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	set_bit(MIFINTRBIT_RESERVED_PANIC_M4, intr->bitmap_fromhost_m4);
+	set_bit(MIFINTRBIT_RESERVED_PANIC_M4_1, intr->bitmap_fromhost_m4_1);
+#else
+	set_bit(MIFINTRBIT_RESERVED_PANIC_M4, intr->bitmap_fromhost_m4);
+#endif
+
+	/* register isr with mif abstraction */
+	mif->irq_reg_handler(mif, mifiintrman_isr, (void *)intr);
+
+	/* cache mif */
+	intr->mif = mif;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIFINTRBIT_H
+#define __MIFINTRBIT_H
+
+#include <linux/spinlock.h>
+
+/** MIF Interrupt Bit Handler prototype. */
+typedef void (*mifintrbit_handler)(int which_bit, void *data);
+
+struct mifintrbit; /* fwd - opaque pointer */
+
+#define MIFINTRBIT_NUM_INT 16
+
+/** Reserve MIF interrupt bits 0 in the to-r4 and to-m4 registers for purpose of forcing panics */
+#define MIFINTRBIT_RESERVED_PANIC_R4 0
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+#define MIFINTRBIT_RESERVED_PANIC_M4 0
+#define MIFINTRBIT_RESERVED_PANIC_M4_1 0
+#else
+#define MIFINTRBIT_RESERVED_PANIC_M4 0
+#endif
+
+void mifintrbit_init(struct mifintrbit *intr, struct scsc_mif_abs *mif);
+void mifintrbit_deinit(struct mifintrbit *intr);
+
+/** Allocates TOHOST MIF interrupt bits, and associates handler for the AP bit.
+ * Returns the bit index.*/
+int mifintrbit_alloc_tohost(struct mifintrbit *intr, mifintrbit_handler handler, void *data);
+/** Deallocates TOHOST MIF interrupt bits */
+int mifintrbit_free_tohost(struct mifintrbit *intr, int which_bit);
+/* Get an interrupt bit associated with the target (R4/M4) -FROMHOST direction
+ * Function returns the IRQ bit associated , -EIO if error */
+int mifintrbit_alloc_fromhost(struct mifintrbit *intr, enum scsc_mif_abs_target target);
+/* Free an interrupt bit associated with the target (R4/M4) - FROMHOST direction.
+ * Function returns 0 if it succeeds, -EIO if error */
+int mifintrbit_free_fromhost(struct mifintrbit *intr, int which_bit, enum scsc_mif_abs_target target);
+
+
+struct mifintrbit {
+ /* Per-bit TOHOST handlers; reset to mifintrbit_default_handler when unclaimed */
+ void(*mifintrbit_irq_handler[MIFINTRBIT_NUM_INT]) (int irq, void *data);
+ /* Opaque cookie handed back to each handler */
+ void *irq_data[MIFINTRBIT_NUM_INT];
+ /* MIF abstraction the ISR is registered with (cached by mifintrbit_init) */
+ struct scsc_mif_abs *mif;
+ /* Use spinlock as it may be taken in IRQ context */
+ spinlock_t spinlock;
+ /* Interrupt allocation bitmaps */
+ DECLARE_BITMAP(bitmap_tohost, MIFINTRBIT_NUM_INT);
+ DECLARE_BITMAP(bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
+ DECLARE_BITMAP(bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ DECLARE_BITMAP(bitmap_fromhost_m4_1, MIFINTRBIT_NUM_INT);
+#endif
+};
+
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* uses */
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mif_abs.h"
+
+/* Implements */
+#include "mifmboxman.h"
+
+/* Prepare the mailbox manager: all MIFMBOX_NUM mailboxes start out free. */
+int mifmboxman_init(struct mifmboxman *mbox)
+{
+	if (mbox->in_use)
+		return -EBUSY;
+
+	mutex_init(&mbox->lock);
+	bitmap_zero(mbox->bitmap, MIFMBOX_NUM);
+	mbox->mbox_free = MIFMBOX_NUM;
+	mbox->in_use = true;
+
+	return 0;
+}
+
+/**
+ * mifmboxman_alloc_mboxes - Allocate @n consecutive mailboxes.
+ * @mbox:             mailbox manager
+ * @n:                number of consecutive mailboxes required
+ * @first_mbox_index: out - index of the first allocated mailbox
+ *
+ * Returns true on success, false if the request is invalid or no run of
+ * @n consecutive free mailboxes exists.
+ */
+bool mifmboxman_alloc_mboxes(struct mifmboxman *mbox, int n, int *first_mbox_index)
+{
+	unsigned int index = 0;
+	unsigned int available;
+	u8 i;
+
+	mutex_lock(&mbox->lock);
+
+	/* Fix: reject negative n too — it passed the old (n == 0) check and
+	 * underflowed the scan bound below. */
+	if ((n > MIFMBOX_NUM) || (n <= 0) || !mbox->in_use)
+		goto error;
+
+	while (index <= (MIFMBOX_NUM - n)) {
+		available = 0;
+
+		/* Count consecutive free mailboxes starting at index */
+		for (i = 0; i < n; i++) {
+			if (test_bit((i + index), mbox->bitmap))
+				break;
+			available++;
+		}
+
+		if (available == n) {
+			*first_mbox_index = index;
+
+			for (i = 0; i < n; i++)
+				set_bit(index++, mbox->bitmap);
+
+			mbox->mbox_free -= n;
+			goto exit;
+		} else
+			/* Skip past the used mailbox that broke the run */
+			index = index + available + 1;
+	}
+error:
+	SCSC_TAG_ERR(MIF, "Error allocating mbox\n");
+	mutex_unlock(&mbox->lock);
+	return false;
+exit:
+	mutex_unlock(&mbox->lock);
+	return true;
+}
+
+/**
+ * mifmboxman_free_mboxes - Release @n mailboxes starting at @first_mbox_index.
+ * @mbox:             mailbox manager
+ * @first_mbox_index: index of the first mailbox to free
+ * @n:                number of mailboxes to free
+ *
+ * Only mailboxes whose bits are actually set are counted back into
+ * mbox_free; already-free entries are skipped silently.
+ */
+void mifmboxman_free_mboxes(struct mifmboxman *mbox, int first_mbox_index, int n)
+{
+	int index = 0;
+	int total_free = 0;
+
+	mutex_lock(&mbox->lock);
+	/* Fix: also reject negative n/first_mbox_index — a negative start
+	 * index would address the bitmap out of range. */
+	if ((n > MIFMBOX_NUM) ||
+	    (first_mbox_index < 0) ||
+	    ((n + first_mbox_index) > MIFMBOX_NUM) ||
+	    (n <= 0) ||
+	    !mbox->in_use)
+		goto error;
+
+	for (index = first_mbox_index; index < (first_mbox_index + n); index++)
+		if (test_bit(index, mbox->bitmap)) {
+			clear_bit(index, mbox->bitmap);
+			total_free++;
+		}
+
+	mbox->mbox_free += total_free;
+	mutex_unlock(&mbox->lock);
+	return;
+error:
+	SCSC_TAG_ERR(MIF, "Error freeing mbox\n");
+	mutex_unlock(&mbox->lock);
+}
+
+/* Return the pointer for mailbox @mbox_index via the MIF abstraction. */
+u32 *mifmboxman_get_mbox_ptr(struct mifmboxman *mbox, struct scsc_mif_abs *mif_abs, int mbox_index)
+{
+	(void)mbox; /* manager state is not needed for the lookup */
+
+	return mif_abs->get_mbox_ptr(mif_abs, mbox_index);
+}
+
+
+/**
+ * mifmboxman_deinit - Mark the mailbox manager as no longer in use.
+ *
+ * Returns 0 on success, -ENODEV if it was not initialised.
+ */
+int mifmboxman_deinit(struct mifmboxman *mbox)
+{
+	mutex_lock(&mbox->lock);
+	if (!mbox->in_use) {
+		/* Fix: previously returned -ENODEV with the mutex still held */
+		mutex_unlock(&mbox->lock);
+		return -ENODEV;
+	}
+	mbox->in_use = false;
+	mutex_unlock(&mbox->lock);
+	return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MIFMBOXMAN_H
+#define __MIFMBOXMAN_H
+
+#include <linux/mutex.h>
+
+/* TODO: Needs to define the max mem */
+
+struct mifmboxman;
+struct scsc_mif_abs;
+struct mutex;
+
+int mifmboxman_init(struct mifmboxman *mbox);
+bool mifmboxman_alloc_mboxes(struct mifmboxman *mbox, int n, int *first_mbox_index);
+void mifmboxman_free_mboxes(struct mifmboxman *mbox, int first_mbox_index, int n);
+u32 *mifmboxman_get_mbox_ptr(struct mifmboxman *mbox, struct scsc_mif_abs *mif_abs, int mbox_index);
+int mifmboxman_deinit(struct mifmboxman *mbox);
+
+#ifdef CONFIG_SOC_EXYNOS7570
+#define MIFMBOX_NUM 8
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885) || defined(CONFIG_SOC_EXYNOS9610)
+#define MIFMBOX_NUM 4
+#else /* emulation */
+#define MIFMBOX_NUM 8
+#endif
+
+/* Inclusion in core.c treat it as opaque */
+struct mifmboxman {
+ bool in_use;
+ u32 mbox_free;
+ DECLARE_BITMAP(bitmap, MIFMBOX_NUM);
+ struct mutex lock;
+};
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <scsc/scsc_logring.h>
+#include "mifproc.h"
+#include "scsc_mif_abs.h"
+#include "miframman.h"
+
+#define MX_MAX_PROC_RAMMAN 2 /* Number of RAMMANs to track */
+
+static struct proc_dir_entry *procfs_dir;
+static struct proc_dir_entry *procfs_dir_ramman[MX_MAX_PROC_RAMMAN];
+
+/* WARNING --- SINGLETON FOR THE TIME BEING */
+/* EXTEND PROC ENTRIES IF NEEDED!!!!! */
+struct scsc_mif_abs *mif_global;
+
+/* Generic open: stash the proc entry's private data for the read/write ops. */
+static int mifprocfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = MIF_PDE_DATA(inode);
+ return 0;
+}
+
+/* Generate file_operations and read/write prototypes for the RW entries */
+MIF_PROCFS_RW_FILE_OPS(mif_dump);
+MIF_PROCFS_RW_FILE_OPS(mif_writemem);
+MIF_PROCFS_RW_FILE_OPS(mif_reg);
+
+/* miframman ops: read-only per-ramman statistics files */
+MIF_PROCFS_RO_FILE_OPS(ramman_total);
+MIF_PROCFS_RO_FILE_OPS(ramman_offset);
+MIF_PROCFS_RO_FILE_OPS(ramman_start);
+MIF_PROCFS_RO_FILE_OPS(ramman_free);
+MIF_PROCFS_RO_FILE_OPS(ramman_used);
+MIF_PROCFS_RO_FILE_OPS(ramman_size);
+
+/* seq_file listing of per-ramman allocations */
+MIF_PROCFS_SEQ_FILE_OPS(ramman_list);
+
+/* Placeholder read: always reports "OK". */
+static ssize_t mifprocfs_mif_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char tmp[128];
+	int len;
+
+	(void)file; /* unused */
+
+	len = scnprintf(tmp, sizeof(tmp), "%s\n", "OK");
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+
+/* Placeholder read: always reports "OK". */
+static ssize_t mifprocfs_mif_writemem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char tmp[128];
+	int len;
+
+	(void)file; /* unused */
+
+	len = scnprintf(tmp, sizeof(tmp), "%s\n", "OK");
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Placeholder read: always reports "OK". */
+static ssize_t mifprocfs_mif_dump_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char tmp[128];
+	int len;
+
+	(void)file; /* unused */
+
+	len = scnprintf(tmp, sizeof(tmp), "%s\n", "OK");
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/*
+ * Parse "<address(hex)> <value(hex)>" and poke the u32 @value into shared RAM
+ * at offset @address. Always consumes @count; parse errors are logged only.
+ */
+static ssize_t mifprocfs_mif_writemem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	char *sptr, *token;
+	unsigned int len = 0, pass = 0;
+	u32 value = 0, address = 0;
+	void *mem;
+
+	/* Avoid unused parameter error */
+	(void)file;
+	(void)ppos;
+
+	/* Fix: guard against writes before an endpoint is registered —
+	 * mif_reg_write already checks; here mif_global was dereferenced
+	 * unchecked. */
+	if (!mif_global) {
+		SCSC_TAG_INFO(MIF, "Endpoint not registered\n");
+		return count;
+	}
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	while ((token = strsep(&sptr, " ")) != NULL) {
+		switch (pass) {
+		/* register */
+		case 0:
+			if ((token[0] == '0') && (token[1] == 'x')) {
+				if (kstrtou32(token, 16, &address)) {
+					SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
+					SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
+					goto error;
+				}
+			} else {
+				SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
+				SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
+				goto error;
+			}
+			break;
+		/* value */
+		case 1:
+			if ((token[0] == '0') && (token[1] == 'x')) {
+				if (kstrtou32(token, 16, &value)) {
+					SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
+					SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
+					goto error;
+				}
+			} else {
+				SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
+				SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
+				goto error;
+			}
+			break;
+		}
+		pass++;
+	}
+	/* (dead "match" flag removed — it was never set) */
+	if (pass != 2) {
+		SCSC_TAG_INFO(MIF, "Wrong format: <address> <value (hex)>\n");
+		SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 0xcafecafe\"\n");
+		goto error;
+	}
+
+	/* Get memory offset */
+	mem = mif_global->get_mifram_ptr(mif_global, 0);
+	if (!mem) {
+		SCSC_TAG_INFO(MIF, "Mem not allocated\n");
+		goto error;
+	}
+
+	SCSC_TAG_INFO(MIF, "Setting value 0x%x at address 0x%x offset\n", value, address);
+
+	/* NOTE(review): address is not bounds-checked against the region size */
+	*((u32 *)(mem + address)) = value;
+error:
+	return count;
+}
+
+/*
+ * Parse "<address(hex)> <size> <unit>" and hex-dump @size bytes of shared RAM
+ * starting at offset @address, grouped in @unit (8/16/32) bit words.
+ * Always consumes @count; parse errors are logged only.
+ */
+static ssize_t mifprocfs_mif_dump_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	char *sptr, *token;
+	unsigned int len = 0, pass = 0;
+	u32 address = 0;
+	u32 size = 0;
+	u8 unit = 8;
+	void *mem;
+
+	(void)file;
+	(void)ppos;
+
+	/* Fix: mif_global was dereferenced below without a registration check */
+	if (!mif_global) {
+		SCSC_TAG_INFO(MIF, "Endpoint not registered\n");
+		return count;
+	}
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	while ((token = strsep(&sptr, " ")) != NULL) {
+		switch (pass) {
+		/* address */
+		case 0:
+			if ((token[0] == '0') && (token[1] == 'x')) {
+				if (kstrtou32(token, 16, &address)) {
+					SCSC_TAG_INFO(MIF, "Incorrect format,,,address should start by 0x\n");
+					SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
+					goto error;
+				}
+				SCSC_TAG_INFO(MIF, "address %d 0x%x\n", address, address);
+			} else {
+				SCSC_TAG_INFO(MIF, "Incorrect format,,,address should start by 0x\n");
+				SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
+				goto error;
+			}
+			break;
+		/* size */
+		case 1:
+			if (kstrtou32(token, 0, &size)) {
+				SCSC_TAG_INFO(MIF, "Incorrect format,,, for size\n");
+				goto error;
+			}
+			SCSC_TAG_INFO(MIF, "size: %d\n", size);
+			break;
+
+		/* unit */
+		case 2:
+			if (kstrtou8(token, 0, &unit)) {
+				SCSC_TAG_INFO(MIF, "Incorrect format,,, for unit\n");
+				goto error;
+			}
+			if ((unit != 8) && (unit != 16) && (unit != 32)) {
+				SCSC_TAG_INFO(MIF, "Unit %d should be 8/16/32\n", unit);
+				goto error;
+			}
+			SCSC_TAG_INFO(MIF, "unit: %d\n", unit);
+			break;
+		}
+		pass++;
+	}
+	if (pass != 3) {
+		SCSC_TAG_INFO(MIF, "Wrong format: <start_address> <size> <unit>\n");
+		SCSC_TAG_INFO(MIF, "Example: \"0xaaaabbbb 256 8\"\n");
+		goto error;
+	}
+
+	mem = mif_global->get_mifram_ptr(mif_global, 0);
+	if (!mem) {
+		SCSC_TAG_INFO(MIF, "Mem not allocated\n");
+		goto error;
+	}
+
+	/* Add offset */
+	mem = mem + address;
+
+	SCSC_TAG_INFO(MIF, "Phy addr :%p ref addr :%x\n", mem, address);
+	SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------\n");
+	print_hex_dump(KERN_WARNING, "ref addr offset: ", DUMP_PREFIX_OFFSET, 16, unit/8, mem, size, 1);
+	SCSC_TAG_INFO(MIF, "------------------------------------------------------------------------\n");
+error:
+	return count;
+}
+
+/* Any write to this entry triggers a register dump via the MIF abstraction. */
+static ssize_t mifprocfs_mif_reg_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	void *base;
+
+	if (!mif_global) {
+		SCSC_TAG_INFO(MIF, "Endpoint not registered\n");
+		return 0;
+	}
+
+	base = mif_global->get_mifram_ptr(mif_global, 0);
+	SCSC_TAG_INFO(MIF, "Phy addr :%p\n", base);
+
+	mif_global->mif_dump_registers(mif_global);
+
+	return count;
+}
+/*
+ * TODO: Add here any debug message should be exported
+static int mifprocfs_mif_dbg_show(struct seq_file *m, void *v)
+{
+ (void)v;
+
+ if (!mif_global) {
+ seq_puts(m, "endpoint not registered");
+ return 0;
+ }
+ return 0;
+}
+*/
+
+/* Total space in the memory region containing this ramman (assumes one region per ramman) */
+/* Report total region space: reserved head plus the pool itself. */
+static ssize_t mifprocfs_ramman_total_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%zd\n",
+			(ptrdiff_t)(ramman->start_dram - ramman->start_region) + ramman->size_pool);
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Offset of the pool within the region (the space before is reserved by FW) */
+/* Report the pool's offset within its region (space before is FW-reserved). */
+static ssize_t mifprocfs_ramman_offset_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%zd\n",
+			(ptrdiff_t)(ramman->start_dram - ramman->start_region));
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Start address of the pool within the region */
+/* Report the pool's start address. */
+static ssize_t mifprocfs_ramman_start_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%p\n", ramman->start_dram);
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Size of the pool */
+/* Report the pool size in bytes. */
+static ssize_t mifprocfs_ramman_size_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%zd\n", ramman->size_pool);
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Space remaining within the pool */
+/* Report the bytes still available in the pool. */
+static ssize_t mifprocfs_ramman_free_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%u\n", ramman->free_mem);
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* Bytes used within the pool */
+/* Report the bytes currently allocated from the pool. */
+static ssize_t mifprocfs_ramman_used_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct miframman *ramman = file->private_data;
+	char tmp[128];
+	int len;
+
+	len = scnprintf(tmp, sizeof(tmp), "%zd\n",
+			ramman->size_pool - (size_t)ramman->free_mem);
+
+	return simple_read_from_buffer(user_buf, count, ppos, tmp, len);
+}
+
+/* List allocations per ramman */
+/* seq_file show handler: list this ramman's allocations via miframman_log(). */
+static int mifprocfs_ramman_list_show(struct seq_file *m, void *v)
+{
+	(void)v;
+
+	miframman_log((struct miframman *)m->private, m);
+	return 0;
+}
+
+static const char *procdir = "driver/mif_ctrl";
+static int refcount;
+
+#define MIF_DIRLEN 128
+
+/* Take a reference on /proc/driver/mif_ctrl, creating it on first use. */
+static struct proc_dir_entry *create_procfs_dir(void)
+{
+	char name[MIF_DIRLEN];
+
+	if (refcount++ == 0) {
+		(void)snprintf(name, sizeof(name), "%s", procdir);
+		procfs_dir = proc_mkdir(name, NULL);
+	}
+	return procfs_dir;
+}
+
+/* Drop a reference on /proc/driver/mif_ctrl, removing it on the last one. */
+static void destroy_procfs_dir(void)
+{
+	char name[MIF_DIRLEN];
+
+	if (--refcount == 0) {
+		(void)snprintf(name, sizeof(name), "%s", procdir);
+		remove_proc_entry(name, NULL);
+		procfs_dir = NULL;
+	}
+	WARN_ON(refcount < 0);
+}
+
+
+/*
+ * Create the top-level /proc entries for the (singleton) MIF endpoint.
+ * Returns 0 on success, -EBUSY if already registered, -EINVAL on failure.
+ */
+int mifproc_create_proc_dir(struct scsc_mif_abs *mif)
+{
+	struct proc_dir_entry *parent;
+
+	/* WARNING --- SINGLETON FOR THE TIME BEING */
+	/* EXTEND PROC ENTRIES IF NEEDED!!!!! */
+	if (mif_global)
+		return -EBUSY;
+
+	/* Ref the root dir */
+	parent = create_procfs_dir();
+	if (!parent) {
+		SCSC_TAG_INFO(MIF, "failed to create /proc dir\n");
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+	parent->data = NULL;
+#endif
+	MIF_PROCFS_ADD_FILE(NULL, mif_writemem, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+	MIF_PROCFS_ADD_FILE(NULL, mif_dump, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+	MIF_PROCFS_ADD_FILE(NULL, mif_reg, parent, S_IRUSR | S_IRGRP);
+
+	mif_global = mif;
+
+	return 0;
+}
+
+/* Remove the top-level /proc entries and drop the root dir reference. */
+void mifproc_remove_proc_dir(void)
+{
+	if (procfs_dir) {
+		MIF_PROCFS_REMOVE_FILE(mif_writemem, procfs_dir);
+		MIF_PROCFS_REMOVE_FILE(mif_dump, procfs_dir);
+		MIF_PROCFS_REMOVE_FILE(mif_reg, procfs_dir);
+		/* De-ref the root dir */
+		destroy_procfs_dir();
+	}
+	mif_global = NULL;
+}
+
+/* /proc/driver/mif_ctrl/rammanX */
+static const char *ramman_procdir = "ramman";
+struct miframman *proc_miframman[MX_MAX_PROC_RAMMAN];
+static int ramman_instance;
+
+/* Create /proc/driver/mif_ctrl/rammanN for @ramman and register its stat
+ * files. Returns 0 on success, -EINVAL when all slots are taken or proc
+ * creation fails. Slots are assigned in creation order (ramman_instance). */
+int mifproc_create_ramman_proc_dir(struct miframman *ramman)
+{
+ char dir[MIF_DIRLEN];
+ struct proc_dir_entry *parent;
+ struct proc_dir_entry *root;
+
+ /* All MX_MAX_PROC_RAMMAN tracking slots already used? */
+ if ((ramman_instance > ARRAY_SIZE(proc_miframman) - 1))
+ return -EINVAL;
+
+ /* Ref the root dir /proc/driver/mif_ctrl */
+ root = create_procfs_dir();
+ if (!root)
+ return -EINVAL;
+
+ (void)snprintf(dir, sizeof(dir), "%s%d", ramman_procdir, ramman_instance);
+ parent = proc_mkdir(dir, root);
+ if (parent) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+ parent->data = NULL;
+#endif
+ /* Each file gets @ramman as private data for its read handler */
+ MIF_PROCFS_ADD_FILE(ramman, ramman_total, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ MIF_PROCFS_ADD_FILE(ramman, ramman_offset, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ MIF_PROCFS_ADD_FILE(ramman, ramman_start, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ MIF_PROCFS_ADD_FILE(ramman, ramman_size, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ MIF_PROCFS_ADD_FILE(ramman, ramman_free, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ MIF_PROCFS_ADD_FILE(ramman, ramman_used, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+
+ MIF_PROCFS_SEQ_ADD_FILE(ramman, ramman_list, parent, S_IRUSR | S_IRGRP | S_IROTH);
+
+ /* Record the slot; mifproc_remove_ramman_proc_dir tears down in LIFO order */
+ procfs_dir_ramman[ramman_instance] = parent;
+ proc_miframman[ramman_instance] = ramman;
+
+ ramman_instance++;
+
+ } else {
+ SCSC_TAG_INFO(MIF, "failed to create /proc dir\n");
+ destroy_procfs_dir();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Tear down the most recently created rammanN /proc dir (LIFO order).
+ * NOTE(review): the @ramman argument is not matched against the slot being
+ * removed — callers are assumed to destroy rammans in reverse creation
+ * order; confirm against callers. */
+void mifproc_remove_ramman_proc_dir(struct miframman *ramman)
+{
+ (void)ramman;
+
+ if (ramman_instance <= 0) {
+ WARN_ON(ramman_instance < 0);
+ return;
+ }
+
+ --ramman_instance;
+
+ if (procfs_dir_ramman[ramman_instance]) {
+ char dir[MIF_DIRLEN];
+
+ MIF_PROCFS_REMOVE_FILE(ramman_total, procfs_dir_ramman[ramman_instance]);
+ MIF_PROCFS_REMOVE_FILE(ramman_offset, procfs_dir_ramman[ramman_instance]);
+ MIF_PROCFS_REMOVE_FILE(ramman_start, procfs_dir_ramman[ramman_instance]);
+ MIF_PROCFS_REMOVE_FILE(ramman_size, procfs_dir_ramman[ramman_instance]);
+ MIF_PROCFS_REMOVE_FILE(ramman_free, procfs_dir_ramman[ramman_instance]);
+ MIF_PROCFS_REMOVE_FILE(ramman_used, procfs_dir_ramman[ramman_instance]);
+
+ MIF_PROCFS_REMOVE_FILE(ramman_list, procfs_dir_ramman[ramman_instance]);
+
+ (void)snprintf(dir, sizeof(dir), "%s%d", ramman_procdir, ramman_instance);
+ remove_proc_entry(dir, procfs_dir);
+ procfs_dir_ramman[ramman_instance] = NULL;
+ proc_miframman[ramman_instance] = NULL;
+ }
+
+ /* De-ref the root dir */
+ destroy_procfs_dir();
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/*
+ * Chip Manager /proc interface
+ */
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/seq_file.h>
+
+#ifndef SCSC_MIF_PROC_H
+#define SCSC_MIF_PROC_H
+
+#ifndef AID_MX
+#define AID_MX 0444
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MIF_PDE_DATA(inode) PDE_DATA(inode)
+#else
+#define MIF_PDE_DATA(inode) (PDE(inode)->data)
+#endif
+
+#define MIF_PROCFS_SEQ_FILE_OPS(name) \
+ static int mifprocfs_ ## name ## _show(struct seq_file *m, void *v); \
+ static int mifprocfs_ ## name ## _open(struct inode *inode, struct file *file) \
+ { \
+ return single_open(file, mifprocfs_ ## name ## _show, MIF_PDE_DATA(inode)); \
+ } \
+ static const struct file_operations mifprocfs_ ## name ## _fops = { \
+ .open = mifprocfs_ ## name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define MIF_PROCFS_SEQ_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry; \
+ entry = proc_create_data(# name, mode, parent, &mifprocfs_ ## name ## _fops, _sdev); \
+ if (!entry) { \
+ break; \
+ } \
+ MIF_PROCFS_SET_UID_GID(entry); \
+ } while (0)
+
+#define MIF_PROCFS_RW_FILE_OPS(name) \
+ static ssize_t mifprocfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
+ static ssize_t mifprocfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mifprocfs_ ## name ## _fops = { \
+ .read = mifprocfs_ ## name ## _read, \
+ .write = mifprocfs_ ## name ## _write, \
+ .open = mifprocfs_open_file_generic, \
+ .llseek = generic_file_llseek \
+ }
+
+
+#define MIF_PROCFS_RO_FILE_OPS(name) \
+ static ssize_t mifprocfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mifprocfs_ ## name ## _fops = { \
+ .read = mifprocfs_ ## name ## _read, \
+ .open = mifprocfs_open_file_generic, \
+ .llseek = generic_file_llseek \
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MIF_PROCFS_SET_UID_GID(_entry) \
+ do { \
+ kuid_t proc_kuid = KUIDT_INIT(AID_MX); \
+ kgid_t proc_kgid = KGIDT_INIT(AID_MX); \
+ proc_set_user(_entry, proc_kuid, proc_kgid); \
+ } while (0)
+#else
+#define MIF_PROCFS_SET_UID_GID(entry) \
+ do { \
+ (entry)->uid = AID_MX; \
+ (entry)->gid = AID_MX; \
+ } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MIF_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mifprocfs_ ## name ## _fops, _sdev); \
+ MIF_PROCFS_SET_UID_GID(entry); \
+ } while (0)
+#else
+#define MIF_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry; \
+ entry = create_proc_entry(# name, mode, parent); \
+ if (entry) { \
+ entry->proc_fops = &mifprocfs_ ## name ## _fops; \
+ entry->data = _sdev; \
+ MIF_PROCFS_SET_UID_GID(entry); \
+ } \
+ } while (0)
+#endif
+
+#define MIF_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
+
+struct scsc_mif_abs;
+struct miframman;
+
+int mifproc_create_proc_dir(struct scsc_mif_abs *mif);
+void mifproc_remove_proc_dir(void);
+int mifproc_create_ramman_proc_dir(struct miframman *miframman);
+void mifproc_remove_ramman_proc_dir(struct miframman *miframman);
+
+struct mifproc {
+};
+#endif /* SCSC_mif_PROC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* uses */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <scsc/scsc_logring.h>
+#include <linux/bitmap.h>
+#include "scsc_mif_abs.h"
+
+/* Implements */
+#include "mifqos.h"
+
+/* Initialise the QoS tracker: no per-service requests outstanding. */
+int mifqos_init(struct mifqos *qos, struct scsc_mif_abs *mif)
+{
+	u8 service;
+
+	if (!qos)
+		return -EIO;
+
+	SCSC_TAG_INFO(MIF, "Init MIF QoS\n");
+
+	for (service = 0; service < SCSC_SERVICE_TOTAL; service++)
+		qos->qos_in_use[service] = false;
+
+	mutex_init(&qos->lock);
+	qos->mif = mif;
+
+	return 0;
+}
+
+/*
+ * Register a PM QoS request for service @id with the given @config.
+ * Returns -EIO if @qos is NULL or a request already exists for @id,
+ * otherwise the MIF hook's result (0 when no hook is provided).
+ */
+int mifqos_add_request(struct mifqos *qos, enum scsc_service_id id, enum scsc_qos_config config)
+{
+	struct scsc_mif_abs *mif;
+	struct scsc_mifqos_request *req;
+	int ret = 0;
+
+	if (!qos)
+		return -EIO;
+
+	mutex_lock(&qos->lock);
+	if (qos->qos_in_use[id]) {
+		ret = -EIO;
+		goto out;
+	}
+
+	SCSC_TAG_INFO(MIF, "Service id %d add QoS request %d\n", id, config);
+
+	mif = qos->mif;
+	req = &qos->qos_req[id];
+
+	if (mif->mif_pm_qos_add_request)
+		ret = mif->mif_pm_qos_add_request(mif, req, config);
+	if (!ret)
+		qos->qos_in_use[id] = true;
+out:
+	mutex_unlock(&qos->lock);
+	return ret;
+}
+
+/* Update an existing PM QoS request for service @id to @config.
+ * Returns -EIO if @qos is NULL or no request is registered for @id. */
+int mifqos_update_request(struct mifqos *qos, enum scsc_service_id id, enum scsc_qos_config config)
+{
+ struct scsc_mif_abs *mif;
+ struct scsc_mifqos_request *req;
+
+ if (!qos)
+ return -EIO;
+
+ mutex_lock(&qos->lock);
+ if (!qos->qos_in_use[id]) {
+ mutex_unlock(&qos->lock);
+ return -EIO;
+ }
+
+ SCSC_TAG_INFO(MIF, "Service id %d update QoS request %d\n", id, config);
+
+ mif = qos->mif;
+ req = &qos->qos_req[id];
+
+ /* NOTE(review): lock is dropped before invoking the hook, so a
+  * concurrent remove could race with the update — confirm callers
+  * serialise per-service operations. */
+ mutex_unlock(&qos->lock);
+
+ if (mif->mif_pm_qos_update_request)
+ return mif->mif_pm_qos_update_request(mif, req, config);
+ else
+ return 0;
+}
+
+/* Drop the PM QoS request for service @id.
+ * Returns -EIO if @qos is NULL or no request is registered for @id. */
+int mifqos_remove_request(struct mifqos *qos, enum scsc_service_id id)
+{
+ struct scsc_mif_abs *mif;
+ struct scsc_mifqos_request *req;
+
+ if (!qos)
+ return -EIO;
+
+ mutex_lock(&qos->lock);
+ if (!qos->qos_in_use[id]) {
+ mutex_unlock(&qos->lock);
+ return -EIO;
+ }
+
+ SCSC_TAG_INFO(MIF, "Service id %d remove QoS\n", id);
+
+ mif = qos->mif;
+ req = &qos->qos_req[id];
+
+ /* Slot is marked free before the hook runs (outside the lock) */
+ qos->qos_in_use[id] = false;
+
+ mutex_unlock(&qos->lock);
+
+ if (mif->mif_pm_qos_remove_request)
+ return mif->mif_pm_qos_remove_request(mif, req);
+ else
+ return 0;
+}
+
+/* Drop every outstanding per-service QoS request. Always returns 0. */
+int mifqos_deinit(struct mifqos *qos)
+{
+	enum scsc_service_id id;
+
+	SCSC_TAG_INFO(MIF, "Deinit MIF QoS\n");
+
+	for (id = 0; id < SCSC_SERVICE_TOTAL; id++)
+		mifqos_remove_request(qos, id);
+
+	return 0;
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#ifndef __MIFQOS_H
+#define __MIFQOS_H
+
+#include <linux/mutex.h>
+#include <scsc/scsc_mx.h>
+
+struct scsc_mif_abs;
+struct mifqos;
+
+int mifqos_init(struct mifqos *qos, struct scsc_mif_abs *mif);
+int mifqos_add_request(struct mifqos *qos, enum scsc_service_id id, enum scsc_qos_config config);
+int mifqos_update_request(struct mifqos *qos, enum scsc_service_id id, enum scsc_qos_config config);
+int mifqos_remove_request(struct mifqos *qos, enum scsc_service_id id);
+int mifqos_list(struct mifqos *qos);
+int mifqos_deinit(struct mifqos *qos);
+
+struct scsc_mifqos_request;
+
+struct mifqos {
+ /* true while a PM QoS request is registered for the service */
+ bool qos_in_use[SCSC_SERVICE_TOTAL];
+ /* Serialises qos_in_use/qos_req updates across services */
+ struct mutex lock;
+ struct scsc_mif_abs *mif;
+ /* Per-service request handles passed to the MIF PM QoS hooks */
+ struct scsc_mifqos_request qos_req[SCSC_SERVICE_TOTAL];
+};
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mif_abs.h"
+
+#include "scsc/api/bt_audio.h"
+#include "miframman.h"
+#include "mifproc.h"
+
+/* Caller should provide locking */
+/* Initialise a shared-RAM pool manager over [start_dram, start_dram+size_pool).
+ * @start_region marks the enclosing region start (monitoring only).
+ * NOTE(review): on the error paths below the function returns silently with
+ * ram only partially initialised (lock + num_blocks set) — callers get no
+ * error indication; confirm pool sizes are validated upstream. */
+void miframman_init(struct miframman *ram, void *start_dram, size_t size_pool, void *start_region)
+{
+ mutex_init(&ram->lock);
+
+ SCSC_TAG_INFO(MIF, "MIFRAMMAN_BLOCK_SIZE = %d\n", MIFRAMMAN_BLOCK_SIZE);
+ ram->num_blocks = size_pool / MIFRAMMAN_BLOCK_SIZE;
+
+ if (ram->num_blocks == 0) {
+ SCSC_TAG_ERR(MIF, "Pool size < BLOCK_SIZE\n");
+ return;
+ }
+
+ if (ram->num_blocks >= MIFRAMMAN_NUM_BLOCKS) {
+ SCSC_TAG_ERR(MIF, "Not enough memory\n");
+ return;
+ }
+
+ /* All blocks start free */
+ memset(ram->bitmap, BLOCK_FREE, sizeof(ram->bitmap));
+
+ ram->start_region = start_region; /* For monitoring purposes only */
+ ram->start_dram = start_dram;
+ ram->size_pool = size_pool;
+ ram->free_mem = ram->num_blocks * MIFRAMMAN_BLOCK_SIZE;
+
+ /* Expose this pool's statistics under /proc */
+ mifproc_create_ramman_proc_dir(ram);
+}
+
+/* Map the BT audio shared-memory layout onto @start_aboxram. */
+void miframabox_init(struct mifabox *mifabox, void *start_aboxram)
+{
+ /* No locking as not a shared resource */
+ mifabox->aboxram = (struct scsc_bt_audio_abox *)start_aboxram;
+}
+
+/* First-fit allocator core: find @nbytes worth of consecutive free blocks,
+ * mark the first as BLOCK_BOUND and the rest as BLOCK_INUSE (both carrying
+ * the owner @tag in the upper bits), and return the block-aligned pointer.
+ * Returns NULL on failure. Caller must hold ram->lock. */
+void *__miframman_alloc(struct miframman *ram, size_t nbytes, int tag)
+{
+ unsigned int index = 0;
+ unsigned int available;
+ unsigned int i;
+ size_t num_blocks;
+ void *free_mem = NULL;
+
+ if (!nbytes || nbytes > ram->free_mem)
+ goto end;
+
+ /* Number of blocks required (rounding up) */
+ num_blocks = nbytes / MIFRAMMAN_BLOCK_SIZE +
+ ((nbytes % MIFRAMMAN_BLOCK_SIZE) > 0 ? 1 : 0);
+
+ if (num_blocks > ram->num_blocks)
+ goto end;
+
+ while (index <= (ram->num_blocks - num_blocks)) {
+ available = 0;
+
+ /* Search consecutive blocks */
+ for (i = 0; i < num_blocks; i++) {
+ if (ram->bitmap[i + index] != BLOCK_FREE)
+ break;
+ available++;
+ }
+ if (available == num_blocks) {
+ free_mem = ram->start_dram +
+ MIFRAMMAN_BLOCK_SIZE * index;
+
+ /* Mark the block boundary as used */
+ ram->bitmap[index] = BLOCK_BOUND;
+ ram->bitmap[index] |= (u8)(tag << MIFRAMMAN_BLOCK_OWNER_SHIFT); /* Add owner tag for tracking */
+ index++;
+
+ /* Additional blocks in this allocation */
+ for (i = 1; i < num_blocks; i++) {
+ ram->bitmap[index] = BLOCK_INUSE;
+ ram->bitmap[index] |= (u8)(tag << MIFRAMMAN_BLOCK_OWNER_SHIFT); /* Add owner tag for tracking */
+ index++;
+ }
+
+ ram->free_mem -= num_blocks * MIFRAMMAN_BLOCK_SIZE;
+ goto exit;
+ } else
+ /* Jump past the in-use block that broke the run */
+ index = index + available + 1;
+ }
+end:
+ SCSC_TAG_INFO(MIF, "Not enough shared memory (nbytes %zd, free_mem %u)\n",
+ nbytes, ram->free_mem);
+ return NULL;
+exit:
+ return free_mem;
+}
+
+
+#define MIFRAMMAN_ALIGN(mem, align) \
+ ((void *)((((uintptr_t)(mem) + (align + sizeof(void *))) \
+ & (~(uintptr_t)(align - 1)))))
+
+#define MIFRAMMAN_PTR(mem) \
+ (*(((void **)((uintptr_t)(mem) & \
+ (~(uintptr_t)(sizeof(void *) - 1)))) - 1))
+
/*
 * Allocate shared DRAM block
 *
 * Over-allocates by align + sizeof(void *) bytes so that an aligned
 * pointer can be returned with the raw allocation address stashed just
 * before it (recovered by miframman_free via MIFRAMMAN_PTR).
 *
 * Parameters:
 *  ram    - pool identifier
 *  nbytes - allocation size
 *  align  - allocation alignment, must be a power of 2
 *  tag    - owner identifier (typically service ID), 4 bits.
 *
 * Returns
 *  Pointer to allocated area, or NULL
 */
void *miframman_alloc(struct miframman *ram, size_t nbytes, size_t align, int tag)
{
	void *mem, *align_mem = NULL;

	mutex_lock(&ram->lock);
	if (!is_power_of_2(align) || nbytes == 0) {
		SCSC_TAG_ERR(MIF, "Failed size/alignment check (nbytes %zd, align %zd)\n", nbytes, align);
		goto end;
	}

	/* Guarantee the back-pointer slot itself is properly aligned */
	if (align < sizeof(void *))
		align = sizeof(void *);

	mem = __miframman_alloc(ram, nbytes + align + sizeof(void *), tag);
	if (!mem)
		goto end;

	align_mem = MIFRAMMAN_ALIGN(mem, align);

	/* Store allocated pointer */
	MIFRAMMAN_PTR(align_mem) = mem;
end:
	mutex_unlock(&ram->lock);
	return align_mem;
}
+
/*
 * Free shared DRAM block
 *
 * 'mem' must be the raw (unaligned) address returned by
 * __miframman_alloc. Caller must hold ram->lock.
 *
 * Parameters:
 *  ram - pool identifier
 *  mem - buffer to free
 */
void __miframman_free(struct miframman *ram, void *mem)
{
	unsigned int index, num_blocks = 0;

	if (ram->start_dram == NULL || !mem) {
		SCSC_TAG_ERR(MIF, "Mem is NULL\n");
		return;
	}

	/* Get block index */
	index = (unsigned int)((mem - ram->start_dram)
			       / MIFRAMMAN_BLOCK_SIZE);

	/* Check */
	if (index >= ram->num_blocks) {
		SCSC_TAG_ERR(MIF, "Incorrect index %d\n", index);
		return;
	}

	/* Check it is a Boundary block */
	if ((ram->bitmap[index] & MIFRAMMAN_BLOCK_STATUS_MASK) != BLOCK_BOUND) {
		SCSC_TAG_ERR(MIF, "Incorrect Block descriptor\n");
		return;
	}
	/* Writing BLOCK_FREE clears the owner nibble too */
	ram->bitmap[index++] = BLOCK_FREE;

	/* Free remaining blocks of the allocation (marked BLOCK_INUSE) */
	num_blocks++;
	while (index < ram->num_blocks && (ram->bitmap[index] & MIFRAMMAN_BLOCK_STATUS_MASK) == BLOCK_INUSE) {
		ram->bitmap[index++] = BLOCK_FREE;
		num_blocks++;
	}

	ram->free_mem += num_blocks * MIFRAMMAN_BLOCK_SIZE;
}
+
+void miframman_free(struct miframman *ram, void *mem)
+{
+ mutex_lock(&ram->lock);
+ /* Restore allocated pointer */
+ if (mem)
+ __miframman_free(ram, MIFRAMMAN_PTR(mem));
+ mutex_unlock(&ram->lock);
+}
+
+/* Caller should provide locking */
+void miframman_deinit(struct miframman *ram)
+{
+ /* Mark all the blocks as INUSE (by Common) to prevent new allocations */
+ memset(ram->bitmap, BLOCK_INUSE, sizeof(ram->bitmap));
+
+ ram->num_blocks = 0;
+ ram->start_dram = NULL;
+ ram->size_pool = 0;
+ ram->free_mem = 0;
+
+ mifproc_remove_ramman_proc_dir(ram);
+}
+
+void miframabox_deinit(struct mifabox *mifabox)
+{
+ /* not dynamic - so just mark as NULL */
+ /* Maybe this function should be empty? */
+ mifabox->aboxram = NULL;
+}
+
+/* Log current allocations in a ramman in proc */
+void miframman_log(struct miframman *ram, struct seq_file *fd)
+{
+ unsigned int b;
+ unsigned int i;
+ int tag;
+ size_t num_blocks = 0;
+
+ if (!ram)
+ return;
+
+ seq_printf(fd, "ramman: start_dram %p, size %zd, free_mem %u\n\n",
+ ram->start_region, ram->size_pool, ram->free_mem);
+
+ for (b = 0; b < ram->num_blocks; b++) {
+ if ((ram->bitmap[b] & MIFRAMMAN_BLOCK_STATUS_MASK) == BLOCK_BOUND) {
+ /* Found a boundary allocation */
+ num_blocks++;
+ tag = (ram->bitmap[b] & MIFRAMMAN_BLOCK_OWNER_MASK) >> MIFRAMMAN_BLOCK_OWNER_SHIFT;
+
+ /* Count subsequent blocks in this group */
+ for (i = 1;
+ i < ram->num_blocks && (ram->bitmap[b + i] & MIFRAMMAN_BLOCK_STATUS_MASK) == BLOCK_INUSE;
+ i++) {
+ /* Check owner matches boundary block */
+ int newtag = (ram->bitmap[b + i] & MIFRAMMAN_BLOCK_OWNER_MASK) >> MIFRAMMAN_BLOCK_OWNER_SHIFT;
+ if (newtag != tag) {
+ seq_printf(fd, "Allocated block tag %d doesn't match boundary tag %d, index %d, %p\n",
+ newtag, tag, b + i,
+ ram->start_dram + (b + i) * MIFRAMMAN_BLOCK_SIZE);
+ }
+ num_blocks++;
+ }
+ seq_printf(fd, "index %8d, svc %d, bytes %12d, blocks %10d, %p\n",
+ b, tag,
+ (i * MIFRAMMAN_BLOCK_SIZE),
+ i,
+ ram->start_dram + (b * MIFRAMMAN_BLOCK_SIZE));
+ }
+ }
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
#ifndef __MIFRAMMAN_H
#define __MIFRAMMAN_H

#include <linux/mutex.h>
#include "scsc/api/bt_audio.h"

/*
 * Shared-DRAM block allocator ("ramman") and BT-audio ABOX RAM holder.
 * The pool is carved into MIFRAMMAN_BLOCK_SIZE blocks tracked by one
 * byte each: allocation status in the low nibble, owner tag in the
 * high nibble.
 */

/* TODO: Needs to define the max mem */

struct miframman;
struct mifabox;


void miframman_init(struct miframman *ram, void *start_dram, size_t size_pool, void *start_region);
void miframabox_init(struct mifabox *mifabox, void *start_aboxram);
void *miframman_alloc(struct miframman *ram, size_t nbytes, size_t align, int tag);
void miframman_free(struct miframman *ram, void *mem);
void miframman_deinit(struct miframman *ram);
void miframabox_deinit(struct mifabox *mifabox);
void miframman_log(struct miframman *ram, struct seq_file *fd);

/* Pool geometry: bitmap is statically sized for the largest pool */
#define MIFRAMMAN_MAXMEM (16 * 1024 * 1024)
#define MIFRAMMAN_BLOCK_SIZE (64)

#define MIFRAMMAN_NUM_BLOCKS ((MIFRAMMAN_MAXMEM) / (MIFRAMMAN_BLOCK_SIZE))

/* Block status in lower nibble */
#define MIFRAMMAN_BLOCK_STATUS_MASK 0x0f
#define BLOCK_FREE 0
#define BLOCK_INUSE 1
#define BLOCK_BOUND 2 /* Block allocation boundary */

/* Block owner in upper nibble */
#define MIFRAMMAN_BLOCK_OWNER_MASK 0xf0
#define MIFRAMMAN_BLOCK_OWNER_SHIFT 4

#define MIFRAMMAN_OWNER_COMMON 0 /* Owner tag for Common driver */

/* Inclusion in core.c treat it as opaque */
struct miframman {
	void *start_region; /* Base address of region containing the pool */
	void *start_dram; /* Base address of allocator pool */
	size_t size_pool; /* Size of allocator pool */
	char bitmap[MIFRAMMAN_NUM_BLOCKS]; /* Zero initialized-> all blocks free */
	u32 num_blocks; /* Blocks of MIFRAMMAN_BLOCK_SIZE in pool */
	u32 free_mem; /* Bytes remaining in allocator pool */
	struct mutex lock; /* Serialises miframman_alloc/miframman_free */
};

/* Holder for the (statically mapped) BT audio shared RAM */
struct mifabox {
	struct scsc_bt_audio_abox *aboxram;
};
#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* uses */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <scsc/scsc_logring.h>
+#include <linux/bitmap.h>
+#include "scsc_mif_abs.h"
+
+/* Implements */
+#include "mifsmapper.h"
+
+
+static int mifsmapper_get_num_banks(u8 *phy_map, u8 *log_map, bool large)
+{
+ u8 i = 0, count = 0;
+
+ for (i = 0; i < SCSC_MIF_SMAPPER_MAX_BANKS; i++) {
+ if (large && phy_map[i] == SCSC_MIF_ABS_LARGE_BANK) {
+ log_map[count] = i;
+ count++;
+ } else if (!large && phy_map[i] == SCSC_MIF_ABS_SMALL_BANK) {
+ log_map[count] = i;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int mifsmapper_init(struct mifsmapper *smapper, struct scsc_mif_abs *mif)
+{
+ /* TODO: Protect the function if allocations fail */
+ struct scsc_mif_smapper_info bank_info;
+ u8 i = 0, total_num_banks;
+ u8 phy_map[SCSC_MIF_SMAPPER_MAX_BANKS] = { 0 };
+ u8 log_map_large[SCSC_MIF_SMAPPER_MAX_BANKS] = { 0 };
+ u8 log_map_small[SCSC_MIF_SMAPPER_MAX_BANKS] = { 0 };
+
+ if (smapper->in_use)
+ return -EBUSY;
+
+ SCSC_TAG_INFO(MIF, "Init SMAPPER\n");
+
+ spin_lock_init(&smapper->lock);
+ /* Get physical mapping of the banks */
+ if (mif->mif_smapper_get_mapping(mif, phy_map, &smapper->align)) {
+ SCSC_TAG_ERR(MIF, "SMAPPER is not present\n");
+ return -EINVAL;
+ }
+
+ smapper->in_use = true;
+ smapper->mif = mif;
+
+ smapper->num_large_banks = mifsmapper_get_num_banks(phy_map, log_map_large, true);
+ smapper->num_small_banks = mifsmapper_get_num_banks(phy_map, log_map_small, false);
+ total_num_banks = smapper->num_large_banks + smapper->num_small_banks;
+
+ smapper->bank = kmalloc_array(total_num_banks, sizeof(struct mifsmapper_bank),
+ GFP_KERNEL);
+
+ smapper->bank_bm_large = kmalloc(BITS_TO_LONGS(smapper->num_large_banks) * sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zero(smapper->bank_bm_large, smapper->num_large_banks);
+
+ smapper->bank_bm_small = kmalloc(BITS_TO_LONGS(smapper->num_small_banks) * sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zero(smapper->bank_bm_small, smapper->num_small_banks);
+
+ /* LSB bit of banks will be the large banks the rest will be the small banks */
+ /* Get large bank info */
+ for (; i < smapper->num_large_banks; i++) {
+ /* get phy bank */
+ mif->mif_smapper_get_bank_info(mif, log_map_large[i], &bank_info);
+ smapper->bank[i].entries_bm = kmalloc(BITS_TO_LONGS(bank_info.num_entries) * sizeof(unsigned long), GFP_KERNEL);
+ smapper->bank[i].num_entries = bank_info.num_entries;
+ smapper->bank[i].mem_range_bytes = bank_info.mem_range_bytes;
+ smapper->bank[i].phy_index = log_map_large[i];
+ SCSC_TAG_INFO(MIF, "phy bank %d mapped to logical %d. Large, entries %d range 0x%x\n",
+ log_map_large[i], i, bank_info.num_entries, bank_info.mem_range_bytes);
+ bitmap_zero(smapper->bank[i].entries_bm, bank_info.num_entries);
+ }
+
+ /* Get small bank info */
+ for (; i < total_num_banks; i++) {
+ /* get phy bank */
+ mif->mif_smapper_get_bank_info(mif, log_map_small[i - smapper->num_large_banks], &bank_info);
+ smapper->bank[i].entries_bm = kmalloc(BITS_TO_LONGS(bank_info.num_entries) * sizeof(unsigned long), GFP_KERNEL);
+ smapper->bank[i].num_entries = bank_info.num_entries;
+ smapper->bank[i].mem_range_bytes = bank_info.mem_range_bytes;
+ smapper->bank[i].phy_index = log_map_small[i - smapper->num_large_banks];
+ SCSC_TAG_INFO(MIF, "phy bank %d mapped to logical %d. Small, entries %d range 0x%x\n",
+ log_map_small[i - smapper->num_large_banks], i, bank_info.num_entries, bank_info.mem_range_bytes);
+ bitmap_zero(smapper->bank[i].entries_bm, bank_info.num_entries);
+ }
+
+ return 0;
+}
+
+u16 mifsmapper_get_alignment(struct mifsmapper *smapper)
+{
+ return smapper->align;
+}
+
/*
 * Allocate one free SMAPPER bank of the requested size class.
 *
 * entry_size must be a power of two. On return *entries holds the
 * usable entry count for that granularity: mem_range_bytes/entry_size,
 * saturated to the bank's HW entry count; surplus HW entries are
 * pre-marked used in the entry bitmap.
 *
 * Returns the logical bank index (large banks occupy the low indices),
 * or -EIO with *entries = 0 if no bank is free or the smapper is not
 * initialised.
 */
int mifsmapper_alloc_bank(struct mifsmapper *smapper, bool large_bank, u32 entry_size, u16 *entries)
{
	struct mifsmapper_bank *bank;
	unsigned long *bitmap;
	u8 max_banks, offset = 0;
	int which_bit = 0;

	spin_lock(&smapper->lock);

	if (!smapper->in_use)
		goto error;

	bank = smapper->bank;
	if (large_bank) {
		max_banks = smapper->num_large_banks;
		bitmap = smapper->bank_bm_large;
	} else {
		max_banks = smapper->num_small_banks;
		bitmap = smapper->bank_bm_small;
		/* Small banks follow the large banks in the bank table */
		offset = smapper->num_large_banks;
	}

	/* Search for free slots */
	which_bit = find_first_zero_bit(bitmap, max_banks);
	if (which_bit >= max_banks)
		goto error;

	/* Update bit mask */
	set_bit(which_bit, bitmap);

	/* Retrieve Bank capabilities and return the number of entries available */
	/* size must be a power of 2 */
	/* TODO : check that granularity is correct */
	BUG_ON(!is_power_of_2(entry_size));

	/* Clear bank entries */
	bitmap_zero(bank[which_bit + offset].entries_bm, bank[which_bit + offset].num_entries);

	*entries = bank[which_bit + offset].mem_range_bytes/entry_size;
	/* Saturate */
	if (*entries > bank[which_bit + offset].num_entries)
		*entries = bank[which_bit + offset].num_entries;
	else if (*entries < bank[which_bit + offset].num_entries) {
		u16 i;

		SCSC_TAG_INFO(MIF, "Nominal entries %d reduced to %d\n",
			      bank[which_bit + offset].num_entries, *entries);

		for (i = *entries; i < bank[which_bit + offset].num_entries; i++)
			/* Mark the MSB of the bitmap as used */
			set_bit(i, bank[which_bit + offset].entries_bm);
	}
	/* Update number of entries */
	bank[which_bit + offset].num_entries = *entries;
	bank[which_bit + offset].num_entries_left = *entries;
	bank[which_bit + offset].in_use = true;
	bank[which_bit + offset].granularity = entry_size;

	SCSC_TAG_INFO(MIF, "entries %d bank.num_entries %d large bank %d logical bank %d entries left %d\n", *entries, bank[which_bit + offset].num_entries, large_bank, which_bit + offset,
		      bank[which_bit + offset].num_entries_left);

	spin_unlock(&smapper->lock);
	return which_bit + offset;

error:
	SCSC_TAG_ERR(MIF, "Error allocating bank\n");

	*entries = 0;
	spin_unlock(&smapper->lock);
	return -EIO;
}
+
+int mifsmapper_free_bank(struct mifsmapper *smapper, u8 bank)
+{
+ unsigned long *bitmap;
+ u8 max_banks, offset = 0;
+ struct mifsmapper_bank *bank_en;
+
+ spin_lock(&smapper->lock);
+
+ if (!smapper->in_use || ((bank >= (smapper->num_large_banks + smapper->num_small_banks))))
+ goto error;
+
+ /* check if it is a large or small bank */
+ if (bank >= smapper->num_large_banks) {
+ max_banks = smapper->num_small_banks;
+ bitmap = smapper->bank_bm_small;
+ offset = bank - smapper->num_large_banks;
+ } else {
+ max_banks = smapper->num_large_banks;
+ bitmap = smapper->bank_bm_large;
+ offset = bank;
+ }
+
+ /* Update bit mask */
+ if (!test_and_clear_bit(offset, bitmap))
+ SCSC_TAG_ERR(MIF, "bank was not allocated\n");
+
+ bank_en = smapper->bank;
+ bank_en[bank].in_use = false;
+
+ spin_unlock(&smapper->lock);
+
+ return 0;
+error:
+ SCSC_TAG_ERR(MIF, "Error freeing bank %d\n", bank);
+ spin_unlock(&smapper->lock);
+
+ return -EIO;
+}
+
+int mifsmapper_get_entries(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 *entries)
+{
+ struct mifsmapper_bank *bank_en;
+ unsigned long *bitmap;
+ u32 max_bits, i, ent;
+
+ if (!smapper->bank)
+ return -EINVAL;
+
+ bank_en = smapper->bank;
+
+ if (!bank_en[bank].in_use) {
+ SCSC_TAG_ERR(MIF, "Bank %d not allocated.\n", bank);
+ return -EINVAL;
+ }
+
+
+ max_bits = bank_en[bank].num_entries_left;
+ ent = bank_en[bank].num_entries;
+ if (num_entries > max_bits) {
+ SCSC_TAG_ERR(MIF, "Not enough entries. Requested %d, left %d\n", num_entries, max_bits);
+ return -ENOMEM;
+ }
+
+ bitmap = bank_en[bank].entries_bm;
+
+ for (i = 0; i < num_entries; i++) {
+ entries[i] = find_first_zero_bit(bitmap, ent);
+ if (entries[i] >= ent)
+ return -EIO;
+ /* Update bit mask */
+ set_bit(entries[i], bitmap);
+ }
+
+ smapper->bank[bank].num_entries_left -= num_entries;
+
+ return 0;
+}
+
+int mifsmapper_free_entries(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 *entries)
+{
+ struct mifsmapper_bank *bank_en;
+ unsigned long *bitmap;
+ u32 max_bits, i, ent, total = 0;
+
+ if (!smapper->bank)
+ return -EINVAL;
+
+ bank_en = smapper->bank;
+
+ if (!bank_en[bank].in_use) {
+ SCSC_TAG_ERR(MIF, "Bank %d not allocated.\n", bank);
+ return -EINVAL;
+ }
+
+
+ max_bits = bank_en[bank].num_entries_left;
+ ent = bank_en[bank].num_entries;
+ if ((max_bits + num_entries) > ent) {
+ SCSC_TAG_ERR(MIF, "Tried to free more entries. Requested %d, left %d\n", num_entries, max_bits);
+ return -ENOMEM;
+ }
+
+ bitmap = bank_en[bank].entries_bm;
+
+ for (i = 0; i < num_entries; i++) {
+ /* Update bit mask */
+ if (!test_and_clear_bit(entries[i], bitmap))
+ SCSC_TAG_ERR(MIF, "entry was not allocated\n");
+ else
+ total++;
+ }
+
+ smapper->bank[bank].num_entries_left += total;
+
+ return 0;
+}
+
+void mifsmapper_configure(struct mifsmapper *smapper, u32 granularity)
+{
+ struct scsc_mif_abs *mif;
+ /* Get abs implementation */
+ mif = smapper->mif;
+
+ mif->mif_smapper_configure(mif, granularity);
+}
+
+int mifsmapper_write_sram(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr)
+{
+ struct scsc_mif_abs *mif;
+
+ if (!smapper->bank[bank].in_use) {
+ SCSC_TAG_ERR(MIF, "Bank %d not allocated.\n", bank);
+ return -EINVAL;
+ }
+
+ /* Get abs implementation */
+ mif = smapper->mif;
+
+ /* use the phy address of the bank */
+ return mif->mif_smapper_write_sram(mif, smapper->bank[bank].phy_index, num_entries, first_entry, addr);
+}
+
+u32 mifsmapper_get_bank_base_address(struct mifsmapper *smapper, u8 bank)
+{
+ struct scsc_mif_abs *mif;
+
+ /* Get abs implementation */
+ mif = smapper->mif;
+
+ return mif->mif_smapper_get_bank_base_address(mif, bank);
+}
+
+int mifsmapper_deinit(struct mifsmapper *smapper)
+{
+ u8 i = 0, total_num_banks;
+
+ spin_lock(&smapper->lock);
+
+ SCSC_TAG_INFO(MIF, "Deinit SMAPPER\n");
+
+ if (!smapper->in_use) {
+ spin_unlock(&smapper->lock);
+ return -ENODEV;
+ }
+
+ total_num_banks = smapper->num_large_banks + smapper->num_small_banks;
+ for (; i < total_num_banks; i++) {
+ kfree(smapper->bank[i].entries_bm);
+ smapper->bank[i].num_entries = 0;
+ smapper->bank[i].mem_range_bytes = 0;
+ }
+
+ kfree(smapper->bank_bm_large);
+ kfree(smapper->bank_bm_small);
+ kfree(smapper->bank);
+
+ smapper->in_use = false;
+
+ spin_unlock(&smapper->lock);
+ return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
#ifndef __MIFSMAPPER_H
#define __MIFSMAPPER_H

/* NOTE(review): the lock below is a spinlock; the mutex.h include and
 * 'struct mutex' forward declaration look like leftovers - confirm. */
#include <linux/mutex.h>

struct mifsmapper;
struct scsc_mif_abs;
struct mutex;

int mifsmapper_init(struct mifsmapper *smapper, struct scsc_mif_abs *mif);
u16 mifsmapper_get_alignment(struct mifsmapper *smapper);
int mifsmapper_alloc_bank(struct mifsmapper *smapper, bool large_bank, u32 entry_size, u16 *entries);
int mifsmapper_free_bank(struct mifsmapper *smapper, u8 bank);
int mifsmapper_get_entries(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 *entr);
int mifsmapper_free_entries(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 *entries);
void mifsmapper_configure(struct mifsmapper *smapper, u32 granularity);
int mifsmapper_write_sram(struct mifsmapper *smapper, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr);
u32 mifsmapper_get_bank_base_address(struct mifsmapper *smapper, u8 bank);
int mifsmapper_deinit(struct mifsmapper *smapper);

#define MIFSMAPPER_160 4
#define MIFSMAPPER_64 7

#define MIFSMAPPER_NOT_VALID 0
#define MIFSMAPPER_VALID 1

/* Per-bank state; one instance per logical bank (large banks first) */
struct mifsmapper_bank {
	unsigned long *entries_bm; /* Allocation bitmap, one bit per entry */
	u32 num_entries; /* Usable entries (may be below HW capacity) */
	u32 num_entries_left; /* Free entries remaining */
	u32 mem_range_bytes; /* Memory range covered by this bank */
	u8 phy_index; /* Physical bank index used by the HW layer */
	u32 granularity; /* Entry size set at bank allocation */
	bool in_use;
};

/* Inclusion in core.c treat it as opaque */
struct mifsmapper {
	bool in_use;
	spinlock_t lock;
	struct scsc_mif_abs *mif;
	struct mifsmapper_bank *bank; /* Bank reference created after reading HW capabilities */
	unsigned long *bank_bm_large; /* Allocation bitmap for large banks */
	unsigned long *bank_bm_small; /* Allocation bitmap for small banks */
	u32 num_large_banks;
	u32 num_small_banks;
	u16 align; /* Mapping alignment reported by HW at init */
};
#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * MIF Byte Stream (Implementation)
+ */
+
+/* Implements */
+#include "scsc_mx_impl.h"
+#include "mifstream.h"
+
+/* Uses */
+#include "mifintrbit.h"
+
+/* Public Functions */
+
+void mif_stream_config_serialise(struct mif_stream *stream, struct mxstreamconf *stream_conf)
+{
+ stream_conf->read_bit_idx = stream->read_bit_idx;
+ stream_conf->write_bit_idx = stream->write_bit_idx;
+ cpacketbuffer_config_serialise(&stream->buffer, &stream_conf->buf_conf);
+}
+
/*
 * Initialise a MIF stream: set up its backing packet buffer and obtain
 * one interrupt bit per direction (to-host allocated dynamically;
 * from-host either allocated or taken from the reserved panic bits).
 *
 * Parameters:
 *  stream      - stream to initialise
 *  target      - remote peer subsystem
 *  direction   - MIF_STREAM_DIRECTION_IN or _OUT (relative to host)
 *  num_packets - number of fixed-size packets in the buffer
 *  packet_size - size of each packet in bytes
 *  mx          - maxwell instance
 *  intrbit     - RESERVED to assign the fixed panic bits, ALLOC to
 *                allocate from-host dynamically
 *  tohost_irq_handler, data - handler (and cookie) for the to-host bit
 *
 * Returns 0 on success or a negative error; on failure every resource
 * acquired so far (buffer, to-host bit) is released again.
 */
int mif_stream_init(struct mif_stream *stream, enum scsc_mif_abs_target target, enum MIF_STREAM_DIRECTION direction, uint32_t num_packets, uint32_t packet_size,
		    struct scsc_mx *mx, enum MIF_STREAM_INTRBIT_TYPE intrbit, mifintrbit_handler tohost_irq_handler, void *data)
{
	struct mifintrbit *intr;
	int r, r1, r2;

	stream->mx = mx;
	r = cpacketbuffer_init(&stream->buffer, num_packets, packet_size, mx);
	if (r)
		return r;

	intr = scsc_mx_get_intrbit(mx);

	r1 = mifintrbit_alloc_tohost(intr, tohost_irq_handler, data);
	if (r1 < 0) {
		cpacketbuffer_release(&stream->buffer);
		return r1;
	}

	/**
	 * MIF interrupt bit 0 in both the to-r4 and to-m4 registers are reserved
	 * for purpose of forcing panics from the MX Manager directly or via the
	 * gdb monitor stacks.
	 *
	 * At stream initialization the gdb transport requests for the reserved bits
	 * rather than dynamic allocation of interrupt bits.
	 *
	 * So if the interrupt bit type requested is Reserved, just Assign the pre-
	 * reserved interrupt bits.
	 */
	if (intrbit == MIF_STREAM_INTRBIT_TYPE_RESERVED) {
		if (target == SCSC_MIF_ABS_TARGET_M4)
			r2 = MIFINTRBIT_RESERVED_PANIC_M4;
#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
		else if (target == SCSC_MIF_ABS_TARGET_M4_1)
			r2 = MIFINTRBIT_RESERVED_PANIC_M4_1;
#endif
		else
			r2 = MIFINTRBIT_RESERVED_PANIC_R4;
	} else
		r2 = mifintrbit_alloc_fromhost(intr, target);

	if (r2 < 0) {
		cpacketbuffer_release(&stream->buffer);
		mifintrbit_free_tohost(intr, r1);
		return r2;
	}
	/* Which bit is "read" and which is "write" depends on direction */
	switch (direction) {
	case MIF_STREAM_DIRECTION_OUT:
		stream->read_bit_idx = r1;
		stream->write_bit_idx = r2;
		break;
	case MIF_STREAM_DIRECTION_IN:
		/* Default value for the shared memory region */
		memset(stream->buffer.buffer, 0xff, num_packets * packet_size);
		/* Commit */
		smp_wmb();
		stream->read_bit_idx = r2;
		stream->write_bit_idx = r1;
		break;
	default:
		cpacketbuffer_release(&stream->buffer);
		mifintrbit_free_tohost(intr, r1);
		mifintrbit_free_fromhost(intr, r2, target);
		return -EINVAL;
	}
	stream->direction = direction;
	stream->peer = (enum MIF_STREAM_PEER)target;
	return 0;
}
+
+void mif_stream_release(struct mif_stream *stream)
+{
+ struct mifintrbit *intr;
+
+ intr = scsc_mx_get_intrbit(stream->mx);
+ if (stream->direction == MIF_STREAM_DIRECTION_IN) {
+ mifintrbit_free_tohost(intr, stream->write_bit_idx);
+ mifintrbit_free_fromhost(intr, stream->read_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+ } else {
+ mifintrbit_free_tohost(intr, stream->read_bit_idx);
+ mifintrbit_free_fromhost(intr, stream->write_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+ }
+ cpacketbuffer_release(&stream->buffer);
+}
+
+uint32_t mif_stream_read(struct mif_stream *stream, void *buf, uint32_t num_bytes)
+{
+ struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
+
+ uint32_t num_bytes_read = cpacketbuffer_read(&stream->buffer, buf, num_bytes);
+
+ if (num_bytes_read > 0)
+ /* Signal that the read is finished to anyone interested */
+ mif_abs->irq_bit_set(mif_abs, stream->read_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+
+ return num_bytes_read;
+}
+
+const void *mif_stream_peek(struct mif_stream *stream, const void *current_packet)
+{
+ return cpacketbuffer_peek(&stream->buffer, current_packet);
+}
+
+void mif_stream_peek_complete(struct mif_stream *stream, const void *packet)
+{
+ struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
+
+ cpacketbuffer_peek_complete(&stream->buffer, packet);
+
+ /* Signal that the read is finished to anyone interested */
+ mif_abs->irq_bit_set(mif_abs, stream->read_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+}
+
+bool mif_stream_write(struct mif_stream *stream, const void *buf, uint32_t num_bytes)
+{
+ struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
+
+ if (!cpacketbuffer_write(&stream->buffer, buf, num_bytes))
+ return false;
+
+ /* Kick the assigned interrupt to let others know new data is available */
+ mif_abs->irq_bit_set(mif_abs, stream->write_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+
+ return true;
+}
+
+bool mif_stream_write_gather(struct mif_stream *stream, const void **bufs, uint32_t *lengths, uint32_t num_bufs)
+{
+ struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(stream->mx);
+
+ if (!cpacketbuffer_write_gather(&stream->buffer, bufs, lengths, num_bufs))
+ return false;
+
+ /* Kick the assigned interrupt to let others know new data is available */
+ mif_abs->irq_bit_set(mif_abs, stream->write_bit_idx, (enum scsc_mif_abs_target)stream->peer);
+ return true;
+}
+
+uint32_t mif_stream_block_size(struct mif_stream *stream)
+{
+ return cpacketbuffer_packet_size(&stream->buffer);
+}
+
+uint8_t mif_stream_read_interrupt(struct mif_stream *stream)
+{
+ return stream->read_bit_idx;
+}
+
+uint8_t mif_stream_write_interrupt(struct mif_stream *stream)
+{
+ return stream->write_bit_idx;
+}
+
+void mif_stream_log(const struct mif_stream *stream, enum scsc_log_level log_level)
+{
+ cpacketbuffer_log(&stream->buffer, log_level);
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * MIF stream (Interface)
+ *
+ * Provides a one-way communication mechanism between two points. The consumer side
+ * will be notified via an interrupt when the producer side writes data to the
+ * stream, and likewise the producer will be notified when the consumer has read
+ * data from the stream.
+ *
+ * It is expected that the data sent across the stream consists of fixed-size
+ * packets, and that the underlying storage mechanism is initialised to use a packet size
+ * that is at least as large as the largest message size. If this is not the case,
+ * callers are responsible for handling reading of partial messages from the stream
+ * in multiples of the packet size.
+ */
+
+#ifndef MIFSTREAM_H__
+#define MIFSTREAM_H__
+
+/* Uses */
+
+#include "cpacket_buffer.h"
+#include "mifintrbit.h"
+#include "scsc_logring_common.h"
+
+/* Public Types */
+
+enum MIF_STREAM_PEER {
+ MIF_STREAM_PEER_R4,
+ MIF_STREAM_PEER_M4,
+};
+
+enum MIF_STREAM_DIRECTION {
+ MIF_STREAM_DIRECTION_IN,
+ MIF_STREAM_DIRECTION_OUT,
+};
+
+/**
+ * Defines for the MIF Stream interrupt bits
+ *
+ * MIF_STREAM_INTRBIT_TYPE_RESERVED: the bits are reserved
+ * at initialization and are assigned to GDB transport channels.
+ * It is for purpose of forcing Panics from either MX manager or GDB
+ *
+ * MIF_STREAM_INTRBIT_TYPE_ALLOC: the bits are allocated dynamically
+ * when a stream is initialized
+ */
+enum MIF_STREAM_INTRBIT_TYPE {
+ MIF_STREAM_INTRBIT_TYPE_RESERVED,
+ MIF_STREAM_INTRBIT_TYPE_ALLOC,
+};
+
+/* Forward Decls */
+
+struct mif_stream;
+
+/* Public Functions */
+
+/**
+ * Initialises MIF Stream state.
+ */
+int mif_stream_init(struct mif_stream *stream, enum scsc_mif_abs_target target, enum MIF_STREAM_DIRECTION direction, uint32_t num_packets, uint32_t packet_size,
+ struct scsc_mx *mx, enum MIF_STREAM_INTRBIT_TYPE intrbit, mifintrbit_handler tohost_irq_handler, void *data);
/**
 * Releases MIF Stream state: frees its interrupt bits and buffer.
 */
+void mif_stream_release(struct mif_stream *stream);
+/**
+ * Reads the given number of bytes from the MIF stream, copying them
+ * to the provided address. This removes the read data from the stream.
+ *
+ * Returns the number of bytes read.
+ */
+uint32_t mif_stream_read(struct mif_stream *stream, void *buf, uint32_t num_bytes);
+
+/**
+ * Returns a pointer to the next packet of data within the stream, without
 * removing it. This can be used to process data in place without needing to
+ * copy it first.
+ *
+ * If multiple packets are present these can be read in turn by setting the value
+ * of current_packet to the returned value from the previous call to mif_stream_peek.
+ * Each time the returned pointer will advance in the stream by mif_stream_block_size()
+ * bytes.
+ *
+ * Callers cannot assume that multiple calls to mif_stream_peek() will return
+ * consecutive addresses.
+ *
+ * mif_stream_peek_complete must be called to remove the packet(s) from the stream.
+ *
+ * Returns a pointer to the beginning of the packet to read, or NULL if there is no
+ * packet to process.
+ *
+ * Example use:
+ * // Get the first data packet
+ * void *current_packet = mif_stream_peek( buffer, NULL );
+ * void *last_packet = NULL;
+ * while( current_packet != NULL )
+ * {
+ * // Process data packet
+ * ...
+ *
+ * // Get the next data packet
+ * last_packet = current_packet;
+ * current_packet = mif_stream_peek( buffer, current_packet );
+ * }
+ *
+ * // Remove all processed packets from the stream
+ * if( last_packet != NULL )
+ * {
 *     mif_stream_peek_complete( buffer, last_packet );
+ * }
+ */
+const void *mif_stream_peek(struct mif_stream *stream, const void *current_packet);
+
+/**
+ * Removes all packets from the stream up to and including the given
+ * packet.
+ *
+ * This must be called after using mif_stream_peek to indicate that packet(s)
+ * can be removed from the stream.
+ */
+void mif_stream_peek_complete(struct mif_stream *stream, const void *packet);
+
+/**
+ * Writes the given number of bytes to the MIF stream.
+ *
+ * Returns true if the block was written, false if there is not enough
+ * free space in the buffer for the data.
+ */
+bool mif_stream_write(struct mif_stream *stream, const void *buf, uint32_t num_bytes);
+
+/**
+ * Writes a set of non-contiguous data blocks to the MIF stream
+ * as a contiguous set.
+ *
+ * Returns true if the blocks were written, false if there is not enough
+ * free space in the buffer for the block.
+ */
+bool mif_stream_write_gather(struct mif_stream *stream, const void **bufs, uint32_t *lengths, uint32_t num_bufs);
+
+/**
+ * Returns the size in bytes of each individual block within the stream.
+ *
+ * When reading data from the stream using mif_stream_read or mif_stream_peek
+ * this value is the amount of data
+ */
+uint32_t mif_stream_block_size(struct mif_stream *stream);
+
+/**
+ * Returns the interrupt number that will be triggered by reads from the stream
+ */
+uint8_t mif_stream_read_interrupt(struct mif_stream *stream);
+
+/**
+ * Returns the interrupt number that will be triggered by writes to the stream
+ */
+uint8_t mif_stream_write_interrupt(struct mif_stream *stream);
+
+/*
+ * Initialises the stream's part of the configuration area
+ */
+void mif_stream_config_serialise(struct mif_stream *stream, struct mxstreamconf *stream_conf);
+
+/**
+ * Log the state of this stream at the specified log_level.
+ */
+void mif_stream_log(const struct mif_stream *stream, enum scsc_log_level log_level);
+
+/**
+ * MIF Packet Stream Descriptor.
+ */
+struct mif_stream {
+ struct scsc_mx *mx;
+ struct cpacketbuffer buffer;
+
+ /** MIF stream peer, R4 or M4? */
+ enum MIF_STREAM_PEER peer;
+
+ /** MIF interrupt bit index, one in each direction */
+ uint8_t read_bit_idx;
+ uint8_t write_bit_idx;
+ enum MIF_STREAM_DIRECTION direction;
+};
+
+#endif /* MIFSTREAM_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/wakelock.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include "scsc_mx_impl.h"
+
+#ifdef CONFIG_SCSC_CLK20MHZ_TEST
+#include "mx140_clk_test.h"
+#endif
+
+/* Note: define MX140_CLK_VERBOSE_CALLBACKS to get more callbacks when events occur.
+ * without this, the only callbacks are failure/success from request()
+ */
+static int auto_start;
+module_param(auto_start, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_start, "Start service automatically: Default 0: disabled, 1: Enabled");
+
+static DEFINE_MUTEX(clk_lock);
+static DEFINE_MUTEX(clk_work_lock);
+
+struct workqueue_struct *mx140_clk20mhz_wq;
+struct work_struct mx140_clk20mhz_work;
+
+static int recovery;
+static int recovery_pending_stop_close;
+#define MX140_SERVICE_RECOVERY_TIMEOUT 20000
+
/* Static Singleton: all state for the mx140 20MHz clock service */
static struct mx140_clk20mhz {
	/* scsc_service_client has to be the first */
	struct scsc_service_client mx140_clk20mhz_service_client;
	struct scsc_service *mx140_clk20mhz_service; /* Handle of the started service */
	struct scsc_mx *mx; /* Maxwell instance providing the service */

	atomic_t clk_request; /* presumably counts outstanding client requests - verify against users */
	atomic_t maxwell_is_present;
	atomic_t mx140_clk20mhz_service_started;
	atomic_t request_pending; /* presumably a request arrived before Maxwell probed - verify */
	atomic_t mx140_clk20mhz_service_failed;

	void *data; /* Opaque cookie passed back to the client callback */
	void (*mx140_clk20mhz_client_cb)(void *data, enum mx140_clk20mhz_status event);

	struct proc_dir_entry *procfs_ctrl_dir; /* driver/mx140_clk procfs dir */
	u32 procfs_ctrl_dir_num;

	struct wake_lock clk_wake_lock;
	struct completion recovery_probe_completion; /* Signalled on re-probe during recovery */

} clk20mhz;
+
+static void mx140_clk20mhz_wq_stop(void);
+static int mx140_clk20mhz_stop_service(struct scsc_mx *mx);
+
+#ifndef AID_MXPROC
+#define AID_MXPROC 0
+#endif
+
+static void mx140_clk20mhz_restart(void);
+
+#define MX_CLK20_DIRLEN 128
+static const char *procdir_ctrl = "driver/mx140_clk";
+static u32 proc_count;
+
+/* Framework for registering proc handlers */
+/* Declare read+write handlers for <name> and a matching file_operations */
+#define MX_CLK20_PROCFS_RW_FILE_OPS(name) \
+ static ssize_t mx_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
+ static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mx_procfs_ ## name ## _fops = { \
+ .read = mx_procfs_ ## name ## _read, \
+ .write = mx_procfs_ ## name ## _write, \
+ .open = mx_clk20_procfs_generic_open, \
+ .llseek = generic_file_llseek \
+ }
+/* Read-only variant of the above */
+#define MX_CLK20_PROCFS_RO_FILE_OPS(name) \
+ static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mx_procfs_ ## name ## _fops = { \
+ .read = mx_procfs_ ## name ## _read, \
+ .open = mx_clk20_procfs_generic_open, \
+ .llseek = generic_file_llseek \
+ }
+
+/* Fetch the private data stored with a proc entry, across kernel versions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MX_PDE_DATA(inode) PDE_DATA(inode)
+#else
+#define MX_PDE_DATA(inode) (PDE(inode)->data)
+#endif
+
+/* Chown a proc entry to AID_MXPROC, across kernel versions */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MX_CLK20_PROCFS_SET_UID_GID(_entry) \
+ do { \
+ kuid_t proc_kuid = KUIDT_INIT(AID_MXPROC); \
+ kgid_t proc_kgid = KGIDT_INIT(AID_MXPROC); \
+ proc_set_user(_entry, proc_kuid, proc_kgid); \
+ } while (0)
+#else
+#define MX_CLK20_PROCFS_SET_UID_GID(entry) \
+ do { \
+ (entry)->uid = AID_MXPROC; \
+ (entry)->gid = AID_MXPROC; \
+ } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+/* Create a proc file under 'parent' with _sdev as its private data.
+ * proc_create_data() returns NULL on failure, so the chown must be
+ * guarded (the legacy branch below already does this); passing NULL to
+ * proc_set_user() would dereference it.
+ */
+#define MX_CLK20_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+	do { \
+		struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mx_procfs_ ## name ## _fops, _sdev); \
+		if (entry) \
+			MX_CLK20_PROCFS_SET_UID_GID(entry); \
+	} while (0)
+#else
+#define MX_CLK20_PROCFS_ADD_FILE(_data, name, parent, mode) \
+	do { \
+		struct proc_dir_entry *entry; \
+		entry = create_proc_entry(# name, mode, parent); \
+		if (entry) { \
+			entry->proc_fops = &mx_procfs_ ## name ## _fops; \
+			entry->data = _data; \
+			MX_CLK20_PROCFS_SET_UID_GID(entry); \
+		} \
+	} while (0)
+#endif
+
+#define MX_CLK20_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
+
+/* Open handler: stash the entry's private data for the read/write handlers */
+static int mx_clk20_procfs_generic_open(struct inode *inode, struct file *file)
+{
+ file->private_data = MX_PDE_DATA(inode);
+ return 0;
+}
+
+/* No-op read handler: the restart node is effectively write-only */
+static ssize_t mx_procfs_restart_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	(void)file;
+	(void)user_buf;
+	(void)count;
+	(void)ppos;
+
+	return 0;
+}
+
+/* Restart clock service: any write to the proc node triggers a full
+ * close/reopen cycle of the CLK20MHZ service. The written data is ignored.
+ */
+static ssize_t mx_procfs_restart_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ (void)file;
+ (void)user_buf;
+ (void)ppos;
+
+ mx140_clk20mhz_restart();
+
+ SCSC_TAG_INFO(MX_PROC, "OK\n");
+ /* Claim the whole write was consumed */
+ return count;
+}
+
+/* Register proc handler */
+MX_CLK20_PROCFS_RW_FILE_OPS(restart);
+
+/* Populate proc node: create /proc/driver/mx140_clk<N> with a 'restart'
+ * control file. <N> is proc_count, which keeps names unique across
+ * probe/remove cycles. Returns 0 on success, -EINVAL on mkdir failure.
+ */
+static int mx140_clk20mhz_create_ctrl_proc_dir(struct mx140_clk20mhz *clk20mhz)
+{
+ char dir[MX_CLK20_DIRLEN];
+ struct proc_dir_entry *parent;
+
+ (void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, proc_count);
+ parent = proc_mkdir(dir, NULL);
+ if (!parent) {
+ SCSC_TAG_ERR(MX_PROC, "failed to create proc dir %s\n", procdir_ctrl);
+ return -EINVAL;
+ }
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+ /* Old kernels: data pointer lives directly on the entry */
+ parent->data = clk20mhz;
+#endif
+ clk20mhz->procfs_ctrl_dir = parent;
+ clk20mhz->procfs_ctrl_dir_num = proc_count;
+ MX_CLK20_PROCFS_ADD_FILE(clk20mhz, restart, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
+ proc_count++;
+
+ return 0;
+}
+
+/* Remove proc node: tear down the 'restart' file and the numbered proc dir
+ * created by mx140_clk20mhz_create_ctrl_proc_dir(). Safe to call when the
+ * dir was never created (procfs_ctrl_dir == NULL).
+ */
+static void mx140_clk20mhz_remove_ctrl_proc_dir(struct mx140_clk20mhz *clk20mhz)
+{
+ if (clk20mhz->procfs_ctrl_dir) {
+ char dir[MX_CLK20_DIRLEN];
+
+ MX_CLK20_PROCFS_REMOVE_FILE(restart, clk20mhz->procfs_ctrl_dir);
+ /* Rebuild the same name used at creation time from the stored number */
+ (void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, clk20mhz->procfs_ctrl_dir_num);
+ remove_proc_entry(dir, NULL);
+ clk20mhz->procfs_ctrl_dir = NULL;
+ proc_count--;
+ SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
+ }
+}
+
+/* Maxwell manager has detected an issue and the service should freeze */
+static void mx140_clk20mhz_stop_on_failure(struct scsc_service_client *client)
+{
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
+
+ /* Enter recovery: re-arm the completion that request() waits on so a
+ * racing request blocks until the recovery probe finishes.
+ */
+ mutex_lock(&clk_work_lock);
+ recovery = 1;
+ reinit_completion(&clk20mhz.recovery_probe_completion);
+ mutex_unlock(&clk_work_lock);
+
+#ifdef MX140_CLK_VERBOSE_CALLBACKS
+ /* If call back is registered, inform the user about an asynchronous failure */
+ if (clk20mhz.mx140_clk20mhz_client_cb)
+ clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_ASYNC_FAIL);
+#endif
+
+ SCSC_TAG_INFO(CLK20, "\n");
+}
+
+/* Maxwell manager has handled a failure and the chip has been reset:
+ * just latch the failed flag and (optionally) notify the client.
+ */
+static void mx140_clk20mhz_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
+{
+	(void)client;
+	(void)scsc_panic_code;
+
+	atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
+
+#ifdef MX140_CLK_VERBOSE_CALLBACKS
+	/* If call back is registered, inform the user about an asynchronous failure */
+	if (clk20mhz.mx140_clk20mhz_client_cb)
+		clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_ASYNC_FAIL);
+#endif
+	SCSC_TAG_INFO(CLK20, "\n");
+}
+
+/* Open and start the CLK20MHZ service, bringing up the chip if needed.
+ * After a failure, USBPLL ownership is first handed back to WLBT via
+ * mif_restart(). Returns 0 on success or a negative error code; on any
+ * failure after a successful open, the service handle is closed again so
+ * it is not leaked (the original code returned -EIO with the handle open).
+ */
+static int mx140_clk20mhz_start_service(struct scsc_mx *mx)
+{
+	int r;
+
+	/* Open the service and get resource pointers */
+	clk20mhz.mx140_clk20mhz_service = scsc_mx_service_open(mx, SCSC_SERVICE_ID_CLK20MHZ, &clk20mhz.mx140_clk20mhz_service_client, &r);
+	if (!clk20mhz.mx140_clk20mhz_service) {
+		SCSC_TAG_ERR(CLK20, "scsc_mx_service_open failed %d\n", r);
+		return r;
+	}
+
+	/* In case of recovery ensure WLBT has ownership */
+	if (atomic_read(&clk20mhz.mx140_clk20mhz_service_failed)) {
+		struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(clk20mhz.mx);
+
+		if (!mif)
+			goto error;
+
+		if (mif->mif_restart)
+			mif->mif_restart(mif);
+
+		atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 0);
+	}
+
+	/* Start service. Will bring-up the chip if the chip is disabled */
+	if (scsc_mx_service_start(clk20mhz.mx140_clk20mhz_service, 0)) {
+		SCSC_TAG_ERR(CLK20, "scsc_mx_service_start failed\n");
+		goto error;
+	}
+
+	atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 1);
+
+	/* If call back is registered, inform the user that the service has started */
+	if (atomic_read(&clk20mhz.clk_request) && clk20mhz.mx140_clk20mhz_client_cb)
+		clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_STARTED);
+
+	return 0;
+error:
+	/* Don't leak the open service handle on the failure path */
+	if (scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service))
+		SCSC_TAG_ERR(CLK20, "scsc_mx_service_close failed\n");
+	clk20mhz.mx140_clk20mhz_service = NULL;
+	return -EIO;
+}
+
+/* Stop and close the CLK20MHZ service.
+ * Returns -ENODEV if the service was never started, 0 on success.
+ * NOTE: with MX140_CLK_VERBOSE_CALLBACKS defined, a stop/close failure
+ * returns -EIO early (after notifying the client); without it, stop
+ * failures are deliberately ignored so that close is still attempted.
+ * The mx parameter is currently unused; the singleton holds the handles.
+ */
+static int mx140_clk20mhz_stop_service(struct scsc_mx *mx)
+{
+ int r;
+
+ if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started)) {
+ SCSC_TAG_INFO(CLK20, "Service not started\n");
+ return -ENODEV;
+ }
+
+ /* Stop service. */
+ if (scsc_mx_service_stop(clk20mhz.mx140_clk20mhz_service)) {
+ SCSC_TAG_ERR(CLK20, "scsc_mx_service_stop failed\n");
+#ifdef MX140_CLK_VERBOSE_CALLBACKS
+ /* If call back is registered, inform the user that the service has failed to stop */
+ if (clk20mhz.mx140_clk20mhz_client_cb)
+ clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_NOT_STOPPED);
+ return -EIO;
+#endif
+ }
+
+ /* Ignore a service_stop timeout above as it's better to try to close */
+
+ /* Close service, if no other service is using Maxwell, chip will turn off */
+ r = scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
+ if (r) {
+ SCSC_TAG_ERR(CLK20, "scsc_mx_service_close failed\n");
+
+#ifdef MX140_CLK_VERBOSE_CALLBACKS
+ /* If call back is registered, inform the user that the service has failed to close */
+ if (clk20mhz.mx140_clk20mhz_client_cb)
+ clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_NOT_STOPPED);
+ return -EIO;
+#endif
+ }
+
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+
+#ifdef MX140_CLK_VERBOSE_CALLBACKS
+ /* If call back is registered, inform the user that the service has stopped */
+ if (clk20mhz.mx140_clk20mhz_client_cb)
+ clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, MX140_CLK_STOPPED);
+#endif
+ return 0;
+}
+
+/* Max attempts to start the service while waiting for the firmware
+ * filesystem to be mounted (500 ms apart => ~10 s total).
+ */
+#define MX140_CLK_TRIES (20)
+
+/* Deferred service start, run on mx140_clk20mhz_wq.
+ * Retries start while the firmware is not yet available (-EAGAIN), bails
+ * out if the request was withdrawn, and on permanent failure hands USBPLL
+ * ownership to the AP and notifies the client. Note the callback may
+ * deliberately report MX140_CLK_STARTED on failure (see comments below)
+ * to work around limited error handling in the USB/PLL driver.
+ */
+static void mx140_clk20mhz_work_func(struct work_struct *work)
+{
+ int i;
+ int r = 0;
+ enum mx140_clk20mhz_status status;
+
+ mutex_lock(&clk_work_lock);
+
+ for (i = 0; i < MX140_CLK_TRIES; i++) {
+ if (atomic_read(&clk20mhz.clk_request) == 0) {
+ SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service no longer requested\n");
+ recovery = 0;
+ mutex_unlock(&clk_work_lock);
+ return;
+ }
+
+ SCSC_TAG_INFO(CLK20, "Calling mx140_clk20mhz_start_service\n");
+ r = mx140_clk20mhz_start_service(clk20mhz.mx);
+ switch (r) {
+ case 0:
+ SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service OK\n");
+ recovery = 0;
+ mutex_unlock(&clk_work_lock);
+ return;
+ case -EAGAIN:
+ SCSC_TAG_INFO(CLK20, "FW not found because filesystem not mounted yet, retrying...\n");
+ msleep(500); /* No FS yet, retry */
+ break;
+ default:
+ SCSC_TAG_INFO(CLK20, "mx140_clk20mhz_start_service failed %d\n", r);
+ goto err;
+ }
+ }
+err:
+ SCSC_TAG_ERR(CLK20, "Unable to start the 20MHz clock service\n");
+
+ /* Deferred service start failure or timeout.
+ * We assume it'll never manage to start - e.g. bad or missing f/w.
+ */
+ if (r) {
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(clk20mhz.mx);
+
+ SCSC_TAG_ERR(CLK20, "Deferred start timeout (%d)\n", r);
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 1);
+
+ /* Switch USBPLL ownership to AP so USB may be used */
+ if (mif && mif->mif_cleanup)
+ mif->mif_cleanup(mif);
+ }
+
+ /* If call back is registered, inform the user that the service has failed to start */
+ if (atomic_read(&clk20mhz.clk_request) && clk20mhz.mx140_clk20mhz_client_cb) {
+ /* The USB/PLL driver has inadequate error handing...
+ * Lie that the start was successful when AP has control
+ */
+ status = atomic_read(&clk20mhz.mx140_clk20mhz_service_failed) ? MX140_CLK_STARTED : MX140_CLK_NOT_STARTED;
+
+ /* Also lie that the start was successful when the mx140 driver is halted after f/w panic */
+ if (r == -EILSEQ || r == -EPERM)
+ status = MX140_CLK_STARTED;
+
+ SCSC_TAG_INFO(CLK20, "cb %d\n", status);
+ clk20mhz.mx140_clk20mhz_client_cb(clk20mhz.data, status);
+ }
+ recovery = 0;
+ mutex_unlock(&clk_work_lock);
+}
+
+/* Create the deferred-start workqueue and bind its work item.
+ * NOTE(review): create_singlethread_workqueue() can return NULL; the
+ * result is not checked here, so a later queue_work() would oops on OOM.
+ */
+static void mx140_clk20mhz_wq_init(void)
+{
+ mx140_clk20mhz_wq = create_singlethread_workqueue("mx140_clk20mhz_wq");
+ INIT_WORK(&mx140_clk20mhz_work, mx140_clk20mhz_work_func);
+}
+
+/* Cancel the deferred start and wait until any in-flight run finishes */
+static void mx140_clk20mhz_wq_stop(void)
+{
+ cancel_work_sync(&mx140_clk20mhz_work);
+ flush_workqueue(mx140_clk20mhz_wq);
+}
+
+/* Drain and destroy the workqueue (counterpart of mx140_clk20mhz_wq_init) */
+static void mx140_clk20mhz_wq_deinit(void)
+{
+ mx140_clk20mhz_wq_stop();
+ destroy_workqueue(mx140_clk20mhz_wq);
+}
+
+/* Kick the deferred service-start work item */
+static void mx140_clk20mhz_wq_start(void)
+{
+ queue_work(mx140_clk20mhz_wq, &mx140_clk20mhz_work);
+}
+
+/* Register a callback function to indicate to the (USB) client the status of
+ * the clock request. Only a single client is supported; a second caller
+ * gets -EEXIST until the first unregisters.
+ */
+int mx140_clk20mhz_register(void (*client_cb)(void *data, enum mx140_clk20mhz_status event), void *data)
+{
+	int ret = 0;
+
+	SCSC_TAG_INFO(CLK20, "cb %p, %p\n", client_cb, data);
+
+	mutex_lock(&clk_lock);
+	if (clk20mhz.mx140_clk20mhz_client_cb != NULL) {
+		SCSC_TAG_ERR(CLK20, "clk20Mhz client already registered\n");
+		ret = -EEXIST;
+	} else {
+		SCSC_TAG_INFO(CLK20, "clk20Mhz client registered\n");
+		clk20mhz.mx140_clk20mhz_client_cb = client_cb;
+		clk20mhz.data = data;
+	}
+	mutex_unlock(&clk_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mx140_clk20mhz_register);
+
+/* Unregister callback function (no-op if no client is registered) */
+void mx140_clk20mhz_unregister(void)
+{
+	SCSC_TAG_INFO(CLK20, "\n");
+
+	mutex_lock(&clk_lock);
+	if (clk20mhz.mx140_clk20mhz_client_cb != NULL) {
+		clk20mhz.mx140_clk20mhz_client_cb = NULL;
+		clk20mhz.data = NULL;
+	} else {
+		SCSC_TAG_INFO(CLK20, "clk20Mhz client not registered\n");
+	}
+	mutex_unlock(&clk_lock);
+}
+EXPORT_SYMBOL(mx140_clk20mhz_unregister);
+
+/* Indicate that an external client requires mx140's 20 MHz clock.
+ * The Core driver will boot mx140 as required and ensure that the
+ * clock remains running.
+ *
+ * If a callback was installed by register(), do this asynchronously.
+ * Returns 0 always; failures are reported via the callback.
+ */
+int mx140_clk20mhz_request(void)
+{
+ mutex_lock(&clk_lock);
+ atomic_inc(&clk20mhz.clk_request);
+
+ SCSC_TAG_INFO(CLK20, "%d\n", atomic_read(&clk20mhz.clk_request));
+
+ /* Before probe: remember the request, replayed when Maxwell appears */
+ if (!atomic_read(&clk20mhz.maxwell_is_present)) {
+ SCSC_TAG_INFO(CLK20, "Maxwell is not present yet, store request\n");
+ atomic_set(&clk20mhz.request_pending, 1);
+ mutex_unlock(&clk_lock);
+ return 0;
+ }
+
+ if (recovery) {
+ int r;
+
+ /* Drop the lock while waiting so the recovery probe can proceed */
+ mutex_unlock(&clk_lock);
+ r = wait_for_completion_timeout(&clk20mhz.recovery_probe_completion,
+ msecs_to_jiffies(MX140_SERVICE_RECOVERY_TIMEOUT));
+ mutex_lock(&clk_lock);
+ if (r == 0) {
+ SCSC_TAG_INFO(CLK20, "recovery_probe_completion timeout - try a start\n");
+ mx140_clk20mhz_wq_start();
+ }
+ } else if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started))
+ mx140_clk20mhz_wq_start();
+ else
+ SCSC_TAG_INFO(CLK20, "Service already started\n");
+
+ mutex_unlock(&clk_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mx140_clk20mhz_request);
+
+/* Indicate that an external client no longer requires mx140's 20 MHz clock.
+ * The Core driver will shut down mx140 if no other services are
+ * currently running.
+ *
+ * If a callback was installed by register(), do this asynchronously.
+ * Returns 0 on success or the error from stopping the service.
+ */
+int mx140_clk20mhz_release(void)
+{
+	int ret = 0;
+
+	mutex_lock(&clk_lock);
+	atomic_dec(&clk20mhz.clk_request);
+	SCSC_TAG_INFO(CLK20, "%d\n", atomic_read(&clk20mhz.clk_request));
+
+	if (!atomic_read(&clk20mhz.maxwell_is_present)) {
+		SCSC_TAG_INFO(CLK20, "Maxwell is released before probe\n");
+		/* Fix: the original tested !request_pending here, so a stored
+		 * request was never cancelled (and the log fired when nothing
+		 * was pending). Cancel only when a request IS pending.
+		 */
+		if (atomic_read(&clk20mhz.request_pending)) {
+			SCSC_TAG_INFO(CLK20, "Maxwell had request pending. Cancel it\n");
+			atomic_set(&clk20mhz.request_pending, 0);
+		}
+		mutex_unlock(&clk_lock);
+		return 0;
+	}
+
+	/* Cancel any pending attempt */
+	mx140_clk20mhz_wq_stop();
+
+	if (recovery) {
+		/* Defer the stop/close: the recovery probe will see this flag */
+		recovery_pending_stop_close = 1;
+	} else {
+		ret = mx140_clk20mhz_stop_service(clk20mhz.mx);
+		if (ret == -ENODEV) {
+			/* Suppress error if it wasn't running */
+			ret = 0;
+		}
+	}
+
+	/* Suppress stop failure if the service is failed */
+	if (atomic_read(&clk20mhz.mx140_clk20mhz_service_failed)) {
+		SCSC_TAG_INFO(CLK20, "Return OK as control is with AP\n");
+		atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+		ret = 0;
+	}
+
+	mutex_unlock(&clk_lock);
+	return ret;
+}
+EXPORT_SYMBOL(mx140_clk20mhz_release);
+
+/* Probe callback after platform driver is registered.
+ * Called both for the initial probe (reason != RECOVERY) and when the chip
+ * comes back after a recovery reset (reason == RECOVERY). In the recovery
+ * case the deferred start is re-queued unless a release() arrived during
+ * recovery (recovery_pending_stop_close).
+ */
+void mx140_clk20mhz_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ SCSC_TAG_INFO(CLK20, "\n");
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery) {
+ SCSC_TAG_INFO(CLK20, "Ignore probe - no recovery in progress\n");
+ return;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery) {
+ SCSC_TAG_INFO(CLK20, "Recovery probe\n");
+
+ /**
+ * If recovery_pending_stop_close is set, then there was a stop
+ * during recovery (could be due to USB cable unplugged) so
+ * recovery should just stop here.
+ * The mx140_clk service has been closed in the remove callback.
+ */
+ mutex_lock(&clk_lock);
+ if (recovery_pending_stop_close) {
+ SCSC_TAG_INFO(CLK20, "Recovery probe - stop during recovery, so don't recover\n");
+ recovery_pending_stop_close = 0;
+ recovery = 0;
+ mutex_unlock(&clk_lock);
+ /**
+ * Should there have been a new start request during
+ * recovery (very unlikely), then the complete timeout
+ * will ensure that a start is requested.
+ */
+ return;
+ }
+ mutex_unlock(&clk_lock);
+
+ /* Re-queue the deferred start, then release any request() waiters */
+ mutex_lock(&clk_work_lock);
+ mx140_clk20mhz_wq_start();
+ mutex_unlock(&clk_work_lock);
+ complete_all(&clk20mhz.recovery_probe_completion);
+ } else {
+ SCSC_TAG_INFO(CLK20, "Maxwell probed\n");
+ clk20mhz.mx = mx;
+ clk20mhz.mx140_clk20mhz_service_client.stop_on_failure = mx140_clk20mhz_stop_on_failure;
+ clk20mhz.mx140_clk20mhz_service_client.failure_reset = mx140_clk20mhz_failure_reset;
+
+ mx140_clk20mhz_create_ctrl_proc_dir(&clk20mhz);
+
+ mx140_clk20mhz_wq_init();
+
+ atomic_set(&clk20mhz.maxwell_is_present, 1);
+
+ /* Replay a request stored before probe, or honour auto_start */
+ mutex_lock(&clk_work_lock);
+ if ((auto_start || atomic_read(&clk20mhz.request_pending))) {
+ atomic_set(&clk20mhz.request_pending, 0);
+ SCSC_TAG_INFO(CLK20, "start pending service\n");
+ mx140_clk20mhz_wq_start();
+ }
+ mutex_unlock(&clk_work_lock);
+ }
+}
+
+
+/* Close and reopen the running CLK20MHZ service (triggered from procfs).
+ * USBPLL ownership is parked with the AP across the close so USB does not
+ * disconnect, then handed back to WLBT before the restart. A wake lock is
+ * held for the whole sequence. No-op if the service was not started.
+ */
+static void mx140_clk20mhz_restart(void)
+{
+ int r;
+ struct scsc_mif_abs *mif;
+
+ SCSC_TAG_INFO(CLK20, "\n");
+
+ wake_lock(&clk20mhz.clk_wake_lock);
+
+ mutex_lock(&clk_lock);
+
+ if (!atomic_read(&clk20mhz.mx140_clk20mhz_service_started)) {
+ SCSC_TAG_INFO(CLK20, "service wasn't started\n");
+ goto done;
+ }
+
+ mif = scsc_mx_get_mif_abs(clk20mhz.mx);
+ if (mif == NULL)
+ goto done;
+
+ /* Don't stop the 20 MHz clock service. Leave it running until
+ * WLBT resets due to the service_close().
+ */
+
+ /* Ensure USBPLL is running and owned by AP, to stop USB disconnect */
+ if (mif->mif_cleanup)
+ mif->mif_cleanup(mif);
+
+ r = scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
+ if (r) {
+ SCSC_TAG_ERR(CLK20, "scsc_mx_service_close failed (%d)\n", r);
+ goto done;
+ }
+
+ /* ...and restart the 20 MHz clock service */
+ clk20mhz.mx140_clk20mhz_service = scsc_mx_service_open(clk20mhz.mx, SCSC_SERVICE_ID_CLK20MHZ, &clk20mhz.mx140_clk20mhz_service_client, &r);
+ if (clk20mhz.mx140_clk20mhz_service == NULL) {
+ SCSC_TAG_ERR(CLK20, "reopen failed %d\n", r);
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+ goto done;
+ }
+
+ /* Ensure USBPLL is owned by WLBT again */
+ if (mif->mif_restart)
+ mif->mif_restart(mif);
+
+ r = scsc_mx_service_start(clk20mhz.mx140_clk20mhz_service, 0);
+ if (r) {
+ SCSC_TAG_ERR(CLK20, "restart failed %d\n", r);
+ r = scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
+ if (r)
+ SCSC_TAG_ERR(CLK20, "scsc_mx_service_close failed %d\n", r);
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+ goto done;
+ }
+
+ SCSC_TAG_INFO(CLK20, "restarted\n");
+done:
+ mutex_unlock(&clk_lock);
+ wake_unlock(&clk20mhz.clk_wake_lock);
+}
+
+/* Remove callback when platform driver is unregistered.
+ * For a recovery remove, the service handle is closed (parking USBPLL with
+ * the AP first unless a release() already handled it); for a normal remove,
+ * the proc dir and workqueue are torn down.
+ */
+void mx140_clk20mhz_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ int r;
+
+ mutex_lock(&clk_work_lock);
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery) {
+ SCSC_TAG_INFO(CLK20, "Ignore recovery remove: Service driver not active\n");
+ goto done;
+ } else if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery) {
+ struct scsc_mif_abs *mif;
+
+ SCSC_TAG_INFO(CLK20, "Recovery remove\n");
+
+ mutex_lock(&clk_lock);
+ mx140_clk20mhz_wq_stop();
+
+ mif = scsc_mx_get_mif_abs(clk20mhz.mx);
+ if (mif == NULL)
+ goto done_local;
+
+ /**
+ * If there's been a stop during recovery ensure that the
+ * mx140_clk service is closed in the mx driver, but do not
+ * touch USBPLL ownership since this will already have been
+ * handled.
+ */
+ if (!recovery_pending_stop_close) {
+ /* Don't stop the clock service - leave it running until
+ * service_close() resets WLBT.
+ */
+
+ /* Switch ownership of USBPLL to the AP. Ownership
+ * returns to WLBT after recovery completes.
+ */
+ if (mif->mif_cleanup)
+ mif->mif_cleanup(mif);
+ }
+
+ r = scsc_mx_service_close(clk20mhz.mx140_clk20mhz_service);
+ if (r)
+ SCSC_TAG_INFO(CLK20, "scsc_mx_service_close failed %d\n", r);
+
+ atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+done_local:
+ mutex_unlock(&clk_lock);
+ } else {
+ SCSC_TAG_INFO(CLK20, "Maxwell removed\n");
+ mx140_clk20mhz_remove_ctrl_proc_dir(&clk20mhz);
+ atomic_set(&clk20mhz.maxwell_is_present, 0);
+ mx140_clk20mhz_wq_deinit();
+ }
+
+done:
+ mutex_unlock(&clk_work_lock);
+}
+
+/* 20MHz client driver: registration record handed to the Maxwell framework */
+struct scsc_mx_module_client mx140_clk20mhz_driver = {
+ .name = "MX 20MHz clock client",
+ .probe = mx140_clk20mhz_probe,
+ .remove = mx140_clk20mhz_remove,
+};
+
+/* 20MHz service driver initialization.
+ * Initialises singleton state and registers with the Maxwell framework.
+ * Returns 0 on success or the framework registration error. On failure the
+ * wake lock is destroyed again (the original leaked it, preventing a clean
+ * module reload).
+ */
+static int __init mx140_clk20mhz_init(void)
+{
+	int ret;
+
+	SCSC_TAG_INFO(CLK20, "Registering service\n");
+
+	wake_lock_init(&clk20mhz.clk_wake_lock, WAKE_LOCK_SUSPEND, "clk20_wl");
+	init_completion(&clk20mhz.recovery_probe_completion);
+
+	/* Reset all bookkeeping state before the framework can call probe */
+	atomic_set(&clk20mhz.clk_request, 0);
+	atomic_set(&clk20mhz.maxwell_is_present, 0);
+	atomic_set(&clk20mhz.mx140_clk20mhz_service_started, 0);
+	atomic_set(&clk20mhz.request_pending, 0);
+	atomic_set(&clk20mhz.mx140_clk20mhz_service_failed, 0);
+
+	/* Register with Maxwell Framework */
+	ret = scsc_mx_module_register_client_module(&mx140_clk20mhz_driver);
+	if (ret) {
+		SCSC_TAG_ERR(CLK20, "scsc_mx_module_register_client_module failed: r=%d\n", ret);
+		/* Undo wake lock init so the module can be loaded again cleanly */
+		wake_lock_destroy(&clk20mhz.clk_wake_lock);
+		return ret;
+	}
+
+#ifdef CONFIG_SCSC_CLK20MHZ_TEST
+	mx140_clk_test_init();
+#endif
+	return 0;
+}
+
+/* Module exit: unregister from the framework, release any request() waiters
+ * still blocked on the recovery completion, then free the wake lock.
+ */
+static void __exit mx140_clk20mhz_exit(void)
+{
+ scsc_mx_module_unregister_client_module(&mx140_clk20mhz_driver);
+#ifdef CONFIG_SCSC_CLK20MHZ_TEST
+ mx140_clk_test_exit();
+#endif
+
+ complete_all(&clk20mhz.recovery_probe_completion);
+ wake_lock_destroy(&clk20mhz.clk_wake_lock);
+}
+
+module_init(mx140_clk20mhz_init);
+module_exit(mx140_clk20mhz_exit);
+
+MODULE_DESCRIPTION("Samsung Maxwell 20MHz Clock Service");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include "scsc_mx_impl.h"
+
+/* char device entry declarations: region, class and cdev backing the
+ * /dev/mx140_usb_clk_test_N test node
+ */
+static dev_t mx140_clk_test_dev_t;
+static struct class *mx140_clk_test_class;
+static struct cdev *mx140_clk_test_cdev;
+
+/* Call back function registered with 20MHz clock framework: logs which
+ * status event was delivered; unknown events are silently ignored.
+ */
+static void client_cb(void *data, enum mx140_clk20mhz_status event)
+{
+	const char *event_name;
+
+	(void)data;
+
+	switch (event) {
+	case MX140_CLK_STARTED:
+		event_name = "MX140_CLK_STARTED";
+		break;
+	case MX140_CLK_STOPPED:
+		event_name = "MX140_CLK_STOPPED";
+		break;
+	case MX140_CLK_NOT_STARTED:
+		event_name = "MX140_CLK_NOT_STARTED";
+		break;
+	case MX140_CLK_NOT_STOPPED:
+		event_name = "MX140_CLK_NOT_STOPPED";
+		break;
+	case MX140_CLK_ASYNC_FAIL:
+		event_name = "MX140_CLK_ASYNC_FAIL";
+		break;
+	default:
+		return;
+	}
+
+	SCSC_TAG_INFO(CLK20_TEST, "Event %s received\n", event_name);
+}
+
+/* Device open: hook this test client into the 20MHz clock framework */
+static int mx140_clk_test_dev_open(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	(void)file;
+
+	mx140_clk20mhz_register(client_cb, NULL);
+	return 0;
+}
+
+/* Device close: detach the test client from the 20MHz clock framework */
+static int mx140_clk_test_dev_release(struct inode *inode, struct file *file)
+{
+	(void)inode;
+	(void)file;
+
+	mx140_clk20mhz_unregister();
+	return 0;
+}
+
+
+/* Handle writes of "0"/"1" (optionally with a trailing newline) to stop or
+ * start the 20 MHz clock. Always claims the whole write was consumed, even
+ * on bad input, so writers don't loop retrying.
+ * Fixes vs original: a zero-length write no longer feeds an uninitialized
+ * buffer to kstrtol, and the copy_from_user() result is checked instead of
+ * being stored in an unused variable.
+ */
+static ssize_t mx140_clk_test_dev_write(struct file *file, const char *data, size_t len, loff_t *offset)
+{
+	char str[2]; /* One value and carry return */
+	long int val = 0;
+
+	if (len < 1 || len > 2) {
+		SCSC_TAG_ERR(CLK20_TEST, "Incorrect value len %zd\n", len);
+		goto error;
+	}
+
+	/* Fail cleanly if the user buffer cannot be read */
+	if (copy_from_user(str, data, len)) {
+		SCSC_TAG_ERR(CLK20_TEST, "Invalid value\n");
+		goto error;
+	}
+
+	/* Keep only the first character; drop any newline */
+	str[1] = 0;
+
+	if (kstrtol(str, 10, &val)) {
+		SCSC_TAG_ERR(CLK20_TEST, "Invalid value\n");
+		goto error;
+	}
+
+	if (val == 1)
+		mx140_clk20mhz_request();
+	else if (val == 0)
+		mx140_clk20mhz_release();
+	else
+		SCSC_TAG_INFO(CLK20_TEST, "val %ld is not valid, 1 - on, 0 - off\n", val);
+error:
+	return len;
+}
+
+/* Reads return no data; report the requested length so callers don't spin */
+static ssize_t mx140_clk_test_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
+{
+	(void)filp;
+	(void)buffer;
+	(void)offset;
+
+	return length;
+}
+
+/* File operations for the mx140 clock test char device */
+static const struct file_operations mx140_clk_test_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = mx140_clk_test_dev_open,
+ .read = mx140_clk_test_dev_read,
+ .write = mx140_clk_test_dev_write,
+ .release = mx140_clk_test_dev_release,
+};
+
+/* 20MHz service driver registration: create the test char device node.
+ * Fixes vs original: class_create() and device_create() return ERR_PTR()
+ * on failure, never NULL, so they must be checked with IS_ERR() — the NULL
+ * tests could never detect a failure (and the device_create test was
+ * inverted in effect). Pointers are NULLed on failure so that
+ * mx140_clk_test_exit() can tell what was actually created.
+ */
+void mx140_clk_test_init(void)
+{
+	int ret;
+	struct device *dev;
+
+	SCSC_TAG_INFO(CLK20_TEST, "Registering mx140 TEST\n");
+
+	ret = alloc_chrdev_region(&mx140_clk_test_dev_t, 0, 1, "mx140_clk_test-cdev");
+	if (ret < 0) {
+		SCSC_TAG_ERR(CLK20_TEST, "failed to alloc chrdev region\n");
+		goto fail_alloc_chrdev_region;
+	}
+
+	mx140_clk_test_cdev = cdev_alloc();
+	if (!mx140_clk_test_cdev) {
+		ret = -ENOMEM;
+		SCSC_TAG_ERR(CLK20_TEST, "failed to alloc cdev\n");
+		goto fail_alloc_cdev;
+	}
+
+	cdev_init(mx140_clk_test_cdev, &mx140_clk_test_dev_fops);
+	ret = cdev_add(mx140_clk_test_cdev, mx140_clk_test_dev_t, 1);
+	if (ret < 0) {
+		SCSC_TAG_ERR(CLK20_TEST, "failed to add cdev\n");
+		goto fail_add_cdev;
+	}
+
+	mx140_clk_test_class = class_create(THIS_MODULE, "mx140_clk_test");
+	if (IS_ERR(mx140_clk_test_class)) {
+		ret = PTR_ERR(mx140_clk_test_class);
+		mx140_clk_test_class = NULL;
+		SCSC_TAG_ERR(CLK20_TEST, "failed to create class\n");
+		goto fail_create_class;
+	}
+
+	dev = device_create(mx140_clk_test_class, NULL, mx140_clk_test_dev_t, NULL, "mx140_usb_clk_test_%d", MINOR(mx140_clk_test_dev_t));
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		SCSC_TAG_ERR(CLK20_TEST, "failed to create device\n");
+		goto fail_create_device;
+	}
+
+	return;
+fail_create_device:
+	class_destroy(mx140_clk_test_class);
+	mx140_clk_test_class = NULL;
+fail_create_class:
+	cdev_del(mx140_clk_test_cdev);
+fail_add_cdev:
+fail_alloc_cdev:
+	mx140_clk_test_cdev = NULL;
+	unregister_chrdev_region(mx140_clk_test_dev_t, 1);
+fail_alloc_chrdev_region:
+	return;
+}
+
+/* Tear down the test char device. Guards against a partially-failed
+ * mx140_clk_test_init(): the original unconditionally called
+ * device_destroy()/class_destroy()/cdev_del(), which would oops if the
+ * class or cdev was never created (e.g. chrdev allocation failed at boot).
+ */
+void mx140_clk_test_exit(void)
+{
+	if (mx140_clk_test_class) {
+		device_destroy(mx140_clk_test_class, mx140_clk_test_dev_t);
+		class_destroy(mx140_clk_test_class);
+	}
+	if (mx140_clk_test_cdev)
+		cdev_del(mx140_clk_test_cdev);
+	unregister_chrdev_region(mx140_clk_test_dev_t, 1);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MX140_CLK_TEST_H__
+#define __MX140_CLK_TEST_H__
+
+
+void mx140_clk_test_init(void);
+void mx140_clk_test_exit(void);
+#endif /* __MX140_CLK_TEST_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+
+#include "scsc_mx_impl.h"
+
+/* Firmware directory definitions */
+
+#define SCSC_MULTI_RF_CHIP_ID /* Select FW by RF chip ID, not rev */
+
+#if defined(CONFIG_SCSC_CORE_FW_LOCATION) && !defined(CONFIG_SCSC_CORE_FW_LOCATION_AUTO)
+#define MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI CONFIG_SCSC_CORE_FW_LOCATION
+#define MX140_FW_BASE_DIR_VENDOR_ETC_WIFI CONFIG_SCSC_CORE_FW_LOCATION
+#else
+#define MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI "/system/etc/wifi"
+#define MX140_FW_BASE_DIR_VENDOR_ETC_WIFI "/vendor/etc/wifi"
+#endif
+
+/* Look for this file in <dir>/etc/wifi */
+#ifdef CONFIG_ANDROID
+#define MX140_FW_DETECT "mx"
+#else
+/* Linux host vfs_stat() doesn't find mx* with "mx" */
+#define MX140_FW_DETECT "mx140.bin"
+#endif
+
+/* Paths for vendor utilities, used when CONFIG_SCSC_CORE_FW_LOCATION_AUTO=n */
+#define MX140_EXE_DIR_VENDOR "/vendor/bin" /* Oreo */
+#define MX140_EXE_DIR_SYSTEM "/system/bin" /* Before Oreo */
+
+#define MX140_FW_CONF_SUBDIR "conf"
+#define MX140_FW_DEBUG_SUBDIR "debug"
+#define MX140_FW_BIN "mx140.bin"
+#define MX140_FW_PATH_MAX_LENGTH (512)
+
+#define MX140_FW_VARIANT_DEFAULT "mx140"
+
+/* Table of suffixes to append to f/w name */
+struct fw_suffix {
+ char suffix[6];
+ u32 hw_ver;
+};
+
+#ifdef SCSC_MULTI_RF_CHIP_ID /* Select by chip ID (S611, S612) */
+
+/* This scheme has one firmware binary for all revisions of an
+ * RF chip ID.
+ */
+
+/* Table of known RF h/w IDs */
+static const struct fw_suffix fw_suffixes[] = {
+ { .suffix = "", .hw_ver = 0xff, }, /* plain mx140.bin, always used if found */
+ { .suffix = "_s612", .hw_ver = 0xb1, },
+ { .suffix = "_s611", .hw_ver = 0xb0, },
+};
+
+#else /* Select by chip revision (EVT0.0, EVT0.1) */
+
+/* This legacy scheme assumes a different fw binary for each revision
+ * of an RF chip ID, and those will uniquely identify the
+ * right build. This was used for early S5E7570 until a unified
+ * binary was available.
+ */
+
+/* Table of known RF h/w revs */
+static const struct fw_suffix fw_suffixes[] = {
+ { .suffix = "_11", .hw_ver = 0x11, },
+ { .suffix = "_10", .hw_ver = 0x10, },
+ { .suffix = "_00", .hw_ver = 0x00, },
+ { .suffix = "", .hw_ver = 0xff, }, /* plain mx140.bin, must be last */
+};
+#endif
+
+/* Once set, we always load this firmware suffix */
+static int fw_suffix_found = -1;
+
+/* Variant of firmware binary to load */
+static char *firmware_variant = MX140_FW_VARIANT_DEFAULT;
+module_param(firmware_variant, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(firmware_variant, "mx140 firmware variant, default mx140");
+
+/* RF hardware version of firmware to load. If "auto" this gets replaced with
+ * the suffix of FW that got loaded.
+ * If "manual" it loads the version specified by firmware_variant, verbatim.
+ */
+static char *firmware_hw_ver = "auto";
+module_param(firmware_hw_ver, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(firmware_hw_ver, "mx140 hw version detect, manual=disable");
+
+/* FW base dir readable by usermode script */
+#ifdef CONFIG_SCSC_CORE_FW_LOCATION_AUTO
+static char *fw_base_dir;
+#else
+static char *fw_base_dir = CONFIG_SCSC_CORE_FW_LOCATION;
+#endif
+module_param_named(base_dir, fw_base_dir, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(base_dir, "WLBT FW base directory");
+
+/* Firmware and tool (moredump) exe base directory */
+#ifdef CONFIG_SCSC_CORE_FW_LOCATION_AUTO
+static char base_dir[MX140_FW_PATH_MAX_LENGTH]; /* auto detect */
+static char exe_dir[MX140_FW_PATH_MAX_LENGTH]; /* auto detect */
+#else
+static char base_dir[] = CONFIG_SCSC_CORE_FW_LOCATION; /* fixed in defconfig */
+static char exe_dir[] = CONFIG_SCSC_CORE_TOOL_LOCATION; /* fixed in defconfig */
+#endif
+
+
+static bool enable_auto_sense;
+module_param(enable_auto_sense, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_auto_sense, "deprecated");
+
+static bool use_new_fw_structure = true;
+module_param(use_new_fw_structure, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_new_fw_structure, "deprecated");
+
+static char *cfg_platform = "default";
+module_param(cfg_platform, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(cfg_platform, "HCF config subdirectory");
+
+/* Reads a configuration file into memory (f/w profile specific).
+ * Builds the path either "flat" (<base>/<variant><suffix>_<filename>) or
+ * nested (<base>/<variant><suffix>/conf/[<platform_dir>/]<rel_path><filename>)
+ * and loads it via mx140_request_file(). Returns 0 on success, -ENOENT if
+ * the base dir isn't resolvable, or the request error otherwise.
+ */
+static int __mx140_file_request_conf(struct scsc_mx *mx,
+ const struct firmware **conf,
+ const char *platform_dir,
+ const char *config_rel_path,
+ const char *filename,
+ const bool flat)
+
+{
+ char config_path[MX140_FW_PATH_MAX_LENGTH];
+ int r;
+
+ /* Ensure base_dir has been detected/validated before building paths */
+ if (mx140_basedir_file(mx))
+ return -ENOENT;
+
+ if (flat) {
+ /* e.g. /etc/wifi/mx140_wlan.hcf */
+
+ scnprintf(config_path, sizeof(config_path),
+ "%s/%s%s_%s",
+ base_dir,
+ firmware_variant,
+ fw_suffixes[fw_suffix_found].suffix,
+ filename);
+ } else {
+ /* e.g. /etc/wifi/mx140/conf/$platform_dir/wlan/wlan.hcf */
+
+ scnprintf(config_path, sizeof(config_path),
+ "%s/%s%s/%s/%s%s%s/%s",
+ base_dir,
+ firmware_variant,
+ fw_suffixes[fw_suffix_found].suffix,
+ MX140_FW_CONF_SUBDIR,
+ platform_dir,
+ (platform_dir[0] != '\0' ? "/" : ""), /* add "/" if platform_dir not empty */
+ config_rel_path,
+ filename);
+ }
+ SCSC_TAG_INFO(MX_FILE, "try %s\n", config_path);
+
+ r = mx140_request_file(mx, config_path, conf);
+
+ /* Confirm what we read */
+ if (r == 0)
+ SCSC_TAG_INFO(MX_FILE, "loaded %s\n", config_path);
+
+ return r;
+}
+
+int mx140_file_request_conf(struct scsc_mx *mx,
+ const struct firmware **conf,
+ const char *config_rel_path,
+ const char *filename)
+{
+ int r;
+
+ /* First, if the config subdirectory has been overriden by cfg_platform
+ * module parameter, search only in that location.
+ */
+ if (strcmp(cfg_platform, "default")) {
+ SCSC_TAG_INFO(MX_FILE, "module param cfg_platform = %s\n", cfg_platform);
+ return __mx140_file_request_conf(mx, conf, cfg_platform, config_rel_path, filename, false);
+ }
+
+ /* Search in generic location. This is an override.
+ * e.g. /etc/wifi/mx140/conf/wlan/wlan.hcf
+ */
+ r = __mx140_file_request_conf(mx, conf, "", config_rel_path, filename, false);
+
+#if defined CONFIG_SCSC_WLBT_CONFIG_PLATFORM
+ /* Then search in platform location
+ * e.g. /etc/wifi/mx140/conf/$platform_dir/wlan/wlan.hcf
+ */
+ if (r) {
+ const char *plat = CONFIG_SCSC_WLBT_CONFIG_PLATFORM;
+
+ /* Don't bother if plat is empty string */
+ if (plat[0] != '\0')
+ r = __mx140_file_request_conf(mx, conf, plat, config_rel_path, filename, false);
+ }
+#endif
+
+ /* Finally request "flat" conf, where all hcf files are in FW root dir
+ * e.g. /etc/wifi/<firmware-variant>-wlan.hcf
+ */
+ if (r)
+ r = __mx140_file_request_conf(mx, conf, "", config_rel_path, filename, true);
+
+ return r;
+}
+
+EXPORT_SYMBOL(mx140_file_request_conf);
+
+/* Reads a debug configuration file into memory (f/w profile specific) */
+int mx140_file_request_debug_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path)
+{
+ char config_path[MX140_FW_PATH_MAX_LENGTH];
+
+ if (mx140_basedir_file(mx))
+ return -ENOENT;
+
+ /* e.g. /etc/wifi/mx140/debug/log_strings.bin */
+
+ scnprintf(config_path, sizeof(config_path), "%s/%s%s/%s/%s",
+ base_dir,
+ firmware_variant,
+ fw_suffixes[fw_suffix_found].suffix,
+ MX140_FW_DEBUG_SUBDIR,
+ config_rel_path);
+
+ return mx140_request_file(mx, config_path, conf);
+}
+EXPORT_SYMBOL(mx140_file_request_debug_conf);
+
+/* Read device configuration file into memory (whole device specific) */
+int mx140_file_request_device_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path)
+{
+ char config_path[MX140_FW_PATH_MAX_LENGTH];
+
+ if (mx140_basedir_file(mx))
+ return -ENOENT;
+
+ /* e.g. /etc/wifi/conf/wlan/mac.txt */
+
+ snprintf(config_path, sizeof(config_path), "%s/%s%s/%s",
+ base_dir,
+ fw_suffixes[fw_suffix_found].suffix,
+ MX140_FW_CONF_SUBDIR,
+ config_rel_path);
+
+ return mx140_request_file(mx, config_path, conf);
+}
+EXPORT_SYMBOL(mx140_file_request_device_conf);
+
/* Release configuration file memory. */
/* Thin wrapper over mx140_release_file(); frees the firmware struct and
 * its data buffer that a conf request helper returned.
 */
void mx140_file_release_conf(struct scsc_mx *mx, const struct firmware *conf)
{
	(void)mx;

	mx140_release_file(mx, conf);
}
EXPORT_SYMBOL(mx140_file_release_conf);
+
+static int __mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size, const char *fw_suffix)
+{
+ const struct firmware *firm;
+ int r = 0;
+ char img_path_name[MX140_FW_PATH_MAX_LENGTH];
+
+ if (mx140_basedir_file(mx))
+ return -ENOENT;
+
+ SCSC_TAG_INFO(MX_FILE, "firmware_variant=%s (%s)\n", firmware_variant, fw_suffix);
+
+ /* e.g. /etc/wifi/mx140.bin */
+ scnprintf(img_path_name, sizeof(img_path_name), "%s/%s%s.bin",
+ base_dir,
+ firmware_variant,
+ fw_suffix);
+
+ SCSC_TAG_INFO(MX_FILE, "Load WLBT fw %s in shared address %p\n", img_path_name, dest);
+ r = mx140_request_file(mx, img_path_name, &firm);
+ if (r) {
+ SCSC_TAG_ERR(MX_FILE, "Error Loading FW, error %d\n", r);
+ return r;
+ }
+ SCSC_TAG_DEBUG(MX_FILE, "FW Download, size %zu\n", firm->size);
+
+ if (firm->size > dest_size) {
+ SCSC_TAG_ERR(MX_FILE, "firmware image too big for buffer (%zu > %u)", dest_size, *fw_image_size);
+ r = -EINVAL;
+ } else {
+ memcpy(dest, firm->data, firm->size);
+ *fw_image_size = firm->size;
+ }
+ mx140_release_file(mx, firm);
+ return r;
+}
+
+/* Download firmware binary into a buffer supplied by the caller */
+int mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size)
+{
+ int r;
+ int i;
+ int manual;
+
+ /* Override to use the verbatim image only */
+ manual = !strcmp(firmware_hw_ver, "manual");
+ if (manual) {
+ SCSC_TAG_INFO(MX_FILE, "manual hw version\n");
+ fw_suffix_found = sizeof(fw_suffixes) / sizeof(fw_suffixes[0]) - 1;
+ }
+
+ SCSC_TAG_DEBUG(MX_FILE, "fw_suffix_found %d\n", fw_suffix_found);
+
+ /* If we know which f/w suffix to use, select it immediately */
+ if (fw_suffix_found != -1) {
+ r = __mx140_file_download_fw(mx, dest, dest_size, fw_image_size, fw_suffixes[fw_suffix_found].suffix);
+ goto done;
+ }
+
+ /* Otherwise try the list */
+ for (i = 0; i < sizeof(fw_suffixes) / sizeof(fw_suffixes[0]); i++) {
+ /* Try to find each suffix in turn */
+ SCSC_TAG_INFO(MX_FILE, "try %d %s\n", i, fw_suffixes[i].suffix);
+ r = __mx140_file_download_fw(mx, dest, dest_size, fw_image_size, fw_suffixes[i].suffix);
+ if (r != -ENOENT)
+ break;
+ }
+
+ /* Save this for next time */
+ if (r == 0)
+ fw_suffix_found = i;
+done:
+ /* Update firmware_hw_ver to reflect what got auto selected, for moredump */
+ if (fw_suffix_found != -1 && !manual) {
+ /* User will only read this, so casting away const is safe */
+ firmware_hw_ver = (char *)fw_suffixes[fw_suffix_found].suffix;
+ }
+ return r;
+}
+
+int mx140_request_file(struct scsc_mx *mx, char *path, const struct firmware **firmp)
+{
+ struct file *f;
+ mm_segment_t fs;
+ struct kstat stat;
+ const int max_read_size = 4096;
+ int r, whats_left, to_read, size;
+ struct firmware *firm;
+ char *buf, *p;
+
+ SCSC_TAG_DEBUG(MX_FILE, "request %s\n", path);
+
+ *firmp = NULL;
+
+ /* Check FS is ready */
+
+ /* Try to determine base dir */
+ r = mx140_basedir_file(mx);
+ if (r) {
+ SCSC_TAG_ERR(MX_FILE, "detect failed for fw base_dir %d\n", r);
+ return r;
+ }
+
+ /* Current segment. */
+ fs = get_fs();
+ /* Set to kernel segment. */
+ set_fs(get_ds());
+
+ r = vfs_stat(base_dir, &stat);
+ if (r != 0) {
+ set_fs(fs);
+ SCSC_TAG_ERR(MX_FILE, "vfs_stat() failed for %s\n", base_dir);
+ return -EAGAIN;
+ }
+
+ /* Check f/w bin */
+ r = vfs_stat(path, &stat);
+ if (r != 0) {
+ set_fs(fs);
+ SCSC_TAG_ERR(MX_FILE, "vfs_stat() failed for %s\n", path);
+ return -ENOENT;
+ }
+ /* Revert to original segment. */
+ set_fs(fs);
+
+ /* Round up for minimum sizes */
+ size = (stat.size + 256) & ~255;
+ /* Get memory for file contents. */
+ buf = vzalloc(size);
+ if (!buf) {
+ SCSC_TAG_ERR(MX_FILE, "kzalloc(%d) failed for %s\n", size, path);
+ return -ENOMEM;
+ }
+ p = buf;
+ /* Get firmware structure. */
+ firm = kzalloc(sizeof(*firm), GFP_KERNEL);
+ if (!firm) {
+ vfree(buf);
+ SCSC_TAG_ERR(MX_FILE, "kzalloc(%zu) failed for %s\n", sizeof(*firmp), path);
+ return -ENOMEM;
+ }
+ /* Open the file for reading. */
+ f = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(f)) {
+ vfree(buf);
+ kfree(firm);
+ SCSC_TAG_ERR(MX_FILE, "filp_open() failed for %s with %ld\n", path, PTR_ERR(f));
+ return -ENOENT;
+ }
+
+ whats_left = stat.size;
+
+ fs = get_fs();
+ set_fs(get_ds());
+
+ /* Special case if file length is reported as zero - try to read until it fails.
+ * This allows us to read /proc
+ */
+ if (whats_left == 0) {
+ do {
+ r = vfs_read(f, p, max_read_size, &f->f_pos);
+ if (r < 0) {
+ SCSC_TAG_INFO(MX_FILE, "No more data %s\n", path);
+ break;
+ }
+ p += r;
+ if (r < max_read_size) {
+ SCSC_TAG_INFO(MX_FILE, "Read %zd from %s\n", (ptrdiff_t)(p - buf), path);
+ break;
+ }
+ } while (r > 0);
+
+ goto done;
+ }
+
+ /* Read at most max_read_size in each read. Loop until the whole file has
+ * been copied to the local buffer.
+ */
+ while (whats_left) {
+ to_read = whats_left < max_read_size ? whats_left : max_read_size;
+ r = vfs_read(f, p, to_read, &f->f_pos);
+ if (r < 0) {
+ SCSC_TAG_ERR(MX_FILE, "error reading %s\n", path);
+ break;
+ }
+ if (r == 0 || r < to_read)
+ break;
+ whats_left -= r;
+ p += r;
+ }
+done:
+ set_fs(fs);
+ filp_close(f, NULL);
+
+ if (r >= 0) {
+ r = 0;
+ /* Pass to caller. Caller will free allocated memory through
+ * mx140_release_file().
+ */
+ firm->size = p - buf;
+ firm->data = buf;
+ *firmp = firm;
+ } else {
+ vfree(buf);
+ kfree(firm);
+ }
+ return r;
+
+}
+EXPORT_SYMBOL(mx140_request_file);
+
/* Free a struct firmware previously returned by mx140_request_file():
 * the data buffer was vzalloc'd and the struct kzalloc'd there.
 * Returns 0, or -EINVAL for a NULL/empty argument.
 */
int mx140_release_file(struct scsc_mx *mx, const struct firmware *firmp)
{
	if (!firmp || !firmp->data) {
		SCSC_TAG_ERR(MX_FILE, "firmp=%p\n", firmp);
		return -EINVAL;
	}

	SCSC_TAG_DEBUG(MX_FILE, "release firmp=%p, data=%p\n", firmp, firmp->data);

	vfree(firmp->data);
	kfree(firmp);
	return 0;
}
EXPORT_SYMBOL(mx140_release_file);
+
+/* Work out correct path for vendor binaries */
+int mx140_exe_path(struct scsc_mx *mx, char *path, size_t len, const char *bin)
+{
+ (void)mx;
+
+ /* Set up when we detect FW path, or statically when
+ * auto-detect is off
+ */
+ if (exe_dir[0] == '\0')
+ return -ENOENT;
+
+ if (path == NULL)
+ return -EINVAL;
+
+ snprintf(path, len, "%s/%s", exe_dir, bin);
+
+ SCSC_TAG_DEBUG(MX_FILE, "exe: %s\n", path);
+ return 0;
+}
+EXPORT_SYMBOL(mx140_exe_path);
+
/* Try to auto detect f/w directory */
/*
 * Populate base_dir/exe_dir (and fw_base_dir) by probing the filesystem:
 * /vendor/etc/wifi (Oreo) is preferred, /system/etc/wifi (pre-O) is the
 * fallback. Returns 0 once detected, -EAGAIN while the partitions are
 * not mounted yet, -ENOENT if neither location holds MX140_FW_DETECT.
 */
int mx140_basedir_file(struct scsc_mx *mx)
{
	struct kstat stat;
	mm_segment_t fs;
	int r = 0;

	/* Already worked out base dir. This is
	 * static if auto-detect is off.
	 */
	if (base_dir[0] != '\0')
		return 0;

	/* Default to pre-O bin dir, until we detect O */
	strlcpy(exe_dir, MX140_EXE_DIR_SYSTEM, sizeof(exe_dir));

	/* Current segment. */
	fs = get_fs();
	/* Set to kernel segment. */
	set_fs(get_ds());

	/* If /system isn't present, assume platform isn't ready yet */
	r = vfs_stat("/system", &stat);
	if (r != 0) {
		SCSC_TAG_ERR(MX_FILE, "/system not mounted yet\n");
		r = -EAGAIN;
		goto done;
	}

	/* If /vendor isn't present, assume platform isn't ready yet.
	 * Android M and N still have /vendor, though we don't use it.
	 */
	r = vfs_stat("/vendor", &stat);
	if (r != 0) {
		SCSC_TAG_ERR(MX_FILE, "/vendor not mounted yet\n");
		r = -EAGAIN;
		goto done;
	}

	/* Now partitions are mounted, so let's see what's in them. */

	/* Try /vendor partition (Oreo) first.
	 * If it's present, it'll contain our FW
	 */
	r = vfs_stat(MX140_FW_BASE_DIR_VENDOR_ETC_WIFI"/"MX140_FW_DETECT, &stat);
	if (r != 0) {
		SCSC_TAG_ERR(MX_FILE, "Base dir: %s/%s doesn't exist\n",
			     MX140_FW_BASE_DIR_VENDOR_ETC_WIFI, MX140_FW_DETECT);
		base_dir[0] = '\0';
		r = -ENOENT;
		/* fall through to try /system below */
	} else {
		strlcpy(base_dir, MX140_FW_BASE_DIR_VENDOR_ETC_WIFI, sizeof(base_dir));
		fw_base_dir = MX140_FW_BASE_DIR_VENDOR_ETC_WIFI;
		strlcpy(exe_dir, MX140_EXE_DIR_VENDOR, sizeof(exe_dir));
		goto done;
	}

	/* Try /system partition (pre-Oreo) */
	r = vfs_stat(MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI"/"MX140_FW_DETECT, &stat);
	if (r != 0) {
		SCSC_TAG_ERR(MX_FILE, "Base dir: %s/%s doesn't exist\n",
			     MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI, MX140_FW_DETECT);
		base_dir[0] = '\0';
		r = -ENOENT;
	} else {
		strlcpy(base_dir, MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI, sizeof(base_dir));
		fw_base_dir = MX140_FW_BASE_DIR_SYSTEM_ETC_WIFI;
	}

done:
	/* Restore segment */
	set_fs(fs);
	SCSC_TAG_INFO(MX_FILE, "WLBT fw base dir is %s\n", base_dir[0] ? base_dir : "not found");

	return r;
}
+
+/* Select file for h/w version from filesystem */
+int mx140_file_select_fw(struct scsc_mx *mx, u32 hw_ver)
+{
+ int i;
+
+ SCSC_TAG_INFO(MX_FILE, "select f/w for 0x%04x\n", hw_ver);
+
+#ifdef SCSC_MULTI_RF_CHIP_ID
+ hw_ver = (hw_ver & 0x00ff); /* LSB is the RF HW ID (e.g. S610) */
+#else
+ hw_ver = (hw_ver & 0xff00) >> 8; /* MSB is the RF HW rev (e.g. EVT1.1) */
+#endif
+
+ for (i = 0; i < sizeof(fw_suffixes) / sizeof(fw_suffixes[0]); i++) {
+ if (fw_suffixes[i].hw_ver == hw_ver) {
+ fw_suffix_found = i;
+ SCSC_TAG_DEBUG(MX_FILE, "f/w for 0x%04x: index %d, suffix '%s'\n",
+ hw_ver, i, fw_suffixes[i].suffix);
+ return 0;
+ }
+ }
+
+ SCSC_TAG_ERR(MX_FILE, "No known f/w for 0x%04x, default to catchall\n", hw_ver);
+
+ /* Enable when a unified FW image is installed */
+#ifdef MX140_UNIFIED_HW_FW
+ /* The last f/w is the non-suffixed "<fw>.bin", assume it's compatible */
+ fw_suffix_found = i - 1;
+#else
+ fw_suffix_found = -1; /* None found */
+#endif
+ return -EINVAL;
+}
+
+/* Query whether this HW is supported by the current FW file set */
+bool mx140_file_supported_hw(struct scsc_mx *mx, u32 hw_ver)
+{
+#ifdef SCSC_MULTI_RF_CHIP_ID
+ hw_ver = (hw_ver & 0x00ff); /* LSB is the RF HW ID (e.g. S610) */
+#else
+ hw_ver = (hw_ver & 0xff00) >> 8; /* MSB is the RF HW rev (e.g. EVT1.0) */
+#endif
+ /* Assume installed 0xff is always compatible, and f/w will panic if it isn't */
+ if (fw_suffixes[fw_suffix_found].hw_ver == 0xff)
+ return true;
+
+ /* Does the select f/w match the hw_ver from chip? */
+ return (fw_suffixes[fw_suffix_found].hw_ver == hw_ver);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+
+#include "mxman.h" /* Special case service driver that looks inside mxman */
+
+#ifdef CONFIG_SCSC_FM_TEST
+#include "mx250_fm_test.h"
+#endif
+
+
+struct scsc_mx_fm_client {
+ /* scsc_service_client has to be the first */
+ struct scsc_service_client fm_service_client;
+ struct scsc_service *fm_service;
+ struct scsc_mx *mx;
+ bool fm_api_available;
+ scsc_mifram_ref ref;
+ struct workqueue_struct *fm_client_wq;
+ struct work_struct fm_client_work;
+ struct completion fm_client_work_completion;
+ int fm_client_work_completion_status;
+ bool ldo_on;
+};
+
+static struct scsc_mx_fm_client *fm_client;
+/* service to start */
+static int service_id = SCSC_SERVICE_ID_FM;
+
+static DEFINE_MUTEX(ss_lock);
+
+
/* Service-client callback: WLBT firmware has failed; disable the FM LDO
 * API until the module is re-probed. Assumes fm_client is non-NULL
 * (set in fm_client_module_probe()) — TODO confirm callback ordering.
 */
static void fm_client_stop_on_failure(struct scsc_service_client *client)
{
	(void)client;
	mutex_lock(&ss_lock);
	fm_client->fm_api_available = false;
	mutex_unlock(&ss_lock);
	SCSC_TAG_DEBUG(FM, "OK\n");
}
+
/* Service-client callback after subsystem reset: intentionally a no-op
 * for the FM client (only logs).
 */
static void fm_client_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
	(void)client;
	(void)scsc_panic_code;
	SCSC_TAG_DEBUG(FM, "OK\n");
}
+
+static int stop_close_service(void)
+{
+ int r;
+
+ if (!fm_client->fm_service) {
+ SCSC_TAG_ERR(FM, "No fm_service\n");
+ r = -EINVAL;
+ goto done;
+ }
+
+ r = scsc_mx_service_stop(fm_client->fm_service);
+ if (r) {
+ SCSC_TAG_ERR(FM, "scsc_mx_service_stop(fm_service) failed %d\n", r);
+ goto done;
+ }
+ SCSC_TAG_DEBUG(FM, "scsc_mx_service_stop(fm_service) OK\n");
+
+ scsc_mx_service_mifram_free(fm_client->fm_service, fm_client->ref);
+
+ r = scsc_mx_service_close(fm_client->fm_service);
+ if (r) {
+ SCSC_TAG_ERR(FM, "scsc_mx_service_close(fm_service) failed %d\n", r);
+ goto done;
+ } else
+ SCSC_TAG_DEBUG(FM, "scsc_mx_service_close(fm_service) OK\n");
+
+ fm_client->fm_service = NULL;
+ fm_client->ref = 0;
+done:
+ return r;
+}
+
+static int open_start_service(void)
+{
+ struct scsc_service *fm_service;
+ int r;
+ int r2;
+ struct fm_ldo_conf *ldo_conf;
+ scsc_mifram_ref ref;
+
+ fm_service = scsc_mx_service_open(fm_client->mx, service_id, &fm_client->fm_service_client, &r);
+ if (!fm_service) {
+ r = -EINVAL;
+ SCSC_TAG_ERR(FM, "scsc_mx_service_open(fm_service) failed %d\n", r);
+ goto done;
+ }
+ /* Allocate memory */
+ r = scsc_mx_service_mifram_alloc(fm_service, sizeof(struct fm_ldo_conf), &ref, 32);
+ if (r) {
+ SCSC_TAG_ERR(FM, "scsc_mx_service_mifram_alloc(fm_service) failed %d\n", r);
+ r2 = scsc_mx_service_close(fm_service);
+ if (r2)
+ SCSC_TAG_ERR(FM, "scsc_mx_service_close(fm_service) failed %d\n", r2);
+ goto done;
+ }
+ ldo_conf = (struct fm_ldo_conf *)scsc_mx_service_mif_addr_to_ptr(fm_service, ref);
+ ldo_conf->version = FM_LDO_CONFIG_VERSION;
+ ldo_conf->ldo_on = fm_client->ldo_on;
+
+ r = scsc_mx_service_start(fm_service, ref);
+ if (r) {
+ SCSC_TAG_ERR(FM, "scsc_mx_service_start(fm_service) failed %d\n", r);
+ r2 = scsc_mx_service_close(fm_service);
+ if (r2)
+ SCSC_TAG_ERR(FM, "scsc_mx_service_close(fm_service) failed %d\n", r2);
+ scsc_mx_service_mifram_free(fm_service, ref);
+ goto done;
+ }
+
+ fm_client->fm_service = fm_service;
+ fm_client->ref = ref;
+done:
+ return r;
+}
+
/* One full FM request cycle: start the service (which passes ldo_on to
 * the firmware), update the mxman LDO bookkeeping, then stop/close.
 * For the "off" case a start failure is deliberately not fatal (see
 * comment below). Returns 0 or the first unrecoverable error.
 */
static int open_start_close_service(void)
{
	int r;

	r = open_start_service();
	if (r) {
		SCSC_TAG_ERR(FM, "Error starting service: open_start_service(fm_service) failed %d\n", r);

		if (!fm_client->ldo_on) {
			/* Do not return here. For the case where WLBT FW is crashed, and FM off request is
			 * rejected, it's safest to continue to let scsc_service_on_halt_ldos_off() reset
			 * the global flag to indicate that FM is no longer needed when WLBT next boots.
			 * Otherwise LDO could be stuck always-on.
			 */
		} else
			return r;
	}

	if (fm_client->ldo_on) {
		/* FM turning on */
		mxman_fm_on_halt_ldos_on();

	} else {
		/* FM turning off */
		mxman_fm_on_halt_ldos_off();

		/* Invalidate stored FM params */
		mxman_fm_set_params(NULL);
	}

	r = stop_close_service();
	if (r) {
		SCSC_TAG_ERR(FM, "Error starting service: stop_close_service(fm_service) failed %d\n", r);
		return r;
	}
	return 0;
}
+
+static void fm_client_work_func(struct work_struct *work)
+{
+ SCSC_TAG_DEBUG(FM, "mx250: %s\n", __func__);
+
+ fm_client->fm_client_work_completion_status = open_start_close_service();
+ if (fm_client->fm_client_work_completion_status) {
+ SCSC_TAG_ERR(FM, "open_start_close_service(fm_service) failed %d\n",
+ fm_client->fm_client_work_completion_status);
+ } else {
+ SCSC_TAG_DEBUG(FM, "OK\n");
+ }
+ complete(&fm_client->fm_client_work_completion);
+
+}
+
/* Create the single-threaded workqueue that serialises FM requests.
 * NOTE(review): create_singlethread_workqueue() can return NULL on
 * allocation failure and neither this function nor its caller checks
 * for it — queue_work() on a NULL wq would crash. Worth fixing upstream.
 */
static void fm_client_wq_init(void)
{
	fm_client->fm_client_wq = create_singlethread_workqueue("fm_client_wq");
	INIT_WORK(&fm_client->fm_client_work, fm_client_work_func);
}
+
/* Cancel any pending FM work and drain the workqueue. */
static void fm_client_wq_stop(void)
{
	cancel_work_sync(&fm_client->fm_client_work);
	flush_workqueue(fm_client->fm_client_wq);
}
+
/* Stop outstanding work and destroy the workqueue (module remove path). */
static void fm_client_wq_deinit(void)
{
	fm_client_wq_stop();
	destroy_workqueue(fm_client->fm_client_wq);
}
+
/* Queue one FM on/off cycle on the dedicated workqueue. */
static void fm_client_wq_start(void)
{
	queue_work(fm_client->fm_client_wq, &fm_client->fm_client_work);
}
+
+static int fm_client_wq_start_blocking(void)
+{
+ SCSC_TAG_DEBUG(FM, "mx250: %s\n", __func__);
+
+ fm_client_wq_start();
+ wait_for_completion(&fm_client->fm_client_work_completion);
+ if (fm_client->fm_client_work_completion_status) {
+ SCSC_TAG_ERR(FM, "%s failed: fm_client_wq_completion_status = %d\n",
+ __func__, fm_client->fm_client_work_completion_status);
+ return fm_client->fm_client_work_completion_status;
+ }
+ SCSC_TAG_DEBUG(FM, "OK\n");
+ return 0;
+}
+
+static int mx250_fm_re(bool ldo_on)
+{
+ int r;
+
+ mutex_lock(&ss_lock);
+ SCSC_TAG_DEBUG(FM, "mx250: %s\n", __func__);
+ if (!fm_client) {
+ SCSC_TAG_ERR(FM, "fm_client = NULL\n");
+ mutex_unlock(&ss_lock);
+ return -ENODEV;
+ }
+
+ if (!fm_client->fm_api_available) {
+ SCSC_TAG_WARNING(FM, "FM LDO API unavailable\n");
+ mutex_unlock(&ss_lock);
+ return -EAGAIN;
+ }
+ fm_client->ldo_on = ldo_on;
+ reinit_completion(&fm_client->fm_client_work_completion);
+ r = fm_client_wq_start_blocking();
+ mutex_unlock(&ss_lock);
+ return r;
+
+}
+
+/*
+ * FM Radio is starting, tell WLBT drivers
+ */
+int mx250_fm_request(void)
+{
+
+ SCSC_TAG_INFO(FM, "request\n");
+ return mx250_fm_re(true);
+}
+EXPORT_SYMBOL(mx250_fm_request);
+
+/*
+ * FM Radio is stopping, tell WLBT drivers
+ */
+int mx250_fm_release(void)
+{
+ SCSC_TAG_INFO(FM, "release\n");
+ return mx250_fm_re(false);
+}
+EXPORT_SYMBOL(mx250_fm_release);
+
+/*
+ * FM Radio parameters are changing, tell WLBT drivers
+ */
+void mx250_fm_set_params(struct wlbt_fm_params *info)
+{
+ SCSC_TAG_DEBUG(FM, "mx250: %s\n", __func__);
+
+ if (!info)
+ return;
+
+ mutex_lock(&ss_lock);
+
+ SCSC_TAG_INFO(FM, "freq %u\n", info->freq);
+
+ mxman_fm_set_params(info);
+
+ mutex_unlock(&ss_lock);
+}
+EXPORT_SYMBOL(mx250_fm_set_params);
+
+void fm_client_module_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx,
+ enum scsc_module_client_reason reason)
+{
+ /* Avoid unused error */
+ (void)module_client;
+
+ SCSC_TAG_INFO(FM, "probe\n");
+
+ mutex_lock(&ss_lock);
+ if (reason == SCSC_MODULE_CLIENT_REASON_HW_PROBE) {
+ fm_client = kzalloc(sizeof(*fm_client), GFP_KERNEL);
+ if (!fm_client) {
+ mutex_unlock(&ss_lock);
+ return;
+ }
+ init_completion(&fm_client->fm_client_work_completion);
+ fm_client_wq_init();
+ fm_client->fm_service_client.stop_on_failure = fm_client_stop_on_failure;
+ fm_client->fm_service_client.failure_reset = fm_client_failure_reset;
+ fm_client->mx = mx;
+ }
+ fm_client->fm_api_available = true;
+ SCSC_TAG_DEBUG(FM, "OK\n");
+ mutex_unlock(&ss_lock);
+}
+
+void fm_client_module_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx,
+ enum scsc_module_client_reason reason)
+{
+ /* Avoid unused error */
+ (void)module_client;
+
+ SCSC_TAG_INFO(FM, "remove\n");
+
+ mutex_lock(&ss_lock);
+ if (reason == SCSC_MODULE_CLIENT_REASON_HW_REMOVE) {
+ if (!fm_client) {
+ mutex_unlock(&ss_lock);
+ return;
+ }
+ if (fm_client->mx != mx) {
+ SCSC_TAG_ERR(FM, "fm_client->mx != mx\n");
+ mutex_unlock(&ss_lock);
+ return;
+ }
+ fm_client_wq_deinit();
+ kfree(fm_client);
+ fm_client = NULL;
+ }
+ SCSC_TAG_DEBUG(FM, "OK\n");
+ mutex_unlock(&ss_lock);
+}
+
/* FM client driver registration */
/* Registered with scsc_mx_module_register_client_module() in module init */
struct scsc_mx_module_client fm_client_driver = {
	.name = "FM client driver",
	.probe = fm_client_module_probe,
	.remove = fm_client_module_remove,
};
+
+static int __init scsc_fm_client_module_init(void)
+{
+ int r;
+
+ SCSC_TAG_INFO(FM, "init\n");
+
+ r = scsc_mx_module_register_client_module(&fm_client_driver);
+ if (r) {
+ SCSC_TAG_ERR(FM, "scsc_mx_module_register_client_module failed: r=%d\n", r);
+ return r;
+ }
+#ifdef CONFIG_SCSC_FM_TEST
+ mx250_fm_test_init();
+#endif
+ return 0;
+}
+
+static void __exit scsc_fm_client_module_exit(void)
+{
+ SCSC_TAG_INFO(FM, "exit\n");
+ scsc_mx_module_unregister_client_module(&fm_client_driver);
+#ifdef CONFIG_SCSC_FM_TEST
+ mx250_fm_test_exit();
+#endif
+ SCSC_TAG_DEBUG(FM, "exit\n");
+}
+
+late_initcall(scsc_fm_client_module_init);
+module_exit(scsc_fm_client_module_exit);
+
+MODULE_DESCRIPTION("FM Client Driver");
+MODULE_AUTHOR("SCSC");
+MODULE_LICENSE("GPL");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include "scsc_mx_impl.h"
+
+/* char device entry declarations */
+static dev_t mx250_fm_test_dev_t;
+static struct class *mx250_fm_test_class;
+static struct cdev *mx250_fm_test_cdev;
+
+
/* open(): no per-open state needed for this test device */
static int mx250_fm_test_dev_open(struct inode *inode, struct file *file)
{
	return 0;
}
+
/* release(): nothing to clean up */
static int mx250_fm_test_dev_release(struct inode *inode, struct file *file)
{
	return 0;
}
+
+
+static ssize_t mx250_fm_test_dev_write(struct file *file, const char *data, size_t len, loff_t *offset)
+{
+ unsigned long count;
+ char str[20]; /* One value and carry return */
+ long val = 0;
+ struct wlbt_fm_params params;
+ int r;
+
+ count = copy_from_user(str, data, len);
+
+ str[sizeof(str) - 1] = 0;
+ if (len < sizeof(str))
+ str[len - 1] = 0;
+
+ r = kstrtol(str, 0, &val);
+ if (r) {
+ SCSC_TAG_ERR(FM_TEST, "parse error %d, l=%zd\n", r, len);
+ goto error;
+ }
+
+ if (val == 1)
+ mx250_fm_request();
+ else if (val == 0)
+ mx250_fm_release();
+ else {
+ /* All other values are frequency info */
+ params.freq = (u32)val;
+
+ SCSC_TAG_INFO(FM_TEST, "FM freq=%u\n", params.freq);
+
+ mx250_fm_set_params(¶ms);
+ }
+error:
+ return len;
+}
+
+static ssize_t mx250_fm_test_dev_read(struct file *filp, char *buffer, size_t length, loff_t *offset)
+{
+ return length;
+}
+
/* File operations for the mx250_fm_test control char device */
static const struct file_operations mx250_fm_test_dev_fops = {
	.owner = THIS_MODULE,
	.open = mx250_fm_test_dev_open,
	.read = mx250_fm_test_dev_read,
	.write = mx250_fm_test_dev_write,
	.release = mx250_fm_test_dev_release,
};
+
+/* FM service driver registration */
+void mx250_fm_test_init(void)
+{
+ int ret;
+
+ SCSC_TAG_INFO(FM_TEST, "Registering mx250 TEST\n");
+
+ ret = alloc_chrdev_region(&mx250_fm_test_dev_t, 0, 1, "mx250_fm_test-cdev");
+ if (ret < 0) {
+ SCSC_TAG_ERR(FM_TEST, "failed to alloc chrdev region\n");
+ goto fail_alloc_chrdev_region;
+ }
+
+ mx250_fm_test_cdev = cdev_alloc();
+ if (!mx250_fm_test_cdev) {
+ ret = -ENOMEM;
+ SCSC_TAG_ERR(FM_TEST, "failed to alloc cdev\n");
+ goto fail_alloc_cdev;
+ }
+
+ cdev_init(mx250_fm_test_cdev, &mx250_fm_test_dev_fops);
+ ret = cdev_add(mx250_fm_test_cdev, mx250_fm_test_dev_t, 1);
+ if (ret < 0) {
+ SCSC_TAG_ERR(FM_TEST, "failed to add cdev\n");
+ goto fail_add_cdev;
+ }
+
+ mx250_fm_test_class = class_create(THIS_MODULE, "mx250_fm_test");
+ if (!mx250_fm_test_class) {
+ ret = -EEXIST;
+ SCSC_TAG_ERR(FM_TEST, "failed to create class\n");
+ goto fail_create_class;
+ }
+
+ if (!device_create(mx250_fm_test_class, NULL, mx250_fm_test_dev_t, NULL, "mx250_fm_test_%d",
+ MINOR(mx250_fm_test_dev_t))) {
+ ret = -EINVAL;
+ SCSC_TAG_ERR(FM_TEST, "failed to create device\n");
+ goto fail_create_device;
+ }
+
+ return;
+fail_create_device:
+ class_destroy(mx250_fm_test_class);
+fail_create_class:
+ cdev_del(mx250_fm_test_cdev);
+fail_add_cdev:
+fail_alloc_cdev:
+ unregister_chrdev_region(mx250_fm_test_dev_t, 1);
+fail_alloc_chrdev_region:
+ return;
+}
+
+void mx250_fm_test_exit(void)
+{
+ device_destroy(mx250_fm_test_class, mx250_fm_test_dev_t);
+ class_destroy(mx250_fm_test_class);
+ cdev_del(mx250_fm_test_cdev);
+ unregister_chrdev_region(mx250_fm_test_dev_t, 1);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
#ifndef __MX250_FM_TEST_H___
#define __MX250_FM_TEST_H___


/* Register the mx250_fm_test control char device (see mx250_fm_test.c) */
void mx250_fm_test_init(void);
/* Tear down the mx250_fm_test char device */
void mx250_fm_test_exit(void);
#endif /* __MX250_FM_TEST_H___*/
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <asm/page.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <scsc/scsc_mx.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include "mx_dbg_sampler.h"
+#include "scsc_mif_abs.h"
+#include "mxman.h"
+#include "scsc_mx_impl.h"
+#include "miframman.h"
+
+#include <scsc/scsc_logring.h>
+
+static unsigned int source_addr = 0xd0300028;
+module_param(source_addr, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(source_addr, "Relative address of Location to sample (usually a register) - default: 0xd0300028. Loaded at /dev open");
+
+static unsigned int num_bytes = 4;
+module_param(num_bytes, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(num_bytes, "Number of significant octets (1,2 or 4) to log (lsbytes from source) - default: 4. Loaded at /dev open");
+
+static unsigned int period_usecs;
+module_param(period_usecs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(period_usecs, "Sampling period. 0 means as fast as possible (powers of 2 only) - default: 0. Loaded at /dev open");
+
+static bool auto_start;
+module_param(auto_start, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(auto_start, "Start/stop sampling when service is started/stopped? - default: N. Loaded at /dev open");
+
+static unsigned int buf_len = 512 * 1024;
+module_param(buf_len, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_len, "Circular buffer length (octets, 2^n) in bytes - default: 524288. Loaded at /dev open");
+
+static unsigned int kfifo_len = 4 * 1024 * 1024;
+module_param(kfifo_len, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kfifo_len, "Kfifo buffer length (octets, 2^n) in bytes - default: 4194304. Loaded at /dev open");
+
+static bool self_test;
+module_param(self_test, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(self_test, "Execute self test by triggering a Kernel thread which writes into shared memory and then calls irg handler - default: N. Loaded at /dev open");
+
#define DRV_NAME "mx_dbg_sampler"
#define DEVICE_NAME "mx_dbg_sampler"

/* VM_RESERVED no longer exists in newer kernels; provide the equivalent */
#ifndef VM_RESERVED
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

/* Driver version */
#define VER_MAJOR 0
#define VER_MINOR 0

#define SCSC_MX_DEBUG_NODE 1

/* Total char-device minors: 5 interfaces per debug node */
#define SCSC_MX_DEBUG_INTERFACES (5 * (SCSC_MX_DEBUG_NODE))

/* Tracks which minor numbers are allocated */
DECLARE_BITMAP(bitmap_dbg_sampler_minor, SCSC_MX_DEBUG_INTERFACES);

/* Values for mx_dbg_sampler_dev.error */
#define NO_ERROR 0
#define BUFFER_OVERFLOW 1
#define KFIFO_ERROR 2
#define KFIFO_FULL 3
+
/* Per-minor state for one debug-sampler char device */
struct mx_dbg_sampler_dev {
	/* file pointer */
	struct file *filp;
	/* char device */
	struct cdev cdev;
	/*device pointer*/
	struct device *dev;
	/* mx_wlan_client */
	struct scsc_service_client mx_client;
	/*service pointer*/
	struct scsc_service *service;
	/* mifram reference to the shared sample buffer (was mislabelled
	 * "service pointer")
	 */
	scsc_mifram_ref ref;
	/*mx pointer*/
	struct scsc_mx *mx;
	/* Associated kfifo */
	DECLARE_KFIFO_PTR(fifo, u8);
	/* Associated read_wait queue.*/
	wait_queue_head_t read_wait;
	/* Associated debug_buffer */
	struct debug_sampler_config info;
	/* Buffer read index */
	u32 read_idx;
	/* Device in error (NO_ERROR/BUFFER_OVERFLOW/KFIFO_ERROR/KFIFO_FULL) */
	u8 error;
	/* Device node spinlock for IRQ */
	spinlock_t spinlock;
	/* Device node mutex for fops */
	struct mutex mutex;
	/* To profile kfifo num elements */
	u32 kfifo_max;
	/* Device is in use */
	bool in_use;
};
+
+/**
+ * SCSC User Space debug sampler interface (singleton)
+ */
+static struct {
+ dev_t device;
+ struct class *class_mx_dbg_sampler;
+ struct mx_dbg_sampler_dev devs[SCSC_MX_DEBUG_INTERFACES];
+} mx_dbg_sampler;
+
+static int recovery_in_progress;
+
/* Service-client callback on WLBT failure: only flags that recovery is
 * in progress (full handling still TODO).
 */
static void mx_dbg_sampler_stop_on_failure(struct scsc_service_client *client)
{
	SCSC_TAG_INFO(MX_SAMPLER, "TODO\n");
	recovery_in_progress = 1;
}
+
/* Service-client callback after reset: currently a stub (TODO). */
static void mx_dbg_sampler_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
	(void)scsc_panic_code;
	SCSC_TAG_INFO(MX_SAMPLER, "TODO\n");
}
+
/* Copy `length` newly sampled bytes from the shared circular buffer into
 * the device kfifo and wake blocked readers. Called from the IRQ handler
 * under mx_dev->spinlock. On kfifo problems it sets mx_dev->error and
 * returns; the IRQ handler then masks the interrupt.
 */
static void mx_wlan_read_process(const void *data, size_t length, struct mx_dbg_sampler_dev *mx_dev)
{
	int ret;
	void *read_ptr;
	u32 elements;

	/* Adjust length for kfifo type (u8)- elements -*/
	elements = length;

	if (mx_dev->filp) {
		/* put string into the fifo */
		if (kfifo_avail(&mx_dev->fifo) >= elements) {
			/* Push values in Fifo*/
			read_ptr = (void *)data + (mx_dev->read_idx & (buf_len - 1));
			ret = kfifo_in(&mx_dev->fifo, read_ptr, elements);
			mx_dev->read_idx += ret;
			if (ret != elements || ret == 0) {
				mx_dev->error = KFIFO_ERROR;
				return;
			}
			/* Track the kfifo high-water mark for profiling */
			ret = kfifo_len(&mx_dev->fifo);
			if (ret > mx_dev->kfifo_max)
				mx_dev->kfifo_max = ret;
		} else {
			/* Mask interrupt to avoid interrupt storm */
			mx_dev->error = KFIFO_FULL;
			return;
		}
		wake_up_interruptible(&mx_dev->read_wait);
	}
	/* Device is closed. Silently return */
}
+
/* MIF interrupt handler: compute how much the firmware writer is ahead
 * of our read index, push that span into the kfifo, then clear/unmask
 * the interrupt. On overflow or kfifo errors the interrupt stays masked
 * (set again outside the lock) to stop an interrupt storm.
 */
static void mx_dbg_sampler_irq_handler(int irq, void *data)
{
	struct mx_dbg_sampler_dev *mx_dev = (struct mx_dbg_sampler_dev *)data;
	struct scsc_service *service = mx_dev->service;
	u32 write_ref;
	u32 data_ref;
	void *write_ptr;
	void *data_ptr;
	u32 read_idx;
	u32 write_idx;
	size_t to_read;
	unsigned long flags;

	spin_lock_irqsave(&mx_dev->spinlock, flags);

	/* check whether service has been released */
	if (!mx_dev->service) {
		spin_unlock_irqrestore(&mx_dev->spinlock, flags);
		return;
	}

	read_idx = mx_dev->read_idx;

	/* Fetch the firmware's write index from shared memory */
	write_ref = mx_dev->info.buffer_info.write_index_offset;
	write_ptr = scsc_mx_service_mif_addr_to_ptr(service, write_ref);
	write_idx = *((u32 *)write_ptr);

	to_read = abs((s32)write_idx - (s32)read_idx);

	/* TODO: Decide whether we need to do the memdump on a workqueue/tasklet or just in the int handler */
	if (to_read > mx_dev->info.buffer_info.buf_len) {
		/* Writer lapped the reader: samples were lost */
		scsc_service_mifintrbit_bit_clear(service, irq);
		scsc_service_mifintrbit_bit_mask(service, irq);
		mx_dev->error = BUFFER_OVERFLOW;
		goto end;
	}

	data_ref = mx_dev->info.buffer_info.buf_offset;
	data_ptr = scsc_mx_service_mif_addr_to_ptr(service, data_ref);
	mx_wlan_read_process(data_ptr, to_read, mx_dev); /* Clear interrupt */
	scsc_service_mifintrbit_bit_clear(service, irq);
	scsc_service_mifintrbit_bit_unmask(service, irq);
end:
	spin_unlock_irqrestore(&mx_dev->spinlock, flags);

	/* Mask if dev is in error */
	/* We shouldn't be printing out lots of stuff here, but it is in error condition */
	if (mx_dev->error != NO_ERROR) {
		scsc_service_mifintrbit_bit_mask(service, irq);
		if (mx_dev->error == BUFFER_OVERFLOW)
			SCSC_TAG_ERR(MX_SAMPLER, "Error, Buffer Overflow %zu write_idx 0x%x read_idex 0x%x\n", to_read, write_idx, read_idx);
		else if (mx_dev->error == KFIFO_ERROR)
			SCSC_TAG_ERR(MX_SAMPLER, "Error Pushing values in kfifo\n");
		else if (mx_dev->error == KFIFO_FULL)
			SCSC_TAG_ERR(MX_SAMPLER, "Error kfifo is full\n");
	}
}
+
+static struct task_struct *mx_dbg_sampler_task;
+
+#define BULK_DATA (16 * 1024)
+/*
+ * Self-test producer thread (enabled by the "self_test" module parameter).
+ *
+ * Emulates the firmware sampler: advances the shared write index by
+ * BULK_DATA octets, fills the corresponding region of the circular
+ * buffer with a recognisable ASCII pattern and then invokes the
+ * interrupt handler directly to drain it, once per 100 ms until the
+ * thread is stopped or the device latches an error.
+ */
+int mx_dbg_sampler_thread(void *data)
+{
+	struct mx_dbg_sampler_dev *dev = (struct mx_dbg_sampler_dev *)data;
+	struct scsc_service *service = dev->service;
+	u32 write_off;
+	u32 mem_off;
+	void *write_ptr;
+	u32 *mem_ptr;
+	u32 write_idx;
+	u32 i;
+
+	while (!kthread_should_stop() && dev->error == NO_ERROR) {
+		/* Advance the shared (producer) write index by one bulk chunk */
+		write_off = dev->info.buffer_info.write_index_offset;
+		write_ptr = scsc_mx_service_mif_addr_to_ptr(service, write_off);
+		write_idx = *((u32 *)write_ptr) + BULK_DATA;
+		*((u32 *)write_ptr) = write_idx;
+
+		mem_off = dev->info.buffer_info.buf_offset;
+		mem_ptr = scsc_mx_service_mif_addr_to_ptr(service, mem_off);
+		/* Fix: wrap the producer offset into the circular buffer (buf_len
+		 * is a power of two) exactly as the consumer does; the previous
+		 * unmasked read_idx offset eventually wrote past the buffer end.
+		 * NOTE(review): a chunk could still straddle the wrap point if
+		 * BULK_DATA did not divide buf_len - confirm sizes stay aligned. */
+		mem_ptr += (dev->read_idx & (dev->info.buffer_info.buf_len - 1)) / sizeof(u32);
+
+		for (i = 0; i < BULK_DATA / 4; i++)
+			*mem_ptr++ = 0x33323130; /* ASCII "0123" test pattern */
+
+		/* Drain what we just produced, as the real interrupt would */
+		mx_dbg_sampler_irq_handler(0, dev);
+		/* Fix: msleep() instead of mdelay() - a kthread must not
+		 * busy-wait for 100 ms on every iteration */
+		msleep(100);
+	}
+	mx_dbg_sampler_task = NULL;
+	return 0;
+}
+
+/*
+ * Allocate the per-open service resources: a shared-memory region holding
+ * the config structure plus the circular sample buffer, and a to-host
+ * interrupt bit. The config structure is populated both in shared memory
+ * (for the firmware) and mirrored into mx_dev->info (for the driver).
+ *
+ * Returns 0 on success, -ENOMEM if the MIF RAM allocation fails, or the
+ * negative error from the interrupt-bit registration.
+ */
+static int mx_dbg_sampler_allocate_resources(struct scsc_service *service, struct mx_dbg_sampler_dev *mx_dev)
+{
+ scsc_mifram_ref ref, ref_buffer, ref_index;
+ int ret = 0;
+ struct debug_sampler_align *mem;
+
+ /* Allocate memory */
+ ret = scsc_mx_service_mifram_alloc(service, buf_len + sizeof(struct debug_sampler_align), &ref, 64);
+ if (ret)
+ return -ENOMEM;
+ mem = (struct debug_sampler_align *)scsc_mx_service_mif_addr_to_ptr(service, ref);
+
+ /* Allocate interrupt */
+ ret = scsc_service_mifintrbit_register_tohost(service, mx_dbg_sampler_irq_handler, mx_dev);
+ if (ret < 0) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Error allocating interrupt\n");
+ scsc_mx_service_mifram_free(service, ref);
+ return ret;
+ }
+ /* Populate the buffer_info */
+ mem->config.version = mx_dev->info.version = 0;
+
+ /* Translate kernel pointers back to firmware-relative references */
+ scsc_mx_service_mif_ptr_to_addr(service, &mem->mem, &ref_buffer);
+ mem->config.buffer_info.buf_offset = mx_dev->info.buffer_info.buf_offset = ref_buffer;
+
+ mem->config.buffer_info.buf_len = mx_dev->info.buffer_info.buf_len = buf_len;
+
+ scsc_mx_service_mif_ptr_to_addr(service, &mem->index, &ref_index);
+ mem->config.buffer_info.write_index_offset =
+ mx_dev->info.buffer_info.write_index_offset = ref_index;
+
+ /* Reset write index */
+ mem->index = 0;
+
+ /* ret still holds the interrupt bit number from registration above */
+ mem->config.buffer_info.intr_num = mx_dev->info.buffer_info.intr_num = ret;
+
+ /* Sampling spec comes from module parameters */
+ mem->config.sample_spec.source_addr = source_addr;
+ mem->config.sample_spec.num_bytes = num_bytes;
+ mem->config.sample_spec.period_usecs = period_usecs;
+ mem->config.auto_start = auto_start;
+
+ mx_dev->ref = ref;
+ /* Reset read index */
+ mx_dev->read_idx = 0;
+
+ return 0;
+}
+
+/*
+ * Undo mx_dbg_sampler_allocate_resources(): stop the self-test producer
+ * thread when it is running, release the to-host interrupt bit and free
+ * the shared-memory region. Always returns 0.
+ */
+static int mx_dbg_sampler_free_resources(struct scsc_service *service, struct mx_dbg_sampler_dev *mx_dev)
+{
+	if (self_test && mx_dbg_sampler_task)
+		kthread_stop(mx_dbg_sampler_task);
+
+	scsc_service_mifintrbit_unregister_tohost(service, mx_dev->info.buffer_info.intr_num);
+	scsc_mx_service_mifram_free(service, mx_dev->ref);
+
+	return 0;
+}
+
+/*
+ * Open the sampler device node: single-open only. Allocates the receive
+ * kfifo, opens and starts the debug-sampler service and (optionally)
+ * launches the self-test producer thread. On any failure all partially
+ * acquired resources are released and the file state reset.
+ *
+ * Returns 0 on success, -ERESTARTSYS on interrupted lock, -EIO otherwise.
+ */
+int mx_dbg_sampler_open(struct inode *inode, struct file *filp)
+{
+ struct mx_dbg_sampler_dev *mx_dev;
+ int ret = 0, r;
+
+ mx_dev = container_of(inode->i_cdev, struct mx_dbg_sampler_dev, cdev);
+
+ if (mutex_lock_interruptible(&mx_dev->mutex))
+ return -ERESTARTSYS;
+
+ if (mx_dev->in_use) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Device node already opened. Only one instance allowed. Exit\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ if (filp->private_data) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Service already started\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ filp->private_data = mx_dev;
+ mx_dev->filp = filp;
+ /* Clear any remaining error */
+ mx_dev->error = NO_ERROR;
+
+ /* kfifo_len here is the module parameter (fifo capacity), not the
+ * kernel kfifo_len() accessor */
+ ret = kfifo_alloc(&mx_dev->fifo, kfifo_len, GFP_KERNEL);
+ if (ret) {
+ SCSC_TAG_ERR(MX_SAMPLER, "kfifo_alloc failed");
+ ret = -EIO;
+ goto error;
+ }
+
+ mx_dev->service = scsc_mx_service_open(mx_dev->mx, SCSC_SERVICE_ID_DBG_SAMPLER, &mx_dev->mx_client, &ret);
+ if (!mx_dev->service) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Error opening service is NULL\n");
+ kfifo_free(&mx_dev->fifo);
+ ret = -EIO;
+ goto error;
+ }
+ /* Allocate resources */
+ ret = mx_dbg_sampler_allocate_resources(mx_dev->service, mx_dev);
+ if (ret) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Error Allocating resources\n");
+ kfifo_free(&mx_dev->fifo);
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_close failed %d\n", r);
+ goto error;
+ }
+
+ ret = scsc_mx_service_start(mx_dev->service, mx_dev->ref);
+ if (ret) {
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_start failed\n");
+ mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
+ kfifo_free(&mx_dev->fifo);
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_close failed %d\n", r);
+ goto error;
+ }
+ /* WARNING: At this point we may be receiving interrupts from Maxwell */
+
+ /* Trigger the dummy thread to test the functionality */
+ if (self_test)
+ mx_dbg_sampler_task = kthread_run(mx_dbg_sampler_thread, (void *)mx_dev, "mx_dbg_sampler_thread");
+
+ SCSC_TAG_INFO(MX_SAMPLER, "%s: Sampling....\n", DRV_NAME);
+ mx_dev->in_use = true;
+ mutex_unlock(&mx_dev->mutex);
+ return 0;
+error:
+ /* Reset file state so a subsequent open can retry cleanly */
+ filp->private_data = NULL;
+ mx_dev->filp = NULL;
+ mx_dev->service = NULL;
+end:
+ mutex_unlock(&mx_dev->mutex);
+ return ret;
+}
+
+/*
+ * Read buffered sample octets into user space. Blocks until data is
+ * available unless O_NONBLOCK is set (then returns -EAGAIN on an empty
+ * fifo). Returns the number of bytes copied, 0 for a zero-length read,
+ * or a negative error.
+ *
+ * NOTE(review): the device mutex is held across wait_event_interruptible,
+ * so a blocked reader stalls release() and poll() on the same node until
+ * data arrives or a signal is delivered - confirm this is intended.
+ */
+static ssize_t mx_dbg_sampler_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+{
+ unsigned int copied;
+ int ret = 0;
+ struct mx_dbg_sampler_dev *mx_dev;
+
+ mx_dev = filp->private_data;
+
+ if (mutex_lock_interruptible(&mx_dev->mutex))
+ return -EINTR;
+
+ /* Check whether the device is in error */
+ if (mx_dev->error != NO_ERROR) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Device in error\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ while (len) {
+ if (kfifo_len(&mx_dev->fifo)) {
+ /* Data available: copy as much as fits and return the count */
+ ret = kfifo_to_user(&mx_dev->fifo, buf, len, &copied);
+ if (!ret)
+ ret = copied;
+ break;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ /* Sleep until the interrupt handler pushes data and wakes us */
+ ret = wait_event_interruptible(mx_dev->read_wait,
+ !kfifo_is_empty(&mx_dev->fifo));
+ if (ret < 0)
+ break;
+ }
+end:
+ mutex_unlock(&mx_dev->mutex);
+ return ret;
+}
+
+/*
+ * poll/select handler.
+ *
+ * Returns an event mask: POLLERR when the device has latched an error
+ * (or the mutex wait is interrupted), POLLIN|POLLRDNORM when samples
+ * are buffered, otherwise POLLOUT|POLLWRNORM.
+ */
+static unsigned mx_dbg_sampler_poll(struct file *filp, poll_table *wait)
+{
+	struct mx_dbg_sampler_dev *mx_dev = filp->private_data;
+	unsigned int mask;
+
+	/* Fix: poll returns an unsigned event mask - the previous
+	 * "return -EINTR" became a bogus all-events mask. Report POLLERR. */
+	if (mutex_lock_interruptible(&mx_dev->mutex))
+		return POLLERR;
+
+	if (mx_dev->error != NO_ERROR) {
+		mask = POLLERR;
+		goto end;
+	}
+
+	poll_wait(filp, &mx_dev->read_wait, wait);
+
+	if (!kfifo_is_empty(&mx_dev->fifo)) {
+		mask = POLLIN | POLLRDNORM; /* readable */
+		goto end;
+	}
+
+	mask = POLLOUT | POLLWRNORM; /* writable */
+
+end:
+	mutex_unlock(&mx_dev->mutex);
+	return mask;
+}
+
+/*
+ * Close the sampler device node: detach the file under the spinlock so
+ * the interrupt handler stops pushing into the fifo, then stop and close
+ * the service and free its resources. Returns 0, -EINTR on interrupted
+ * lock, or -EIO on inconsistent file state.
+ */
+int mx_dbg_sampler_release(struct inode *inode, struct file *filp)
+{
+ struct mx_dbg_sampler_dev *mx_dev;
+ unsigned long flags;
+ int r;
+
+ mx_dev = container_of(inode->i_cdev, struct mx_dbg_sampler_dev, cdev);
+
+ if (mutex_lock_interruptible(&mx_dev->mutex))
+ return -EINTR;
+
+ if (mx_dev->filp == NULL) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Device already closed\n");
+ mutex_unlock(&mx_dev->mutex);
+ return -EIO;
+ }
+
+ if (mx_dev != filp->private_data) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Data mismatch\n");
+ mutex_unlock(&mx_dev->mutex);
+ return -EIO;
+ }
+
+ /* Spinlock excludes the interrupt handler while we tear down the fifo */
+ spin_lock_irqsave(&mx_dev->spinlock, flags);
+ filp->private_data = NULL;
+ mx_dev->filp = NULL;
+ mx_dev->in_use = false;
+ kfifo_free(&mx_dev->fifo);
+ spin_unlock_irqrestore(&mx_dev->spinlock, flags);
+
+ if (mx_dev->service) {
+ r = scsc_mx_service_stop(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
+ mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_close failed %d\n", r);
+
+ /* Clear the service pointer under the lock; irq handler checks it */
+ spin_lock_irqsave(&mx_dev->spinlock, flags);
+ mx_dev->service = NULL;
+ spin_unlock_irqrestore(&mx_dev->spinlock, flags);
+ }
+
+ mutex_unlock(&mx_dev->mutex);
+ SCSC_TAG_INFO(MX_SAMPLER, "%s: Sampling... end. Kfifo_max = %d\n", DRV_NAME, mx_dev->kfifo_max);
+ return 0;
+}
+
+/* File operations for the per-minor sampler character device nodes */
+static const struct file_operations mx_dbg_sampler_fops = {
+ .owner = THIS_MODULE,
+ .open = mx_dbg_sampler_open,
+ .read = mx_dbg_sampler_read,
+ .release = mx_dbg_sampler_release,
+ .poll = mx_dbg_sampler_poll,
+};
+
+/*
+ * Maxwell client-module probe callback.
+ *
+ * Recovery path: re-open and re-start the service on every device that was
+ * active before the failure. Normal path: claim a free minor, create the
+ * character device node and initialise the per-device state.
+ */
+void mx_dbg_sampler_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ dev_t devn;
+ int ret, i = SCSC_MX_DEBUG_INTERFACES;
+ char dev_name[20];
+ long uid = 0;
+ int minor;
+ struct mx_dbg_sampler_dev *mx_dev;
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress) {
+ SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove - no recovery in progress\n");
+ return;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery_in_progress) {
+ SCSC_TAG_INFO(MX_SAMPLER, "Recovery probe\n");
+
+ /* Re-open and restart the service on every previously active device */
+ while (i--)
+ if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
+ mx_dev = &mx_dbg_sampler.devs[i];
+
+ mx_dev->service = scsc_mx_service_open(mx_dev->mx, SCSC_SERVICE_ID_DBG_SAMPLER, &mx_dev->mx_client, &ret);
+ if (!mx_dev->service) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Error opening service is NULL\n");
+ } else {
+ int r;
+
+ ret = scsc_mx_service_start(mx_dev->service, mx_dev->ref);
+ if (ret) {
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_start failed\n");
+ mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER,
+ "scsc_mx_service_close failed %d\n", r);
+ }
+ }
+ }
+ recovery_in_progress = 0;
+ } else {
+ /* Search for free minors */
+ minor = find_first_zero_bit(bitmap_dbg_sampler_minor, SCSC_MX_DEBUG_INTERFACES);
+ if (minor >= SCSC_MX_DEBUG_INTERFACES) {
+ SCSC_TAG_ERR(MX_SAMPLER, "minor %d > SCSC_TTY_MINORS\n", minor);
+ return;
+ }
+
+#if 0
+ /* TODO GET UID */
+ if (kstrtol(dev_uid, 10, &uid)) {
+ SCSC_TAG_ERR(MX_SAMPLER, "Invalid device uid default to zero\n");
+ uid = 0;
+ }
+#endif
+
+ devn = MKDEV(MAJOR(mx_dbg_sampler.device), MINOR(minor));
+
+ snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "debug_sampler");
+
+ cdev_init(&mx_dbg_sampler.devs[minor].cdev, &mx_dbg_sampler_fops);
+ mx_dbg_sampler.devs[minor].cdev.owner = THIS_MODULE;
+ mx_dbg_sampler.devs[minor].cdev.ops = &mx_dbg_sampler_fops;
+
+ ret = cdev_add(&mx_dbg_sampler.devs[minor].cdev, devn, 1);
+ if (ret) {
+ mx_dbg_sampler.devs[minor].cdev.dev = 0;
+ mx_dbg_sampler.devs[minor].dev = NULL;
+ return;
+ }
+
+ mx_dbg_sampler.devs[minor].dev = device_create(mx_dbg_sampler.class_mx_dbg_sampler, NULL, mx_dbg_sampler.devs[minor].cdev.dev, NULL, dev_name);
+
+ /* NOTE(review): device_create() returns ERR_PTR() on failure, never
+ * NULL, so this check cannot trigger - should be IS_ERR(); confirm
+ * and fix together with the sibling probes in mx_mmap.c */
+ if (mx_dbg_sampler.devs[minor].dev == NULL) {
+ SCSC_TAG_ERR(MX_SAMPLER, "dev is NULL\n");
+ cdev_del(&mx_dbg_sampler.devs[minor].cdev);
+ return;
+ }
+
+ mx_dbg_sampler.devs[minor].mx = mx;
+ mx_dbg_sampler.devs[minor].mx_client.stop_on_failure = mx_dbg_sampler_stop_on_failure;
+ mx_dbg_sampler.devs[minor].mx_client.failure_reset = mx_dbg_sampler_failure_reset;
+
+ mutex_init(&mx_dbg_sampler.devs[minor].mutex);
+ spin_lock_init(&mx_dbg_sampler.devs[minor].spinlock);
+ mx_dbg_sampler.devs[minor].kfifo_max = 0;
+
+ init_waitqueue_head(&mx_dbg_sampler.devs[minor].read_wait);
+
+ /* Update bit mask */
+ set_bit(minor, bitmap_dbg_sampler_minor);
+
+ SCSC_TAG_INFO(MX_SAMPLER, "%s: Ready to start sampling....\n", DRV_NAME);
+ }
+}
+
+/*
+ * Maxwell client-module remove callback.
+ *
+ * Recovery path: stop and close any still-open services (without freeing
+ * their shared-memory resources - the recovery probe restarts them).
+ * Normal path: destroy the device nodes belonging to this mx instance.
+ */
+void mx_dbg_sampler_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ int i = SCSC_MX_DEBUG_INTERFACES, r;
+ struct mx_dbg_sampler_dev *mx_dev;
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress) {
+ SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove - no recovery in progress\n");
+ return;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery_in_progress) {
+ SCSC_TAG_INFO(MX_SAMPLER, "Recovery remove\n");
+
+ while (i--)
+ if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
+ mx_dev = &mx_dbg_sampler.devs[i];
+ /* The service can still be open here while the node is held
+ * open by user space across the recovery cycle */
+ if (mx_dbg_sampler.devs[i].service) {
+ r = scsc_mx_service_stop(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
+
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_close failed err: %d\n", r);
+ }
+ }
+ } else {
+ /* Normal removal: tear down every node owned by this mx instance */
+ while (i--)
+ if (mx_dbg_sampler.devs[i].mx == mx) {
+ device_destroy(mx_dbg_sampler.class_mx_dbg_sampler, mx_dbg_sampler.devs[i].cdev.dev);
+ cdev_del(&mx_dbg_sampler.devs[i].cdev);
+ memset(&mx_dbg_sampler.devs[i].cdev, 0, sizeof(struct cdev));
+ mx_dbg_sampler.devs[i].mx = NULL;
+ clear_bit(i, bitmap_dbg_sampler_minor);
+ }
+ }
+}
+
+/* Client registration with the Maxwell core: probe/remove are invoked on
+ * mx instance arrival, departure and recovery cycles */
+struct scsc_mx_module_client mx_dbg_sampler_driver = {
+ .name = "MX client test driver",
+ .probe = mx_dbg_sampler_probe,
+ .remove = mx_dbg_sampler_remove,
+};
+
+/* Test client driver registration */
+/*
+ * Module init: reserve the char device region, create the sysfs class
+ * and register with the Maxwell core. Each failure path unwinds exactly
+ * what was set up before it.
+ */
+static int __init mx_dbg_sampler_init(void)
+{
+	int ret;
+
+	SCSC_TAG_INFO(MX_SAMPLER, "mx_dbg_sampler INIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
+
+	ret = alloc_chrdev_region(&mx_dbg_sampler.device, 0, SCSC_MX_DEBUG_INTERFACES, "mx_dbg_sampler_char");
+	if (ret)
+		return ret;
+
+	mx_dbg_sampler.class_mx_dbg_sampler = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(mx_dbg_sampler.class_mx_dbg_sampler)) {
+		SCSC_TAG_ERR(MX_SAMPLER, "mx_dbg_sampler class creation failed\n");
+		ret = PTR_ERR(mx_dbg_sampler.class_mx_dbg_sampler);
+		unregister_chrdev_region(mx_dbg_sampler.device, SCSC_MX_DEBUG_INTERFACES);
+		return ret;
+	}
+
+	ret = scsc_mx_module_register_client_module(&mx_dbg_sampler_driver);
+	if (ret) {
+		SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_module_register_client_module failed: ret=%d\n", ret);
+		class_destroy(mx_dbg_sampler.class_mx_dbg_sampler);
+		unregister_chrdev_region(mx_dbg_sampler.device, SCSC_MX_DEBUG_INTERFACES);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* module level */
+/*
+ * Module exit: force-close any still-open services, destroy all device
+ * nodes, then release the class and char region.
+ */
+static void __exit mx_dbg_sampler_unload(void)
+{
+ int i = SCSC_MX_DEBUG_INTERFACES;
+ unsigned long flags;
+ struct mx_dbg_sampler_dev *mx_dev;
+ int r;
+
+ while (i--)
+ if (mx_dbg_sampler.devs[i].cdev.dev && mx_dbg_sampler.devs[i].mx) {
+ mx_dev = &mx_dbg_sampler.devs[i];
+ /* This should never be true - the open node holds a module
+ * reference, which should prevent unloading */
+ if (mx_dbg_sampler.devs[i].service) {
+ r = scsc_mx_service_stop(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_stop failed err: %d\n", r);
+ mx_dbg_sampler_free_resources(mx_dev->service, mx_dev);
+ r = scsc_mx_service_close(mx_dev->service);
+ if (r)
+ SCSC_TAG_ERR(MX_SAMPLER, "scsc_mx_service_close failed err: %d\n", r);
+
+ /* Detach file state under the lock so the irq handler quiesces.
+ * Note: the unlock below uses mx_dev, which aliases devs[i] -
+ * same lock object, just inconsistently spelled. */
+ spin_lock_irqsave(&mx_dbg_sampler.devs[i].spinlock, flags);
+ mx_dbg_sampler.devs[i].filp = NULL;
+ kfifo_free(&mx_dbg_sampler.devs[i].fifo);
+ mx_dbg_sampler.devs[i].service = NULL;
+ spin_unlock_irqrestore(&mx_dev->spinlock, flags);
+ }
+ device_destroy(mx_dbg_sampler.class_mx_dbg_sampler, mx_dbg_sampler.devs[i].cdev.dev);
+ cdev_del(&mx_dbg_sampler.devs[i].cdev);
+ memset(&mx_dbg_sampler.devs[i].cdev, 0, sizeof(struct cdev));
+ mx_dbg_sampler.devs[i].mx = NULL;
+ clear_bit(i, bitmap_dbg_sampler_minor);
+ }
+ class_destroy(mx_dbg_sampler.class_mx_dbg_sampler);
+ unregister_chrdev_region(mx_dbg_sampler.device, SCSC_MX_DEBUG_INTERFACES);
+
+ SCSC_TAG_INFO(MX_SAMPLER, "mx_dbg_sampler EXIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
+}
+
+module_init(mx_dbg_sampler_init);
+module_exit(mx_dbg_sampler_unload);
+
+MODULE_DESCRIPTION("Samsung debug sampler Driver");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
+/*
+ * MODULE_INFO(version, VER_MAJOR);
+ * MODULE_INFO(build, SLSI_BUILD_STRING);
+ * MODULE_INFO(release, SLSI_RELEASE_STRING);
+ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MX_DBG_SAMPLER_H__
+#define __MX_DBG_SAMPLER_H__
+
+
+/**
+ * Debug Sampler DRAM Buffer descriptor.
+ *
+ * Initialised by Debug Sampler Driver on AP and passed by
+ * reference to Debug Sampler (Proxy) on R4 (by reference in
+ * WLAN config).
+ *
+ * Integer fields are LittleEndian.
+ */
+struct debug_sampler_buffer_info {
+ /**
+ * Offset of circular octet buffer w.r.t. shared dram start
+ */
+ uint32_t buf_offset;
+
+ /**
+ * Circular buffer length (octets, 2^n)
+ *
+ * Default = 32KiB
+ */
+ uint32_t buf_len;
+
+ /**
+ * Offset of 32bit write index (not wrapped, counts octets) w.r.t. shared dram start
+ */
+ uint32_t write_index_offset;
+
+ /**
+ * To AP interrupt number (0 to 15)
+ */
+ uint32_t intr_num;
+};
+
+struct debug_sampler_sample_spec {
+ /**
+ * Address of location to sample (usually a register).
+ * NOTE(review): the original comment read "-relative address" with the
+ * base lost in extraction - presumably relative to the R4/chip address
+ * map; confirm against the firmware-side Debug Sampler documentation.
+ *
+ * Default = 0x00000000
+ */
+ uint32_t source_addr;
+
+ /**
+ * Number of significant octets (1,2 or 4) to log (lsbytes from source)
+ *
+ * Default = 4
+ */
+ uint32_t num_bytes;
+
+ /**
+ * Sampling period.
+ *
+ * 0 means as fast as possible (powers of 2 only)
+ *
+ * Default = 0
+ */
+ uint32_t period_usecs;
+};
+
+
+/**
+ * Debug Sampler Config Structure.
+ *
+ * This structure is allocated and initialised by the Debug Sampler driver
+ * on the AP and passed via the service_start message.
+ */
+struct debug_sampler_config {
+ /**
+ * Config Structure Version (= DBGSAMPLER_CONFIG_VERSION)
+ *
+ * Set by driver, checked by service.
+ */
+ uint32_t version;
+
+ /**
+ * To-host circular buffer descriptor.
+ */
+ struct debug_sampler_buffer_info buffer_info;
+
+ /**
+ * Init/default sampling specification.
+ *
+ * (There is also an API on R4 to allow dynamic specification
+ * change - e.g. by WLAN service)
+ */
+ struct debug_sampler_sample_spec sample_spec;
+
+ /**
+ * Start/stop sampling when service is started/stopped?
+ *
+ * (There is also an API on R4 to allow dynamic start/stop
+ * - e.g. by WLAN service)
+ *
+ * Default = 0
+ */
+ uint32_t auto_start;
+};
+
+/* In-memory layout of the shared region allocated by the driver:
+ * config block, the shared write index, then the 64-byte aligned
+ * circular sample buffer (which extends past this struct). */
+struct debug_sampler_align {
+
+ struct debug_sampler_config config __aligned(4);
+
+ /* Firmware-maintained write index (octets, not wrapped) */
+ u32 index;
+
+ /* Placeholder marking the aligned start of the circular buffer */
+ void *mem __aligned(64);
+
+};
+
+
+#endif /* __MX_DBG_SAMPLER_H__ */
+
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <asm/page.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include "scsc_mif_abs.h"
+#include "mxman.h"
+#include "scsc_mx_impl.h"
+#include "gdb_transport.h"
+
+#define DRV_NAME "mx_mmap"
+#define DEVICE_NAME "maxwell_mmap"
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+#define VER_MAJOR 0
+#define VER_MINOR 0
+
+#define SCSC_MMAP_NODE 1
+#define SCSC_GDB_NODE 1
+#define SCSC_GDB_DEF_BUF_SZ 64
+
+#define SCSC_MAX_INTERFACES (5 * (SCSC_MMAP_NODE + SCSC_GDB_NODE))
+
+#define MAX_MEMORY (8 * 1024 * 1024UL) /* maximum memory: this should match MX_DRAM_SIZE_SECTION_1 */
+
+DECLARE_BITMAP(bitmap_minor, SCSC_MAX_INTERFACES);
+
+struct mx_mmap_dev {
+ /* file pointer */
+ struct file *filp;
+ /* char device */
+ struct cdev cdev;
+ /*device pointer*/
+ struct device *dev;
+ /*mif_abs pointer*/
+ struct scsc_mif_abs *mif_abs;
+ /* owning Maxwell instance */
+ struct scsc_mx *mx;
+ /* gdb transport (set on gdb nodes only) */
+ struct gdb_transport *gdb_transport;
+ /*memory cache*/
+ void *mem;
+ /* Associated kfifo */
+ struct kfifo fifo;
+ /* Associated read_wait queue.*/
+ wait_queue_head_t read_wait;
+ /* Single-open guard bit, claimed with test_and_set_bit_lock() */
+ volatile unsigned long lock;
+};
+
+/**
+ * SCSC User Space mmap interface (singleton)
+ */
+static struct {
+ dev_t device; /* base of the allocated char device region */
+ struct class *class_mx_mmap; /* sysfs class for the device nodes */
+ struct mx_mmap_dev devs[SCSC_MAX_INTERFACES]; /*MMAP NODE + GDB NODE*/
+} mx_mmap;
+
+/* Open the mmap char device: stash the device context for later fops. */
+int mx_mmap_open(struct inode *inode, struct file *filp)
+{
+	struct mx_mmap_dev *mx_dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
+
+	SCSC_TAG_INFO(MX_MMAP, "open %p\n", filp);
+	filp->private_data = mx_dev;
+
+	return 0;
+}
+
+/*
+ * Map the contiguous MIF device memory into user space; specific to the
+ * device node the caller opened.
+ *
+ * Returns 0 on success, -EINVAL for an oversized request, -ENODEV when
+ * the MIF RAM is unavailable, or the remap_pfn_range() error.
+ */
+int mx_mmap_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mx_mmap_dev *mx_dev;
+	uintptr_t pfn;
+
+	if (vma->vm_end - vma->vm_start > MAX_MEMORY) {
+		SCSC_TAG_ERR(MX_MMAP, "Incorrect mapping size %ld, should be less than %ld\n",
+			     vma->vm_end - vma->vm_start, MAX_MEMORY);
+		/* Fix: previously err was set to -EINVAL but never returned,
+		 * so an oversized mapping went ahead anyway */
+		return -EINVAL;
+	}
+
+	mx_dev = filp->private_data;
+
+	/* Kernel-side base of the shared MIF RAM */
+	mx_dev->mem = mx_dev->mif_abs->get_mifram_ptr(mx_dev->mif_abs, 0);
+	if (!mx_dev->mem)
+		return -ENODEV;
+
+	/* Get page frame number from virtual abstraction layer */
+	pfn = mx_dev->mif_abs->get_mifram_pfn(mx_dev->mif_abs);
+
+	/* remap kernel memory to userspace */
+	return remap_pfn_range(vma, vma->vm_start, pfn,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+
+
+/* Close the mmap node; the mapping itself is torn down by the VM layer. */
+int mx_mmap_release(struct inode *inode, struct file *filp)
+{
+	SCSC_TAG_INFO(MX_MMAP, "close %p\n", filp);
+
+	/* TODO : Unmap pfn_range */
+
+	return 0;
+}
+
+/* File operations for the mmap character device nodes */
+static const struct file_operations mx_mmap_fops = {
+ .owner = THIS_MODULE,
+ .open = mx_mmap_open,
+ .mmap = mx_mmap_mmap,
+ .release = mx_mmap_release,
+};
+
+/*
+ * Open a GDB channel node. Only one concurrent opener is allowed; the
+ * transport's channel_open_mutex is held until release() so the channel
+ * cannot be torn down underneath the client.
+ *
+ * Returns 0 on success, -ENODEV without a transport, -EBUSY if already
+ * open, -ENOMEM when the receive fifo cannot be allocated.
+ */
+int mx_gdb_open(struct inode *inode, struct file *filp)
+{
+	struct mx_mmap_dev *mx_dev;
+	int ret;
+
+	mx_dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
+
+	SCSC_TAG_INFO(MX_MMAP, "open %p\n", filp);
+
+	if (!mx_dev->gdb_transport) {
+		SCSC_TAG_ERR(MX_MMAP, "no transport %p\n", filp);
+		return -ENODEV;
+	}
+
+	if (test_and_set_bit_lock(0, &mx_dev->lock)) {
+		SCSC_TAG_ERR(MX_MMAP, "already open %p\n", filp);
+		return -EBUSY;
+	}
+
+	/* Prevent channel teardown while client has open */
+	mutex_lock(&mx_dev->gdb_transport->channel_open_mutex);
+
+	ret = kfifo_alloc(&mx_dev->fifo, GDB_TRANSPORT_BUF_LENGTH, GFP_KERNEL);
+	if (ret) {
+		/* Fix: bail out before publishing any file state, so no stale
+		 * filp/private_data pointers survive a failed open */
+		mutex_unlock(&mx_dev->gdb_transport->channel_open_mutex);
+		clear_bit_unlock(0, &mx_dev->lock);
+		return -ENOMEM;
+	}
+
+	/* Publish the file only once the fifo exists; gdb_read_callback()
+	 * keys off mx_dev->filp to decide whether to buffer */
+	filp->private_data = mx_dev;
+	mx_dev->filp = filp;
+
+	return 0;
+}
+
+/*
+ * Send a GDB command to the firmware over the gdb transport. Requests up
+ * to SCSC_GDB_DEF_BUF_SZ bytes use an on-stack buffer; larger ones go
+ * through a temporary heap allocation.
+ *
+ * Returns the number of bytes consumed, -ENOMEM on allocation failure,
+ * or -EFAULT when the user buffer cannot be read.
+ */
+static ssize_t mx_gdb_write(struct file *filp, const char __user *ubuf, size_t len, loff_t *offset)
+{
+	struct mx_mmap_dev *mx_dev;
+	char *wbuf = NULL, *lbuf = NULL, buf[SCSC_GDB_DEF_BUF_SZ] = {};
+
+	mx_dev = filp->private_data;
+	/* When write_req do NOT fit inside the auto array just dyn-alloc */
+	if (len <= SCSC_GDB_DEF_BUF_SZ) {
+		wbuf = buf;
+	} else {
+		wbuf = kzalloc(len, GFP_KERNEL);
+		if (!wbuf)
+			return -ENOMEM;
+		/* Use the freshly dyn-allocated buf */
+		SCSC_TAG_DEBUG(MX_MMAP, "Allocated GDB write dyn-buffer [%zd]\n", len);
+		lbuf = wbuf;
+	}
+
+	if (copy_from_user(wbuf, ubuf, len)) {
+		kfree(lbuf);
+		/* Fix: a copy_from_user failure is an address-space fault,
+		 * which by kernel convention is -EFAULT, not -EINVAL */
+		return -EFAULT;
+	}
+
+	gdb_transport_send(mx_dev->gdb_transport, (void *)wbuf, len);
+	kfree(lbuf);
+
+	return len;
+}
+
+/*
+ * Read buffered GDB response bytes into user space. Blocks until data
+ * arrives, unless O_NONBLOCK is set, in which case an empty fifo yields
+ * -EAGAIN. Returns bytes copied, 0 for a zero-length read, or an error.
+ */
+static ssize_t mx_gdb_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+{
+	struct mx_mmap_dev *mx_dev = filp->private_data;
+	int rc = 0;
+
+	while (len) {
+		unsigned int copied;
+
+		if (!kfifo_is_empty(&mx_dev->fifo)) {
+			rc = kfifo_to_user(&mx_dev->fifo, buf, len, &copied);
+			if (rc == 0) {
+				SCSC_TAG_DEBUG(MX_MMAP, "Copied %d bytes to user.\n", copied);
+				rc = copied;
+			}
+			break;
+		}
+
+		if (filp->f_flags & O_NONBLOCK) {
+			rc = -EAGAIN;
+			break;
+		}
+
+		/* Sleep until the transport callback buffers data and wakes us */
+		rc = wait_event_interruptible(mx_dev->read_wait,
+					      !kfifo_is_empty(&mx_dev->fifo));
+		if (rc < 0)
+			break;
+	}
+
+	return rc;
+}
+
+/*
+ * Transport receive callback: buffer an incoming firmware message into
+ * the node's kfifo and wake any blocked reader. Messages arriving while
+ * the node is closed, or when the fifo lacks space, are dropped with an
+ * error log.
+ */
+void gdb_read_callback(const void *message, size_t length, void *data)
+{
+	struct mx_mmap_dev *mx_dev = (struct mx_mmap_dev *)data;
+	int pushed;
+
+	if (!mx_dev->filp) {
+		SCSC_TAG_ERR(MX_MMAP, "Device is closed. Dropping %zu octets\n",
+			     length);
+		return;
+	}
+
+	if (kfifo_avail(&mx_dev->fifo) < length) {
+		SCSC_TAG_ERR(MX_MMAP, "Kfifo Buffer Overflow\n");
+		return;
+	}
+
+	pushed = kfifo_in(&mx_dev->fifo, message, length);
+	if (pushed != length) {
+		SCSC_TAG_ERR(MX_MMAP, "Unable to push into Kfifo Buffer\n");
+		return;
+	}
+	SCSC_TAG_DEBUG(MX_MMAP, "Buffered %zu bytes\n", length);
+
+	wake_up_interruptible(&mx_dev->read_wait);
+}
+
+/* poll: POLLIN|POLLRDNORM when data is buffered, else POLLOUT|POLLWRNORM. */
+static unsigned int mx_gdb_poll(struct file *filp, poll_table *wait)
+{
+	struct mx_mmap_dev *mx_dev = filp->private_data;
+	unsigned int mask = POLLOUT | POLLWRNORM; /* writable */
+
+	poll_wait(filp, &mx_dev->read_wait, wait);
+
+	if (!kfifo_is_empty(&mx_dev->fifo))
+		mask = POLLIN | POLLRDNORM; /* readable */
+
+	return mask;
+}
+
+/*
+ * Close a GDB channel node: clear the single-open guard, detach the file
+ * state, free the fifo and let go of the transport's open mutex.
+ * Returns 0, or -EIO on inconsistent file state.
+ */
+int mx_gdb_release(struct inode *inode, struct file *filp)
+{
+ struct mx_mmap_dev *mx_dev;
+
+ mx_dev = container_of(inode->i_cdev, struct mx_mmap_dev, cdev);
+
+ SCSC_TAG_INFO(MX_MMAP, "close %p\n", filp);
+
+ if (mx_dev->filp == NULL) {
+ SCSC_TAG_ERR(MX_MMAP, "Device already closed\n");
+ return -EIO;
+ }
+
+ if (mx_dev != filp->private_data) {
+ SCSC_TAG_ERR(MX_MMAP, "Data mismatch\n");
+ return -EIO;
+ }
+
+
+ /* Allow the next opener in */
+ clear_bit_unlock(0, &mx_dev->lock);
+
+ filp->private_data = NULL;
+ mx_dev->filp = NULL;
+ kfifo_free(&mx_dev->fifo);
+
+ /* Channel teardown is possible again from here on */
+ mutex_unlock(&mx_dev->gdb_transport->channel_open_mutex);
+
+ return 0;
+}
+
+/* File operations for the GDB channel character device nodes */
+static const struct file_operations mx_gdb_fops = {
+ .owner = THIS_MODULE,
+ .open = mx_gdb_open,
+ .write = mx_gdb_write,
+ .read = mx_gdb_read,
+ .release = mx_gdb_release,
+ .poll = mx_gdb_poll,
+};
+
+/*
+ * gdb_transport client probe: create a character device node for a newly
+ * available GDB channel (r4, m4 or m4_1 depending on the transport type)
+ * and hook the transport's receive path into the node's kfifo.
+ */
+void client_gdb_probe(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport, char *dev_uid)
+{
+	dev_t devn;
+	int ret;
+	char dev_name[20];
+	struct mx_mmap_dev *mx_dev;
+	long uid = 0;
+	int minor;
+
+	/************/
+	/* GDB node */
+	/************/
+	/* Search for free minors */
+	minor = find_first_zero_bit(bitmap_minor, SCSC_MAX_INTERFACES);
+
+	if (minor >= SCSC_MAX_INTERFACES) {
+		SCSC_TAG_ERR(MX_MMAP, "minor %d > SCSC_TTY_MINORS\n", minor);
+		return;
+	}
+
+	if (kstrtol(dev_uid, 10, &uid)) {
+		SCSC_TAG_ERR(MX_MMAP, "Invalid device uid default to zero\n");
+		uid = 0;
+	}
+
+	devn = MKDEV(MAJOR(mx_mmap.device), MINOR(minor));
+
+	/* Node name encodes the processor the channel talks to */
+	if (gdb_transport->type == GDB_TRANSPORT_M4)
+		snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "m4_gdb");
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	else if (gdb_transport->type == GDB_TRANSPORT_M4_1)
+		snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "m4_1_gdb");
+#endif
+	else
+		snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "r4_gdb");
+
+	cdev_init(&mx_mmap.devs[minor].cdev, &mx_gdb_fops);
+	mx_mmap.devs[minor].cdev.owner = THIS_MODULE;
+	mx_mmap.devs[minor].cdev.ops = &mx_gdb_fops;
+
+	ret = cdev_add(&mx_mmap.devs[minor].cdev, devn, 1);
+	if (ret) {
+		SCSC_TAG_ERR(MX_MMAP, "cdev_add failed for device %s\n", dev_name);
+		mx_mmap.devs[minor].cdev.dev = 0;
+		mx_mmap.devs[minor].dev = NULL;
+		return;
+	}
+
+	mx_mmap.devs[minor].dev = device_create(mx_mmap.class_mx_mmap, NULL, mx_mmap.devs[minor].cdev.dev, NULL, dev_name);
+
+	/* Fix: device_create() returns ERR_PTR() on failure, never NULL -
+	 * the old NULL check could let an ERR_PTR device through */
+	if (IS_ERR_OR_NULL(mx_mmap.devs[minor].dev)) {
+		mx_mmap.devs[minor].dev = NULL;
+		cdev_del(&mx_mmap.devs[minor].cdev);
+		return;
+	}
+
+	mx_dev = &mx_mmap.devs[minor];
+	mx_mmap.devs[minor].gdb_transport = gdb_transport;
+
+	gdb_transport_register_channel_handler(gdb_transport, gdb_read_callback, (void *)mx_dev);
+	init_waitqueue_head(&mx_mmap.devs[minor].read_wait);
+
+	/* Update bit mask */
+	set_bit(minor, bitmap_minor);
+}
+
+/* Tear down every gdb char device node bound to the departing transport. */
+void client_gdb_remove(struct gdb_transport_client *gdb_client, struct gdb_transport *gdb_transport)
+{
+	int i;
+
+	for (i = SCSC_MAX_INTERFACES - 1; i >= 0; i--) {
+		if (mx_mmap.devs[i].gdb_transport != gdb_transport)
+			continue;
+
+		device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
+		cdev_del(&mx_mmap.devs[i].cdev);
+		memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
+		mx_mmap.devs[i].gdb_transport = NULL;
+		clear_bit(i, bitmap_minor);
+	}
+}
+
+/* Registration with the gdb transport layer: probe/remove are invoked as
+ * GDB channels come and go */
+struct gdb_transport_client client_gdb_driver = {
+ .name = "GDB client driver",
+ .probe = client_gdb_probe,
+ .remove = client_gdb_remove,
+};
+
+/*
+ * MIF mmap driver probe: create the /dev mmap node for a newly available
+ * MIF abstraction instance and cache its shared-RAM base pointer.
+ */
+void scsc_mx_mmap_module_probe(struct scsc_mif_mmap_driver *abs_driver, struct scsc_mif_abs *mif_abs)
+{
+	dev_t devn;
+	int ret;
+	char dev_name[20];
+	char *dev_uid;
+	long uid = 0;
+	int minor = 0;
+
+	/* Search for free minors */
+	minor = find_first_zero_bit(bitmap_minor, SCSC_MAX_INTERFACES);
+
+	if (minor >= SCSC_MAX_INTERFACES) {
+		SCSC_TAG_ERR(MX_MMAP, "minor %d > SCSC_TTY_MINORS\n", minor);
+		return;
+	}
+
+	/*************/
+	/* MMAP node */
+	/*************/
+	dev_uid = mif_abs->get_uid(mif_abs);
+	if (kstrtol(dev_uid, 10, &uid))
+		uid = 0;
+
+	devn = MKDEV(MAJOR(mx_mmap.device), MINOR(minor));
+	snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "mx", (int)uid, "mmap");
+
+	cdev_init(&mx_mmap.devs[minor].cdev, &mx_mmap_fops);
+	mx_mmap.devs[minor].cdev.owner = THIS_MODULE;
+	mx_mmap.devs[minor].cdev.ops = &mx_mmap_fops;
+
+	ret = cdev_add(&mx_mmap.devs[minor].cdev, devn, 1);
+	if (ret) {
+		SCSC_TAG_ERR(MX_MMAP, "cdev_add failed for device %s\n", dev_name);
+		mx_mmap.devs[minor].cdev.dev = 0;
+		mx_mmap.devs[minor].dev = NULL;
+		return;
+	}
+
+	mx_mmap.devs[minor].dev = device_create(mx_mmap.class_mx_mmap, NULL, mx_mmap.devs[minor].cdev.dev, NULL, dev_name);
+
+	/* Fix: device_create() returns ERR_PTR() on failure, never NULL -
+	 * the old NULL check could let an ERR_PTR device through */
+	if (IS_ERR_OR_NULL(mx_mmap.devs[minor].dev)) {
+		mx_mmap.devs[minor].dev = NULL;
+		cdev_del(&mx_mmap.devs[minor].cdev);
+		return;
+	}
+
+	mx_mmap.devs[minor].mif_abs = mif_abs;
+
+	/* Cache the kernel-side base of the shared MIF RAM */
+	mx_mmap.devs[minor].mem = mif_abs->get_mifram_ptr(mif_abs, 0);
+
+	/* Update bit mask */
+	set_bit(minor, bitmap_minor);
+}
+
+
+/* Tear down every mmap char device node bound to the departing MIF instance. */
+void scsc_mx_mmap_module_remove(struct scsc_mif_abs *mif_abs)
+{
+	int i;
+
+	for (i = SCSC_MAX_INTERFACES - 1; i >= 0; i--) {
+		if (mx_mmap.devs[i].mif_abs != mif_abs)
+			continue;
+
+		device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
+		cdev_del(&mx_mmap.devs[i].cdev);
+		memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
+		mx_mmap.devs[i].mif_abs = NULL;
+		clear_bit(i, bitmap_minor);
+	}
+}
+
+/* Registration record for the MIF mmap abstraction: probe/remove are
+ * called as Maxwell interfaces appear and disappear.
+ */
+static struct scsc_mif_mmap_driver mx_module_mmap_if = {
+	.name = "Maxwell mmap Driver",
+	.probe = scsc_mx_mmap_module_probe,
+	.remove = scsc_mx_mmap_module_remove,
+};
+
+/* Module init: reserve a char-device region and device class, then
+ * register with the lower layers so probe callbacks can start creating
+ * per-interface device nodes.
+ */
+static int __init mx_mmap_init(void)
+{
+	int ret;
+
+	SCSC_TAG_INFO(MX_MMAP, "mx_mmap INIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
+
+
+	/* One major with SCSC_MAX_INTERFACES minors, allocated dynamically */
+	ret = alloc_chrdev_region(&mx_mmap.device, 0, SCSC_MAX_INTERFACES, "mx_mmap_char");
+	if (ret)
+		goto error;
+
+	mx_mmap.class_mx_mmap = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(mx_mmap.class_mx_mmap)) {
+		ret = PTR_ERR(mx_mmap.class_mx_mmap);
+		goto error_class;
+	}
+
+	/* From here on, lower layers may invoke our probe callbacks */
+	scsc_mif_mmap_register(&mx_module_mmap_if);
+
+	/* NOTE(review): GDB client registration failure is only logged,
+	 * not propagated - apparently a deliberate best-effort; confirm.
+	 */
+	ret = gdb_transport_register_client(&client_gdb_driver);
+	if (ret)
+		SCSC_TAG_ERR(MX_MMAP, "scsc_mx_module_register_client_module failed: r=%d\n", ret);
+
+
+	return 0;
+
+error_class:
+	unregister_chrdev_region(mx_mmap.device, SCSC_MAX_INTERFACES);
+error:
+	return ret;
+}
+
+/* Module exit: tear everything down in the reverse order of init.
+ *
+ * Fix: the original destroyed the device class and chrdev region BEFORE
+ * unregistering the gdb client and mmap driver, so a late probe/remove
+ * callback could still run against freed class/devices. Unregister the
+ * callbacks first, then destroy any remaining device nodes.
+ */
+static void __exit mx_mmap_cleanup(void)
+{
+	int i = SCSC_MAX_INTERFACES;
+
+	/* Stop the lower layers calling into us before tearing down nodes */
+	gdb_transport_unregister_client(&client_gdb_driver);
+	/* Notify lower layers that we are unloading */
+	scsc_mif_mmap_unregister(&mx_module_mmap_if);
+
+	/* Destroy any device nodes the remove callbacks did not reach */
+	while (i--) {
+		if (mx_mmap.devs[i].cdev.dev) {
+			device_destroy(mx_mmap.class_mx_mmap, mx_mmap.devs[i].cdev.dev);
+			cdev_del(&mx_mmap.devs[i].cdev);
+			memset(&mx_mmap.devs[i].cdev, 0, sizeof(struct cdev));
+			clear_bit(i, bitmap_minor);
+		}
+	}
+	class_destroy(mx_mmap.class_mx_mmap);
+	unregister_chrdev_region(mx_mmap.device, SCSC_MAX_INTERFACES);
+	SCSC_TAG_INFO(MX_MMAP, "mx_mmap EXIT; version: %d.%d\n", VER_MAJOR, VER_MINOR);
+}
+
+module_init(mx_mmap_init);
+module_exit(mx_mmap_cleanup);
+
+MODULE_DESCRIPTION("Samsung MMAP/GDB Driver");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
+/*
+ * MODULE_INFO(version, VER_MAJOR);
+ * MODULE_INFO(build, SLSI_BUILD_STRING);
+ * MODULE_INFO(release, SLSI_RELEASE_STRING);
+ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * mx140 Infrastructure Configuration Structure.
+ *
+ * Used to pass configuration data from AP to R4 infrastructure
+ * on Maxwell Subsystem startup.
+ *
+ * Notes:
+ *
+ * - All multi-octet integers shall be stored LittleEndian.
+ *
+ * - All location fields ("*_loc") are 32 bit octet offsets w.r.t. the R4
+ * address map. They can therefore refer to DRAM memory or Mailbox registers.
+ *
+ * - "typedefs" are avoided to allow inclusion in linux source code.
+ */
+#ifndef MXCONF_H__
+#define MXCONF_H__
+
+/* Uses */
+
+/* It appears that due to the previous syntax "__packed struct foo" used in this
+ * header, the structures here don't actually get packed. Clang warns that
+ * that syntax is ignored. But correcting it causes a misalignment with FW.
+ * The __MXPACKED macro is used to stop packing the structures in this
+ * header until we've investigated further.
+ */
+#define __MXPACKED /* TODO: HACK - don't actually pack! */
+
+/* Definitions */
+
+/**
+ * Config structure magic number.
+ *
+ * The AP writes this value and the R4 checks it to trap endian mismatches.
+ */
+#define MXCONF_MAGIC 0x79828486
+
+/**
+ * Config structure version
+ *
+ * The AP writes these values and the R4 checks them to trap config structure
+ * mismatches.
+ */
+#define MXCONF_VERSION_MAJOR 0
+#define MXCONF_VERSION_MINOR 5 /* For fleximac moredump */
+
+/* Types */
+
+/**
+ * Maxwell Circular Packet Buffer Configuration.
+ */
+/* One AP<->R4 circular packet buffer; all fields little-endian and all
+ * *_loc fields are R4 address-map octet offsets (see file header).
+ */
+struct mxcbufconf {
+	scsc_mifram_ref buffer_loc;      /**< Location of allocated buffer in DRAM */
+	uint32_t        num_packets;     /**< Total number of packets that can be stored in the buffer */
+	uint32_t        packet_size;     /**< Size of each individual packet within the buffer */
+	scsc_mifram_ref read_index_loc;  /**< Location of 32bit read index in DRAM or Mailbox */
+	scsc_mifram_ref write_index_loc; /**< Location of 32bit write index */
+} __MXPACKED;
+
+/**
+ * Maxwell Management Simplex Stream Configuration
+ *
+ * A circular buffer plus a pair of R/W signaling bits.
+ */
+/* Simplex stream: one circular buffer plus the MIF interrupt bits used
+ * to signal reads and writes between AP and R4.
+ */
+struct mxstreamconf {
+	/** Circular Packet Buffer configuration */
+	struct mxcbufconf buf_conf;
+
+	/** Allocated MIF Interrupt Read Bit Index */
+	uint8_t           read_bit_idx;
+
+	/** Allocated MIF Interrupt Write Bit Index */
+	uint8_t           write_bit_idx;
+} __MXPACKED;
+
+/**
+ * Maxwell Management Transport Configuration
+ *
+ * A pair of simplex streams.
+ */
+/* Full-duplex transport: one simplex stream in each direction. */
+struct mxtransconf {
+	struct mxstreamconf to_ap_stream_conf;   /* R4 -> AP direction */
+	struct mxstreamconf from_ap_stream_conf; /* AP -> R4 direction */
+} __MXPACKED;
+
+/**
+ * Maxwell Infrastructure Configuration Version
+ */
+/* Config structure version written by the AP; the R4 checks it against
+ * MXCONF_VERSION_MAJOR/MINOR to trap structure mismatches.
+ */
+struct mxconfversion {
+	uint16_t major;
+	uint16_t minor;
+} __MXPACKED;
+
+/**
+ * Mxlog Event Buffer Configuration.
+ *
+ * A circular buffer. Size must be a multiple of 2.
+ */
+struct mxlogconf
+{
+	/* Stream carrying mxlog event records from FW to the AP */
+	struct mxstreamconf stream_conf;
+} __MXPACKED;
+
+
+/**
+ * Maxwell Infrastructure Configuration Override (HCF block)
+ */
+/* Reference to an HCF config block placed in shared DRAM:
+ * offset is an R4 address-map octet offset, size is in octets.
+ */
+struct mxmibref {
+	uint32_t offset;
+	uint32_t size;
+} __MXPACKED;
+
+
+/**
+ * Maxwell Infrastructure Configuration
+ */
+struct mxconf {
+	/**
+	 * Config Magic Number
+	 *
+	 * Always 1st field in config.
+	 */
+	uint32_t magic;
+
+	/**
+	 * Config Version.
+	 *
+	 * Always second field in config.
+	 */
+	struct mxconfversion version;
+
+	/**
+	 * MX Management Message Transport Configuration.
+	 */
+	struct mxtransconf mx_trans_conf;
+
+	/**
+	 * MX Management GDB Message Transport Configuration.
+	 */
+	/* Cortex-R4 channel */
+	struct mxtransconf mx_trans_conf_gdb_r4;
+	/* Cortex-M4 channel */
+	struct mxtransconf mx_trans_conf_gdb_m4;
+
+	/**
+	 * Mxlog Event Buffer Configuration.
+	 */
+	struct mxlogconf mxlogconf;
+
+	/* FROM MINOR_2: fields below were appended at version minor 2;
+	 * NOTE(review): implied by the versioning scheme - readers should
+	 * check version.minor before using them; confirm against FW.
+	 */
+
+	/**
+	 * SOC HW revision override from host
+	 */
+	uint32_t soc_revision;
+
+	/* FROM MINOR_3 */
+
+	/**
+	 * Setup flags
+	 */
+#define MXCONF_FLAGS_FM_ON	(BIT(0))	/* FM already on */
+	uint32_t flags;
+
+	/* FROM MINOR_4 */
+
+	/**
+	 * Common HCF offset
+	 */
+	struct mxmibref fwconfig;
+
+	/* FROM MINOR_5 */
+
+	/* Fleximac Cortex-M3_1 piggy back as M4 channel.
+	 * (Driver must initialise from-ap buffer address to 0
+	 * if channel is not in use).
+	 */
+	struct mxtransconf mx_trans_conf_gdb_m4_1;
+
+} __MXPACKED;
+
+#endif /* MXCONF_H__ */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_logring.h>
+
+#include "mxfwconfig.h"
+#include "miframman.h"
+#include "scsc_mx_impl.h"
+#include "mxconf.h"
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#define MXFWCONFIG_CFG_SUBDIR "common"
+#define MXFWCONFIG_CFG_FILE_HW "common.hcf"
+#define MXFWCONFIG_CFG_FILE_SW "common_sw.hcf"
+
+static void mxfwconfig_get_dram_ref(struct scsc_mx *mx, struct mxmibref *cfg_ref);
+
+/* Load config into non-shared DRAM */
+/* Load one common HCF config file into non-shared DRAM.
+ *
+ * Claims the next slot in cfg->config[], requests the file from the
+ * filesystem, validates its 8-octet header and strips it, accumulating
+ * the payload size in cfg->shtotal.
+ *
+ * Returns 0 on success; -E2BIG if all slots are used; the request error
+ * if the file could not be loaded (slot is released); -EINVAL if the
+ * header is invalid (slot stays claimed - caller must unload it).
+ */
+static int mxfwconfig_load_cfg(struct scsc_mx *mx, struct mxfwconfig *cfg, const char *filename)
+{
+	int r = 0;
+	u32 i;
+
+	if (cfg->configs >= SCSC_MX_MAX_COMMON_CFG) {
+		SCSC_TAG_ERR(MX_CFG, "Too many common config files (%u)\n", cfg->configs);
+		return -E2BIG;
+	}
+
+	i = cfg->configs++; /* Claim next config slot */
+
+	/* Load config file from file system into DRAM */
+	r = mx140_file_request_conf(mx, &cfg->config[i].fw, MXFWCONFIG_CFG_SUBDIR, filename);
+	if (r) {
+		/* Fix: release the claimed slot on failure so an absent file
+		 * doesn't leave a stale entry behind for the copy loop.
+		 */
+		cfg->config[i].fw = NULL;
+		cfg->config[i].cfg_data = NULL;
+		cfg->config[i].cfg_len = 0;
+		cfg->configs--;
+		return r;
+	}
+
+	/* Initial size of file */
+	cfg->config[i].cfg_len = cfg->config[i].fw->size;
+	cfg->config[i].cfg_data = cfg->config[i].fw->data;
+
+	/* Validate file in DRAM */
+	if (cfg->config[i].cfg_len >= MX_COMMON_HCF_HDR_SIZE &&		/* Room for header */
+	    /*(cfg->config[i].cfg[6] & 0xF0) == 0x10 && */		/* Curator subsystem */
+	    cfg->config[i].cfg_data[7] == 1) {				/* First file format */
+		int j;
+
+		cfg->config[i].cfg_hash = 0;
+
+		/* Calculate hash over MX_COMMON_HASH_SIZE_BYTES octets */
+		for (j = 0; j < MX_COMMON_HASH_SIZE_BYTES; j++) {
+			cfg->config[i].cfg_hash =
+				(cfg->config[i].cfg_hash << 8) | cfg->config[i].cfg_data[j + MX_COMMON_HASH_OFFSET];
+		}
+
+		SCSC_TAG_INFO(MX_CFG, "CFG hash: 0x%.04x\n", cfg->config[i].cfg_hash);
+
+		/* All good - consume header and continue */
+		cfg->config[i].cfg_len -= MX_COMMON_HCF_HDR_SIZE;
+		cfg->config[i].cfg_data += MX_COMMON_HCF_HDR_SIZE;
+	} else {
+		SCSC_TAG_ERR(MX_CFG, "Invalid HCF header size %zu\n", cfg->config[i].cfg_len);
+
+		/* Caller must call mxfwconfig_unload_cfg() to release the buffer */
+		return -EINVAL;
+	}
+
+	/* Running shtotal payload */
+	cfg->shtotal += cfg->config[i].cfg_len;
+
+	SCSC_TAG_INFO(MX_CFG, "Loaded common config %s, size %zu, payload size %zu, shared dram total %zu\n",
+		      filename, cfg->config[i].fw->size, cfg->config[i].cfg_len, cfg->shtotal);
+
+	return r;
+}
+
+/* Unload config from non-shared DRAM */
+/* Release one config slot's file from non-shared DRAM, if loaded.
+ * Returns 0, or -E2BIG for an out-of-range index.
+ */
+static int mxfwconfig_unload_cfg(struct scsc_mx *mx, struct mxfwconfig *cfg, u32 index)
+{
+	const struct firmware *fw;
+
+	if (index >= SCSC_MX_MAX_COMMON_CFG) {
+		SCSC_TAG_ERR(MX_CFG, "Out of range index (%u)\n", index);
+		return -E2BIG;
+	}
+
+	fw = cfg->config[index].fw;
+	if (!fw)
+		return 0;	/* Slot already empty */
+
+	SCSC_TAG_DBG3(MX_CFG, "Unload common config %u\n", index);
+
+	mx140_file_release_conf(mx, fw);
+
+	cfg->config[index].fw = NULL;
+	cfg->config[index].cfg_data = NULL;
+	cfg->config[index].cfg_len = 0;
+
+	return 0;
+}
+
+/*
+ * Load Common config files
+ */
+/* Load the Common config files (HW then SW) and combine their payloads
+ * into one shared-DRAM block whose ref is returned through @cfg_ref.
+ *
+ * The SW file is optional: its absence is not an error. A corrupt
+ * (-EINVAL) file abandons the load.
+ */
+int mxfwconfig_load(struct scsc_mx *mx, struct mxmibref *cfg_ref)
+{
+	struct mxfwconfig *cfg = scsc_mx_get_mxfwconfig(mx);
+	struct miframman *miframman = scsc_mx_get_ramman(mx);
+	int r;
+	u32 i;
+	u8 *dest;
+
+	/* HW file is optional */
+	r = mxfwconfig_load_cfg(mx, cfg, MXFWCONFIG_CFG_FILE_HW);
+	if (r)
+		goto done;
+
+	/* SW file is optional, but not without HW file */
+	r = mxfwconfig_load_cfg(mx, cfg, MXFWCONFIG_CFG_FILE_SW);
+	if (r == -EINVAL) {
+		/* If SW file is corrupt, abandon both HW and SW */
+		goto done;
+	} else if (r) {
+		/* Fix: a merely-absent SW file is not an error - continue
+		 * with the HW file alone. Previously any non-EINVAL failure
+		 * fell through with r != 0, so the configs were copied into
+		 * shared DRAM and then abandoned at "done", and an error was
+		 * returned despite a valid HW config.
+		 */
+		r = 0;
+	}
+
+	/* Allocate shared DRAM */
+	cfg->shdram = miframman_alloc(miframman, cfg->shtotal, 4, MIFRAMMAN_OWNER_COMMON);
+	if (!cfg->shdram) {
+		SCSC_TAG_ERR(MX_CFG, "MIF alloc failed for %zu octets\n", cfg->shtotal);
+		r = -ENOMEM;
+		goto done;
+	}
+
+	/* Copy files into shared DRAM */
+	for (i = 0, dest = (u8 *)cfg->shdram;
+	     i < cfg->configs;
+	     i++) {
+		/* Fix: skip slots that were claimed but never loaded */
+		if (!cfg->config[i].fw)
+			continue;
+		/* Add to shared DRAM block */
+		memcpy(dest, cfg->config[i].cfg_data, cfg->config[i].cfg_len);
+		dest += cfg->config[i].cfg_len;
+	}
+
+done:
+	/* Release the files from non-shared DRAM */
+	for (i = 0; i < cfg->configs; i++)
+		mxfwconfig_unload_cfg(mx, cfg, i);
+
+	/* Configs abandoned on error */
+	if (r)
+		cfg->configs = 0;
+
+	/* Pass offset of common HCF data.
+	 * FW must ignore if zero length, so set up even if we loaded nothing.
+	 */
+	mxfwconfig_get_dram_ref(mx, cfg_ref);
+
+	return r;
+}
+
+/*
+ * Unload Common config data
+ */
+/*
+ * Unload Common config data: free the combined config block from shared
+ * DRAM and reset the bookkeeping. Safe to call when nothing is loaded.
+ */
+void mxfwconfig_unload(struct scsc_mx *mx)
+{
+	struct mxfwconfig *cfg = scsc_mx_get_mxfwconfig(mx);
+	struct miframman *ramman = scsc_mx_get_ramman(mx);
+
+	if (!cfg->shdram)
+		return;		/* Nothing allocated */
+
+	SCSC_TAG_INFO(MX_CFG, "Free common config %zu bytes shared DRAM\n", cfg->shtotal);
+
+	miframman_free(ramman, cfg->shdram);
+
+	cfg->configs = 0;
+	cfg->shtotal = 0;
+	cfg->shdram = NULL;
+}
+
+/*
+ * Get ref (offset) of config block in shared DRAM
+ */
+/*
+ * Fill @cfg_ref with the shared-DRAM ref (offset) and size of the
+ * combined config block, or a zero ref/size when nothing is loaded.
+ */
+static void mxfwconfig_get_dram_ref(struct scsc_mx *mx, struct mxmibref *cfg_ref)
+{
+	struct mxfwconfig *fwcfg = scsc_mx_get_mxfwconfig(mx);
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mx);
+
+	if (fwcfg->shdram) {
+		mif->get_mifram_ref(mif, fwcfg->shdram, &cfg_ref->offset);
+		cfg_ref->size = fwcfg->shtotal;
+	} else {
+		cfg_ref->offset = (scsc_mifram_ref)0;
+		cfg_ref->size = 0;
+	}
+
+	SCSC_TAG_INFO(MX_CFG, "cfg_ref: 0x%x, size %u\n", cfg_ref->offset, cfg_ref->size);
+}
+
+/*
+ * Init config file module
+ */
+/*
+ * Init config file module: start with no files loaded and no
+ * shared-DRAM block. Always succeeds.
+ */
+int mxfwconfig_init(struct scsc_mx *mx)
+{
+	struct mxfwconfig *fwcfg = scsc_mx_get_mxfwconfig(mx);
+
+	fwcfg->shdram = NULL;
+	fwcfg->shtotal = 0;
+	fwcfg->configs = 0;
+
+	return 0;
+}
+
+/*
+ * Exit config file module
+ */
+/* Exit config file module: nothing to free here - mxfwconfig_unload()
+ * should already have run. Warn if state suggests a leak.
+ */
+void mxfwconfig_deinit(struct scsc_mx *mx)
+{
+	struct mxfwconfig *cfg = scsc_mx_get_mxfwconfig(mx);
+
+	/* Leaked memory? */
+	WARN_ON(cfg->configs > 0);
+	WARN_ON(cfg->shdram);
+}
+
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MXFWCONFIG_H
+#define __MXFWCONFIG_H
+
+#define SCSC_MX_MAX_COMMON_CFG 2
+#define MX_COMMON_HCF_HDR_SIZE 8
+#define MX_COMMON_HASH_SIZE_BYTES 2 /* Hash will be contained in a uint32 */
+#define MX_COMMON_HASH_OFFSET 4
+
+/* State for the common HCF config loader: up to SCSC_MX_MAX_COMMON_CFG
+ * files staged in normal DRAM, then concatenated into one shared-DRAM
+ * block of shtotal octets for the firmware.
+ */
+struct mxfwconfig {
+	u32		configs;	/* Number of files */
+	void		*shdram;	/* Combined payload in shared DRAM */
+	size_t		shtotal;	/* Size of combined payload in shared DRAM */
+
+	struct {
+		const struct firmware *fw;	/* File image in DRAM */
+		const u8	*cfg_data;	/* Payload in DRAM (header stripped) */
+		size_t		cfg_len;	/* Length of payload */
+		u32		cfg_hash;	/* ID hash (MX_COMMON_HASH_SIZE_BYTES octets) */
+	} config[SCSC_MX_MAX_COMMON_CFG];
+};
+
+struct mxmibref;
+
+int mxfwconfig_init(struct scsc_mx *mx);
+void mxfwconfig_deinit(struct scsc_mx *mx);
+int mxfwconfig_load(struct scsc_mx *mx, struct mxmibref *cfg_ref);
+void mxfwconfig_unload(struct scsc_mx *mx);
+
+#endif // __MXFWCONFIG_H
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include "scsc_mx_impl.h"
+#include "mxmgmt_transport.h"
+#include "mxlog_transport.h"
+#include "fwhdr.h"
+#include "mxlog.h"
+
+/*
+ * Receive handler for messages from the FW along the maxwell management transport
+ */
+/*
+ * Phase-4 receive handler: the FW message is a plain text string;
+ * print it at the (already kernel-mapped) level.
+ *
+ * Fix: bound the print to 'length' with %.*s so a non NUL-terminated
+ * message from the firmware cannot be read past its end. %.*s still
+ * stops at a NUL, so properly terminated messages print as before.
+ */
+static inline void mxlog_phase4_message_handler(const void *message,
+						size_t length, u32 level,
+						void *data)
+{
+	unsigned char *buf = (unsigned char *)message;
+
+	SCSC_TAG_LVL(MX_FW, level, SCSC_PREFIX"%d: %.*s\n", (int)length, (int)length, buf);
+}
+
+/**
+ * This function is used to parse a NULL terminated format string
+ * and report on the provided output bitmaps smap/lmap which args
+ * are 'long' and which are signed..
+ *
+ * We will care only about length and specifier fields
+ *
+ * %[flags][width][.precision][length]specifier
+ *
+ * and since flags width and .precision are represented
+ * by NON chars, we will grossly compare simply against an 'A',
+ * because we are NOT trying to make a full sanity check here BUT only
+ * to search for long and signed values to provide the proper cast.
+ *
+ * Supporting:
+ * - ESCAPES %%ld
+ *
+ * - %x %X %d %ld %lld %i %li %lli %u %lu %llu %hd %hhd %hu %hhu
+ *
+ * NOT supporting:
+ * - %s -> MARKED AS UNSUPPORTED
+ */
+static inline void build_len_sign_maps(char *fmt, u32 *smap, u32 *lmap,
+				       u32 *strmap)
+{
+	u32 p = 0;
+	char *s = fmt;
+	bool escaping = false;
+
+	if (!s)
+		return;
+	for (; *s != '\0'; ++s) {
+		/* Skip any escaped fmtstring like %%d and move on */
+		if (escaping) {
+			if (*s == ' ')
+				escaping = false;
+			continue;
+		}
+		if (*s != '%')
+			continue;
+		/* Start escape seq ... */
+		if (*(s + 1) == '%') {
+			escaping = true;
+			continue;
+		}
+		/* skip [flags][width][.precision] if any.
+		 * Fix: also stop at the NUL terminator - the original
+		 * "for (; *++s < 'A';)" kept scanning past the end of the
+		 * string for a format ending in a bare '%' ('\0' < 'A'),
+		 * an out-of-bounds read.
+		 */
+		while (*++s != '\0' && *s < 'A')
+			;
+		if (*s == '\0')
+			break;
+		if (*s == 'l') {
+			*lmap |= (1 << p);
+			/* %lld ? skip */
+			if (*++s == 'l')
+				s++;
+		} else if (*s == 'h') {
+			/* just skip h modifiers */
+			/* hhd ? */
+			if (*++s == 'h')
+				s++;
+		}
+		/* Fix: a truncated specifier ("%l"/"%h" at end of string)
+		 * would otherwise let the outer ++s step past the NUL.
+		 */
+		if (*s == '\0')
+			break;
+		if (*s == 'd' || *s == 'i')
+			*smap |= (1 << p);
+		else if (*s == 's')
+			*strmap |= (1 << p);
+		p++;
+	}
+}
+
+/**
+ * The binary protocol described at:
+ *
+ * http://wiki/Maxwell_common_firmware/Mxlog#Phase_5_:_string_decoded_on_the_host
+ *
+ * states that we'd receive the following record content on each mxlog
+ * message from FW, where:
+ *
+ * - each element is a 32bit word
+ * - 1st element is a record header
+ * - len = number of elements following the first element
+ *
+ * | 1st | 2nd | 3rd | 4th | 5th | 6th
+ * -----------------------------------------------------------
+ * | sync|lvl|len || tstamp || offset || arg1 || arg2 || arg3.
+ * -----------------------------------------------------------
+ * | e l o g m s g |
+ *
+ * BUT NOTE THAT: here we DO NOT receive 1st header element BUT
+ * instead we got:
+ * @message: pointer to 2nd element
+ * @length: in bytes of the message (so starting from 2nd element) and
+ * including tstamp and offset elements: we must calculate
+ * num_args accordingly.
+ * @level: the debug level already remapped from FW to Kernel namespace
+ */
+/*
+ * Phase-5 receive handler: decode a binary mxlog record (timestamp,
+ * format-string offset into log-strings.bin, then up to 8 u32 args)
+ * and print it through the logring at the given level.
+ */
+static inline void mxlog_phase5_message_handler(const void *message,
+						size_t length, u32 level,
+						void *data)
+{
+	struct mxlog *mxlog = (struct mxlog *)data;
+	struct mxlog_event_log_msg *elogmsg =
+		(struct mxlog_event_log_msg *)message;
+
+	if (length < MINIMUM_MXLOG_MSG_LEN_BYTES)
+		return;
+	if (mxlog && elogmsg) {
+		int num_args = 0;
+		char spare[MAX_SPARE_FMT + TSTAMP_LEN] = {};
+		char *fmt = NULL;
+		size_t fmt_sz = 0;
+		u32 smap = 0, lmap = 0, strmap = 0;
+		u32 *args = NULL;
+
+		/* Check OFFSET sanity... beware of FW guys :D ! */
+		if (elogmsg->offset >= MXLS_SZ(mxlog)) {
+			SCSC_TAG_ERR(MX_FW,
+				     "Received fmtstr OFFSET(%d) is OUT OF range(%zd)...skip..\n",
+				     elogmsg->offset, MXLS_SZ(mxlog));
+			return;
+		}
+		args = (u32 *)(elogmsg + 1);
+		num_args =
+			(length - MINIMUM_MXLOG_MSG_LEN_BYTES) /
+			MXLOG_ELEMENT_SIZE;
+		fmt = (char *)(MXLS_DATA(mxlog) + elogmsg->offset);
+		/* Avoid being fooled by a NON NULL-terminated strings too ! */
+		fmt_sz = strnlen(fmt, MXLS_SZ(mxlog) - elogmsg->offset);
+		if (fmt_sz >= MAX_SPARE_FMT - 1) {
+			SCSC_TAG_ERR(MX_FW,
+				     "UNSUPPORTED message length %zd ... truncated.\n",
+				     fmt_sz);
+			fmt_sz = MAX_SPARE_FMT - 2;
+		}
+		/* Pre-Process fmt string to be able to do proper casting */
+		if (num_args)
+			build_len_sign_maps(fmt, &smap, &lmap, &strmap);
+
+		/* Add FW provided tstamp on front and proper \n at
+		 * the end when needed.
+		 * Fix: test the LAST character, fmt[fmt_sz - 1]. The
+		 * original tested fmt[fmt_sz], which after strnlen() is
+		 * the NUL terminator, so a '\n' was appended even when
+		 * the format already ended with one (double newline).
+		 */
+		snprintf(spare, MAX_SPARE_FMT + TSTAMP_LEN - 2, SCSC_PREFIX"%08X %s%c",
+			 elogmsg->timestamp, fmt,
+			 (fmt_sz > 0 && fmt[fmt_sz - 1] == '\n') ? '\0' : '\n');
+		fmt = spare;
+
+		switch (num_args) {
+		case 0:
+			/* Fix: never pass the FW-derived string as the format
+			 * argument itself - a stray specifier in it would
+			 * dereference nonexistent varargs.
+			 */
+			SCSC_TAG_LVL(MX_FW, level, "%s", fmt);
+			break;
+		case 1:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 2:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 3:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 4:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[3], 3, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 5:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[3], 3, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[4], 4, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 6:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[3], 3, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[4], 4, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[5], 5, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 7:
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[3], 3, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[4], 4, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[5], 5, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[6], 6, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		case 8:
+		default:
+			if (num_args > MAX_MX_LOG_ARGS)
+				SCSC_TAG_ERR(MX_FW,
+					     "MXLOG: Too many args:%d ... print only first %d\n",
+					     num_args, MAX_MX_LOG_ARGS);
+			SCSC_TAG_LVL(MX_FW, level, fmt,
+				     MXLOG_CAST(args[0], 0, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[1], 1, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[2], 2, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[3], 3, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[4], 4, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[5], 5, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[6], 6, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)),
+				     MXLOG_CAST(args[7], 7, smap, lmap, strmap,
+						MXLS_DATA(mxlog), MXLS_SZ(mxlog)));
+			break;
+		}
+	}
+}
+
+/* A generic message handler to multiplex between phases */
+/* A generic message handler to multiplex between phases */
+static void mxlog_message_handler(u8 phase, const void *message,
+				  size_t length, u32 level, void *data)
+{
+	struct mxlog *mxlog = (struct mxlog *)data;
+
+	if (!mxlog) {
+		SCSC_TAG_ERR(MX_FW, "Missing MXLOG reference.\n");
+		return;
+	}
+
+	if (phase == MX_LOG_PHASE_4) {
+		mxlog_phase4_message_handler(message, length, level, data);
+	} else if (phase == MX_LOG_PHASE_5) {
+		/* Phase 5 needs the log-strings file to decode offsets */
+		if (mxlog->logstrings)
+			mxlog_phase5_message_handler(message, length,
+						     level, data);
+		else
+			SCSC_TAG_ERR(MX_FW,
+				     "Missing LogStrings...dropping incoming PHASE5 message !\n");
+	} else {
+		SCSC_TAG_ERR(MX_FW,
+			     "MXLOG Unsupported phase %d ... dropping message !\n",
+			     phase);
+	}
+}
+
+/* Parse a 32-bit mxlog record header: [31:16] sync word selects the
+ * protocol phase, [15:8] is the FW debug level (remapped to a kernel
+ * level), [7:0] is the length field (chars for phase 4, u32 words for
+ * phase 5). Returns 0, or -1 for an unknown sync word.
+ */
+static int mxlog_header_parser(u32 header, u8 *phase,
+			       u8 *level, u32 *num_bytes)
+{
+	/* Fix: made static const - the table is read-only and was being
+	 * rebuilt on the stack for every record parsed.
+	 */
+	static const u32 fw2kern_map[] = {
+		0, /* 0 MX_ERROR --> 0 KERN_EMERG .. it's panic.*/
+		4, /* 1 MX_WARN --> 4 KERN_WARNING */
+		5, /* 2 MX_MAJOR --> 5 KERN_NOTICE */
+		6, /* 3 MX_MINOR --> 6 KERN_INFO */
+		7, /* 4 MX_DETAIL --> 7 KERN_DEBUG */
+	};
+	u16 sync = ((header & 0xFFFF0000) >> 16);
+
+	switch (sync) {
+	case SYNC_VALUE_PHASE_4:
+		*phase = MX_LOG_PHASE_4;
+		/* len() field represent number of chars bytes */
+		*num_bytes = header & 0x000000FF;
+		break;
+	case SYNC_VALUE_PHASE_5:
+		*phase = MX_LOG_PHASE_5;
+		/* len() field represent number of 4 bytes words */
+		*num_bytes = (header & 0x000000FF) * 4;
+		break;
+	default:
+		return -1;
+	}
+	/* Remap FW debug levels to KERN debug levels domain */
+	*level = (header & 0x0000FF00) >> 8;
+	if (*level < ARRAY_SIZE(fw2kern_map)) {
+		*level = fw2kern_map[*level];
+	} else {
+		SCSC_TAG_ERR(MX_FW,
+			     "UNKNOWN MX debug level %d ... marking as MX_DETAIL.\n",
+			     *level);
+		*level = fw2kern_map[ARRAY_SIZE(fw2kern_map) - 1];
+	}
+
+	return 0;
+}
+
+/* Initialise mxlog: load the optional log-strings.bin (needed by the
+ * phase-5 binary protocol), cross-check its embedded version against
+ * @fw_build_id when present, and register the generic channel handler
+ * on the mxlog transport.
+ */
+void mxlog_init(struct mxlog *mxlog, struct scsc_mx *mx, char *fw_build_id)
+{
+	int ret = 0;
+
+	mxlog->mx = mx;
+	mxlog->index = 0;
+	mxlog->logstrings = NULL;
+
+	/* File is in f/w profile directory */
+	ret = mx140_file_request_debug_conf(mx,
+					    (const struct firmware **)&mxlog->logstrings,
+					    MX_LOG_LOGSTRINGS_PATH);
+
+	if (!ret && mxlog->logstrings && mxlog->logstrings->data) {
+		SCSC_TAG_INFO(MX_FW, "Loaded %zd bytes of log-strings from %s\n",
+			      mxlog->logstrings->size, MX_LOG_LOGSTRINGS_PATH);
+		/* A versioned log-strings file starts with a non-zero byte
+		 * (the build-id string); an unversioned one starts with 0x00.
+		 */
+		if (fw_build_id && mxlog->logstrings->data[0] != 0x00 &&
+		    mxlog->logstrings->size >= FW_BUILD_ID_SZ) {
+			SCSC_TAG_INFO(MX_FW, "Log-strings is versioned...checking against fw_build_id.\n");
+			if (strncmp(fw_build_id, mxlog->logstrings->data, FW_BUILD_ID_SZ)) {
+				char found[FW_BUILD_ID_SZ] = {};
+
+				/**
+				 * NULL-terminate it just in case we fetched
+				 * never-ending garbage.
+				 */
+				strncpy(found, mxlog->logstrings->data,
+					FW_BUILD_ID_SZ - 1);
+				SCSC_TAG_WARNING(MX_FW,
+						 "--> Log-strings VERSION MISMATCH !!!\n");
+				SCSC_TAG_WARNING(MX_FW,
+						 "--> Expected: |%s|\n", fw_build_id);
+				SCSC_TAG_WARNING(MX_FW,
+						 "--> FOUND: |%s|\n", found);
+				SCSC_TAG_WARNING(MX_FW,
+						 "As a consequence the following mxlog debug messages could be corrupted.\n");
+				SCSC_TAG_WARNING(MX_FW,
+						 "The whole firmware package should be pushed to device when updating (not only the mx140.bin).\n");
+			}
+		} else {
+			SCSC_TAG_INFO(MX_FW, "Log-strings is not versioned.\n");
+		}
+	} else {
+		/* Phase 4 still works without log-strings; phase 5 messages
+		 * will be dropped by mxlog_message_handler().
+		 */
+		SCSC_TAG_ERR(MX_FW, "Failed to read %s needed by MXlog Phase 5\n",
+			     MX_LOG_LOGSTRINGS_PATH);
+	}
+	/* Registering a generic channel handler */
+	mxlog_transport_register_channel_handler(scsc_mx_get_mxlog_transport(mx),
+						 &mxlog_header_parser,
+						 &mxlog_message_handler, mxlog);
+}
+
+/* Tear down mxlog: detach the channel handler from the transport first
+ * so no further messages are decoded, then release the log-strings file.
+ */
+void mxlog_release(struct mxlog *mxlog)
+{
+	struct mxlog_transport *trans = scsc_mx_get_mxlog_transport(mxlog->mx);
+
+	mxlog_transport_register_channel_handler(trans, NULL, NULL, NULL);
+
+	if (mxlog->logstrings)
+		mx140_release_file(mxlog->mx, mxlog->logstrings);
+	mxlog->logstrings = NULL;
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _MXLOG_H
+#define _MXLOG_H
+
+#include <linux/firmware.h>
+
+#define MX_LOG_PHASE_4 4
+#define MX_LOG_PHASE_5 5
+
+#define SYNC_VALUE_PHASE_4 (0xA55A)
+#define SYNC_VALUE_PHASE_5 (0x9669)
+
+#define MXLOG_BUFFER_SIZE 512
+
+#define MINIMUM_MXLOG_MSG_LEN_BYTES (sizeof(u32) * 2)
+#define MXLOG_ELEMENT_SIZE (sizeof(u32))
+#define MAX_SPARE_FMT 256
+#define TSTAMP_LEN 9
+#define MAX_MX_LOG_ARGS 8
+#define MX_LOG_LOGSTRINGS_PATH "common/log-strings.bin" /* in f/w debug dir */
+#define MXLOG_SEXT(x) (((x) & 0x80000000) ? ((x) | 0xffffffff00000000) : (x))
+
+#define MXLS_DATA(mx) ((mx)->logstrings->data)
+#define MXLS_SZ(mx) ((mx)->logstrings->size)
+#define MXLOG_DEFSTR "<<%s OFFSET OUT OF RANGE. Check log-strings.bin>>"
+#define MXLOG_STR_SANE(x, base, size, cast) \
+ (((x) < (size)) ? (typeof(cast))((base) + (x)) : (typeof(cast))(MXLOG_DEFSTR))
+
+#ifdef __aarch64__
+/**
+ * ARM64
+ * -----
+ * We must process MXLOG messages 32bit-args coming from FW that have
+ * a different fmt string interpretation in Kernel:
+ *
+ * FW KERN MXLOG_CAST
+ * ---------------------------------------------------------
+ * %d s32 s32 (s32)
+ * %u %x u32 u32 (u32)
+ * %ld s32 s64 (SIGN_EXT((s64)))
+ * %lu u32 u64 (u64)
+ *
+ * Additionally we take care to skip any %s using defstr, a char pointer,
+ * as def value for the argument; we casted it to u64 (sizeof(char *)) to fool
+ * cond expr compilation warnings about types.
+ */
+#define MXLOG_CAST(x, p, smap, lmap, strmap, base, size) \
+ (((strmap) & 1 << (p)) ? MXLOG_STR_SANE(x, base, size, u64) : \
+ (((smap) & 1 << (p)) ? \
+ (((lmap) & 1 << (p)) ? MXLOG_SEXT((s64)(x)) : (s32)(x)) : \
+ (((lmap) & 1 << (p)) ? (u64)(x) : (u32)(x))))
+#else /* __arm__ */
+/**
+ * ARM32
+ * -----
+ * We must process MXLOG messages 32bit-args coming from FW BUT in
+ * ARM 32bit iMX6 they should have the same fmt string interpretation:
+ *
+ * FW KERN MXLOG_CAST
+ * ---------------------------------------------------------
+ * %d s32 s32 (s32)
+ * %u %x u32 u32 (u32)
+ * %ld s32 s32 (s32)
+ * %lu u32 u32 (u32)
+ *
+ * So here we ignore long modifiers and ONLY take care to skip any %s using
+ * defstr, a char pointer, as def value for the argument; we casted it to
+ * u32 (sizeof(char *) to fool cond expr compilation warnings about types.
+ */
+#define MXLOG_CAST(x, p, smap, lmap, strmap, base, size) \
+ (((strmap) & 1 << (p)) ? MXLOG_STR_SANE(x, base, size, u32) : \
+ (((smap) & 1 << (p)) ? ((s32)(x)) : (u32)(x)))
+#endif /* __arch64__ */
+
+/* Fixed prefix of a phase-5 mxlog record (after the header word):
+ * FW timestamp (printed as %08X) and the format-string's octet offset
+ * into log-strings.bin. The u32 arguments follow this struct.
+ */
+struct mxlog_event_log_msg {
+	u32 timestamp;
+	u32 offset;
+} __packed;
+
+struct mxlog;
+
+void mxlog_init(struct mxlog *mxlog, struct scsc_mx *mx, char *fw_build_id);
+void mxlog_release(struct mxlog *mxlog);
+
+struct mxlog {
+	struct scsc_mx  *mx;			/* Owning Maxwell instance */
+	u8              buffer[MXLOG_BUFFER_SIZE]; /* NOTE(review): staging buffer - usage not visible here; confirm */
+	u16             index;			/* NOTE(review): position into buffer - confirm */
+	struct firmware *logstrings;		/* log-strings.bin loaded by mxlog_init(), NULL if absent */
+};
+
+#endif /* _MXLOG_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+/** Uses */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mif_abs.h"
+#include "mifintrbit.h"
+/** Implements */
+#include "mxlog_transport.h"
+
+#define MXLOG_TRANSPORT_BUF_LENGTH (16 * 1024)
+#define MXLOG_TRANSPORT_PACKET_SIZE (4)
+
+/* Flag that an error has occurred so the I/O thread processing should stop */
+void mxlog_transport_set_error(struct mxlog_transport *mxlog_transport)
+{
+	SCSC_TAG_WARNING(MXLOG_TRANS, "I/O thread processing is suspended\n");
+
+	/* Read by the IRQ handler and I/O thread without locking -
+	 * NOTE(review): relies on a plain int store being atomic; confirm
+	 * this is the intended synchronisation.
+	 */
+	mxlog_transport->mxlog_thread.block_thread = 1;
+}
+
+/* MIF interrupt handler for incoming mxlog data.
+ *
+ * May run in atomic context: it only acknowledges the interrupt and
+ * wakes the I/O thread; all stream processing happens in the thread.
+ */
+static void input_irq_handler(int irq, void *data)
+{
+	struct mxlog_transport *mxlog_transport = (struct mxlog_transport *)data;
+	struct mxlog_thread    *th = &mxlog_transport->mxlog_thread;
+	struct scsc_mif_abs    *mif_abs;
+
+	SCSC_TAG_DEBUG(MXLOG_TRANS, "mxlog intr\n");
+	/* Clear the interrupt first to ensure we can't possibly miss one */
+	mif_abs = scsc_mx_get_mif_abs(mxlog_transport->mx);
+	mif_abs->irq_bit_clear(mif_abs, irq);
+
+	/* The other side wrote some data to the input stream,
+	 * wake up the thread that deals with this.
+	 */
+	if (th->task == NULL) {
+		SCSC_TAG_ERR(MXLOG_TRANS, "mxlog_thread is NOT running\n");
+		return;
+	}
+	/*
+	 * If an error has occurred, we discard silently all messages from
+	 * the stream until the error has been processed and the system has
+	 * been reinitialised.
+	 */
+	if (th->block_thread == 1) {
+		SCSC_TAG_DEBUG(MXLOG_TRANS, "discard message.\n");
+		/*
+		 * Do not try to acknowledge a pending interrupt here.
+		 * This function is called by a function which in turn can be
+		 * running in an atomic or 'disabled irq' level.
+		 */
+		return;
+	}
+	th->wakeup_flag = 1;
+
+	/* wake up I/O thread */
+	wake_up_interruptible(&th->wakeup_q);
+}
+
+/* Park the I/O thread until kthread_stop() is called on it; required
+ * because kthread_stop() cannot cope with the thread exiting on its own.
+ */
+static void thread_wait_until_stopped(struct mxlog_transport *mxlog_transport)
+{
+	struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
+
+	/*
+	 * kthread_stop() cannot handle the th exiting while
+	 * kthread_should_stop() is false, so sleep until kthread_stop()
+	 * wakes us up.
+	 */
+	SCSC_TAG_INFO(MXLOG_TRANS, "%s waiting for the stop signal.\n", th->name);
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (!kthread_should_stop()) {
+		SCSC_TAG_DEBUG(MXLOG_TRANS, "%s schedule....\n", th->name);
+		schedule();
+	}
+
+	/**
+	 * Caller that spawned the kthread did a get_task_struct()
+	 * on task_struct, that will be released on stop...
+	 * ...so we should NOT nullify th->task here.
+	 */
+	SCSC_TAG_DEBUG(MXLOG_TRANS, "%s exiting.\n", th->name);
+}
+
+/**
+ * A thread that forwards messages sent across the transport to
+ * the registered handlers for each channel.
+ */
+static int mxlog_thread_function(void *arg)
+{
+ struct mxlog_transport *mxlog_transport = (struct mxlog_transport *)arg;
+ struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
+ int ret;
+ u32 header;
+ char *buf = NULL;
+ size_t buf_sz = 4096;
+
+ buf = kmalloc(buf_sz, GFP_KERNEL);
+ if (!buf) {
+ SCSC_TAG_ERR(MXLOG_TRANS, "Failed to alloc %s local buffer...exiting.\n", th->name);
+ return -ENOMEM;
+ }
+ /* completion is used only for startup thread-synchronization */
+ complete(&th->completion);
+ /* Thread ready...start ISR processing*/
+ th->block_thread = 0;
+ while (!kthread_should_stop()) {
+ /* wait until an error occurs, or we need to process */
+ ret = wait_event_interruptible(th->wakeup_q,
+ (th->wakeup_flag && !th->block_thread) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop()) {
+ SCSC_TAG_DEBUG(MXLOG_TRANS, "signalled to exit\n");
+ break;
+ }
+ if (ret < 0) {
+ SCSC_TAG_DEBUG(MXLOG_TRANS,
+ "wait_event returned %d, thread will exit\n", ret);
+ thread_wait_until_stopped(mxlog_transport);
+ break;
+ }
+ th->wakeup_flag = 0;
+ SCSC_TAG_DEBUG(MXLOG_TRANS, "wokeup: r=%d\n", ret);
+ if (!mxlog_transport->header_handler_fn) {
+ /* Invalid header handler:
+ * unrecoverable log and terminate
+ */
+ SCSC_TAG_WARNING(MXLOG_TRANS,
+ "mxlog_transport->header_handler_fn_==NULL\n");
+ break;
+ }
+ while (mif_stream_read(&mxlog_transport->mif_stream,
+ &header, sizeof(uint32_t))) {
+ u8 level = 0;
+ u8 phase = 0;
+ u32 num_bytes = 0;
+
+ mutex_lock(&mxlog_transport->lock);
+ if (!mxlog_transport->header_handler_fn) {
+ /* Invalid header handler:
+ * unrecoverable log and terminate
+ */
+ SCSC_TAG_WARNING(MXLOG_TRANS,
+ "mxlog_transport->header_handler_fn_==NULL. Channel has been released\n");
+ mutex_unlock(&mxlog_transport->lock);
+ /* not recoverable, terminate straight away */
+ goto mxlog_thread_exit;
+ }
+ /**
+ * A generic header processor will properly retrieve
+ * level and num_bytes as specifically implemented
+ * by the phase.
+ */
+ if (mxlog_transport->header_handler_fn(header, &phase,
+ &level, &num_bytes)) {
+ SCSC_TAG_ERR(MXLOG_TRANS,
+ "Bad sync in header: header=0x%08x\n", header);
+ mutex_unlock(&mxlog_transport->lock);
+ /* not recoverable, terminate straight away */
+ goto mxlog_thread_exit;
+ }
+ if (num_bytes > 0 &&
+ num_bytes < (MXLOG_TRANSPORT_BUF_LENGTH - sizeof(uint32_t))) {
+ u32 ret_bytes = 0;
+
+ /* 2nd read - payload (msg) */
+ ret_bytes = mif_stream_read(&mxlog_transport->mif_stream,
+ buf, num_bytes);
+ mxlog_transport->channel_handler_fn(phase, buf,
+ ret_bytes,
+ level,
+ mxlog_transport->channel_handler_data);
+ } else {
+ SCSC_TAG_ERR(MXLOG_TRANS,
+ "Bad num_bytes(%d) in header: header=0x%08x\n",
+ num_bytes, header);
+ }
+ mutex_unlock(&mxlog_transport->lock);
+ }
+ }
+
+mxlog_thread_exit:
+ SCSC_TAG_INFO(MXLOG_TRANS, "exiting....\n");
+ kfree(buf);
+ return 0;
+}
+
+static int mxlog_thread_start(struct mxlog_transport *mxlog_transport)
+{
+ int err;
+ struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
+
+ if (th->task != NULL) {
+ SCSC_TAG_WARNING(MXLOG_TRANS, "%s thread already started\n", th->name);
+ return 0;
+ }
+
+ /* Initialise thread structure */
+ th->block_thread = 1;
+ init_waitqueue_head(&th->wakeup_q);
+ init_completion(&th->completion);
+ th->wakeup_flag = 0;
+ snprintf(th->name, MXLOG_THREAD_NAME_MAX_LENGTH, "mxlog_thread");
+
+ /* Start the kernel thread */
+ th->task = kthread_run(mxlog_thread_function, mxlog_transport, "%s", th->name);
+ if (IS_ERR(th->task)) {
+ int err = (int)PTR_ERR(th->task);
+
+ th->task = NULL;
+ return err;
+ }
+
+ /**
+ * Avoid this task_struct vanishes immediately
+ * if the kthread exits by its own.
+ */
+ get_task_struct(th->task);
+
+ /* wait until thread function is running */
+#define LOG_THREAD_START_TMO_SEC (3)
+ err = wait_for_completion_timeout(&th->completion, msecs_to_jiffies(LOG_THREAD_START_TMO_SEC * 1000));
+ if (err == 0) {
+ SCSC_TAG_ERR(MXLOG_TRANS, "timeout starting %s\n", th->name);
+ kthread_stop(th->task);
+ put_task_struct(th->task);
+ return -ETIMEDOUT;
+ }
+ SCSC_TAG_INFO(MXLOG_TRANS, "Started thread %s\n", th->name);
+
+ return 0;
+}
+
+static void mxlog_thread_stop(struct mxlog_transport *mxlog_transport)
+{
+ struct mxlog_thread *th = &mxlog_transport->mxlog_thread;
+
+ if (!th->task) {
+ SCSC_TAG_WARNING(MXLOG_TRANS, "%s is already stopped\n", th->name);
+ return;
+ }
+ SCSC_TAG_INFO(MXLOG_TRANS, "Stopping thread %s [%d]\n", th->name, th->task->pid);
+ /* kthread_stop() marks thread as KTHREAD_SHOULD_STOP
+ * and wait for it to terminate
+ */
+ if (kthread_stop(th->task))
+ SCSC_TAG_ERR(MXLOG_TRANS, "Failed to stop %s [%d]\n", th->name, th->task->pid);
+ /* Finally release the task_struct we held on start */
+ put_task_struct(th->task);
+ th->task = NULL;
+}
+
+void mxlog_transport_release(struct mxlog_transport *mxlog_transport)
+{
+ mxlog_thread_stop(mxlog_transport);
+ mif_stream_release(&mxlog_transport->mif_stream);
+}
+
+void mxlog_transport_config_serialise(struct mxlog_transport *mxlog_transport,
+ struct mxlogconf *mxlogconf)
+{
+ mif_stream_config_serialise(&mxlog_transport->mif_stream, &mxlogconf->stream_conf);
+}
+
+/** Public functions */
+int mxlog_transport_init(struct mxlog_transport *mxlog_transport, struct scsc_mx *mx)
+{
+ int r;
+ uint32_t mem_length = MXLOG_TRANSPORT_BUF_LENGTH;
+ uint32_t packet_size = MXLOG_TRANSPORT_PACKET_SIZE;
+ uint32_t num_packets;
+
+ /*
+ * Initialising a buffer of 1 byte is never legitimate, do not allow it.
+ * The memory buffer length must be a multiple of the packet size.
+ */
+
+ memset(mxlog_transport, 0, sizeof(struct mxlog_transport));
+ mutex_init(&mxlog_transport->lock);
+ num_packets = mem_length / packet_size;
+ mxlog_transport->mx = mx;
+ r = mif_stream_init(&mxlog_transport->mif_stream, SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN, num_packets, packet_size, mx, MIF_STREAM_INTRBIT_TYPE_ALLOC, input_irq_handler, mxlog_transport);
+ if (r)
+ return r;
+ r = mxlog_thread_start(mxlog_transport);
+ if (r) {
+ mif_stream_release(&mxlog_transport->mif_stream);
+ return r;
+ }
+
+ return 0;
+}
+
+void mxlog_transport_register_channel_handler(struct mxlog_transport *mxlog_transport,
+ mxlog_header_handler parser,
+ mxlog_channel_handler handler,
+ void *data)
+{
+ mutex_lock(&mxlog_transport->lock);
+ mxlog_transport->header_handler_fn = parser;
+ mxlog_transport->channel_handler_fn = handler;
+ mxlog_transport->channel_handler_data = (void *)data;
+ mutex_unlock(&mxlog_transport->lock);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * Maxwell mxlog transport (Interface)
+ *
+ * Provides communication between the firmware and the host.
+ *
+ */
+
+#ifndef MXLOG_TRANSPORT_H__
+#define MXLOG_TRANSPORT_H__
+
+/** Uses */
+#include <linux/kthread.h>
+#include "mifstream.h"
+
+struct mxlog_transport;
+
+typedef int (*mxlog_header_handler)(u32 header, u8 *phase,
+ u8 *level, u32 *num_bytes);
+/**
+ * Transport channel callback handler. This will be invoked each time a message on a channel is
+ * received. Handlers may perform work within their callback implementation, but should not block.
+ * The detected phase is passed as first parameter.
+ *
+ * Note that the message pointer passed is only valid for the duration of the function call.
+ */
+typedef void (*mxlog_channel_handler)(u8 phase, const void *message,
+ size_t length, u32 level, void *data);
+
+/**
+ * Initialises the maxwell mxlog transport and configures the necessary
+ * interrupt handlers.
+ */
+int mxlog_transport_init(struct mxlog_transport *mxlog_transport, struct scsc_mx *mx);
+void mxlog_transport_release(struct mxlog_transport *mxlog_transport);
+/*
+ * Initialises the configuration area incl. Maxwell Infrastructure Configuration,
+ * MIF Management Transport Configuration and MIF Management Stream Configuration.
+ */
+void mxlog_transport_config_serialise(struct mxlog_transport *mxlog_transport, struct mxlogconf *mxlogconf);
+void mxlog_transport_register_channel_handler(struct mxlog_transport *mxlog_transport,
+ mxlog_header_handler parser,
+ mxlog_channel_handler handler,
+ void *data);
+void mxlog_transport_set_error(struct mxlog_transport *mxlog_transport);
+
+#define MXLOG_THREAD_NAME_MAX_LENGTH 32
+/* State of the kernel thread that drains the mxlog stream. */
+struct mxlog_thread {
+ /* kthread handle; NULL while the thread is not running */
+ struct task_struct *task;
+ char name[MXLOG_THREAD_NAME_MAX_LENGTH];
+ /* NOTE(review): never written in this file - presumably a priority hint; confirm users */
+ int prio;
+ /* signalled once by the thread at startup (start synchronisation) */
+ struct completion completion;
+ /* waitqueue used by the interrupt handler to wake the thread */
+ wait_queue_head_t wakeup_q;
+ /* set by the interrupt handler, cleared by the thread: data pending */
+ unsigned int wakeup_flag;
+ /*
+ * Use it to block the I/O thread when
+ * an error occurs.
+ */
+ int block_thread;
+};
+
+/* mxlog transport context: one MIF input stream plus its reader thread. */
+struct mxlog_transport {
+ struct scsc_mx *mx;
+ struct mxlog_thread mxlog_thread;
+ /* IN stream carrying log records from the firmware */
+ struct mif_stream mif_stream;
+ /* decodes a 32-bit record header into phase/level/length */
+ mxlog_header_handler header_handler_fn;
+ /* consumes each decoded payload; see mxlog_channel_handler */
+ mxlog_channel_handler channel_handler_fn;
+ void *channel_handler_data;
+ /* serialises handler (un)registration against the reader thread */
+ struct mutex lock;
+};
+
+#endif /* MXLOG_TRANSPORT_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/** Implements */
+#include "mxlogger.h"
+
+/** Uses */
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <scsc/scsc_logring.h>
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#include "srvman.h"
+#include "scsc_mif_abs.h"
+#include "miframman.h"
+#include "mifintrbit.h"
+#include "mxmgmt_transport.h"
+
+static bool mxlogger_disabled;
+module_param(mxlogger_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mxlogger_disabled, "Disable MXLOGGER Configuration. Effective only at next WLBT boot.");
+
+bool mxlogger_set_enabled_status(bool enable)
+{
+ mxlogger_disabled = !enable;
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER has been NOW %sABLED. Effective at next WLBT boot.\n",
+ mxlogger_disabled ? "DIS" : "EN");
+
+ return mxlogger_disabled;
+}
+EXPORT_SYMBOL(mxlogger_set_enabled_status);
+
+static bool mxlogger_forced_to_host;
+
+static void update_fake_observer(void)
+{
+ static bool mxlogger_fake_observers_registered;
+
+ if (mxlogger_forced_to_host) {
+ if (!mxlogger_fake_observers_registered) {
+ mxlogger_register_global_observer("FAKE_OBSERVER");
+ mxlogger_fake_observers_registered = true;
+ }
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER is now FORCED TO HOST.\n");
+ } else {
+ if (mxlogger_fake_observers_registered) {
+ mxlogger_unregister_global_observer("FAKE_OBSERVER");
+ mxlogger_fake_observers_registered = false;
+ }
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER is now operating NORMALLY.\n");
+ }
+}
+
+static int mxlogger_force_to_host_set_param_cb(const char *val,
+ const struct kernel_param *kp)
+{
+ bool nval;
+
+ if (!val || strtobool(val, &nval))
+ return -EINVAL;
+
+ if (mxlogger_forced_to_host ^ nval) {
+ mxlogger_forced_to_host = nval;
+ update_fake_observer();
+ }
+ return 0;
+}
+
+/**
+ * As described in struct kernel_param+ops the _get method:
+ * -> returns length written or -errno. Buffer is 4k (ie. be short!)
+ */
+static int mxlogger_force_to_host_get_param_cb(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%c", mxlogger_forced_to_host ? 'Y' : 'N');
+}
+
+/* Expose mxlogger_force_to_host as a boolean module parameter whose
+ * setter has side effects (fake observer registration) on change.
+ */
+static struct kernel_param_ops mxlogger_force_to_host_ops = {
+ .set = mxlogger_force_to_host_set_param_cb,
+ .get = mxlogger_force_to_host_get_param_cb,
+};
+module_param_cb(mxlogger_force_to_host, &mxlogger_force_to_host_ops, NULL, 0644);
+MODULE_PARM_DESC(mxlogger_force_to_host, "Force mxlogger to redirect to Host all the time, using a fake observer.");
+
+/**
+ * Observers of log material could come and go before mxman and mxlogger
+ * are initialized and started...so we keep this stuff here out of mxman,
+ * but all the lifecycle of mxlogger should be reviewed.
+ */
+/* Number of currently registered global observers; guarded by global_lock */
+static u8 active_global_observers;
+static DEFINE_MUTEX(global_lock);
+
+/* List node wrapping one mxlogger instance */
+struct mxlogger_node {
+ struct list_head list;
+ struct mxlogger *mxl;
+};
+/* Global list of all live mxlogger instances */
+static struct mxlogger_list { struct list_head list; } mxlogger_list = {
+ .list = LIST_HEAD_INIT(mxlogger_list.list)
+};
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+static int mxlogger_collect_init(struct scsc_log_collector_client *collect_client);
+static int mxlogger_collect(struct scsc_log_collector_client *collect_client, size_t size);
+static int mxlogger_collect_end(struct scsc_log_collector_client *collect_client);
+
+/* Collect client registration SYNC buffer */
+/* SYNC - SHOULD BE THE FIRST CHUNK TO BE CALLED - SO USE THE INIT/END ON THIS CLIENT */
+struct scsc_log_collector_client mxlogger_collect_client_sync = {
+ .name = "Sync",
+ .type = SCSC_LOG_CHUNK_SYNC,
+ .collect_init = mxlogger_collect_init,
+ .collect = mxlogger_collect,
+ .collect_end = mxlogger_collect_end,
+ .prv = NULL,
+};
+
+/* Collect client registration IMP buffer */
+struct scsc_log_collector_client mxlogger_collect_client_imp = {
+ .name = "Important",
+ .type = SCSC_LOG_CHUNK_IMP,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+
+/* Collect client registrations for the reserved per-subsystem buffers.
+ * .prv is filled in with the mxlogger instance in mxlogger_init().
+ */
+struct scsc_log_collector_client mxlogger_collect_client_rsv_common = {
+ .name = "Rsv_common",
+ .type = SCSC_LOG_RESERVED_COMMON,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+
+struct scsc_log_collector_client mxlogger_collect_client_rsv_bt = {
+ .name = "Rsv_bt",
+ .type = SCSC_LOG_RESERVED_BT,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+
+struct scsc_log_collector_client mxlogger_collect_client_rsv_wlan = {
+ .name = "Rsv_wlan",
+ .type = SCSC_LOG_RESERVED_WLAN,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+
+struct scsc_log_collector_client mxlogger_collect_client_rsv_radio = {
+ .name = "Rsv_radio",
+ .type = SCSC_LOG_RESERVED_RADIO,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+/* Collect client registration MXL buffer */
+struct scsc_log_collector_client mxlogger_collect_client_mxl = {
+ .name = "MXL",
+ .type = SCSC_LOG_CHUNK_MXL,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+
+/* Collect client registration UDI buffer */
+struct scsc_log_collector_client mxlogger_collect_client_udi = {
+ .name = "UDI",
+ .type = SCSC_LOG_CHUNK_UDI,
+ .collect_init = NULL,
+ .collect = mxlogger_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+#endif
+
+/* Human-readable names indexed by the MXLOGGER_* buffer enum */
+const char *mxlogger_buf_name[] = { "syn", "imp", "rsv_common", "rsv_bt", "rsv_wlan", "rsv_radio", "mxl", "udi" };
+
+static void mxlogger_message_handler(const void *message, void *data)
+{
+ struct mxlogger __attribute__((unused)) *mxlogger = (struct mxlogger *)data;
+ const struct log_msg_packet *msg = message;
+ u16 reason_code;
+
+ switch (msg->msg) {
+ case MM_MXLOGGER_INITIALIZED_EVT:
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Initialized.\n");
+ mxlogger->initialized = true;
+ complete(&mxlogger->rings_serialized_ops);
+ break;
+ case MM_MXLOGGER_STARTED_EVT:
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER:: RINGS Enabled.\n");
+ mxlogger->enabled = true;
+ complete(&mxlogger->rings_serialized_ops);
+ break;
+ case MM_MXLOGGER_STOPPED_EVT:
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER:: RINGS Disabled.\n");
+ mxlogger->enabled = false;
+ complete(&mxlogger->rings_serialized_ops);
+ break;
+ case MM_MXLOGGER_COLLECTION_FW_REQ_EVT:
+ /* If arg is zero, FW is using the 16bit reason code API */
+ /* therefore, the reason code is in the payload */
+ if (msg->arg == 0x00)
+ memcpy(&reason_code, &msg->payload[0], sizeof(u16));
+ else
+ /* old API */
+ reason_code = msg->arg;
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER:: FW requested collection - Reason code:0x%04x\n", reason_code);
+ scsc_log_collector_schedule_collection(SCSC_LOG_FW, reason_code);
+#endif
+ break;
+ default:
+ SCSC_TAG_WARNING(MXMAN,
+ "Received UNKNOWN msg on MMTRANS_CHAN_ID_MAXWELL_LOGGING -- msg->msg=%d\n",
+ msg->msg);
+ break;
+ }
+}
+
+/*
+ * Write a host/firmware time-sync record into the shared sync buffer.
+ * Caller must hold mxlogger->lock. Busy-waits up to ~20ms with
+ * preemption disabled for the firmware to stamp its own time into the
+ * record. Returns 0, or -EIO when mxlogger is not configured.
+ */
+static int __mxlogger_generate_sync_record(struct mxlogger *mxlogger, enum mxlogger_sync_event event)
+{
+ struct mxlogger_sync_record *sync_r_mem;
+ struct timeval t;
+ struct log_msg_packet msg = {};
+ unsigned long int jd;
+ void *mem;
+ ktime_t t1, t2;
+
+ /* Assume mxlogger->lock mutex is held */
+ if (!mxlogger || !mxlogger->configured)
+ return -EIO;
+
+ msg.msg = MM_MXLOGGER_SYNC_RECORD;
+ msg.arg = MM_MXLOGGER_SYNC_INDEX;
+ memcpy(&msg.payload, &mxlogger->sync_buffer_index, sizeof(mxlogger->sync_buffer_index));
+
+ /* Get the pointer from the index of the sync array */
+ mem = mxlogger->mem_sync_buf + mxlogger->sync_buffer_index * sizeof(struct mxlogger_sync_record);
+ sync_r_mem = (struct mxlogger_sync_record *)mem;
+ /* Write values in record as FW might be doing sanity checks */
+ sync_r_mem->tv_sec = 1;
+ sync_r_mem->tv_usec = 1;
+ sync_r_mem->kernel_time = 1;
+ sync_r_mem->sync_event = event;
+ /* fw_time/fw_wrap zeroed here; non-zero means the FW has replied */
+ sync_r_mem->fw_time = 0;
+ sync_r_mem->fw_wrap = 0;
+
+
+ SCSC_TAG_INFO(MXMAN, "Get FW time\n");
+ preempt_disable();
+ /* set the tight loop timeout - we do not require precision but something to not
+ * loop forever
+ */
+ jd = jiffies + msecs_to_jiffies(20);
+ /* Send the msg as fast as possible */
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &msg, sizeof(msg));
+ t1 = ktime_get();
+ /* Tight loop to read memory */
+ while (time_before(jiffies, jd) && sync_r_mem->fw_time == 0 && sync_r_mem->fw_wrap == 0)
+ ;
+ t2 = ktime_get();
+ do_gettimeofday(&t);
+ preempt_enable();
+
+ /* Do the processing */
+ if (sync_r_mem->fw_wrap == 0 && sync_r_mem->fw_time == 0) {
+ /* FW didn't update the record (FW panic?) */
+ SCSC_TAG_INFO(MXMAN, "FW failure updating the FW time\n");
+ SCSC_TAG_INFO(MXMAN, "Sync delta %lld\n", ktime_to_ns(ktime_sub(t2, t1)));
+ sync_r_mem->tv_sec = (u64)t.tv_sec;
+ sync_r_mem->tv_usec = (u64)t.tv_usec;
+ sync_r_mem->kernel_time = ktime_to_ns(t2);
+ sync_r_mem->sync_event = event;
+ /* NOTE(review): sync_buffer_index is NOT advanced on this path -
+ * the record is overwritten next time; confirm this is intended.
+ */
+ return 0;
+ }
+
+ sync_r_mem->tv_sec = (u64)t.tv_sec;
+ sync_r_mem->tv_usec = (u64)t.tv_usec;
+ sync_r_mem->kernel_time = ktime_to_ns(t2);
+ sync_r_mem->sync_event = event;
+
+ SCSC_TAG_INFO(MXMAN, "Sample, %lld, %u, %lld.%06lld\n",
+ ktime_to_ns(sync_r_mem->kernel_time), sync_r_mem->fw_time, sync_r_mem->tv_sec, sync_r_mem->tv_usec);
+ SCSC_TAG_INFO(MXMAN, "Sync delta %lld\n", ktime_to_ns(ktime_sub(t2, t1)));
+
+ /* Advance (and wrap) the index into the sync record array */
+ mxlogger->sync_buffer_index++;
+ mxlogger->sync_buffer_index &= SYNC_MASK;
+
+ return 0;
+}
+
+int mxlogger_generate_sync_record(struct mxlogger *mxlogger, enum mxlogger_sync_event event)
+{
+ int r;
+
+ mutex_lock(&mxlogger->lock);
+ r = __mxlogger_generate_sync_record(mxlogger, event);
+ mutex_unlock(&mxlogger->lock);
+
+ return r;
+}
+
+static void mxlogger_wait_for_msg_reinit_completion(struct mxlogger *mxlogger)
+{
+ reinit_completion(&mxlogger->rings_serialized_ops);
+}
+
+static bool mxlogger_wait_for_msg_reply(struct mxlogger *mxlogger)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&mxlogger->rings_serialized_ops, usecs_to_jiffies(MXLOGGER_RINGS_TMO_US));
+ if (ret) {
+ int i;
+
+ SCSC_TAG_DBG3(MXMAN, "MXLOGGER RINGS -- replied in %lu usecs.\n",
+ MXLOGGER_RINGS_TMO_US - jiffies_to_usecs(ret));
+
+ for (i = 0; i < MXLOGGER_NUM_BUFFERS; i++)
+ SCSC_TAG_DBG3(MXMAN, "MXLOGGER:: RING[%d] -- INFO[0x%X] STATUS[0x%X]\n", i,
+ mxlogger->cfg->bfds[i].info, mxlogger->cfg->bfds[i].status);
+ } else {
+ SCSC_TAG_ERR(MXMAN, "MXLOGGER timeout waiting for reply.\n");
+ }
+
+ return ret ? true : false;
+}
+
+static inline void __mxlogger_enable(struct mxlogger *mxlogger, bool enable, uint8_t reason)
+{
+ struct log_msg_packet msg = {};
+
+ msg.msg = MM_MXLOGGER_LOGGER_CMD;
+ msg.arg = (enable) ? MM_MXLOGGER_LOGGER_ENABLE : MM_MXLOGGER_LOGGER_DISABLE;
+ msg.payload[0] = reason;
+
+ /* Reinit the completion before sending the message over cpacketbuffer
+ * otherwise there might be a race condition
+ */
+ mxlogger_wait_for_msg_reinit_completion(mxlogger);
+
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &msg, sizeof(msg));
+
+ SCSC_TAG_DBG4(MXMAN, "MXLOGGER RINGS -- enable:%d reason:%d\n",
+ enable, reason);
+
+ mxlogger_wait_for_msg_reply(mxlogger);
+}
+
+static void mxlogger_enable(struct mxlogger *mxlogger, bool enable)
+{
+ return __mxlogger_enable(mxlogger, enable, MM_MXLOGGER_DISABLE_REASON_STOP);
+}
+
+static int mxlogger_send_config(struct mxlogger *mxlogger)
+{
+ struct log_msg_packet msg = {};
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Config mifram_ref: 0x%x size:%d\n",
+ mxlogger->mifram_ref, mxlogger->msz);
+
+ msg.msg = MM_MXLOGGER_CONFIG_CMD;
+ msg.arg = MM_MXLOGGER_CONFIG_BASE_ADDR;
+ memcpy(&msg.payload, &mxlogger->mifram_ref, sizeof(mxlogger->mifram_ref));
+
+ /* Reinit the completion before sending the message over cpacketbuffer
+ * otherwise there might be a race condition
+ */
+ mxlogger_wait_for_msg_reinit_completion(mxlogger);
+
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &msg, sizeof(msg));
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Config SENT\n");
+ if (!mxlogger_wait_for_msg_reply(mxlogger))
+ return -1;
+
+ return 0;
+}
+
+static void mxlogger_to_shared_dram(struct mxlogger *mxlogger)
+{
+ int r;
+ struct log_msg_packet msg = { .msg = MM_MXLOGGER_DIRECTION_CMD,
+ .arg = MM_MXLOGGER_DIRECTION_DRAM };
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER -- NO active observers detected. Send logs to DRAM\n");
+
+ r = __mxlogger_generate_sync_record(mxlogger, MXLOGGER_SYN_TORAM);
+ if (r)
+ return; /* mxlogger is not configured */
+
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &msg, sizeof(msg));
+}
+
+static void mxlogger_to_host(struct mxlogger *mxlogger)
+{
+ int r;
+ struct log_msg_packet msg = { .msg = MM_MXLOGGER_DIRECTION_CMD,
+ .arg = MM_MXLOGGER_DIRECTION_HOST };
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER -- active observers detected. Send logs to host\n");
+
+ r = __mxlogger_generate_sync_record(mxlogger, MXLOGGER_SYN_TOHOST);
+ if (r)
+ return; /* mxlogger is not configured */
+
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &msg, sizeof(msg));
+}
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+static void mxlogger_disable_for_collection(struct mxlogger *mxlogger)
+{
+ return __mxlogger_enable(mxlogger, false, MM_MXLOGGER_DISABLE_REASON_COLLECTION);
+}
+
+static int mxlogger_collect_init(struct scsc_log_collector_client *collect_client)
+{
+ struct mxlogger *mxlogger = (struct mxlogger *)collect_client->prv;
+
+ if (!mxlogger->initialized)
+ return 0;
+
+ mutex_lock(&mxlogger->lock);
+
+ SCSC_TAG_INFO(MXMAN, "Started log collection\n");
+
+ __mxlogger_generate_sync_record(mxlogger, MXLOGGER_SYN_LOGCOLLECTION);
+
+ mxlogger->re_enable = mxlogger->enabled;
+ /**
+ * If enabled, tell FW we are stopping for collection:
+ * this way FW can dump last minute stuff and flush properly
+ * its cache
+ */
+ if (mxlogger->enabled)
+ mxlogger_disable_for_collection(mxlogger);
+
+ mutex_unlock(&mxlogger->lock);
+
+ return 0;
+}
+
+static int mxlogger_collect(struct scsc_log_collector_client *collect_client, size_t size)
+{
+ struct scsc_mif_abs *mif;
+ struct mxlogger *mxlogger = (struct mxlogger *)collect_client->prv;
+ void *buf;
+ int ret = 0;
+ int i;
+ size_t sz;
+
+ if (mxlogger && mxlogger->mx)
+ mif = scsc_mx_get_mif_abs(mxlogger->mx);
+ else
+ /* Return 0 as 'success' to continue the collection of other chunks */
+ return 0;
+
+ mutex_lock(&mxlogger->lock);
+
+ if (mxlogger->initialized == false) {
+ SCSC_TAG_ERR(MXMAN, "MXLOGGER not initialized\n");
+ mutex_unlock(&mxlogger->lock);
+ return 0;
+ }
+
+ if (collect_client->type == SCSC_LOG_CHUNK_SYNC)
+ i = MXLOGGER_SYNC;
+ else if (collect_client->type == SCSC_LOG_CHUNK_IMP)
+ i = MXLOGGER_IMP;
+ else if (collect_client->type == SCSC_LOG_RESERVED_COMMON)
+ i = MXLOGGER_RESERVED_COMMON;
+ else if (collect_client->type == SCSC_LOG_RESERVED_BT)
+ i = MXLOGGER_RESERVED_BT;
+ else if (collect_client->type == SCSC_LOG_RESERVED_WLAN)
+ i = MXLOGGER_RESERVED_WLAN;
+ else if (collect_client->type == SCSC_LOG_RESERVED_RADIO)
+ i = MXLOGGER_RESERVED_RADIO;
+ else if (collect_client->type == SCSC_LOG_CHUNK_MXL)
+ i = MXLOGGER_MXLOG;
+ else if (collect_client->type == SCSC_LOG_CHUNK_UDI)
+ i = MXLOGGER_UDI;
+ else {
+ SCSC_TAG_ERR(MXMAN, "MXLOGGER Incorrect type. Return 'success' and continue to collect other buffers\n");
+ mutex_unlock(&mxlogger->lock);
+ return 0;
+ }
+
+ sz = mxlogger->cfg->bfds[i].size;
+ buf = mif->get_mifram_ptr(mif, mxlogger->cfg->bfds[i].location);
+ SCSC_TAG_INFO(MXMAN, "Writing buffer %s size: %zu\n", mxlogger_buf_name[i], sz);
+ ret = scsc_log_collector_write(buf, sz, 1);
+ if (ret) {
+ mutex_unlock(&mxlogger->lock);
+ return ret;
+ }
+
+ mutex_unlock(&mxlogger->lock);
+ return 0;
+}
+
+static int mxlogger_collect_end(struct scsc_log_collector_client *collect_client)
+{
+ struct mxlogger *mxlogger = (struct mxlogger *)collect_client->prv;
+
+ if (!mxlogger->initialized)
+ return 0;
+
+ mutex_lock(&mxlogger->lock);
+
+ SCSC_TAG_INFO(MXMAN, "End log collection\n");
+
+ /* Renable again if was previoulsy enabled */
+ if (mxlogger->re_enable)
+ mxlogger_enable(mxlogger, true);
+
+ mutex_unlock(&mxlogger->lock);
+ return 0;
+}
+#endif
+
+void mxlogger_print_mapping(struct mxlogger_config_area *cfg)
+{
+ u8 i;
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER -- Configured Buffers [%d]\n", cfg->config.num_buffers);
+ for (i = 0; i < MXLOGGER_NUM_BUFFERS; i++)
+ SCSC_TAG_INFO(MXMAN, "buffer %s loc: 0x%08x size: %u\n",
+ mxlogger_buf_name[i], cfg->bfds[i].location, cfg->bfds[i].size);
+
+}
+
+/* Lock should be acquired by caller */
+/*
+ * Allocate and lay out the shared MXLOGGER memory area, register the
+ * logging channel handler and the log-collection clients, and publish
+ * the instance on the global mxlogger list.
+ * Returns 0 on success or a negative errno.
+ */
+int mxlogger_init(struct scsc_mx *mx, struct mxlogger *mxlogger, uint32_t mem_sz)
+{
+ struct miframman *miframman;
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mx);
+ struct mxlogger_config_area *cfg;
+ size_t remaining_mem;
+ size_t udi_mxl_mem_sz;
+ struct mxlogger_node *mn;
+
+ MEM_LAYOUT_CHECK();
+
+ mxlogger->configured = false;
+
+ /* The area must fit the config header plus all fixed-size buffers */
+ if (mem_sz <= (sizeof(struct mxlogger_config_area) + MXLOGGER_TOTAL_FIX_BUF)) {
+ SCSC_TAG_ERR(MXMAN, "Insufficient memory allocation\n");
+ return -EIO;
+ }
+
+ mxlogger->mx = mx;
+ miframman = scsc_mx_get_ramman2(mx);
+ if (!miframman)
+ return -ENOMEM;
+ mxlogger->mem = miframman_alloc(miframman, mem_sz, 32, MIFRAMMAN_OWNER_COMMON);
+ if (!mxlogger->mem) {
+ SCSC_TAG_ERR(MXMAN, "Error allocating memory for MXLOGGER\n");
+ return -ENOMEM;
+ }
+ mxlogger->msz = mem_sz;
+
+ /* Clear memory to avoid reading old records */
+ memset(mxlogger->mem, 0, mxlogger->msz);
+ mif->get_mifram_ref(mif, mxlogger->mem, &mxlogger->mifram_ref);
+
+ mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING,
+ &mxlogger_message_handler, mxlogger);
+
+ /* Initialize configuration structure */
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Configuration: 0x%x\n", (u32)mxlogger->mifram_ref);
+ cfg = (struct mxlogger_config_area *)mxlogger->mem;
+
+ cfg->config.magic_number = MXLOGGER_MAGIG_NUMBER;
+ cfg->config.config_major = MXLOGGER_MAJOR;
+ cfg->config.config_minor = MXLOGGER_MINOR;
+ cfg->config.num_buffers = MXLOGGER_NUM_BUFFERS;
+
+ /**
+ * Populate information of Fixed size buffers
+ * These are mifram-relative references
+ */
+ cfg->bfds[MXLOGGER_SYNC].location = mxlogger->mifram_ref +
+ offsetof(struct mxlogger_config_area, buffers_start);
+ cfg->bfds[MXLOGGER_SYNC].size = MXLOGGER_SYNC_SIZE;
+ /* additionally cache the va of sync_buffer */
+ mxlogger->mem_sync_buf = mxlogger->mem +
+ offsetof(struct mxlogger_config_area, buffers_start);
+
+ /* Each subsequent buffer starts where the previous one ends */
+ cfg->bfds[MXLOGGER_IMP].location =
+ cfg->bfds[MXLOGGER_IMP - 1].location +
+ cfg->bfds[MXLOGGER_IMP - 1].size;
+ cfg->bfds[MXLOGGER_IMP].size = MXLOGGER_IMP_SIZE;
+
+ cfg->bfds[MXLOGGER_RESERVED_COMMON].location =
+ cfg->bfds[MXLOGGER_RESERVED_COMMON - 1].location +
+ cfg->bfds[MXLOGGER_RESERVED_COMMON - 1].size;
+ cfg->bfds[MXLOGGER_RESERVED_COMMON].size = MXLOGGER_RSV_COMMON_SZ;
+
+ cfg->bfds[MXLOGGER_RESERVED_BT].location =
+ cfg->bfds[MXLOGGER_RESERVED_BT - 1].location +
+ cfg->bfds[MXLOGGER_RESERVED_BT - 1].size;
+ cfg->bfds[MXLOGGER_RESERVED_BT].size = MXLOGGER_RSV_BT_SZ;
+
+ cfg->bfds[MXLOGGER_RESERVED_WLAN].location =
+ cfg->bfds[MXLOGGER_RESERVED_WLAN - 1].location +
+ cfg->bfds[MXLOGGER_RESERVED_WLAN - 1].size;
+ cfg->bfds[MXLOGGER_RESERVED_WLAN].size = MXLOGGER_RSV_WLAN_SZ;
+
+ cfg->bfds[MXLOGGER_RESERVED_RADIO].location =
+ cfg->bfds[MXLOGGER_RESERVED_RADIO - 1].location +
+ cfg->bfds[MXLOGGER_RESERVED_RADIO - 1].size;
+ cfg->bfds[MXLOGGER_RESERVED_RADIO].size = MXLOGGER_RSV_RADIO_SZ;
+
+ /* Compute buffer locations and size based on the remaining space */
+ remaining_mem = mem_sz - (sizeof(struct mxlogger_config_area) + MXLOGGER_TOTAL_FIX_BUF);
+
+ /* Align the buffer to be cache friendly */
+ udi_mxl_mem_sz = (remaining_mem >> 1) & ~(MXLOGGER_NON_FIX_BUF_ALIGN - 1);
+
+ SCSC_TAG_INFO(MXMAN, "remaining_mem %zu udi/mxlogger size %zu\n", remaining_mem, udi_mxl_mem_sz);
+
+ cfg->bfds[MXLOGGER_MXLOG].location =
+ cfg->bfds[MXLOGGER_MXLOG - 1].location +
+ cfg->bfds[MXLOGGER_MXLOG - 1].size;
+ cfg->bfds[MXLOGGER_MXLOG].size = udi_mxl_mem_sz;
+
+ cfg->bfds[MXLOGGER_UDI].location =
+ cfg->bfds[MXLOGGER_UDI - 1].location +
+ cfg->bfds[MXLOGGER_UDI - 1].size;
+ cfg->bfds[MXLOGGER_UDI].size = udi_mxl_mem_sz;
+
+ /* Save offset to buffers array */
+ mif->get_mifram_ref(mif, cfg->bfds, &cfg->config.bfds_ref);
+
+ mxlogger_print_mapping(cfg);
+
+ mxlogger->cfg = cfg;
+
+ init_completion(&mxlogger->rings_serialized_ops);
+ mxlogger->enabled = false;
+
+ mutex_init(&mxlogger->lock);
+
+ mn = kzalloc(sizeof(*mn), GFP_KERNEL);
+ if (!mn) {
+ /* NOTE(review): the channel handler registered above is not
+ * unregistered on this failure path - confirm intended.
+ */
+ miframman_free(miframman, mxlogger->mem);
+ return -ENOMEM;
+ }
+
+ /**
+ * Update observers status considering
+ * current value of mxlogger_forced_to_host
+ */
+ update_fake_observer();
+
+ mutex_lock(&global_lock);
+ mxlogger->observers = active_global_observers;
+ if (mxlogger->observers)
+ SCSC_TAG_INFO(MXMAN, "Detected global %d observer[s]\n", active_global_observers);
+ mutex_unlock(&global_lock);
+
+ mxlogger->sync_buffer_index = 0;
+
+ mn->mxl = mxlogger;
+ list_add_tail(&mn->list, &mxlogger_list.list);
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /**
+ * Register to the collection infrastructure
+ *
+ * All of mxlogger buffers are registered here, NO matter if
+ * MXLOGGER initialization was successful FW side.
+ *
+ * In such a case MXLOGGER-FW will simply ignore all of our following
+ * requests and we'll end up dumping empty buffers, BUT with a partially
+ * meaningful sync buffer. (since this last is written also Host side)
+ */
+ mxlogger_collect_client_sync.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_sync);
+
+ mxlogger_collect_client_imp.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_imp);
+
+ mxlogger_collect_client_rsv_common.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_rsv_common);
+
+ mxlogger_collect_client_rsv_bt.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_rsv_bt);
+
+ mxlogger_collect_client_rsv_wlan.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_rsv_wlan);
+
+ mxlogger_collect_client_rsv_radio.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_rsv_radio);
+
+ mxlogger_collect_client_udi.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_udi);
+
+ mxlogger_collect_client_mxl.prv = mxlogger;
+ scsc_log_collector_register_client(&mxlogger_collect_client_mxl);
+#endif
+ mxlogger->configured = true;
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Configured\n");
+ return 0;
+}
+
+/*
+ * Send the configuration to the firmware and bring the rings up,
+ * choosing the initial log direction (DRAM vs. host) from the number
+ * of observers captured at init time.
+ * Returns 0 on success, -1 when disabled/unconfigured, -ENOMEM when
+ * the firmware did not acknowledge the configuration.
+ */
+int mxlogger_start(struct mxlogger *mxlogger)
+{
+ if (mxlogger_disabled) {
+ SCSC_TAG_WARNING(MXMAN, "MXLOGGER is disabled. Not Starting.\n");
+ return -1;
+ }
+
+ if (!mxlogger || !mxlogger->configured) {
+ SCSC_TAG_WARNING(MXMAN, "MXLOGGER is not valid or not configured.\n");
+ return -1;
+ }
+
+ SCSC_TAG_INFO(MXMAN, "Starting mxlogger with %d observer[s]\n", mxlogger->observers);
+
+ mutex_lock(&mxlogger->lock);
+ /* NOTE(review): a config timeout maps to -ENOMEM here, which reads
+ * oddly - callers appear to only test for non-zero; confirm.
+ */
+ if (mxlogger_send_config(mxlogger)) {
+ mutex_unlock(&mxlogger->lock);
+ return -ENOMEM;
+ }
+
+ /**
+ * MXLOGGER on FW-side is at this point starting up too during
+ * WLBT chip boot and it cannot make any assumption till about
+ * the current number of observers and direction set: so, during
+ * MXLOGGER FW-side initialization, ZERO observers were registered.
+ *
+ * As a consequence on chip-boot FW-MXLOGGER defaults to:
+ * - direction DRAM
+ * - all rings disabled (ingressing messages discarded)
+ */
+ if (!mxlogger->observers) {
+ /* Enabling BEFORE communicating direction DRAM
+ * to avoid losing messages on rings.
+ */
+ mxlogger_enable(mxlogger, true);
+ mxlogger_to_shared_dram(mxlogger);
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_is_observer(false);
+#endif
+ } else {
+ mxlogger_to_host(mxlogger);
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_is_observer(true);
+#endif
+ /* Enabling AFTER communicating direction HOST
+ * to avoid wrongly spilling messages into the
+ * rings early at start (like at boot).
+ */
+ mxlogger_enable(mxlogger, true);
+ }
+
+ SCSC_TAG_INFO(MXMAN, "MXLOGGER Started.\n");
+ mutex_unlock(&mxlogger->lock);
+
+ return 0;
+}
+
/**
 * mxlogger_deinit() - Tear down an mxlogger instance.
 * @mx:       owning Maxwell instance (used to locate the RAM manager)
 * @mxlogger: instance to deconfigure
 *
 * Unregisters the log-collector clients BEFORE taking mxlogger->lock
 * (see comment below), captures a last sync record by switching logging
 * back to the host, disables the rings, detaches the transport channel
 * handler, frees the shared DRAM pool and removes the instance from the
 * global mxlogger list.
 */
void mxlogger_deinit(struct scsc_mx *mx, struct mxlogger *mxlogger)
{
        struct miframman *miframman = NULL;
        struct mxlogger_node *mn, *next;
        bool match = false;

        SCSC_TAG_INFO(MXMAN, "\n");

        if (!mxlogger || !mxlogger->configured) {
                SCSC_TAG_WARNING(MXMAN, "MXLOGGER is not valid or not configured.\n");
                return;
        }
        /* Run deregistration before acquiring the mxlogger lock to avoid
         * deadlock with log_collector.
         */
#ifdef CONFIG_SCSC_LOG_COLLECTION
        scsc_log_collector_unregister_client(&mxlogger_collect_client_sync);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_imp);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_rsv_common);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_rsv_bt);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_rsv_wlan);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_rsv_radio);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_mxl);
        scsc_log_collector_unregister_client(&mxlogger_collect_client_udi);
#endif
        mutex_lock(&mxlogger->lock);

        mxlogger_to_host(mxlogger); /* immediately before deconfigure to get a last sync rec */
        mxlogger->configured = false;
        mxlogger->initialized = false;

#ifdef CONFIG_SCSC_LOG_COLLECTION
        scsc_log_collector_is_observer(true);
#endif
        mxlogger_enable(mxlogger, false);
        /* Detach our channel handler from the management transport. */
        mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxlogger->mx),
                                                  MMTRANS_CHAN_ID_MAXWELL_LOGGING,
                                                  NULL, NULL);
        miframman = scsc_mx_get_ramman2(mx);
        if (miframman)
                miframman_free(miframman, mxlogger->mem);

        /* Remove (and free) this instance's node from the global list. */
        list_for_each_entry_safe(mn, next, &mxlogger_list.list, list) {
                if (mn->mxl == mxlogger) {
                        match = true;
                        list_del(&mn->list);
                        kfree(mn);
                }
        }

        if (match == false)
                SCSC_TAG_ERR(MXMAN, "FATAL, no match for given scsc_mif_abs\n");

        SCSC_TAG_INFO(MXMAN, "End\n");
        mutex_unlock(&mxlogger->lock);
}
+
/**
 * mxlogger_register_observer() - Add one observer to this instance.
 * @mxlogger: instance to update
 * @name:     observer name (used only for logging)
 *
 * While at least one observer is registered, logging is redirected to
 * the host instead of shared DRAM. Always returns 0.
 */
int mxlogger_register_observer(struct mxlogger *mxlogger, char *name)
{
        mutex_lock(&mxlogger->lock);

        mxlogger->observers++;

        SCSC_TAG_INFO(MXMAN, "Register observer[%d] -- %s\n",
                      mxlogger->observers, name);

        /* Switch logs to host */
        mxlogger_to_host(mxlogger);
#ifdef CONFIG_SCSC_LOG_COLLECTION
        scsc_log_collector_is_observer(true);
#endif

        mutex_unlock(&mxlogger->lock);

        return 0;
}
+
+int mxlogger_unregister_observer(struct mxlogger *mxlogger, char *name)
+{
+ mutex_lock(&mxlogger->lock);
+
+ if (mxlogger->observers == 0) {
+ SCSC_TAG_INFO(MXMAN, "Incorrect number of observers\n");
+ mutex_unlock(&mxlogger->lock);
+ return -EIO;
+ }
+
+ mxlogger->observers--;
+
+ SCSC_TAG_INFO(MXMAN, "UN-register observer[%d] -- %s\n",
+ mxlogger->observers, name);
+
+ if (mxlogger->observers == 0) {
+ mxlogger_to_shared_dram(mxlogger);
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_is_observer(false);
+#endif
+ }
+
+ mutex_unlock(&mxlogger->lock);
+
+ return 0;
+}
+
+/* Global observer are not associated to any [mx] mxlogger instance. So it registers as
+ * an observer to all the [mx] mxlogger instances.
+ */
+int mxlogger_register_global_observer(char *name)
+{
+ struct mxlogger_node *mn, *next;
+
+ mutex_lock(&global_lock);
+ active_global_observers++;
+
+ SCSC_TAG_INFO(MXMAN, "Register global observer[%d] -- %s\n",
+ active_global_observers, name);
+
+ if (list_empty(&mxlogger_list.list)) {
+ SCSC_TAG_INFO(MXMAN, "No instances of mxman\n");
+ mutex_unlock(&global_lock);
+ return -EIO;
+ }
+
+ list_for_each_entry_safe(mn, next, &mxlogger_list.list, list) {
+ /* There is a mxlogger instance */
+ mxlogger_register_observer(mn->mxl, name);
+ }
+
+ mutex_unlock(&global_lock);
+
+ return 0;
+}
+
+int mxlogger_unregister_global_observer(char *name)
+{
+ struct mxlogger_node *mn, *next;
+
+ mutex_lock(&global_lock);
+ if (active_global_observers)
+ active_global_observers--;
+
+ SCSC_TAG_INFO(MXMAN, "UN-register global observer[%d] -- %s\n",
+ active_global_observers, name);
+
+ list_for_each_entry_safe(mn, next, &mxlogger_list.list, list) {
+ /* There is a mxlogger instance */
+ mxlogger_unregister_observer(mn->mxl, name);
+ }
+
+ mutex_unlock(&global_lock);
+
+ return 0;
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * Maxwell mxlogger (Interface)
+ *
+ * Provides bi-directional communication between the firmware and the
+ * host.
+ *
+ */
+
+#ifndef __MX_LOGGER_H__
+#define __MX_LOGGER_H__
+
+#include <linux/types.h>
+#include <scsc/scsc_mifram.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/completion.h>
+#include <linux/jiffies.h>
+
+#include "mxmgmt_transport_format.h"
+
+/**
+ * ___________________________________________________________________
+ * | Cmd | Arg | ... payload (opt) ... |
+ * -------------------------------------------------------------------
+ * <-- uint8_t --><-- uint8_t --><----- uint8_t[] buffer ----------->
+ *
+ */
+
+#define MXL_POOL_SZ (6 * 1024 * 1024)
+
+#define MXLOGGER_RINGS_TMO_US 200000
+
+/* CMD/EVENTS */
+#define MM_MXLOGGER_LOGGER_CMD (0)
+#define MM_MXLOGGER_DIRECTION_CMD (1)
+#define MM_MXLOGGER_CONFIG_CMD (2)
+#define MM_MXLOGGER_INITIALIZED_EVT (3)
+#define MM_MXLOGGER_SYNC_RECORD (4)
+#define MM_MXLOGGER_STARTED_EVT (5)
+#define MM_MXLOGGER_STOPPED_EVT (6)
+#define MM_MXLOGGER_COLLECTION_FW_REQ_EVT (7)
+
+/* ARG - LOGGER */
+#define MM_MXLOGGER_LOGGER_ENABLE (0)
+#define MM_MXLOGGER_LOGGER_DISABLE (1)
+#define MM_MXLOGGER_DISABLE_REASON_STOP (0)
+#define MM_MXLOGGER_DISABLE_REASON_COLLECTION (1)
+
+/* ARG - DIRECTION */
+#define MM_MXLOGGER_DIRECTION_DRAM (0)
+#define MM_MXLOGGER_DIRECTION_HOST (1)
+
+/* ARG - CONFIG TABLE */
+#define MM_MXLOGGER_CONFIG_BASE_ADDR (0)
+
+/* ARG - CONFIG TABLE */
+#define MM_MXLOGGER_SYNC_INDEX (0)
+
+#define MM_MXLOGGER_PAYLOAD_SZ (MXMGR_MESSAGE_PAYLOAD_SIZE - 2)
+
+#define MXLOGGER_SYNC_SIZE (10 * 1024)
+#define MXLOGGER_IMP_SIZE (102 * 1024)
+#define MXLOGGER_RSV_COMMON_SZ (4 * 1024)
+#define MXLOGGER_RSV_BT_SZ (4 * 1024)
+#define MXLOGGER_RSV_WLAN_SZ (2 * 1024 * 1024)
+#define MXLOGGER_RSV_RADIO_SZ (4 * 1024)
+
+#define MXLOGGER_TOTAL_FIX_BUF (MXLOGGER_SYNC_SIZE + MXLOGGER_IMP_SIZE + \
+ MXLOGGER_RSV_COMMON_SZ + MXLOGGER_RSV_BT_SZ + \
+ MXLOGGER_RSV_WLAN_SZ + MXLOGGER_RSV_RADIO_SZ)
+
+#define MXLOGGER_NON_FIX_BUF_ALIGN 32
+
+#define MXLOGGER_MAGIG_NUMBER 0xcaba0401
+#define MXLOGGER_MAJOR 0
+#define MXLOGGER_MINOR 0
+
+#define NUM_SYNC_RECORDS 256
+#define SYNC_MASK (NUM_SYNC_RECORDS - 1)
+
+/* Shared memory Layout
+ *
+ * |-------------------------| CONFIG
+ * | CONFIG AREA |
+ * | ... |
+ * | *bufs |------|
+ * | .... | |
+ * | .... | |
+ * | -------------------- |<-----|
+ * | |
+ * | loc | sz | state |info |---------------------|
+ * | loc | sz | state |info |---------------------|
+ * | loc | sz | state |info |---------------------|
+ * | ... | |
+ * |-------------------------| Fixed size buffers |
+ * | SYNC BUFFER |<--------------------|
+ * |-------------------------| |
+ * | IMPORTANT EVENTS |<--------------------|
+ * |-------------------------| |
+ * | Reserved COMMON |<--------------------|
+ * |-------------------------|
+ * | Reserved BT |
+ * |-------------------------|
+ * | Reserved WL |
+ * |-------------------------|
+ * | Reserved RADIO |
+ * |-------------------------| Not fixed size buffers
+ * | MXLOG |
+ * |-------------------------|
+ * | UDI |
+ * |-------------------------|
+ * | Future buffers (TBD) |
+ * |-------------------------|
+ * | Future buffers (TBD) |
+ * |-------------------------|
+ */
+
/* Index of each buffer descriptor in mxlogger_config_area.bfds[].
 * The order mirrors the shared-memory layout diagram above: the
 * fixed-size buffers (SYNC..RESERVED_RADIO) come first, followed by the
 * variable-size MXLOG and UDI buffers.
 */
enum mxlogger_buffers {
        MXLOGGER_FIRST_FIXED_SZ,
        MXLOGGER_SYNC = MXLOGGER_FIRST_FIXED_SZ,
        MXLOGGER_IMP,
        MXLOGGER_RESERVED_COMMON,
        MXLOGGER_RESERVED_BT,
        MXLOGGER_RESERVED_WLAN,
        MXLOGGER_RESERVED_RADIO,
        MXLOGGER_LAST_FIXED_SZ = MXLOGGER_RESERVED_RADIO,
        MXLOGGER_MXLOG,
        MXLOGGER_UDI,
        MXLOGGER_NUM_BUFFERS    /* must stay last: sizes the bfds[] array */
};
+
/* Reason codes recorded in mxlogger_sync_record.sync_event
 * (see mxlogger_generate_sync_record()).
 */
enum mxlogger_sync_event {
        MXLOGGER_SYN_SUSPEND,
        MXLOGGER_SYN_RESUME,
        MXLOGGER_SYN_TOHOST,            /* logging direction switched to host */
        MXLOGGER_SYN_TORAM,             /* logging direction switched to shared DRAM */
        MXLOGGER_SYN_LOGCOLLECTION,
};
+
/* One host/firmware time-correlation record. NUM_SYNC_RECORDS of these
 * fill the SYNC buffer (checked by MEM_LAYOUT_CHECK). Packed layout is
 * presumably shared with firmware — do not reorder or resize fields.
 */
struct mxlogger_sync_record {
        u64 tv_sec;             /* struct timeval.tv_sec */
        u64 tv_usec;            /* struct timeval.tv_usec */
        u64 kernel_time;        /* ktime_t */
        u32 sync_event;         /* type of sync event (enum mxlogger_sync_event) */
        u32 fw_time;
        u32 fw_wrap;
        u8 reserved[4];         /* pads the record to 40 bytes */
} __packed;
+
/* Descriptor for one shared-memory buffer; an array of these lives in
 * the config area (mxlogger_config_area.bfds[]).
 */
struct buffer_desc {
        u32 location;           /* Buffer location */
        u32 size;               /* Buffer sz (in bytes) */
        u32 status;             /* buffer status */
        u32 info;               /* buffer info */
} __packed;
+
/* Fixed header of the shared config area; the magic number and version
 * fields let the firmware validate the layout before using it.
 */
struct mxlogger_config {
        u32 magic_number;       /* 0xcaba0401 (MXLOGGER_MAGIG_NUMBER) */
        u32 config_major;       /* Version Major */
        u32 config_minor;       /* Version Minor */
        u32 num_buffers;        /* configured buffers */
        scsc_mifram_ref bfds_ref;       /* MIF RAM reference to the bfds[] array */
} __packed;
+
/* Complete config area laid out at the start of the shared pool:
 * header, buffer descriptor table, then the buffers themselves.
 */
struct mxlogger_config_area {
        struct mxlogger_config config;
        struct buffer_desc bfds[MXLOGGER_NUM_BUFFERS];
        /* NOTE(review): a host pointer inside a packed, firmware-visible
         * struct — its size differs between 32/64-bit hosts; confirm the
         * firmware never interprets this field.
         */
        uint8_t *buffers_start;
} __packed;
+
/* Wire format of a logging-channel message — matches the diagram at the
 * top of this file: one command/event byte, one argument byte, then an
 * optional payload.
 */
struct log_msg_packet {
        uint8_t msg;            /* cmd or event id (MM_MXLOGGER_* above) */
        uint8_t arg;            /* argument; meaning depends on msg */
        uint8_t payload[MM_MXLOGGER_PAYLOAD_SZ];
} __packed;
+
/* Host-side state for one mxlogger instance (one per scsc_mx). */
struct mxlogger {
        bool initialized;       /* FW-side init seen; cleared on deinit */
        bool configured;        /* host-side configuration complete */
        bool enabled;           /* rings currently enabled — TODO confirm against mxlogger_enable() */
        struct scsc_mx *mx;
        void *mem;              /* base of the shared DRAM pool */
        void *mem_sync_buf;     /* SYNC buffer within the pool — confirm against init code */
        uint32_t msz;           /* pool size in bytes */
        scsc_mifram_ref mifram_ref;     /* MIF RAM reference for mem */
        struct mutex lock;      /* serialises start/deinit/observer updates */
        struct mxlogger_config_area *cfg;
        u8 observers;           /* number of registered observers */
        u8 sync_buffer_index;   /* next sync record slot (wraps via SYNC_MASK) */
        /* collection variables */
        bool re_enable;
        struct completion rings_serialized_ops;
};
+
+int mxlogger_generate_sync_record(struct mxlogger *mxlogger, enum mxlogger_sync_event event);
+int mxlogger_dump_shared_memory_to_file(struct mxlogger *mxlogger);
+int mxlogger_init(struct scsc_mx *mx, struct mxlogger *mxlogger, uint32_t mem_sz);
+void mxlogger_deinit(struct scsc_mx *mx, struct mxlogger *mxlogger);
+int mxlogger_start(struct mxlogger *mxlogger);
+int mxlogger_register_observer(struct mxlogger *mxlogger, char *name);
+int mxlogger_unregister_observer(struct mxlogger *mxlogger, char *name);
+int mxlogger_register_global_observer(char *name);
+int mxlogger_unregister_global_observer(char *name);
+bool mxlogger_set_enabled_status(bool enable);
+
/* Compile-time layout guards: all sync records must fit in the SYNC
 * buffer, and the fixed buffers plus the config area must fit in the
 * pool.
 */
#define MEM_LAYOUT_CHECK() \
({ \
	BUILD_BUG_ON((sizeof(struct mxlogger_sync_record) * NUM_SYNC_RECORDS) > MXLOGGER_SYNC_SIZE); \
	BUILD_BUG_ON((MXLOGGER_TOTAL_FIX_BUF + sizeof(struct mxlogger_config_area)) > MXL_POOL_SZ); \
})
+
+#endif /* __MX_LOGGER_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/notifier.h>
+#include "scsc_mx_impl.h"
+#include "miframman.h"
+#include "mifmboxman.h"
+#include "mxman.h"
+#include "srvman.h"
+#include "mxmgmt_transport.h"
+#include "gdb_transport.h"
+#include "mxconf.h"
+#include "fwimage.h"
+#include "fwhdr.h"
+#include "mxlog.h"
+#include "mxlogger.h"
+#include "fw_panic_record.h"
+#include "panicmon.h"
+#include "mxproc.h"
+#include "mxlog_transport.h"
+#include "mxsyserr.h"
+#ifdef CONFIG_SCSC_SMAPPER
+#include "mifsmapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include "mifqos.h"
+#endif
+#include "mxfwconfig.h"
+#include <scsc/kic/slsi_kic_lib.h>
+#include <scsc/scsc_release.h>
+#include <scsc/scsc_mx.h>
+#include <linux/fs.h>
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#include <scsc/scsc_logring.h>
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#define SCSC_SCRIPT_MOREDUMP "moredump"
+#define SCSC_SCRIPT_LOGGER_DUMP "mx_logger_dump.sh"
+static struct work_struct wlbtd_work;
+#endif
+
+#include "scsc_lerna.h"
+
+#include <asm/page.h>
+#include <scsc/api/bt_audio.h>
+
+#define STRING_BUFFER_MAX_LENGTH 512
+#define NUMBER_OF_STRING_ARGS 5
+#define MX_DRAM_SIZE (4 * 1024 * 1024)
+#define MX_DRAM_SIZE_SECTION_1 (8 * 1024 * 1024)
+#define MX_DRAM_SIZE_SECTION_2 (8 * 1024 * 1024)
+#define MX_FW_RUNTIME_LENGTH (1024 * 1024)
+#define WAIT_FOR_FW_TO_START_DELAY_MS 1000
+#define MBOX2_MAGIC_NUMBER 0xbcdeedcb
+#define MBOX_INDEX_0 0
+#define MBOX_INDEX_1 1
+#define MBOX_INDEX_2 2
+#define MBOX_INDEX_3 3
+#ifdef CONFIG_SOC_EXYNOS7570
+#define MBOX_INDEX_4 4
+#define MBOX_INDEX_5 5
+#define MBOX_INDEX_6 6
+#define MBOX_INDEX_7 7
+#endif
+
+#define SCSC_PANIC_ORIGIN_FW (0x0 << 15)
+#define SCSC_PANIC_ORIGIN_HOST (0x1 << 15)
+
+#define SCSC_PANIC_TECH_WLAN (0x0 << 13)
+#define SCSC_PANIC_TECH_CORE (0x1 << 13)
+#define SCSC_PANIC_TECH_BT (0x2 << 13)
+#define SCSC_PANIC_TECH_UNSP (0x3 << 13)
+
+#define SCSC_PANIC_ORIGIN_MASK 0x8000
+#define SCSC_PANIC_TECH_MASK 0x6000
+#define SCSC_PANIC_SUBCODE_MASK_LEGACY 0x0FFF
+#define SCSC_PANIC_SUBCODE_MASK 0x7FFF
+
+#define SCSC_R4_V2_MINOR_52 52
+#define SCSC_R4_V2_MINOR_53 53
+
+#define MM_HALT_RSP_TIMEOUT_MS 100
+
+static char panic_record_dump[PANIC_RECORD_DUMP_BUFFER_SZ];
+static BLOCKING_NOTIFIER_HEAD(firmware_chain);
+
+/**
+ * This will be returned as fw version ONLY if Maxwell
+ * was never found or was unloaded.
+ */
+static char saved_fw_build_id[FW_BUILD_ID_SZ] = "Maxwell WLBT unavailable";
+
+static bool allow_unidentified_firmware;
+module_param(allow_unidentified_firmware, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(allow_unidentified_firmware, "Allow unidentified firmware");
+
+static bool skip_header;
+module_param(skip_header, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_header, "Skip header, assuming unidentified firmware");
+
+static bool crc_check_allow_none = true;
+module_param(crc_check_allow_none, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(crc_check_allow_none, "Allow skipping firmware CRC checks if CRC is not present");
+
+static int crc_check_period_ms = 30000;
+module_param(crc_check_period_ms, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(crc_check_period_ms, "Time period for checking the firmware CRCs");
+
+static ulong mm_completion_timeout_ms = 2000;
+module_param(mm_completion_timeout_ms, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mm_completion_timeout_ms, "Timeout wait_for_mm_msg_start_ind (ms) - default 1000. 0 = infinite");
+
+static bool skip_mbox0_check;
+module_param(skip_mbox0_check, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_mbox0_check, "Allow skipping firmware mbox0 signature check");
+
+static uint firmware_startup_flags;
+module_param(firmware_startup_flags, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(firmware_startup_flags, "0 = Proceed as normal (default); Bit 0 = 1 - spin at start of CRT0; Other bits reserved = 0");
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+/* First arg controls chv function */
+int chv_run;
+module_param(chv_run, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(chv_run, "Run chv f/w: 0 = feature disabled, 1 = for continuous checking, 2 = 1 shot, anything else, undefined");
+
+/* Optional array of args for firmware to interpret when chv_run = 1 */
+static unsigned int chv_argv[32];
+static int chv_argc;
+
+module_param_array(chv_argv, uint, &chv_argc, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(chv_argv, "Array of up to 32 x u32 args for the CHV firmware when chv_run = 1");
+#endif
+
+static bool disable_auto_coredump;
+module_param(disable_auto_coredump, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_auto_coredump, "Disable driver automatic coredump");
+
+static bool disable_error_handling;
+module_param(disable_error_handling, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_error_handling, "Disable error handling");
+
+int disable_recovery_handling = 1; /* MEMDUMP_FILE_FOR_RECOVERY : for /sys/wifi/memdump */
+module_param(disable_recovery_handling, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_recovery_handling, "Disable recovery handling");
+static bool disable_recovery_from_memdump_file = true;
+static int memdump = -1;
+static bool disable_recovery_until_reboot;
+
+static uint panic_record_delay = 1;
+module_param(panic_record_delay, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(panic_record_delay, "Delay in ms before accessing the panic record");
+
+static bool disable_logger = true;
+module_param(disable_logger, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_logger, "Disable launch of user space logger");
+
+/*
+ * shared between this module and mgt.c as this is the kobject referring to
+ * /sys/wifi directory. Core driver is called 1st we create the directory
+ * here and share the kobject, so in mgt.c wifi driver can create
+ * /sys/wifi/mac_addr using the sysfs_create_file API with the shared kobject
+ *
+ * If both modules tried to create the dir we would get a kernel panic
+ * because a kobject associated with that dir already existed.
+ */
+static struct kobject *wifi_kobj_ref;
+static int refcount;
+static ssize_t sysfs_show_memdump(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+static ssize_t sysfs_store_memdump(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+static struct kobj_attribute memdump_attr =
+ __ATTR(memdump, 0660, sysfs_show_memdump, sysfs_store_memdump);
+
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
/* Log-collector callback: write the firmware runtime RAM region (the
 * DRAM past the loaded image, up to fw_runtime_length) into the
 * "minimoredump" chunk. Returns 0 when there is nothing to collect.
 */
static int mxman_minimoredump_collect(struct scsc_log_collector_client *collect_client, size_t size)
{
        int ret = 0;
        struct mxman *mxman = (struct mxman *) collect_client->prv;

        /* Nothing to do before firmware DRAM has been set up. */
        if (!mxman || !mxman->start_dram)
                return ret;

        SCSC_TAG_INFO(MXMAN, "Collecting Minimoredump runtime_length %d fw_image_size %d\n",
                      mxman->fwhdr.fw_runtime_length, mxman->fw_image_size);
        /* collect RAM sections of FW */
        ret = scsc_log_collector_write(mxman->start_dram + mxman->fw_image_size,
                                       mxman->fwhdr.fw_runtime_length - mxman->fw_image_size, 1);

        return ret;
}
+
/* Log-collector client producing the "minimoredump" chunk; .prv is
 * populated elsewhere before collection (the collect callback treats a
 * NULL .prv as "nothing to collect").
 */
struct scsc_log_collector_client mini_moredump_client = {
        .name = "minimoredump",
        .type = SCSC_LOG_MINIMOREDUMP,
        .collect_init = NULL,
        .collect = mxman_minimoredump_collect,
        .collect_end = NULL,
        .prv = NULL,
};
+#endif
+
+/* Retrieve memdump in sysfs global */
+static ssize_t sysfs_show_memdump(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", memdump);
+}
+
+/* Update memdump in sysfs global */
+static ssize_t sysfs_store_memdump(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int r;
+
+ r = kstrtoint(buf, 10, &memdump);
+ if (r < 0)
+ memdump = -1;
+
+ switch (memdump) {
+ case 0:
+ case 2:
+ disable_recovery_from_memdump_file = false;
+ break;
+ case 3:
+ default:
+ disable_recovery_from_memdump_file = true;
+ break;
+ }
+
+ SCSC_TAG_INFO(MXMAN, "memdump: %d\n", memdump);
+
+ return (r == 0) ? count : 0;
+}
+
/**
 * mxman_wifi_kobject_ref_get() - Get (and lazily create) the /sys/wifi kobject.
 *
 * The first caller creates the directory; later callers share it via the
 * file-local refcount. Pair each call with mxman_wifi_kobject_ref_put().
 *
 * NOTE(review): kobject_create_and_add() can return NULL; the result is
 * passed to kobject_get()/kobject_uevent() without a check — verify both
 * tolerate NULL, or add a guard.
 */
struct kobject *mxman_wifi_kobject_ref_get(void)
{
        if (refcount++ == 0) {
                /* Create sysfs directory /sys/wifi */
                wifi_kobj_ref = kobject_create_and_add("wifi", NULL);
                kobject_get(wifi_kobj_ref);
                kobject_uevent(wifi_kobj_ref, KOBJ_ADD);
                SCSC_TAG_INFO(MXMAN, "wifi_kobj_ref: 0x%p\n", wifi_kobj_ref);
                WARN_ON(refcount == 0);
        }
        return wifi_kobj_ref;
}
EXPORT_SYMBOL(mxman_wifi_kobject_ref_get);
+
/**
 * mxman_wifi_kobject_ref_put() - Drop a reference taken by
 * mxman_wifi_kobject_ref_get(); the last put releases the /sys/wifi
 * kobject and emits a KOBJ_REMOVE uevent.
 */
void mxman_wifi_kobject_ref_put(void)
{
        if (--refcount == 0) {
                kobject_put(wifi_kobj_ref);
                kobject_uevent(wifi_kobj_ref, KOBJ_REMOVE);
                wifi_kobj_ref = NULL;
                WARN_ON(refcount < 0);
        }
}
EXPORT_SYMBOL(mxman_wifi_kobject_ref_put);
+
+/* Register memdump override */
+void mxman_create_sysfs_memdump(void)
+{
+ int r;
+ struct kobject *kobj_ref = mxman_wifi_kobject_ref_get();
+
+ SCSC_TAG_INFO(MXMAN, "kobj_ref: 0x%p\n", kobj_ref);
+
+ if (kobj_ref) {
+ /* Create sysfs file /sys/wifi/memdump */
+ r = sysfs_create_file(kobj_ref, &memdump_attr.attr);
+ if (r) {
+ /* Failed, so clean up dir */
+ SCSC_TAG_ERR(MXMAN, "Can't create /sys/wifi/memdump\n");
+ mxman_wifi_kobject_ref_put();
+ return;
+ }
+ } else {
+ SCSC_TAG_ERR(MXMAN, "failed to create /sys/wifi directory");
+ }
+}
+
+/* Unregister memdump override */
/* Unregister the memdump override: remove /sys/wifi/memdump and drop the
 * shared /sys/wifi directory reference. Safe to call when the directory
 * was never created.
 */
void mxman_destroy_sysfs_memdump(void)
{
        if (!wifi_kobj_ref)
                return;

        /* Destroy /sys/wifi/memdump file */
        sysfs_remove_file(wifi_kobj_ref, &memdump_attr.attr);

        /* Destroy /sys/wifi virtual dir */
        mxman_wifi_kobject_ref_put();
}
+
+/* Track when WLBT reset fails to allow debug */
+bool reset_failed;
+static u64 reset_failed_time;
+
+/* Status of FM driver request, which persists beyond the lifecyle
+ * of the scsx_mx driver.
+ */
+#ifdef CONFIG_SCSC_FM
+static u32 is_fm_on;
+#endif
+
+static int firmware_runtime_flags;
+/**
+ * This mxman reference is initialized/nullified via mxman_init/deinit
+ * called by scsc_mx_create/destroy on module probe/remove.
+ */
+static struct mxman *active_mxman;
+static bool send_fw_config_to_active_mxman(uint32_t fw_runtime_flags);
+
+static int fw_runtime_flags_setter(const char *val, const struct kernel_param *kp)
+{
+ int ret = -EINVAL;
+ uint32_t fw_runtime_flags = 0;
+
+ if (!val)
+ return ret;
+ ret = kstrtouint(val, 10, &fw_runtime_flags);
+ if (!ret) {
+ if (send_fw_config_to_active_mxman(fw_runtime_flags))
+ firmware_runtime_flags = fw_runtime_flags;
+ else
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * We don't bother to keep an updated copy of the runtime flags effectively
+ * currently set in the FW... we should add a new message-answer handling, both
+ * Kernel and FW side, to be sure; this exists just to ease debugging.
+ */
+static struct kernel_param_ops fw_runtime_kops = {
+ .set = fw_runtime_flags_setter,
+ .get = NULL
+};
+
+module_param_cb(firmware_runtime_flags, &fw_runtime_kops, NULL, 0200);
+MODULE_PARM_DESC(firmware_runtime_flags,
+ "0 = Proceed as normal (default); nnn = Provides FW runtime flags bitmask: unknown bits will be ignored.");
+
+/**
+ * Maxwell Agent Management Messages.
+ *
+ * TODO: common defn with firmware, generated.
+ *
+ * The numbers here *must* match the firmware!
+ */
enum {
        MM_START_IND = 0,       /* FW -> host: firmware is up; arg = RF HW version */
        MM_HALT_REQ = 1,        /* host -> FW: request orderly halt */
        MM_FORCE_PANIC = 2,
        MM_HOST_SUSPEND = 3,
        MM_HOST_RESUME = 4,
        MM_FW_CONFIG = 5,       /* host -> FW: runtime flags bitmask */
        MM_HALT_RSP = 6,        /* FW -> host: response to MM_HALT_REQ */
        MM_FM_RADIO_CONFIG = 7, /* host -> FW: carries wlbt_fm_params */
        MM_LERNA_CONFIG = 8,    /* FW -> host: answer to a lerna config query */
        MM_SYSERR_IND = 9,      /* FW -> host: system error report */
} ma_msg;       /* NOTE(review): this also defines a file-scope variable
                 * 'ma_msg' of the anonymous enum type; it looks unused —
                 * probably meant to be a bare enum declaration. */
+
+/**
+ * Format of the Maxwell agent messages
+ * on the Maxwell management transport stream.
+ */
struct ma_msg_packet {

        uint8_t ma_msg;         /* Message from ma_msg enum */
        uint32_t arg;           /* Optional arg set by f/w in some to-host messages;
                                 * unaligned on the wire since the struct is packed */
} __packed;
+
+/**
+ * Special case Maxwell management, carrying FM radio configuration structure
+ */
struct ma_msg_packet_fm_radio_config {

        uint8_t ma_msg;                 /* Message from ma_msg enum (MM_FM_RADIO_CONFIG) */
        struct wlbt_fm_params fm_params; /* FM Radio parameters */
} __packed;
+
/**
 * send_fw_config_to_active_mxman() - Push runtime flags to running firmware.
 * @fw_runtime_flags: bitmask sent as the arg of an MM_FW_CONFIG message
 *
 * Return: true if the message was sent; false when there is no active
 * mxman, a service error is in progress, or the firmware is not in the
 * STARTED state.
 */
static bool send_fw_config_to_active_mxman(uint32_t fw_runtime_flags)
{
        bool ret = false;
        struct srvman *srvman = NULL;

        SCSC_TAG_INFO(MXMAN, "\n");
        if (!active_mxman) {
                SCSC_TAG_ERR(MXMAN, "Active MXMAN NOT FOUND...cannot send running FW config.\n");
                return ret;
        }

        mutex_lock(&active_mxman->mxman_mutex);
        srvman = scsc_mx_get_srvman(active_mxman->mx);
        if (srvman && srvman->error) {
                mutex_unlock(&active_mxman->mxman_mutex);
                SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
                return ret;
        }

        if (active_mxman->mxman_state == MXMAN_STATE_STARTED) {
                struct ma_msg_packet message = { .ma_msg = MM_FW_CONFIG,
                                                 .arg = fw_runtime_flags };

                SCSC_TAG_INFO(MXMAN, "MM_FW_CONFIG - firmware_runtime_flags:%d\n", message.arg);
                mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(active_mxman->mx),
                                      MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message,
                                      sizeof(message));
                ret = true;
        } else {
                SCSC_TAG_INFO(MXMAN, "MXMAN is NOT STARTED...cannot send MM_FW_CONFIG msg.\n");
        }
        mutex_unlock(&active_mxman->mxman_mutex);

        return ret;
}
+
/**
 * send_fm_params_to_active_mxman() - Push FM radio parameters to firmware.
 * @params: FM configuration copied into an MM_FM_RADIO_CONFIG message
 *
 * Return: true if the message was sent; false when there is no active
 * mxman, a service error is in progress, or the firmware is not started.
 */
static bool send_fm_params_to_active_mxman(struct wlbt_fm_params *params)
{
        bool ret = false;
        struct srvman *srvman = NULL;

        SCSC_TAG_INFO(MXMAN, "\n");
        if (!active_mxman) {
                SCSC_TAG_ERR(MXMAN, "Active MXMAN NOT FOUND...cannot send FM params\n");
                return false;
        }

        mutex_lock(&active_mxman->mxman_mutex);
        srvman = scsc_mx_get_srvman(active_mxman->mx);
        if (srvman && srvman->error) {
                mutex_unlock(&active_mxman->mxman_mutex);
                SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
                return false;
        }

        if (active_mxman->mxman_state == MXMAN_STATE_STARTED) {
                struct ma_msg_packet_fm_radio_config message = { .ma_msg = MM_FM_RADIO_CONFIG,
                                                                 .fm_params = *params };

                SCSC_TAG_INFO(MXMAN, "MM_FM_RADIO_CONFIG\n");
                mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(active_mxman->mx),
                                      MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message,
                                      sizeof(message));

                ret = true; /* Success */
        } else
                SCSC_TAG_INFO(MXMAN, "MXMAN is NOT STARTED...cannot send MM_FM_RADIO_CONFIG msg.\n");

        mutex_unlock(&active_mxman->mxman_mutex);

        return ret;
}
+
+static void mxman_stop(struct mxman *mxman);
+static void print_mailboxes(struct mxman *mxman);
+#ifdef CONFIG_SCSC_WLBTD
+static int _mx_exec(char *prog, int wait_exec) __attribute__((unused));
+#else
+static int _mx_exec(char *prog, int wait_exec);
+#endif
+static int wait_for_mm_msg(struct mxman *mxman, struct completion *mm_msg_completion, ulong timeout_ms)
+{
+ int r;
+
+ (void)mxman; /* unused */
+
+ if (timeout_ms == 0) {
+ /* Zero implies infinite wait */
+ r = wait_for_completion_interruptible(mm_msg_completion);
+ /* r = -ERESTARTSYS if interrupted, 0 if completed */
+ return r;
+ }
+ r = wait_for_completion_timeout(mm_msg_completion, msecs_to_jiffies(timeout_ms));
+ if (r == 0) {
+ SCSC_TAG_ERR(MXMAN, "timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
/* Wait for the firmware's MM_START_IND, bounded by the module parameter
 * mm_completion_timeout_ms (0 = wait forever).
 */
static int wait_for_mm_msg_start_ind(struct mxman *mxman)
{
        return wait_for_mm_msg(mxman, &mxman->mm_msg_start_ind_completion, mm_completion_timeout_ms);
}
+
/* Wait up to MM_HALT_RSP_TIMEOUT_MS for the firmware's MM_HALT_RSP.
 *
 * Note: the return convention differs from wait_for_mm_msg() — this
 * returns 0 on timeout and a positive value (remaining jiffies) when the
 * response arrived. The infinite-wait branch is dead code as long as
 * MM_HALT_RSP_TIMEOUT_MS is a non-zero compile-time constant.
 */
static int wait_for_mm_msg_halt_rsp(struct mxman *mxman)
{
        int r;
        (void)mxman; /* unused */

        if (MM_HALT_RSP_TIMEOUT_MS == 0) {
                /* Zero implies infinite wait */
                r = wait_for_completion_interruptible(&mxman->mm_msg_halt_rsp_completion);
                /* r = -ERESTARTSYS if interrupted, 0 if completed */
                return r;
        }

        r = wait_for_completion_timeout(&mxman->mm_msg_halt_rsp_completion, msecs_to_jiffies(MM_HALT_RSP_TIMEOUT_MS));
        if (r)
                SCSC_TAG_INFO(MXMAN, "Received MM_HALT_RSP from firmware");

        return r;
}
+
+#ifndef CONFIG_SCSC_WLBTD
/* Launch the userspace "moredump" helper to collect a firmware coredump.
 *
 * Retries for up to ~20 s while the usermode helper returns -EBUSY,
 * which happens when userspace is still frozen after suspend.
 *
 * Return: the usermode-helper status (the helper's exit code sits in the
 * second byte, hence the shift below) or a negative errno.
 */
static int coredump_helper(void)
{
        int r;
        int i;
        static char mdbin[128];

        /* Determine path to moredump helper script */
        r = mx140_exe_path(NULL, mdbin, sizeof(mdbin), "moredump");
        if (r) {
                SCSC_TAG_ERR(MXMAN, "moredump path error\n");
                return r;
        }

        for (i = 0; i < 20; i++) {
                r = _mx_exec(mdbin, UMH_WAIT_PROC);
                if (r != -EBUSY)
                        break;
                /* If the usermode helper fails with -EBUSY, the userspace is
                 * likely still frozen from suspend. Back off and retry.
                 */
                SCSC_TAG_INFO(MXMAN, "waiting for userspace to thaw...\n");
                msleep(1000);
        }

        /* Application return codes are in the MSB */
        if (r > 0xffL)
                SCSC_TAG_INFO(MXMAN, "moredump.bin exit(%ld), check syslog\n", (r & 0xff00L) >> 8);

        return r;
}
+#endif
+
/* Send MM_HALT_REQ to the firmware and (best-effort) wait for MM_HALT_RSP.
 *
 * Always returns 0: MM_HALT_RSP is not implemented by every firmware
 * version, so its non-arrival is deliberately not treated as an error.
 */
static int send_mm_msg_stop_blocking(struct mxman *mxman)
{
        int r;
#ifdef CONFIG_SCSC_FM
        struct ma_msg_packet message = { .ma_msg = MM_HALT_REQ,
                                         .arg = mxman->on_halt_ldos_on };
#else
        struct ma_msg_packet message = { .ma_msg = MM_HALT_REQ };
#endif
        mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));

        r = wait_for_mm_msg_halt_rsp(mxman);
        if (r) {
                /*
                 * MM_MSG_HALT_RSP is not implemented in all versions of firmware, so don't treat its non-arrival
                 * as an error
                 */
                SCSC_TAG_INFO(MXMAN, "wait_for_MM_HALT_RSP completed");
        }

        return 0;
}
+
+static char *chip_version(u32 rf_hw_ver)
+{
+ switch (rf_hw_ver & 0x00ff) {
+ default:
+ break;
+ case 0x00b0:
+ if ((rf_hw_ver & 0xff00) > 0x1000)
+ return "S610/S611";
+ else
+ return "S610";
+ case 0x00b1:
+ return "S612";
+ case 0x00b2:
+ return "S620";
+ case 0x0000:
+#ifndef CONFIG_SOC_EXYNOS9610
+ return "Error: check if RF chip is present";
+#else
+ return "Unknown";
+#endif
+ }
+ return "Unknown";
+}
+
+/*
+ * This function is used in this file and in mxproc.c to generate consistent
+ * RF CHIP VERSION string for logging on console and for storing the same
+ * in proc/drivers/mxman_info/rf_chip_version file.
+ */
/**
 * mxman_print_rf_hw_version() - Format the RF chip version string.
 * @mxman: instance holding rf_hw_ver (reported by MM_START_IND)
 * @buf:   output buffer
 * @bufsz: size of @buf
 *
 * Return: snprintf() result — the number of characters that would have
 * been written, excluding the terminating NUL.
 */
int mxman_print_rf_hw_version(struct mxman *mxman, char *buf, const size_t bufsz)
{
        int r;

        r = snprintf(buf, bufsz, "RF_CHIP_VERSION: 0x%04x: %s (0x%02x), EVT%x.%x\n",
                     mxman->rf_hw_ver,
                     chip_version(mxman->rf_hw_ver), (mxman->rf_hw_ver & 0x00ff),
                     ((mxman->rf_hw_ver >> 12) & 0xfU), ((mxman->rf_hw_ver >> 8) & 0xfU));

        return r;
}
+
+static void mxman_print_versions(struct mxman *mxman)
+{
+ char buf[80];
+
+ memset(buf, '\0', sizeof(buf));
+
+ (void)mxman_print_rf_hw_version(mxman, buf, sizeof(buf));
+
+ SCSC_TAG_INFO(MXMAN, "%s", buf);
+ SCSC_TAG_INFO(MXMAN, "WLBT FW: %s\n", mxman->fw_build_id);
+ SCSC_TAG_INFO(MXMAN, "WLBT Driver: %d.%d.%d.%d\n",
+ SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION, SCSC_RELEASE_CANDIDATE, SCSC_RELEASE_POINT);
+#ifdef CONFIG_SCSC_WLBTD
+ scsc_wlbtd_get_and_print_build_type();
+#endif
+}
+
+/** Receive handler for messages from the FW along the maxwell management transport */
/* Dispatch one management-transport message from the firmware.
 * @message: raw packet (at least a struct ma_msg_packet header)
 * @data:    the struct mxman registered with the transport
 */
static void mxman_message_handler(const void *message, void *data)
{
        struct mxman *mxman = (struct mxman *)data;

        /* Forward the message to the applicable service to deal with */
        const struct ma_msg_packet *msg = message;

        switch (msg->ma_msg) {
        case MM_START_IND:
                /* The arg can be used to determine the WLBT/S610 hardware revision */
                SCSC_TAG_INFO(MXMAN, "Received MM_START_IND message from the firmware, arg=0x%04x\n", msg->arg);
                mxman->rf_hw_ver = msg->arg;
                mxman_print_versions(mxman);
                /* Count boots and release anyone blocked in wait_for_mm_msg_start_ind() */
                atomic_inc(&mxman->boot_count);
                complete(&mxman->mm_msg_start_ind_completion);
                break;
        case MM_HALT_RSP:
                /* Releases wait_for_mm_msg_halt_rsp() */
                complete(&mxman->mm_msg_halt_rsp_completion);
                SCSC_TAG_INFO(MXMAN, "Received MM_HALT_RSP message from the firmware\n");
                break;
        case MM_LERNA_CONFIG:
                /* Message response to a firmware configuration query. */
                SCSC_TAG_INFO(MXMAN, "Received MM_LERNA_CONFIG message from firmware\n");
                scsc_lerna_response(message);
                break;
        case MM_SYSERR_IND:
                /* System Error report from firmware */
                SCSC_TAG_INFO(MXMAN, "Received MM_SYSERR_IND message from firmware\n");
                mx_syserr_handler(mxman, message);
                break;
        default:
                /* HERE: Unknown message, raise fault */
                SCSC_TAG_WARNING(MXMAN, "Received unknown message from the firmware: msg->ma_msg=%d\n", msg->ma_msg);
                break;
        }
}
+
+/*
+ * This function calculates and checks two or three (depending on crc32_over_binary flag)
+ * crc32 values in the firmware header. The function will check crc32 over the firmware binary
+ * (i.e. everything in the file following the header) only if the crc32_over_binary is set to 'true'.
+ * This includes initialised data regions so it can be used to check when loading but will not be
+ * meaningful once execution starts.
+ *
+ * Returns 0 on success, -EINVAL if a required CRC is absent, or the error
+ * from the failing fwimage_check_* helper.
+ */
+static int do_fw_crc32_checks(char *fw, u32 fw_image_size, struct fwhdr *fwhdr, bool crc32_over_binary)
+{
+	int r;
+
+	/* A zero CRC means "not present"; only tolerated when crc_check_allow_none is set */
+	if ((fwhdr->fw_crc32 == 0 || fwhdr->header_crc32 == 0 || fwhdr->const_crc32 == 0) && crc_check_allow_none == 0) {
+		/* Report all three CRCs so the missing one is identifiable from the log */
+		SCSC_TAG_ERR(MXMAN, "error: CRC is missing fw_crc32=%d header_crc32=%d const_crc32=%d crc_check_allow_none=%d\n",
+			     fwhdr->fw_crc32, fwhdr->header_crc32, fwhdr->const_crc32, crc_check_allow_none);
+		return -EINVAL;
+	}
+
+	if (fwhdr->header_crc32 == 0 && crc_check_allow_none == 1) {
+		SCSC_TAG_INFO(MXMAN, "Skipping CRC check header_crc32=%d crc_check_allow_none=%d\n",
+			      fwhdr->header_crc32, crc_check_allow_none);
+	} else {
+		/*
+		 * CRC-32-IEEE of all preceding header fields (including other CRCs).
+		 * Always the last word in the header.
+		 */
+		r = fwimage_check_fw_header_crc(fw, fwhdr->hdr_length, fwhdr->header_crc32);
+		if (r) {
+			SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_header_crc() failed\n");
+			return r;
+		}
+	}
+
+	if (fwhdr->const_crc32 == 0 && crc_check_allow_none == 1) {
+		SCSC_TAG_INFO(MXMAN, "Skipping CRC check const_crc32=%d crc_check_allow_none=%d\n",
+			      fwhdr->const_crc32, crc_check_allow_none);
+	} else {
+		/*
+		 * CRC-32-IEEE over the constant sections grouped together at start of firmware binary.
+		 * This CRC should remain valid during execution. It can be used by run-time checker on
+		 * host to detect firmware corruption (not all memory masters are subject to MPUs).
+		 */
+		r = fwimage_check_fw_const_section_crc(fw, fwhdr->const_crc32, fwhdr->const_fw_length, fwhdr->hdr_length);
+		if (r) {
+			SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_const_section_crc() failed\n");
+			return r;
+		}
+	}
+
+	if (crc32_over_binary) {
+		if (fwhdr->fw_crc32 == 0 && crc_check_allow_none == 1) {
+			SCSC_TAG_INFO(MXMAN, "Skipping CRC check fw_crc32=%d crc_check_allow_none=%d\n",
+				      fwhdr->fw_crc32, crc_check_allow_none);
+		} else {
+			/*
+			 * CRC-32-IEEE over the firmware binary (i.e. everything
+			 * in the file following this header).
+			 * This includes initialised data regions so it can be used to
+			 * check when loading but will not be meaningful once execution starts.
+			 */
+			r = fwimage_check_fw_crc(fw, fw_image_size, fwhdr->hdr_length, fwhdr->fw_crc32);
+			if (r) {
+				SCSC_TAG_ERR(MXMAN, "fwimage_check_fw_crc() failed\n");
+				return r;
+			}
+		}
+	}
+
+	return 0;
+}
+
+
+/* Arm the periodic CRC check only when checking is enabled and a period is configured. */
+static void fw_crc_wq_start(struct mxman *mxman)
+{
+	if (!mxman->check_crc || !crc_check_period_ms)
+		return;
+
+	queue_delayed_work(mxman->fw_crc_wq, &mxman->fw_crc_work, msecs_to_jiffies(crc_check_period_ms));
+}
+
+
+/*
+ * Delayed-work callback: re-run the header/const CRC checks over the
+ * in-memory firmware image. On mismatch, declare a host-induced failure;
+ * otherwise re-arm the next periodic check.
+ */
+static void fw_crc_work_func(struct work_struct *work)
+{
+	int r;
+	/* Use the standard helper instead of a raw cast to recover the delayed_work */
+	struct mxman *mxman = container_of(to_delayed_work(work), struct mxman, fw_crc_work);
+
+	/* false: do not re-check the CRC over the whole binary at runtime */
+	r = do_fw_crc32_checks(mxman->fw, mxman->fw_image_size, &mxman->fwhdr, false);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "do_fw_crc32_checks() failed r=%d\n", r);
+		mxman_fail(mxman, SCSC_PANIC_CODE_HOST << 15, __func__);
+		return;
+	}
+	fw_crc_wq_start(mxman);
+}
+
+
+/*
+ * Create the dedicated single-threaded workqueue used for the periodic
+ * firmware CRC check and bind its delayed work item to fw_crc_work_func().
+ *
+ * NOTE(review): create_singlethread_workqueue() can return NULL on
+ * allocation failure and the result is not checked here; a later
+ * queue_delayed_work() on a NULL wq would oops. The void return type
+ * prevents propagating the error — confirm whether callers can tolerate
+ * a return code before changing this.
+ */
+static void fw_crc_wq_init(struct mxman *mxman)
+{
+	mxman->fw_crc_wq = create_singlethread_workqueue("fw_crc_wq");
+	INIT_DELAYED_WORK(&mxman->fw_crc_work, fw_crc_work_func);
+}
+
+/*
+ * Stop the periodic CRC check.
+ *
+ * Order matters: clear check_crc first so that a fw_crc_work_func()
+ * instance already running cannot re-arm itself via fw_crc_wq_start(),
+ * then cancel any pending instance and flush the queue to wait for an
+ * in-flight one to finish.
+ */
+static void fw_crc_wq_stop(struct mxman *mxman)
+{
+	mxman->check_crc = false;
+	cancel_delayed_work(&mxman->fw_crc_work);
+	flush_workqueue(mxman->fw_crc_wq);
+}
+
+/* Tear down the CRC workqueue: quiesce the periodic work, then destroy the queue. */
+static void fw_crc_wq_deinit(struct mxman *mxman)
+{
+	fw_crc_wq_stop(mxman);
+	destroy_workqueue(mxman->fw_crc_wq);
+}
+
+/*
+ * Initialise all host<->firmware transports (management, R4/M4[/M4_1] gdb,
+ * mxlog), allocate the shared mxconf structure and serialise the transport
+ * configuration into it for the firmware to pick up at boot.
+ *
+ * Returns 0 on success or a negative errno; on failure every transport
+ * initialised so far is released via the goto-cleanup ladder below.
+ *
+ * FIX: the original m4_1 init failure path released only the r4 gdb and
+ * mxmgmt transports, leaking the already-initialised m4 gdb transport;
+ * the unwind ladder releases everything in reverse order of init.
+ */
+static int transports_init(struct mxman *mxman)
+{
+	struct mxconf *mxconf;
+	int r;
+	struct scsc_mx *mx = mxman->mx;
+
+	/* Initialise mx management stack */
+	r = mxmgmt_transport_init(scsc_mx_get_mxmgmt_transport(mx), mx);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "mxmgmt_transport_init() failed %d\n", r);
+		return r;
+	}
+
+	/* Initialise gdb transport for cortex-R4 */
+	r = gdb_transport_init(scsc_mx_get_gdb_transport_r4(mx), mx, GDB_TRANSPORT_R4);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "gdb_transport_init() failed %d\n", r);
+		goto err_mxmgmt;
+	}
+
+	/* Initialise gdb transport for cortex-M4 */
+	r = gdb_transport_init(scsc_mx_get_gdb_transport_m4(mx), mx, GDB_TRANSPORT_M4);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "gdb_transport_init() failed %d\n", r);
+		goto err_gdb_r4;
+	}
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	/* Initialise gdb transport for the second cortex-M4 (M4_1) */
+	r = gdb_transport_init(scsc_mx_get_gdb_transport_m4_1(mx), mx, GDB_TRANSPORT_M4_1);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "gdb_transport_init() failed %d\n", r);
+		goto err_gdb_m4;	/* also releases the m4 transport (leaked before) */
+	}
+#endif
+
+	/* Initialise mxlog transport */
+	r = mxlog_transport_init(scsc_mx_get_mxlog_transport(mx), mx);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "mxlog_transport_init() failed %d\n", r);
+		goto err_gdb_m4_1;
+	}
+
+	/*
+	 * Allocate & Initialise Infrastructre Config Structure
+	 * including the mx management stack config information.
+	 */
+	mxconf = miframman_alloc(scsc_mx_get_ramman(mx), sizeof(struct mxconf), 4, MIFRAMMAN_OWNER_COMMON);
+	if (!mxconf) {
+		SCSC_TAG_ERR(MXMAN, "miframman_alloc() failed\n");
+		r = -ENOMEM;
+		goto err_mxlog;
+	}
+	mxman->mxconf = mxconf;
+	mxconf->magic = MXCONF_MAGIC;
+	mxconf->version.major = MXCONF_VERSION_MAJOR;
+	mxconf->version.minor = MXCONF_VERSION_MINOR;
+
+	/* Pass pre-existing FM status to FW */
+	mxconf->flags = 0;
+#ifdef CONFIG_SCSC_FM
+	mxconf->flags |= is_fm_on ? MXCONF_FLAGS_FM_ON : 0;
+#endif
+	SCSC_TAG_INFO(MXMAN, "mxconf flags 0x%08x\n", mxconf->flags);
+
+	/* serialise mxmgmt transport */
+	mxmgmt_transport_config_serialise(scsc_mx_get_mxmgmt_transport(mx), &mxconf->mx_trans_conf);
+	/* serialise Cortex-R4 gdb transport */
+	gdb_transport_config_serialise(scsc_mx_get_gdb_transport_r4(mx), &mxconf->mx_trans_conf_gdb_r4);
+	/* serialise Cortex-M4 gdb transport */
+	gdb_transport_config_serialise(scsc_mx_get_gdb_transport_m4(mx), &mxconf->mx_trans_conf_gdb_m4);
+
+	/* Default to Fleximac M4_1 monitor channel not in use.
+	 * Allows CONFIG_SCSC_MX450_GDB_SUPPORT to be turned off in Kconfig even though mxconf
+	 * struct v5 defines M4_1 channel
+	 */
+	mxconf->mx_trans_conf_gdb_m4_1.from_ap_stream_conf.buf_conf.buffer_loc = 0;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	/* serialise Cortex-M4 gdb transport */
+	gdb_transport_config_serialise(scsc_mx_get_gdb_transport_m4_1(mx), &mxconf->mx_trans_conf_gdb_m4_1);
+#endif
+	/* serialise mxlog transport */
+	mxlog_transport_config_serialise(scsc_mx_get_mxlog_transport(mx), &mxconf->mxlogconf);
+	SCSC_TAG_DEBUG(MXMAN, "read_bit_idx=%d write_bit_idx=%d buffer=%p num_packets=%d packet_size=%d read_index=%d write_index=%d\n",
+		       scsc_mx_get_mxlog_transport(mx)->mif_stream.read_bit_idx,
+		       scsc_mx_get_mxlog_transport(mx)->mif_stream.write_bit_idx,
+		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.buffer,
+		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.num_packets,
+		       scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.packet_size,
+		       *scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.read_index,
+		       *scsc_mx_get_mxlog_transport(mx)->mif_stream.buffer.write_index
+		       );
+
+	/* Need to initialise fwconfig or else random data can make firmware data abort. */
+	mxconf->fwconfig.offset = 0;
+	mxconf->fwconfig.size = 0;
+#ifdef CONFIG_SCSC_COMMON_HCF
+	/* Load Common Config HCF */
+	mxfwconfig_load(mxman->mx, &mxconf->fwconfig);
+#endif
+	return 0;
+
+	/* Unwind in reverse order of initialisation */
+err_mxlog:
+	mxlog_transport_release(scsc_mx_get_mxlog_transport(mx));
+err_gdb_m4_1:
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	gdb_transport_release(scsc_mx_get_gdb_transport_m4_1(mx));
+err_gdb_m4:
+#endif
+	gdb_transport_release(scsc_mx_get_gdb_transport_m4(mx));
+err_gdb_r4:
+	gdb_transport_release(scsc_mx_get_gdb_transport_r4(mx));
+err_mxmgmt:
+	mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
+	return r;
+}
+
+/* Release all host<->firmware transports and free the shared mxconf block. */
+static void transports_release(struct mxman *mxman)
+{
+	struct scsc_mx *mx = mxman->mx;
+
+	mxlog_transport_release(scsc_mx_get_mxlog_transport(mx));
+	mxmgmt_transport_release(scsc_mx_get_mxmgmt_transport(mx));
+	gdb_transport_release(scsc_mx_get_gdb_transport_r4(mx));
+	gdb_transport_release(scsc_mx_get_gdb_transport_m4(mx));
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	gdb_transport_release(scsc_mx_get_gdb_transport_m4_1(mx));
+#endif
+	miframman_free(scsc_mx_get_ramman(mx), mxman->mxconf);
+}
+
+/*
+ * Prime the MIF mailboxes consumed by the R4 boot ROM before it is released
+ * from reset:
+ *   MBOX 0 - firmware entry point address
+ *   MBOX 1 - R4-relative reference to the mxconf structure
+ *   MBOX 2 - magic number (MBOX2_MAGIC_NUMBER) the ROM checks for
+ *   MBOX 3 - firmware startup flags
+ *
+ * The wmb() after MBOX 0/1 orders those writes before the magic/flags
+ * writes; the ROM presumably polls MBOX 2 last — TODO confirm against the
+ * boot ROM specification.
+ */
+static void mbox_init(struct mxman *mxman, u32 firmware_entry_point)
+{
+	u32 *mbox0;
+	u32 *mbox1;
+	u32 *mbox2;
+	u32 *mbox3;
+	scsc_mifram_ref mifram_ref;
+	struct scsc_mx *mx = mxman->mx;
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);
+
+	/* Place firmware entry address in MIF MBOX 0 so R4 ROM knows where to jump to! */
+	mbox0 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_0);
+	mbox1 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_1);
+
+	/* Write (and flush) entry point to MailBox 0, config address to MBOX 1 */
+	*mbox0 = firmware_entry_point;
+	mif->get_mifram_ref(mif, mxman->mxconf, &mifram_ref);
+	*mbox1 = mifram_ref; /* must be R4-relative address here */
+	/* CPU memory barrier */
+	wmb();
+	/*
+	 * write the magic number "0xbcdeedcb" to MIF Mailbox #2 &
+	 * copy the firmware_startup_flags to MIF Mailbox #3 before starting (reset = 0) the R4
+	 */
+	mbox2 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_2);
+	*mbox2 = MBOX2_MAGIC_NUMBER;
+	mbox3 = mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif, MBOX_INDEX_3);
+	*mbox3 = firmware_startup_flags;
+}
+
+/*
+ * Validate the fw image including checking the firmware header, magic #,
+ * version, checksum and so on; decide whether CRC checking is required.
+ *
+ * On return *fwhdr_parsed_ok reports whether the header parsed (always false
+ * when skip_header forces the legacy start address), and *check_crc reports
+ * whether the header version mandates CRC verification.
+ *
+ * Supported header versions: 0.2 and any 1.x (both with CRC checking);
+ * supported FWAPI version: 0.2 only.
+ *
+ * PORT: assumes little endian
+ */
+static int fwhdr_init(char *fw, struct fwhdr *fwhdr, bool *fwhdr_parsed_ok, bool *check_crc)
+{
+	if (skip_header)
+		*fwhdr_parsed_ok = false; /* Allows the forced start address to be used */
+	else
+		*fwhdr_parsed_ok = fwhdr_parse(fw, fwhdr);
+	*check_crc = false;
+
+	if (!*fwhdr_parsed_ok) {
+		/* Unidentified pre-header firmware - assume it is built to run at 0xb8000000 == 0 for bootrom */
+		if (!allow_unidentified_firmware) {
+			SCSC_TAG_ERR(MXMAN, "Unidentified firmware is not allowed\n");
+			return -EINVAL;
+		}
+		SCSC_TAG_INFO(MXMAN, "Unidentified firmware override\n");
+		fwhdr->firmware_entry_point = 0;
+		fwhdr->fw_runtime_length = MX_FW_RUNTIME_LENGTH;
+		return 0;
+	}
+
+	SCSC_TAG_INFO(MXMAN, "FW HEADER version: hdr_major: %d hdr_minor: %d\n", fwhdr->hdr_major, fwhdr->hdr_minor);
+
+	/* Header 1.x or exactly 0.2 -> CRC checking applies; anything else is unsupported */
+	if (fwhdr->hdr_major == 1 ||
+	    (fwhdr->hdr_major == 0 && fwhdr->hdr_minor == 2)) {
+		*check_crc = true;
+	} else {
+		SCSC_TAG_ERR(MXMAN, "Unsupported FW HEADER version: hdr_major: %d hdr_minor: %d\n",
+			     fwhdr->hdr_major, fwhdr->hdr_minor);
+		return -EINVAL;
+	}
+
+	/* Only FWAPI 0.2 is understood by this driver */
+	if (fwhdr->fwapi_major == 0 && fwhdr->fwapi_minor == 2) {
+		SCSC_TAG_INFO(MXMAN, "FWAPI version: fwapi_major: %d fwapi_minor: %d\n",
+			      fwhdr->fwapi_major, fwhdr->fwapi_minor);
+	} else {
+		SCSC_TAG_ERR(MXMAN, "Unsupported FWAPI version: fwapi_major: %d fwapi_minor: %d\n",
+			     fwhdr->fwapi_major, fwhdr->fwapi_minor);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Download the firmware image into DRAM, parse and validate its header,
+ * optionally run the load-time CRC checks, and extract the build id / ttid
+ * strings for logging and KIC reporting.
+ *
+ * @start_dram/@size_dram: destination window already mapped by the caller.
+ * @fwhdr_parsed_ok: out - whether a valid header was found (see fwhdr_init).
+ *
+ * Returns 0 on success or a negative errno from the download/parse/CRC steps.
+ */
+static int fw_init(struct mxman *mxman, void *start_dram, size_t size_dram, bool *fwhdr_parsed_ok)
+{
+	int r;
+	char *build_id;
+	char *ttid;
+	u32 fw_image_size;
+	struct fwhdr *fwhdr = &mxman->fwhdr;
+	char *fw = start_dram;
+
+	/* Copy the firmware file into the shared DRAM window */
+	r = mx140_file_download_fw(mxman->mx, start_dram, size_dram, &fw_image_size);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "mx140_file_download_fw() failed (%d)\n", r);
+		return r;
+	}
+
+	/* Parse/validate the header; also decides whether CRC checks are needed */
+	r = fwhdr_init(fw, fwhdr, fwhdr_parsed_ok, &mxman->check_crc);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "fwhdr_init() failed\n");
+		return r;
+	}
+	mxman->fw = fw;
+	mxman->fw_image_size = fw_image_size;
+	if (mxman->check_crc) {
+		/* do CRC on the entire image (true: include the whole-binary CRC) */
+		r = do_fw_crc32_checks(fw, fw_image_size, &mxman->fwhdr, true);
+		if (r) {
+			SCSC_TAG_ERR(MXMAN, "do_fw_crc32_checks() failed\n");
+			return r;
+		}
+		/* Arm the periodic runtime CRC re-check */
+		fw_crc_wq_start(mxman);
+	}
+
+	if (*fwhdr_parsed_ok) {
+		build_id = fwhdr_get_build_id(fw, fwhdr);
+		if (build_id) {
+			struct slsi_kic_service_info kic_info;
+
+			(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "%s", build_id);
+			SCSC_TAG_INFO(MXMAN, "Firmware BUILD_ID: %s\n", mxman->fw_build_id);
+			/* NOTE(review): copies sizeof(saved_fw_build_id) bytes out of
+			 * mxman->fw_build_id — assumes saved_fw_build_id is no larger
+			 * than fw_build_id; TODO confirm the two array sizes match.
+			 */
+			memcpy(saved_fw_build_id, mxman->fw_build_id,
+			       sizeof(saved_fw_build_id));
+
+			/* Report firmware/driver version info to the KIC subsystem */
+			(void) snprintf(kic_info.ver_str,
+					min(sizeof(mxman->fw_build_id), sizeof(kic_info.ver_str)),
+					"%s", mxman->fw_build_id);
+			kic_info.fw_api_major = fwhdr->fwapi_major;
+			kic_info.fw_api_minor = fwhdr->fwapi_minor;
+			kic_info.release_product = SCSC_RELEASE_PRODUCT;
+			kic_info.host_release_iteration = SCSC_RELEASE_ITERATION;
+			kic_info.host_release_candidate = SCSC_RELEASE_CANDIDATE;
+
+			slsi_kic_service_information(slsi_kic_technology_type_common, &kic_info);
+		} else
+			SCSC_TAG_ERR(MXMAN, "Failed to get Firmware BUILD_ID\n");
+
+		/* ttid is optional: only recorded when present in the header */
+		ttid = fwhdr_get_ttid(fw, fwhdr);
+		if (ttid) {
+			(void)snprintf(mxman->fw_ttid, sizeof(mxman->fw_ttid), "%s", ttid);
+			SCSC_TAG_INFO(MXMAN, "Firmware ttid: %s\n", mxman->fw_ttid);
+		}
+	}
+
+	SCSC_TAG_DEBUG(MXMAN, "firmware_entry_point=0x%x fw_runtime_length=%d\n", fwhdr->firmware_entry_point, fwhdr->fw_runtime_length);
+
+	return 0;
+
+}
+
+/*
+ * Bring WLBT out of reset: map DRAM, download and validate firmware,
+ * carve up the MIF RAM heaps, initialise transports/mailboxes/monitors,
+ * deassert reset and wait for the firmware's MM_START_IND.
+ *
+ * The sequence is strictly ordered — each init step depends on the previous
+ * one — and each failure path unwinds exactly what was set up so far.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mxman_start(struct mxman *mxman)
+{
+	void *start_dram;
+	size_t size_dram = MX_DRAM_SIZE;
+	struct scsc_mif_abs *mif;
+	struct fwhdr *fwhdr = &mxman->fwhdr;
+	bool fwhdr_parsed_ok;
+	void *start_mifram_heap;
+	u32 length_mifram_heap;
+	void *start_mifram_heap2;
+	u32 length_mifram_heap2;
+	int r;
+
+	/* A previously failed reset is treated as permanent until reboot */
+	if (reset_failed) {
+		struct timeval tval = ns_to_timeval(reset_failed_time);
+
+		SCSC_TAG_ERR(MXMAN, "previous reset failed at [%6lu.%06ld], ignoring\n", tval.tv_sec, tval.tv_usec);
+		return -EIO;
+	}
+
+	/* Placeholder until fw_init() extracts the real build id */
+	(void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");
+
+	/* If the option is set to skip header, we must allow unidentified f/w */
+	if (skip_header) {
+		SCSC_TAG_INFO(MXMAN, "Ignoring firmware header block\n");
+		allow_unidentified_firmware = true;
+	}
+
+	mif = scsc_mx_get_mif_abs(mxman->mx);
+	start_dram = mif->map(mif, &size_dram);
+
+	if (!start_dram) {
+		SCSC_TAG_ERR(MXMAN, "Error allocating dram\n");
+		return -ENOMEM;
+	}
+
+	SCSC_TAG_INFO(MXMAN, "Allocated %zu bytes\n", size_dram);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	/* CHV (test harness) firmware is not expected to carry a valid header */
+	if (chv_run)
+		allow_unidentified_firmware = true;
+	/* Set up chv arguments. */
+
+#endif
+
+	mxman->start_dram = start_dram;
+
+	/* Download + validate firmware into the freshly mapped DRAM */
+	r = fw_init(mxman, start_dram, size_dram, &fwhdr_parsed_ok);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "fw_init() failed\n");
+		mif->unmap(mif, mxman->start_dram);
+		return r;
+	}
+
+	/* set up memory protection (read only) from start_dram to start_dram+fw_length
+	 * rounding up the size if required
+	 */
+	start_mifram_heap = (char *)start_dram + fwhdr->fw_runtime_length;
+	length_mifram_heap = MX_DRAM_SIZE_SECTION_1 - fwhdr->fw_runtime_length;
+
+
+	start_mifram_heap2 = (char *)start_dram + MX_DRAM_SIZE_SECTION_2;
+
+	/* ABox reserved at end so adjust length - round to multiple of PAGE_SIZE */
+	length_mifram_heap2 = MX_DRAM_SIZE_SECTION_2 -
+		((sizeof(struct scsc_bt_audio_abox) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1));
+
+	/* Two RAM managers (sections 1 and 2) plus the ABox region after heap2 */
+	miframman_init(scsc_mx_get_ramman(mxman->mx), start_mifram_heap, length_mifram_heap, start_dram);
+	miframman_init(scsc_mx_get_ramman2(mxman->mx), start_mifram_heap2, length_mifram_heap2, start_mifram_heap2);
+	miframabox_init(scsc_mx_get_aboxram(mxman->mx), start_mifram_heap2 + length_mifram_heap2);
+	mifmboxman_init(scsc_mx_get_mboxman(mxman->mx));
+	mifintrbit_init(scsc_mx_get_intrbit(mxman->mx), mif);
+	mxfwconfig_init(mxman->mx);
+
+	/* Initialise transports */
+	r = transports_init(mxman);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "transports_init() failed\n");
+		/* Unwind everything initialised above, in reverse */
+		fw_crc_wq_stop(mxman);
+		mifintrbit_deinit(scsc_mx_get_intrbit(mxman->mx));
+		miframman_deinit(scsc_mx_get_ramman(mxman->mx));
+		miframman_deinit(scsc_mx_get_ramman2(mxman->mx));
+		miframabox_deinit(scsc_mx_get_aboxram(mxman->mx));
+		mifmboxman_deinit(scsc_mx_get_mboxman(mxman->mx));
+		/* Release the MIF memory resources */
+		mif->unmap(mif, mxman->start_dram);
+		return r;
+	}
+	/* Mailboxes must carry the entry point before reset is deasserted */
+	mbox_init(mxman, fwhdr->firmware_entry_point);
+	init_completion(&mxman->mm_msg_start_ind_completion);
+	init_completion(&mxman->mm_msg_halt_rsp_completion);
+	mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxman->mx),
+						  MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
+						  &mxman_message_handler, mxman);
+
+	mxlog_init(scsc_mx_get_mxlog(mxman->mx), mxman->mx, mxman->fw_build_id);
+#ifdef CONFIG_SCSC_MXLOGGER
+	mxlogger_init(mxman->mx, scsc_mx_get_mxlogger(mxman->mx), MXL_POOL_SZ);
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	/* Register minimoredump client */
+	mini_moredump_client.prv = mxman;
+	scsc_log_collector_register_client(&mini_moredump_client);
+#endif
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+	/* Initialize SMAPPER */
+	mifsmapper_init(scsc_mx_get_smapper(mxman->mx), mif);
+#endif
+#ifdef CONFIG_SCSC_QOS
+	mifqos_init(scsc_mx_get_qos(mxman->mx), mif);
+#endif
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	if (chv_run) {
+		int i;
+
+		/* argv block lives at a fixed offset in DRAM for the CHV f/w */
+		u32 *p = (u32 *)((u8 *)start_dram + SCSC_CHV_ARGV_ADDR_OFFSET);
+
+		if (chv_argc == 0) {
+			/*
+			 * Setup the chv f/w arguments.
+			 * Argument of 0 means run once (driver never set this).
+			 * Argument of 1 means run forever.
+			 */
+			SCSC_TAG_INFO(MXMAN, "Setting up CHV arguments: start_dram=%p arg=%p, chv_run=%d\n", start_dram, p, chv_run);
+			*p++ = 1;                    /* argc */
+			*p++ = chv_run == 1 ? 0 : 1; /* arg */
+		} else {
+			/* Pass separate args */
+			*p++ = chv_argc;             /* argc */
+			SCSC_TAG_INFO(MXMAN, "Setting up additional CHV args: chv_argc = %d\n", chv_argc);
+
+			for (i = 0; i < chv_argc; i++) {
+				SCSC_TAG_INFO(MXMAN, "Setting up additional CHV args: chv_argv[%d]: *(%p) = 0x%x\n", i, p, (u32)chv_argv[i]);
+				*p++ = (u32)chv_argv[i]; /* arg */
+			}
+		}
+	}
+#endif
+	mxproc_create_ctrl_proc_dir(&mxman->mxproc, mxman);
+	panicmon_init(scsc_mx_get_panicmon(mxman->mx), mxman->mx);
+
+	/* Change state to STARTING to allow coredump as we come out of reset */
+	mxman->mxman_state = MXMAN_STATE_STARTING;
+
+	/* release Maxwell from reset */
+	r = mif->reset(mif, 0);
+	if (r) {
+		reset_failed = true;
+		SCSC_TAG_INFO(MXMAN, "HW reset deassertion failed\n");
+
+		/* Save log at point of failure */
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+		scsc_log_collector_schedule_collection(SCSC_LOG_HOST_COMMON, SCSC_LOG_HOST_COMMON_REASON_START);
+#else
+		mx140_log_dump();
+#endif
+	}
+	/* NOTE(review): on reset failure we deliberately fall through to the
+	 * wait below (which will time out and stop) rather than returning
+	 * immediately — presumably to allow coredump collection; confirm.
+	 */
+	if (fwhdr_parsed_ok) {
+		r = wait_for_mm_msg_start_ind(mxman);
+		if (r) {
+			SCSC_TAG_ERR(MXMAN, "wait_for_MM_START_IND() failed: r=%d\n", r);
+			print_mailboxes(mxman);
+			if (skip_mbox0_check) {
+				/* Debug mode: pretend the start succeeded */
+				SCSC_TAG_ERR(MXMAN, "timeout ignored in skip_mbox0_check mode\n");
+				return 0;
+			}
+			mxman_stop(mxman);
+			return r;
+		}
+#ifdef CONFIG_SCSC_MXLOGGER
+		mxlogger_start(scsc_mx_get_mxlogger(mxman->mx));
+#endif
+	} else {
+		/* No header => no MM_START_IND; give the f/w a fixed delay to boot */
+		msleep(WAIT_FOR_FW_TO_START_DELAY_MS);
+	}
+
+	return 0;
+}
+
+/*
+ * Decide whether a WLBT failure should escalate to BUG(): true when the
+ * memdump module parameter is 3, or (legacy platforms) when the
+ * .memdump.info file contains '3'.
+ */
+static bool is_bug_on_enabled(struct scsc_mx *mx)
+{
+	bool bug_on_enabled = (memdump == 3);
+	const struct firmware *firm;
+	int r;
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	(void)firm; /* unused */
+	(void)r;    /* unused */
+	return bug_on_enabled;
+#else
+	/* non SABLE platforms should also follow /sys/wifi/memdump if enabled */
+	if (disable_recovery_handling == MEMDUMP_FILE_FOR_RECOVERY)
+		return bug_on_enabled;
+
+	/* for legacy platforms (including Andorid P) using .memdump.info */
+#if defined(ANDROID_VERSION) && (ANDROID_VERSION >= 90000)
+	#define MX140_MEMDUMP_INFO_FILE "/data/vendor/conn/.memdump.info"
+#else
+	#define MX140_MEMDUMP_INFO_FILE "/data/misc/conn/.memdump.info"
+#endif
+
+	SCSC_TAG_INFO(MX_FILE, "Loading %s file\n", MX140_MEMDUMP_INFO_FILE);
+	r = mx140_request_file(mx, MX140_MEMDUMP_INFO_FILE, &firm);
+	if (r) {
+		SCSC_TAG_WARNING(MX_FILE, "Error Loading %s file %d\n", MX140_MEMDUMP_INFO_FILE, r);
+		return bug_on_enabled;
+	}
+
+	/* File must hold at least one byte; a leading '3' enables BUG() */
+	if (firm->size < sizeof(char))
+		SCSC_TAG_WARNING(MX_FILE, "file is too small\n");
+	else if (*firm->data == '3')
+		bug_on_enabled = true;
+	mx140_release_file(mx, firm);
+
+	SCSC_TAG_INFO(MX_FILE, "bug_on_enabled %d\n", bug_on_enabled);
+	return bug_on_enabled;
+#endif //CONFIG_SCSC_LOG_COLLECTION
+}
+
+/* Decode and log a legacy-format panic code (origin + technology + subcode). */
+static void print_panic_code_legacy(u16 code)
+{
+	u16 tech = code & SCSC_PANIC_TECH_MASK;
+	u16 origin = code & SCSC_PANIC_ORIGIN_MASK;
+
+	SCSC_TAG_INFO(MXMAN, "Decoding panic code=0x%x:\n", code);
+
+	switch (origin) {
+	case SCSC_PANIC_ORIGIN_FW:
+		SCSC_TAG_INFO(MXMAN, "SCSC_PANIC_ORIGIN_FW\n");
+		break;
+	case SCSC_PANIC_ORIGIN_HOST:
+		SCSC_TAG_INFO(MXMAN, "SCSC_PANIC_ORIGIN_HOST\n");
+		break;
+	default:
+		SCSC_TAG_INFO(MXMAN, "Failed to identify panic origin\n");
+		break;
+	}
+
+	switch (tech) {
+	case SCSC_PANIC_TECH_WLAN:
+		SCSC_TAG_INFO(MXMAN, "SCSC_PANIC_TECH_WLAN\n");
+		break;
+	case SCSC_PANIC_TECH_CORE:
+		SCSC_TAG_INFO(MXMAN, "SCSC_PANIC_TECH_CORE\n");
+		break;
+	case SCSC_PANIC_TECH_BT:
+		SCSC_TAG_INFO(MXMAN, "SCSC_PANIC_TECH_BT\n");
+		break;
+	case SCSC_PANIC_TECH_UNSP:
+		SCSC_TAG_INFO(MXMAN, "PANIC_TECH_UNSP\n");
+		break;
+	default:
+		SCSC_TAG_INFO(MXMAN, "Failed to identify panic technology\n");
+		break;
+	}
+
+	SCSC_TAG_INFO(MXMAN, "panic subcode=0x%x\n", code & SCSC_PANIC_SUBCODE_MASK_LEGACY);
+}
+
+/* Decode and log a current-format panic code (origin + per-service subcode). */
+static void print_panic_code(u16 code)
+{
+	u16 origin = code & SCSC_PANIC_ORIGIN_MASK;   /* Panic origin (host/fw) */
+	u16 subcode = code & SCSC_PANIC_SUBCODE_MASK; /* The panic code */
+
+	SCSC_TAG_INFO(MXMAN, "Decoding panic code=0x%x:\n", code);
+	SCSC_TAG_INFO(MXMAN, "panic subcode=0x%x\n", code & SCSC_PANIC_SUBCODE_MASK);
+
+	switch (origin) {
+	case SCSC_PANIC_ORIGIN_FW:
+		SCSC_TAG_INFO(MXMAN, "WLBT FW PANIC: 0x%02x\n", subcode);
+		break;
+	case SCSC_PANIC_ORIGIN_HOST:
+		/* Host-detected failure: the top bits of the subcode name the service */
+		SCSC_TAG_INFO(MXMAN, "WLBT HOST detected FW failure, service:\n");
+		switch (subcode >> SCSC_SYSERR_HOST_SERVICE_SHIFT) {
+		case SCSC_SERVICE_ID_WLAN:
+			SCSC_TAG_INFO(MXMAN, " WLAN\n");
+			break;
+		case SCSC_SERVICE_ID_BT:
+			SCSC_TAG_INFO(MXMAN, " BT\n");
+			break;
+		case SCSC_SERVICE_ID_ANT:
+			SCSC_TAG_INFO(MXMAN, " ANT\n");
+			break;
+		case SCSC_SERVICE_ID_CLK20MHZ:
+			SCSC_TAG_INFO(MXMAN, " CLK20MHZ\n");
+			break;
+		default:
+			SCSC_TAG_INFO(MXMAN, " Service 0x%x\n", subcode);
+			break;
+		}
+		break;
+	default:
+		SCSC_TAG_INFO(MXMAN, "Failed to identify panic origin\n");
+		break;
+	}
+}
+
+/**
+ * Print the last panic record collected to aid in post mortem.
+ *
+ * Helps when all we have is kernel log showing WLBT failed some time ago
+ *
+ * Only prints the R4 record
+ */
+void mxman_show_last_panic(struct mxman *mxman)
+{
+	u16 origin = mxman->scsc_panic_code & SCSC_PANIC_ORIGIN_MASK;
+	u32 r4_panic_record_length = 0; /* in u32s */
+
+	/* Any valid panic? */
+	if (mxman->scsc_panic_code == 0)
+		return;
+
+	SCSC_TAG_INFO(MXMAN, "\n\n--- DETAILS OF LAST WLBT FAILURE ---\n\n");
+
+	if (origin == SCSC_PANIC_ORIGIN_HOST) {
+		SCSC_TAG_INFO(MXMAN, "Last panic was host induced:\n");
+	} else if (origin == SCSC_PANIC_ORIGIN_FW) {
+		SCSC_TAG_INFO(MXMAN, "Last panic was FW:\n");
+		fw_parse_r4_panic_record(mxman->last_panic_rec_r, &r4_panic_record_length);
+	} else {
+		SCSC_TAG_INFO(MXMAN, "Last panic unknown origin %d\n", origin);
+	}
+
+	print_panic_code(mxman->scsc_panic_code);
+
+	SCSC_TAG_INFO(MXMAN, "Reason: '%s'\n", mxman->failure_reason[0] ? mxman->failure_reason : "<null>");
+	SCSC_TAG_INFO(MXMAN, "Auto-recovery: %s\n", disable_recovery_handling ? "off" : "on");
+
+	if (mxman_recovery_disabled()) {
+		/* Labour the point that a reboot is needed when autorecovery is disabled */
+		SCSC_TAG_INFO(MXMAN, "\n\n*** HANDSET REBOOT NEEDED TO RESTART WLAN AND BT ***\n\n");
+	}
+
+	SCSC_TAG_INFO(MXMAN, "\n\n--- END DETAILS OF LAST WLBT FAILURE ---\n\n");
+}
+
+/*
+ * Parse the R4/M4 (and optional M4_1) panic records the firmware left in
+ * shared memory and fold the extracted subcode into mxman->scsc_panic_code.
+ * Only runs when the panic originated in the firmware; the record format
+ * is selected by the R4 record length (legacy minor 52 vs minor 53).
+ *
+ * FIXES: the M4_1 branch previously parsed the (NULL) m4_panic_record
+ * pointer instead of m4_1_panic_record, and later read the sympathetic
+ * flag from the wrong record as well.
+ */
+static void process_panic_record(struct mxman *mxman)
+{
+	u32 *r4_panic_record = NULL;
+	u32 *m4_panic_record = NULL;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	u32 *m4_1_panic_record = NULL;
+#endif
+	u32 r4_panic_record_length = 0; /* in u32s */
+	u32 m4_panic_record_length = 0; /* in u32s */
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	u32 m4_1_panic_record_length = 0; /* in u32s */
+#endif
+	bool r4_panic_record_ok = false;
+	bool m4_panic_record_ok = false;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	bool m4_1_panic_record_ok = false;
+#endif
+	bool r4_sympathetic_panic_flag = false;
+	bool m4_sympathetic_panic_flag = false;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	bool m4_1_sympathetic_panic_flag = false;
+#endif
+
+	/* some configurable delay before accessing the panic record */
+	msleep(panic_record_delay);
+	/*
+	 * Check if the panic was triggered by MX and set the subcode if so.
+	 */
+	if ((mxman->scsc_panic_code & SCSC_PANIC_ORIGIN_MASK) == SCSC_PANIC_ORIGIN_FW) {
+		if (mxman->fwhdr.r4_panic_record_offset) {
+			r4_panic_record = (u32 *)(mxman->fw + mxman->fwhdr.r4_panic_record_offset);
+			r4_panic_record_ok = fw_parse_r4_panic_record(r4_panic_record, &r4_panic_record_length);
+		} else {
+			SCSC_TAG_INFO(MXMAN, "R4 panic record doesn't exist in the firmware header\n");
+		}
+		if (mxman->fwhdr.m4_panic_record_offset) {
+			m4_panic_record = (u32 *)(mxman->fw + mxman->fwhdr.m4_panic_record_offset);
+			m4_panic_record_ok = fw_parse_m4_panic_record(m4_panic_record, &m4_panic_record_length);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+		} else if (mxman->fwhdr.m4_1_panic_record_offset) {
+			m4_1_panic_record = (u32 *)(mxman->fw + mxman->fwhdr.m4_1_panic_record_offset);
+			/* FIX: parse the M4_1 record, not the NULL m4_panic_record */
+			m4_1_panic_record_ok = fw_parse_m4_panic_record(m4_1_panic_record, &m4_1_panic_record_length);
+#endif
+		} else {
+			SCSC_TAG_INFO(MXMAN, "M4 panic record doesn't exist in the firmware header\n");
+		}
+
+		/* Extract and print the panic code */
+		switch (r4_panic_record_length) {
+		default:
+			SCSC_TAG_WARNING(MXMAN, "Bad panic record length/subversion\n");
+			break;
+		case SCSC_R4_V2_MINOR_52:
+			/* Legacy record: subcode lives at word 2 of whichever record parsed */
+			if (r4_panic_record_ok)
+				mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK_LEGACY & r4_panic_record[2];
+			else if (m4_panic_record_ok)
+				mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK_LEGACY & m4_panic_record[2];
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+			else if (m4_1_panic_record_ok)
+				mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK_LEGACY & m4_1_panic_record[2];
+#endif
+			/* Set unspecified technology for now */
+			mxman->scsc_panic_code |= SCSC_PANIC_TECH_UNSP;
+			print_panic_code_legacy(mxman->scsc_panic_code);
+			break;
+		case SCSC_R4_V2_MINOR_53:
+			if (r4_panic_record_ok) {
+				/* Save the last R4 panic record for future display */
+				BUG_ON(sizeof(mxman->last_panic_rec_r) < SCSC_R4_V2_MINOR_53 * sizeof(u32));
+				memcpy((u8 *)mxman->last_panic_rec_r, (u8 *)r4_panic_record, SCSC_R4_V2_MINOR_53 * sizeof(u32));
+				mxman->last_panic_rec_sz = r4_panic_record_length;
+
+				r4_sympathetic_panic_flag = fw_parse_get_r4_sympathetic_panic_flag(r4_panic_record);
+				SCSC_TAG_INFO(MXMAN, "r4_panic_record_ok=%d r4_sympathetic_panic_flag=%d\n",
+					      r4_panic_record_ok,
+					      r4_sympathetic_panic_flag
+					     );
+				if (r4_sympathetic_panic_flag == false) {
+					/* process R4 record */
+					SCSC_TAG_INFO(MXMAN, "process R4 record\n");
+					mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK & r4_panic_record[3];
+					print_panic_code(mxman->scsc_panic_code);
+					break;
+				}
+			}
+			if (m4_panic_record_ok) {
+				m4_sympathetic_panic_flag = fw_parse_get_m4_sympathetic_panic_flag(m4_panic_record);
+				SCSC_TAG_INFO(MXMAN, "m4_panic_record_ok=%d m4_sympathetic_panic_flag=%d\n",
+					      m4_panic_record_ok,
+					      m4_sympathetic_panic_flag
+					     );
+				if (m4_sympathetic_panic_flag == false) {
+					/* process M4 record */
+					SCSC_TAG_INFO(MXMAN, "process M4 record\n");
+					mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK & m4_panic_record[3];
+				} else if (r4_panic_record_ok) {
+					/* process R4 record */
+					SCSC_TAG_INFO(MXMAN, "process R4 record\n");
+					mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK & r4_panic_record[3];
+				}
+				print_panic_code(mxman->scsc_panic_code);
+			}
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT /* this is wrong but not sure what is "right" */
+/* "sympathetic panics" are not really a thing on the Neus architecture unless */
+/* generated by the host */
+			if (m4_1_panic_record_ok) {
+				/* FIX: read the sympathetic flag from the M4_1 record, not the M4 one */
+				m4_1_sympathetic_panic_flag = fw_parse_get_m4_sympathetic_panic_flag(m4_1_panic_record);
+				SCSC_TAG_INFO(MXMAN, "m4_1_panic_record_ok=%d m4_1_sympathetic_panic_flag=%d\n",
+					      m4_1_panic_record_ok,
+					      m4_1_sympathetic_panic_flag
+					     );
+				if (m4_1_sympathetic_panic_flag == false) {
+					/* process M4 record */
+					SCSC_TAG_INFO(MXMAN, "process M4_1 record\n");
+					mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK & m4_1_panic_record[3];
+				} else if (r4_panic_record_ok) {
+					/* process R4 record */
+					SCSC_TAG_INFO(MXMAN, "process R4 record\n");
+					mxman->scsc_panic_code |= SCSC_PANIC_SUBCODE_MASK & r4_panic_record[3];
+				}
+				print_panic_code(mxman->scsc_panic_code);
+			}
+#endif
+			break;
+		}
+	}
+}
+
+#define MAX_UHELP_TMO_MS 20000
+/*
+ * workqueue thread
+ */
+static void mxman_failure_work(struct work_struct *work)
+{
+ struct mxman *mxman = container_of(work, struct mxman, failure_work);
+ struct srvman *srvman;
+ struct scsc_mx *mx = mxman->mx;
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);
+ int used = 0, r = 0;
+
+#ifdef CONFIG_ANDROID
+ /* Keep the AP awake for the whole failure/coredump handling sequence. */
+ wake_lock(&mxman->recovery_wake_lock);
+#endif
+
+ /* Tell KIC and any registered firmware notifiers about the crash first. */
+ slsi_kic_system_event(slsi_kic_system_event_category_error,
+ slsi_kic_system_events_subsystem_crashed, GFP_KERNEL);
+
+ blocking_notifier_call_chain(&firmware_chain, SCSC_FW_EVENT_FAILURE, NULL);
+
+ /* Unblock anyone still waiting for the firmware start indication. */
+ SCSC_TAG_INFO(MXMAN, "Complete mm_msg_start_ind_completion\n");
+ complete(&mxman->mm_msg_start_ind_completion);
+ mutex_lock(&mxman->mxman_mutex);
+ srvman = scsc_mx_get_srvman(mxman->mx);
+
+ /* Nothing to do unless the firmware was running or booting. */
+ if (mxman->mxman_state != MXMAN_STATE_STARTED && mxman->mxman_state != MXMAN_STATE_STARTING) {
+ SCSC_TAG_WARNING(MXMAN, "Not in started state: mxman->mxman_state=%d\n", mxman->mxman_state);
+#ifdef CONFIG_ANDROID
+ wake_unlock(&mxman->recovery_wake_lock);
+#endif
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+
+ /**
+ * Set error on mxlog and unregister mxlog msg-handlers.
+ * mxlog ISR and kthread will ignore further messages
+ * but mxlog_thread is NOT stopped here.
+ */
+ mxlog_transport_set_error(scsc_mx_get_mxlog_transport(mx));
+ mxlog_release(scsc_mx_get_mxlog(mx));
+ /* unregister channel handler */
+ mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
+ NULL, NULL);
+ mxmgmt_transport_set_error(scsc_mx_get_mxmgmt_transport(mx));
+ srvman_set_error(srvman);
+ fw_crc_wq_stop(mxman);
+
+ /* Latch the state chosen by mxman_fail()/mxman_freeze(). */
+ mxman->mxman_state = mxman->mxman_next_state;
+
+ /* Only FAILED or FREEZED are legal here; bail out (with a WARN) otherwise. */
+ if (mxman->mxman_state != MXMAN_STATE_FAILED
+ && mxman->mxman_state != MXMAN_STATE_FREEZED) {
+ WARN_ON(mxman->mxman_state != MXMAN_STATE_FAILED
+ && mxman->mxman_state != MXMAN_STATE_FREEZED);
+ SCSC_TAG_ERR(MXMAN, "Bad state=%d\n", mxman->mxman_state);
+#ifdef CONFIG_ANDROID
+ wake_unlock(&mxman->recovery_wake_lock);
+#endif
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+ /* Signal panic to r4 and m4 processors */
+ SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_R4\n");
+ mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_R4, SCSC_MIF_ABS_TARGET_R4); /* SCSC_MIFINTR_TARGET_R4 */
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_M4\n");
+ mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_M4, SCSC_MIF_ABS_TARGET_M4); /* SCSC_MIFINTR_TARGET_M4 */
+ SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_M4_1\n");
+ mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_M4_1, SCSC_MIF_ABS_TARGET_M4_1); /* SCSC_MIFINTR_TARGET_M4 */
+#else
+ SCSC_TAG_INFO(MXMAN, "Setting MIFINTRBIT_RESERVED_PANIC_M4\n");
+ mif->irq_bit_set(mif, MIFINTRBIT_RESERVED_PANIC_M4, SCSC_MIF_ABS_TARGET_M4); /* SCSC_MIFINTR_TARGET_M4 */
+#endif
+
+ /* Freeze all services before touching the panic record / coredump. */
+ srvman_freeze_services(srvman);
+ if (mxman->mxman_state == MXMAN_STATE_FAILED) {
+ mxman->last_panic_time = local_clock();
+ process_panic_record(mxman);
+ SCSC_TAG_INFO(MXMAN, "Trying to schedule coredump\n");
+ SCSC_TAG_INFO(MXMAN, "scsc_release %d.%d.%d.%d\n",
+ SCSC_RELEASE_PRODUCT,
+ SCSC_RELEASE_ITERATION,
+ SCSC_RELEASE_CANDIDATE,
+ SCSC_RELEASE_POINT);
+ SCSC_TAG_INFO(MXMAN, "Auto-recovery: %s\n", mxman_recovery_disabled() ? "off" : "on");
+#ifdef CONFIG_SCSC_WLBTD
+ scsc_wlbtd_get_and_print_build_type();
+#endif
+
+ /* schedule coredump and wait for it to finish */
+ if (disable_auto_coredump) {
+ SCSC_TAG_INFO(MXMAN, "Driver automatic coredump disabled, not launching coredump helper\n");
+ } else {
+ /**
+ * Releasing mxman_mutex here gives way to any
+ * eventually running resume process while waiting for
+ * the usermode helper subsystem to be resurrected,
+ * since this last will be re-enabled right at the end
+ * of the resume process itself.
+ */
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN,
+ "waiting up to %dms for usermode_helper subsystem.\n",
+ MAX_UHELP_TMO_MS);
+ /* Waits for the usermode_helper subsytem to be re-enabled. */
+ if (usermodehelper_read_lock_wait(msecs_to_jiffies(MAX_UHELP_TMO_MS))) {
+ /**
+ * Release immediately the rwsem on usermode_helper
+ * enabled since we anyway already hold a wakelock here
+ */
+ usermodehelper_read_unlock();
+ /**
+ * We claim back the mxman_mutex immediately to avoid anyone
+ * shutting down the chip while we are dumping the coredump.
+ */
+ mutex_lock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Invoking coredump helper\n");
+ slsi_kic_system_event(slsi_kic_system_event_category_recovery,
+ slsi_kic_system_events_coredump_in_progress,
+ GFP_KERNEL);
+#ifdef CONFIG_SCSC_WLBTD
+ /* we can safely call call_wlbtd as we are
+ * in workqueue context
+ */
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /* Collect mxlogger logs */
+ scsc_log_collector_schedule_collection(SCSC_LOG_FW_PANIC, mxman->scsc_panic_code);
+#else
+ r = call_wlbtd(SCSC_SCRIPT_MOREDUMP);
+#endif
+#else
+ r = coredump_helper();
+#endif
+ if (r >= 0) {
+ slsi_kic_system_event(slsi_kic_system_event_category_recovery,
+ slsi_kic_system_events_coredump_done, GFP_KERNEL);
+ }
+
+ /* Build a textual panic summary for the MOREDUMP_COMPLETE notifier. */
+ used = snprintf(panic_record_dump,
+ PANIC_RECORD_DUMP_BUFFER_SZ,
+ "RF HW Ver: 0x%X\n", mxman->rf_hw_ver);
+ used += snprintf(panic_record_dump + used,
+ PANIC_RECORD_DUMP_BUFFER_SZ - used,
+ "SCSC Panic Code:: 0x%X\n", mxman->scsc_panic_code);
+ used += snprintf(panic_record_dump + used,
+ PANIC_RECORD_DUMP_BUFFER_SZ - used,
+ "SCSC Last Panic Time:: %lld\n", mxman->last_panic_time);
+ panic_record_dump_buffer("r4", mxman->last_panic_rec_r,
+ mxman->last_panic_rec_sz,
+ panic_record_dump + used,
+ PANIC_RECORD_DUMP_BUFFER_SZ - used);
+
+ /* Print the host code/reason again so it's near the FW panic
+ * record in the kernel log
+ */
+ print_panic_code(mxman->scsc_panic_code);
+ SCSC_TAG_INFO(MXMAN, "Reason: '%s'\n", mxman->failure_reason[0] ? mxman->failure_reason : "<null>");
+
+ blocking_notifier_call_chain(&firmware_chain,
+ SCSC_FW_EVENT_MOREDUMP_COMPLETE,
+ &panic_record_dump);
+ } else {
+ SCSC_TAG_INFO(MXMAN,
+ "timed out waiting for usermode_helper. Skipping coredump.\n");
+ mutex_lock(&mxman->mxman_mutex);
+ }
+ }
+
+ /* Optionally escalate a firmware failure into a kernel panic (debug aid). */
+ if (is_bug_on_enabled(mx)) {
+ SCSC_TAG_ERR(MX_FILE, "Deliberately panic the kernel due to WLBT firmware failure!\n");
+ SCSC_TAG_ERR(MX_FILE, "calling BUG_ON(1)\n");
+ BUG_ON(1);
+ }
+ /* Clean up the MIF following error handling */
+ if (mif->mif_cleanup && mxman_recovery_disabled())
+ mif->mif_cleanup(mif);
+ }
+ SCSC_TAG_INFO(MXMAN, "Auto-recovery: %s\n",
+ mxman_recovery_disabled() ? "off" : "on");
+
+ if (!mxman_recovery_disabled())
+ srvman_clear_error(srvman);
+ mutex_unlock(&mxman->mxman_mutex);
+ /* With recovery enabled: unfreeze services and reset the whole module. */
+ if (!mxman_recovery_disabled()) {
+ SCSC_TAG_INFO(MXMAN, "Calling srvman_unfreeze_services\n");
+ srvman_unfreeze_services(srvman, mxman->scsc_panic_code);
+ if (scsc_mx_module_reset() < 0)
+ SCSC_TAG_INFO(MXMAN, "failed to call scsc_mx_module_reset\n");
+ atomic_inc(&mxman->recovery_count);
+ }
+
+ /**
+ * If recovery is disabled and an scsc_mx_service_open has been hold up,
+ * release it, rather than wait for the recovery_completion to timeout.
+ */
+ if (mxman_recovery_disabled())
+ complete(&mxman->recovery_completion);
+
+#ifdef CONFIG_ANDROID
+ wake_unlock(&mxman->recovery_wake_lock);
+#endif
+}
+
+/*
+ * Create the single-threaded failure workqueue and bind its work item.
+ * NOTE(review): create_singlethread_workqueue() can return NULL; that
+ * result is not checked here — confirm against callers' expectations.
+ */
+static void failure_wq_init(struct mxman *mxman)
+{
+	INIT_WORK(&mxman->failure_work, mxman_failure_work);
+	mxman->failure_wq = create_singlethread_workqueue("failure_wq");
+}
+
+/* Stop failure handling: cancel pending work, then drain the queue. */
+static void failure_wq_stop(struct mxman *mxman)
+{
+ /* Cancel the failure work and wait for any running instance to finish. */
+ cancel_work_sync(&mxman->failure_work);
+ /* Drain anything still on the queue before a caller tears it down. */
+ flush_workqueue(mxman->failure_wq);
+}
+
+/* Tear down the failure workqueue; must only run once no more failures can be raised. */
+static void failure_wq_deinit(struct mxman *mxman)
+{
+ /* Stop first so no work can be running when the queue is destroyed. */
+ failure_wq_stop(mxman);
+ destroy_workqueue(mxman->failure_wq);
+}
+
+static void failure_wq_start(struct mxman *mxman)
+{
+ if (disable_error_handling)
+ SCSC_TAG_INFO(MXMAN, "error handling disabled\n");
+ else
+ queue_work(mxman->failure_wq, &mxman->failure_work);
+}
+
+/* Dump the current value of every MIF mailbox to the log (diagnostics). */
+static void print_mailboxes(struct mxman *mxman)
+{
+	struct scsc_mif_abs *mif_abs = scsc_mx_get_mif_abs(mxman->mx);
+	struct mifmboxman *mbox = scsc_mx_get_mboxman(mxman->mx);
+	int idx;
+
+	SCSC_TAG_INFO(MXMAN, "Printing mailbox values:\n");
+	for (idx = 0; idx < MIFMBOX_NUM; idx++)
+		SCSC_TAG_INFO(MXMAN, "MBOX_%d: 0x%x\n", idx,
+			      *mifmboxman_get_mbox_ptr(mbox, mif_abs, idx));
+}
+#ifdef CONFIG_SCSC_WLBTD
+/* Work item: ask the wlbtd daemon for a firmware log dump (may sleep). */
+static void wlbtd_work_func(struct work_struct *work)
+{
+ /* require sleep-able workqueue to run successfully */
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /* Collect mxlogger logs */
+ /* Extend to scsc_log_collector_collect() if required */
+#else
+ call_wlbtd(SCSC_SCRIPT_LOGGER_DUMP);
+#endif
+}
+
+/* Bind the (file-global) wlbtd work item; 'mx' is unused, kept for API symmetry. */
+static void wlbtd_wq_init(struct mxman *mx)
+{
+ INIT_WORK(&wlbtd_work, wlbtd_work_func);
+}
+
+/* Wait for any in-flight wlbtd log-dump work to finish; 'mx' is unused. */
+static void wlbtd_wq_deinit(struct mxman *mx)
+{
+ /* flush and block until work is complete */
+ flush_work(&wlbtd_work);
+}
+#endif
+/*
+ * Check for matching f/w and h/w
+ *
+ * Returns 0: f/w and h/w match
+ * 1: f/w and h/w mismatch, try the next config
+ * -ve fatal error
+ */
+static int mxman_hw_ver_check(struct mxman *mxman)
+{
+	/* 0: f/w supports this h/w; 1: mismatch, caller may try the next f/w config */
+	return mx140_file_supported_hw(mxman->mx, mxman->rf_hw_ver) ? 0 : 1;
+}
+
+/*
+ * Select the f/w version to load next
+ */
+static int mxman_select_next_fw(struct mxman *mxman)
+{
+	/* Delegate selection to the filesystem helper, keyed on the detected RF h/w version. */
+	int ret = mx140_file_select_fw(mxman->mx, mxman->rf_hw_ver);
+
+	return ret;
+}
+
+/* Boot MX140 with given f/w */
+/* Boot MX140 with given f/w */
+/*
+ * Returns 0 on success (chip started or already started, users count bumped),
+ * negative errno otherwise. Caller must NOT hold mxman_mutex.
+ */
+static int __mxman_open(struct mxman *mxman)
+{
+ int r;
+ struct srvman *srvman;
+
+ mx140_basedir_file(mxman->mx);
+
+ mutex_lock(&mxman->mxman_mutex);
+ /* Report any crash recorded by a previous run before booting again. */
+ if (mxman->scsc_panic_code) {
+ SCSC_TAG_INFO(MXMAN, "Previously recorded crash panic code: scsc_panic_code=0x%x\n", mxman->scsc_panic_code);
+ SCSC_TAG_INFO(MXMAN, "Reason: '%s'\n", mxman->failure_reason[0] ? mxman->failure_reason : "<null>");
+ print_panic_code(mxman->scsc_panic_code);
+ }
+ SCSC_TAG_INFO(MXMAN, "Auto-recovery: %s\n", mxman_recovery_disabled() ? "off" : "on");
+ srvman = scsc_mx_get_srvman(mxman->mx);
+ if (srvman && srvman->error) {
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
+ return -EINVAL;
+ }
+
+ /* Reset the state after a previous crash during f/w boot */
+ if (mxman->mxman_state == MXMAN_STATE_STARTING)
+ mxman->mxman_state = MXMAN_STATE_STOPPED;
+
+ if (mxman->mxman_state == MXMAN_STATE_STARTED) {
+ /* if in the STARTED state there MUST already be some users */
+ if (WARN_ON(!mxman->users)) {
+ SCSC_TAG_ERR(MXMAN, "ERROR mxman->mxman_state=%d users=%d\n", mxman->mxman_state, mxman->users);
+ mutex_unlock(&mxman->mxman_mutex);
+ return -EINVAL;
+ }
+ mxman->users++;
+ SCSC_TAG_INFO(MXMAN, "Already opened: users=%d\n", mxman->users);
+ mxman_print_versions(mxman);
+ mutex_unlock(&mxman->mxman_mutex);
+ return 0;
+ } else if (mxman->mxman_state == MXMAN_STATE_STOPPED) {
+ /* Cold boot: start the firmware, then (best-effort) start mxlogger. */
+ r = mxman_start(mxman);
+ if (r) {
+ SCSC_TAG_ERR(MXMAN, "maxwell_manager_start() failed r=%d users=%d\n", r, mxman->users);
+ mutex_unlock(&mxman->mxman_mutex);
+ return r;
+ }
+ mxman->users++;
+ mxman->mxman_state = MXMAN_STATE_STARTED;
+ mutex_unlock(&mxman->mxman_mutex);
+ /* Start mxlogger */
+ if (!disable_logger) {
+ static char mxlbin[128];
+
+ r = mx140_exe_path(NULL, mxlbin, sizeof(mxlbin), "mx_logger.sh");
+ if (r) {
+ /* Not found */
+ SCSC_TAG_ERR(MXMAN, "mx_logger.sh path error\n");
+ } else {
+ /* Launch it */
+ /* Failure here is deliberately not propagated: logging is optional. */
+ _mx_exec(mxlbin, UMH_WAIT_EXEC);
+ }
+ }
+ return 0;
+ }
+ /* Any other state is unexpected here (FAILED/FREEZED are handled elsewhere). */
+ WARN_ON(mxman->mxman_state != MXMAN_STATE_STARTED && mxman->mxman_state != MXMAN_STATE_STOPPED);
+ SCSC_TAG_ERR(MXMAN, "Bad state: mxman->mxman_state=%d\n", mxman->mxman_state);
+ mutex_unlock(&mxman->mxman_mutex);
+ return -EIO;
+}
+
+/*
+ * Public open entry point: boot WLBT, then verify the loaded f/w matches the
+ * detected RF h/w version, retrying once with an alternative f/w if not.
+ * Returns 0 on success, negative errno on boot failure.
+ * NOTE(review): if the h/w check still reports a mismatch after the last try,
+ * the loop exits with r == 1 (positive) and that value is returned — confirm
+ * callers treat any non-zero return as failure.
+ */
+int mxman_open(struct mxman *mxman)
+{
+ int r;
+ int try = 0;
+
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mxman->mx);
+
+ for (try = 0; try < 2; try++) {
+ /* Boot WLBT. This will determine the h/w version */
+ r = __mxman_open(mxman);
+ if (r)
+ return r;
+
+ /* On retries, restore USBPLL owner as WLBT */
+ if (try > 0 && mif->mif_restart)
+ mif->mif_restart(mif);
+
+ /* Check the h/w and f/w versions are compatible */
+ r = mxman_hw_ver_check(mxman);
+ if (r > 0) {
+ /* Not compatible, so try next f/w */
+ SCSC_TAG_INFO(MXMAN, "Incompatible h/w 0x%04x vs f/w, close and try next\n", mxman->rf_hw_ver);
+
+ /* Temporarily return USBPLL owner to AP to keep USB alive */
+ if (mif->mif_cleanup)
+ mif->mif_cleanup(mif);
+
+ /* Stop WLBT */
+ mxman_close(mxman);
+
+ /* Select the new f/w for this hw ver */
+ mxman_select_next_fw(mxman);
+ } else
+ break; /* Running or given up */
+ }
+
+ /* If we have stored FM radio parameters, deliver them to FW now */
+ if (r == 0 && mxman->fm_params_pending) {
+ SCSC_TAG_INFO(MXMAN, "Send pending FM params\n");
+ mxman_fm_set_params(&mxman->fm_params);
+ }
+
+ return r;
+}
+
+/*
+ * Shut down the chip and release all per-boot resources.
+ * Caller holds mxman_mutex. Teardown order is significant: hardware reset,
+ * then transports, then component deinit, then unmap of the shared DRAM.
+ */
+static void mxman_stop(struct mxman *mxman)
+{
+ int r;
+ struct scsc_mif_abs *mif;
+
+ SCSC_TAG_INFO(MXMAN, "\n");
+
+ /* If reset is failed, prevent new resets */
+ if (reset_failed) {
+ struct timeval tval = ns_to_timeval(reset_failed_time);
+
+ SCSC_TAG_ERR(MXMAN, "previous reset failed at [%6lu.%06ld], ignoring\n", tval.tv_sec, tval.tv_usec);
+ return;
+ }
+
+ /* The firmware is gone; forget its build id. */
+ (void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");
+
+ mxproc_remove_ctrl_proc_dir(&mxman->mxproc);
+
+ /* Shutdown the hardware */
+ mif = scsc_mx_get_mif_abs(mxman->mx);
+ r = mif->reset(mif, 1);
+ if (r) {
+ /* Latch the failure so future stop attempts are refused (see above). */
+ reset_failed_time = local_clock();
+ SCSC_TAG_INFO(MXMAN, "HW reset failed\n");
+ reset_failed = true;
+
+ /* Save log at point of failure */
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_schedule_collection(SCSC_LOG_HOST_COMMON, SCSC_LOG_HOST_COMMON_REASON_STOP);
+#else
+ mx140_log_dump();
+#endif
+ }
+
+ panicmon_deinit(scsc_mx_get_panicmon(mxman->mx));
+ transports_release(mxman);
+ mxfwconfig_unload(mxman->mx);
+
+ mxlog_release(scsc_mx_get_mxlog(mxman->mx));
+ /* unregister channel handler */
+ mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
+ NULL, NULL);
+ fw_crc_wq_stop(mxman);
+
+ /* Unitialise components (they may perform some checks - e.g. all memory freed) */
+ mxfwconfig_deinit(mxman->mx);
+ mifintrbit_deinit(scsc_mx_get_intrbit(mxman->mx));
+ miframman_deinit(scsc_mx_get_ramman(mxman->mx));
+ miframman_deinit(scsc_mx_get_ramman2(mxman->mx));
+ miframabox_deinit(scsc_mx_get_aboxram(mxman->mx));
+ mifmboxman_deinit(scsc_mx_get_mboxman(mxman->mx));
+#ifdef CONFIG_SCSC_SMAPPER
+ mifsmapper_deinit(scsc_mx_get_smapper(mxman->mx));
+#endif
+#ifdef CONFIG_SCSC_QOS
+ mifqos_deinit(scsc_mx_get_qos(mxman->mx));
+#endif
+ /* Release the MIF memory resources */
+ mif->unmap(mif, mxman->start_dram);
+}
+
+/*
+ * Drop one user reference; on the last close, stop mxlogger, send MM_STOP to
+ * the firmware (STARTED state only) and power the chip down via mxman_stop().
+ * In the FAILED state the stop message is skipped and recovery_completion is
+ * signalled instead.
+ */
+void mxman_close(struct mxman *mxman)
+{
+ int r;
+ struct srvman *srvman;
+
+ mutex_lock(&mxman->mxman_mutex);
+ srvman = scsc_mx_get_srvman(mxman->mx);
+ if (srvman && srvman->error) {
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
+ return;
+ }
+
+ SCSC_TAG_INFO(MXMAN, "\n");
+
+ if (mxman->mxman_state == MXMAN_STATE_STARTED) {
+ if (WARN_ON(!mxman->users)) {
+ SCSC_TAG_ERR(MXMAN, "ERROR users=%d\n", mxman->users);
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+ mxman->users--;
+ if (mxman->users) {
+ SCSC_TAG_INFO(MXMAN, "Current number of users=%d\n", mxman->users);
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+#ifdef CONFIG_SCSC_MXLOGGER
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /* Unregister minimoredump client */
+ scsc_log_collector_unregister_client(&mini_moredump_client);
+#endif
+ /**
+ * Deinit mxlogger on last service stop...BUT before asking for HALT
+ */
+ mxlogger_deinit(mxman->mx, scsc_mx_get_mxlogger(mxman->mx));
+#endif
+ /*
+ * Ask the subsystem to stop (MM_STOP_REQ), and wait
+ * for response (MM_STOP_RSP).
+ */
+ r = send_mm_msg_stop_blocking(mxman);
+ if (r)
+ SCSC_TAG_ERR(MXMAN, "send_mm_msg_stop_blocking failed: r=%d\n", r);
+
+ mxman_stop(mxman);
+ mxman->mxman_state = MXMAN_STATE_STOPPED;
+ mutex_unlock(&mxman->mxman_mutex);
+ } else if (mxman->mxman_state == MXMAN_STATE_FAILED) {
+ /* NOTE(review): unlike the STARTED branch, a zero users count here only
+ * warns and still decrements (users can go to -1) — confirm intended. */
+ if (WARN_ON(!mxman->users))
+ SCSC_TAG_ERR(MXMAN, "ERROR users=%d\n", mxman->users);
+
+ mxman->users--;
+ if (mxman->users) {
+ SCSC_TAG_INFO(MXMAN, "Current number of users=%d\n", mxman->users);
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+#ifdef CONFIG_SCSC_MXLOGGER
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /* Unregister minimoredump client */
+ scsc_log_collector_unregister_client(&mini_moredump_client);
+#endif
+ /**
+ * Deinit mxlogger on last service stop...BUT before asking for HALT
+ */
+ mxlogger_deinit(mxman->mx, scsc_mx_get_mxlogger(mxman->mx));
+#endif
+
+ mxman_stop(mxman);
+ mxman->mxman_state = MXMAN_STATE_STOPPED;
+ mutex_unlock(&mxman->mxman_mutex);
+ /* Wake anyone blocked in open waiting for the failed chip to recover. */
+ complete(&mxman->recovery_completion);
+ } else {
+ WARN_ON(mxman->mxman_state != MXMAN_STATE_STARTED);
+ SCSC_TAG_ERR(MXMAN, "Bad state: mxman->mxman_state=%d\n", mxman->mxman_state);
+ mutex_unlock(&mxman->mxman_mutex);
+ return;
+ }
+}
+
+/* System-error entry point: intentionally a no-op stub in this version. */
+void mxman_syserr(struct mxman *mxman, struct mx_syserr *syserr)
+{
+}
+
+/*
+ * Record a firmware failure and hand off to the failure workqueue.
+ *
+ * @mxman:           manager instance
+ * @scsc_panic_code: firmware panic code to record
+ * @reason:          human-readable reason (may be NULL; treated as empty)
+ *
+ * Only acts in STARTED/STARTING states; otherwise the event is logged and
+ * ignored. The actual recovery/coredump runs later in mxman_failure_work().
+ */
+void mxman_fail(struct mxman *mxman, u16 scsc_panic_code, const char *reason)
+{
+	SCSC_TAG_WARNING(MXMAN, "WLBT FW failure\n");
+
+	/* The STARTING state allows a crash during firmware boot to be handled */
+	if (mxman->mxman_state == MXMAN_STATE_STARTED || mxman->mxman_state == MXMAN_STATE_STARTING) {
+		mxman->mxman_next_state = MXMAN_STATE_FAILED;
+		mxman->scsc_panic_code = scsc_panic_code;
+		/* Guard against a NULL reason: strlcpy() would dereference it. */
+		strlcpy(mxman->failure_reason, reason ? reason : "", sizeof(mxman->failure_reason));
+		/* If recovery is disabled, don't let it be
+		 * re-enabled from now on. Device must reboot
+		 */
+		if (mxman_recovery_disabled())
+			disable_recovery_until_reboot = true;
+
+		failure_wq_start(mxman);
+	} else {
+		SCSC_TAG_WARNING(MXMAN, "Not in MXMAN_STATE_STARTED state, ignore (state %d)\n", mxman->mxman_state);
+	}
+}
+
+/* Mark the running firmware as frozen and kick the failure workqueue. */
+void mxman_freeze(struct mxman *mxman)
+{
+	SCSC_TAG_WARNING(MXMAN, "WLBT FW frozen\n");
+
+	if (mxman->mxman_state != MXMAN_STATE_STARTED) {
+		SCSC_TAG_WARNING(MXMAN, "Not in MXMAN_STATE_STARTED state, ignore (state %d)\n", mxman->mxman_state);
+		return;
+	}
+
+	mxman->mxman_next_state = MXMAN_STATE_FREEZED;
+	failure_wq_start(mxman);
+}
+
+/*
+ * One-time initialisation of a manager instance: workqueues, locks,
+ * completions, procfs entries and initial STOPPED state. Also publishes
+ * this instance as the module-global 'active_mxman'.
+ */
+void mxman_init(struct mxman *mxman, struct scsc_mx *mx)
+{
+ mxman->mx = mx;
+ mxman->suspended = 0;
+#ifdef CONFIG_SCSC_FM
+ mxman->on_halt_ldos_on = 0;
+ mxman->fm_params_pending = 0;
+#endif
+ fw_crc_wq_init(mxman);
+ failure_wq_init(mxman);
+#ifdef CONFIG_SCSC_WLBTD
+ wlbtd_wq_init(mxman);
+#endif
+ mutex_init(&mxman->mxman_mutex);
+ init_completion(&mxman->recovery_completion);
+#ifdef CONFIG_ANDROID
+ wake_lock_init(&mxman->recovery_wake_lock, WAKE_LOCK_SUSPEND, "mxman_recovery");
+#endif
+
+ /* set the initial state */
+ mxman->mxman_state = MXMAN_STATE_STOPPED;
+ /* No firmware loaded yet: build id and ttid are placeholders. */
+ (void)snprintf(mxman->fw_build_id, sizeof(mxman->fw_build_id), "unknown");
+ memcpy(saved_fw_build_id, mxman->fw_build_id,
+ sizeof(saved_fw_build_id));
+ (void)snprintf(mxman->fw_ttid, sizeof(mxman->fw_ttid), "unknown");
+ mxproc_create_info_proc_dir(&mxman->mxproc, mxman);
+ active_mxman = mxman;
+
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+ mxman_create_sysfs_memdump();
+#endif
+ scsc_lerna_init();
+}
+
+/* Tear down a manager instance in reverse order of mxman_init(). */
+void mxman_deinit(struct mxman *mxman)
+{
+ scsc_lerna_deinit();
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+ mxman_destroy_sysfs_memdump();
+#endif
+ /* Unpublish before freeing resources so late callers see no instance. */
+ active_mxman = NULL;
+ mxproc_remove_info_proc_dir(&mxman->mxproc);
+ fw_crc_wq_deinit(mxman);
+ failure_wq_deinit(mxman);
+#ifdef CONFIG_SCSC_WLBTD
+ wlbtd_wq_deinit(mxman);
+#endif
+#ifdef CONFIG_ANDROID
+ wake_lock_destroy(&mxman->recovery_wake_lock);
+#endif
+ mutex_destroy(&mxman->mxman_mutex);
+}
+
+/*
+ * Ask the running firmware to panic itself (debug/test facility).
+ * Returns 0 if the MM_FORCE_PANIC message was sent, -EINVAL if the chip is
+ * not in the STARTED state or the service manager is in error.
+ */
+int mxman_force_panic(struct mxman *mxman)
+{
+	struct ma_msg_packet message = { .ma_msg = MM_FORCE_PANIC };
+	struct srvman *srvman;
+	int ret = -EINVAL;
+
+	mutex_lock(&mxman->mxman_mutex);
+	srvman = scsc_mx_get_srvman(mxman->mx);
+	if (srvman && srvman->error) {
+		SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
+	} else if (mxman->mxman_state == MXMAN_STATE_STARTED) {
+		mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx),
+				      MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT,
+				      &message, sizeof(message));
+		ret = 0;
+	}
+	mutex_unlock(&mxman->mxman_mutex);
+	return ret;
+}
+
+/*
+ * System-suspend hook: let each service veto the suspend, then notify the
+ * firmware (MM_HOST_SUSPEND) if it is running. Returns 0 on success or the
+ * (positive/negative) value a service returned to cancel the suspend.
+ */
+int mxman_suspend(struct mxman *mxman)
+{
+ struct srvman *srvman;
+ struct ma_msg_packet message = { .ma_msg = MM_HOST_SUSPEND };
+ int ret;
+
+ SCSC_TAG_INFO(MXMAN, "\n");
+
+ mutex_lock(&mxman->mxman_mutex);
+ srvman = scsc_mx_get_srvman(mxman->mx);
+ if (srvman && srvman->error) {
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
+ /* Report success so the system suspend is not blocked by the error. */
+ return 0;
+ }
+
+ /* Call Service suspend callbacks */
+ ret = srvman_suspend_services(srvman);
+ if (ret) {
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Service Suspend canceled - ignore %d\n", ret);
+ return ret;
+ }
+
+ if (mxman->mxman_state == MXMAN_STATE_STARTED) {
+ SCSC_TAG_INFO(MXMAN, "MM_HOST_SUSPEND\n");
+#ifdef CONFIG_SCSC_MXLOGGER
+ /* Drop a sync marker into the firmware log stream for correlation. */
+ mxlogger_generate_sync_record(scsc_mx_get_mxlogger(mxman->mx), MXLOGGER_SYN_SUSPEND);
+#endif
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
+ mxman->suspended = 1;
+ atomic_inc(&mxman->suspend_count);
+ }
+ mutex_unlock(&mxman->mxman_mutex);
+ return 0;
+}
+
+#ifdef CONFIG_SCSC_FM
+/* FM started: request that shared LDOs stay on across WLBT halt. */
+void mxman_fm_on_halt_ldos_on(void)
+{
+	/* Should always be an active mxman unless module is unloaded */
+	if (!active_mxman) {
+		SCSC_TAG_ERR(MXMAN, "No active MXMAN\n");
+		return;
+	}
+
+	/*
+	 * WLBT now gates the LDOs with TCXO (instead of leaving them always
+	 * on) to save power in deep sleep, but FM needs them on. Remember the
+	 * FM status here so it can be passed to FW at its next init, by which
+	 * time this driver context has been lost.
+	 */
+	is_fm_on = 1;
+	active_mxman->on_halt_ldos_on = 1;
+}
+
+/* FM stopped: shared LDOs no longer need to be held on at WLBT halt. */
+void mxman_fm_on_halt_ldos_off(void)
+{
+	/* Should always be an active mxman unless module is unloaded */
+	if (!active_mxman) {
+		SCSC_TAG_ERR(MXMAN, "No active MXMAN\n");
+		return;
+	}
+
+	/*
+	 * Newer FW gates the shared LDOs via TCXO and no longer needs an
+	 * explicit always-off request at WLBT halt, but the "off" request is
+	 * still passed down for backwards compatibility with old FW.
+	 */
+	is_fm_on = 0;
+	active_mxman->on_halt_ldos_on = 0;
+}
+
+/* Update parameters passed to WLBT FM */
+int mxman_fm_set_params(struct wlbt_fm_params *params)
+{
+ /* Should always be an active mxman unless module is unloaded */
+ if (!active_mxman) {
+ SCSC_TAG_ERR(MXMAN, "No active MXMAN\n");
+ return -EINVAL;
+ }
+
+ /* Params are no longer valid (FM stopped) */
+ if (!params) {
+ active_mxman->fm_params_pending = 0;
+ SCSC_TAG_INFO(MXMAN, "FM params cleared\n");
+ return 0;
+ }
+
+ /* Once set the value needs to be remembered for each time WLBT starts */
+ active_mxman->fm_params = *params;
+ active_mxman->fm_params_pending = 1;
+
+ if (send_fm_params_to_active_mxman(params)) {
+ SCSC_TAG_INFO(MXMAN, "FM params sent to FW\n");
+ return 0;
+ }
+
+ /* Stored for next time FW is up */
+ SCSC_TAG_INFO(MXMAN, "FM params stored\n");
+
+ return -EAGAIN;
+}
+#endif
+
+/*
+ * System-resume hook: notify the running firmware (MM_HOST_RESUME) and then
+ * call each service's resume callback. Mirrors mxman_suspend().
+ */
+void mxman_resume(struct mxman *mxman)
+{
+ struct srvman *srvman;
+ struct ma_msg_packet message = { .ma_msg = MM_HOST_RESUME };
+ int ret;
+
+ SCSC_TAG_INFO(MXMAN, "\n");
+
+ mutex_lock(&mxman->mxman_mutex);
+ srvman = scsc_mx_get_srvman(mxman->mx);
+ if (srvman && srvman->error) {
+ mutex_unlock(&mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Called during error - ignore\n");
+ return;
+ }
+
+ if (mxman->mxman_state == MXMAN_STATE_STARTED) {
+ SCSC_TAG_INFO(MXMAN, "MM_HOST_RESUME\n");
+#ifdef CONFIG_SCSC_MXLOGGER
+ /* Drop a sync marker into the firmware log stream for correlation. */
+ mxlogger_generate_sync_record(scsc_mx_get_mxlogger(mxman->mx), MXLOGGER_SYN_RESUME);
+#endif
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(mxman->mx), MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, &message, sizeof(message));
+ mxman->suspended = 0;
+ }
+
+ /* Call Service Resume callbacks */
+ ret = srvman_resume_services(srvman);
+ if (ret)
+ SCSC_TAG_INFO(MXMAN, "Service Resume error %d\n", ret);
+
+ mutex_unlock(&mxman->mxman_mutex);
+}
+
+/*
+ * Cleanup callback passed to call_usermodehelper_setup(): frees the argv
+ * vector allocated by argv_split() in _mx_exec(). Invoked by the UMH core
+ * on both success and failure of the exec.
+ */
+static void _mx_exec_cleanup(struct subprocess_info *sp_info)
+{
+ if (!sp_info) {
+ SCSC_TAG_ERR(MXMAN, "sp_info is null\n");
+ return;
+ }
+ if (!sp_info->argv) {
+ SCSC_TAG_ERR(MXMAN, "argv is null\n");
+ return;
+ }
+
+ SCSC_TAG_INFO(MXMAN, "0x%p\n", sp_info->argv);
+ argv_free(sp_info->argv);
+}
+
+/* prog - full path to programme
+ * wait_exec - one of UMH_WAIT_EXEC, UMH_WAIT_PROC, UMH_KILLABLE, UMH_NO_WAIT
+ */
+static int _mx_exec(char *prog, int wait_exec)
+{
+ /**
+ * ENV vars ANDROID_ROOT and ANDROID_DATA are needed to have
+ * the UMH spawned process working properly (as an example finding
+ * Timezones files)
+ */
+ static char const *envp[] = { "HOME=/", "PATH=/sbin:/system/sbin:/system/bin:/system/xbin:/vendor/bin:/vendor/xbin",
+ "ANDROID_ROOT=/system", "ANDROID_DATA=/data", NULL };
+ char **argv;
+ char argv_str[STRING_BUFFER_MAX_LENGTH];
+ int argc, result, len;
+ struct subprocess_info *sp_info;
+
+ /* Copy the command line into a mutable buffer for argv_split(). */
+ len = snprintf(argv_str, STRING_BUFFER_MAX_LENGTH, "%s", prog);
+ if (len >= STRING_BUFFER_MAX_LENGTH) {
+ /* snprintf() returns a value of buffer size of greater if it had to truncate the format string. */
+ SCSC_TAG_ERR(MXMAN,
+ "exec string buffer insufficient (buffer size=%d, actual string=%d)\n",
+ STRING_BUFFER_MAX_LENGTH, len);
+ return -E2BIG;
+ }
+
+ /* Kernel library function argv_split() will allocate memory for argv. */
+ argc = 0;
+ argv = argv_split(GFP_KERNEL, argv_str, &argc);
+ if (!argv) {
+ SCSC_TAG_ERR(MXMAN, "failed to allocate argv for userspace helper\n");
+ return -ENOMEM;
+ }
+
+ /* Check the argument count just to avoid future abuse */
+ if (argc > NUMBER_OF_STRING_ARGS) {
+ SCSC_TAG_ERR(MXMAN,
+ "exec string has the wrong number of arguments (has %d, should be %d)\n",
+ argc, NUMBER_OF_STRING_ARGS);
+ argv_free(argv);
+ return -E2BIG;
+ }
+
+ /* Allocate sp_info and initialise pointers to argv and envp. */
+ /* _mx_exec_cleanup() will free argv once the UMH core is done with it. */
+ sp_info = call_usermodehelper_setup(argv[0], argv, (char **)envp,
+ GFP_KERNEL, NULL, _mx_exec_cleanup,
+ NULL);
+
+ if (!sp_info) {
+ SCSC_TAG_ERR(MXMAN, "call_usermodehelper_setup() failed\n");
+ argv_free(argv);
+ return -EIO;
+ }
+
+ /* Put sp_info into work queue for processing by khelper. */
+ SCSC_TAG_INFO(MXMAN, "Launch %s\n", prog);
+
+ result = call_usermodehelper_exec(sp_info, wait_exec);
+
+ if (result != 0) {
+ /*
+ * call_usermodehelper_exec() will free sp_info and call any cleanup function
+ * whether it succeeds or fails, so do not free argv.
+ */
+ if (result == -ENOENT)
+ SCSC_TAG_ERR(MXMAN, "call_usermodehelper() failed with %d, Executable not found %s'\n",
+ result, prog);
+ else
+ SCSC_TAG_ERR(MXMAN, "call_usermodehelper_exec() failed with %d\n", result);
+ }
+ return result;
+}
+
+#if defined(CONFIG_SCSC_PRINTK) && !defined(CONFIG_SCSC_WLBTD)
+/*
+ * Minimal vfs_stat() wrapper used only to test whether a file exists.
+ * Temporarily widens the address limit (set_fs/get_ds) so vfs_stat()
+ * accepts a kernel-space path — legacy pattern for older kernels; the
+ * original limit is restored before returning. Returns vfs_stat()'s result.
+ */
+static int __stat(const char *file)
+{
+ struct kstat stat;
+ mm_segment_t fs;
+ int r;
+
+ fs = get_fs();
+ set_fs(get_ds());
+ r = vfs_stat(file, &stat);
+ set_fs(fs);
+
+ return r;
+}
+#endif
+
+/*
+ * Trigger a dump of the WLBT firmware logs.
+ * Returns 0 on success (or when CONFIG_SCSC_PRINTK is off), negative errno
+ * if the dump script could not be found or launched.
+ */
+int mx140_log_dump(void)
+{
+#ifdef CONFIG_SCSC_PRINTK
+	int r;
+# ifdef CONFIG_SCSC_WLBTD
+	/*
+	 * schedule_work() returns a bool (true if it queued the work, false
+	 * if it was already pending) — NOT an error code. Returning that
+	 * value made a successful queueing look like a failure to callers,
+	 * so both outcomes are treated as success here.
+	 */
+	(void)schedule_work(&wlbtd_work);
+	r = 0;
+# else
+	char mxlbin[128];
+
+	r = mx140_exe_path(NULL, mxlbin, sizeof(mxlbin), "mx_logger_dump.sh");
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "mx_logger_dump.sh path error\n");
+	} else {
+		/*
+		 * Test presence of script before invoking, to suppress
+		 * unnecessary error message if not installed.
+		 */
+		r = __stat(mxlbin);
+		if (r) {
+			SCSC_TAG_DEBUG(MXMAN, "%s not installed\n", mxlbin);
+			return r;
+		}
+		SCSC_TAG_INFO(MXMAN, "Invoking mx_logger_dump.sh UHM\n");
+		r = _mx_exec(mxlbin, UMH_WAIT_EXEC);
+		if (r)
+			SCSC_TAG_ERR(MXMAN, "mx_logger_dump.sh err:%d\n", r);
+	}
+# endif /* CONFIG_SCSC_WLBTD */
+	return r;
+#else
+	return 0;
+#endif
+}
+EXPORT_SYMBOL(mx140_log_dump);
+
+/* Report whether automatic firmware recovery is currently disabled. */
+bool mxman_recovery_disabled(void)
+{
+#ifdef CONFIG_SCSC_WLBT_AUTORECOVERY_PERMANENT_DISABLE
+	/* Kill switch that overrides the module parameter, for platforms
+	 * that enable auto-recovery against our wishes.
+	 */
+	SCSC_TAG_ERR(MXMAN, "CONFIG_SCSC_WLBT_AUTORECOVERY_PERMANENT_DISABLE is set\n");
+	return true;
+#endif
+	/* Once the FW has panicked while recovery was off, it stays off
+	 * until reboot — the horse has bolted.
+	 */
+	if (disable_recovery_until_reboot)
+		return true;
+
+	if (disable_recovery_handling == MEMDUMP_FILE_FOR_RECOVERY)
+		return disable_recovery_from_memdump_file;
+
+	return disable_recovery_handling != 0;
+}
+EXPORT_SYMBOL(mxman_recovery_disabled);
+
+/**
+ * This returns the last known loaded FW build_id
+ * even when the fw is NOT running at the time of the request.
+ *
+ * It could be used anytime by Android Enhanced Logging
+ * to query for fw version.
+ */
+/* Copy the last known FW build id into 'version' (truncated to ver_sz). */
+void mxman_get_fw_version(char *version, size_t ver_sz)
+{
+ /* unavailable only if chip not probed ! */
+ snprintf(version, ver_sz, "%s", saved_fw_build_id);
+}
+EXPORT_SYMBOL(mxman_get_fw_version);
+
+/* Format the driver release version into 'version' (truncated to ver_sz). */
+void mxman_get_driver_version(char *version, size_t ver_sz)
+{
+ /* IMPORTANT - Do not change the formatting as User space tooling is parsing the string
+ * to read SAP fapi versions. */
+ snprintf(version, ver_sz, "drv_ver: %u.%u.%u.%u",
+ SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION, SCSC_RELEASE_CANDIDATE, SCSC_RELEASE_POINT);
+#ifdef CONFIG_SCSC_WLBTD
+ /* Also logs the wlbtd-reported build type as a side effect. */
+ scsc_wlbtd_get_and_print_build_type();
+#endif
+}
+
+/* Subscribe to firmware events (SCSC_FW_EVENT_FAILURE / MOREDUMP_COMPLETE). */
+int mxman_register_firmware_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&firmware_chain, nb);
+}
+
+/* Unsubscribe a notifier previously registered with mxman_register_firmware_notifier(). */
+int mxman_unregister_firmware_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&firmware_chain, nb);
+}
+
+
+/*
+ * Send a lerna configuration message (MM_LERNA_CONFIG) to the firmware.
+ * Returns 0 on success or when there is nothing to send, -EINVAL when no
+ * instance exists, -EAGAIN when the firmware is not started.
+ *
+ * NOTE(review): after the NULL fallback, every access below uses the global
+ * 'active_mxman' rather than the (possibly different) 'mxman' argument —
+ * verify whether callers can ever pass an instance other than active_mxman.
+ */
+int mxman_lerna_send(struct mxman *mxman, void *message, u32 message_size)
+{
+ struct srvman *srvman = NULL;
+
+ /* May be called when WLBT is off, so find the context in this case */
+ if (!mxman)
+ mxman = active_mxman;
+
+ if (!active_mxman) {
+ SCSC_TAG_ERR(MXMAN, "No active MXMAN\n");
+ return -EINVAL;
+ }
+
+ if (!message || (message_size == 0)) {
+ SCSC_TAG_INFO(MXMAN, "No lerna request provided.\n");
+ return 0;
+ }
+
+ mutex_lock(&active_mxman->mxman_mutex);
+ srvman = scsc_mx_get_srvman(active_mxman->mx);
+ if (srvman && srvman->error) {
+ mutex_unlock(&active_mxman->mxman_mutex);
+ SCSC_TAG_INFO(MXMAN, "Lerna configuration called during error - ignore\n");
+ return 0;
+ }
+
+ if (active_mxman->mxman_state == MXMAN_STATE_STARTED) {
+ SCSC_TAG_INFO(MXMAN, "MM_LERNA_CONFIG\n");
+ mxmgmt_transport_send(scsc_mx_get_mxmgmt_transport(active_mxman->mx),
+ MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT, message,
+ message_size);
+ mutex_unlock(&active_mxman->mxman_mutex);
+ return 0;
+ }
+
+ SCSC_TAG_INFO(MXMAN, "MXMAN is NOT STARTED...cannot send MM_LERNA_CONFIG msg.\n");
+ mutex_unlock(&active_mxman->mxman_mutex);
+ return -EAGAIN;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _MAXWELL_MANAGER_H
+#define _MAXWELL_MANAGER_H
+#include <linux/workqueue.h>
+#include "fwhdr.h"
+#include "mxmgmt_transport.h"
+#include "mxproc.h"
+#include <scsc/scsc_mx.h>
+#ifdef CONFIG_ANDROID
+#include <linux/wakelock.h>
+#endif
+
+struct mxman;
+
+void mxman_init(struct mxman *mxman, struct scsc_mx *mx);
+void mxman_deinit(struct mxman *mxman);
+int mxman_open(struct mxman *mxman);
+void mxman_close(struct mxman *mxman);
+void mxman_fail(struct mxman *mxman, u16 scsc_panic_code, const char *reason);
+void mxman_freeze(struct mxman *mxman);
+int mxman_force_panic(struct mxman *mxman);
+int mxman_suspend(struct mxman *mxman);
+void mxman_resume(struct mxman *mxman);
+void mxman_show_last_panic(struct mxman *mxman);
+
+#ifdef CONFIG_SCSC_FM
+void mxman_fm_on_halt_ldos_on(void);
+void mxman_fm_on_halt_ldos_off(void);
+int mxman_fm_set_params(struct wlbt_fm_params *params);
+#endif
+int mxman_lerna_send(struct mxman *mxman, void *data, u32 size);
+
+/* Life-cycle states of the Maxwell manager state machine. */
+enum mxman_state {
+ MXMAN_STATE_STOPPED, /* chip off / not booted */
+ MXMAN_STATE_STARTING, /* firmware boot in progress */
+ MXMAN_STATE_STARTED, /* firmware running */
+ MXMAN_STATE_FAILED, /* firmware failed; set via mxman_fail() */
+ MXMAN_STATE_FREEZED, /* firmware frozen; set via mxman_freeze() */
+};
+
+#define SCSC_FAILURE_REASON_LEN 256
+
+/* Per-chip Maxwell manager state. Most fields are guarded by mxman_mutex. */
+struct mxman {
+ struct scsc_mx *mx; /* owning core instance */
+ int users; /* open reference count (mxman_open/mxman_close) */
+ void *start_dram; /* mapped MIF DRAM base; unmapped in mxman_stop() */
+ struct workqueue_struct *fw_crc_wq;
+ struct delayed_work fw_crc_work;
+ struct workqueue_struct *failure_wq; /* runs mxman_failure_work() */
+ struct work_struct failure_work;
+ char *fw;
+ u32 fw_image_size;
+ struct completion mm_msg_start_ind_completion; /* completed on FW start ind or failure */
+ struct completion mm_msg_halt_rsp_completion;
+ struct fwhdr fwhdr;
+ struct mxconf *mxconf;
+ enum mxman_state mxman_state; /* current state */
+ enum mxman_state mxman_next_state; /* state the failure work will latch */
+ struct mutex mxman_mutex; /* serialises open/close/suspend/resume/failure */
+ struct mxproc mxproc;
+ int suspended; /* set in mxman_suspend(), cleared in mxman_resume() */
+ atomic_t suspend_count;
+ atomic_t recovery_count;
+ atomic_t boot_count;
+ bool check_crc;
+ char fw_build_id[FW_BUILD_ID_SZ]; /* Defined in SC-505846-SW */
+ struct completion recovery_completion; /* signalled when a failed chip finishes closing */
+#ifdef CONFIG_ANDROID
+ struct wake_lock recovery_wake_lock; /* held across failure handling */
+#endif
+ u32 rf_hw_ver;
+ u16 scsc_panic_code; /* last firmware panic code; 0 if none */
+ u64 last_panic_time;
+ u32 last_panic_rec_r[PANIC_RECORD_SIZE]; /* Must be at least SCSC_R4_V2_MINOR_53 */
+ u16 last_panic_rec_sz;
+#ifdef CONFIG_SCSC_FM
+ u32 on_halt_ldos_on; /* keep shared LDOs on at halt while FM is active */
+#endif
+ char failure_reason[SCSC_FAILURE_REASON_LEN]; /* previous failure reason */
+ struct wlbt_fm_params fm_params; /* FM freq info */
+ int fm_params_pending; /* FM freq info waiting to be delivered to FW */
+
+ char fw_ttid[FW_TTID_SZ]; /* Defined in SC-505846-SW */
+};
+
+void mxman_register_gdb_channel(struct scsc_mx *mx, mxmgmt_channel_handler handler, void *data);
+void mxman_send_gdb_channel(struct scsc_mx *mx, void *data, size_t length);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+#define SCSC_CHV_ARGV_ADDR_OFFSET 0x200008
+
+extern int chv_run;
+#endif
+
+#define SCSC_SYSERR_HOST_SERVICE_SHIFT 4
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * Maxwell management transport (implementation)
+ */
+
+/** Implements */
+#include "mxmgmt_transport.h"
+
+/** Uses */
+#include <scsc/scsc_logring.h>
+#include <linux/module.h>
+#include "mxmgmt_transport_format.h"
+#include "mifintrbit.h"
+
+/* Flag that an error has occurred so the I/O thread processing should stop */
+void mxmgmt_transport_set_error(struct mxmgmt_transport *mxmgmt_transport)
+{
+ SCSC_TAG_WARNING(MXMGT_TRANS, "I/O thread processing is suspended\n");
+
+ mxmgmt_transport->mxmgmt_thread.block_thread = 1;
+}
+
+/** MIF Interrupt handler for writes made to the AP */
+static void input_irq_handler(int irq, void *data)
+{
+	struct mxmgmt_transport *transport = (struct mxmgmt_transport *)data;
+	struct mxmgmt_thread *io_thread = &transport->mxmgmt_thread;
+	struct scsc_mif_abs *mif;
+
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "IN\n");
+
+	/* Ack the interrupt before doing anything else so a new one
+	 * arriving during processing cannot be lost. */
+	mif = scsc_mx_get_mif_abs(transport->mx);
+	mif->irq_bit_clear(mif, irq);
+
+	if (io_thread->task == NULL) {
+		SCSC_TAG_ERR(MXMGT_TRANS, "th is NOT running\n");
+		return;
+	}
+
+	/*
+	 * While an error is pending, drop inbound messages silently until the
+	 * error has been processed and the system reinitialised. Do not touch
+	 * any further interrupt state here: this may run in atomic context or
+	 * with interrupts disabled.
+	 */
+	if (io_thread->block_thread == 1) {
+		SCSC_TAG_DEBUG(MXMGT_TRANS, "discard message.\n");
+		return;
+	}
+
+	/* The other side queued data on the input stream: kick the thread. */
+	io_thread->wakeup_flag = 1;
+	wake_up_interruptible(&io_thread->wakeup_q);
+}
+
+/** MIF Interrupt handler for acknowledging writes made by the AP */
+static void output_irq_handler(int irq, void *data)
+{
+	struct mxmgmt_transport *transport = (struct mxmgmt_transport *)data;
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(transport->mx);
+
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "OUT\n");
+
+	/* The FW consumed data from the output stream; we do not act on
+	 * that, so simply ack the interrupt. */
+	mif->irq_bit_clear(mif, irq);
+
+	/* The ack IRQ is unused by the driver - mask it from now on so it
+	 * cannot raise spurious host-wakes. */
+	mif->irq_bit_mask(mif, irq);
+}
+
+
+/*
+ * Park the I/O thread until kthread_stop() is called.
+ *
+ * kthread_stop() cannot handle the thread exiting while
+ * kthread_should_stop() is false, so sleep here until kthread_stop()
+ * wakes us up.
+ */
+static void thread_wait_until_stopped(struct mxmgmt_transport *mxmgmt_transport)
+{
+	struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
+
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "%s waiting for the stop signal.\n", th->name);
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (!kthread_should_stop()) {
+		SCSC_TAG_DEBUG(MXMGT_TRANS, "%s schedule....\n", th->name);
+		schedule();
+	}
+	/* Restore the run state explicitly: if kthread_should_stop() was
+	 * already true we never scheduled, and would otherwise return with
+	 * TASK_INTERRUPTIBLE still set on the current task. */
+	__set_current_state(TASK_RUNNING);
+
+	th->task = NULL;
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "%s exiting....\n", th->name);
+}
+
+/**
+ * A thread that forwards messages sent across the transport to
+ * the registered handlers for each channel.
+ */
+static int mxmgmt_thread_function(void *arg)
+{
+ struct mxmgmt_transport *mxmgmt_transport = (struct mxmgmt_transport *)arg;
+ struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
+ const struct mxmgr_message *current_message;
+ int ret;
+
+ complete(&th->completion);
+
+ th->block_thread = 0;
+ while (!kthread_should_stop()) {
+ /* wait until an error occurs, or we need to process something. */
+
+ ret = wait_event_interruptible(th->wakeup_q,
+ (th->wakeup_flag && !th->block_thread) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop()) {
+ SCSC_TAG_DEBUG(MXMGT_TRANS, "signalled to exit\n");
+ break;
+ }
+ if (ret < 0) {
+ SCSC_TAG_DEBUG(MXMGT_TRANS, "wait_event returned %d, thread will exit\n", ret);
+ thread_wait_until_stopped(mxmgmt_transport);
+ break;
+ }
+ th->wakeup_flag = 0;
+ SCSC_TAG_DEBUG(MXMGT_TRANS, "wokeup: r=%d\n", ret);
+ /* Forward each pending message to the applicable channel handler */
+ current_message = mif_stream_peek(&mxmgmt_transport->mif_istream, NULL);
+ while (current_message != NULL) {
+ mutex_lock(&mxmgmt_transport->channel_handler_mutex);
+ if (current_message->channel_id < MMTRANS_NUM_CHANNELS &&
+ mxmgmt_transport->channel_handler_fns[current_message->channel_id]) {
+ SCSC_TAG_DEBUG(MXMGT_TRANS, "Calling handler for channel_id: %d\n", current_message->channel_id);
+ (*mxmgmt_transport->channel_handler_fns[current_message->channel_id])(current_message->payload,
+ mxmgmt_transport->channel_handler_data[current_message->channel_id]);
+ } else
+ /* HERE: Invalid channel or no handler, raise fault or log message */
+ SCSC_TAG_WARNING(MXMGT_TRANS, "Invalid channel or no handler channel_id: %d\n", current_message->channel_id);
+ mutex_unlock(&mxmgmt_transport->channel_handler_mutex);
+ /* Remove the current message from the buffer before processing the next
+ * one in case it generated another message, otherwise it's possible we
+ * could run out of space in the stream before we get through all the messages. */
+ mif_stream_peek_complete(&mxmgmt_transport->mif_istream, current_message);
+ current_message = mif_stream_peek(&mxmgmt_transport->mif_istream, NULL);
+ }
+ }
+
+ SCSC_TAG_DEBUG(MXMGT_TRANS, "exiting....\n");
+ complete(&th->completion);
+ return 0;
+}
+
+
+/*
+ * Start the transport I/O thread and wait (bounded) for it to signal
+ * start-up via th->completion. Returns 0 on success or a negative errno.
+ */
+static int mxmgmt_thread_start(struct mxmgmt_transport *mxmgmt_transport)
+{
+	int err;
+	struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
+
+	if (th->task != NULL) {
+		SCSC_TAG_WARNING(MXMGT_TRANS, "%s thread already started\n", th->name);
+		return 0;
+	}
+
+	/* Initialise thread structure */
+	th->block_thread = 1;
+	init_waitqueue_head(&th->wakeup_q);
+	init_completion(&th->completion);
+	th->wakeup_flag = 0;
+	snprintf(th->name, MXMGMT_THREAD_NAME_MAX_LENGTH, "mxmgmt_thread");
+
+	/* Start the kernel thread */
+	th->task = kthread_run(mxmgmt_thread_function, mxmgmt_transport, "%s", th->name);
+	if (IS_ERR(th->task)) {
+		err = (int)PTR_ERR(th->task);
+		/* Do not leave an ERR_PTR behind: the "already started" check
+		 * above and mgmt_thread_stop() both compare th->task against
+		 * NULL and would otherwise act on an invalid pointer. */
+		th->task = NULL;
+		SCSC_TAG_ERR(MXMGT_TRANS, "error creating kthread\n");
+		return err;
+	}
+
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "Started thread %s\n", th->name);
+
+	/* wait until thread is started */
+#define MGMT_THREAD_START_TMO_SEC (3)
+	err = wait_for_completion_timeout(&th->completion, msecs_to_jiffies(MGMT_THREAD_START_TMO_SEC*1000));
+	if (err == 0) {
+		SCSC_TAG_ERR(MXMGT_TRANS, "timeout in starting thread\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* Stop the I/O thread and wait (bounded) for it to signal its exit. */
+static void mgmt_thread_stop(struct mxmgmt_transport *mxmgmt_transport)
+{
+	struct mxmgmt_thread *th = &mxmgmt_transport->mxmgmt_thread;
+	unsigned long remaining;
+
+	if (!th->task) {
+		SCSC_TAG_WARNING(MXMGT_TRANS, "%s mgmt_thread is already stopped\n", th->name);
+		return;
+	}
+	SCSC_TAG_DEBUG(MXMGT_TRANS, "Stopping %s mgmt_thread\n", th->name);
+	kthread_stop(th->task);
+
+	/* wait until th stopped */
+#define MGMT_THREAD_STOP_TMO_SEC (3)
+	remaining = wait_for_completion_timeout(&th->completion,
+						msecs_to_jiffies(MGMT_THREAD_STOP_TMO_SEC*1000));
+	if (remaining)
+		th->task = NULL;
+	else
+		SCSC_TAG_ERR(MXMGT_TRANS, "Failed to stop mgmt_thread %s\n",
+			     th->name);
+}
+
+/* Tear down the transport: stop the I/O thread, then release both streams. */
+void mxmgmt_transport_release(struct mxmgmt_transport *mxmgmt_transport)
+{
+	struct mxmgmt_transport *t = mxmgmt_transport;
+
+	mgmt_thread_stop(t);
+	mif_stream_release(&t->mif_istream);
+	mif_stream_release(&t->mif_ostream);
+}
+
+/* Serialise both stream configurations into the shared config area that
+ * the firmware reads at boot. */
+void mxmgmt_transport_config_serialise(struct mxmgmt_transport *mxmgmt_transport,
+				       struct mxtransconf *trans_conf)
+{
+	struct mxmgmt_transport *t = mxmgmt_transport;
+
+	mif_stream_config_serialise(&t->mif_istream, &trans_conf->to_ap_stream_conf);
+	mif_stream_config_serialise(&t->mif_ostream, &trans_conf->from_ap_stream_conf);
+}
+
+
+/** Public functions */
+int mxmgmt_transport_init(struct mxmgmt_transport *mxmgmt_transport, struct scsc_mx *mx)
+{
+#define MEM_LENGTH 512
+	int r;
+	uint32_t mem_length = MEM_LENGTH;
+	uint32_t packet_size = sizeof(struct mxmgr_message);
+	uint32_t num_packets;
+
+	/*
+	 * Initialising a buffer of 1 byte is never legitimate, do not allow it.
+	 * The memory buffer length must be a multiple of the packet size.
+	 */
+	if (mem_length <= 1 || mem_length % packet_size != 0)
+		return -EIO;
+
+	memset(mxmgmt_transport, 0, sizeof(struct mxmgmt_transport));
+	num_packets = mem_length / packet_size;
+	mutex_init(&mxmgmt_transport->channel_handler_mutex);
+	mxmgmt_transport->mx = mx;
+
+	/* Inbound stream (FW -> AP), serviced by the I/O thread. */
+	r = mif_stream_init(&mxmgmt_transport->mif_istream,
+			    SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_IN,
+			    num_packets, packet_size, mx,
+			    MIF_STREAM_INTRBIT_TYPE_ALLOC,
+			    input_irq_handler, mxmgmt_transport);
+	if (r) {
+		SCSC_TAG_ERR(MXMGT_TRANS, "mif_stream_init IN failed %d\n", r);
+		return r;
+	}
+
+	/* Outbound stream (AP -> FW). */
+	r = mif_stream_init(&mxmgmt_transport->mif_ostream,
+			    SCSC_MIF_ABS_TARGET_R4, MIF_STREAM_DIRECTION_OUT,
+			    num_packets, packet_size, mx,
+			    MIF_STREAM_INTRBIT_TYPE_ALLOC,
+			    output_irq_handler, mxmgmt_transport);
+	if (r) {
+		SCSC_TAG_ERR(MXMGT_TRANS, "mif_stream_init OUT failed %d\n", r);
+		mif_stream_release(&mxmgmt_transport->mif_istream);
+		return r;
+	}
+
+	r = mxmgmt_thread_start(mxmgmt_transport);
+	if (r) {
+		SCSC_TAG_ERR(MXMGT_TRANS, "mxmgmt_thread_start failed %d\n", r);
+		mif_stream_release(&mxmgmt_transport->mif_istream);
+		mif_stream_release(&mxmgmt_transport->mif_ostream);
+		return r;
+	}
+
+	return 0;
+}
+
+/* Install (or clear, with handler == NULL) the callback invoked for each
+ * inbound message on @channel_id; takes channel_handler_mutex. */
+void mxmgmt_transport_register_channel_handler(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
+					       mxmgmt_channel_handler handler, void *data)
+{
+	if (channel_id < MMTRANS_NUM_CHANNELS) {
+		mutex_lock(&mxmgmt_transport->channel_handler_mutex);
+		mxmgmt_transport->channel_handler_fns[channel_id] = handler;
+		mxmgmt_transport->channel_handler_data[channel_id] = data;
+		mutex_unlock(&mxmgmt_transport->channel_handler_mutex);
+	} else {
+		SCSC_TAG_ERR(MXMGT_TRANS, "Invalid channel id: %d\n", channel_id);
+	}
+}
+
+/* Send @message on @channel_id: gather-write a one-byte channel id header
+ * followed by the payload into the outbound stream. */
+void mxmgmt_transport_send(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
+			   void *message, uint32_t message_length)
+{
+	struct mxmgr_message transport_msg = { .channel_id = channel_id };
+	const void *bufs[2];
+	uint32_t buf_lengths[2];
+
+	bufs[0] = &transport_msg.channel_id;
+	buf_lengths[0] = sizeof(transport_msg.channel_id);
+	bufs[1] = message;
+	buf_lengths[1] = message_length;
+
+	mif_stream_write_gather(&mxmgmt_transport->mif_ostream, bufs, buf_lengths, 2);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**
+ * mx140 management transport (Interface)
+ *
+ * Provides bi-directional communication between the firmware and the
+ * host. Messages sent across the transport are divided into a
+ * number of channels, with each channel having its own dedicated handler.
+ *
+ * This interface also provides a utility method for sending messages across
+ * the stream.
+ */
+
+#ifndef MXMANAGEMENT_TRANSPORT_H__
+#define MXMANAGEMENT_TRANSPORT_H__
+
+/** Uses */
+#include <linux/kthread.h>
+#include "mifstream.h"
+
+struct mxmgmt_transport;
+
+/**
+ * The various channels that can send messages
+ * across the transport.
+ *
+ * Channel IDs are limited to one byte.
+ */
+enum mxmgr_channels {
+ MMTRANS_CHAN_ID_MAXWELL_MANAGEMENT = 0,
+ MMTRANS_CHAN_ID_SERVICE_MANAGEMENT = 1,
+ MMTRANS_CHAN_ID_MAXWELL_LOGGING = 2,
+ MMTRANS_NUM_CHANNELS = 3
+};
+
+/**
+ * Transport channel callback handler. This will be invoked each time a message on a channel is
+ * received in the context of the transport stream's thread. Handlers may perform work within
+ * their callback implementation, but should not block.
+ *
+ * Note that the message pointer passed is only valid for the duration of the function call.
+ */
+typedef void (*mxmgmt_channel_handler)(const void *message, void *data);
+
+/**
+ * Registers the callback function that will be invoked to handle data coming in from the AP
+ * for the given channel.
+ */
+void mxmgmt_transport_register_channel_handler(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
+ mxmgmt_channel_handler handler, void *data);
+
+/**
+ * Sends a message to the AP across the given channel.
+ *
+ * This function is safe to call from any RTOS thread.
+ */
+void mxmgmt_transport_send(struct mxmgmt_transport *mxmgmt_transport, enum mxmgr_channels channel_id,
+ void *message, uint32_t message_length);
+
+/**
+ * Initialises the maxwell management transport and configures the necessary
+ * interrupt handlers. Called once during boot.
+ */
+int mxmgmt_transport_init(struct mxmgmt_transport *mxmgmt_transport, struct scsc_mx *mx);
+void mxmgmt_transport_release(struct mxmgmt_transport *mxmgmt_transport);
+
+/*
+ * Initialises the configuration area incl. Maxwell Infrastructure Configuration,
+ * MIF Management Transport Configuration and MIF Management Stream Configuration.
+ */
+void mxmgmt_transport_config_serialise(struct mxmgmt_transport *mxmgmt_transport, struct mxtransconf *trans_conf);
+void mxmgmt_transport_set_error(struct mxmgmt_transport *mxmgmt_transport);
+
+#define MXMGMT_THREAD_NAME_MAX_LENGTH 32
+/* State of the transport's I/O kthread (see mxmgmt_thread_function). */
+struct mxmgmt_thread {
+ struct task_struct *task; /* NULL whenever the thread is not running */
+ char name[MXMGMT_THREAD_NAME_MAX_LENGTH];
+ int prio; /* NOTE(review): not set anywhere in this chunk - confirm use */
+ struct completion completion; /* signalled by the thread on start-up and on exit */
+ wait_queue_head_t wakeup_q;
+ unsigned int wakeup_flag; /* set by input_irq_handler to request processing */
+ /*
+ * Use it to block the I/O thread when
+ * an error occurs.
+ */
+ int block_thread;
+};
+
+/* One bi-directional management transport instance: an inbound and an
+ * outbound MIF stream plus the kthread that dispatches inbound messages. */
+struct mxmgmt_transport {
+ struct scsc_mx *mx;
+ struct mif_stream mif_istream; /* FW -> AP */
+ struct mif_stream mif_ostream; /* AP -> FW */
+ struct mxmgmt_thread mxmgmt_thread;
+ /** Registered channel handlers for messages coming from the AP for each channel */
+ mxmgmt_channel_handler channel_handler_fns[MMTRANS_NUM_CHANNELS];
+ void *channel_handler_data[MMTRANS_NUM_CHANNELS];
+ struct mutex channel_handler_mutex; /* guards the two arrays above */
+};
+
+#endif /* MXMANAGEMENT_TRANSPORT_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef MXMGR_TRANSPORT_FORMAT_H__
+#define MXMGR_TRANSPORT_FORMAT_H__
+
+#define MXMGR_MESSAGE_PAYLOAD_SIZE 127
+
+/**
+ * Layout of messages across the manager transport streams.
+ *
+ * HERE: This is a dummy definition and will be replaced
+ * once more of the service management infrastructure is completed.
+ */
+struct mxmgr_message {
+ uint8_t channel_id; /* Channel ID from mxmgr_channels */
+ uint8_t payload[MXMGR_MESSAGE_PAYLOAD_SIZE]; /* Message content to store in the transport stream - user defined format */
+} __packed;
+
+#endif /* MXMGR_TRANSPORT_FORMAT_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef MXMGR_TRANSPORT_STREAMS_H__
+#define MXMGR_TRANSPORT_STREAMS_H__
+
+/**
+ * MIF input/output streams to/from the AP.
+ * These are seperated out to allow their use directly from within unit tests.
+ */
+struct {
+ /** from AP */
+ mif_stream *istream;
+ /** to AP */
+ mif_stream *ostream;
+} mxmgr_stream_container;
+
+extern mxmgr_stream_container mxmgr_streams;
+
+
+#endif /* MXMGR_TRANSPORT_STREAMS_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/seq_file.h>
+#include <scsc/scsc_release.h>
+#include <scsc/scsc_logring.h>
+#include "mxman.h"
+#include "mxproc.h"
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#endif
+
+#ifndef AID_MXPROC
+#define AID_MXPROC 0
+#endif
+
+/* Declare the read()/write() handler prototypes for procfs file <name> and
+ * define its const file_operations table (read-write variant). */
+#define MX_PROCFS_RW_FILE_OPS(name) \
+ static ssize_t mx_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
+ static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mx_procfs_ ## name ## _fops = { \
+ .read = mx_procfs_ ## name ## _read, \
+ .write = mx_procfs_ ## name ## _write, \
+ .open = mx_procfs_generic_open, \
+ .llseek = generic_file_llseek \
+ }
+/* Read-only variant: read handler only, no .write in the fops. */
+#define MX_PROCFS_RO_FILE_OPS(name) \
+ static ssize_t mx_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations mx_procfs_ ## name ## _fops = { \
+ .read = mx_procfs_ ## name ## _read, \
+ .open = mx_procfs_generic_open, \
+ .llseek = generic_file_llseek \
+ }
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MX_PDE_DATA(inode) PDE_DATA(inode)
+#else
+#define MX_PDE_DATA(inode) (PDE(inode)->data)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MX_PROCFS_SET_UID_GID(_entry) \
+ do { \
+ kuid_t proc_kuid = KUIDT_INIT(AID_MXPROC); \
+ kgid_t proc_kgid = KGIDT_INIT(AID_MXPROC); \
+ proc_set_user(_entry, proc_kuid, proc_kgid); \
+ } while (0)
+#else
+#define MX_PROCFS_SET_UID_GID(entry) \
+ do { \
+ (entry)->uid = AID_MXPROC; \
+ (entry)->gid = AID_MXPROC; \
+ } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define MX_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &mx_procfs_ ## name ## _fops, _sdev); \
+ MX_PROCFS_SET_UID_GID(entry); \
+ } while (0)
+#else
+#define MX_PROCFS_ADD_FILE(_data, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry; \
+ entry = create_proc_entry(# name, mode, parent); \
+ if (entry) { \
+ entry->proc_fops = &mx_procfs_ ## name ## _fops; \
+ entry->data = _data; \
+ MX_PROCFS_SET_UID_GID(entry); \
+ } \
+ } while (0)
+#endif
+
+#define MX_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
+
+#define OS_UNUSED_PARAMETER(x) ((void)(x))
+#define MX_DIRLEN 128
+static const char *procdir_ctrl = "driver/mxman_ctrl";
+static const char *procdir_info = "driver/mxman_info";
+
+/* Common open(): stash the proc entry's private data (a struct mxproc *)
+ * in file->private_data for the read/write handlers. */
+static int mx_procfs_generic_open(struct inode *inode, struct file *file)
+{
+	file->private_data = MX_PDE_DATA(inode);
+	return 0;
+}
+
+/* Reading mx_fail is a no-op; the file exists for its write side. */
+static ssize_t mx_procfs_mx_fail_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	OS_UNUSED_PARAMETER(file);
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(count);
+	OS_UNUSED_PARAMETER(ppos);
+
+	SCSC_TAG_DEBUG(MX_PROC, "OK\n");
+	return 0;
+}
+
+/* Any write to mx_fail injects a host-triggered failure (test hook). */
+static ssize_t mx_procfs_mx_fail_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(ppos);
+
+	if (mxproc != NULL)
+		mxman_fail(mxproc->mxman, SCSC_PANIC_CODE_HOST << 15, __func__);
+	SCSC_TAG_DEBUG(MX_PROC, "OK\n");
+
+	return count;
+}
+
+/* Reading mx_freeze is a no-op; the file exists for its write side. */
+static ssize_t mx_procfs_mx_freeze_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	OS_UNUSED_PARAMETER(file);
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(count);
+	OS_UNUSED_PARAMETER(ppos);
+
+	SCSC_TAG_DEBUG(MX_PROC, "OK\n");
+	return 0;
+}
+
+/* Any write to mx_freeze freezes the subsystem via mxman_freeze(). */
+static ssize_t mx_procfs_mx_freeze_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(ppos);
+
+	if (mxproc != NULL)
+		mxman_freeze(mxproc->mxman);
+	SCSC_TAG_INFO(MX_PROC, "OK\n");
+
+	return count;
+}
+
+static ssize_t mx_procfs_mx_panic_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(count);
+	OS_UNUSED_PARAMETER(ppos);
+
+	/* Force a FW panic on read as well as write to allow test in user builds */
+	if (mxproc != NULL)
+		mxman_force_panic(mxproc->mxman);
+	SCSC_TAG_INFO(MX_PROC, "OK\n");
+
+	return 0;
+}
+
+/* Any write to mx_panic forces a firmware panic (test hook). */
+static ssize_t mx_procfs_mx_panic_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+
+	OS_UNUSED_PARAMETER(user_buf);
+	OS_UNUSED_PARAMETER(ppos);
+
+	if (mxproc != NULL)
+		mxman_force_panic(mxproc->mxman);
+	SCSC_TAG_INFO(MX_PROC, "OK\n");
+
+	return count;
+}
+
+/* Report the last firmware panic code and its subcode (low 15 bits). */
+static ssize_t mx_procfs_mx_lastpanic_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[64];
+	int bytes;
+
+	if (!mxproc || !mxproc->mxman)
+		return -EINVAL;
+
+	memset(buf, '\0', sizeof(buf));
+	bytes = scnprintf(buf, sizeof(buf),
+			  "scsc_panic_code : 0x%x\nscsc_panic_subcode : 0x%x\n",
+			  (mxproc->mxman->scsc_panic_code),
+			  (mxproc->mxman->scsc_panic_code & 0x7FFF));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
+}
+
+/* Report whether the subsystem is suspended ("Y\n" or "N\n"). */
+static ssize_t mx_procfs_mx_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[3];
+
+	OS_UNUSED_PARAMETER(file);
+
+	/* Guard against a read before the entry's data is wired up - every
+	 * sibling handler in this file has this check; without it the
+	 * dereference below is a NULL pointer crash. */
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	buf[0] = mxproc->mxman->suspended ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = '\0';
+
+	SCSC_TAG_INFO(MX_PROC, "suspended: %c\n", buf[0]);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
+/* Report the cumulative suspend count as a decimal line. */
+static ssize_t mx_procfs_mx_suspend_count_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[32];
+	int len;
+	u32 suspend_count;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	suspend_count = atomic_read(&mxproc->mxman->suspend_count);
+	SCSC_TAG_INFO(MX_PROC, "suspend_count: %u\n", suspend_count);
+	len = scnprintf(buf, sizeof(buf), "%u\n", suspend_count);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* Report the cumulative recovery count as a decimal line. */
+static ssize_t mx_procfs_mx_recovery_count_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[32];
+	int len;
+	u32 recovery_count;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	recovery_count = atomic_read(&mxproc->mxman->recovery_count);
+	SCSC_TAG_INFO(MX_PROC, "recovery_count: %u\n", recovery_count);
+	len = scnprintf(buf, sizeof(buf), "%u\n", recovery_count);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* Report the cumulative boot count as a decimal line. */
+static ssize_t mx_procfs_mx_boot_count_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[32];
+	int len;
+	u32 boot_count;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	boot_count = atomic_read(&mxproc->mxman->boot_count);
+	SCSC_TAG_INFO(MX_PROC, "boot_count: %u\n", boot_count);
+	len = scnprintf(buf, sizeof(buf), "%u\n", boot_count);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* Force suspend ('Y') or resume ('N') of the subsystem. */
+static ssize_t mx_procfs_mx_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char cmd;
+	int r;
+
+	OS_UNUSED_PARAMETER(file);
+	OS_UNUSED_PARAMETER(ppos);
+
+	if (count && mxproc) {
+		/* user_buf is a userspace pointer; it must not be
+		 * dereferenced directly in kernel context (the original
+		 * read user_buf[0] without copy_from_user). */
+		if (get_user(cmd, user_buf))
+			return -EFAULT;
+
+		switch (cmd) {
+		case 'Y':
+			SCSC_TAG_INFO(MX_PROC, "force suspend\n");
+			r = mxman_suspend(mxproc->mxman);
+			if (r) {
+				SCSC_TAG_INFO(MX_PROC, "mx_suspend failed %d\n", r);
+				return r;
+			}
+			break;
+		case 'N':
+			SCSC_TAG_INFO(MX_PROC, "force resume\n");
+			mxman_resume(mxproc->mxman);
+			break;
+		default:
+			SCSC_TAG_INFO(MX_PROC, "invalid value %c\n", cmd);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+/* Report the current mxman lifecycle state as its enum name. */
+static ssize_t mx_procfs_mx_status_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	const char *state_str;
+	char buf[32];
+	int len;
+
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	switch (mxproc->mxman->mxman_state) {
+	case MXMAN_STATE_STOPPED:
+		state_str = "MXMAN_STATE_STOPPED";
+		break;
+	case MXMAN_STATE_STARTING:
+		state_str = "MXMAN_STATE_STARTING";
+		break;
+	case MXMAN_STATE_STARTED:
+		state_str = "MXMAN_STATE_STARTED";
+		break;
+	case MXMAN_STATE_FAILED:
+		state_str = "MXMAN_STATE_FAILED";
+		break;
+	case MXMAN_STATE_FREEZED:
+		state_str = "MXMAN_STATE_FREEZED";
+		break;
+	default:
+		return 0;
+	}
+
+	len = scnprintf(buf, sizeof(buf), "%s\n", state_str);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* List the currently active services, formatted by scsc_mx_list_services(). */
+static ssize_t mx_procfs_mx_services_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[256];
+	int len;
+
+	if (!mxproc || !mxproc->mxman)
+		return 0;
+
+	len = scsc_mx_list_services(mxproc->mxman, buf, sizeof(buf));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* Read the WLBT_STAT register through the MIF abstraction; a value of
+ * 0xff is reported when the register cannot be read. */
+static ssize_t mx_procfs_mx_wlbt_stat_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	struct scsc_mif_abs *mif_abs;
+	char buf[32];
+	int len;
+	u32 val = 0xff;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (!mxproc || !mxproc->mxman || !mxproc->mxman->mx)
+		return 0;
+
+	mif_abs = scsc_mx_get_mif_abs(mxproc->mxman->mx);
+	if (mif_abs->mif_read_register &&
+	    mif_abs->mif_read_register(mif_abs, SCSC_REG_READ_WLBT_STAT, &val))
+		val = 0xff; /* read failed */
+
+	len = scnprintf(buf, sizeof(buf), "0x%x\n", val);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+MX_PROCFS_RW_FILE_OPS(mx_fail);
+MX_PROCFS_RW_FILE_OPS(mx_freeze);
+MX_PROCFS_RW_FILE_OPS(mx_panic);
+MX_PROCFS_RW_FILE_OPS(mx_suspend);
+MX_PROCFS_RO_FILE_OPS(mx_suspend_count);
+MX_PROCFS_RO_FILE_OPS(mx_recovery_count);
+MX_PROCFS_RO_FILE_OPS(mx_boot_count);
+MX_PROCFS_RO_FILE_OPS(mx_status);
+MX_PROCFS_RO_FILE_OPS(mx_services);
+MX_PROCFS_RO_FILE_OPS(mx_lastpanic);
+MX_PROCFS_RO_FILE_OPS(mx_wlbt_stat);
+
+static u32 proc_count;
+
+/*
+ * Create /proc/driver/mxman_ctrl<N> (N = running instance counter) and
+ * populate it with the control files (fail/freeze/panic/suspend, counters,
+ * status, services, lastpanic, wlbt_stat).
+ * Returns 0 on success or -EINVAL when the directory cannot be created.
+ */
+int mxproc_create_ctrl_proc_dir(struct mxproc *mxproc, struct mxman *mxman)
+{
+ char dir[MX_DIRLEN];
+ struct proc_dir_entry *parent;
+
+ (void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, proc_count);
+ parent = proc_mkdir(dir, NULL);
+ if (!parent) {
+ SCSC_TAG_ERR(MX_PROC, "failed to create proc dir %s\n", procdir_ctrl);
+ return -EINVAL;
+ }
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+ /* Old kernels: no proc_create_data(), so stash the cookie on the dir. */
+ parent->data = mxproc;
+#endif
+ mxproc->mxman = mxman;
+ mxproc->procfs_ctrl_dir = parent;
+ mxproc->procfs_ctrl_dir_num = proc_count;
+ MX_PROCFS_ADD_FILE(mxproc, mx_fail, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_freeze, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_panic, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_suspend, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_suspend_count, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_recovery_count, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_status, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_services, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_lastpanic, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_wlbt_stat, parent, S_IRUSR | S_IRGRP | S_IROTH);
+
+ SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
+ proc_count++;
+
+ return 0;
+}
+
+/* Remove all control files and the /proc/driver/mxman_ctrl<N> directory
+ * created by mxproc_create_ctrl_proc_dir(). */
+void mxproc_remove_ctrl_proc_dir(struct mxproc *mxproc)
+{
+	struct proc_dir_entry *parent = mxproc->procfs_ctrl_dir;
+	char dir[MX_DIRLEN];
+
+	if (!parent)
+		return;
+
+	MX_PROCFS_REMOVE_FILE(mx_fail, parent);
+	MX_PROCFS_REMOVE_FILE(mx_freeze, parent);
+	MX_PROCFS_REMOVE_FILE(mx_panic, parent);
+	MX_PROCFS_REMOVE_FILE(mx_suspend, parent);
+	MX_PROCFS_REMOVE_FILE(mx_suspend_count, parent);
+	MX_PROCFS_REMOVE_FILE(mx_recovery_count, parent);
+	MX_PROCFS_REMOVE_FILE(mx_status, parent);
+	MX_PROCFS_REMOVE_FILE(mx_services, parent);
+	MX_PROCFS_REMOVE_FILE(mx_lastpanic, parent);
+	MX_PROCFS_REMOVE_FILE(mx_wlbt_stat, parent);
+
+	(void)snprintf(dir, sizeof(dir), "%s%d", procdir_ctrl, mxproc->procfs_ctrl_dir_num);
+	remove_proc_entry(dir, NULL);
+	mxproc->procfs_ctrl_dir = NULL;
+	proc_count--;
+	SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
+}
+
+/* Report the RF hardware version as "RF version: 0x%04x". */
+static ssize_t mx_procfs_mx_rf_hw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[256];
+	int bytes;
+
+	if (!mxproc || !mxproc->mxman)
+		return -EINVAL;
+
+	memset(buf, '\0', sizeof(buf));
+	bytes = scnprintf(buf, sizeof(buf), "RF version: 0x%04x\n", (mxproc->mxman->rf_hw_ver));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
+}
+
+MX_PROCFS_RO_FILE_OPS(mx_rf_hw_ver);
+
+/* Report the RF hardware name, formatted by mxman_print_rf_hw_version(). */
+static ssize_t mx_procfs_mx_rf_hw_name_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mxproc *mxproc = file->private_data;
+	char buf[256];
+	int bytes;
+
+	if (!mxproc || !mxproc->mxman)
+		return -EINVAL;
+
+	memset(buf, '\0', sizeof(buf));
+	bytes = mxman_print_rf_hw_version(mxproc->mxman, buf, sizeof(buf));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
+}
+
+MX_PROCFS_RO_FILE_OPS(mx_rf_hw_name);
+
+/* Report the driver release numbers and the firmware build id. */
+static ssize_t mx_procfs_mx_release_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[256];
+	int bytes;
+	struct mxproc *mxproc = file->private_data;
+	char *build_id = 0;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (mxproc && mxproc->mxman)
+		build_id = mxproc->mxman->fw_build_id;
+
+	memset(buf, '\0', sizeof(buf));
+
+	bytes = snprintf(buf, sizeof(buf), "Release: %d.%d.%d.%d (f/w: %s)\n",
+			 SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION, SCSC_RELEASE_CANDIDATE, SCSC_RELEASE_POINT,
+			 build_id ? build_id : "unknown");
+
+	/* On truncation snprintf returns the would-be length; at most
+	 * sizeof(buf) - 1 characters were stored (plus NUL). The original
+	 * clamped to sizeof(buf), off by one, and the signed/unsigned
+	 * compare never fired for bytes == sizeof(buf). */
+	if (bytes >= (int)sizeof(buf))
+		bytes = sizeof(buf) - 1;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
+}
+
+MX_PROCFS_RO_FILE_OPS(mx_release);
+
+/* Report the firmware TTID string. */
+static ssize_t mx_procfs_mx_ttid_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[256];
+	int bytes;
+	struct mxproc *mxproc = file->private_data;
+	char *id = 0;
+
+	OS_UNUSED_PARAMETER(file);
+
+	if (mxproc && mxproc->mxman)
+		id = mxproc->mxman->fw_ttid;
+
+	memset(buf, '\0', sizeof(buf));
+
+	/* Passing NULL to "%s" is undefined behaviour; fall back to
+	 * "unknown" when no manager is bound, matching mx_release_read. */
+	bytes = snprintf(buf, sizeof(buf), "%s\n", id ? id : "unknown");
+
+	/* snprintf stores at most sizeof(buf) - 1 characters plus NUL. */
+	if (bytes >= (int)sizeof(buf))
+		bytes = sizeof(buf) - 1;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, bytes);
+}
+
+MX_PROCFS_RO_FILE_OPS(mx_ttid);
+
+/* Create the /proc information directory and populate it with the read-only
+ * info files (release, RF h/w version and name, boot count, TTID).
+ *
+ * @mxproc: per-instance proc state; procfs_info_dir is set on success
+ * @mxman:  manager instance the info files will report on
+ *
+ * Returns 0 on success, -EINVAL if the proc directory cannot be created.
+ */
+int mxproc_create_info_proc_dir(struct mxproc *mxproc, struct mxman *mxman)
+{
+ char dir[MX_DIRLEN];
+ struct proc_dir_entry *parent;
+
+ (void)snprintf(dir, sizeof(dir), "%s", procdir_info);
+ parent = proc_mkdir(dir, NULL);
+ if (!parent) {
+ SCSC_TAG_ERR(MX_PROC, "failed to create /proc dir\n");
+ return -EINVAL;
+ }
+ /* Older kernels have no proc_set/get helpers; stash context directly */
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+ parent->data = mxproc;
+#endif
+ mxproc->mxman = mxman;
+ mxproc->procfs_info_dir = parent;
+ MX_PROCFS_ADD_FILE(mxproc, mx_release, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_rf_hw_ver, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_rf_hw_name, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_boot_count, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ MX_PROCFS_ADD_FILE(mxproc, mx_ttid, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ SCSC_TAG_DEBUG(MX_PROC, "created %s proc dir\n", dir);
+
+ return 0;
+}
+
+/* Tear down the /proc information directory created by
+ * mxproc_create_info_proc_dir(), removing all info files first.
+ *
+ * Fix: mx_ttid and mx_boot_count were being removed from procfs_ctrl_dir,
+ * but mxproc_create_info_proc_dir() adds every info file (including these
+ * two) under procfs_info_dir - the old code leaked those proc entries.
+ */
+void mxproc_remove_info_proc_dir(struct mxproc *mxproc)
+{
+ if (mxproc->procfs_info_dir) {
+ char dir[MX_DIRLEN];
+
+ MX_PROCFS_REMOVE_FILE(mx_ttid, mxproc->procfs_info_dir);
+ MX_PROCFS_REMOVE_FILE(mx_boot_count, mxproc->procfs_info_dir);
+ MX_PROCFS_REMOVE_FILE(mx_release, mxproc->procfs_info_dir);
+ MX_PROCFS_REMOVE_FILE(mx_rf_hw_ver, mxproc->procfs_info_dir);
+ MX_PROCFS_REMOVE_FILE(mx_rf_hw_name, mxproc->procfs_info_dir);
+ (void)snprintf(dir, sizeof(dir), "%s", procdir_info);
+ remove_proc_entry(dir, NULL);
+ mxproc->procfs_info_dir = NULL;
+ SCSC_TAG_DEBUG(MX_PROC, "removed %s proc dir\n", dir);
+ }
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/*
+ * mx140 proc interface
+ */
+
+#ifndef MXPROC_H
+#define MXPROC_H
+
+struct mxproc;
+
+int mxproc_create_ctrl_proc_dir(struct mxproc *mxproc, struct mxman *mxman);
+void mxproc_remove_ctrl_proc_dir(struct mxproc *mxproc);
+int mxproc_create_info_proc_dir(struct mxproc *mxproc, struct mxman *mxman);
+void mxproc_remove_info_proc_dir(struct mxproc *mxproc);
+extern int scsc_mx_list_services(struct mxman *mxman_p, char *buf, const size_t bufsz);
+extern int mxman_print_rf_hw_version(struct mxman *mxman, char *buf, const size_t bufsz);
+
+/* Per-mx-instance procfs state: the owning manager plus the two proc
+ * directories (control and info) created for it.
+ */
+struct mxproc {
+ struct mxman *mxman; /* manager the proc files report on / control */
+ struct proc_dir_entry *procfs_ctrl_dir; /* /proc control directory */
+ u32 procfs_ctrl_dir_num; /* instance number suffix of ctrl dir */
+ struct proc_dir_entry *procfs_info_dir; /* /proc info directory */
+};
+
+#endif /* MXPROC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/notifier.h>
+
+#include "scsc_mx_impl.h"
+#include "miframman.h"
+#include "mifmboxman.h"
+#include "mxman.h"
+#include "srvman.h"
+#include "mxmgmt_transport.h"
+#include "mxlog.h"
+#include "mxlogger.h"
+#include "fw_panic_record.h"
+#include "panicmon.h"
+#include "mxproc.h"
+#include "mxsyserr.h"
+
+#include <scsc/scsc_release.h>
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_logring.h>
+
+/* Handler for MM_SYSERR_IND messages from firmware: decodes the payload as
+ * a struct mx_syserr_msg and logs its fields. Purely informational - no
+ * recovery action is taken here.
+ */
+void mx_syserr_handler(struct mxman *mx, const void *message)
+{
+ const struct mx_syserr_msg *msg = (const struct mx_syserr_msg *)message;
+
+ SCSC_TAG_INFO(MXMAN, "MM_SYSERR_IND len: %u, ts: 0x%08X, tf: 0x%08X, str: 0x%x, code: 0x%08x, p0: 0x%x, p1: 0x%x\n",
+ msg->syserr.length,
+ msg->syserr.slow_clock,
+ msg->syserr.fast_clock,
+ msg->syserr.string_index,
+ msg->syserr.syserr_code,
+ msg->syserr.param[0],
+ msg->syserr.param[1]);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __MX_SYSERR_H
+#define __MX_SYSERR_H
+
+
+struct mx_syserr {
+ u16 length; /* Length of this structure for future extension */
+ u32 slow_clock;
+ u32 fast_clock;
+ u32 string_index;
+ u32 syserr_code;
+ u32 param[2];
+} __packed;
+
+struct mx_syserr_msg {
+ u8 id;
+ struct mx_syserr syserr;
+} __packed;
+
+void mx_syserr_handler(struct mxman *mx, const void *message);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef PANIC_RECORD_R4_DEFS_H__
+#define PANIC_RECORD_R4_DEFS_H__
+/*
+ * R4 Panic Record Definitions.
+ *
+ * This record is used to pass summary information about the context of
+ * Maxwell R4 firmware panics to the host.
+ *
+ * The record location, relative to shared DRAM memory, is defined by the
+ * R4_PANIC_RECORD_OFFSET field in the firmware header [see SC-505846-SW].
+ *
+ * Notes:-
+ * - The host panic handler should _not_ expect the R4 record to be
+ * written prior to a panic indication from Maxwell, and it may never
+ * be written at all. The checksum should indicate a valid record.
+ *
+ * N.B. Defined in this standalone header for inclusion in .s and .c.
+ */
+
+/*
+ * The current version of the PANIC_RECORD_R4 structure defined below.
+ * Written to version field by firmware, checked by host.
+ * This also serves as a rudimentary endianess check.
+ */
+#define PANIC_RECORD_R4_VERSION_1 1
+
+/*
+ * Total number of R4 registers saved.
+ */
+#define PANIC_RECORD_R4_REGISTER_COUNT 18
+
+/*
+ * Number of panic info arguments.
+ */
+#define PANIC_RECORD_R4_INFO_COUNT 4
+
+/*
+ * Checksum seed to prevent false match on all zeros or ones.
+ */
+#define PANIC_RECORD_R4_CKSUM_SEED 0xa5a5a5a5
+
+/*****************************************************************************
+ * R4 Panic Record 32bit field indices.
+ *****************************************************************************/
+
+/*
+ * Version of this structure.
+ */
+#define PANIC_RECORD_R4_VERSION_INDEX 0
+
+/*
+ * Clock counters at time of the R4 panic.
+ *
+ * The 1M clock is generally the most useful but there is period
+ * after IP wake-up when it is not monotonic. The 32K count
+ * is included in-case of a panic during wake-up.
+ */
+#define PANIC_RECORD_R4_TIMESTAMP_1M_INDEX (PANIC_RECORD_R4_VERSION_INDEX + 1)
+#define PANIC_RECORD_R4_TIMESTAMP_32K_INDEX (PANIC_RECORD_R4_TIMESTAMP_1M_INDEX + 1)
+
+/*
+ * Snapshot of main r4 CPU registers.
+ */
+#define PANIC_RECORD_R4_REGISTERS_INDEX (PANIC_RECORD_R4_TIMESTAMP_32K_INDEX + 1)
+
+/*
+ * Panic info.
+ *
+ * 1st field is key/index of panic_string.
+ */
+#define PANIC_RECORD_R4_INFO_INDEX (PANIC_RECORD_R4_REGISTERS_INDEX + PANIC_RECORD_R4_REGISTER_COUNT)
+
+/*
+ * 32bit XOR of all the fields above + PANIC_RECORD_R4_CKSUM_SEED
+ *
+ * Written by firmware on panic, checked by host.
+ */
+#define PANIC_RECORD_R4_CKSUM_INDEX (PANIC_RECORD_R4_INFO_INDEX + PANIC_RECORD_R4_INFO_COUNT)
+
+/*
+ * Length of the r4 panic record (uint32s).
+ */
+#define PANIC_RECORD_R4_LEN (PANIC_RECORD_R4_CKSUM_INDEX + 1)
+
+/*****************************************************************************
+ * R4 uint32 Register indices relative to PANIC_RECORD_R4_REGISTERS_INDEX
+ *****************************************************************************/
+
+#define PANIC_RECORD_R4_REGISTER_R0 0
+#define PANIC_RECORD_R4_REGISTER_R1 1
+#define PANIC_RECORD_R4_REGISTER_R2 2
+#define PANIC_RECORD_R4_REGISTER_R3 3
+#define PANIC_RECORD_R4_REGISTER_R4 4
+#define PANIC_RECORD_R4_REGISTER_R5 5
+#define PANIC_RECORD_R4_REGISTER_R6 6
+#define PANIC_RECORD_R4_REGISTER_R7 7
+#define PANIC_RECORD_R4_REGISTER_R8 8
+#define PANIC_RECORD_R4_REGISTER_R9 9
+#define PANIC_RECORD_R4_REGISTER_R10 10
+#define PANIC_RECORD_R4_REGISTER_R11 11
+#define PANIC_RECORD_R4_REGISTER_R12 12
+#define PANIC_RECORD_R4_REGISTER_SP 13
+#define PANIC_RECORD_R4_REGISTER_LR 14
+#define PANIC_RECORD_R4_REGISTER_SPSR 15
+#define PANIC_RECORD_R4_REGISTER_PC 16
+#define PANIC_RECORD_R4_REGISTER_CPSR 17
+
+/*****************************************************************************
+ * R4 Register octet offsets relative to PANIC_RECORD_R4_REGISTERS_INDEX
+ *****************************************************************************/
+
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R0 (PANIC_RECORD_R4_REGISTER_R0 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R1 (PANIC_RECORD_R4_REGISTER_R1 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R2 (PANIC_RECORD_R4_REGISTER_R2 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R3 (PANIC_RECORD_R4_REGISTER_R3 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R4 (PANIC_RECORD_R4_REGISTER_R4 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R5 (PANIC_RECORD_R4_REGISTER_R5 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R6 (PANIC_RECORD_R4_REGISTER_R6 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R7 (PANIC_RECORD_R4_REGISTER_R7 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R8 (PANIC_RECORD_R4_REGISTER_R8 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R9 (PANIC_RECORD_R4_REGISTER_R9 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R10 (PANIC_RECORD_R4_REGISTER_R10 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R11 (PANIC_RECORD_R4_REGISTER_R11 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_R12 (PANIC_RECORD_R4_REGISTER_R12 * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_SP (PANIC_RECORD_R4_REGISTER_SP * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_LR (PANIC_RECORD_R4_REGISTER_LR * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_SPSR (PANIC_RECORD_R4_REGISTER_SPSR * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_PC (PANIC_RECORD_R4_REGISTER_PC * 4)
+#define PANIC_RECORD_R4_REGISTER_OFFSET_CPSR (PANIC_RECORD_R4_REGISTER_CPSR * 4)
+
+#endif /* PANIC_RECORD_R4_DEFS_H__ */
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <scsc/scsc_logring.h>
+
+#include "panicmon.h"
+#include "scsc_mif_abs.h"
+#include "mxman.h"
+
+/* ISR for the firmware RESET_REQUEST line: escalate to mxman as a panic. */
+static void panicmon_isr(int irq, void *data)
+{
+ struct panicmon *panicmon = (struct panicmon *)data;
+
+ (void)irq; /* unused */
+
+ SCSC_TAG_DEBUG(PANIC_MON, "panicmon=%p panicmon->mx=%p mxman=%p\n", panicmon, panicmon->mx, scsc_mx_get_mxman(panicmon->mx));
+
+ /* Firmware asserted reset_request: report a firmware-originated panic */
+ mxman_fail(scsc_mx_get_mxman(panicmon->mx), SCSC_PANIC_CODE_FW << 15, __func__);
+}
+
+
+/* Bind the panic monitor to an mx instance and hook the reset_request
+ * interrupt so firmware panics are delivered to panicmon_isr().
+ */
+void panicmon_init(struct panicmon *panicmon, struct scsc_mx *mx)
+{
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mx);
+
+ panicmon->mx = mx;
+
+ /* Register our ISR with the MIF abstraction layer */
+ mif->irq_reg_reset_request_handler(mif, panicmon_isr, (void *)panicmon);
+}
+
+/* Unhook the reset_request interrupt registered by panicmon_init(). */
+void panicmon_deinit(struct panicmon *panicmon)
+{
+ struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(panicmon->mx);
+
+ mif->irq_unreg_reset_request_handler(mif);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#ifndef _PANICMON_H
+#define _PANICMON_H
+#include "mxman.h"
+
+struct panicmon;
+
+void panicmon_init(struct panicmon *panicmon, struct scsc_mx *mx);
+void panicmon_deinit(struct panicmon *panicmon);
+
+struct panicmon {
+ struct scsc_mx *mx;
+};
+
+#endif /* _PANICMON_H */
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Mailbox Hardware Emulation (Implementation)
+*
+****************************************************************************/
+
+/* Implements */
+
+#include "pcie_mbox.h"
+
+/* Uses */
+
+#include <linux/pci.h>
+#include <asm/barrier.h>
+#include <scsc/scsc_logring.h>
+#include "pcie_mbox_shared_data.h"
+#include "pcie_mbox_intgen.h"
+
+/* Private Functions */
+
+/**
+ * Initialise the mailbox emulation shared structure.
+ */
+/**
+ * Initialise the mailbox emulation shared structure: zero the whole region,
+ * then stamp magic and version so firmware can validate it.
+ */
+static void pcie_mbox_shared_data_init(struct pcie_mbox_shared_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ data->magic = PCIE_MIF_MBOX_MAGIC_NUMBER;
+ data->version = PCIE_MIF_MBOX_VERSION_NUMBER;
+ /* Publish the initialised structure before any other agent reads it */
+ pcie_mbox_shared_data_wmb();
+}
+
+/* Public Functions */
+
+/* Initialise the mailbox hardware emulation: reset the shared-memory
+ * structure and bind one Interrupt Generator emulation per node
+ * (AP, R4, M4 and - when MX450 GDB support is built - a second M4).
+ *
+ * NOTE(review): pcie_registers is not used in this function - confirm
+ * whether it is reserved for future use or can be dropped from the API.
+ */
+void pcie_mbox_init(
+ struct pcie_mbox *mbox,
+ void *shared_data_region,
+ __iomem void *pcie_registers,
+ struct functor *ap_interrupt_trigger,
+ struct functor *r4_interrupt_trigger,
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ struct functor *m4_interrupt_trigger,
+ struct functor *m4_1_interrupt_trigger
+#else
+ struct functor *m4_interrupt_trigger
+#endif
+ )
+{
+ mbox->shared_data = (struct pcie_mbox_shared_data *)shared_data_region;
+
+ pcie_mbox_shared_data_init(mbox->shared_data);
+
+ /* Interrupt Generator Emulations */
+
+ pcie_mbox_intgen_init(&mbox->ap_intgen, "AP", &mbox->shared_data->ap_interrupt, ap_interrupt_trigger);
+ pcie_mbox_intgen_init(&mbox->r4_intgen, "R4", &mbox->shared_data->r4_interrupt, r4_interrupt_trigger);
+ pcie_mbox_intgen_init(&mbox->m4_intgen, "M4", &mbox->shared_data->m4_interrupt, m4_interrupt_trigger);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ /* NOTE(review): second instance shares the debug name "M4" with the
+  * first - "M4_1" would disambiguate log output; confirm intent. */
+ pcie_mbox_intgen_init(&mbox->m4_intgen_1, "M4", &mbox->shared_data->m4_1_interrupt, m4_1_interrupt_trigger);
+#endif
+}
+
+/* Return the AP Interrupt Generator's source-mask state as a bitmask. */
+u32 pcie_mbox_get_ap_interrupt_masked_bitmask(const struct pcie_mbox *mbox)
+{
+ /* Delegate to ap intgen component */
+ return pcie_mbox_intgen_get_masked_bitmask(&mbox->ap_intgen);
+}
+
+/* Return the AP sources that are set and not masked, as a bitmask. */
+u32 pcie_mbox_get_ap_interrupt_pending_bitmask(const struct pcie_mbox *mbox)
+{
+ /* Delegate to ap intgen component */
+ return pcie_mbox_intgen_get_pending_bitmask(&mbox->ap_intgen);
+}
+
+/* Is the given AP interrupt source set and not masked? */
+bool pcie_mbox_is_ap_interrupt_source_pending(const struct pcie_mbox *mbox, int source_num)
+{
+ /* Delegate to ap intgen component */
+ return pcie_mbox_intgen_is_source_pending(&mbox->ap_intgen, source_num);
+}
+
+/* Clear the given AP interrupt source. */
+void pcie_mbox_clear_ap_interrupt_source(struct pcie_mbox *mbox, int source_num)
+{
+ /* Delegate to ap intgen component */
+ pcie_mbox_intgen_clear_source(&mbox->ap_intgen, source_num);
+}
+
+/* Mask the given AP interrupt source. */
+void pcie_mbox_mask_ap_interrupt_source(struct pcie_mbox *mbox, int source_num)
+{
+ /* Delegate to ap intgen component */
+ pcie_mbox_intgen_mask_source(&mbox->ap_intgen, source_num);
+}
+
+/* Unmask the given AP interrupt source; triggers if it is already set. */
+void pcie_mbox_unmask_ap_interrupt_source(struct pcie_mbox *mbox, int source_num)
+{
+ /* Delegate to ap intgen component */
+ pcie_mbox_intgen_unmask_source(&mbox->ap_intgen, source_num);
+}
+
+/* Set an outgoing interrupt source towards the given target node,
+ * triggering the target's interrupt if the source is unmasked.
+ *
+ * Fix: the SCSC_MIF_ABS_TARGET_M4_1 case previously set the source on
+ * m4_intgen (the first M4 instance) - a copy-paste bug that sent M4_1
+ * interrupts to the wrong core. It now uses m4_intgen_1.
+ */
+void pcie_mbox_set_outgoing_interrupt_source(struct pcie_mbox *mbox, enum scsc_mif_abs_target target_node, int source_num)
+{
+ /* Delegate to appropriate intgen instance*/
+ switch (target_node) {
+ case SCSC_MIF_ABS_TARGET_R4:
+ pcie_mbox_intgen_set_source(&mbox->r4_intgen, source_num);
+ break;
+ case SCSC_MIF_ABS_TARGET_M4:
+ pcie_mbox_intgen_set_source(&mbox->m4_intgen, source_num);
+ break;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ case SCSC_MIF_ABS_TARGET_M4_1:
+ pcie_mbox_intgen_set_source(&mbox->m4_intgen_1, source_num);
+ break;
+#endif
+ default:
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid interrupt target %d\n", target_node);
+ return;
+ }
+}
+
+/* Return a pointer to the requested 32-bit mailbox word in shared memory,
+ * or NULL if the index is out of range.
+ *
+ * Fix: mbox_index is u32, so log it with %u rather than %d.
+ */
+u32 *pcie_mbox_get_mailbox_ptr(struct pcie_mbox *mbox, u32 mbox_index)
+{
+ if (mbox_index >= PCIE_MIF_MBOX_ISSR_COUNT) {
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid mailbox index %u\n", mbox_index);
+ return NULL;
+ }
+
+ return &mbox->shared_data->mailbox[mbox_index];
+}
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Mailbox Hardware Emulation (Interface)
+*
+****************************************************************************/
+
+#ifndef __PCIE_MBOX_H
+#define __PCIE_MBOX_H
+
+/* Uses */
+
+#include "scsc_mif_abs.h" /* for enum scsc_mif_abs_target */
+#include "pcie_mbox_intgen.h"
+
+/* Forward */
+
+struct pcie_mbox_shared_data;
+
+/* Types */
+
+/**
+ * Maxwell Mailbox Hardware Emulation.
+ *
+ * Uses structure in shared memory to emulate the MX Mailbox Hardware in
+ * conjunction with matching logic in R4 & M4 firmware.
+ *
+ * The emulated hardware includes an array of simple 32 bit mailboxes and
+ * 3 instances of Interrupt Generator (intgen) hardware (to ap, to r4 & to m4).
+ */
+struct pcie_mbox {
+ /** Pointer to shared Mailbox emulation state */
+ struct pcie_mbox_shared_data *shared_data;
+
+ /** Interrupt Generator Emulations */
+ struct pcie_mbox_intgen ap_intgen;
+ struct pcie_mbox_intgen r4_intgen;
+ struct pcie_mbox_intgen m4_intgen;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ struct pcie_mbox_intgen m4_intgen_1;
+#endif
+};
+
+/* Public Functions */
+
+/**
+ * Initialise the mailbox emulation.
+ */
+void pcie_mbox_init(
+ struct pcie_mbox *mbox,
+ void *shared_data_region,
+ __iomem void *pcie_registers,
+ struct functor *ap_interrupt_trigger,
+ struct functor *r4_interrupt_trigger,
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ struct functor *m4_interrupt_trigger,
+ struct functor *m4_1_interrupt_trigger
+#else
+ struct functor *m4_interrupt_trigger
+#endif
+ );
+
+/**
+ * Get the AP interrupt source mask state as a bitmask.
+ */
+u32 pcie_mbox_get_ap_interrupt_masked_bitmask(const struct pcie_mbox *mbox);
+
+/**
+ * Get the AP interrupt source pending (set and not masked) state as a bitmask.
+ */
+u32 pcie_mbox_get_ap_interrupt_pending_bitmask(const struct pcie_mbox *mbox);
+
+/**
+ * Is the specified AP interrupt source pending (set and not masked)?
+ */
+bool pcie_mbox_is_ap_interrupt_source_pending(const struct pcie_mbox *mbox, int source_num);
+
+/**
+ * Clear the specified AP interrupt source.
+ */
+void pcie_mbox_clear_ap_interrupt_source(struct pcie_mbox *mbox, int source_num);
+
+/**
+ * Mask the specified AP interrupt source.
+ */
+void pcie_mbox_mask_ap_interrupt_source(struct pcie_mbox *mbox, int source_num);
+
+/**
+ * Unmask the specified AP interrupt source.
+ *
+ * The interrupt will trigger if the source is currently set.
+ */
+void pcie_mbox_unmask_ap_interrupt_source(struct pcie_mbox *mbox, int source_num);
+
+/**
+ * Set an outgoing interrupt source to R4 or M4 node.
+ *
+ * Triggers interrupt in target node if the source is not masked.
+ */
+void pcie_mbox_set_outgoing_interrupt_source(struct pcie_mbox *mbox, enum scsc_mif_abs_target target_node, int source_num);
+
+/**
+ * Get pointer to the specified 32bit Mailbox in shared memory.
+ */
+u32 *pcie_mbox_get_mailbox_ptr(struct pcie_mbox *mbox, u32 mbox_index);
+
+#endif /* __PCIE_MBOX_H */
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Mailbox Interrupt Generator Emulation (Implementation)
+*
+****************************************************************************/
+
+/* Implements */
+
+#include "pcie_mbox_intgen.h"
+
+/* Uses */
+
+#include "pcie_mbox_intgen_shared_data.h"
+#include "functor.h"
+#include <scsc/scsc_logring.h>
+
+/* Private Functions */
+
+/**
+ * Trigger the hardware interrupt associated with this Interrupt Generator.
+ */
+/**
+ * Trigger the hardware interrupt associated with this Interrupt Generator.
+ */
+static void pcie_mbox_intgen_trigger_interrupt(struct pcie_mbox_intgen *intgen)
+{
+ /* Implementation is abstracted to hide the differences from this module */
+ functor_call(intgen->trigger_interrupt_fn);
+}
+
+/* Public Functions */
+
+/* Initialise an Interrupt Generator emulation instance.
+ *
+ * Fix: strncpy(dst, src, sizeof(dst)) does not NUL-terminate when the
+ * source is at least as long as the buffer, leaving intgen->name
+ * unterminated for long names. Copy one byte less and terminate
+ * explicitly.
+ */
+void pcie_mbox_intgen_init(
+ struct pcie_mbox_intgen *intgen,
+ const char *name,
+ struct pcie_mbox_intgen_shared_data *shared_data,
+ struct functor *trigger_interrupt_fn
+ )
+{
+ strncpy(intgen->name, name, sizeof(intgen->name) - 1);
+ intgen->name[sizeof(intgen->name) - 1] = '\0';
+ intgen->shared_data = shared_data;
+ intgen->trigger_interrupt_fn = trigger_interrupt_fn;
+}
+
+/* Compile this Interrupt Generator's per-source mask words into a bitmask.
+ *
+ * Fix: use 1U for the shift - with PCIE_MIF_MBOX_NUM_INTR_SOURCES == 32,
+ * source 31 made "1 << source_num" overflow signed int (undefined
+ * behaviour).
+ */
+u32 pcie_mbox_intgen_get_masked_bitmask(const struct pcie_mbox_intgen *intgen)
+{
+ /* Compile bitmask from the emulation's separate source mask fields */
+
+ u32 masked_bitmask = 0;
+ int source_num;
+
+ for (source_num = 0; source_num < PCIE_MIF_MBOX_NUM_INTR_SOURCES; ++source_num)
+ if (intgen->shared_data->mask[source_num])
+ masked_bitmask |= (1U << source_num);
+
+ return masked_bitmask;
+}
+
+/* Is the given source set and not masked?
+ * NOTE(review): unlike the set/clear/mask/unmask siblings, this does not
+ * bounds-check source_num against PCIE_MIF_MBOX_NUM_INTR_SOURCES - confirm
+ * all callers pass a valid index.
+ */
+bool pcie_mbox_intgen_is_source_pending(const struct pcie_mbox_intgen *intgen, int source_num)
+{
+ return intgen->shared_data->status[source_num] && !intgen->shared_data->mask[source_num];
+}
+
+/* Compile this Interrupt Generator's pending (set and not masked) state
+ * into a bitmask.
+ *
+ * Fix: use 1U for the shift - with PCIE_MIF_MBOX_NUM_INTR_SOURCES == 32,
+ * source 31 made "1 << source_num" overflow signed int (undefined
+ * behaviour).
+ */
+u32 pcie_mbox_intgen_get_pending_bitmask(const struct pcie_mbox_intgen *intgen)
+{
+ /* Compile bitmask from the emulation's separate source status and mask fields */
+
+ u32 pending_bitmask = 0;
+ int source_num;
+
+ for (source_num = 0; source_num < PCIE_MIF_MBOX_NUM_INTR_SOURCES; ++source_num)
+ if (pcie_mbox_intgen_is_source_pending(intgen, source_num))
+ pending_bitmask |= (1U << source_num);
+
+ return pending_bitmask;
+}
+
+/* Set an interrupt source: write its status word, publish to shared memory,
+ * and trigger the target's interrupt if the source is unmasked.
+ */
+void pcie_mbox_intgen_set_source(struct pcie_mbox_intgen *intgen, int source_num)
+{
+ if (source_num >= PCIE_MIF_MBOX_NUM_INTR_SOURCES) {
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid intgen source %d\n", source_num);
+ return;
+ }
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Set source %s:%d. (P %08x, M %08x)\n",
+ intgen->name, source_num,
+ pcie_mbox_intgen_get_pending_bitmask(intgen),
+ pcie_mbox_intgen_get_masked_bitmask(intgen)
+ );
+
+ intgen->shared_data->status[source_num] = 1;
+ /* Make the status visible to the other node before raising the irq */
+ pcie_mbox_shared_data_wmb();
+ if (!intgen->shared_data->mask[source_num])
+ pcie_mbox_intgen_trigger_interrupt(intgen);
+}
+
+/* Clear an interrupt source's status word and publish to shared memory. */
+void pcie_mbox_intgen_clear_source(struct pcie_mbox_intgen *intgen, int source_num)
+{
+ if (source_num >= PCIE_MIF_MBOX_NUM_INTR_SOURCES) {
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid intgen source %d\n", source_num);
+ return;
+ }
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Clear source %s:%d. (P %08x, M %08x)\n",
+ intgen->name, source_num,
+ pcie_mbox_intgen_get_pending_bitmask(intgen),
+ pcie_mbox_intgen_get_masked_bitmask(intgen)
+ );
+
+ intgen->shared_data->status[source_num] = 0;
+ pcie_mbox_shared_data_wmb();
+}
+
+/* Mask an interrupt source so that setting it no longer triggers. */
+void pcie_mbox_intgen_mask_source(struct pcie_mbox_intgen *intgen, int source_num)
+{
+ if (source_num >= PCIE_MIF_MBOX_NUM_INTR_SOURCES) {
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid intgen source %d\n", source_num);
+ return;
+ }
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Mask source %s:%d.\n", intgen->name, source_num);
+
+ intgen->shared_data->mask[source_num] = 1;
+ pcie_mbox_shared_data_wmb();
+}
+
+/* Unmask an interrupt source; emulate hardware by triggering immediately
+ * if the source is already set.
+ */
+void pcie_mbox_intgen_unmask_source(struct pcie_mbox_intgen *intgen, int source_num)
+{
+ if (source_num >= PCIE_MIF_MBOX_NUM_INTR_SOURCES) {
+ SCSC_TAG_ERR(PCIE_MIF, "Invalid intgen source %d\n", source_num);
+ return;
+ }
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "UnMask source %s:%d.\n", intgen->name, source_num);
+
+ intgen->shared_data->mask[source_num] = 0;
+ /* Publish the unmask before checking status to avoid losing a trigger */
+ pcie_mbox_shared_data_wmb();
+ if (intgen->shared_data->status[source_num])
+ pcie_mbox_intgen_trigger_interrupt(intgen);
+}
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Mailbox Interrupt Generator Emulation (Interface)
+*
+****************************************************************************/
+
+#ifndef __PCIE_MBOX_INTGEN_H
+#define __PCIE_MBOX_INTGEN_H
+
+/* Uses */
+
+#include "linux/types.h"
+
+/* Forward Types */
+
+struct pcie_mbox_intgen_shared_data;
+struct functor;
+
+/* Public Types */
+
+/**
+ * Maxwell Mailbox Interrupt Generator Emulation descriptor
+ *
+ * Uses structures in shared memory to emulate the Interrupt Generation
+ * Hardware in conjunction with matching logic in R4 & M4 firmware.
+ *
+ * This is used to implement 3 instances of the Interrupt Generation (intgen)
+ * hardware (to ap, to r4 & to m4).
+ */
+struct pcie_mbox_intgen {
+ char name[16];
+ struct pcie_mbox_intgen_shared_data *shared_data;
+ struct functor *trigger_interrupt_fn;
+};
+
+/* Public Functions */
+
+/**
+ * Initialise this Interrupt Generator.
+ */
+void pcie_mbox_intgen_init(
+
+ /** This Interrupt Generator emulation */
+ struct pcie_mbox_intgen *intgen,
+
+ /** Name for debugging purposes */
+ const char *name,
+
+ /** Pointer to shared data for this emulation */
+ struct pcie_mbox_intgen_shared_data *shared_data,
+
+ /**
+ * Functor to trigger associated hardware interrupt.
+ *
+ * The trigger is abstracted so that the same logic can be
+ * re-used for the incoming and outgoing emulations.
+ */
+ struct functor *trigger_interrupt_fn
+ );
+
+/**
+ * Get this Interrupt Generator's source masked state as a bitmask.
+ */
+u32 pcie_mbox_intgen_get_masked_bitmask(const struct pcie_mbox_intgen *intgen);
+
+/**
+ * Get this Interrupt Generator's source pending state (set and not masked) as a bitmask.
+ */
+u32 pcie_mbox_intgen_get_pending_bitmask(const struct pcie_mbox_intgen *intgen);
+
+/**
+ * Set specified Interrupt Generator source.
+ *
+ * Triggers interrupt on the interrupt target if the source is not masked.
+ */
+void pcie_mbox_intgen_set_source(struct pcie_mbox_intgen *intgen, int source_num);
+
+/**
+ * Clear specified Interrupt Generator source.
+ */
+void pcie_mbox_intgen_clear_source(struct pcie_mbox_intgen *intgen, int source_num);
+
+/**
+ * Mask specified Interrupt Generator source.
+ */
+void pcie_mbox_intgen_mask_source(struct pcie_mbox_intgen *intgen, int source_num);
+
+/**
+ * Unmask specified Interrupt Generator source.
+ *
+ * The associated hardware interrupt will be triggered if
+ * the specified source is currently set.
+ */
+void pcie_mbox_intgen_unmask_source(struct pcie_mbox_intgen *intgen, int source_num);
+
+/**
+ * Is the specified interrupt source pending (set and not masked)?
+ */
+bool pcie_mbox_intgen_is_source_pending(const struct pcie_mbox_intgen *intgen, int source_num);
+
+
+#endif /* __PCIE_MBOX_INTGEN_H */
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Software Mailbox Interrupt Generator Emulation shared data
+* structure.
+*
+****************************************************************************/
+
+#ifndef __PCIE_MBOX_INTGEN_SHARED_DATA_H
+#define __PCIE_MBOX_INTGEN_SHARED_DATA_H
+
+/* Uses */
+
+#include "pcie_mbox_shared_data_defs.h"
+#include "linux/types.h"
+
+/* Types */
+
+/**
+ * Mailbox Interrupt Generator Emulation shared state.
+ *
+ * Notes:
+ * - Structure must be packed.
+ * - All integers are LittleEndian.
+ */
+PCI_MBOX_SHARED_DATA_ATTR struct pcie_mbox_intgen_shared_data {
+ /** Interrupt source mask state (whole word each to avoid RMW issues) */
+ uint32_t mask[PCIE_MIF_MBOX_NUM_INTR_SOURCES];
+ /** Interrupt source set state (whole word each to avoid RMW issues) */
+ uint32_t status[PCIE_MIF_MBOX_NUM_INTR_SOURCES];
+};
+
+#endif /* __PCIE_MBOX_INTGEN_SHARED_DATA_H */
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Software Mailbox Emulation shared data structure.
+*
+* Ref: SC-506707-DD - Structure version 2
+*
+****************************************************************************/
+
+#ifndef __PCIE_MBOX_SHARED_DATA_H
+#define __PCIE_MBOX_SHARED_DATA_H
+
+/* Uses */
+
+#include "pcie_mbox_shared_data_defs.h"
+#include "pcie_mbox_intgen_shared_data.h"
+
+/* Types */
+
+/**
+ * Mailbox Emulation Generator shared state.
+ *
+ * Notes:
+ * - Structure must be packed.
+ * - All integers are LittleEndian.
+ */
+PCI_MBOX_SHARED_DATA_ATTR struct pcie_mbox_shared_data {
+ uint32_t mailbox[PCIE_MIF_MBOX_ISSR_COUNT];
+ uint32_t magic;
+ uint32_t version;
+ struct pcie_mbox_intgen_shared_data ap_interrupt;
+ struct pcie_mbox_intgen_shared_data r4_interrupt;
+ struct pcie_mbox_intgen_shared_data m4_interrupt;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ struct pcie_mbox_intgen_shared_data m4_1_interrupt;
+#endif
+};
+
+#endif /* __PCIE_MBOX_SHARED_DATA_H */
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+* Maxwell Software Mailbox Emulation shared data definitions.
+*
+* Ref: SC-506707-DD - Structure version 2
+*
+****************************************************************************/
+
+#ifndef __PCIE_MBOX_SHARED_DATA_DEFS_H
+#define __PCIE_MBOX_SHARED_DATA_DEFS_H
+
+/* Defines */
+
+/** Offset of shared data structure from end of shared ram */
+#define PCIE_MIF_MBOX_RESERVED_LEN (0x400)
+
+#define PCIE_MIF_MBOX_MAGIC_NUMBER (0x3a11b0c5)
+
+#define PCIE_MIF_MBOX_VERSION_NUMBER (0x00000002)
+
+/**
+ * Number of mailboxes.
+ *
+ * Note: Current hardware supports 16 mailboxes. The extra mailboxes
+ * in the emulation may be used to emulate other signals.
+ */
+#define PCIE_MIF_MBOX_ISSR_COUNT (32)
+
+/**
+ * Number of interrupt sources per Interrupt Generator Emulation instance.
+ *
+ * Note: Current hardware supports 16 sources. The extra sources
+ * in the emulation may be used to emulate other signals
+ * (e.g. RESET_REQUEST from MX to AP).
+ *
+ */
+#define PCIE_MIF_MBOX_NUM_INTR_SOURCES (32)
+
+/**
+ * Structure must be packed.
+ */
+#define PCI_MBOX_SHARED_DATA_ATTR __packed
+
+/**
+ * Write barrier for syncing writes to pcie_mbox_shared_data
+ * shared data area.
+ *
+ * HERE: Can we use something lighter? E.g. dma_wmb()?
+ */
+#define pcie_mbox_shared_data_wmb() wmb()
+
+#endif /* __PCIE_MBOX_SHARED_DATA_DEFS_H */
+
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+*
+****************************************************************************/
+
+/* Implements */
+
+#include "pcie_mif.h"
+
+/* Uses */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include <asm/barrier.h>
+#include <scsc/scsc_logring.h>
+#include "pcie_mif_module.h"
+#include "pcie_proc.h"
+#include "pcie_mbox.h"
+#include "functor.h"
+
+#define PCIE_MIF_RESET_REQUEST_SOURCE 31
+
+/* Module parameters */
+
+static bool enable_pcie_mif_arm_reset = true;
+module_param(enable_pcie_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_pcie_mif_arm_reset, "Enables ARM cores reset");
+
+/* Types */
+
+struct pcie_mif {
+ struct scsc_mif_abs interface;
+ struct pci_dev *pdev;
+ int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+ __iomem void *registers;
+
+ struct device *dev;
+
+ u8 *mem; /* DMA memory mapped to PCIe space for MX-AP comms */
+ struct pcie_mbox mbox; /* mailbox emulation */
+ size_t mem_allocated;
+ dma_addr_t dma_addr;
+
+ /* Callback function and dev pointer mif_intr manager handler */
+ void (*r4_handler)(int irq, void *data);
+ void *irq_dev;
+
+ /* Reset Request handler and context */
+ void (*reset_request_handler)(int irq_num_ignored, void *data);
+ void *reset_request_handler_data;
+
+ /**
+ * Functors to trigger, or simulate, MIF WLBT Mailbox interrupts.
+ *
+ * These functors isolates the Interrupt Generator logic
+ * from differences in physical interrupt generation.
+ */
+ struct functor trigger_ap_interrupt;
+ struct functor trigger_r4_interrupt;
+ struct functor trigger_m4_interrupt;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ struct functor trigger_m4_1_interrupt;
+#endif
+};
+
+/* Private Macros */
+
+/** Upcast from interface member to pcie_mif */
+#define pcie_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct pcie_mif, interface)
+
+/** Upcast from trigger_ap_interrupt member to pcie_mif */
+#define pcie_mif_from_trigger_ap_interrupt(trigger) container_of(trigger, struct pcie_mif, trigger_ap_interrupt)
+
+/** Upcast from trigger_r4_interrupt member to pcie_mif */
+#define pcie_mif_from_trigger_r4_interrupt(trigger) container_of(trigger, struct pcie_mif, trigger_r4_interrupt)
+
+/** Upcast from trigger_m4_interrupt member to pcie_mif */
+#define pcie_mif_from_trigger_m4_interrupt(trigger) container_of(trigger, struct pcie_mif, trigger_m4_interrupt)
+
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+/** Upcast from trigger_m4_interrupt member to pcie_mif */
+#define pcie_mif_from_trigger_m4_1_interrupt(trigger) container_of(trigger, struct pcie_mif, trigger_m4_1_interrupt)
+#endif
+
+/* Private Functions */
+
/* Placeholder MIF interrupt handler installed while no client has
 * registered one; deliberately does nothing. */
static void pcie_mif_irq_default_handler(int irq, void *data)
{
	(void)data;
	(void)irq;
}
+
+static void pcie_mif_emulate_reset_request_interrupt(struct pcie_mif *pcie)
+{
+ /* The RESET_REQUEST interrupt is emulated over PCIe using a spare MIF interrupt source */
+ if (pcie_mbox_is_ap_interrupt_source_pending(&pcie->mbox, PCIE_MIF_RESET_REQUEST_SOURCE)) {
+ /* Invoke handler if registered */
+ if (pcie->reset_request_handler)
+ pcie->reset_request_handler(0, pcie->reset_request_handler_data);
+ /* Clear the source to emulate hardware interrupt behaviour */
+ pcie_mbox_clear_ap_interrupt_source(&pcie->mbox, PCIE_MIF_RESET_REQUEST_SOURCE);
+ }
+}
+
+#ifdef CONFIG_SCSC_QOS
/* PM QoS is not supported on the PCIe MIF; these stubs satisfy the
 * scsc_mif_abs interface and always report success. */
static int pcie_mif_pm_qos_add_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
{
	return 0;
}

static int pcie_mif_pm_qos_update_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
{
	return 0;
}

static int pcie_mif_pm_qos_remove_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req)
{
	return 0;
}
+#endif
+
+irqreturn_t pcie_mif_isr(int irq, void *data)
+{
+ struct pcie_mif *pcie = (struct pcie_mif *)data;
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "MIF Interrupt Received. (Pending 0x%08x, Mask 0x%08x)\n",
+ pcie_mbox_get_ap_interrupt_pending_bitmask(&pcie->mbox),
+ pcie_mbox_get_ap_interrupt_masked_bitmask(&pcie->mbox)
+ );
+
+ /*
+ * Intercept mailbox interrupt sources (numbers > 15) used to emulate other
+ * signalling paths missing from emulator/PCIe hardware.
+ */
+ pcie_mif_emulate_reset_request_interrupt(pcie);
+
+ /* Invoke the normal MIF interrupt handler */
+ if (pcie->r4_handler != pcie_mif_irq_default_handler)
+ pcie->r4_handler(irq, pcie->irq_dev);
+ else
+ SCSC_TAG_INFO(PCIE_MIF, "Any handler registered\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Trigger, or simulate, inbound (to AP) PCIe interrupt.
+ *
+ * Called back via functor.
+ */
+static void pcie_mif_trigger_ap_interrupt(struct functor *trigger)
+{
+ struct pcie_mif *pcie = pcie_mif_from_trigger_ap_interrupt(trigger);
+
+ /*
+ * Invoke the normal isr handler synchronously.
+ *
+ * If synchronous handling proves problematic then launch
+ * an async task or trigger GIC interrupt manually (if supported).
+ */
+ (void)pcie_mif_isr(0, (void *)pcie);
+};
+
+/**
+ * Trigger PCIe interrupt to R4.
+ *
+ * Called back via functor.
+ */
+static void pcie_mif_trigger_r4_interrupt(struct functor *trigger)
+{
+ struct pcie_mif *pcie = pcie_mif_from_trigger_r4_interrupt(trigger);
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Triggering R4 Mailbox interrupt.\n");
+
+ iowrite32(0x00000001, pcie->registers + SCSC_PCIE_NEWMSG);
+ mmiowb();
+};
+
+/**
+ * Trigger PCIe interrupt to M4.
+ *
+ * Called back via functor.
+ */
+static void pcie_mif_trigger_m4_interrupt(struct functor *trigger)
+{
+ struct pcie_mif *pcie = pcie_mif_from_trigger_m4_interrupt(trigger);
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Triggering M4 Mailbox interrupt.\n");
+
+ iowrite32(0x00000001, pcie->registers + SCSC_PCIE_NEWMSG2);
+ mmiowb();
+};
+
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+/**
+ * Trigger PCIe interrupt to M4.
+ *
+ * Called back via functor.
+ */
+static void pcie_mif_trigger_m4_1_interrupt(struct functor *trigger)
+{
+ struct pcie_mif *pcie = pcie_mif_from_trigger_m4_1_interrupt(trigger);
+
+ SCSC_TAG_DEBUG(PCIE_MIF, "Triggering M4 1 Mailbox interrupt.\n");
+
+ iowrite32(0x00000001, pcie->registers + SCSC_PCIE_NEWMSG3);
+ mmiowb();
+};
+#endif
+
/* Nothing to tear down for this MIF abstraction; resources are released
 * in pcie_mif_destroy_pcie() / via managed (devm/pcim) allocations. */
static void pcie_mif_destroy(struct scsc_mif_abs *interface)
{
	(void)interface;
}
+
/* Unique interface id. TODO: derive a real id; fixed "0" for now. */
static char *pcie_mif_get_uid(struct scsc_mif_abs *interface)
{
	(void)interface;

	return "0";
}
+
+static int pcie_mif_reset(struct scsc_mif_abs *interface, bool reset)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+ int ret;
+
+ if (enable_pcie_mif_arm_reset || !reset) {
+ /* Sanity check */
+ iowrite32(0xdeadbeef, pcie->registers + SCSC_PCIE_SIGNATURE);
+ mmiowb();
+ ret = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
+ if (ret != 0xdeadbeef) {
+ SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Can't acces BAR0 magic number. Readed: 0x%x Expected: 0x%x\n",
+ ret, 0xdeadbeef);
+ return -ENODEV;
+ }
+
+ iowrite32(reset ? 1 : 0,
+ pcie->registers + SCSC_PCIE_GRST_OFFSET);
+ mmiowb();
+ } else
+ SCSC_TAG_INFO(PCIE_MIF, "Not resetting ARM Cores enable_pcie_mif_arm_reset: %d\n",
+ enable_pcie_mif_arm_reset);
+ return 0;
+}
+
+static void *pcie_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+ int ret;
+ size_t map_len = PCIE_MIF_ALLOC_MEM;
+
+ if (allocated)
+ *allocated = 0;
+
+ if (map_len > (PCIE_MIF_PREALLOC_MEM - 1)) {
+ SCSC_TAG_ERR(PCIE_MIF, "Error allocating DMA memory, requested %zu, maximum %d, consider different size\n", map_len, PCIE_MIF_PREALLOC_MEM);
+ return NULL;
+ }
+
+ /* should return PAGE_ALIGN Memory */
+ pcie->mem = dma_alloc_coherent(pcie->dev,
+ PCIE_MIF_PREALLOC_MEM, &pcie->dma_addr, GFP_KERNEL);
+ if (pcie->mem == NULL) {
+ SCSC_TAG_ERR(PCIE_MIF, "Error allocating %d DMA memory\n", PCIE_MIF_PREALLOC_MEM);
+ return NULL;
+ }
+
+ pcie->mem_allocated = map_len;
+
+ SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Allocated dma coherent mem: %p addr %p\n", pcie->mem, (void *)pcie->dma_addr);
+
+ iowrite32((unsigned int)pcie->dma_addr,
+ pcie->registers + SCSC_PCIE_OFFSET);
+ mmiowb();
+ ret = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
+ SCSC_TAG_INFO(PCIE_MIF, "Read SHARED_BA 0x%0x\n", ret);
+ if (ret != (unsigned int)pcie->dma_addr) {
+ SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Can't acces BAR0 Shared BA. Readed: 0x%x Expected: 0x%x\n", ret, (unsigned int)pcie->dma_addr);
+ return NULL;
+ }
+
+ /* Initialised the interrupt trigger functors required by mbox emulation */
+ functor_init(&pcie->trigger_ap_interrupt, pcie_mif_trigger_ap_interrupt);
+ functor_init(&pcie->trigger_r4_interrupt, pcie_mif_trigger_r4_interrupt);
+ functor_init(&pcie->trigger_m4_interrupt, pcie_mif_trigger_m4_interrupt);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ functor_init(&pcie->trigger_m4_1_interrupt, pcie_mif_trigger_m4_1_interrupt);
+#endif
+
+ /* Initialise mailbox emulation to use shared memory at the end of PCIE_MIF_PREALLOC_MEM */
+ pcie_mbox_init(
+ &pcie->mbox,
+ pcie->mem + PCIE_MIF_PREALLOC_MEM - PCIE_MIF_MBOX_RESERVED_LEN,
+ pcie->registers,
+ &pcie->trigger_ap_interrupt,
+ &pcie->trigger_r4_interrupt,
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ &pcie->trigger_m4_interrupt,
+ &pcie->trigger_m4_1_interrupt
+#else
+ &pcie->trigger_m4_interrupt
+#endif
+ );
+
+ /* Return the max allocatable memory on this abs. implementation */
+ if (allocated)
+ *allocated = map_len;
+
+ return pcie->mem;
+}
+
+/* HERE: Not sure why mem is passed in - its stored in pcie - as it should be */
+static void pcie_mif_unmap(struct scsc_mif_abs *interface, void *mem)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* Avoid unused parameter error */
+ (void)mem;
+
+ dma_free_coherent(pcie->dev, PCIE_MIF_PREALLOC_MEM, pcie->mem, pcie->dma_addr);
+ SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Freed dma coherent mem: %p addr %p\n", pcie->mem, (void *)pcie->dma_addr);
+}
+
+static u32 pcie_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation component */
+ return pcie_mbox_get_ap_interrupt_masked_bitmask(&pcie->mbox);
+}
+
+static u32 pcie_mif_irq_get(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation component */
+ return pcie_mbox_get_ap_interrupt_pending_bitmask(&pcie->mbox);
+}
+
+static void pcie_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation component */
+ pcie_mbox_clear_ap_interrupt_source(&pcie->mbox, bit_num);
+}
+
+static void pcie_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox interrupt emulation component */
+ pcie_mbox_mask_ap_interrupt_source(&pcie->mbox, bit_num);
+}
+
+static void pcie_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation component */
+ pcie_mbox_unmask_ap_interrupt_source(&pcie->mbox, bit_num);
+}
+
+static void pcie_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation sub-module */
+ pcie_mbox_set_outgoing_interrupt_source(&pcie->mbox, target, bit_num);
+}
+
+static void pcie_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ pcie->r4_handler = handler;
+ pcie->irq_dev = dev;
+}
+
+static void pcie_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ pcie->r4_handler = pcie_mif_irq_default_handler;
+ pcie->irq_dev = NULL;
+}
+
+static void pcie_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ pcie->reset_request_handler = handler;
+ pcie->reset_request_handler_data = dev;
+
+ pcie_mbox_clear_ap_interrupt_source(&pcie->mbox, PCIE_MIF_RESET_REQUEST_SOURCE);
+ pcie_mbox_unmask_ap_interrupt_source(&pcie->mbox, PCIE_MIF_RESET_REQUEST_SOURCE);
+}
+
+static void pcie_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ pcie_mbox_mask_ap_interrupt_source(&pcie->mbox, PCIE_MIF_RESET_REQUEST_SOURCE);
+ pcie->reset_request_handler = NULL;
+}
+
+static u32 *pcie_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ /* delegate to mbox emulation sub-module */
+ return pcie_mbox_get_mailbox_ptr(&pcie->mbox, mbox_index);
+}
+
+static int pcie_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ if ((u8 *)ptr > (pcie->mem + 4 * 1024 * 1024)) {
+ SCSC_TAG_ERR(PCIE_MIF, "ooops limits reached\n");
+ return -ENOMEM;
+ }
+
+ /* Ref is byte offset wrt start of shared memory */
+ *ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)pcie->mem);
+
+ return 0;
+}
+
+static void *pcie_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PCIE_MIF, pcie->dev, "\n");
+
+ /* Check limits */
+ if (ref >= 0 && ref < PCIE_MIF_ALLOC_MEM)
+ return (void *)((uintptr_t)pcie->mem + (uintptr_t)ref);
+ else
+ return NULL;
+}
+
/* Return the page frame number of the shared region base.
 * NOTE(review): virt_to_phys() on dma_alloc_coherent() memory is not
 * guaranteed portable across architectures - confirm this is only used
 * on platforms where the coherent buffer is lowmem-mapped. */
static uintptr_t pcie_mif_get_mif_pfn(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	return virt_to_phys(pcie->mem) >> PAGE_SHIFT;
}
+
+static struct device *pcie_mif_get_mif_device(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ return pcie->dev;
+}
+
/* No-op: the PCIe MIF has no level-triggered line to acknowledge.
 * NOTE(review): signature takes no interface argument, unlike the other
 * ops - verify this matches the irq_clear prototype in scsc_mif_abs. */
static void pcie_mif_irq_clear(void)
{
}
+
/* No register dump implemented for the PCIe MIF abstraction; stub. */
static void pcie_mif_dump_register(struct scsc_mif_abs *interface)
{
}
+
/**
 * Probe-time constructor for the PCIe MIF abstraction.
 *
 * Allocates the per-device state (devm-managed), populates the generic
 * scsc_mif_abs operation table, enables the PCI device, maps BAR0, sets up
 * a single MSI interrupt and a 32-bit DMA mask, asserts Mx reset, and
 * creates the debug proc entries.
 *
 * Returns the embedded scsc_mif_abs interface, or NULL on any failure.
 * Device/IRQ/BAR resources are managed (pcim_/devm_), so error paths rely
 * on devres for cleanup.
 */
struct scsc_mif_abs *pcie_mif_create(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = 0;
	struct scsc_mif_abs *pcie_if;
	struct pcie_mif *pcie = (struct pcie_mif *)devm_kzalloc(&pdev->dev, sizeof(struct pcie_mif), GFP_KERNEL);
	u16 cmd;

	/* Avoid unused parameter error */
	(void)id;

	if (!pcie)
		return NULL;

	pcie_if = &pcie->interface;

	/* initialise interface structure */
	pcie_if->destroy = pcie_mif_destroy;
	pcie_if->get_uid = pcie_mif_get_uid;
	pcie_if->reset = pcie_mif_reset;
	pcie_if->map = pcie_mif_map;
	pcie_if->unmap = pcie_mif_unmap;
#ifdef MAILBOX_SETGET
	pcie_if->mailbox_set = pcie_mif_mailbox_set;
	pcie_if->mailbox_get = pcie_mif_mailbox_get;
#endif
	pcie_if->irq_bit_set = pcie_mif_irq_bit_set;
	pcie_if->irq_get = pcie_mif_irq_get;
	pcie_if->irq_bit_mask_status_get = pcie_mif_irq_bit_mask_status_get;
	pcie_if->irq_bit_clear = pcie_mif_irq_bit_clear;
	pcie_if->irq_bit_mask = pcie_mif_irq_bit_mask;
	pcie_if->irq_bit_unmask = pcie_mif_irq_bit_unmask;
	pcie_if->irq_reg_handler = pcie_mif_irq_reg_handler;
	pcie_if->irq_unreg_handler = pcie_mif_irq_unreg_handler;
	pcie_if->irq_reg_reset_request_handler = pcie_mif_irq_reg_reset_request_handler;
	pcie_if->irq_unreg_reset_request_handler = pcie_mif_irq_unreg_reset_request_handler;
	pcie_if->get_mbox_ptr = pcie_mif_get_mbox_ptr;
	pcie_if->get_mifram_ptr = pcie_mif_get_mifram_ptr;
	pcie_if->get_mifram_ref = pcie_mif_get_mifram_ref;
	pcie_if->get_mifram_pfn = pcie_mif_get_mif_pfn;
	pcie_if->get_mif_device = pcie_mif_get_mif_device;
	pcie_if->irq_clear = pcie_mif_irq_clear;
	pcie_if->mif_dump_registers = pcie_mif_dump_register;
	pcie_if->mif_read_register = NULL;
#ifdef CONFIG_SCSC_QOS
	pcie_if->mif_pm_qos_add_request = pcie_mif_pm_qos_add_request;
	pcie_if->mif_pm_qos_update_request = pcie_mif_pm_qos_update_request;
	pcie_if->mif_pm_qos_remove_request = pcie_mif_pm_qos_remove_request;
#endif
	/* Suspend/resume not supported in PCIe MIF */
	pcie_if->suspend_reg_handler = NULL;
	pcie_if->suspend_unreg_handler = NULL;

	/* Update state */
	pcie->pdev = pdev;

	pcie->dev = &pdev->dev;

	pcie->r4_handler = pcie_mif_irq_default_handler;
	pcie->irq_dev = NULL;

	/* Just do what is necessary to meet the pci probe
	 * -BAR0 stuff
	 * -Interrupt (will be able to handle interrupts?)
	 */

	pci_set_drvdata(pdev, pcie);

	/* Managed enable: devres will disable the device on detach/failure. */
	rc = pcim_enable_device(pdev);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
				 "Error enabling device.\n");
		return NULL;
	}

	/* Resource flags describe the individual BAR. For PCI resources
	 * associated with PCI I/O regions, the information is extracted from
	 * the base address registers. IORESOURCE_MEM must be set for BAR0
	 * since we expect a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect BAR configuration\n");
		return NULL;
	}

	/* Request and iomap regions specified by @mask (0x01 ---> BAR0) */
	rc = pcim_iomap_regions(pdev, BIT(0), DRV_NAME);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
				 "pcim_iomap_regions() failed. Aborting.\n");
		return NULL;
	}


	pci_set_master(pdev);

	/* Access iomap allocation table; entry 0 is the BAR0 mapping. */
	pcie->registers = pcim_iomap_table(pdev)[0];

	/* Set up a single MSI interrupt.
	 * NOTE(review): error paths below do not call pci_disable_msi();
	 * confirm devres (pcim) cleanup covers MSI on probe failure. */
	if (pci_enable_msi(pdev)) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
				 "Failed to enable MSI interrupts. Aborting.\n");
		return NULL;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, pcie_mif_isr, 0,
			      DRV_NAME, pcie);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
				 "Failed to register MSI handler. Aborting.\n");
		return NULL;
	}

	/* 64-bit DMA (DAC) intentionally disabled; 32-bit mask only. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "DMA mask 32bits.\n");
		pcie->dma_using_dac = 0;
	} else {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Failed to set DMA mask. Aborting.\n");
		return NULL;
	}

	/* NOTE(review): cmd is read but never used - possibly a leftover or
	 * an intentional config-space read; confirm before removing. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	/* Make sure Mx is in the reset state */
	pcie_mif_reset(pcie_if, true);

	/* Create debug proc entry */
	pcie_create_proc_dir(pcie);

	return pcie_if;
}
+
/* Tear down one PCIe MIF instance: remove the debug proc entries and
 * disable the PCI device. @interface is unused; per-instance memory is
 * devm-managed and freed with the device. */
void pcie_mif_destroy_pcie(struct pci_dev *pdev, struct scsc_mif_abs *interface)
{
	/* Remove debug proc entry (comment previously said "Create"). */
	pcie_remove_proc_dir();

	pci_disable_device(pdev);
}
+
+struct pci_dev *pcie_mif_get_pci_dev(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !pcie);
+
+ return pcie->pdev;
+}
+
+struct device *pcie_mif_get_dev(struct scsc_mif_abs *interface)
+{
+ struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !pcie);
+
+ return pcie->dev;
+}
+
+
+
+/* Functions for proc entry */
+int pcie_mif_set_bar0_register(struct pcie_mif *pcie, unsigned int value, unsigned int offset)
+{
+ iowrite32(value, pcie->registers + offset);
+ mmiowb();
+
+ return 0;
+}
+
/* Proc-entry helper: snapshot every BAR0 register into @bar0.
 * Field order mirrors the SCSC_PCIE_* offset layout in pcie_mif.h. */
void pcie_mif_get_bar0(struct pcie_mif *pcie, struct scsc_bar0_reg *bar0)
{
	bar0->NEWMSG = ioread32(pcie->registers + SCSC_PCIE_NEWMSG);
	bar0->SIGNATURE = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
	bar0->OFFSET = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
	bar0->RUNEN = ioread32(pcie->registers + SCSC_PCIE_RUNEN);
	bar0->DEBUG = ioread32(pcie->registers + SCSC_PCIE_DEBUG);
	bar0->AXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AXIWCNT);
	bar0->AXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AXIRCNT);
	bar0->AXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AXIWADDR);
	bar0->AXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AXIRADDR);
	bar0->TBD = ioread32(pcie->registers + SCSC_PCIE_TBD);
	bar0->AXICTRL = ioread32(pcie->registers + SCSC_PCIE_AXICTRL);
	bar0->AXIDATA = ioread32(pcie->registers + SCSC_PCIE_AXIDATA);
	bar0->AXIRDBP = ioread32(pcie->registers + SCSC_PCIE_AXIRDBP);
	bar0->IFAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIWCNT);
	bar0->IFAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIRCNT);
	bar0->IFAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIWADDR);
	bar0->IFAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIRADDR);
	bar0->IFAXICTRL = ioread32(pcie->registers + SCSC_PCIE_IFAXICTRL);
	bar0->GRST = ioread32(pcie->registers + SCSC_PCIE_GRST);
	bar0->AMBA2TRANSAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWCNT);
	bar0->AMBA2TRANSAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRCNT);
	bar0->AMBA2TRANSAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWADDR);
	bar0->AMBA2TRANSAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRADDR);
	bar0->AMBA2TRANSAXICTR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXICTR);
	bar0->TRANS2PCIEREADALIGNAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWCNT);
	bar0->TRANS2PCIEREADALIGNAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRCNT);
	bar0->TRANS2PCIEREADALIGNAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWADDR);
	bar0->TRANS2PCIEREADALIGNAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRADDR);
	bar0->TRANS2PCIEREADALIGNAXICTRL = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXICTRL);
	bar0->READROUNDTRIPMIN = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMIN);
	bar0->READROUNDTRIPMAX = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMAX);
	bar0->READROUNDTRIPLAST = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPLAST);
	bar0->CPTAW0 = ioread32(pcie->registers + SCSC_PCIE_CPTAW0);
	bar0->CPTAW1 = ioread32(pcie->registers + SCSC_PCIE_CPTAW1);
	bar0->CPTAR0 = ioread32(pcie->registers + SCSC_PCIE_CPTAR0);
	bar0->CPTAR1 = ioread32(pcie->registers + SCSC_PCIE_CPTAR1);
	bar0->CPTB0 = ioread32(pcie->registers + SCSC_PCIE_CPTB0);
	bar0->CPTW0 = ioread32(pcie->registers + SCSC_PCIE_CPTW0);
	bar0->CPTW1 = ioread32(pcie->registers + SCSC_PCIE_CPTW1);
	bar0->CPTW2 = ioread32(pcie->registers + SCSC_PCIE_CPTW2);
	bar0->CPTR0 = ioread32(pcie->registers + SCSC_PCIE_CPTR0);
	bar0->CPTR1 = ioread32(pcie->registers + SCSC_PCIE_CPTR1);
	bar0->CPTR2 = ioread32(pcie->registers + SCSC_PCIE_CPTR2);
	bar0->CPTRES = ioread32(pcie->registers + SCSC_PCIE_CPTRES);
	bar0->CPTAWDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTAWDELAY);
	bar0->CPTARDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTARDELAY);
	bar0->CPTSRTADDR = ioread32(pcie->registers + SCSC_PCIE_CPTSRTADDR);
	bar0->CPTENDADDR = ioread32(pcie->registers + SCSC_PCIE_CPTENDADDR);
	bar0->CPTSZLTHID = ioread32(pcie->registers + SCSC_PCIE_CPTSZLTHID);
	bar0->CPTPHSEL = ioread32(pcie->registers + SCSC_PCIE_CPTPHSEL);
	bar0->CPTRUN = ioread32(pcie->registers + SCSC_PCIE_CPTRUN);
	bar0->FPGAVER = ioread32(pcie->registers + SCSC_PCIE_FPGAVER);
}
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+*
+****************************************************************************/
+
+#ifndef __PCIE_MIF_H
+#define __PCIE_MIF_H
+#include <linux/pci.h>
+#include "scsc_mif_abs.h"
+
+#ifdef CONDOR
+#define FPGA_OFFSET 0xb8000000
+#else
+#define FPGA_OFFSET 0x80000000
+#endif
+
+#define SCSC_PCIE_MAGIC_VAL 0xdeadbeef
+
+#define SCSC_PCIE_GRST_OFFSET 0x48
+
+/* BAR0 Registers [PCIeBridgeBAR0Regs.doc] */
+#define SCSC_PCIE_NEWMSG 0x0
+#define SCSC_PCIE_SIGNATURE 0x4
+#define SCSC_PCIE_OFFSET 0x8
+#define SCSC_PCIE_RUNEN 0xC
+#define SCSC_PCIE_DEBUG 0x10
+#define SCSC_PCIE_AXIWCNT 0x14
+#define SCSC_PCIE_AXIRCNT 0x18
+#define SCSC_PCIE_AXIWADDR 0x1C
+#define SCSC_PCIE_AXIRADDR 0x20
+#define SCSC_PCIE_TBD 0x24
+#define SCSC_PCIE_AXICTRL 0x28
+#define SCSC_PCIE_AXIDATA 0x2C
+#define SCSC_PCIE_AXIRDBP 0x30
+#define SCSC_PCIE_IFAXIWCNT 0x34
+#define SCSC_PCIE_IFAXIRCNT 0x38
+#define SCSC_PCIE_IFAXIWADDR 0x3C
+#define SCSC_PCIE_IFAXIRADDR 0x40
+#define SCSC_PCIE_IFAXICTRL 0x44
+#define SCSC_PCIE_GRST 0x48
+#define SCSC_PCIE_AMBA2TRANSAXIWCNT 0x4C
+#define SCSC_PCIE_AMBA2TRANSAXIRCNT 0x50
+#define SCSC_PCIE_AMBA2TRANSAXIWADDR 0x54
+#define SCSC_PCIE_AMBA2TRANSAXIRADDR 0x58
+#define SCSC_PCIE_AMBA2TRANSAXICTR 0x5C
+#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIWCNT 0x60
+#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIRCNT 0x64
+#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIWADDR 0x68
+#define SCSC_PCIE_TRANS2PCIEREADALIGNAXIRADDR 0x6C
+#define SCSC_PCIE_TRANS2PCIEREADALIGNAXICTRL 0x70
+#define SCSC_PCIE_READROUNDTRIPMIN 0x74
+#define SCSC_PCIE_READROUNDTRIPMAX 0x78
+#define SCSC_PCIE_READROUNDTRIPLAST 0x7C
+#define SCSC_PCIE_CPTAW0 0x80
+#define SCSC_PCIE_CPTAW1 0x84
+#define SCSC_PCIE_CPTAR0 0x88
+#define SCSC_PCIE_CPTAR1 0x8C
+#define SCSC_PCIE_CPTB0 0x90
+#define SCSC_PCIE_CPTW0 0x94
+#define SCSC_PCIE_CPTW1 0x98
+#define SCSC_PCIE_CPTW2 0x9C
+#define SCSC_PCIE_CPTR0 0xA0
+#define SCSC_PCIE_CPTR1 0xA4
+#define SCSC_PCIE_CPTR2 0xA8
+#define SCSC_PCIE_CPTRES 0xAC
+#define SCSC_PCIE_CPTAWDELAY 0xB0
+#define SCSC_PCIE_CPTARDELAY 0xB4
+#define SCSC_PCIE_CPTSRTADDR 0xB8
+#define SCSC_PCIE_CPTENDADDR 0xBC
+#define SCSC_PCIE_CPTSZLTHID 0xC0
+#define SCSC_PCIE_CPTPHSEL 0xC4
+#define SCSC_PCIE_CPTRUN 0xC8
+#define SCSC_PCIE_FPGAVER 0xCC
+
+/* from mx141 */
+#define SCSC_PCIE_NEWMSG2 0xD0
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+#define SCSC_PCIE_NEWMSG3 0xD4
+#endif
+
/**
 * Snapshot of the BAR0 register file, filled by pcie_mif_get_bar0().
 * Field names and order match the SCSC_PCIE_* offset definitions above.
 */
struct scsc_bar0_reg {
	u32 NEWMSG;
	u32 SIGNATURE;
	u32 OFFSET;
	u32 RUNEN;
	u32 DEBUG;
	u32 AXIWCNT;
	u32 AXIRCNT;
	u32 AXIWADDR;
	u32 AXIRADDR;
	u32 TBD;
	u32 AXICTRL;
	u32 AXIDATA;
	u32 AXIRDBP;
	u32 IFAXIWCNT;
	u32 IFAXIRCNT;
	u32 IFAXIWADDR;
	u32 IFAXIRADDR;
	u32 IFAXICTRL;
	u32 GRST;
	u32 AMBA2TRANSAXIWCNT;
	u32 AMBA2TRANSAXIRCNT;
	u32 AMBA2TRANSAXIWADDR;
	u32 AMBA2TRANSAXIRADDR;
	u32 AMBA2TRANSAXICTR;
	u32 TRANS2PCIEREADALIGNAXIWCNT;
	u32 TRANS2PCIEREADALIGNAXIRCNT;
	u32 TRANS2PCIEREADALIGNAXIWADDR;
	u32 TRANS2PCIEREADALIGNAXIRADDR;
	u32 TRANS2PCIEREADALIGNAXICTRL;
	u32 READROUNDTRIPMIN;
	u32 READROUNDTRIPMAX;
	u32 READROUNDTRIPLAST;
	u32 CPTAW0;
	u32 CPTAW1;
	u32 CPTAR0;
	u32 CPTAR1;
	u32 CPTB0;
	u32 CPTW0;
	u32 CPTW1;
	u32 CPTW2;
	u32 CPTR0;
	u32 CPTR1;
	u32 CPTR2;
	u32 CPTRES;
	u32 CPTAWDELAY;
	u32 CPTARDELAY;
	u32 CPTSRTADDR;
	u32 CPTENDADDR;
	u32 CPTSZLTHID;
	u32 CPTPHSEL;
	u32 CPTRUN;
	u32 FPGAVER;

	/* from mx141 */
	u32 NEWMSG2;
};
+
+struct scsc_mif_abs *pcie_mif_create(struct pci_dev *pdev, const struct pci_device_id *id);
+void pcie_mif_destroy_pcie(struct pci_dev *pdev, struct scsc_mif_abs *interface);
+struct pci_dev *pcie_mif_get_pci_dev(struct scsc_mif_abs *interface);
+struct device *pcie_mif_get_dev(struct scsc_mif_abs *interface);
+
+struct pcie_mif;
+
+void pcie_mif_get_bar0(struct pcie_mif *pcie, struct scsc_bar0_reg *bar0);
+int pcie_mif_set_bar0_register(struct pcie_mif *pcie, unsigned int value, unsigned int offset);
+
+#endif
--- /dev/null
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <scsc/scsc_logring.h>
+#include "pcie_mif_module.h"
+#include "pcie_mif.h"
+
+/* Implements */
+#include "scsc_mif_abs.h"
+
/* One discovered MIF interface (i.e. one probed PCIe device). */
struct mif_abs_node {
	struct list_head    list;
	struct scsc_mif_abs *mif_abs;
};

/* One registered MIF client driver. */
struct mif_driver_node {
	struct list_head           list;
	struct scsc_mif_abs_driver *driver; /* list of drivers (in practice just the core_module) */
};

/* One registered mmap/debug client driver. */
struct mif_mmap_node {
	struct list_head            list;
	struct scsc_mif_mmap_driver *driver; /* list of drivers (in practice just the core_module) */
};

/* Module-global registry tying interfaces to client drivers. */
static struct pcie_mif_module {
	struct list_head mif_abs_list;
	struct list_head mif_driver_list;
	struct list_head mif_mmap_list;
} mif_module = {
	.mif_abs_list = LIST_HEAD_INIT(mif_module.mif_abs_list),
	.mif_driver_list = LIST_HEAD_INIT(mif_module.mif_driver_list),
	.mif_mmap_list = LIST_HEAD_INIT(mif_module.mif_mmap_list),
};
+
+
/* PCI IDs this driver binds to: Xilinx vendor ID with the Samsung SCSC
 * device ID (FPGA-based emulation board). */
static const struct pci_device_id pcie_mif_module_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_SAMSUNG_SCSC) },
	{ /*End: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, pcie_mif_module_tbl);
+
+static void pcie_mif_module_probe_registered_clients(struct scsc_mif_abs *mif_abs)
+{
+ struct mif_driver_node *mif_driver_node, *next;
+ struct device *dev;
+ bool driver_registered = false;
+
+ /* Traverse Linked List for each mif_driver node */
+ list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
+ SCSC_TAG_INFO(PCIE_MIF, "node %p\n", mif_driver_node);
+
+ dev = pcie_mif_get_dev(mif_abs);
+ mif_driver_node->driver->probe(mif_driver_node->driver, mif_abs);
+ driver_registered = true;
+ }
+ if (driver_registered == false)
+ SCSC_TAG_INFO(PCIE_MIF, "No mif drivers registered\n");
+}
+
+static int pcie_mif_module_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mif_abs_node *mif_node;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_node = kzalloc(sizeof(*mif_node), GFP_KERNEL);
+ if (!mif_node)
+ return -ENODEV;
+
+ mif_abs = pcie_mif_create(pdev, id);
+ if (!mif_abs) {
+ SCSC_TAG_INFO(PCIE_MIF, "Error creating PCIe interface\n");
+ kfree(mif_node);
+ return -ENODEV;
+ }
+ /* Add node */
+ mif_node->mif_abs = mif_abs;
+ SCSC_TAG_INFO(PCIE_MIF, "mif_node A %p\n", mif_node);
+ list_add_tail(&mif_node->list, &mif_module.mif_abs_list);
+
+ pcie_mif_module_probe_registered_clients(mif_abs);
+
+ return 0;
+}
+
+static void pcie_mif_module_remove(struct pci_dev *pdev)
+{
+ struct mif_abs_node *mif_node, *next;
+ bool match = false;
+
+ /* Remove node */
+ list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
+ if (pcie_mif_get_pci_dev(mif_node->mif_abs) == pdev) {
+ match = true;
+ SCSC_TAG_INFO(PCIE_MIF, "Match, destroy pcie_mif\n");
+ pcie_mif_destroy_pcie(pdev, mif_node->mif_abs);
+ list_del(&mif_node->list);
+ kfree(mif_node);
+ }
+ }
+ if (match == false)
+ SCSC_TAG_INFO(PCIE_MIF, "FATAL, no match for given scsc_mif_abs\n");
+}
+
/* PCI driver glue; registered via module_pci_driver() below. */
static struct pci_driver scsc_pcie = {
	.name     = DRV_NAME,
	.id_table = pcie_mif_module_tbl,
	.probe    = pcie_mif_module_probe,
	.remove   = pcie_mif_module_remove,
};
+
+void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver)
+{
+ struct mif_driver_node *mif_driver_node;
+ struct mif_abs_node *mif_node;
+ struct device *dev;
+
+ /* Add node in driver linked list */
+ mif_driver_node = kzalloc(sizeof(*mif_driver_node), GFP_KERNEL);
+ if (!mif_driver_node)
+ return;
+
+ mif_driver_node->driver = driver;
+ list_add_tail(&mif_driver_node->list, &mif_module.mif_driver_list);
+
+ /* Traverse Linked List for each mif_abs node */
+ list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
+ dev = pcie_mif_get_dev(mif_node->mif_abs);
+ driver->probe(driver, mif_node->mif_abs);
+ }
+}
+EXPORT_SYMBOL(scsc_mif_abs_register);
+
+void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver)
+{
+ struct mif_driver_node *mif_driver_node, *next;
+
+ /* Traverse Linked List for each mif_driver node */
+ list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
+ if (mif_driver_node->driver == driver) {
+ list_del(&mif_driver_node->list);
+ kfree(mif_driver_node);
+ }
+ }
+}
+EXPORT_SYMBOL(scsc_mif_abs_unregister);
+
+/* Register a mmap - debug driver - for this specific transport*/
+void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver)
+{
+ struct mif_mmap_node *mif_mmap_node;
+ struct mif_abs_node *mif_node;
+
+ /* Add node in driver linked list */
+ mif_mmap_node = kzalloc(sizeof(*mif_mmap_node), GFP_KERNEL);
+ if (!mif_mmap_node)
+ return;
+
+ mif_mmap_node->driver = mmap_driver;
+ list_add_tail(&mif_mmap_node->list, &mif_module.mif_mmap_list);
+
+ /* Traverse Linked List for each mif_abs node */
+ list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
+ mmap_driver->probe(mmap_driver, mif_node->mif_abs);
+ }
+}
+EXPORT_SYMBOL(scsc_mif_mmap_register);
+
+/* Unregister a mmap - debug driver - for this specific transport*/
+void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver)
+{
+ struct mif_mmap_node *mif_mmap_node, *next;
+
+ /* Traverse Linked List for each mif_mmap_driver node */
+ list_for_each_entry_safe(mif_mmap_node, next, &mif_module.mif_mmap_list, list) {
+ if (mif_mmap_node->driver == mmap_driver) {
+ list_del(&mif_mmap_node->list);
+ kfree(mif_mmap_node);
+ }
+ }
+}
+EXPORT_SYMBOL(scsc_mif_mmap_unregister);
+
+module_pci_driver(scsc_pcie);
+
+MODULE_DESCRIPTION("SLSI PCIe mx140 MIF abstraction");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "pcie_mbox_shared_data_defs.h"
+
+#define PCI_DEVICE_ID_SAMSUNG_SCSC 0x7011
+#define DRV_NAME "scscPCIe"
+
+/* Max amount of memory allocated by dma_alloc_coherent */
+#define PCIE_MIF_PREALLOC_MEM (4 * 1024 * 1024)
+/* Memory allocatable by upper layers. This is PCIE_MIF_PREALLOC_MEM minus
+ * the space reserved at the top of the region for mailbox/register
+ * emulation and the Peterson mutex, as sketched below: */
+/* -------------------- PCIE_MIF_PREALLOC_MEM
+ | scsc_mbox_s |
+ | --------------------
+ | peterson_m |
+ | --------------------
+ | /////////////// |
+ | ------------------- PCIE_MIF_ALLOC_MEM
+ | alloc memory |
+ | |
+ | |
+ | --------------------
+ */
+#define PCIE_MIF_ALLOC_MEM ((PCIE_MIF_PREALLOC_MEM) - (PCIE_MIF_MBOX_RESERVED_LEN))
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/uaccess.h>
+#include <scsc/scsc_logring.h>
+#include "pcie_proc.h"
+#include "pcie_mif.h"
+
+static struct proc_dir_entry *procfs_dir;
+static bool pcie_val;
+
+/* singleton */
+struct pcie_mif *pcie_global;
+
+/* Generic proc open: stash the PDE data pointer for the read/write handlers */
+static int pcie_procfs_open_file_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = PCIE_PDE_DATA(inode);
+	return 0;
+}
+
+PCIE_PROCFS_RW_FILE_OPS(pcie_trg);
+PCIE_PROCFS_SEQ_FILE_OPS(pcie_dbg);
+
+static ssize_t pcie_procfs_pcie_trg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	int len;
+
+	/* Report the current trigger value as "0\n" or "1\n" */
+	len = scnprintf(buf, sizeof(buf), "%d\n", pcie_val ? 1 : 0);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+#define ROW 52
+#define COL 2
+/* Register-name -> BAR0 byte-offset table used by the pcie_trg proc write
+ * handler. Offsets are kept as decimal strings and converted with
+ * kstrtou32() at lookup time.
+ */
+char *lookup_regs[ROW][COL] = {
+	{ "NEWMSG", "0" },
+	{ "SIGNATURE", "4" },
+	{ "OFFSET", "8" },
+	{ "RUNEN", "12" },
+	{ "DEBUG", "16" },
+	{ "AXIWCNT", "20" },
+	{ "AXIRCNT", "24" },
+	{ "AXIWADDR", "28" },
+	{ "AXIRADDR", "32" },
+	{ "TBD", "36" },
+	{ "AXICTRL", "40" },
+	{ "AXIDATA", "44" },
+	{ "AXIRDBP", "48" },
+	{ "IFAXIWCNT", "52" },
+	{ "IFAXIRCNT", "56" },
+	{ "IFAXIWADDR", "60" },
+	{ "IFAXIRADDR", "64" },
+	{ "IFAXICTRL", "68" },
+	{ "GRST", "72" },
+	{ "AMBA2TRANSAXIWCNT", "76" },
+	{ "AMBA2TRANSAXIRCNT", "80" },
+	{ "AMBA2TRANSAXIWADDR", "84" },
+	{ "AMBA2TRANSAXIRADDR", "88" },
+	{ "AMBA2TRANSAXICTR", "92" },
+	{ "TRANS2PCIEREADALIGNAXIWCNT", "96" },
+	{ "TRANS2PCIEREADALIGNAXIRCNT", "100" },
+	{ "TRANS2PCIEREADALIGNAXIWADDR", "104" },
+	{ "TRANS2PCIEREADALIGNAXIRADDR", "108" },
+	{ "TRANS2PCIEREADALIGNAXICTRL", "112" },
+	{ "READROUNDTRIPMIN", "116" },
+	{ "READROUNDTRIPMAX", "120" },
+	{ "READROUNDTRIPLAST", "124" },
+	{ "CPTAW0", "128" },
+	{ "CPTAW1", "132" },
+	{ "CPTAR0", "136" },
+	{ "CPTAR1", "140" },
+	{ "CPTB0", "144" },
+	{ "CPTW0", "148" },
+	{ "CPTW1", "152" },
+	{ "CPTW2", "156" },
+	{ "CPTR0", "160" },
+	{ "CPTR1", "164" },
+	{ "CPTR2", "168" },
+	{ "CPTRES", "172" },
+	{ "CPTAWDELAY", "176" },
+	{ "CPTARDELAY", "180" },
+	{ "CPTSRTADDR", "184" },
+	{ "CPTENDADDR", "188" },
+	{ "CPTSZLTHID", "192" },
+	{ "CPTPHSEL", "196" },
+	{ "CPTRUN", "200" },
+	{ "FPGAVER", "204" },
+};
+
+/* Write handler: "<REGISTER> <value (hex)>" pokes a BAR0 register of the
+ * registered endpoint. Register names are looked up in lookup_regs[].
+ */
+static ssize_t pcie_procfs_pcie_trg_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[128];
+	char *sptr, *token;
+	unsigned int len = 0, pass = 0;
+	u32 value = 0;
+	u32 offset = 0; /* must be u32: kstrtou32() writes through it */
+	int i = 0;
+	int rc;
+
+	int match = 0;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	sptr = buf;
+
+	while ((token = strsep(&sptr, " ")) != NULL) {
+		switch (pass) {
+		/* register */
+		case 0:
+			SCSC_TAG_INFO(PCIE_MIF, "str %s\n", lookup_regs[0][0]);
+			SCSC_TAG_INFO(PCIE_MIF, "token %s\n", token);
+			SCSC_TAG_INFO(PCIE_MIF, "len %d\n", len);
+			for (i = 0; i < ROW; i++)
+				if (!strncmp(lookup_regs[i][0], token, len)) {
+					rc = kstrtou32(lookup_regs[i][1], 0, &offset);
+					if (rc)
+						match = 0;
+					else
+						match = 1;
+					break;
+				}
+
+			if (!match) {
+				SCSC_TAG_INFO(PCIE_MIF, "Register %s not Found!!\n", token);
+				SCSC_TAG_INFO(PCIE_MIF, "Type 'cat /proc/driver/pcie_ctrl/pcie_dbg' to get register names\n");
+			}
+			break;
+		/* value */
+		case 1:
+			if ((token[0] == '0') && (token[1] == 'x')) {
+				if (kstrtou32(token, 16, &value)) {
+					SCSC_TAG_INFO(PCIE_MIF, "Incorrect format,,,address should start by 0x\n");
+					SCSC_TAG_INFO(PCIE_MIF, "Example: \"0xaaaabbbb 256 8\"\n");
+					goto error;
+				}
+			} else {
+				SCSC_TAG_INFO(PCIE_MIF, "Incorrect format,,,address should start by 0x\n");
+				SCSC_TAG_INFO(PCIE_MIF, "Example: \"0xaaaabbbb 256 8\"\n");
+				goto error;
+			}
+			break;
+		}
+		pass++;
+	}
+	/* Require exactly two tokens AND a successful register lookup.
+	 * The previous test (pass != 2 && !match) let an unmatched register
+	 * fall through, reading lookup_regs[ROW] out of bounds and writing
+	 * the value to offset 0.
+	 */
+	if (pass != 2 || !match) {
+		SCSC_TAG_INFO(PCIE_MIF, "Wrong format: <register> <value (hex)>\n");
+		SCSC_TAG_INFO(PCIE_MIF, "Example: \"DEBUGADDR 0xaaaabbbb\"\n");
+		goto error;
+	}
+	/* The endpoint may not have probed yet */
+	if (!pcie_global) {
+		SCSC_TAG_INFO(PCIE_MIF, "endpoint not registered\n");
+		goto error;
+	}
+	SCSC_TAG_INFO(PCIE_MIF, "Setting value 0x%x to register %s offset %d\n", value, lookup_regs[i][0], offset);
+	pcie_mif_set_bar0_register(pcie_global, value, offset);
+error:
+	return count;
+}
+
+/* seq_file show handler: snapshot and dump every BAR0 register of the
+ * registered endpoint (same register list as lookup_regs[]).
+ */
+static int pcie_procfs_pcie_dbg_show(struct seq_file *m, void *v)
+{
+	struct scsc_bar0_reg bar0;
+
+	/* pcie_global is only set once the endpoint has probed */
+	if (!pcie_global) {
+		seq_puts(m, "endpoint not registered");
+		return 0;
+	}
+
+	pcie_mif_get_bar0(pcie_global, &bar0);
+
+	seq_puts(m, "\n---------BAR0---------\n");
+
+	seq_printf(m, "NEWMSG 0x%08X\n", bar0.NEWMSG);
+	seq_printf(m, "SIGNATURE 0x%08X\n", bar0.SIGNATURE);
+	seq_printf(m, "OFFSET 0x%08X\n", bar0.OFFSET);
+	seq_printf(m, "RUNEN 0x%08X\n", bar0.RUNEN);
+	seq_printf(m, "DEBUG 0x%08X\n", bar0.DEBUG);
+	seq_printf(m, "AXIWCNT 0x%08X\n", bar0.AXIWCNT);
+	seq_printf(m, "AXIRCNT 0x%08X\n", bar0.AXIRCNT);
+	seq_printf(m, "AXIWADDR 0x%08X\n", bar0.AXIWADDR);
+	seq_printf(m, "AXIRADDR 0x%08X\n", bar0.AXIRADDR);
+	seq_printf(m, "TBD 0x%08X\n", bar0.TBD);
+	seq_printf(m, "AXICTRL 0x%08X\n", bar0.AXICTRL);
+	seq_printf(m, "AXIDATA 0x%08X\n", bar0.AXIDATA);
+	seq_printf(m, "AXIRDBP 0x%08X\n", bar0.AXIRDBP);
+	seq_printf(m, "IFAXIWCNT 0x%08X\n", bar0.IFAXIWCNT);
+	seq_printf(m, "IFAXIRCNT 0x%08X\n", bar0.IFAXIRCNT);
+	seq_printf(m, "IFAXIWADDR 0x%08X\n", bar0.IFAXIWADDR);
+	seq_printf(m, "IFAXIRADDR 0x%08X\n", bar0.IFAXIRADDR);
+	seq_printf(m, "IFAXICTRL 0x%08X\n", bar0.IFAXICTRL);
+	seq_printf(m, "GRST 0x%08X\n", bar0.GRST);
+	seq_printf(m, "AMBA2TRANSAXIWCNT 0x%08X\n", bar0.AMBA2TRANSAXIWCNT);
+	seq_printf(m, "AMBA2TRANSAXIRCNT 0x%08X\n", bar0.AMBA2TRANSAXIRCNT);
+	seq_printf(m, "AMBA2TRANSAXIWADDR 0x%08X\n", bar0.AMBA2TRANSAXIWADDR);
+	seq_printf(m, "AMBA2TRANSAXIRADDR 0x%08X\n", bar0.AMBA2TRANSAXIRADDR);
+	seq_printf(m, "AMBA2TRANSAXICTR 0x%08X\n", bar0.AMBA2TRANSAXICTR);
+	seq_printf(m, "TRANS2PCIEREADALIGNAXIWCNT 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIWCNT);
+	seq_printf(m, "TRANS2PCIEREADALIGNAXIRCNT 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIRCNT);
+	seq_printf(m, "TRANS2PCIEREADALIGNAXIWADDR 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIWADDR);
+	seq_printf(m, "TRANS2PCIEREADALIGNAXIRADDR 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXIRADDR);
+	seq_printf(m, "TRANS2PCIEREADALIGNAXICTRL 0x%08X\n", bar0.TRANS2PCIEREADALIGNAXICTRL);
+	seq_printf(m, "READROUNDTRIPMIN 0x%08X\n", bar0.READROUNDTRIPMIN);
+	seq_printf(m, "READROUNDTRIPMAX 0x%08X\n", bar0.READROUNDTRIPMAX);
+	seq_printf(m, "READROUNDTRIPLAST 0x%08X\n", bar0.READROUNDTRIPLAST);
+	seq_printf(m, "CPTAW0 0x%08X\n", bar0.CPTAW0);
+	seq_printf(m, "CPTAW1 0x%08X\n", bar0.CPTAW1);
+	seq_printf(m, "CPTAR0 0x%08X\n", bar0.CPTAR0);
+	seq_printf(m, "CPTAR1 0x%08X\n", bar0.CPTAR1);
+	seq_printf(m, "CPTB0 0x%08X\n", bar0.CPTB0);
+	seq_printf(m, "CPTW0 0x%08X\n", bar0.CPTW0);
+	seq_printf(m, "CPTW1 0x%08X\n", bar0.CPTW1);
+	seq_printf(m, "CPTW2 0x%08X\n", bar0.CPTW2);
+	seq_printf(m, "CPTR0 0x%08X\n", bar0.CPTR0);
+	seq_printf(m, "CPTR1 0x%08X\n", bar0.CPTR1);
+	seq_printf(m, "CPTR2 0x%08X\n", bar0.CPTR2);
+	seq_printf(m, "CPTRES 0x%08X\n", bar0.CPTRES);
+	seq_printf(m, "CPTAWDELAY 0x%08X\n", bar0.CPTAWDELAY);
+	seq_printf(m, "CPTARDELAY 0x%08X\n", bar0.CPTARDELAY);
+	seq_printf(m, "CPTSRTADDR 0x%08X\n", bar0.CPTSRTADDR);
+	seq_printf(m, "CPTENDADDR 0x%08X\n", bar0.CPTENDADDR);
+	seq_printf(m, "CPTSZLTHID 0x%08X\n", bar0.CPTSZLTHID);
+	seq_printf(m, "CPTPHSEL 0x%08X\n", bar0.CPTPHSEL);
+	seq_printf(m, "CPTRUN 0x%08X\n", bar0.CPTRUN);
+	seq_printf(m, "FPGAVER 0x%08X\n", bar0.FPGAVER);
+	return 0;
+}
+
+static const char *procdir = "driver/pcie_ctrl";
+
+#define PCIE_DIRLEN 128
+
+
+/* Create /proc/driver/pcie_ctrl and its pcie_trg / pcie_dbg entries.
+ * Also publishes @pcie as the module-wide singleton used by the handlers.
+ * Returns 0 on success, -EINVAL on any proc creation failure.
+ */
+int pcie_create_proc_dir(struct pcie_mif *pcie)
+{
+	char dir[PCIE_DIRLEN];
+	struct proc_dir_entry *parent;
+
+	(void)snprintf(dir, sizeof(dir), "%s", procdir);
+	parent = proc_mkdir(dir, NULL);
+	if (parent) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+		parent->data = NULL;
+#endif
+		procfs_dir = parent;
+		PCIE_PROCFS_ADD_FILE(NULL, pcie_trg, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+		/* NOTE: this macro does "goto err" on failure (see pcie_proc.h) */
+		PCIE_PROCFS_SEQ_ADD_FILE(NULL, pcie_dbg, parent, S_IRUSR | S_IRGRP | S_IROTH);
+	} else {
+		SCSC_TAG_INFO(PCIE_MIF, "failed to create /proc dir\n");
+		return -EINVAL;
+	}
+
+	pcie_global = pcie;
+
+	return 0;
+
+/* reached only via goto from PCIE_PROCFS_SEQ_ADD_FILE */
+err:
+	return -EINVAL;
+}
+
+void pcie_remove_proc_dir(void)
+{
+	if (procfs_dir) {
+		/* Drop the proc files first, then the directory itself.
+		 * procdir is a constant path, so it can be passed straight
+		 * to remove_proc_entry() without an intermediate copy.
+		 */
+		PCIE_PROCFS_REMOVE_FILE(pcie_trg, procfs_dir);
+		PCIE_PROCFS_REMOVE_FILE(pcie_dbg, procfs_dir);
+
+		remove_proc_entry(procdir, NULL);
+		procfs_dir = NULL;
+	}
+
+	pcie_global = NULL;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/*
+ * Chip Manager /proc interface
+ */
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/seq_file.h>
+
+#include "pcie_mif.h"
+
+#ifndef SCSC_PCIE_PROC_H
+#define SCSC_PCIE_PROC_H
+
+/* NOTE(review): AID_WIFI is used below as a uid/gid for the proc entries;
+ * the 0444 fallback looks like a file mode, not an Android AID — confirm.
+ */
+#ifndef AID_WIFI
+#define AID_WIFI 0444
+#endif
+
+/* PDE_DATA() replaced PDE(inode)->data in 3.10 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define PCIE_PDE_DATA(inode) PDE_DATA(inode)
+#else
+#define PCIE_PDE_DATA(inode) (PDE(inode)->data)
+#endif
+
+/* Declare the open() wrapper and file_operations for a read-only seq_file
+ * proc entry; the caller must define pcie_procfs_<name>_show().
+ */
+#define PCIE_PROCFS_SEQ_FILE_OPS(name)                                            \
+	static int pcie_procfs_ ## name ## _show(struct seq_file *m, void *v);   \
+	static int pcie_procfs_ ## name ## _open(struct inode *inode, struct file *file) \
+	{                                                                         \
+		return single_open(file, pcie_procfs_ ## name ## _show, PCIE_PDE_DATA(inode)); \
+	}                                                                         \
+	static const struct file_operations pcie_procfs_ ## name ## _fops = {    \
+		.open = pcie_procfs_ ## name ## _open,                            \
+		.read = seq_read,                                                 \
+		.llseek = seq_lseek,                                              \
+		.release = single_release,                                        \
+	}
+
+/* Register a seq_file proc entry declared with PCIE_PROCFS_SEQ_FILE_OPS.
+ * WARNING: jumps to a caller-provided "err" label on failure.
+ */
+#define PCIE_PROCFS_SEQ_ADD_FILE(_sdev, name, parent, mode)                       \
+	do {                                                                      \
+		struct proc_dir_entry *entry;                                     \
+		entry = proc_create_data(# name, mode, parent, &pcie_procfs_ ## name ## _fops, _sdev); \
+		if (!entry) {                                                     \
+			goto err;                                                 \
+		}                                                                 \
+		PCIE_PROCFS_SET_UID_GID(entry);                                   \
+	} while (0)
+
+/* Declare the file_operations for a read/write proc entry; the caller must
+ * define pcie_procfs_<name>_read() and pcie_procfs_<name>_write().
+ */
+#define PCIE_PROCFS_RW_FILE_OPS(name)                                             \
+	static ssize_t pcie_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
+	static ssize_t pcie_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+	static const struct file_operations pcie_procfs_ ## name ## _fops = {    \
+		.read = pcie_procfs_ ## name ## _read,                            \
+		.write = pcie_procfs_ ## name ## _write,                          \
+		.open = pcie_procfs_open_file_generic,                            \
+		.llseek = generic_file_llseek                                     \
+	}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define PCIE_PROCFS_SET_UID_GID(_entry) \
+ do { \
+ kuid_t proc_kuid = KUIDT_INIT(AID_WIFI); \
+ kgid_t proc_kgid = KGIDT_INIT(AID_WIFI); \
+ proc_set_user(_entry, proc_kuid, proc_kgid); \
+ } while (0)
+#else
+#define PCIE_PROCFS_SET_UID_GID(entry) \
+ do { \
+ (entry)->uid = AID_WIFI; \
+ (entry)->gid = AID_WIFI; \
+ } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define PCIE_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &pcie_procfs_ ## name ## _fops, _sdev); \
+ PCIE_PROCFS_SET_UID_GID(entry); \
+ } while (0)
+#else
+#define PCIE_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry; \
+ entry = create_proc_entry(# name, mode, parent); \
+ if (entry) { \
+ entry->proc_fops = &pcie_procfs_ ## name ## _fops; \
+ entry->data = _sdev; \
+ PCIE_PROCFS_SET_UID_GID(entry); \
+ } \
+ } while (0)
+#endif
+
+#define PCIE_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
+
+int pcie_create_proc_dir(struct pcie_mif *pcie);
+void pcie_remove_proc_dir(void);
+
+#endif /* SCSC_PCIE_PROC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_PETERSON_H
+#define __SCSC_PETERSON_H
+
+#include <linux/delay.h>
+#include "mif_reg.h"
+
+#define R4_PROCESS 0
+#define AP_PROCESS 1
+#define DELAY_NS 100 /* delay in ns*/
+
+static inline void peterson_mutex_init(struct peterson_mutex *p_mutex)
+{
+	/* Reset the two-party Peterson mutex: neither side is interested
+	 * and the R4 side gets first turn.
+	 */
+	if (!p_mutex) {
+		pr_info("Mutex not declared\n");
+		return;
+	}
+
+	p_mutex->flag[R4_PROCESS] = false;
+	p_mutex->flag[AP_PROCESS] = false;
+	p_mutex->turn = R4_PROCESS;
+}
+
+/* Acquire the shared-memory Peterson mutex as @process (R4_PROCESS or
+ * AP_PROCESS). Spins (with ndelay backoff) while the other side both wants
+ * the lock and holds the turn.
+ */
+static inline void peterson_mutex_lock(struct peterson_mutex *p_mutex, unsigned int process)
+{
+	unsigned int other = 1 - process;
+
+	p_mutex->flag[process] = true;
+	/* order flag store before turn store */
+	smp_wmb();
+	p_mutex->turn = other;
+	/* Peterson's algorithm needs store->load ordering here: the turn
+	 * store must be globally visible before we read the other side's
+	 * flag. smp_wmb() only orders stores, so a full barrier is required.
+	 */
+	smp_mb();
+
+	while ((p_mutex->flag[other]) && (p_mutex->turn == other))
+		ndelay(DELAY_NS);
+
+	/* acquire: keep critical-section accesses after the spin loop */
+	smp_mb();
+}
+
+/* Release the shared-memory Peterson mutex held by @process. */
+static inline void peterson_mutex_unlock(struct peterson_mutex *p_mutex, unsigned int process)
+{
+	/* Release semantics: writes done inside the critical section must be
+	 * visible before the flag is cleared, so the barrier has to come
+	 * BEFORE the store (the previous smp_wmb() after it ordered nothing
+	 * useful).
+	 */
+	smp_mb();
+	p_mutex->flag[process] = false;
+}
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Implements interface */
+
+#include "platform_mif.h"
+
+/* Interfaces it Uses */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#ifndef CONFIG_SOC_EXYNOS7570
+#include <linux/smc.h>
+#endif
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#endif
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <scsc/scsc_logring.h>
+#ifdef CONFIG_SOC_EXYNOS7570
+#include "mif_reg.h"
+#elif defined(CONFIG_SOC_EXYNOS7872)
+#include "mif_reg_S5E7872.h"
+#elif defined(CONFIG_SOC_EXYNOS7885)
+#include "mif_reg_S5E7885.h"
+#elif defined(CONFIG_SOC_EXYNOS9610)
+#include "mif_reg_S5E9610.h"
+#endif
+#include "platform_mif_module.h"
+#ifdef CONFIG_ARCH_EXYNOS
+#include <linux/soc/samsung/exynos-soc.h>
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+#include <linux/dma-mapping.h>
+#include "mif_reg_smapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include <linux/pm_qos.h>
+#endif
+
+#if !defined(CONFIG_SOC_EXYNOS7872) && !defined(CONFIG_SOC_EXYNOS7570) \
+ && !defined(CONFIG_SOC_EXYNOS7885) && !defined(CONFIG_SOC_EXYNOS9610) && !defined(CONFIG_SOC_EXYNOS9630)
+#error Target processor CONFIG_SOC_EXYNOS7570 or CONFIG_SOC_EXYNOS7872 or CONFIG_SOC_EXYNOS7885 or CONFIG_SOC_EXYNOS9610 not selected
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+/* TODO: this will be put in a header */
+extern int exynos_acpm_set_flag(void);
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+/* Time to wait for CFG_REQ IRQ on 9610 */
+#define WLBT_BOOT_TIMEOUT (HZ)
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of_reserved_mem.h>
+#endif
+static unsigned long sharedmem_base;
+static size_t sharedmem_size;
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+static bool chv_disable_irq;
+module_param(chv_disable_irq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(chv_disable_irq, "Do not register for irq");
+#endif
+
+#ifdef CONFIG_SCSC_GPR4_CON_DEBUG
+static u32 reg_bkp;
+static bool reg_update;
+static void __iomem *gpio_base;
+static bool gpr4_debug;
+module_param(gpr4_debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gpr4_debug, "GPR4 PIO muxes switching to the Maxwell. Default = N. Effective on Maxwell power on");
+#endif
+
+static bool enable_platform_mif_arm_reset = true;
+module_param(enable_platform_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_platform_mif_arm_reset, "Enables WIFIBT ARM cores reset");
+
+#ifdef CONFIG_SCSC_QOS
+struct qos_table {
+ unsigned int freq_mif;
+ unsigned int freq_int;
+ unsigned int freq_cl0;
+ unsigned int freq_cl1;
+};
+#endif
+
+/* Per-device state for the Exynos platform implementation of the
+ * scsc_mif_abs interface. The embedded "interface" member must stay first
+ * so platform_mif_from_mif_abs() (container_of) works.
+ */
+struct platform_mif {
+	struct scsc_mif_abs interface;
+	struct scsc_mbox_s *mbox;
+	struct platform_device *pdev;
+
+	struct device *dev;
+
+	/* Per-IRQ bookkeeping (index by PLATFORM_MIF_* enum) */
+	struct {
+		int irq_num;
+		int flags;
+		atomic_t irq_disabled_cnt;
+	} wlbt_irq[PLATFORM_MIF_NUM_IRQS];
+
+	/* MIF registers preserved during suspend */
+	struct {
+		u32 irq_bit_mask;
+	} mif_preserve;
+
+	/* register MBOX memory space */
+	size_t reg_start;
+	size_t reg_size;
+	void __iomem *base;
+
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+	/* register MBOX memory space for M4 */
+	size_t reg_start_m4;
+	size_t reg_size_m4;
+	void __iomem *base_m4;
+#endif
+	/* register CMU memory space */
+	struct regmap *cmu_base;
+#ifdef CONFIG_SCSC_CLK20MHZ
+	u32 usbpll_delay;
+#endif
+
+	void __iomem *con0_base;
+
+	/* pmu syscon regmap */
+	struct regmap *pmureg;
+#if defined(CONFIG_SOC_EXYNOS9610)
+	struct regmap *baaw_p_wlbt;
+	struct regmap *dbus_baaw;
+	struct regmap *pbus_baaw;
+	struct regmap *wlbt_remap;
+	struct regmap *boot_cfg;
+
+	/* Signalled when CFG_REQ IRQ handled */
+	struct completion cfg_ack;
+
+	/* State of CFG_REQ handler */
+	enum wlbt_boot_state {
+		WLBT_BOOT_IN_RESET = 0,
+		WLBT_BOOT_WAIT_CFG_REQ,
+		WLBT_BOOT_CFG_DONE,
+		WLBT_BOOT_CFG_ERROR
+	} boot_state;
+
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+	/* SMAPPER: per-bank config parsed from the devicetree */
+	void __iomem *smapper_base;
+	u8 smapper_banks;
+	struct {
+		u8 bank;
+		u32 ws;
+		bool large;
+		struct scsc_mif_smapper_info bank_info;
+	} *smapper;
+#endif
+	/* Shared memory space - reserved memory */
+	unsigned long mem_start;
+	size_t mem_size;
+	void __iomem *mem;
+
+	/* Callback function and dev pointer mif_intr manager handler */
+	void (*r4_handler)(int irq, void *data);
+	void *irq_dev;
+	/* spinlock to serialize driver access */
+	spinlock_t mif_spinlock;
+	void (*reset_request_handler)(int irq, void *data);
+	void *irq_reset_request_dev;
+
+#ifdef CONFIG_SCSC_QOS
+	/* QoS table */
+	struct qos_table *qos;
+	bool qos_enabled;
+#endif
+	/* Suspend/resume handlers */
+	int (*suspend_handler)(struct scsc_mif_abs *abs, void *data);
+	void (*resume_handler)(struct scsc_mif_abs *abs, void *data);
+	void *suspendresume_data;
+};
+
+extern int mx140_log_dump(void);
+
+#define platform_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct platform_mif, interface)
+
+#ifdef CONFIG_SCSC_CLK20MHZ
+static void __platform_mif_usbpll_claim(struct platform_mif *platform, bool wlbt);
+#endif
+
+/* 32-bit MMIO write into the mapped MBOX register block (byte offset) */
+inline void platform_mif_reg_write(struct platform_mif *platform, u16 offset, u32 value)
+{
+	writel(value, platform->base + offset);
+}
+
+/* 32-bit MMIO read from the mapped MBOX register block (byte offset) */
+inline u32 platform_mif_reg_read(struct platform_mif *platform, u16 offset)
+{
+	return readl(platform->base + offset);
+}
+
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+/* 32-bit MMIO write into the M4 MBOX register block (byte offset) */
+inline void platform_mif_reg_write_m4(struct platform_mif *platform, u16 offset, u32 value)
+{
+	writel(value, platform->base_m4 + offset);
+}
+
+/* 32-bit MMIO read from the M4 MBOX register block (byte offset) */
+inline u32 platform_mif_reg_read_m4(struct platform_mif *platform, u16 offset)
+{
+	return readl(platform->base_m4 + offset);
+}
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* 32-bit MMIO write into the SMAPPER register block (byte offset) */
+inline void platform_mif_reg_write_smapper(struct platform_mif *platform, u16 offset, u32 value)
+{
+	writel(value, platform->smapper_base + offset);
+}
+
+/* 32-bit MMIO read from the SMAPPER register block (byte offset) */
+inline u32 platform_mif_reg_read_smapper(struct platform_mif *platform, u16 offset)
+{
+	return readl(platform->smapper_base + offset);
+}
+
+#define PLATFORM_MIF_SHIFT_SMAPPER_ADDR 11 /* From 36 bits addres to 25 bits */
+#define PLATFORM_MIF_SHIFT_SMAPPER_END 4 /* End address aligment */
+
+/* Platform is responsible to give the phys mapping of the SMAPPER maps */
+static int platform_mif_smapper_get_mapping(struct scsc_mif_abs *interface, u8 *phy_map, u16 *align)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 bank;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Mapping %d banks\n", platform->smapper_banks);
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	/* Report each bank's size class to the caller */
+	for (bank = 0; bank < platform->smapper_banks; bank++)
+		phy_map[bank] = platform->smapper[bank].large ?
+				SCSC_MIF_ABS_LARGE_BANK : SCSC_MIF_ABS_SMALL_BANK;
+
+	/* Entries are aligned to the SMAPPER granule (1 << 11 bytes) */
+	if (align)
+		*align = 1 << PLATFORM_MIF_SHIFT_SMAPPER_ADDR;
+
+	return 0;
+}
+
+/* Copy the parsed devicetree info (entry count, mapped range) for @bank.
+ * Returns -EINVAL if SMAPPER is not configured or @bank is out of range.
+ */
+static int platform_mif_smapper_get_bank_info(struct scsc_mif_abs *interface, u8 bank, struct scsc_mif_smapper_info *bank_info)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	/* platform->smapper has exactly smapper_banks entries; an unchecked
+	 * index would read past the end of the array.
+	 */
+	if (bank >= platform->smapper_banks)
+		return -EINVAL;
+
+	bank_info->num_entries = platform->smapper[bank].bank_info.num_entries;
+	bank_info->mem_range_bytes = platform->smapper[bank].bank_info.mem_range_bytes;
+
+	return 0;
+}
+
+/* Encode a granularity in bytes as the SMAPPER field value:
+ * <=2K -> 0, <=4K -> 1, ..., <=128K -> 6, anything larger -> 7.
+ */
+static u8 platform_mif_smapper_granularity_to_bits(u32 granularity)
+{
+	u8 bits;
+	u32 limit = 2 * 1024;
+
+	for (bits = 0; bits < 7; bits++) {
+		if (granularity <= limit)
+			return bits;
+		limit <<= 1;
+	}
+	return 7;
+}
+
+/* FW window start address of @bank; 0 when SMAPPER was never parsed */
+static u32 platform_mif_smapper_get_bank_base_address(struct scsc_mif_abs *interface, u8 bank)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	return platform->smapper ? platform->smapper[bank].ws : 0;
+}
+
+/* Configure smapper according the memory map and range */
+static void platform_mif_smapper_configure(struct scsc_mif_abs *interface, u32 granularity)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 i;
+	u8 gran;
+	u8 nb = platform->smapper_banks;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Configure SMAPPER with granularity %d\n", granularity);
+
+	gran = platform_mif_smapper_granularity_to_bits(granularity);
+
+	/* NOTE: register write order below follows the hardware programming
+	 * sequence — do not reorder.
+	 */
+	platform_mif_reg_write_smapper(platform, SMAPPER_QCH_DISABLE, 1);
+	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AR, 0);
+	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AW, 0);
+	/* Program SMAPPER memmap */
+	for (i = 0; i < nb; i++) {
+		/* Set ADDR_MAP_EN to 1'b0*/
+		platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(i), 0);
+		/* Set START_ADDR */
+		platform_mif_reg_write_smapper(platform, START_ADDR(i), platform->smapper[i].ws);
+		/* Set ADDR_GRANULARITY - FIXED AT 4KB */
+		platform_mif_reg_write_smapper(platform, ADDR_GRANULARITY(i), gran);
+		/* WLAN_ADDR_MAP operation is started */
+	}
+	/* Set access window control (MSB 32bits Start/End address) */
+	/* Remapped address should be ranged from AW_START_ADDR to AW_EN_ADDR */
+	platform_mif_reg_write_smapper(platform, AW_START_ADDR, 0);
+	platform_mif_reg_write_smapper(platform, AW_END_ADDR, dma_get_mask(platform->dev) >> PLATFORM_MIF_SHIFT_SMAPPER_END);
+	/* ensure all register writes are issued before returning */
+	smp_mb();
+}
+
+/* Caller is responsible of validating the phys address (alignment) */
+/* Writes @num_entries remap entries (starting at @first_entry) into the
+ * SRAM mapping table of @bank, then re-enables the bank mapping. Each
+ * write is read back to detect SRAM write failures.
+ * Returns 0 on success, -EINVAL on bad input, -EFAULT on readback mismatch.
+ */
+static int platform_mif_smapper_write_sram(struct scsc_mif_abs *interface, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 i;
+	u32 rb;
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	if (!platform->smapper_base) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "SMAPPER not enabled\n");
+		return -EINVAL;
+	}
+
+	/* Set ADDR_MAP_EN to 1'b0*/
+	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 0);
+	/* Write mapping table to SRAM. Each entry consists of 25 bits MSB address to remap */
+	for (i = 0; i < num_entries; i++) {
+		if (!addr[i]) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ADDR IS NULL at bank %d entry %d/%d\n", bank, first_entry + i, num_entries);
+			return -EINVAL;
+		}
+		/* Set SRAM_WRITE_CTRL to 1'b1*/
+		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 1);
+		platform_mif_reg_write_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i), addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
+		/* check incorrect writings */
+		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 0);
+		rb = platform_mif_reg_read_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i));
+		if (rb != addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "incorrect mapping detected rb 0x%x, addr 0x%x\n", rb, (u32)addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
+			return -EFAULT;
+		}
+	}
+	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 1);
+	/* ensure all register writes are issued before returning */
+	smp_mb();
+	return 0;
+}
+
+/* Parse the per-bank SMAPPER configuration from the devicetree node @np
+ * and ioremap the SMAPPER register block ("smapper_reg" property).
+ * On any failure the smapper state is fully rolled back.
+ */
+static int platform_mif_parse_smapper(struct platform_mif *platform, struct device_node *np, u8 num_banks)
+{
+	/* SMAPPER parsing */
+	struct device_node *np_banks;
+	char node_name[50];
+	u32 val[2];
+	u8 i;
+	u32 bank = 0, ws = 0, wsz = 0, ent = 0, large = 0;
+	int ret;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "banks found: %d szof %zu\n", num_banks, sizeof(*platform->smapper));
+
+	platform->smapper = kmalloc_array(num_banks, sizeof(*platform->smapper), GFP_KERNEL);
+
+	if (!platform->smapper)
+		return -ENOMEM;
+
+	for (i = 0; i < num_banks; i++) {
+		snprintf(node_name, sizeof(node_name), "smapper_bank_%d", i);
+		np_banks = of_find_node_by_name(np, node_name);
+		if (!np_banks) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "%s: could not find smapper_bank\n",
+					 node_name);
+			kfree(platform->smapper);
+			platform->smapper = NULL;
+			return -ENOENT;
+		}
+		of_property_read_u32(np_banks, "bank_num", &bank);
+		of_property_read_u32(np_banks, "fw_window_start", &ws);
+		of_property_read_u32(np_banks, "fw_window_size", &wsz);
+		of_property_read_u32(np_banks, "num_entries", &ent);
+		of_property_read_u32(np_banks, "is_large", &large);
+		/* drop the reference taken by of_find_node_by_name() */
+		of_node_put(np_banks);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				  "bank %d fw_w_start 0x%x fw_w_sz 0x%x entries %d is_large %d\n",
+				  bank, ws, wsz, ent, large);
+
+		platform->smapper[i].bank = (u8)bank;
+		platform->smapper[i].ws = ws;
+		platform->smapper[i].large = (bool)large;
+		platform->smapper[i].bank_info.num_entries = ent;
+		platform->smapper[i].bank_info.mem_range_bytes = wsz;
+	}
+
+	/* Update the number of banks before returning */
+	platform->smapper_banks = num_banks;
+
+	/* Fail cleanly if "smapper_reg" is absent: val[] would otherwise be
+	 * used uninitialized for the ioremap below.
+	 */
+	ret = of_property_read_u32_array(np, "smapper_reg", val, 2);
+	if (ret) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Error reading smapper_reg property: %d\n", ret);
+		platform->smapper_banks = 0;
+		kfree(platform->smapper);
+		platform->smapper = NULL;
+		return -ENOENT;
+	}
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "smapper reg address 0x%x size 0x%x\n", val[0], val[1]);
+	platform->smapper_base =
+		devm_ioremap_nocache(platform->dev, val[0], val[1]);
+
+	if (!platform->smapper_base) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Error mapping smapper register region\n");
+		/* reset bank count too, so no caller trusts a freed table */
+		platform->smapper_banks = 0;
+		kfree(platform->smapper);
+		platform->smapper = NULL;
+		return -ENOENT;
+	}
+
+	return 0;
+}
+#endif
+#ifdef CONFIG_SCSC_QOS
+/* Parse the "qos_table" devicetree property: exactly 3 rows (MIN/MED/MAX)
+ * of 4 u32 fields each. Sets qos_enabled only when the table is loaded.
+ */
+static int platform_mif_parse_qos(struct platform_mif *platform, struct device_node *np)
+{
+	int len, i;
+
+	platform->qos_enabled = false;
+
+	len = of_property_count_u32_elems(np, "qos_table");
+	if (len != 12) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				  "No qos table for wlbt, or incorrect size\n");
+		return -ENOENT;
+	}
+
+	platform->qos = devm_kzalloc(platform->dev, sizeof(struct qos_table) * len / 4, GFP_KERNEL);
+	if (!platform->qos)
+		return -ENOMEM;
+
+	of_property_read_u32_array(np, "qos_table", (unsigned int *)platform->qos, len);
+
+	for (i = 0; i < len / 4; i++)
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "QoS Table[%d] mif : %u int : %u cl0 : %u cl1: %u\n", i,
+				  platform->qos[i].freq_mif,
+				  platform->qos[i].freq_int,
+				  platform->qos[i].freq_cl0,
+				  platform->qos[i].freq_cl1);
+
+	platform->qos_enabled = true;
+	return 0;
+}
+
+/* Map a QoS config level to its row of the parsed qos table; unknown
+ * levels yield an all-zero table (no constraints).
+ */
+struct qos_table platform_mif_pm_qos_get_table(struct platform_mif *platform, enum scsc_qos_config config)
+{
+	struct qos_table table = { 0 };
+	int row;
+
+	switch (config) {
+	case SCSC_QOS_MIN:
+		row = 0;
+		break;
+	case SCSC_QOS_MED:
+		row = 1;
+		break;
+	case SCSC_QOS_MAX:
+		row = 2;
+		break;
+	default:
+		return table;
+	}
+
+	table = platform->qos[row];
+	return table;
+}
+
+/* Register four PM QoS constraints (MIF/INT bus, CPU cluster 0/1) at the
+ * frequencies of the requested config level. Pairs with _remove_request().
+ */
+static int platform_mif_pm_qos_add_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	struct qos_table table;
+
+	if (!platform)
+		return -ENODEV;
+
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	table = platform_mif_pm_qos_get_table(platform, config);
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "PM QoS add request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, table.freq_mif, table.freq_int, table.freq_cl0, table.freq_cl1);
+
+	pm_qos_add_request(&qos_req->pm_qos_req_mif, PM_QOS_BUS_THROUGHPUT, table.freq_mif);
+	pm_qos_add_request(&qos_req->pm_qos_req_int, PM_QOS_DEVICE_THROUGHPUT, table.freq_int);
+	pm_qos_add_request(&qos_req->pm_qos_req_cl0, PM_QOS_CLUSTER0_FREQ_MIN, table.freq_cl0);
+	pm_qos_add_request(&qos_req->pm_qos_req_cl1, PM_QOS_CLUSTER1_FREQ_MIN, table.freq_cl1);
+
+	return 0;
+}
+
+/* Retarget the four PM QoS constraints previously added by
+ * _add_request() to the frequencies of the new config level.
+ */
+static int platform_mif_pm_qos_update_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	struct qos_table table;
+
+	if (!platform)
+		return -ENODEV;
+
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	table = platform_mif_pm_qos_get_table(platform, config);
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "PM QoS update request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, table.freq_mif, table.freq_int, table.freq_cl0, table.freq_cl1);
+
+	pm_qos_update_request(&qos_req->pm_qos_req_mif, table.freq_mif);
+	pm_qos_update_request(&qos_req->pm_qos_req_int, table.freq_int);
+	pm_qos_update_request(&qos_req->pm_qos_req_cl0, table.freq_cl0);
+	pm_qos_update_request(&qos_req->pm_qos_req_cl1, table.freq_cl1);
+
+	return 0;
+}
+
+/* Drop the four PM QoS constraints registered by _add_request(). */
+static int platform_mif_pm_qos_remove_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	if (!platform)
+		return -ENODEV;
+
+
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS remove request\n");
+	pm_qos_remove_request(&qos_req->pm_qos_req_mif);
+	pm_qos_remove_request(&qos_req->pm_qos_req_int);
+	pm_qos_remove_request(&qos_req->pm_qos_req_cl0);
+	pm_qos_remove_request(&qos_req->pm_qos_req_cl1);
+
+	return 0;
+}
+#endif
+
+/* Placeholder installed until a real MIF interrupt handler is registered */
+static void platform_mif_irq_default_handler(int irq, void *data)
+{
+	/* Avoid unused parameter error */
+	(void)irq;
+	(void)data;
+
+	/* int handler not registered */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT handler not registered\n");
+}
+
+/* Placeholder installed until a reset_request handler is registered */
+static void platform_mif_irq_reset_request_default_handler(int irq, void *data)
+{
+	/* Avoid unused parameter error */
+	(void)irq;
+	(void)data;
+
+	/* int handler not registered */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT reset_request handler not registered\n");
+}
+
+/* Top-level MIF ISR: dispatch to the registered R4 handler (if any) */
+irqreturn_t platform_mif_isr(int irq, void *data)
+{
+	struct platform_mif *platform = (struct platform_mif *)data;
+
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "INT %pS\n", platform->r4_handler);
+	if (platform->r4_handler != platform_mif_irq_default_handler)
+		platform->r4_handler(irq, platform->irq_dev);
+	else
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MIF Interrupt Handler not registered\n");
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+/* ISR for the optional ALIVE interrupt: acknowledge and log only. */
+irqreturn_t platform_alive_isr(int irq, void *data)
+{
+ struct platform_mif *platform = (struct platform_mif *)data;
+
+ (void)irq; /* unused */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/* Top-half ISR for the WLBT watchdog (reset request) interrupt.
+ * Masks the line and forwards the event to the registered reset_request
+ * handler (recovery path re-enables it later); if no handler is
+ * registered the disable is counted so it can be balanced on unregister.
+ * In both cases the PMU RESET_REQ latch is cleared so the line can
+ * assert again.
+ */
+irqreturn_t platform_wdog_isr(int irq, void *data)
+{
+ int ret = 0;
+ struct platform_mif *platform = (struct platform_mif *)data;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+ if (platform->reset_request_handler != platform_mif_irq_reset_request_default_handler) {
+ /* Mask until the recovery handler has run */
+ disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ platform->reset_request_handler(irq, platform->irq_reset_request_dev);
+ } else {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG Interrupt reset_request_handler not registered\n");
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Disabling unhandled WDOG IRQ.\n");
+ disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ /* Remember the disable so unregister can reset the count */
+ atomic_inc(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt);
+ }
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* Clear the PMU reset-request latch (WLBT block on 9610) */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ WLBT_RESET_REQ_CLR, WLBT_RESET_REQ_CLR);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clearing WLBT_RESET_REQ\n");
+ if (ret < 0)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to Set WLBT_CTRL_NS[WLBT_RESET_REQ_CLR]: %d\n", ret);
+#else
+ /* Clear the PMU reset-request latch (WIFI block on older SoCs) */
+ ret = regmap_update_bits(platform->pmureg, WIFI_CTRL_NS,
+ WIFI_RESET_REQ_CLR, WIFI_RESET_REQ_CLR);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clearing WIFI_RESET_REQ\n");
+ if (ret < 0)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to Set WIFI_CTRL_NS[WIFI_RESET_REQ_CLR]: %d\n", ret);
+#endif
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SOC_EXYNOS9610
+/*
+ * Attached array contains the replacement PMU boot code which should
+ * be programmed using the CBUS during the config phase.
+ */
+/* NOTE: opaque firmware blob packed as 32-bit words; written verbatim to
+ * the BOOT_CFG region by platform_cfg_req_isr(). Do not hand-edit.
+ */
+uint32_t ka_patch[] = {
+ /* Low temp fix 28/1
+ * Maxwell142 PMU+PROC combined boot ROM
+ * IP Version: 0xA3
+ * Major Version: 0xF, Minor Version: 0xF
+ * PMU ROM version: 0x4
+ * PROC ROM version: 0x0
+ */
+ 0x90750002,
+ 0x11a4c218,
+ 0x75671191,
+ 0x9075e090,
+ 0x54b3e5e7,
+ 0x30f76001,
+ 0xb14315a2,
+ 0xb4b2e503,
+ 0xb153fb03,
+ 0xa90185fc,
+ 0xacf50774,
+ 0x75fdadb5,
+ 0xb47508a0,
+ 0x54b3e501,
+ 0x75fa7001,
+ 0x907500b4,
+ 0x78cb8018,
+ 0x80837982,
+ 0x07a075c5,
+ 0xb0783779,
+ 0xb40754e6,
+ 0x0b800207,
+ 0xc404f6d9,
+ 0xaf7590f5,
+ 0x75038000,
+ 0x53229090,
+ 0xce53eff7,
+ 0xd90479fe,
+ 0xfdce53fe,
+ 0xfed90c79,
+ 0x75fbce53,
+ 0x91530b92,
+ 0xf7ce53fd,
+ 0x5308f943,
+ 0xf922fef9,
+ 0xfbd8fed9,
+ 0x019e7522,
+ 0x75cfc175,
+ 0xc375a4c2,
+ 0x47c4754a,
+ 0x75a4c575,
+ 0xc7756dc6,
+ 0x03d27540,
+ 0x7510d375,
+ 0xca7500c9,
+ 0x00cb75d0,
+ 0x7500cc75,
+ 0x9b75009a,
+ 0x009c75c0,
+ 0x78009d75,
+ 0x12827402,
+ 0xc6438b80,
+ 0x74097802,
+ 0x8b8012e7,
+ 0x75d09075,
+ 0x9e750291,
+ 0x01a97502,
+ 0x00000022,
+};
+
+extern bool reset_failed;
+
+/* ISR for the WLBT CFG_REQ interrupt (Exynos 9610 boot handshake).
+ * WLBT asserts CFG_REQ when it comes out of reset; the AP must then set
+ * TZPC non-secure, program the remap/BAAW address windows, download the
+ * ka_patch PMU boot code, and acknowledge with BOOT_CFG_ACK. On success
+ * the boot state advances to WLBT_BOOT_CFG_DONE and cfg_ack is
+ * completed; any register failure jumps to cfg_error, which marks
+ * WLBT_BOOT_CFG_ERROR but still completes cfg_ack so the waiter wakes.
+ * A CFG_REQ outside WLBT_BOOT_WAIT_CFG_REQ is treated as evidence the
+ * HW failed to stay in reset: state is dumped and reset_failed is set.
+ * The exact register write order below follows the HW bring-up
+ * sequence and must not be reordered.
+ */
+irqreturn_t platform_cfg_req_isr(int irq, void *data)
+{
+ struct platform_mif *platform = (struct platform_mif *)data;
+ const u64 EXYNOS_WLBT = 0x1;
+ u64 ret64 = 0;
+ s32 ret = 0;
+ unsigned int ka_addr = 0x1000;
+ uint32_t *ka_patch_addr = ka_patch;
+ u32 id;
+
+/* Bail out to cfg_error on any failed register write below */
+#define CHECK(x) do { \
+ int retval = (x); \
+ if (retval < 0) \
+ goto cfg_error; \
+} while (0)
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "disable_irq\n");
+
+ /* mask the irq */
+ disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+ /* Was the CFG_REQ irq received from WLBT before we expected it?
+ * Typically this indicates an issue returning WLBT HW to reset.
+ */
+ if (platform->boot_state != WLBT_BOOT_WAIT_CFG_REQ) {
+ u32 val;
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Spurious CFG_REQ IRQ from WLBT!\n");
+
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+
+ regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_NS 0x%x\n", val);
+
+ regmap_read(platform->pmureg, WLBT_CTRL_S, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_S 0x%x\n", val);
+
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+ reset_failed = true; /* prevent further interaction with HW */
+
+ return IRQ_HANDLED;
+ }
+
+ /* CBUS should be ready before we get CFG_REQ, but we suspect
+ * CBUS is not ready yet. add some delay to see if that helps
+ */
+ udelay(100);
+
+ /* Set TZPC to non-secure mode */
+ ret64 = exynos_smc(SMC_CMD_CONN_IF, (EXYNOS_WLBT << 32) | EXYNOS_SET_CONN_TZPC, 0, 0);
+ if (ret64)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to set TZPC to non-secure mode: %llu\n", ret64);
+ else
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "SMC_CMD_CONN_IF run successfully : %llu\n", ret64);
+
+ /* WLBT_REMAP */
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP begin\n");
+ CHECK(regmap_write(platform->wlbt_remap, 0x0, WLBT_DBUS_BAAW_0_START >> 12));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP end\n");
+
+ /* CHIP_VERSION_ID - overload with EMA settings */
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "CHIP_VERSION_ID begin\n");
+ regmap_read(platform->wlbt_remap, 0x10, &id);
+ id &= ~CHIP_VERSION_ID_EMA_MASK;
+ id |= CHIP_VERSION_ID_EMA_VALUE;
+ CHECK(regmap_write(platform->wlbt_remap, 0x10, id));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "CHIP_VERSION_ID 0x%x end\n", id);
+
+ /* DBUS_BAAW regions */
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW begin\n");
+ CHECK(regmap_write(platform->dbus_baaw, 0x0, WLBT_DBUS_BAAW_0_START >> 12));
+ CHECK(regmap_write(platform->dbus_baaw, 0x4, WLBT_DBUS_BAAW_0_END >> 12));
+ CHECK(regmap_write(platform->dbus_baaw, 0x8, platform->mem_start >> 12));
+ CHECK(regmap_write(platform->dbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW end\n");
+
+ /* PBUS_BAAW regions */
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW begin\n");
+ CHECK(regmap_write(platform->pbus_baaw, 0x0, WLBT_PBUS_BAAW_0_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x4, WLBT_PBUS_BAAW_0_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x8, WLBT_PBUS_MBOX_CP2WLBT_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));
+
+ CHECK(regmap_write(platform->pbus_baaw, 0x10, WLBT_PBUS_BAAW_1_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x14, WLBT_PBUS_BAAW_1_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x18, WLBT_PBUS_MBOX_SHUB2WLBT_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x1C, WLBT_BAAW_ACCESS_CTRL));
+
+ CHECK(regmap_write(platform->pbus_baaw, 0x20, WLBT_PBUS_BAAW_2_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x24, WLBT_PBUS_BAAW_2_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x28, WLBT_PBUS_USI_CMG00_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x2C, WLBT_BAAW_ACCESS_CTRL));
+
+ CHECK(regmap_write(platform->pbus_baaw, 0x30, WLBT_PBUS_BAAW_3_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x34, WLBT_PBUS_BAAW_3_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x38, WLBT_PBUS_SYSREG_CMGP2WLBT_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x3C, WLBT_BAAW_ACCESS_CTRL));
+
+ CHECK(regmap_write(platform->pbus_baaw, 0x40, WLBT_PBUS_BAAW_4_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x44, WLBT_PBUS_BAAW_4_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x48, WLBT_PBUS_GPIO_CMGP_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x4C, WLBT_BAAW_ACCESS_CTRL));
+
+ CHECK(regmap_write(platform->pbus_baaw, 0x50, WLBT_PBUS_BAAW_5_START >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x54, WLBT_PBUS_BAAW_5_END >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x58, WLBT_PBUS_SHUB_BASE >> 12));
+ CHECK(regmap_write(platform->pbus_baaw, 0x5C, WLBT_BAAW_ACCESS_CTRL));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW end\n");
+
+ /* PMU boot bug workaround */
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT begin\n");
+ CHECK(regmap_write(platform->boot_cfg, 0x0, 0x1));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT done\n");
+
+ /* Download ka_patch word-by-word starting at offset 0x1000 */
+ while (ka_patch_addr < (ka_patch + ARRAY_SIZE(ka_patch))) {
+ CHECK(regmap_write(platform->boot_cfg, ka_addr, *ka_patch_addr));
+ ka_addr += sizeof(ka_patch[0]);
+ ka_patch_addr++;
+ }
+
+ /* Notify PMU of configuration done */
+ CHECK(regmap_write(platform->boot_cfg, 0x0, 0x0));
+
+ /* BOOT_CFG_ACK */
+ CHECK(regmap_write(platform->boot_cfg, 0x4, 0x1));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT_CFG_ACK done\n");
+
+ /* Delay to allow HW to clear CFG_REQ and hence de-assert IRQ, which
+ * it does in response to CFG_ACK
+ */
+ udelay(100);
+
+ /* Release ownership of MASK_PWR_REQ */
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ MASK_PWR_REQ, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to clear WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+ goto cfg_error;
+ }
+
+ /* Mark as CFQ_REQ handled, so boot may continue */
+ platform->boot_state = WLBT_BOOT_CFG_DONE;
+
+ /* Signal triggering function that the IRQ arrived and CFG was done */
+ complete(&platform->cfg_ack);
+
+ /* Re-enable IRQ here to allow spurious interrupt to be tracked */
+ enable_irq(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+ return IRQ_HANDLED;
+cfg_error:
+ platform->boot_state = WLBT_BOOT_CFG_ERROR;
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ERROR: WLBT Config failed. WLBT will not work\n");
+ complete(&platform->cfg_ack);
+ return IRQ_HANDLED;
+}
+#endif
+
+/* Release every WLBT IRQ obtained in platform_mif_register_irq(), and
+ * clear the WDOG disable counter since the freed IRQ is no longer
+ * masked. ALIVE and CFG_REQ lines are freed only when their respective
+ * configs registered them.
+ */
+static void platform_mif_unregister_irq(struct platform_mif *platform)
+{
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering IRQs\n");
+
+ devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform);
+ devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform);
+ /* Reset irq_disabled_cnt for WDOG IRQ since the IRQ itself is here unregistered and disabled */
+ atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+ /* if ALIVE irq is required */
+ devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform);
+#endif
+#ifdef CONFIG_SOC_EXYNOS9610
+ devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform);
+#endif
+}
+
+/* Request all WLBT IRQ lines (MBOX, WDOG, and - depending on config -
+ * ALIVE and CFG_REQ) as managed IRQs bound to this platform instance.
+ * On 9610 the boot state is forced to WLBT_BOOT_IN_RESET before the
+ * CFG_REQ line is armed, so a spurious CFG_REQ can be detected.
+ * Returns 0 on success or -ENODEV if any request fails; IRQs obtained
+ * before a failure are devm-managed and released with the device.
+ */
+static int platform_mif_register_irq(struct platform_mif *platform)
+{
+ int err;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering IRQs\n");
+
+ /* Register MBOX irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering MBOX irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform->wlbt_irq[PLATFORM_MIF_MBOX].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform_mif_isr,
+ platform->wlbt_irq[PLATFORM_MIF_MBOX].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register MBOX handler: %d. Aborting.\n", err);
+ err = -ENODEV;
+ return err;
+ }
+
+ /* Register WDOG irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering WDOG irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform->wlbt_irq[PLATFORM_MIF_WDOG].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform_wdog_isr,
+ platform->wlbt_irq[PLATFORM_MIF_WDOG].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register WDOG handler: %d. Aborting.\n", err);
+ err = -ENODEV;
+ return err;
+ }
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+ /* Register ALIVE irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering ALIVE irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform_alive_isr,
+ platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags, DRV_NAME, platform);
+ /* Cast as at the other call sites: IS_ERR_VALUE() on a plain int
+ * misbehaves (int is not sign-extended the way the macro expects).
+ */
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register ALIVE handler: %d. Aborting.\n", err);
+ err = -ENODEV;
+ return err;
+ }
+#endif
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* Mark as WLBT in reset before enabling IRQ to guard against spurious IRQ */
+ platform->boot_state = WLBT_BOOT_IN_RESET;
+ smp_wmb(); /* commit before irq */
+
+ /* Register WB2AP_CFG_REQ irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering CFG_REQ irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform_cfg_req_isr,
+ platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register CFG_REQ handler: %d. Aborting.\n", err);
+ err = -ENODEV;
+ return err;
+ }
+#endif
+ return 0;
+}
+
+/* scsc_mif_abs destroy hook: tear down all IRQ registrations. */
+static void platform_mif_destroy(struct scsc_mif_abs *interface)
+{
+ platform_mif_unregister_irq(platform_mif_from_mif_abs(interface));
+}
+
+/* Return the unique-id string for this interface; a single instance is
+ * supported, so the id is the constant "0".
+ */
+static char *platform_mif_get_uid(struct scsc_mif_abs *interface)
+{
+ (void)interface; /* unused */
+ return "0";
+}
+
+/* WLBT Power domain */
+/* Assert (power=true) or release (power=false) the AP-owned power
+ * request for the WLBT/WIFI block via the PMU regmap. On 9610 this
+ * drives WLBT_CTRL_NS[MASK_PWR_REQ]; on older SoCs WIFI_CTRL_NS
+ * [WIFI_PWRON]. Returns 0 on success or the negative regmap error.
+ */
+static int platform_mif_power(struct scsc_mif_abs *interface, bool power)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "power %d\n", power);
+
+ if (power)
+ val = MASK_PWR_REQ;
+
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ MASK_PWR_REQ, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+ return ret;
+ }
+#else
+ /* See sequence in 8.6.6 */
+ /* WIFI power on/off control If WIFI_PWRON = 1
+ * and WIFI_START=1, WIFI enters to UP state.
+ * This bit is 0 as default value because WIFI
+ * should be reset at AP boot mode after Power-on Reset.
+ */
+ if (power)
+ val = WIFI_PWRON;
+ ret = regmap_update_bits(platform->pmureg, WIFI_CTRL_NS,
+ WIFI_PWRON, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WIFI_CTRL_NS[WIFI_PWRON]: %d\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+/* WLBT RESET */
+/* Assert (reset=true) or release (reset=false) the WLBT/WIFI reset bit
+ * in the PMU control register (WLBT_CTRL_NS[WLBT_RESET_SET] on 9610,
+ * WIFI_CTRL_NS[WIFI_RESET_SET] otherwise).
+ * Returns 0 on success or the negative regmap error.
+ */
+static int platform_mif_hold_reset(struct scsc_mif_abs *interface, bool reset)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "reset %d\n", reset);
+ if (reset)
+ val = WLBT_RESET_SET;
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ WLBT_RESET_SET, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[WLBT_RESET_SET]: %d\n", ret);
+ return ret;
+ }
+#else
+ if (reset)
+ val = WIFI_RESET_SET;
+ /* See sequence in 8.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WIFI_CTRL_NS,
+ WIFI_RESET_SET, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WIFI_CTRL_NS[WIFI_RESET_SET]: %d\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+/* WLBT START */
+static int platform_mif_start(struct scsc_mif_abs *interface, bool start)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "start %d\n", start);
+ if (start)
+ val = WLBT_START;
+
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_S,
+ WLBT_START, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_S[WLBT_START]: %d\n", ret);
+ return ret;
+ }
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "update WIFI_CTRL_S[WIFI_START]: %d\n", ret);
+
+ /* At this point WLBT should assert the CFG_REQ IRQ, so wait for it */
+ if (start &&
+ wait_for_completion_timeout(&platform->cfg_ack, WLBT_BOOT_TIMEOUT) == 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Timeout waiting for CFG_REQ IRQ\n");
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+ /* only continue if CFG_REQ IRQ configured WLBT/PMU correctly */
+ if (platform->boot_state == WLBT_BOOT_CFG_ERROR) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "CFG_REQ failed to configure WLBT.\n");
+ return -EIO;
+ }
+#else
+ if (start)
+ val = WIFI_START;
+ /* See sequence in 8.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WIFI_CTRL_S,
+ WIFI_START, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WIFI_CTRL_S[WIFI_START]: %d\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+/* Take WLBT out of reset: power on, release the reset bit, then start.
+ * On 9610 the boot state machine is first armed for the CFG_REQ IRQ
+ * handshake. Stops at the first failing step and returns its error,
+ * or 0 when the full sequence succeeds.
+ */
+static int platform_mif_pmu_reset_release(struct scsc_mif_abs *interface)
+{
+ int ret;
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* We're now ready for the IRQ */
+ platform->boot_state = WLBT_BOOT_WAIT_CFG_REQ;
+ smp_wmb(); /* commit before irq */
+#endif
+ ret = platform_mif_power(interface, true);
+ if (!ret)
+ ret = platform_mif_hold_reset(interface, false);
+ if (!ret)
+ ret = platform_mif_start(interface, true);
+
+ return ret;
+}
+
+/* Attempt to recover a WLBT block that failed to enter reset on 9610
+ * by manually driving the BOOT_CFG_ACK handshake, then polling the
+ * CENTRAL_SEQ_WLBT_STATUS state machine for the down state (0x80).
+ * Returns 0 if the block reached the down state within 500 ms, -ETIME
+ * otherwise. A log collection is scheduled in either case.
+ */
+int __platform_mif_9610_recover_reset(struct platform_mif *platform)
+{
+ unsigned long timeout;
+ int ret;
+ u32 val;
+
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Attempt to recover WLBT HW reset");
+
+ /*
+ * Set CFG_ACK = 1
+ * Poll CFG_REQ until REQ == 0
+ * Set CFG_ACK = 0
+ * Poll CENTRAL_SEQ_WLBT_STATUS SM status again
+ * Print WLBT_DEBUG at each step.
+ * (Remember to set "echo 1 > /sys/kernel/debug/exynos-rgt/vqmmc/enable" before the test).
+ */
+
+ /* CFG_ACK = 1 */
+ regmap_write(platform->boot_cfg, 0x4, 0x1);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Force BOOT_CFG_ACK = 1\n");
+
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+ /* TBD poll CFG_REQ status */
+ udelay(250);
+
+ /* CFG_ACK = 0 */
+ regmap_write(platform->boot_cfg, 0x4, 0x0);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Force BOOT_CFG_ACK = 0\n");
+
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+ /* Check WLBT_STATUS again */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ val &= STATES;
+ val >>= 16;
+ if (val == 0x80) {
+ ret = 0; /* Recovered OK */
+ goto done;
+ }
+ } while (time_before(jiffies, timeout));
+ ret = -ETIME;
+
+done:
+ /* Final state dump regardless of outcome */
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+ if (ret == 0)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Recovered reset");
+ else
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Reset not recovered");
+
+ /* Save log at point of failure, last to show recovery attempt */
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_schedule_collection(SCSC_LOG_HOST_COMMON, SCSC_LOG_HOST_COMMON_RECOVER_RST);
+#else
+ mx140_log_dump();
+#endif
+ return ret;
+}
+
+/* Put the WLBT/WIFI block back into reset via the PMU power-down
+ * sequence. rst_case selects the legacy flow: 1 = power off,
+ * 2 = assert reset (on 9610 the reset path is always used).
+ * The SYS_PWR_REG writes follow the per-SoC PMU sequence and must not
+ * be reordered. Returns 0 once the PMU state machine reports the block
+ * down (STATES field == 0x80), a negative regmap error on register
+ * failure, or -ETIME if the state machine does not settle within
+ * 500 ms.
+ */
+static int platform_mif_pmu_reset(struct scsc_mif_abs *interface, u8 rst_case)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long timeout;
+ int ret;
+ u32 val;
+
+ if (rst_case == 0 || rst_case > 2) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Incorrect pmu reset case %d\n", rst_case);
+ return -EIO;
+ }
+
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ ret = regmap_update_bits(platform->pmureg, RESET_AHEAD_WIFI_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update RESET_ASB_WIFI_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+#elif defined(CONFIG_SOC_EXYNOS7570)
+ ret = regmap_update_bits(platform->pmureg, RESET_ASB_WIFI_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update RESET_ASB_WIFI_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+#endif
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "rst_case %d\n", rst_case);
+
+ /* Revert power control ownership to AP, as WLBT is going down (S9.6.6). */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ MASK_PWR_REQ, MASK_PWR_REQ);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+ return ret;
+ }
+
+ /* reset sequence as per excite implementation for Leman */
+ ret = regmap_update_bits(platform->pmureg, CENTRAL_SEQ_WLBT_CONFIGURATION,
+ SYS_PWR_CFG_16, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CENTRAL_SEQ_WLBT_CONFIGURATION %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, RESET_AHEAD_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update RESET_AHEAD_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, CLEANY_BUS_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CLEANY_BUS_WLBT_SYS_PWR_REG%d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, LOGIC_RESET_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update LOGIC_RESET_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, TCXO_GATE_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update TCXO_GATE_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WLBT_DISABLE_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 1);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_DISABLE_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WLBT_RESET_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_RESET_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ /* rst_case is always 2 on 9610 */
+ ret = platform_mif_hold_reset(interface, true);
+
+ if (ret)
+ return ret;
+
+ /* Poll the PMU state machine for the powered-down state */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ val &= STATES;
+ val >>= 16;
+ if (val == 0x80) {
+ /* OK. Switch CTRL_NS[MASK_PWR_REQ] ownership to FW following
+ * reset. WLBT PWR_REQ is cleared when it's put in reset.
+ * The SW PWR_REQ remains asserted, but as ownership is now FW,
+ * it'll be ignored. This leaves it as we found it.
+ */
+ platform_mif_power(interface, false);
+
+ return 0; /* OK - return */
+ }
+ } while (time_before(jiffies, timeout));
+
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Timeout waiting for CENTRAL_SEQ_WLBT_STATUS SM status\n");
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+#if 0
+ /* Try to recovery in case of reset failure */
+ ret = __platform_mif_9610_recover_reset(platform);
+ if (!ret)
+ return 0;
+#endif
+#else
+ /* 7885, 7872, 7570 */
+ ret = regmap_update_bits(platform->pmureg, CLEANY_BUS_WIFI_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CLEANY_BUS_WIFI_SYS_PWR_REG%d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, LOGIC_RESET_WIFI_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update LOGIC_RESET_WIFI_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, TCXO_GATE_WIFI_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update TCXO_GATE_WIFI_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+#endif
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ ret = regmap_update_bits(platform->pmureg, WIFI_DISABLE_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 1);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WIFI_DISABLE_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WIFI_RESET_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WIFI_RESET_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+#endif
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* 9610 never reaches the legacy tail below; silence unused warnings */
+ (void)platform;
+ (void)timeout;
+ (void)ret;
+ (void)val;
+#else
+ /* 7885, 7872, 7570 */
+ ret = regmap_update_bits(platform->pmureg, CENTRAL_SEQ_WIFI_CONFIGURATION,
+ SYS_PWR_CFG_16, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CENTRAL_SEQ_WIFI_CONFIGURATION %d\n", ret);
+ return ret;
+ }
+
+ if (rst_case == 1)
+ ret = platform_mif_power(interface, false);
+ else
+ ret = platform_mif_hold_reset(interface, true);
+
+ if (ret)
+ return ret;
+
+ /* Poll the PMU state machine for the powered-down state */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WIFI_STATUS, &val);
+ val &= STATES;
+ val >>= 16;
+ if (val == 0x80)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Timeout waiting for CENTRAL_SEQ_WIFI_STATUS SM status\n");
+#endif
+ return -ETIME;
+}
+
+/* reset=0 - release from reset */
+/* reset=1 - hold reset */
+/* reset=0 - release from reset */
+/* reset=1 - hold reset */
+/* Top-level reset control for WLBT. On release, configures the DRAM
+ * window registers (non-9610 SoCs) and runs the PMU release sequence;
+ * on hold, runs the PMU reset sequence and clears the DRAM window.
+ * ARM core resets can be vetoed by the enable_platform_mif_arm_reset
+ * module parameter (release is always allowed).
+ * Returns 0 on success or a negative errno from the PMU sequence.
+ */
+static int platform_mif_reset(struct scsc_mif_abs *interface, bool reset)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ /* Fix: was u32, but this holds negative errno values returned from
+ * the PMU sequences and is returned through the int return type.
+ */
+ int ret = 0;
+ u32 val;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (enable_platform_mif_arm_reset || !reset) {
+ if (!reset) { /* Release from reset */
+#ifdef CONFIG_ARCH_EXYNOS
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "SOC_VERSION: product_id 0x%x, rev 0x%x\n",
+ exynos_soc_info.product_id, exynos_soc_info.revision);
+#endif
+#ifdef CONFIG_SOC_EXYNOS9610
+ (void)val;
+#else
+ /* Enable R4 access to MIF resources */
+ /* Address is fixed and its value is in the dts */
+ /* It defines the base address of DRAM space which WIFI accesses.
+ * Default value is 0x0_6000_0000. It can configure only MSB 14-bit.
+ * You must align this base address with WIFI memory access size that WIFI_MEM_SIZE
+ * defines
+ */
+ /* >>12 represents 4K aligment (see size)*/
+ val = (platform->mem_start & 0xFFFFFC000) >> 12;
+ regmap_write(platform->pmureg, WIFI2AP_MEM_CONFIG1, val);
+ /* Size */
+ /*It defines the DRAM memory size to which WLBT can
+ * access.
+ * Definition of the size has 4 KB resolution defined from
+ * minimum 4 KB to maximum 1GB.
+ * 20'b0000_0000_0000_0000_0001 = 4KB
+ * 20'b0000_0000_0000_0000_0010 = 8KB
+ * 20'b0000_0000_0001_0000_0000 = 1 MB
+ * 20'b0000_0000_0010_0000_0000 = 2 MB
+ * 20'b0000_0000_0100_0000_0000 = 4 MB (default)
+ * 20'b0000_0000_1000_0000_0000 = 8 MB
+ * 20'b0000_1000_0000_0000_0000 = 128 MB
+ * 20'b0100_0000_0000_0000_0000 = 1 GB
+ */
+ /* Size is fixed and its value is in the dts */
+ /* >>12 represents 4K aligment (see address)*/
+ val = platform->mem_size >> 12;
+ regmap_write(platform->pmureg, WIFI2AP_MEM_CONFIG0, val);
+#endif
+#if (defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)) && defined(CONFIG_ACPM_DVFS)
+ /* Immediately prior to reset release, set up ACPM
+ * to ensure BUCK2 gets the right voltage
+ */
+ exynos_acpm_set_flag();
+#endif
+ ret = platform_mif_pmu_reset_release(interface);
+ } else {
+ /* Put back into reset */
+ ret = platform_mif_pmu_reset(interface, 2);
+#if !defined(CONFIG_SOC_EXYNOS9610)
+ /* WLBT should be stopped/powered-down at this point */
+ regmap_write(platform->pmureg, WIFI2AP_MEM_CONFIG1, 0x00000);
+ regmap_write(platform->pmureg, WIFI2AP_MEM_CONFIG0, 0x00000);
+#endif
+ }
+ } else
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Not resetting ARM Cores - enable_platform_mif_arm_reset: %d\n",
+ enable_platform_mif_arm_reset);
+ return ret;
+}
+
+/* Map [phys_addr, phys_addr + size) into the kernel virtual address
+ * space as write-combined (NORMAL_NC) memory, page by page.
+ * Returns the virtual base or NULL on allocation/mapping failure; the
+ * caller releases the mapping with platform_mif_unmap_region().
+ */
+static void __iomem *platform_mif_map_region(unsigned long phys_addr, size_t size)
+{
+ size_t i, num_pages;
+ struct page **pages;
+ void *vmem;
+
+ size = PAGE_ALIGN(size);
+ num_pages = size >> PAGE_SHIFT;
+
+ /* kmalloc_array() guards the count * element-size multiplication
+ * against overflow, unlike open-coded kmalloc(n * sizeof(...)).
+ */
+ pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ /* Build the page list covering the physical range */
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = phys_to_page(phys_addr);
+ phys_addr += PAGE_SIZE;
+ }
+
+ /* Map NORMAL_NC pages with kernel virtual space */
+ vmem = vmap(pages, num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+
+ kfree(pages); /* vmap keeps its own reference to the pages */
+ return (void __iomem *)vmem;
+}
+
+/* Release a virtual mapping created by platform_mif_map_region(). */
+static void platform_mif_unmap_region(void *vmem)
+{
+ vunmap(vmem);
+}
+
+/* Map the WLBT shared-memory region and bring the mailbox block to its
+ * documented defaults (ISSRs cleared, all interrupts masked and
+ * cleared), then register the driver IRQs. On success returns the
+ * virtual base of the shared memory and stores its size in *allocated
+ * (when non-NULL); on failure returns NULL with *allocated left 0.
+ */
+static void *platform_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u8 i;
+
+ if (allocated)
+ *allocated = 0;
+
+ platform->mem =
+ platform_mif_map_region(platform->mem_start, platform->mem_size);
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Error remaping shared memory\n");
+ return NULL;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Map: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+
+ /* Initialise MIF registers with documented defaults */
+ /* MBOXes */
+ for (i = 0; i < NUM_MBOX_PLAT; i++) {
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(ISSR(i)), 0x00000000);
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(ISSR(i)), 0x00000000);
+#endif
+ }
+ /* MRs */ /*1's - set all as Masked */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+#ifdef CONFIG_SOC_EXYNOS7570
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR2), 0x0000ffff);
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+#endif
+
+ /* CRs */ /* 1's - clear all the interrupts */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+#ifdef CONFIG_SOC_EXYNOS7570
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR2), 0x0000ffff);
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+#endif
+#if !defined(CONFIG_SOC_EXYNOS9610)
+ /*Set WLBT_BOOT_TEST_RST_CFG to 0 to boot from external DRAM */
+ regmap_write(platform->pmureg, WLBT_BOOT_TEST_RST_CFG, 0x00000);
+#endif
+ /* Add more requrired initialization here: */
+
+#ifdef CONFIG_SCSC_GPR4_CON_DEBUG
+ /* PIO muxes switching to the Maxwell subsystem */
+ /* GPR4_CON (0x13750040) = 0x00666666 */
+ if (gpr4_debug) {
+ reg_bkp = readl(gpio_base);
+ writel(0x00666666, gpio_base);
+ SCSC_TAG_WARNING_DEV(PLAT_MIF, platform->dev, "[WARNING] Changing GPR4_CON from 0x%x to 0x%x", reg_bkp, readl(gpio_base));
+ reg_update = true;
+ }
+#endif
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+ /* CHV test mode: skip IRQ registration entirely */
+ if (chv_disable_irq == true) {
+ if (allocated)
+ *allocated = platform->mem_size;
+ return platform->mem;
+ }
+#endif
+ /* register interrupts */
+ if (platform_mif_register_irq(platform)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+ platform_mif_unmap_region(platform->mem);
+ return NULL;
+ }
+
+ if (allocated)
+ *allocated = platform->mem_size;
+ /* Set the CR4 base address in Mailbox??*/
+ return platform->mem;
+}
+
+/* HERE: Not sure why mem is passed in - its stored in platform - as it should be */
+/* Tear down the shared-memory mapping: mask and clear all mailbox
+ * interrupts, unregister the IRQ handlers, undo the GPR4_CON debug
+ * override (when enabled) and finally unmap the region.
+ */
+static void platform_mif_unmap(struct scsc_mif_abs *interface, void *mem)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+#ifdef CONFIG_SCSC_GPR4_CON_DEBUG
+ u32 prev;
+
+ /* Restore the GPR4_CON pin-mux value saved by platform_mif_map() */
+ if (gpr4_debug && reg_update) {
+ prev = readl(gpio_base);
+ writel(reg_bkp, gpio_base);
+ SCSC_TAG_WARNING_DEV(PLAT_MIF, platform->dev, "[WARNING] Restoring GPR4_CON from 0x%x to 0x%x", prev, readl(gpio_base));
+ }
+ reg_update = false;
+#endif
+ /* Avoid unused parameter error */
+ (void)mem;
+
+ /* MRs */ /*1's - set all as Masked */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+#ifdef CONFIG_SOC_EXYNOS7570
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR2), 0x0000ffff);
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+#endif
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+ /* Restore PIO changed by Maxwell subsystem */
+ /* IRQs were only registered in platform_mif_map() when chv_disable_irq
+ * was false, so only unregister them in that case. */
+ if (chv_disable_irq == false)
+ /* Unregister IRQs */
+ platform_mif_unregister_irq(platform);
+#else
+ platform_mif_unregister_irq(platform);
+#endif
+ /* CRs */ /* 1's - clear all the interrupts */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+#ifdef CONFIG_SOC_EXYNOS7570
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR2), 0x0000ffff);
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+#endif
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+ platform_mif_unmap_region(platform->mem);
+ platform->mem = NULL;
+}
+
+/* Return the current wlbt->AP interrupt mask bits (upper half of INTMR0). */
+static u32 platform_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 mask_bits = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)) >> 16;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INTMR0: 0x%x\n", mask_bits);
+ return mask_bits;
+}
+
+/* Return the pending wlbt->AP interrupt bits (upper half of INTMSR0). */
+static u32 platform_mif_irq_get(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 pending;
+
+ /* Function has to return the interrupts that are enabled *AND* not masked */
+ pending = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)) >> 16;
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INT-INTMSR0: 0x%x\n", pending);
+
+ return pending;
+}
+
+/* Generate (set) interrupt 'bit_num' towards the given target core.
+ *
+ * Valid bit numbers are 0..15; anything else is rejected with an error log
+ * (including negative values, for which the shift would be undefined).
+ * On Exynos9610 every interrupt is raised through INTGR1 regardless of
+ * target; on older SoCs the register and mailbox bank depend on whether
+ * the target is the R4 or the M4.
+ *
+ * Fixes vs. previous revision: the 9610 path logged the "Setting INTGR1"
+ * debug message twice, and the un-braced else-if chain only compiled
+ * because one of the SoC ifdefs supplied its body.
+ */
+static void platform_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 reg;
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+#ifdef CONFIG_SOC_EXYNOS9610
+ reg = INTGR1;
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(reg), (1 << bit_num));
+#else
+ /* Generate INT to R4/M4 - VIC */
+ if (target == SCSC_MIF_ABS_TARGET_R4) {
+ reg = INTGR1;
+ } else if (target == SCSC_MIF_ABS_TARGET_M4) {
+#ifdef CONFIG_SOC_EXYNOS7570
+ reg = INTGR2;
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ reg = INTGR1;
+#endif
+ } else {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect Target %d\n", target);
+ return;
+ }
+#ifdef CONFIG_SOC_EXYNOS7570
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(reg), (1 << bit_num));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ /* R4 interrupts go through the main bank, M4 through the M4 bank */
+ if (target == SCSC_MIF_ABS_TARGET_R4)
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(reg), (1 << bit_num));
+ else
+ platform_mif_reg_write_m4(platform, MAILBOX_WLBT_REG(reg), (1 << bit_num));
+#endif
+#endif
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTGR1: bit %d on target %d\n", bit_num, target);
+}
+
+/* Clear (acknowledge) incoming interrupt 'bit_num' via INTCR0.
+ * Valid bit numbers are 0..15; out-of-range values (including negative
+ * ones, for which the shift would be undefined) are rejected with a log.
+ */
+static void platform_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ /* WRITE : 1 = Clears Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), ((1 << bit_num) << 16));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTCR0: bit %d\n", bit_num);
+}
+
+/* Mask incoming interrupt 'bit_num' by setting its bit in the upper half
+ * of INTMR0. The read-modify-write is done under the MIF spinlock.
+ * Valid bit numbers are 0..15; out-of-range values (including negative
+ * ones, for which the shift would be undefined) are rejected with a log.
+ *
+ * Fix vs. previous revision: the debug log printed "val | (1 << bit_num)"
+ * which is NOT the value written to the register (the <<16 was missing).
+ */
+static void platform_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val;
+ unsigned long flags;
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ val = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ /* WRITE : 1 = Mask Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val | ((1 << bit_num) << 16));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTMR0: 0x%x bit %d\n", val | ((1 << bit_num) << 16), bit_num);
+}
+
+/* Unmask incoming interrupt 'bit_num' by clearing its bit in the upper
+ * half of INTMR0. The read-modify-write is done under the MIF spinlock.
+ * Valid bit numbers are 0..15; out-of-range values (including negative
+ * ones, for which the shift would be undefined) are rejected with a log.
+ */
+static void platform_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val;
+ unsigned long flags;
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ val = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ /* WRITE : 0 = Unmask Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val & ~((1 << bit_num) << 16));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "UNMASK Setting INTMR0: 0x%x bit %d\n", val & ~((1 << bit_num) << 16), bit_num);
+}
+
+/* Return the raw contents of the INTMR0 mask register, read under the
+ * MIF spinlock (used to preserve the mask across suspend).
+ */
+static u32 __platform_mif_irq_bit_mask_read(struct platform_mif *platform)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ mask = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Read INTMR0: 0x%x\n", mask);
+
+ return mask;
+}
+
+/* Overwrite the whole INTMR0 mask register under the MIF spinlock
+ * (used to restore the mask saved before suspend).
+ */
+static void __platform_mif_irq_bit_mask_write(struct platform_mif *platform, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val);
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Write INTMR0: 0x%x\n", val);
+}
+
+/* Install the handler called for wlbt->AP mailbox interrupts; the handler
+ * and its cookie are swapped in atomically under the MIF spinlock.
+ */
+static void platform_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long flags;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif int handler %pS in %p %p\n", handler, platform, interface);
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ platform->irq_dev = dev;
+ platform->r4_handler = handler;
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* Revert the mailbox interrupt handler to the default no-op handler,
+ * clearing the cookie, under the MIF spinlock.
+ */
+static void platform_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long flags;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif int handler %pS\n", interface);
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ platform->irq_dev = NULL;
+ platform->r4_handler = platform_mif_irq_default_handler;
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* Install the handler called on a WDOG reset request from the firmware.
+ * If the default WDOG handler previously disabled the IRQ because it fired
+ * spuriously, re-enable it now that a real consumer exists.
+ *
+ * Fix vs. previous revision: typo "spurios" in the log message.
+ */
+static void platform_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif reset_request int handler %pS in %p %p\n", handler, platform, interface);
+ platform->reset_request_handler = handler;
+ platform->irq_reset_request_dev = dev;
+ if (atomic_read(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "Default WDOG handler disabled by spurious IRQ...re-enabling.\n");
+ enable_irq(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+ }
+}
+
+/* Revert the reset-request handler to the default one and drop the cookie. */
+static void platform_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "UnRegistering mif reset_request int handler %pS\n", interface);
+ platform->irq_reset_request_dev = NULL;
+ platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+}
+
+/* Record the suspend/resume callbacks (and their shared cookie) invoked
+ * from platform_mif_suspend()/platform_mif_resume().
+ */
+static void platform_mif_suspend_reg_handler(struct scsc_mif_abs *interface,
+ int (*suspend)(struct scsc_mif_abs *abs, void *data),
+ void (*resume)(struct scsc_mif_abs *abs, void *data),
+ void *data)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif suspend/resume handlers in %p %p\n", platform, interface);
+ platform->suspendresume_data = data;
+ platform->suspend_handler = suspend;
+ platform->resume_handler = resume;
+}
+
+/* Drop the suspend/resume callbacks and their cookie. */
+static void platform_mif_suspend_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif suspend/resume handlers in %p %p\n", platform, interface);
+ platform->suspendresume_data = NULL;
+ platform->resume_handler = NULL;
+ platform->suspend_handler = NULL;
+}
+
+/* Return a kernel virtual pointer to the ISSR(mbox_index) mailbox register.
+ * NOTE(review): mbox_index is not range-checked here; callers are trusted
+ * to pass a valid index (presumably < NUM_MBOX_PLAT) - confirm before
+ * exposing this to new callers.
+ */
+static u32 *platform_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 *addr;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "mbox_index 0x%x\n", mbox_index);
+ addr = platform->base + MAILBOX_WLBT_REG(ISSR(mbox_index));
+ return addr;
+}
+
+/* Convert a pointer inside the mapped shared memory into a MIF RAM offset.
+ *
+ * Returns 0 and stores the offset in *ref on success; -ENOMEM when the
+ * memory is not mapped or the pointer lies outside the mapped window.
+ *
+ * Fix vs. previous revision: pointers *below* the mapping base were not
+ * rejected and would have produced a bogus (underflowed) reference.
+ */
+static int platform_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return -ENOMEM;
+ }
+
+ /* Check limits! */
+ if (ptr < platform->mem || ptr >= (platform->mem + platform->mem_size)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Unable to get pointer reference\n");
+ return -ENOMEM;
+ }
+
+ *ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)platform->mem);
+
+ return 0;
+}
+
+/* Translate a MIF RAM offset back into a kernel virtual pointer.
+ * Returns NULL when the shared memory is not mapped or the offset lies
+ * outside the mapped window.
+ */
+static void *platform_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ /* Reject references outside the shared-memory window */
+ if (ref < 0 || ref >= platform->mem_size)
+ return NULL;
+
+ return (void *)((uintptr_t)platform->mem + (uintptr_t)ref);
+}
+
+/* Translate a MIF RAM offset into a physical address inside the reserved
+ * shared-memory region. Returns NULL when no region base is known.
+ */
+static void *platform_mif_get_mifram_phy_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ uintptr_t phys;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem_start) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ phys = (uintptr_t)platform->mem_start + (uintptr_t)ref;
+ return (void *)phys;
+}
+
+/* Return the page-frame number of the first page of the shared-memory
+ * mapping. NOTE(review): assumes platform->mem is a vmalloc-space address
+ * (vmalloc_to_pfn) - confirm against platform_mif_map_region().
+ */
+static uintptr_t platform_mif_get_mif_pfn(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ return vmalloc_to_pfn(platform->mem);
+}
+
+/* Return the struct device backing this MIF instance. */
+static struct device *platform_mif_get_mif_device(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *pmif = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, pmif->dev, "\n");
+
+ return pmif->dev;
+}
+
+/* Intentionally empty: this platform does not need an explicit global
+ * interrupt-clear step (per-bit clearing is done in irq_bit_clear).
+ */
+static void platform_mif_irq_clear(void)
+{
+ /* Implement if required */
+}
+
+/* Read a logical register exposed through the MIF abstraction.
+ *
+ * Currently only SCSC_REG_READ_WLBT_STAT is supported (PMU WLBT_STAT via
+ * the syscon regmap). Returns 0 on success, the regmap error code when
+ * the PMU read fails (previously this error was silently discarded), or
+ * -EIO for an unknown register id.
+ */
+static int platform_mif_read_register(struct scsc_mif_abs *interface, u64 id, u32 *val)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (id == SCSC_REG_READ_WLBT_STAT)
+ return regmap_read(platform->pmureg, WLBT_STAT, val);
+
+ return -EIO;
+}
+
+/* Dump every mailbox interrupt register (generate/clear/mask/status/
+ * masked-status, both halves, plus the M4 bank on 7872/7885 and the
+ * *2 bank on 7570) to the log, atomically under the MIF spinlock.
+ */
+static void platform_mif_dump_register(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long flags;
+
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR1)));
+#ifdef CONFIG_SOC_EXYNOS7570
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR2 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR2)));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR1(M4) 0x%08x\n", platform_mif_reg_read_m4(platform, MAILBOX_WLBT_REG(INTGR1)));
+#endif
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR1)));
+#ifdef CONFIG_SOC_EXYNOS7570
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR2 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR2)));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR1(M4) 0x%08x\n", platform_mif_reg_read_m4(platform, MAILBOX_WLBT_REG(INTCR1)));
+#endif
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR1)));
+#ifdef CONFIG_SOC_EXYNOS7570
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR2 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR2)));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR1(M4) 0x%08x\n", platform_mif_reg_read_m4(platform, MAILBOX_WLBT_REG(INTMR1)));
+#endif
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR1)));
+#ifdef CONFIG_SOC_EXYNOS7570
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR2 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR2)));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR1(M4) 0x%08x\n", platform_mif_reg_read_m4(platform, MAILBOX_WLBT_REG(INTSR1)));
+#endif
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR1)));
+#ifdef CONFIG_SOC_EXYNOS7570
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR2 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR2)));
+#elif defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR1(M4) 0x%08x\n", platform_mif_reg_read_m4(platform, MAILBOX_WLBT_REG(INTMSR1)));
+#endif
+
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* MIF teardown hook: when mx140 supplies the 20MHz clock, hand USBPLL
+ * ownership back to the AP so the USB driver can control it again.
+ */
+static void platform_mif_cleanup(struct scsc_mif_abs *interface)
+{
+#ifdef CONFIG_SCSC_CLK20MHZ
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ __platform_mif_usbpll_claim(platform, false);
+#endif
+}
+
+/* MIF restart hook: when mx140 supplies the 20MHz clock, give USBPLL
+ * ownership back to the WLBT firmware.
+ */
+static void platform_mif_restart(struct scsc_mif_abs *interface)
+{
+#ifdef CONFIG_SCSC_CLK20MHZ
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ __platform_mif_usbpll_claim(platform, true);
+#endif
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+/* Early-boot reserved-memory hook for the "exynos,wifibt_if" region:
+ * records the base/size of the carved-out shared memory in the file-scope
+ * sharedmem_base/sharedmem_size, consumed later by platform_mif_create().
+ */
+static int __init platform_mif_wifibt_if_reserved_mem_setup(struct reserved_mem *remem)
+{
+ SCSC_TAG_DEBUG(PLAT_MIF, "memory reserved: mem_base=%#lx, mem_size=%zd\n",
+ (unsigned long)remem->base, (size_t)remem->size);
+
+ sharedmem_base = remem->base;
+ sharedmem_size = remem->size;
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(wifibt_if, "exynos,wifibt_if", platform_mif_wifibt_if_reserved_mem_setup);
+#endif
+
+#ifdef CONFIG_SCSC_CLK20MHZ
+/* Hand USBPLL ownership over via CMU_FSYS USBPLL_CON1.
+ *
+ * @wlbt: true  - WLBT firmware takes control of the PLL;
+ *        false - ensure the PLL keeps running, wait the configured
+ *                handover delay, then give control back to the AP.
+ *
+ * Silently a no-op (bar an error log) when the CMU regmap was never mapped.
+ */
+static void __platform_mif_usbpll_claim(struct platform_mif *platform, bool wlbt)
+{
+ s32 ret = 0;
+
+ if (!platform->cmu_base) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "USBPLL claim not enabled\n");
+ return;
+ }
+
+ /* Set USBPLL ownership, by setting AP2WLBT_USBPLL_WPLL_SEL */
+ if (wlbt) {
+ /* WLBT f/w has control */
+ ret = regmap_update_bits(platform->cmu_base, USBPLL_CON1, AP2WLBT_USBPLL_WPLL_SEL, 0);
+ if (ret < 0)
+ goto error;
+ } else {
+ /* Ensure PLL runs */
+ ret = regmap_update_bits(platform->cmu_base, USBPLL_CON1, AP2WLBT_USBPLL_WPLL_EN, AP2WLBT_USBPLL_WPLL_EN);
+ if (ret < 0)
+ goto error;
+
+ /* AP has control, after letting the handover settle */
+ udelay(platform->usbpll_delay);
+ ret = regmap_update_bits(platform->cmu_base, USBPLL_CON1, AP2WLBT_USBPLL_WPLL_SEL, AP2WLBT_USBPLL_WPLL_SEL);
+ if (ret < 0)
+ goto error;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "USBPLL_CON1 assigned to %s\n", wlbt ? "WLBT" : "AP");
+ return;
+
+error:
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Failed to update USBPLL_CON1 to %s\n", wlbt ? "WLBT" : "AP");
+}
+#endif
+
+/* Allocate and initialise the platform MIF abstraction for @pdev.
+ *
+ * Populates the scsc_mif_abs ops table, resolves the shared-memory region
+ * (reserved-mem hook or device-tree properties), ioremaps the MAILBOX_WLBT
+ * register bank(s), collects the four named IRQ resources and the syscon
+ * regmaps, and performs SoC-specific one-off setup (TZASC, USBPLL, BAAW).
+ *
+ * Returns the abstract interface pointer, or NULL on failure; everything
+ * is devm-managed, and the error path releases the context explicitly.
+ */
+struct scsc_mif_abs *platform_mif_create(struct platform_device *pdev)
+{
+ struct scsc_mif_abs *platform_if;
+ struct platform_mif *platform =
+ (struct platform_mif *)devm_kzalloc(&pdev->dev, sizeof(struct platform_mif), GFP_KERNEL);
+ int err = 0;
+ u8 i = 0;
+ struct resource *reg_res;
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ struct resource *reg_res_m4;
+#endif
+#ifdef CONFIG_SCSC_CLK20MHZ
+ /* usb pll ownership */
+ const char *usbowner = NULL;
+ u32 usbdelay = 0;
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ u32 smapper_banks = 0;
+#endif
+
+ if (!platform)
+ return NULL;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, &pdev->dev, "Creating MIF platform device\n");
+
+ platform_if = &platform->interface;
+
+ /* initialise interface structure */
+ platform_if->destroy = platform_mif_destroy;
+ platform_if->get_uid = platform_mif_get_uid;
+ platform_if->reset = platform_mif_reset;
+ platform_if->map = platform_mif_map;
+ platform_if->unmap = platform_mif_unmap;
+ platform_if->irq_bit_set = platform_mif_irq_bit_set;
+ platform_if->irq_get = platform_mif_irq_get;
+ platform_if->irq_bit_mask_status_get = platform_mif_irq_bit_mask_status_get;
+ platform_if->irq_bit_clear = platform_mif_irq_bit_clear;
+ platform_if->irq_bit_mask = platform_mif_irq_bit_mask;
+ platform_if->irq_bit_unmask = platform_mif_irq_bit_unmask;
+ platform_if->irq_reg_handler = platform_mif_irq_reg_handler;
+ platform_if->irq_unreg_handler = platform_mif_irq_unreg_handler;
+ platform_if->irq_reg_reset_request_handler = platform_mif_irq_reg_reset_request_handler;
+ platform_if->irq_unreg_reset_request_handler = platform_mif_irq_unreg_reset_request_handler;
+ platform_if->suspend_reg_handler = platform_mif_suspend_reg_handler;
+ platform_if->suspend_unreg_handler = platform_mif_suspend_unreg_handler;
+ platform_if->get_mbox_ptr = platform_mif_get_mbox_ptr;
+ platform_if->get_mifram_ptr = platform_mif_get_mifram_ptr;
+ platform_if->get_mifram_ref = platform_mif_get_mifram_ref;
+ platform_if->get_mifram_pfn = platform_mif_get_mif_pfn;
+ platform_if->get_mifram_phy_ptr = platform_mif_get_mifram_phy_ptr;
+ platform_if->get_mif_device = platform_mif_get_mif_device;
+ platform_if->irq_clear = platform_mif_irq_clear;
+ platform_if->mif_dump_registers = platform_mif_dump_register;
+ platform_if->mif_read_register = platform_mif_read_register;
+ platform_if->mif_cleanup = platform_mif_cleanup;
+ platform_if->mif_restart = platform_mif_restart;
+#ifdef CONFIG_SCSC_SMAPPER
+ platform_if->mif_smapper_get_mapping = platform_mif_smapper_get_mapping;
+ platform_if->mif_smapper_get_bank_info = platform_mif_smapper_get_bank_info;
+ platform_if->mif_smapper_write_sram = platform_mif_smapper_write_sram;
+ platform_if->mif_smapper_configure = platform_mif_smapper_configure;
+ platform_if->mif_smapper_get_bank_base_address = platform_mif_smapper_get_bank_base_address;
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_if->mif_pm_qos_add_request = platform_mif_pm_qos_add_request;
+ platform_if->mif_pm_qos_update_request = platform_mif_pm_qos_update_request;
+ platform_if->mif_pm_qos_remove_request = platform_mif_pm_qos_remove_request;
+#endif
+ /* Update state */
+ platform->pdev = pdev;
+ platform->dev = &pdev->dev;
+
+ /* Default handlers until consumers register their own */
+ platform->r4_handler = platform_mif_irq_default_handler;
+ platform->irq_dev = NULL;
+ platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+ platform->irq_reset_request_dev = NULL;
+ platform->suspend_handler = NULL;
+ platform->resume_handler = NULL;
+ platform->suspendresume_data = NULL;
+
+#ifdef CONFIG_OF_RESERVED_MEM
+ /* Region recorded by the reserved-mem early hook */
+ platform->mem_start = sharedmem_base;
+ platform->mem_size = sharedmem_size;
+#else
+ /* If CONFIG_OF_RESERVED_MEM is not defined, sharedmem values should be
+ * parsed from the scsc_wifibt binding
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-base", &sharedmem_base)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_start = sharedmem_base;
+
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-size", &sharedmem_size)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_size = sharedmem_size;
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ platform->smapper = NULL;
+#endif
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ /* TZASC configuration is required for WLBT to access DRAM from Katmai onward */
+ /* Base address should be 4KB aligned. This call needs proper support in EL3_MON */
+ err = exynos_smc(EXYNOS_SMC_WLBT_TZASC_CMD, WLBT_TZASC, platform->mem_start,
+ platform->mem_size);
+ /* Anyway we keep on WLBT initialization even if TZASC failed to minimize disruption*/
+ if (err)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "WLBT Failed to configure TZASC, err=%d. DRAM could be NOT accessible in secure mode\n", err);
+ else
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WLBT TZASC configured OK\n");
+#endif
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->mem_start 0x%x platform->mem_size 0x%x\n", (u32)platform->mem_start, (u32)platform->mem_size);
+ if (platform->mem_start == 0)
+ SCSC_TAG_WARNING_DEV(PLAT_MIF, platform->dev, "platform->mem_start is 0");
+
+ if (platform->mem_size == 0) {
+ /* We return return if mem_size is 0 as it does not make any sense.
+ * This may be an indication of an incorrect platform device binding.
+ */
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "platform->mem_size is 0");
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* Memory resource - Phys Address of MAILBOX_WLBT register map */
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error getting mem resource for MAILBOX_WLBT\n");
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ platform->reg_start = reg_res->start;
+ platform->reg_size = resource_size(reg_res);
+
+ platform->base =
+ devm_ioremap_nocache(platform->dev, reg_res->start, resource_size(reg_res));
+
+ if (!platform->base) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error mapping register region\n");
+ err = -EBUSY;
+ goto error_exit;
+ }
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->reg_start %lx size %x base %p\n", (uintptr_t)platform->reg_start, (u32)platform->reg_size, platform->base);
+
+#if defined(CONFIG_SOC_EXYNOS7872) || defined(CONFIG_SOC_EXYNOS7885)
+ /* Memory resource for M4 MBOX bank - Phys Address of MAILBOX_WLBT register map */
+ reg_res_m4 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!reg_res_m4) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Error getting mem resource for MAILBOX_WLBT\n");
+ err = -ENOENT;
+ goto error_exit;
+ }
+ platform->reg_start_m4 = reg_res_m4->start;
+ platform->reg_size_m4 = resource_size(reg_res_m4);
+
+ platform->base_m4 =
+ devm_ioremap_nocache(platform->dev, reg_res_m4->start, resource_size(reg_res_m4));
+
+ if (!platform->base_m4) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error mapping M4 register region\n");
+ err = -EBUSY;
+ goto error_exit;
+ }
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->reg_start_m4 %lx size_m4 %x base_m4 %p\n", (uintptr_t)platform->reg_start_m4, (u32)platform->reg_size_m4, platform->base_m4);
+#endif
+
+#ifdef CONFIG_SCSC_CLK20MHZ
+ /* Get usbpll,owner if Maxwell has to provide 20Mhz clock to USB subsystem */
+ /* Any parse/lookup failure leaves cmu_base NULL, which disables the
+ * USBPLL-claim feature rather than failing probe. */
+ platform->cmu_base = NULL;
+ if (of_property_read_string(pdev->dev.of_node, "usbpll,owner", &usbowner)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "usbpll,owner of_property does not exist or is invalid\n");
+ goto cont;
+ } else {
+ if (strcasecmp(usbowner, "y"))
+ goto skip_usbpll;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "usbpll,owner is enabled\n");
+
+ /* CMU_FSYS reg map - syscon */
+ platform->cmu_base = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,syscon-cmu_fsys");
+ if (IS_ERR(platform->cmu_base)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "syscon regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->cmu_base));
+ goto skip_usbpll;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "usbpll,udelay", &usbdelay))
+ goto skip_usbpll;
+ platform->usbpll_delay = usbdelay;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Mapping CMU_FSYS with %dus delay\n", usbdelay);
+
+ goto cont;
+skip_usbpll:
+ platform->cmu_base = NULL;
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "usbpll owner skipped\n");
+ }
+cont:
+#endif
+#ifdef CONFIG_SCSC_GPR4_CON_DEBUG
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Mapping GPR4_CON 0x13750040\n")
+ gpio_base =
+ devm_ioremap_nocache(platform->dev, 0x13750040, 4);
+#endif
+
+ /* Get the 4 IRQ resources */
+ for (i = 0; i < 4; i++) {
+ struct resource *irq_res;
+ int irqtag;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "No IRQ resource at index %d\n", i);
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ if (!strcmp(irq_res->name, "MBOX")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MBOX irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_MBOX;
+ } else if (!strcmp(irq_res->name, "ALIVE")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "ALIVE irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_ALIVE;
+ } else if (!strcmp(irq_res->name, "WDOG")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_WDOG;
+ } else if (!strcmp(irq_res->name, "CFG_REQ")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "CFG_REQ irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_CFG_REQ;
+ } else {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, &pdev->dev, "Invalid irq res name: %s\n",
+ irq_res->name);
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->wlbt_irq[irqtag].irq_num = irq_res->start;
+ platform->wlbt_irq[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
+ atomic_set(&platform->wlbt_irq[irqtag].irq_disabled_cnt, 0);
+ }
+
+ /* PMU reg map - syscon */
+ platform->pmureg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(platform->pmureg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "syscon regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->pmureg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* Completion event and state used to indicate CFG_REQ IRQ occurred */
+ init_completion(&platform->cfg_ack);
+ platform->boot_state = WLBT_BOOT_IN_RESET;
+
+ /* BAAW_P_WLBT */
+ platform->baaw_p_wlbt = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,baaw_p_wlbt-syscon-phandle");
+ if (IS_ERR(platform->baaw_p_wlbt)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "baaw_p_wlbt regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->baaw_p_wlbt));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* DBUS_BAAW */
+ platform->dbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,dbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->dbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "dbus_baaw regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->dbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* PBUS_BAAW */
+ platform->pbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,pbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->pbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "pbus_baaw regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->pbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* WLBT_REMAP */
+ platform->wlbt_remap = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,wlbt_remap-syscon-phandle");
+ if (IS_ERR(platform->wlbt_remap)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "wlbt_remap regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->wlbt_remap));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* BOOT_CFG */
+ platform->boot_cfg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,boot_cfg-syscon-phandle");
+ if (IS_ERR(platform->boot_cfg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "boot_cfg regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->boot_cfg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ /* SMAPPER parsing */
+ if (!of_property_read_u32(pdev->dev.of_node, "smapper_num_banks", &smapper_banks))
+ platform_mif_parse_smapper(platform, platform->dev->of_node, smapper_banks);
+
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_mif_parse_qos(platform, platform->dev->of_node);
+#endif
+#ifdef CONFIG_SCSC_CLK20MHZ
+ /* Assign USBPLL ownership to WLBT f/w */
+ __platform_mif_usbpll_claim(platform, true);
+#endif
+
+ /* Initialize spinlock */
+ spin_lock_init(&platform->mif_spinlock);
+
+#ifndef CONFIG_SOC_EXYNOS9610
+ /* Clear WIFI_ACTIVE flag in WAKEUP_STAT */
+ err = regmap_update_bits(platform->pmureg, WIFI_CTRL_NS, WIFI_ACTIVE_CLR, 1);
+ if (err < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to Set WIFI_CTRL_NS[WIFI_ACTIVE_CLR]: %d\n", err);
+ }
+#endif
+ return platform_if;
+
+error_exit:
+ devm_kfree(&pdev->dev, platform);
+ return NULL;
+}
+
+/* Counterpart of platform_mif_create().
+ * Only hands USBPLL ownership back to the AP (when CONFIG_SCSC_CLK20MHZ is
+ * set); other resources are released elsewhere — NOTE(review): presumably
+ * devm-managed, confirm against platform_mif_create().
+ */
+void platform_mif_destroy_platform(struct platform_device *pdev, struct scsc_mif_abs *interface)
+{
+#ifdef CONFIG_SCSC_CLK20MHZ
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	/* Assign USBPLL ownership to AP */
+	__platform_mif_usbpll_claim(platform, false);
+#endif
+}
+
+/* Return the platform_device backing this MIF instance. */
+struct platform_device *platform_mif_get_platform_dev(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *pmif = platform_mif_from_mif_abs(interface);
+
+	BUG_ON(!interface || !pmif);
+
+	return pmif->pdev;
+}
+
+/* Return the struct device backing this MIF instance. */
+struct device *platform_mif_get_dev(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *pmif = platform_mif_from_mif_abs(interface);
+
+	BUG_ON(!interface || !pmif);
+
+	return pmif->dev;
+}
+
+/* Preserve MIF registers during suspend.
+ * If all users of the MIF (AP, mx140, CP, etc) release it, the registers
+ * will lose their values. Save the useful subset here.
+ *
+ * Assumption: the AP will not change the register values between the suspend
+ * and resume handlers being called!
+ */
+static void platform_mif_reg_save(struct platform_mif *platform)
+{
+	/* Only the AP-side interrupt bit mask needs to survive suspend */
+	platform->mif_preserve.irq_bit_mask = __platform_mif_irq_bit_mask_read(platform);
+}
+
+/* Restore MIF registers that may have been lost during suspend
+ * (counterpart of platform_mif_reg_save()).
+ */
+static void platform_mif_reg_restore(struct platform_mif *platform)
+{
+	__platform_mif_irq_bit_mask_write(platform, platform->mif_preserve.irq_bit_mask);
+}
+
+/* System-suspend hook: run the registered suspend handler (if any), then
+ * snapshot the MIF registers. Returns the handler's result, or 0 when no
+ * handler is registered.
+ */
+int platform_mif_suspend(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	int ret = 0;
+
+	if (platform->suspend_handler)
+		ret = platform->suspend_handler(interface, platform->suspendresume_data);
+
+	/* Save registers last: the suspend handler may still use the MIF */
+	platform_mif_reg_save(platform);
+
+	return ret;
+}
+
+/* System-resume hook: restore MIF registers, ack the SoC-specific
+ * WIFI/WLBT "active" clear bit in the PMU, then invoke the registered
+ * resume handler.
+ */
+void platform_mif_resume(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	s32 ret;
+
+	/* Restore the MIF registers.
+	 * This must be done first as the resume_handler may use the MIF.
+	 */
+	platform_mif_reg_restore(platform);
+#ifdef CONFIG_SOC_EXYNOS9610
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clear WLBT_ACTIVE_CLR flag\n");
+	/* Clear WLBT_ACTIVE_CLR flag in WLBT_CTRL_NS */
+	ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS, WLBT_ACTIVE_CLR, 1);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to Set WLBT_CTRL_NS[WLBT_ACTIVE_CLR]: %d\n", ret);
+	}
+#else
+	/* Set WIFI_ACTIVE_CLR in WIFI_CTRL_NS (acks the wakeup source) */
+	ret = regmap_update_bits(platform->pmureg, WIFI_CTRL_NS, WIFI_ACTIVE_CLR, 1);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to Set WIFI_CTRL_NS[WIFI_ACTIVE_CLR]: %d\n", ret);
+	}
+#endif
+	if (platform->resume_handler)
+		platform->resume_handler(interface, platform->suspendresume_data);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __PLATFORM_MIF_H
+#define __PLATFORM_MIF_H
+#include "scsc_mif_abs.h"
+
+/* Indices into platform_mif::wlbt_irq[] for the WLBT interrupt lines */
+enum wlbt_irqs {
+	PLATFORM_MIF_MBOX,	/* mailbox interrupt from WLBT */
+	PLATFORM_MIF_ALIVE,	/* alive notification */
+	PLATFORM_MIF_WDOG,	/* watchdog / reset request */
+#if defined(CONFIG_SOC_EXYNOS9610) || defined(CONFIG_SOC_EXYNOS9630)
+	PLATFORM_MIF_CFG_REQ,	/* boot-time configuration request */
+#endif
+	/* must be last */
+	PLATFORM_MIF_NUM_IRQS
+};
+
+struct platform_device;
+
+struct scsc_mif_abs *platform_mif_create(struct platform_device *pdev);
+void platform_mif_destroy_platform(struct platform_device *pdev, struct scsc_mif_abs *interface);
+struct platform_device *platform_mif_get_platform_dev(struct scsc_mif_abs *interface);
+struct device *platform_mif_get_dev(struct scsc_mif_abs *interface);
+int platform_mif_suspend(struct scsc_mif_abs *interface);
+void platform_mif_resume(struct scsc_mif_abs *interface);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Implements interface */
+
+#include "platform_mif.h"
+
+/* Interfaces it Uses */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/smc.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#endif
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <scsc/scsc_logring.h>
+#include "mif_reg_S5E9610.h"
+#include "platform_mif_module.h"
+#ifdef CONFIG_ARCH_EXYNOS
+#include <linux/soc/samsung/exynos-soc.h>
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+#include <linux/dma-mapping.h>
+#include "mif_reg_smapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include <linux/pm_qos.h>
+#endif
+
+#if !defined(CONFIG_SOC_EXYNOS9610)
+#error Target processor CONFIG_SOC_EXYNOS9610 not selected
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+/* Time to wait for CFG_REQ IRQ on 9610 */
+#define WLBT_BOOT_TIMEOUT (HZ)
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of_reserved_mem.h>
+#endif
+static unsigned long sharedmem_base;
+static size_t sharedmem_size;
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+static bool chv_disable_irq;
+module_param(chv_disable_irq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(chv_disable_irq, "Do not register for irq");
+#endif
+
+static bool enable_platform_mif_arm_reset = true;
+module_param(enable_platform_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_platform_mif_arm_reset, "Enables WIFIBT ARM cores reset");
+
+#ifdef CONFIG_SCSC_QOS
+/* One row of the DT "qos_table" property: the four PM QoS target values
+ * programmed together for one scsc_qos_config level (MIN/MED/MAX).
+ */
+struct qos_table {
+	unsigned int freq_mif;	/* PM_QOS_BUS_THROUGHPUT target */
+	unsigned int freq_int;	/* PM_QOS_DEVICE_THROUGHPUT target */
+	unsigned int freq_cl0;	/* PM_QOS_CLUSTER0_FREQ_MIN target */
+	unsigned int freq_cl1;	/* PM_QOS_CLUSTER1_FREQ_MIN target */
+};
+#endif
+
+/* Per-device state for the Exynos platform implementation of the
+ * scsc_mif_abs interface. Obtained from a struct scsc_mif_abs pointer via
+ * platform_mif_from_mif_abs() (container_of on the embedded interface).
+ */
+struct platform_mif {
+	struct scsc_mif_abs interface;	/* must stay first-class embedded member */
+	struct scsc_mbox_s *mbox;
+	struct platform_device *pdev;
+
+	struct device *dev;
+
+	/* Per-line IRQ bookkeeping, indexed by enum wlbt_irqs */
+	struct {
+		int irq_num;
+		int flags;
+		atomic_t irq_disabled_cnt;
+	} wlbt_irq[PLATFORM_MIF_NUM_IRQS];
+
+	/* MIF registers preserved during suspend */
+	struct {
+		u32 irq_bit_mask;
+	} mif_preserve;
+
+	/* register MBOX memory space */
+	size_t reg_start;
+	size_t reg_size;
+	void __iomem *base;
+
+	/* register CMU memory space */
+	struct regmap *cmu_base;
+
+	void __iomem *con0_base;
+
+	/* pmu syscon regmap */
+	struct regmap *pmureg;
+#if defined(CONFIG_SOC_EXYNOS9610)
+	/* syscon regmaps for the WLBT address-window and boot blocks */
+	struct regmap *baaw_p_wlbt;
+	struct regmap *dbus_baaw;
+	struct regmap *pbus_baaw;
+	struct regmap *wlbt_remap;
+	struct regmap *boot_cfg;
+
+	/* Signalled when CFG_REQ IRQ handled */
+	struct completion cfg_ack;
+
+	/* State of CFG_REQ handler */
+	enum wlbt_boot_state {
+		WLBT_BOOT_IN_RESET = 0,
+		WLBT_BOOT_WAIT_CFG_REQ,
+		WLBT_BOOT_CFG_DONE,
+		WLBT_BOOT_CFG_ERROR
+	} boot_state;
+
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+	/* SMAPPER: mapped register block plus per-bank config parsed from DT */
+	void __iomem *smapper_base;
+	u8 smapper_banks;
+	struct {
+		u8 bank;
+		u32 ws;		/* FW window start address */
+		bool large;
+		struct scsc_mif_smapper_info bank_info;
+	} *smapper;
+#endif
+	/* Shared memory space - reserved memory */
+	unsigned long mem_start;
+	size_t mem_size;
+	void __iomem *mem;
+
+	/* Callback function and dev pointer mif_intr manager handler */
+	void (*r4_handler)(int irq, void *data);
+	void *irq_dev;
+	/* spinlock to serialize driver access */
+	spinlock_t mif_spinlock;
+	void (*reset_request_handler)(int irq, void *data);
+	void *irq_reset_request_dev;
+
+#ifdef CONFIG_SCSC_QOS
+	/* QoS table (3 rows: MIN/MED/MAX); valid only when qos_enabled */
+	struct qos_table *qos;
+	bool qos_enabled;
+#endif
+	/* Suspend/resume handlers */
+	int (*suspend_handler)(struct scsc_mif_abs *abs, void *data);
+	void (*resume_handler)(struct scsc_mif_abs *abs, void *data);
+	void *suspendresume_data;
+};
+
+extern int mx140_log_dump(void);
+
+#define platform_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct platform_mif, interface)
+
+/* MMIO accessors for the WLBT mailbox register block.
+ * Made static: these are file-local helpers, and a plain (non-static,
+ * non-extern) C99 "inline" definition emits no external definition, which
+ * can fail to link if the compiler chooses not to inline a call.
+ */
+static inline void platform_mif_reg_write(struct platform_mif *platform, u16 offset, u32 value)
+{
+	writel(value, platform->base + offset);
+}
+
+static inline u32 platform_mif_reg_read(struct platform_mif *platform, u16 offset)
+{
+	return readl(platform->base + offset);
+}
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* MMIO accessors for the SMAPPER register block.
+ * Made static for the same reason as platform_mif_reg_write(): file-local
+ * plain C99 "inline" definitions provide no external definition to link
+ * against if a call is not inlined.
+ */
+static inline void platform_mif_reg_write_smapper(struct platform_mif *platform, u16 offset, u32 value)
+{
+	writel(value, platform->smapper_base + offset);
+}
+
+static inline u32 platform_mif_reg_read_smapper(struct platform_mif *platform, u16 offset)
+{
+	return readl(platform->smapper_base + offset);
+}
+
+#define PLATFORM_MIF_SHIFT_SMAPPER_ADDR 11 /* From 36 bits addres to 25 bits */
+#define PLATFORM_MIF_SHIFT_SMAPPER_END 4 /* End address aligment */
+
+/* Platform is responsible to give the phys mapping of the SMAPPER maps */
+/* Report the SMAPPER bank layout (large/small per bank) to the abstraction
+ * layer and, when requested, the remap address alignment.
+ * Returns -EINVAL if no banks were configured.
+ */
+static int platform_mif_smapper_get_mapping(struct scsc_mif_abs *interface, u8 *phy_map, u16 *align)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 bank;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Mapping %d banks\n", platform->smapper_banks);
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	for (bank = 0; bank < platform->smapper_banks; bank++)
+		phy_map[bank] = platform->smapper[bank].large ?
+				SCSC_MIF_ABS_LARGE_BANK : SCSC_MIF_ABS_SMALL_BANK;
+
+	if (align)
+		*align = 1 << PLATFORM_MIF_SHIFT_SMAPPER_ADDR;
+
+	return 0;
+}
+
+/* Copy the parsed bank description (entry count and memory range) for
+ * @bank into @bank_info. Returns -EINVAL if no banks were configured.
+ */
+static int platform_mif_smapper_get_bank_info(struct scsc_mif_abs *interface, u8 bank, struct scsc_mif_smapper_info *bank_info)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	const struct scsc_mif_smapper_info *src;
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	src = &platform->smapper[bank].bank_info;
+	bank_info->num_entries = src->num_entries;
+	bank_info->mem_range_bytes = src->mem_range_bytes;
+
+	return 0;
+}
+
+/* Encode a granularity in bytes as the SMAPPER field value:
+ * 0 for <=2KB, 1 for <=4KB, ... doubling up to 6 for <=128KB, else 7.
+ */
+static u8 platform_mif_smapper_granularity_to_bits(u32 granularity)
+{
+	static const u32 limit[] = {
+		2 * 1024, 4 * 1024, 8 * 1024, 16 * 1024,
+		32 * 1024, 64 * 1024, 128 * 1024,
+	};
+	u8 bits;
+
+	for (bits = 0; bits < ARRAY_SIZE(limit); bits++)
+		if (granularity <= limit[bits])
+			return bits;
+
+	return 7;
+}
+
+/* Return the FW window start address parsed for @bank, or 0 if the
+ * SMAPPER table was never allocated.
+ */
+static u32 platform_mif_smapper_get_bank_base_address(struct scsc_mif_abs *interface, u8 bank)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	return platform->smapper ? platform->smapper[bank].ws : 0;
+}
+
+/* Configure smapper according the memory map and range.
+ * Programs every parsed bank with its window start address and the encoded
+ * granularity, then sets the 36-bit access window. Register write order
+ * follows the SMAPPER programming sequence and should not be changed.
+ */
+static void platform_mif_smapper_configure(struct scsc_mif_abs *interface, u32 granularity)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 i;
+	u8 gran;
+	u8 nb = platform->smapper_banks;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Configure SMAPPER with granularity %d\n", granularity);
+
+	gran = platform_mif_smapper_granularity_to_bits(granularity);
+
+	platform_mif_reg_write_smapper(platform, SMAPPER_QCH_DISABLE, 1);
+	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AR, 0);
+	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AW, 0);
+	/* Program SMAPPER memmap */
+	for (i = 0; i < nb; i++) {
+		/* Set ADDR_MAP_EN to 1'b0*/
+		platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(i), 0);
+		/* Set START_ADDR */
+		platform_mif_reg_write_smapper(platform, START_ADDR(i), platform->smapper[i].ws);
+		/* Set ADDR_GRANULARITY - FIXED AT 4KB */
+		platform_mif_reg_write_smapper(platform, ADDR_GRANULARITY(i), gran);
+		/* WLAN_ADDR_MAP operation is started */
+	}
+	/* Set access window control (MSB 32bits Start/End address) */
+	/* Remapped address should be ranged from AW_START_ADDR to AW_EN_ADDR */
+	platform_mif_reg_write_smapper(platform, AW_START_ADDR, 0);
+	platform_mif_reg_write_smapper(platform, AW_END_ADDR, dma_get_mask(platform->dev) >> PLATFORM_MIF_SHIFT_SMAPPER_END);
+	/* Ensure all register writes are visible before anyone uses the map */
+	smp_mb();
+}
+
+/* Caller is responsible of validating the phys address (alignment).
+ * Writes @num_entries remap entries (25-bit MSBs of each phys address)
+ * into @bank's SRAM starting at @first_entry, reading each entry back to
+ * detect write failures, then re-enables the bank mapping.
+ * Returns 0 on success, -EINVAL on bad input, -EFAULT on readback mismatch.
+ */
+static int platform_mif_smapper_write_sram(struct scsc_mif_abs *interface, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 i;
+	u32 rb;
+
+	if (!platform->smapper_banks)
+		return -EINVAL;
+
+	if (!platform->smapper_base) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "SMAPPER not enabled\n");
+		return -EINVAL;
+	}
+
+	/* Set ADDR_MAP_EN to 1'b0 — mapping must be disabled while the SRAM
+	 * table is being rewritten
+	 */
+	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 0);
+	/* Write mapping table to SRAM. Each entry consists of 25 bits MSB address to remap */
+	for (i = 0; i < num_entries; i++) {
+		if (!addr[i]) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ADDR IS NULL at bank %d entry %d/%d\n", bank, first_entry + i, num_entries);
+			return -EINVAL;
+		}
+		/* Set SRAM_WRITE_CTRL to 1'b1*/
+		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 1);
+		platform_mif_reg_write_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i), addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
+		/* check incorrect writings */
+		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 0);
+		rb = platform_mif_reg_read_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i));
+		if (rb != addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "incorrect mapping detected rb 0x%x, addr 0x%x\n", rb, (u32)addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
+			return -EFAULT;
+		}
+	}
+	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 1);
+	/* Make the table visible before the mapping is considered live */
+	smp_mb();
+	return 0;
+}
+
+/* Parse the SMAPPER DT sub-nodes ("smapper_bank_N") and map the SMAPPER
+ * register block described by the "smapper_reg" property.
+ * On success platform->smapper, smapper_banks and smapper_base are
+ * populated; on any failure they are all left reset and -ENOMEM/-ENOENT
+ * is returned.
+ *
+ * Fixes vs previous revision: every node returned by
+ * of_find_node_by_name() is now released with of_node_put() (refcount
+ * leak), and the "smapper_reg" array read is checked before val[] is used
+ * (it was previously consumed uninitialized on a malformed DT).
+ */
+static int platform_mif_parse_smapper(struct platform_mif *platform, struct device_node *np, u8 num_banks)
+{
+	struct device_node *np_banks;
+	char node_name[50];
+	u32 val[2];
+	u8 i;
+	u32 bank = 0, ws = 0, wsz = 0, ent = 0, large = 0;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "banks found: %d szof %zu\n", num_banks, sizeof(*platform->smapper));
+
+	platform->smapper = kmalloc_array(num_banks, sizeof(*platform->smapper), GFP_KERNEL);
+	if (!platform->smapper)
+		return -ENOMEM;
+
+	for (i = 0; i < num_banks; i++) {
+		snprintf(node_name, sizeof(node_name), "smapper_bank_%d", i);
+		np_banks = of_find_node_by_name(np, node_name);
+		if (!np_banks) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "%s: could not find smapper_bank\n",
+					 node_name);
+			goto error;
+		}
+		of_property_read_u32(np_banks, "bank_num", &bank);
+		of_property_read_u32(np_banks, "fw_window_start", &ws);
+		of_property_read_u32(np_banks, "fw_window_size", &wsz);
+		of_property_read_u32(np_banks, "num_entries", &ent);
+		of_property_read_u32(np_banks, "is_large", &large);
+		/* Drop the reference taken by of_find_node_by_name() */
+		of_node_put(np_banks);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				  "bank %d fw_w_start 0x%x fw_w_sz 0x%x entries %d is_large %d\n",
+				  bank, ws, wsz, ent, large);
+
+		platform->smapper[i].bank = (u8)bank;
+		platform->smapper[i].ws = ws;
+		platform->smapper[i].large = (bool)large;
+		platform->smapper[i].bank_info.num_entries = ent;
+		platform->smapper[i].bank_info.mem_range_bytes = wsz;
+	}
+
+	/* Update the number of banks before returning */
+	platform->smapper_banks = num_banks;
+
+	if (of_property_read_u32_array(np, "smapper_reg", val, 2)) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "invalid or missing smapper_reg property\n");
+		goto error;
+	}
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "smapper reg address 0x%x size 0x%x\n", val[0], val[1]);
+	platform->smapper_base =
+		devm_ioremap_nocache(platform->dev, val[0], val[1]);
+	if (!platform->smapper_base) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Error mapping smapper register region\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	/* Leave the driver in a consistent "no SMAPPER" state */
+	kfree(platform->smapper);
+	platform->smapper = NULL;
+	platform->smapper_banks = 0;
+	return -ENOENT;
+}
+#endif
+#ifdef CONFIG_SCSC_QOS
+static int platform_mif_parse_qos(struct platform_mif *platform, struct device_node *np)
+{
+ int len, i;
+
+ platform->qos_enabled = false;
+
+ len = of_property_count_u32_elems(np, "qos_table");
+ if (!(len == 12)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "No qos table for wlbt, or incorrect size\n");
+ return -ENOENT;
+ }
+
+ platform->qos = devm_kzalloc(platform->dev, sizeof(struct qos_table) * len / 4, GFP_KERNEL);
+ if (!platform->qos)
+ return -ENOMEM;
+
+ of_property_read_u32_array(np, "qos_table", (unsigned int *)platform->qos, len);
+
+ for (i = 0; i < len / 4; i++) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "QoS Table[%d] mif : %u int : %u cl0 : %u cl1: %u\n", i,
+ platform->qos[i].freq_mif,
+ platform->qos[i].freq_int,
+ platform->qos[i].freq_cl0,
+ platform->qos[i].freq_cl1);
+ }
+
+ platform->qos_enabled = true;
+ return 0;
+}
+
+/* Map a scsc_qos_config level to its row of the parsed QoS table.
+ * Unknown levels return an all-zero table.
+ */
+struct qos_table platform_mif_pm_qos_get_table(struct platform_mif *platform, enum scsc_qos_config config)
+{
+	struct qos_table table = { 0, 0, 0, 0 };
+	int row;
+
+	switch (config) {
+	case SCSC_QOS_MIN:
+		row = 0;
+		break;
+	case SCSC_QOS_MED:
+		row = 1;
+		break;
+	case SCSC_QOS_MAX:
+		row = 2;
+		break;
+	default:
+		return table;
+	}
+
+	table = platform->qos[row];
+	return table;
+}
+
+/* Register the four PM QoS requests (MIF/INT/CL0/CL1) at the frequencies
+ * of the requested level. Returns -ENODEV/-EOPNOTSUPP when unavailable.
+ */
+static int platform_mif_pm_qos_add_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	struct qos_table t;
+
+	if (!platform)
+		return -ENODEV;
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	t = platform_mif_pm_qos_get_table(platform, config);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "PM QoS add request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, t.freq_mif, t.freq_int, t.freq_cl0, t.freq_cl1);
+
+	pm_qos_add_request(&qos_req->pm_qos_req_mif, PM_QOS_BUS_THROUGHPUT, t.freq_mif);
+	pm_qos_add_request(&qos_req->pm_qos_req_int, PM_QOS_DEVICE_THROUGHPUT, t.freq_int);
+	pm_qos_add_request(&qos_req->pm_qos_req_cl0, PM_QOS_CLUSTER0_FREQ_MIN, t.freq_cl0);
+	pm_qos_add_request(&qos_req->pm_qos_req_cl1, PM_QOS_CLUSTER1_FREQ_MIN, t.freq_cl1);
+
+	return 0;
+}
+
+/* Retarget the previously added PM QoS requests to a new level. */
+static int platform_mif_pm_qos_update_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	struct qos_table t;
+
+	if (!platform)
+		return -ENODEV;
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	t = platform_mif_pm_qos_get_table(platform, config);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			  "PM QoS update request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, t.freq_mif, t.freq_int, t.freq_cl0, t.freq_cl1);
+
+	pm_qos_update_request(&qos_req->pm_qos_req_mif, t.freq_mif);
+	pm_qos_update_request(&qos_req->pm_qos_req_int, t.freq_int);
+	pm_qos_update_request(&qos_req->pm_qos_req_cl0, t.freq_cl0);
+	pm_qos_update_request(&qos_req->pm_qos_req_cl1, t.freq_cl1);
+
+	return 0;
+}
+
+/* Drop all four PM QoS requests added by platform_mif_pm_qos_add_request(). */
+static int platform_mif_pm_qos_remove_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	if (!platform)
+		return -ENODEV;
+	if (!platform->qos_enabled) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS remove request\n");
+	pm_qos_remove_request(&qos_req->pm_qos_req_mif);
+	pm_qos_remove_request(&qos_req->pm_qos_req_int);
+	pm_qos_remove_request(&qos_req->pm_qos_req_cl0);
+	pm_qos_remove_request(&qos_req->pm_qos_req_cl1);
+
+	return 0;
+}
+#endif
+
+/* Placeholder installed until a real MIF interrupt handler is registered. */
+static void platform_mif_irq_default_handler(int irq, void *data)
+{
+	(void)irq;	/* unused */
+	(void)data;	/* unused */
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT handler not registered\n");
+}
+
+/* Placeholder installed until a real reset_request handler is registered. */
+static void platform_mif_irq_reset_request_default_handler(int irq, void *data)
+{
+	(void)irq;	/* unused */
+	(void)data;	/* unused */
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT reset_request handler not registered\n");
+}
+
+/* Top-level MBOX interrupt: dispatch to the registered r4 handler. */
+irqreturn_t platform_mif_isr(int irq, void *data)
+{
+	struct platform_mif *platform = (struct platform_mif *)data;
+
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "INT %pS\n", platform->r4_handler);
+	if (platform->r4_handler == platform_mif_irq_default_handler)
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MIF Interrupt Handler not registered\n");
+	else
+		platform->r4_handler(irq, platform->irq_dev);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+/* ALIVE interrupt: logged only, no further action taken here. */
+irqreturn_t platform_alive_isr(int irq, void *data)
+{
+	struct platform_mif *pmif = (struct platform_mif *)data;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, pmif->dev, "INT received\n");
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/* Watchdog interrupt from WLBT: forward to the registered reset_request
+ * handler, or disable the IRQ if no handler is installed. On 9610, also
+ * clear WLBT's reset request in the PMU.
+ *
+ * Fix: "int ret = 0" was declared unconditionally but only used under
+ * CONFIG_SOC_EXYNOS9610, causing an unused-variable warning on other SoCs;
+ * it is now scoped inside the #ifdef block.
+ */
+irqreturn_t platform_wdog_isr(int irq, void *data)
+{
+	struct platform_mif *platform = (struct platform_mif *)data;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+	if (platform->reset_request_handler != platform_mif_irq_reset_request_default_handler) {
+		disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+		platform->reset_request_handler(irq, platform->irq_reset_request_dev);
+	} else {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG Interrupt reset_request_handler not registered\n");
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Disabling unhandled WDOG IRQ.\n");
+		disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+		atomic_inc(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt);
+	}
+#ifdef CONFIG_SOC_EXYNOS9610
+	{
+		int ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+					     WLBT_RESET_REQ_CLR, WLBT_RESET_REQ_CLR);
+
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clearing WLBT_RESET_REQ\n");
+		if (ret < 0)
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+					 "Failed to Set WLBT_CTRL_NS[WLBT_RESET_REQ_CLR]: %d\n", ret);
+	}
+#endif
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SOC_EXYNOS9610
+/*
+ * Attached array contains the replacement PMU boot code which should
+ * be programmed using the CBUS during the config phase.
+ * Written word-by-word into the boot_cfg block by platform_cfg_req_isr();
+ * the values are opaque firmware data and must not be edited by hand.
+ */
+uint32_t ka_patch[] = {
+	/* Low temp fix 28/1
+	 * Maxwell142 PMU+PROC combined boot ROM
+	 * IP Version: 0xA3
+	 * Major Version: 0xF, Minor Version: 0xF
+	 * PMU ROM version: 0x4
+	 * PROC ROM version: 0x0
+	 */
+	0x90750002,
+	0x11a4c218,
+	0x75671191,
+	0x9075e090,
+	0x54b3e5e7,
+	0x30f76001,
+	0xb14315a2,
+	0xb4b2e503,
+	0xb153fb03,
+	0xa90185fc,
+	0xacf50774,
+	0x75fdadb5,
+	0xb47508a0,
+	0x54b3e501,
+	0x75fa7001,
+	0x907500b4,
+	0x78cb8018,
+	0x80837982,
+	0x07a075c5,
+	0xb0783779,
+	0xb40754e6,
+	0x0b800207,
+	0xc404f6d9,
+	0xaf7590f5,
+	0x75038000,
+	0x53229090,
+	0xce53eff7,
+	0xd90479fe,
+	0xfdce53fe,
+	0xfed90c79,
+	0x75fbce53,
+	0x91530b92,
+	0xf7ce53fd,
+	0x5308f943,
+	0xf922fef9,
+	0xfbd8fed9,
+	0x019e7522,
+	0x75cfc175,
+	0xc375a4c2,
+	0x47c4754a,
+	0x75a4c575,
+	0xc7756dc6,
+	0x03d27540,
+	0x7510d375,
+	0xca7500c9,
+	0x00cb75d0,
+	0x7500cc75,
+	0x9b75009a,
+	0x009c75c0,
+	0x78009d75,
+	0x12827402,
+	0xc6438b80,
+	0x74097802,
+	0x8b8012e7,
+	0x75d09075,
+	0x9e750291,
+	0x01a97502,
+	0x00000022,
+};
+
+extern bool reset_failed;
+
+/* CFG_REQ interrupt from WLBT during boot (9610 only).
+ * WLBT raises CFG_REQ once it comes out of reset; this handler performs
+ * the one-shot configuration sequence: TZPC to non-secure, WLBT_REMAP,
+ * DBUS/PBUS BAAW address windows, PMU boot-code patch, then CFG_ACK.
+ * The register programming order follows the boot sequence and must not
+ * be changed. Completion of platform->cfg_ack unblocks the boot path in
+ * both the success and error cases.
+ */
+irqreturn_t platform_cfg_req_isr(int irq, void *data)
+{
+	struct platform_mif *platform = (struct platform_mif *)data;
+	const u64 EXYNOS_WLBT = 0x1;
+	u64 ret64 = 0;
+	s32 ret = 0;
+	unsigned int ka_addr = 0x1000;	/* boot_cfg offset where ka_patch is loaded */
+	uint32_t *ka_patch_addr = ka_patch;
+	u32 id;
+
+/* Any failed regmap write aborts the sequence and flags a config error */
+#define CHECK(x) do { \
+	int retval = (x); \
+	if (retval < 0) \
+		goto cfg_error; \
+} while (0)
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "disable_irq\n");
+
+	/* mask the irq */
+	disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+	/* Was the CFG_REQ irq received from WLBT before we expected it?
+	 * Typically this indicates an issue returning WLBT HW to reset.
+	 */
+	if (platform->boot_state != WLBT_BOOT_WAIT_CFG_REQ) {
+		u32 val;
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Spurious CFG_REQ IRQ from WLBT!\n");
+
+		regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+
+		regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_NS 0x%x\n", val);
+
+		regmap_read(platform->pmureg, WLBT_CTRL_S, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_S 0x%x\n", val);
+
+		regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+		reset_failed = true; /* prevent further interaction with HW */
+
+		return IRQ_HANDLED;
+	}
+
+	/* CBUS should be ready before we get CFG_REQ, but we suspect
+	 * CBUS is not ready yet. add some delay to see if that helps
+	 */
+	udelay(100);
+
+	/* Set TZPC to non-secure mode */
+	ret64 = exynos_smc(SMC_CMD_CONN_IF, (EXYNOS_WLBT << 32) | EXYNOS_SET_CONN_TZPC, 0, 0);
+	if (ret64)
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to set TZPC to non-secure mode: %llu\n", ret64);
+	else
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				  "SMC_CMD_CONN_IF run successfully : %llu\n", ret64);
+
+	/* WLBT_REMAP */
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP begin\n");
+	CHECK(regmap_write(platform->wlbt_remap, 0x0, WLBT_DBUS_BAAW_0_START >> 12));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP end\n");
+
+	/* CHIP_VERSION_ID - overload with EMA settings */
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "CHIP_VERSION_ID begin\n");
+	regmap_read(platform->wlbt_remap, 0x10, &id);
+	id &= ~CHIP_VERSION_ID_EMA_MASK;
+	id |= CHIP_VERSION_ID_EMA_VALUE;
+	CHECK(regmap_write(platform->wlbt_remap, 0x10, id));
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "CHIP_VERSION_ID 0x%x end\n", id);
+
+	/* DBUS_BAAW regions: window WLBT's data bus onto the reserved memory.
+	 * Each region is programmed as start, end, remap target, access ctrl.
+	 */
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW begin\n");
+	CHECK(regmap_write(platform->dbus_baaw, 0x0, WLBT_DBUS_BAAW_0_START >> 12));
+	CHECK(regmap_write(platform->dbus_baaw, 0x4, WLBT_DBUS_BAAW_0_END >> 12));
+	CHECK(regmap_write(platform->dbus_baaw, 0x8, platform->mem_start >> 12));
+	CHECK(regmap_write(platform->dbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW end\n");
+
+	/* PBUS_BAAW regions: window WLBT's peripheral bus onto AP peripherals */
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW begin\n");
+	CHECK(regmap_write(platform->pbus_baaw, 0x0, WLBT_PBUS_BAAW_0_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x4, WLBT_PBUS_BAAW_0_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x8, WLBT_PBUS_MBOX_CP2WLBT_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));
+
+	CHECK(regmap_write(platform->pbus_baaw, 0x10, WLBT_PBUS_BAAW_1_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x14, WLBT_PBUS_BAAW_1_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x18, WLBT_PBUS_MBOX_SHUB2WLBT_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x1C, WLBT_BAAW_ACCESS_CTRL));
+
+	CHECK(regmap_write(platform->pbus_baaw, 0x20, WLBT_PBUS_BAAW_2_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x24, WLBT_PBUS_BAAW_2_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x28, WLBT_PBUS_USI_CMG00_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x2C, WLBT_BAAW_ACCESS_CTRL));
+
+	CHECK(regmap_write(platform->pbus_baaw, 0x30, WLBT_PBUS_BAAW_3_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x34, WLBT_PBUS_BAAW_3_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x38, WLBT_PBUS_SYSREG_CMGP2WLBT_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x3C, WLBT_BAAW_ACCESS_CTRL));
+
+	CHECK(regmap_write(platform->pbus_baaw, 0x40, WLBT_PBUS_BAAW_4_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x44, WLBT_PBUS_BAAW_4_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x48, WLBT_PBUS_GPIO_CMGP_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x4C, WLBT_BAAW_ACCESS_CTRL));
+
+	CHECK(regmap_write(platform->pbus_baaw, 0x50, WLBT_PBUS_BAAW_5_START >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x54, WLBT_PBUS_BAAW_5_END >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x58, WLBT_PBUS_SHUB_BASE >> 12));
+	CHECK(regmap_write(platform->pbus_baaw, 0x5C, WLBT_BAAW_ACCESS_CTRL));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW end\n");
+
+	/* PMU boot bug workaround: enter patch-load mode */
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT begin\n");
+	CHECK(regmap_write(platform->boot_cfg, 0x0, 0x1));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT done\n");
+
+	/* Load ka_patch word-by-word into boot_cfg starting at ka_addr */
+	while (ka_patch_addr < (ka_patch + ARRAY_SIZE(ka_patch))) {
+		CHECK(regmap_write(platform->boot_cfg, ka_addr, *ka_patch_addr));
+		ka_addr += sizeof(ka_patch[0]);
+		ka_patch_addr++;
+	}
+
+	/* Notify PMU of configuration done */
+	CHECK(regmap_write(platform->boot_cfg, 0x0, 0x0));
+
+	/* BOOT_CFG_ACK */
+	CHECK(regmap_write(platform->boot_cfg, 0x4, 0x1));
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT_CFG_ACK done\n");
+
+	/* Delay to allow HW to clear CFG_REQ and hence de-assert IRQ, which
+	 * it does in response to CFG_ACK
+	 */
+	udelay(100);
+
+	/* Release ownership of MASK_PWR_REQ */
+	/* See sequence in 9.6.6 */
+	ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+				 MASK_PWR_REQ, 0);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to clear WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+		goto cfg_error;
+	}
+
+	/* Mark as CFQ_REQ handled, so boot may continue */
+	platform->boot_state = WLBT_BOOT_CFG_DONE;
+
+	/* Signal triggering function that the IRQ arrived and CFG was done */
+	complete(&platform->cfg_ack);
+
+	/* Re-enable IRQ here to allow spurious interrupt to be tracked */
+	enable_irq(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+	return IRQ_HANDLED;
+cfg_error:
+	platform->boot_state = WLBT_BOOT_CFG_ERROR;
+	SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ERROR: WLBT Config failed. WLBT will not work\n");
+	complete(&platform->cfg_ack);
+	return IRQ_HANDLED;
+}
+#endif
+
+/* Release every IRQ requested by platform_mif_register_irq(). */
+static void platform_mif_unregister_irq(struct platform_mif *platform)
+{
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering IRQs\n");
+
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform);
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform);
+	/* WDOG IRQ is now unregistered and disabled, so forget any disable count */
+	atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+	/* ALIVE irq only requested when enabled */
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform);
+#endif
+#ifdef CONFIG_SOC_EXYNOS9610
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform);
+#endif
+}
+
+/* Request the MBOX and WDOG interrupts (plus ALIVE / CFG_REQ when configured).
+ * Returns 0 on success or -ENODEV if any request fails. Already-registered
+ * IRQs are devm-managed, so no explicit rollback is performed here.
+ *
+ * Fix: the ALIVE branch called IS_ERR_VALUE(err) on a plain int; all other
+ * call sites cast to (unsigned long) first, which is required for the macro
+ * to behave correctly on 64-bit. Made all four sites consistent, and
+ * simplified the "err = -ENODEV; return err;" pattern.
+ */
+static int platform_mif_register_irq(struct platform_mif *platform)
+{
+ int err;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering IRQs\n");
+
+ /* Register MBOX irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering MBOX irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform->wlbt_irq[PLATFORM_MIF_MBOX].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform_mif_isr,
+ platform->wlbt_irq[PLATFORM_MIF_MBOX].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register MBOX handler: %d. Aborting.\n", err);
+ return -ENODEV;
+ }
+
+ /* Register WDOG irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering WDOG irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform->wlbt_irq[PLATFORM_MIF_WDOG].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform_wdog_isr,
+ platform->wlbt_irq[PLATFORM_MIF_WDOG].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register WDOG handler: %d. Aborting.\n", err);
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+ /* Register ALIVE irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering ALIVE irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform_alive_isr,
+ platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register ALIVE handler: %d. Aborting.\n", err);
+ return -ENODEV;
+ }
+#endif
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* Mark as WLBT in reset before enabling IRQ to guard against spurious IRQ */
+ platform->boot_state = WLBT_BOOT_IN_RESET;
+ smp_wmb(); /* commit before irq */
+
+ /* Register WB2AP_CFG_REQ irq */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering CFG_REQ irq: %d flag 0x%x\n",
+ platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags);
+
+ err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform_cfg_req_isr,
+ platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags, DRV_NAME, platform);
+ if (IS_ERR_VALUE((unsigned long)err)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to register CFG_REQ handler: %d. Aborting.\n", err);
+ return -ENODEV;
+ }
+#endif
+ return 0;
+}
+
+/* Tear down the interface: the only resources we own here are the IRQs. */
+static void platform_mif_destroy(struct scsc_mif_abs *interface)
+{
+ platform_mif_unregister_irq(platform_mif_from_mif_abs(interface));
+}
+
+/* Return the unit identifier; there is a single MIF instance on this
+ * platform so the UID is the fixed string "0".
+ */
+static char *platform_mif_get_uid(struct scsc_mif_abs *interface)
+{
+ (void)interface; /* unused */
+
+ return "0";
+}
+
+/* WLBT Power domain */
+/* Assert (power=true) or clear (power=false) the AP-owned PWR_REQ bit in
+ * WLBT_CTRL_NS. Compiles to a no-op returning 0 on non-Exynos9610 builds.
+ * Returns 0 on success or a negative regmap error.
+ */
+static int platform_mif_power(struct scsc_mif_abs *interface, bool power)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "power %d\n", power);
+
+ if (power)
+ val = MASK_PWR_REQ;
+
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ MASK_PWR_REQ, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+/* WLBT RESET */
+/* Drive (reset=true) or release (reset=false) WLBT_RESET_SET in WLBT_CTRL_NS.
+ * No-op returning 0 on non-Exynos9610 builds. Returns 0 or a negative regmap
+ * error.
+ */
+static int platform_mif_hold_reset(struct scsc_mif_abs *interface, bool reset)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "reset %d\n", reset);
+ if (reset)
+ val = WLBT_RESET_SET;
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ WLBT_RESET_SET, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[WLBT_RESET_SET]: %d\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+/* WLBT START */
+/* Set/clear WLBT_START in WLBT_CTRL_S. When starting, block until the
+ * CFG_REQ ISR completes platform->cfg_ack (or WLBT_BOOT_TIMEOUT elapses),
+ * then verify the ISR did not flag a configuration error.
+ * Returns 0, a negative regmap error, -ETIMEDOUT, or -EIO.
+ */
+static int platform_mif_start(struct scsc_mif_abs *interface, bool start)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val = 0;
+ s32 ret = 0;
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "start %d\n", start);
+ if (start)
+ val = WLBT_START;
+
+ /* See sequence in 9.6.6 */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_S,
+ WLBT_START, val);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_S[WLBT_START]: %d\n", ret);
+ return ret;
+ }
+ /* NOTE(review): informational message emitted at ERR level — confirm intent */
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "update WIFI_CTRL_S[WIFI_START]: %d\n", ret);
+
+ /* At this point WLBT should assert the CFG_REQ IRQ, so wait for it */
+ if (start &&
+ wait_for_completion_timeout(&platform->cfg_ack, WLBT_BOOT_TIMEOUT) == 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Timeout waiting for CFG_REQ IRQ\n");
+ /* Dump PMU state to aid boot-failure triage */
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+ return -ETIMEDOUT;
+ }
+ /* only continue if CFG_REQ IRQ configured WLBT/PMU correctly */
+ if (platform->boot_state == WLBT_BOOT_CFG_ERROR) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "CFG_REQ failed to configure WLBT.\n");
+ return -EIO;
+ }
+#endif
+ return 0;
+}
+
+/* Bring WLBT out of reset: power on, release the reset line, then start it.
+ * On Exynos9610 the CFG_REQ ISR is armed first via boot_state. Returns 0 on
+ * success or the first failing step's error code.
+ */
+static int platform_mif_pmu_reset_release(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ int ret;
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* We're now ready for the IRQ */
+ platform->boot_state = WLBT_BOOT_WAIT_CFG_REQ;
+ smp_wmb(); /* commit before irq */
+#endif
+ ret = platform_mif_power(interface, true);
+ if (ret)
+ return ret;
+
+ ret = platform_mif_hold_reset(interface, false);
+ if (ret)
+ return ret;
+
+ return platform_mif_start(interface, true);
+}
+
+/* Put WLBT back into reset via the documented PMU sequence: reclaim PWR_REQ
+ * ownership for the AP, program the SYS_PWR configuration registers, assert
+ * reset, then poll CENTRAL_SEQ_WLBT_STATUS until the power-down state is
+ * reached. rst_case must be 1 or 2 (always 2 on 9610).
+ * Returns 0, a negative regmap error, -EIO for a bad rst_case, or -ETIME.
+ */
+static int platform_mif_pmu_reset(struct scsc_mif_abs *interface, u8 rst_case)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long timeout;
+ int ret;
+ u32 val;
+
+ if (rst_case == 0 || rst_case > 2) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Incorrect pmu reset case %d\n", rst_case);
+ return -EIO;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "rst_case %d\n", rst_case);
+
+ /* Revert power control ownership to AP, as WLBT is going down (S9.6.6). */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ MASK_PWR_REQ, MASK_PWR_REQ);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
+ return ret;
+ }
+
+ /* reset sequence as per excite implementation for Leman */
+ ret = regmap_update_bits(platform->pmureg, CENTRAL_SEQ_WLBT_CONFIGURATION,
+ SYS_PWR_CFG_16, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CENTRAL_SEQ_WLBT_CONFIGURATION %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, RESET_AHEAD_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update RESET_AHEAD_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, CLEANY_BUS_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update CLEANY_BUS_WLBT_SYS_PWR_REG%d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, LOGIC_RESET_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG_2, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update LOGIC_RESET_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, TCXO_GATE_WLBT_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update TCXO_GATE_WLBT_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WLBT_DISABLE_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 1);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_DISABLE_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WLBT_RESET_ISO_SYS_PWR_REG,
+ SYS_PWR_CFG, 0);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to update WLBT_RESET_ISO_SYS_PWR_REG %d\n", ret);
+ return ret;
+ }
+
+ /* rst_case is always 2 on 9610 */
+ ret = platform_mif_hold_reset(interface, true);
+
+ if (ret)
+ return ret;
+
+ /* Busy-poll (no sleep) for up to 500 ms for the PMU state machine to
+ * report completion. 0x80 in the STATES field is taken as the
+ * powered-down state — presumably per the SoC PMU spec; confirm.
+ */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ val &= STATES;
+ val >>= 16;
+ if (val == 0x80) {
+ /* OK. Switch CTRL_NS[MASK_PWR_REQ] ownership to FW following
+ * reset. WLBT PWR_REQ is cleared when it's put in reset.
+ * The SW PWR_REQ remains asserted, but as ownership is now FW,
+ * it'll be ignored. This leaves it as we found it.
+ */
+ platform_mif_power(interface, false);
+
+ return 0; /* OK - return */
+ }
+ } while (time_before(jiffies, timeout));
+
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Timeout waiting for CENTRAL_SEQ_WLBT_STATUS SM status\n");
+ regmap_read(platform->pmureg, CENTRAL_SEQ_WLBT_STATUS, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "CENTRAL_SEQ_WLBT_STATUS 0x%x\n", val);
+ regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+ SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+ return -ETIME;
+}
+
+/* reset=0 - release from reset */
+/* reset=1 - hold reset */
+/* Dispatch to the appropriate PMU sequence, honouring the
+ * enable_platform_mif_arm_reset module control for the hold-reset path.
+ * Returns 0 or the negative error from the PMU helper.
+ *
+ * Fix: ret was declared u32 while both PMU helpers return negative errnos
+ * through an int return type; keep it signed so error values are well-typed.
+ */
+static int platform_mif_reset(struct scsc_mif_abs *interface, bool reset)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ int ret = 0;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (enable_platform_mif_arm_reset || !reset) {
+ if (!reset) { /* Release from reset */
+#ifdef CONFIG_ARCH_EXYNOS
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "SOC_VERSION: product_id 0x%x, rev 0x%x\n",
+ exynos_soc_info.product_id, exynos_soc_info.revision);
+#endif
+ ret = platform_mif_pmu_reset_release(interface);
+ } else {
+ /* Put back into reset */
+ ret = platform_mif_pmu_reset(interface, 2);
+ }
+ } else
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Not resetting ARM Cores - enable_platform_mif_arm_reset: %d\n",
+ enable_platform_mif_arm_reset);
+ return ret;
+}
+
+/* Map the physically-contiguous shared-memory window into kernel virtual
+ * space as write-combined (NORMAL_NC) memory. Returns the mapping, or NULL
+ * on allocation/vmap failure. Unmap with platform_mif_unmap_region().
+ *
+ * Fix: use kmalloc_array() so the page-count multiplication is checked for
+ * overflow, and use a size_t index to match the page count's type.
+ */
+static void __iomem *platform_mif_map_region(unsigned long phys_addr, size_t size)
+{
+ size_t i, num_pages;
+ struct page **pages;
+ void *vmem;
+
+ size = PAGE_ALIGN(size);
+ num_pages = size >> PAGE_SHIFT;
+
+ pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ /* Map NORMAL_NC pages with kernel virtual space */
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = phys_to_page(phys_addr);
+ phys_addr += PAGE_SIZE;
+ }
+
+ vmem = vmap(pages, num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+
+ kfree(pages);
+ return (void __iomem *)vmem;
+}
+
+/* Release a mapping created by platform_mif_map_region(). */
+static void platform_mif_unmap_region(void *vmem)
+{
+ vunmap(vmem);
+}
+
+/* Map the shared-memory window, initialise the MAILBOX_WLBT registers to
+ * their documented defaults (all interrupts masked and cleared), and hook up
+ * the IRQs. On success *allocated is set to the mapped size and the virtual
+ * base is returned; on failure *allocated stays 0 and NULL is returned.
+ */
+static void *platform_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u8 i;
+
+ if (allocated)
+ *allocated = 0;
+
+ platform->mem =
+ platform_mif_map_region(platform->mem_start, platform->mem_size);
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Error remaping shared memory\n");
+ return NULL;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Map: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+
+ /* Initialise MIF registers with documented defaults */
+ /* MBOXes */
+ for (i = 0; i < NUM_MBOX_PLAT; i++)
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(ISSR(i)), 0x00000000);
+
+ /* MRs */ /*1's - set all as Masked */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+ /* CRs */ /* 1's - clear all the interrupts */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+ /* CHV debug builds may run with host IRQs disabled: skip registration */
+ if (chv_disable_irq == true) {
+ if (allocated)
+ *allocated = platform->mem_size;
+ return platform->mem;
+ }
+#endif
+ /* register interrupts */
+ if (platform_mif_register_irq(platform)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+ platform_mif_unmap_region(platform->mem);
+ return NULL;
+ }
+
+ if (allocated)
+ *allocated = platform->mem_size;
+ /* Set the CR4 base address in Mailbox??*/
+ return platform->mem;
+}
+
+/* HERE: Not sure why mem is passed in - its stored in platform - as it should be */
+/* Reverse of platform_mif_map(): mask and clear all mailbox interrupts,
+ * unregister the IRQ handlers (unless a CHV build disabled them) and tear
+ * down the shared-memory mapping. The mem argument is ignored.
+ */
+static void platform_mif_unmap(struct scsc_mif_abs *interface, void *mem)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ /* Avoid unused parameter error */
+ (void)mem;
+
+ /* MRs */ /*1's - set all as Masked */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+ /* Restore PIO changed by Maxwell subsystem */
+ if (chv_disable_irq == false)
+ /* Unregister IRQs */
+ platform_mif_unregister_irq(platform);
+#else
+ platform_mif_unregister_irq(platform);
+#endif
+ /* CRs */ /* 1's - clear all the interrupts */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+ platform_mif_unmap_region(platform->mem);
+ platform->mem = NULL;
+}
+
+/* Return the AP-side interrupt mask bits (upper 16 bits of INTMR0). */
+static u32 platform_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 mask_bits;
+
+ mask_bits = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)) >> 16;
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INTMR0: 0x%x\n", mask_bits);
+
+ return mask_bits;
+}
+
+/* Return the pending, unmasked interrupt bits (upper half of INTMSR0). */
+static u32 platform_mif_irq_get(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 pending;
+
+ /* Function has to return the interrupts that are enabled *AND* not masked */
+ pending = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)) >> 16;
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INT-INTMSR0: 0x%x\n", pending);
+
+ return pending;
+}
+
+/* Raise interrupt bit_num towards the target core via INTGR1.
+ * No-op on non-Exynos9610 builds.
+ *
+ * Fixes: reject negative bit_num (previously only >= 16 was checked, so a
+ * negative value reached an undefined shift), and drop the intermediate
+ * 'reg' variable which was unused (warning) when CONFIG_SOC_EXYNOS9610 is off.
+ */
+static void platform_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTGR1), (1 << bit_num));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTGR1: bit %d on target %d\n", bit_num, target);
+#endif
+}
+
+/* Clear pending interrupt bit_num by writing its bit in INTCR0's upper half.
+ *
+ * Fix: reject negative bit_num — the original only checked >= 16, letting a
+ * negative value reach an undefined shift.
+ */
+static void platform_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ /* WRITE : 1 = Clears Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), ((1 << bit_num) << 16));
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTCR0: bit %d\n", bit_num);
+}
+
+/* Mask interrupt bit_num (read-modify-write of INTMR0 under the MIF lock).
+ *
+ * Fixes: reject negative bit_num (undefined shift otherwise), and log the
+ * value actually written — the old log omitted the << 16 shift, unlike the
+ * matching unmask routine.
+ */
+static void platform_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val;
+ unsigned long flags;
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ val = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ /* WRITE : 1 = Mask Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val | ((1 << bit_num) << 16));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTMR0: 0x%x bit %d\n", val | ((1 << bit_num) << 16), bit_num);
+}
+
+/* Unmask interrupt bit_num (read-modify-write of INTMR0 under the MIF lock).
+ *
+ * Fix: reject negative bit_num — the original only checked >= 16, letting a
+ * negative value reach an undefined shift.
+ */
+static void platform_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 val;
+ unsigned long flags;
+
+ if (bit_num < 0 || bit_num >= 16) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+ return;
+ }
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ val = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ /* WRITE : 0 = Unmask Interrupt */
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val & ~((1 << bit_num) << 16));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "UNMASK Setting INTMR0: 0x%x bit %d\n", val & ~((1 << bit_num) << 16), bit_num);
+}
+
+/* Snapshot the full INTMR0 mask register under the MIF spinlock. */
+static u32 __platform_mif_irq_bit_mask_read(struct platform_mif *platform)
+{
+ unsigned long flags;
+ u32 mask;
+
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ mask = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Read INTMR0: 0x%x\n", mask);
+ return mask;
+}
+
+/* Overwrite the whole INTMR0 mask register under the MIF spinlock,
+ * discarding its previous contents.
+ */
+static void __platform_mif_irq_bit_mask_write(struct platform_mif *platform, u32 val)
+{
+ unsigned long irq_state;
+
+ spin_lock_irqsave(&platform->mif_spinlock, irq_state);
+ platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val);
+ spin_unlock_irqrestore(&platform->mif_spinlock, irq_state);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Write INTMR0: 0x%x\n", val);
+}
+
+/* Install the R4 mailbox interrupt handler and its context pointer; the
+ * spinlock keeps the pair consistent with respect to the ISR.
+ */
+static void platform_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long irq_state;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif int handler %pS in %p %p\n", handler, platform, interface);
+
+ spin_lock_irqsave(&platform->mif_spinlock, irq_state);
+ platform->irq_dev = dev;
+ platform->r4_handler = handler;
+ spin_unlock_irqrestore(&platform->mif_spinlock, irq_state);
+}
+
+/* Restore the default R4 mailbox handler and drop the context pointer.
+ *
+ * Fix: the log used %pS (kernel symbol lookup) on 'interface', which is a
+ * heap struct pointer, not a code address — print it with plain %p instead.
+ */
+static void platform_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long flags;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif int handler %p\n", interface);
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+ platform->r4_handler = platform_mif_irq_default_handler;
+ platform->irq_dev = NULL;
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* Install the reset-request (WDOG) handler. If the WDOG IRQ was disabled by
+ * the spurious-IRQ guard while no handler was registered, re-enable it now
+ * and zero the disabled count.
+ */
+static void platform_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif reset_request int handler %pS in %p %p\n", handler, platform, interface);
+ platform->reset_request_handler = handler;
+ platform->irq_reset_request_dev = dev;
+ if (atomic_read(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "Default WDOG handler disabled by spurios IRQ...re-enabling.\n");
+ enable_irq(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+ }
+}
+
+/* Restore the default reset-request handler and drop its context pointer.
+ *
+ * Fix: as in the int-handler unregister path, %pS was applied to the
+ * 'interface' struct pointer; use %p for a plain pointer.
+ */
+static void platform_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "UnRegistering mif reset_request int handler %p\n", interface);
+ platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+ platform->irq_reset_request_dev = NULL;
+}
+
+/* Record the suspend/resume callbacks and their shared context pointer. */
+static void platform_mif_suspend_reg_handler(struct scsc_mif_abs *interface,
+ int (*suspend)(struct scsc_mif_abs *abs, void *data),
+ void (*resume)(struct scsc_mif_abs *abs, void *data),
+ void *data)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif suspend/resume handlers in %p %p\n", platform, interface);
+
+ platform->suspendresume_data = data;
+ platform->suspend_handler = suspend;
+ platform->resume_handler = resume;
+}
+
+/* Clear the suspend/resume callbacks and their context pointer. */
+static void platform_mif_suspend_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif suspend/resume handlers in %p %p\n", platform, interface);
+
+ platform->suspendresume_data = NULL;
+ platform->suspend_handler = NULL;
+ platform->resume_handler = NULL;
+}
+
+/* Return a pointer into the mapped mailbox register block for the ISSR
+ * slot selected by mbox_index.
+ */
+static u32 *platform_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "mbox_index 0x%x\n", mbox_index);
+
+ return platform->base + MAILBOX_WLBT_REG(ISSR(mbox_index));
+}
+
+/* Convert a kernel-virtual pointer inside the shared-memory window into an
+ * offset-style scsc_mifram_ref stored through *ref. Returns 0, or -ENOMEM
+ * when no mapping exists or ptr lies outside the window.
+ *
+ * Fix: the bounds check only rejected pointers above the window; a pointer
+ * below platform->mem slipped through and produced a bogus (underflowed)
+ * reference. Check both ends.
+ */
+static int platform_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return -ENOMEM;
+ }
+
+ /* Check limits! */
+ if (ptr < platform->mem || ptr >= (platform->mem + platform->mem_size)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Unable to get pointer reference\n");
+ return -ENOMEM;
+ }
+
+ *ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)platform->mem);
+
+ return 0;
+}
+
+/* Convert an offset-style scsc_mifram_ref back into a kernel-virtual
+ * pointer. Returns NULL when the window is unmapped or ref is out of range.
+ */
+static void *platform_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ /* Check limits */
+ if (ref < 0 || ref >= platform->mem_size)
+ return NULL;
+
+ return (void *)((uintptr_t)platform->mem + (uintptr_t)ref);
+}
+
+/* Translate a scsc_mifram_ref offset into the corresponding *physical*
+ * address within the shared-memory region. Returns NULL when no region
+ * base is known. Note: unlike the virtual variant, ref is not range-checked
+ * here.
+ */
+static void *platform_mif_get_mifram_phy_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem_start) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)platform->mem_start + (uintptr_t)ref);
+}
+
+/* Return the page-frame number backing the start of the vmap'd shared
+ * memory window (platform->mem).
+ */
+static uintptr_t platform_mif_get_mif_pfn(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ return vmalloc_to_pfn(platform->mem);
+}
+
+/* Return the underlying struct device of this MIF instance. */
+static struct device *platform_mif_get_mif_device(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ return platform->dev;
+}
+
+/* Intentional no-op on this platform; kept to satisfy the MIF interface. */
+static void platform_mif_irq_clear(void)
+{
+ /* Implement if required */
+}
+
+/* Read the PMU register identified by the abstract id into *val.
+ * Returns 0 on success, a negative regmap error if the read fails, or -EIO
+ * for ids not implemented on this platform.
+ *
+ * Fix: the regmap_read() return value was discarded, so a failed hardware
+ * read reported success while leaving *val unset — propagate it instead.
+ */
+static int platform_mif_read_register(struct scsc_mif_abs *interface, u64 id, u32 *val)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (id == SCSC_REG_READ_WLBT_STAT)
+ return regmap_read(platform->pmureg, WLBT_STAT, val);
+
+ return -EIO;
+}
+
+/* Dump all MAILBOX_WLBT interrupt registers (generate/clear/mask/status) at
+ * INFO level for debugging, under the MIF spinlock so the snapshot is
+ * consistent with concurrent mask/unmask operations.
+ */
+static void platform_mif_dump_register(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ unsigned long flags;
+
+ spin_lock_irqsave(&platform->mif_spinlock, flags);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR1)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR1)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR1)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR1)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)));
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR1)));
+
+ spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* No additional cleanup needed on this platform; interface hook only. */
+static void platform_mif_cleanup(struct scsc_mif_abs *interface)
+{
+}
+
+/* No restart-specific work needed on this platform; interface hook only. */
+static void platform_mif_restart(struct scsc_mif_abs *interface)
+{
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+/* Early (init-time) callback for the "exynos,wifibt_if" reserved-memory DT
+ * node: record its base and size in the file-scope sharedmem_base/_size for
+ * later use by platform_mif_create(). Always returns 0.
+ */
+static int __init platform_mif_wifibt_if_reserved_mem_setup(struct reserved_mem *remem)
+{
+ SCSC_TAG_DEBUG(PLAT_MIF, "memory reserved: mem_base=%#lx, mem_size=%zd\n",
+ (unsigned long)remem->base, (size_t)remem->size);
+
+ sharedmem_base = remem->base;
+ sharedmem_size = remem->size;
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(wifibt_if, "exynos,wifibt_if", platform_mif_wifibt_if_reserved_mem_setup);
+#endif
+
+struct scsc_mif_abs *platform_mif_create(struct platform_device *pdev)
+{
+ struct scsc_mif_abs *platform_if;
+ struct platform_mif *platform =
+ (struct platform_mif *)devm_kzalloc(&pdev->dev, sizeof(struct platform_mif), GFP_KERNEL);
+ int err = 0;
+ u8 i = 0;
+ struct resource *reg_res;
+
+#ifdef CONFIG_SCSC_SMAPPER
+ u32 smapper_banks = 0;
+#endif
+
+ if (!platform)
+ return NULL;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, &pdev->dev, "Creating MIF platform device\n");
+
+ platform_if = &platform->interface;
+
+ /* initialise interface structure */
+ platform_if->destroy = platform_mif_destroy;
+ platform_if->get_uid = platform_mif_get_uid;
+ platform_if->reset = platform_mif_reset;
+ platform_if->map = platform_mif_map;
+ platform_if->unmap = platform_mif_unmap;
+ platform_if->irq_bit_set = platform_mif_irq_bit_set;
+ platform_if->irq_get = platform_mif_irq_get;
+ platform_if->irq_bit_mask_status_get = platform_mif_irq_bit_mask_status_get;
+ platform_if->irq_bit_clear = platform_mif_irq_bit_clear;
+ platform_if->irq_bit_mask = platform_mif_irq_bit_mask;
+ platform_if->irq_bit_unmask = platform_mif_irq_bit_unmask;
+ platform_if->irq_reg_handler = platform_mif_irq_reg_handler;
+ platform_if->irq_unreg_handler = platform_mif_irq_unreg_handler;
+ platform_if->irq_reg_reset_request_handler = platform_mif_irq_reg_reset_request_handler;
+ platform_if->irq_unreg_reset_request_handler = platform_mif_irq_unreg_reset_request_handler;
+ platform_if->suspend_reg_handler = platform_mif_suspend_reg_handler;
+ platform_if->suspend_unreg_handler = platform_mif_suspend_unreg_handler;
+ platform_if->get_mbox_ptr = platform_mif_get_mbox_ptr;
+ platform_if->get_mifram_ptr = platform_mif_get_mifram_ptr;
+ platform_if->get_mifram_ref = platform_mif_get_mifram_ref;
+ platform_if->get_mifram_pfn = platform_mif_get_mif_pfn;
+ platform_if->get_mifram_phy_ptr = platform_mif_get_mifram_phy_ptr;
+ platform_if->get_mif_device = platform_mif_get_mif_device;
+ platform_if->irq_clear = platform_mif_irq_clear;
+ platform_if->mif_dump_registers = platform_mif_dump_register;
+ platform_if->mif_read_register = platform_mif_read_register;
+ platform_if->mif_cleanup = platform_mif_cleanup;
+ platform_if->mif_restart = platform_mif_restart;
+#ifdef CONFIG_SCSC_SMAPPER
+ platform_if->mif_smapper_get_mapping = platform_mif_smapper_get_mapping;
+ platform_if->mif_smapper_get_bank_info = platform_mif_smapper_get_bank_info;
+ platform_if->mif_smapper_write_sram = platform_mif_smapper_write_sram;
+ platform_if->mif_smapper_configure = platform_mif_smapper_configure;
+ platform_if->mif_smapper_get_bank_base_address = platform_mif_smapper_get_bank_base_address;
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_if->mif_pm_qos_add_request = platform_mif_pm_qos_add_request;
+ platform_if->mif_pm_qos_update_request = platform_mif_pm_qos_update_request;
+ platform_if->mif_pm_qos_remove_request = platform_mif_pm_qos_remove_request;
+#endif
+ /* Update state */
+ platform->pdev = pdev;
+ platform->dev = &pdev->dev;
+
+ platform->r4_handler = platform_mif_irq_default_handler;
+ platform->irq_dev = NULL;
+ platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+ platform->irq_reset_request_dev = NULL;
+ platform->suspend_handler = NULL;
+ platform->resume_handler = NULL;
+ platform->suspendresume_data = NULL;
+
+#ifdef CONFIG_OF_RESERVED_MEM
+ platform->mem_start = sharedmem_base;
+ platform->mem_size = sharedmem_size;
+#else
+ /* If CONFIG_OF_RESERVED_MEM is not defined, sharedmem values should be
+ * parsed from the scsc_wifibt binding
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-base", &sharedmem_base)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_start = sharedmem_base;
+
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-size", &sharedmem_size)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_size = sharedmem_size;
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ platform->smapper = NULL;
+#endif
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->mem_start 0x%x platform->mem_size 0x%x\n",
+ (u32)platform->mem_start, (u32)platform->mem_size);
+ if (platform->mem_start == 0)
+ SCSC_TAG_WARNING_DEV(PLAT_MIF, platform->dev, "platform->mem_start is 0");
+
+ if (platform->mem_size == 0) {
+ /* We return return if mem_size is 0 as it does not make any sense.
+ * This may be an indication of an incorrect platform device binding.
+ */
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "platform->mem_size is 0");
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* Memory resource - Phys Address of MAILBOX_WLBT register map */
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error getting mem resource for MAILBOX_WLBT\n");
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ platform->reg_start = reg_res->start;
+ platform->reg_size = resource_size(reg_res);
+
+ platform->base =
+ devm_ioremap_nocache(platform->dev, reg_res->start, resource_size(reg_res));
+
+ if (!platform->base) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error mapping register region\n");
+ err = -EBUSY;
+ goto error_exit;
+ }
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->reg_start %lx size %x base %p\n",
+ (uintptr_t)platform->reg_start, (u32)platform->reg_size, platform->base);
+
+ /* Get the 4 IRQ resources */
+ for (i = 0; i < 4; i++) {
+ struct resource *irq_res;
+ int irqtag;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "No IRQ resource at index %d\n", i);
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ if (!strcmp(irq_res->name, "MBOX")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MBOX irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_MBOX;
+ } else if (!strcmp(irq_res->name, "ALIVE")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "ALIVE irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_ALIVE;
+ } else if (!strcmp(irq_res->name, "WDOG")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_WDOG;
+ } else if (!strcmp(irq_res->name, "CFG_REQ")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "CFG_REQ irq %d flag 0x%x\n", (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_CFG_REQ;
+ } else {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, &pdev->dev, "Invalid irq res name: %s\n",
+ irq_res->name);
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->wlbt_irq[irqtag].irq_num = irq_res->start;
+ platform->wlbt_irq[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
+ atomic_set(&platform->wlbt_irq[irqtag].irq_disabled_cnt, 0);
+ }
+
+ /* PMU reg map - syscon */
+ platform->pmureg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(platform->pmureg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "syscon regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->pmureg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+#ifdef CONFIG_SOC_EXYNOS9610
+ /* Completion event and state used to indicate CFG_REQ IRQ occurred */
+ init_completion(&platform->cfg_ack);
+ platform->boot_state = WLBT_BOOT_IN_RESET;
+
+ /* BAAW_P_WLBT */
+ platform->baaw_p_wlbt = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,baaw_p_wlbt-syscon-phandle");
+ if (IS_ERR(platform->baaw_p_wlbt)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "baaw_p_wlbt regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->baaw_p_wlbt));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* DBUS_BAAW */
+ platform->dbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,dbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->dbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "dbus_baaw regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->dbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* PBUS_BAAW */
+ platform->pbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,pbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->pbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "pbus_baaw regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->pbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* WLBT_REMAP */
+ platform->wlbt_remap = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,wlbt_remap-syscon-phandle");
+ if (IS_ERR(platform->wlbt_remap)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "wlbt_remap regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->wlbt_remap));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* BOOT_CFG */
+ platform->boot_cfg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,boot_cfg-syscon-phandle");
+ if (IS_ERR(platform->boot_cfg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "boot_cfg regmap lookup failed. Aborting. %ld\n", PTR_ERR(platform->boot_cfg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ /* SMAPPER parsing */
+ if (!of_property_read_u32(pdev->dev.of_node, "smapper_num_banks", &smapper_banks))
+ platform_mif_parse_smapper(platform, platform->dev->of_node, smapper_banks);
+
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_mif_parse_qos(platform, platform->dev->of_node);
+#endif
+ /* Initialize spinlock */
+ spin_lock_init(&platform->mif_spinlock);
+
+ return platform_if;
+
+error_exit:
+ devm_kfree(&pdev->dev, platform);
+ return NULL;
+}
+
void platform_mif_destroy_platform(struct platform_device *pdev, struct scsc_mif_abs *interface)
{
	/* Intentional no-op: nothing is torn down here. Kept to satisfy the
	 * MIF abstraction's create/destroy pairing.
	 */
	(void)pdev;
	(void)interface;
}
+
+struct platform_device *platform_mif_get_platform_dev(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !platform);
+
+ return platform->pdev;
+}
+
+struct device *platform_mif_get_dev(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !platform);
+
+ return platform->dev;
+}
+
+/* Preserve MIF registers during suspend.
+ * If all users of the MIF (AP, mx140, CP, etc) release it, the registers
+ * will lose their values. Save the useful subset here.
+ *
+ * Assumption: the AP will not change the register values between the suspend
+ * and resume handlers being called!
+ */
+static void platform_mif_reg_save(struct platform_mif *platform)
+{
+ platform->mif_preserve.irq_bit_mask = __platform_mif_irq_bit_mask_read(platform);
+}
+
+/* Restore MIF registers that may have been lost during suspend */
+static void platform_mif_reg_restore(struct platform_mif *platform)
+{
+ __platform_mif_irq_bit_mask_write(platform, platform->mif_preserve.irq_bit_mask);
+}
+
+int platform_mif_suspend(struct scsc_mif_abs *interface)
+{
+ int r = 0;
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (platform->suspend_handler)
+ r = platform->suspend_handler(interface, platform->suspendresume_data);
+
+ /* Save the MIF registers.
+ * This must be done last as the suspend_handler may use the MIF
+ */
+ platform_mif_reg_save(platform);
+
+ return r;
+}
+
+void platform_mif_resume(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ s32 ret;
+
+ /* Restore the MIF registers.
+ * This must be done first as the resume_handler may use the MIF.
+ */
+ platform_mif_reg_restore(platform);
+#ifdef CONFIG_SOC_EXYNOS9610
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clear WLBT_ACTIVE_CLR flag\n");
+ /* Clear WLBT_ACTIVE_CLR flag in WLBT_CTRL_NS */
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS, WLBT_ACTIVE_CLR, 1);
+ if (ret < 0) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to Set WLBT_CTRL_NS[WLBT_ACTIVE_CLR]: %d\n", ret);
+ }
+#endif
+ if (platform->resume_handler)
+ platform->resume_handler(interface, platform->suspendresume_data);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Implements interface */
+
+#include "platform_mif.h"
+
+/* Interfaces it Uses */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/smc.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#endif
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <scsc/scsc_logring.h>
+#include "mif_reg_S5E9630.h"
+#include "platform_mif_module.h"
+#ifdef CONFIG_ARCH_EXYNOS
+#include <linux/soc/samsung/exynos-soc.h>
+#endif
+#ifdef CONFIG_SOC_EXYNOS9630
+#include <linux/mfd/samsung/s2mpu11-regulator.h>
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+#include <linux/dma-mapping.h>
+#include "mif_reg_smapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include <linux/pm_qos.h>
+#endif
+
+#if !defined(CONFIG_SOC_EXYNOS9630)
+#error Target processor CONFIG_SOC_EXYNOS9630 not selected
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+/* Time to wait for CFG_REQ IRQ on 9610 */
+#define WLBT_BOOT_TIMEOUT (HZ)
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of_reserved_mem.h>
+#endif
/* Shared reserved-memory window handed to WLBT; initialised elsewhere in
 * this driver (not visible in this chunk).
 */
static unsigned long sharedmem_base;
static size_t sharedmem_size;

#ifdef CONFIG_SCSC_CHV_SUPPORT
/* CHV test builds may opt out of IRQ registration entirely. */
static bool chv_disable_irq;
module_param(chv_disable_irq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(chv_disable_irq, "Do not register for irq");
#endif

/* Allow WLBT ARM core reset to be disabled for debug (enabled by default). */
static bool enable_platform_mif_arm_reset = true;
module_param(enable_platform_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_platform_mif_arm_reset, "Enables WIFIBT ARM cores reset");

/* When set, the host skips APM setup. */
static bool disable_apm_setup;
module_param(disable_apm_setup, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_apm_setup, "Disable host APM setup");
+
+#ifdef CONFIG_SCSC_QOS
/* One row of the DT "qos_table" property: the frequency request applied to
 * each PM QoS class for a given SCSC QoS level (MIN/MED/MAX).
 */
struct qos_table {
	unsigned int freq_mif;	/* PM_QOS_BUS_THROUGHPUT request */
	unsigned int freq_int;	/* PM_QOS_DEVICE_THROUGHPUT request */
	unsigned int freq_cl0;	/* PM_QOS_CLUSTER0_FREQ_MIN request */
	unsigned int freq_cl1;	/* PM_QOS_CLUSTER1_FREQ_MIN request */
};
+#endif
+
/* Per-device state for the platform MIF implementation. Embeds the generic
 * scsc_mif_abs so callers can recover this context with container_of()
 * (see platform_mif_from_mif_abs). Field order is ABI-neutral but kept
 * as-is; do not reorder without checking all users.
 */
struct platform_mif {
	struct scsc_mif_abs interface;	/* container_of() anchor for the abstraction */
	struct scsc_mbox_s *mbox;
	struct platform_device *pdev;

	struct device *dev;

	/* One entry per wired WLBT interrupt (MBOX/ALIVE/WDOG/CFG_REQ). */
	struct {
		int irq_num;
		int flags;
		atomic_t irq_disabled_cnt;	/* counts disable_irq_nosync() for unhandled IRQs */
	} wlbt_irq[PLATFORM_MIF_NUM_IRQS];

	/* MIF registers preserved during suspend */
	struct {
		u32 irq_bit_mask;
	} mif_preserve;

	/* register MBOX memory space */
	size_t reg_start;
	size_t reg_size;
	void __iomem *base;

	/* register CMU memory space */
	struct regmap *cmu_base;

	void __iomem *con0_base;

	/* pmu syscon regmap */
	struct regmap *pmureg;
#if defined(CONFIG_SOC_EXYNOS9630)
	/* syscon regmaps for the BAAW address windows and boot config,
	 * programmed from platform_cfg_req_isr().
	 */
	struct regmap *baaw_p_wlbt;
	struct regmap *dbus_baaw;
	struct regmap *pbus_baaw;
	struct regmap *wlbt_remap;
	struct regmap *boot_cfg;

	/* Signalled when CFG_REQ IRQ handled */
	struct completion cfg_ack;

	/* State of CFG_REQ handler */
	enum wlbt_boot_state {
		WLBT_BOOT_IN_RESET = 0,
		WLBT_BOOT_WAIT_CFG_REQ,
		WLBT_BOOT_CFG_DONE,
		WLBT_BOOT_CFG_ERROR
	} boot_state;

#endif
#ifdef CONFIG_SCSC_SMAPPER
	/* SMAPPER */
	void __iomem *smapper_base;
	u8 smapper_banks;	/* 0 means SMAPPER unavailable/not parsed */
	struct {
		u8 bank;
		u32 ws;		/* fw_window_start from DT */
		bool large;
		struct scsc_mif_smapper_info bank_info;
	} *smapper;
#endif
	/* Shared memory space - reserved memory */
	unsigned long mem_start;
	size_t mem_size;
	void __iomem *mem;

	/* Callback function and dev pointer mif_intr manager handler */
	void (*r4_handler)(int irq, void *data);
	void *irq_dev;
	/* spinlock to serialize driver access */
	spinlock_t mif_spinlock;
	void (*reset_request_handler)(int irq, void *data);
	void *irq_reset_request_dev;

#ifdef CONFIG_SCSC_QOS
	/* QoS table */
	struct qos_table *qos;
	bool qos_enabled;	/* set only after qos_table parsed successfully */
#endif
	/* Suspend/resume handlers */
	int (*suspend_handler)(struct scsc_mif_abs *abs, void *data);
	void (*resume_handler)(struct scsc_mif_abs *abs, void *data);
	void *suspendresume_data;
};
+
/* Forward declaration; defined later in this file (outside this chunk). */
static void power_supplies_on(struct platform_mif *platform);

/* Implemented elsewhere in the SCSC driver stack. */
extern int mx140_log_dump(void);

/* Recover the platform_mif wrapper from its embedded scsc_mif_abs. */
#define platform_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct platform_mif, interface)
+
+inline void platform_mif_reg_write(struct platform_mif *platform, u16 offset, u32 value)
+{
+ writel(value, platform->base + offset);
+}
+
+inline u32 platform_mif_reg_read(struct platform_mif *platform, u16 offset)
+{
+ return readl(platform->base + offset);
+}
+
+#ifdef CONFIG_SCSC_SMAPPER
+inline void platform_mif_reg_write_smapper(struct platform_mif *platform, u16 offset, u32 value)
+{
+ writel(value, platform->smapper_base + offset);
+}
+
+inline u32 platform_mif_reg_read_smapper(struct platform_mif *platform, u16 offset)
+{
+ return readl(platform->smapper_base + offset);
+}
+
#define PLATFORM_MIF_SHIFT_SMAPPER_ADDR 11 /* From 36-bit address to 25 bits */
#define PLATFORM_MIF_SHIFT_SMAPPER_END 4 /* End address alignment */
+
+/* Platform is responsible to give the phys mapping of the SMAPPER maps */
+static int platform_mif_smapper_get_mapping(struct scsc_mif_abs *interface, u8 *phy_map, u16 *align)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u8 i;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Mapping %d banks\n", platform->smapper_banks);
+
+ if (!platform->smapper_banks)
+ return -EINVAL;
+
+ for (i = 0; i < platform->smapper_banks; i++) {
+ if (platform->smapper[i].large)
+ phy_map[i] = SCSC_MIF_ABS_LARGE_BANK;
+ else
+ phy_map[i] = SCSC_MIF_ABS_SMALL_BANK;
+ }
+
+ if (align)
+ *align = 1 << PLATFORM_MIF_SHIFT_SMAPPER_ADDR;
+
+ return 0;
+}
+
+static int platform_mif_smapper_get_bank_info(struct scsc_mif_abs *interface, u8 bank, struct scsc_mif_smapper_info *bank_info)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (!platform->smapper_banks)
+ return -EINVAL;
+
+ bank_info->num_entries = platform->smapper[bank].bank_info.num_entries;
+ bank_info->mem_range_bytes = platform->smapper[bank].bank_info.mem_range_bytes;
+
+ return 0;
+}
+
+static u8 platform_mif_smapper_granularity_to_bits(u32 granularity)
+{
+ if (granularity <= 2 * 1024)
+ return 0;
+ if (granularity <= 4 * 1024)
+ return 1;
+ if (granularity <= 8 * 1024)
+ return 2;
+ if (granularity <= 16 * 1024)
+ return 3;
+ if (granularity <= 32 * 1024)
+ return 4;
+ if (granularity <= 64 * 1024)
+ return 5;
+ if (granularity <= 128 * 1024)
+ return 6;
+ return 7;
+}
+
+static u32 platform_mif_smapper_get_bank_base_address(struct scsc_mif_abs *interface, u8 bank)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (!platform->smapper)
+ return 0;
+
+ return platform->smapper[bank].ws;
+}
+
/* Configure the SMAPPER according to the memory map and range.
 * Register write order follows the hardware programming sequence:
 * QCH off, origin addresses zeroed, then per-bank (disable map, start
 * address, granularity), then the access window. Do not reorder.
 */
static void platform_mif_smapper_configure(struct scsc_mif_abs *interface, u32 granularity)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
	u8 i;
	u8 gran;
	u8 nb = platform->smapper_banks;

	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Configure SMAPPER with granularity %d\n", granularity);

	gran = platform_mif_smapper_granularity_to_bits(granularity);

	platform_mif_reg_write_smapper(platform, SMAPPER_QCH_DISABLE, 1);
	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AR, 0);
	platform_mif_reg_write_smapper(platform, ORIGIN_ADDR_AW, 0);
	/* Program SMAPPER memmap */
	for (i = 0; i < nb; i++) {
		/* Set ADDR_MAP_EN to 1'b0 while the bank is programmed */
		platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(i), 0);
		/* Set START_ADDR */
		platform_mif_reg_write_smapper(platform, START_ADDR(i), platform->smapper[i].ws);
		/* Set ADDR_GRANULARITY (encoding from granularity_to_bits) */
		platform_mif_reg_write_smapper(platform, ADDR_GRANULARITY(i), gran);
		/* WLAN_ADDR_MAP operation is started */
	}
	/* Set access window control (MSB 32bits Start/End address) */
	/* Remapped address should be ranged from AW_START_ADDR to AW_EN_ADDR */
	platform_mif_reg_write_smapper(platform, AW_START_ADDR, 0);
	platform_mif_reg_write_smapper(platform, AW_END_ADDR, dma_get_mask(platform->dev) >> PLATFORM_MIF_SHIFT_SMAPPER_END);
	/* Ensure all MMIO writes above are issued before continuing. */
	smp_mb();
}
+
/* Caller is responsible for validating the phys address (alignment).
 * Writes num_entries remap entries (the 25-bit address MSBs) into the
 * bank's SRAM starting at first_entry, verifying each write by read-back,
 * then re-enables the bank mapping.
 * Returns 0 on success, -EINVAL on bad state/NULL entry, -EFAULT when a
 * read-back does not match what was written.
 */
static int platform_mif_smapper_write_sram(struct scsc_mif_abs *interface, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
	u8 i;
	u32 rb;	/* read-back value used to verify each SRAM write */

	if (!platform->smapper_banks)
		return -EINVAL;

	if (!platform->smapper_base) {
		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "SMAPPER not enabled\n");
		return -EINVAL;
	}

	/* Set ADDR_MAP_EN to 1'b0 while the SRAM is being written */
	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 0);
	/* Write mapping table to SRAM. Each entry consists of 25 bits MSB address to remap */
	for (i = 0; i < num_entries; i++) {
		if (!addr[i]) {
			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ADDR IS NULL at bank %d entry %d/%d\n", bank, first_entry + i, num_entries);
			return -EINVAL;
		}
		/* Set SRAM_WRITE_CTRL to 1'b1*/
		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 1);
		platform_mif_reg_write_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i), addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
		/* check incorrect writings */
		platform_mif_reg_write_smapper(platform, SRAM_WRITE_CTRL(bank), 0);
		rb = platform_mif_reg_read_smapper(platform, SRAM_BANK_INDEX(bank, first_entry + i));
		if (rb != addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR) {
			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "incorrect mapping detected rb 0x%x, addr 0x%x\n", rb, (u32)addr[i] >> PLATFORM_MIF_SHIFT_SMAPPER_ADDR);
			return -EFAULT;
		}
	}
	platform_mif_reg_write_smapper(platform, ADDR_MAP_EN(bank), 1);
	/* Ensure the mapping is fully visible before callers rely on it. */
	smp_mb();
	return 0;
}
+
+static int platform_mif_parse_smapper(struct platform_mif *platform, struct device_node *np, u8 num_banks)
+{
+ /* SMAPPER parsing */
+ struct device_node *np_banks;
+ char node_name[50];
+ u32 val[2];
+ u8 i;
+ u32 bank = 0, ws = 0, wsz = 0, ent = 0, large = 0;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "banks found: %d szof %zu\n", num_banks, sizeof(*platform->smapper));
+
+ platform->smapper = kmalloc_array(num_banks, sizeof(*platform->smapper), GFP_KERNEL);
+
+ if (!platform->smapper)
+ return -ENOMEM;
+
+ for (i = 0; i < num_banks; i++) {
+ snprintf(node_name, sizeof(node_name), "smapper_bank_%d", i);
+ np_banks = of_find_node_by_name(np, node_name);
+ if (!np_banks) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "%s: could not find smapper_bank\n",
+ node_name);
+ kfree(platform->smapper);
+ platform->smapper = NULL;
+ return -ENOENT;
+ }
+ of_property_read_u32(np_banks, "bank_num", &bank);
+ of_property_read_u32(np_banks, "fw_window_start", &ws);
+ of_property_read_u32(np_banks, "fw_window_size", &wsz);
+ of_property_read_u32(np_banks, "num_entries", &ent);
+ of_property_read_u32(np_banks, "is_large", &large);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "bank %d fw_w_start 0x%x fw_w_sz 0x%x entries %d is_large %d\n",
+ bank, ws, wsz, ent, large);
+
+ platform->smapper[i].bank = (u8)bank;
+ platform->smapper[i].ws = ws;
+ platform->smapper[i].large = (bool)large;
+ platform->smapper[i].bank_info.num_entries = ent;
+ platform->smapper[i].bank_info.mem_range_bytes = wsz;
+ }
+
+ /* Update the number of banks before returning */
+ platform->smapper_banks = num_banks;
+
+ of_property_read_u32_array(np, "smapper_reg", val, 2);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "smapper reg address 0x%x size 0x%x\n", val[0], val[1]);
+ platform->smapper_base =
+ devm_ioremap_nocache(platform->dev, val[0], val[1]);
+
+ if (!platform->smapper_base) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error mapping smapper register region\n");
+ kfree(platform->smapper);
+ platform->smapper = NULL;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+#endif
+#ifdef CONFIG_SCSC_QOS
+static int platform_mif_parse_qos(struct platform_mif *platform, struct device_node *np)
+{
+ int len, i;
+
+ platform->qos_enabled = false;
+
+ len = of_property_count_u32_elems(np, "qos_table");
+ if (!(len == 12)) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "No qos table for wlbt, or incorrect size\n");
+ return -ENOENT;
+ }
+
+ platform->qos = devm_kzalloc(platform->dev, sizeof(struct qos_table) * len / 4, GFP_KERNEL);
+ if (!platform->qos)
+ return -ENOMEM;
+
+ of_property_read_u32_array(np, "qos_table", (unsigned int *)platform->qos, len);
+
+ for (i = 0; i < len / 4; i++) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "QoS Table[%d] mif : %u int : %u cl0 : %u cl1: %u\n", i,
+ platform->qos[i].freq_mif,
+ platform->qos[i].freq_int,
+ platform->qos[i].freq_cl0,
+ platform->qos[i].freq_cl1);
+ }
+
+ platform->qos_enabled = true;
+ return 0;
+}
+
+struct qos_table platform_mif_pm_qos_get_table(struct platform_mif *platform, enum scsc_qos_config config)
+{
+ struct qos_table table;
+
+ switch (config) {
+ case SCSC_QOS_MIN:
+ table.freq_mif = platform->qos[0].freq_mif;
+ table.freq_int = platform->qos[0].freq_int;
+ table.freq_cl0 = platform->qos[0].freq_cl0;
+ table.freq_cl1 = platform->qos[0].freq_cl1;
+ break;
+
+ case SCSC_QOS_MED:
+ table.freq_mif = platform->qos[1].freq_mif;
+ table.freq_int = platform->qos[1].freq_int;
+ table.freq_cl0 = platform->qos[1].freq_cl0;
+ table.freq_cl1 = platform->qos[1].freq_cl1;
+ break;
+
+ case SCSC_QOS_MAX:
+ table.freq_mif = platform->qos[2].freq_mif;
+ table.freq_int = platform->qos[2].freq_int;
+ table.freq_cl0 = platform->qos[2].freq_cl0;
+ table.freq_cl1 = platform->qos[2].freq_cl1;
+ break;
+
+ default:
+ table.freq_mif = 0;
+ table.freq_int = 0;
+ table.freq_cl0 = 0;
+ table.freq_cl1 = 0;
+ }
+
+ return table;
+}
+
+static int platform_mif_pm_qos_add_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ struct qos_table table;
+
+ if (!platform)
+ return -ENODEV;
+
+ if (!platform->qos_enabled) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+ return -EOPNOTSUPP;
+ }
+
+ table = platform_mif_pm_qos_get_table(platform, config);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "PM QoS add request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, table.freq_mif, table.freq_int, table.freq_cl0, table.freq_cl1);
+
+ pm_qos_add_request(&qos_req->pm_qos_req_mif, PM_QOS_BUS_THROUGHPUT, table.freq_mif);
+ pm_qos_add_request(&qos_req->pm_qos_req_int, PM_QOS_DEVICE_THROUGHPUT, table.freq_int);
+ pm_qos_add_request(&qos_req->pm_qos_req_cl0, PM_QOS_CLUSTER0_FREQ_MIN, table.freq_cl0);
+ pm_qos_add_request(&qos_req->pm_qos_req_cl1, PM_QOS_CLUSTER1_FREQ_MIN, table.freq_cl1);
+
+ return 0;
+}
+
+static int platform_mif_pm_qos_update_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ struct qos_table table;
+
+ if (!platform)
+ return -ENODEV;
+
+ if (!platform->qos_enabled) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+ return -EOPNOTSUPP;
+ }
+
+ table = platform_mif_pm_qos_get_table(platform, config);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+ "PM QoS update request: %u. MIF %u INT %u CL0 %u CL1 %u\n", config, table.freq_mif, table.freq_int, table.freq_cl0, table.freq_cl1);
+
+ pm_qos_update_request(&qos_req->pm_qos_req_mif, table.freq_mif);
+ pm_qos_update_request(&qos_req->pm_qos_req_int, table.freq_int);
+ pm_qos_update_request(&qos_req->pm_qos_req_cl0, table.freq_cl0);
+ pm_qos_update_request(&qos_req->pm_qos_req_cl1, table.freq_cl1);
+
+ return 0;
+}
+
+static int platform_mif_pm_qos_remove_request(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (!platform)
+ return -ENODEV;
+
+
+ if (!platform->qos_enabled) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS not configured\n");
+ return -EOPNOTSUPP;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PM QoS remove request\n");
+ pm_qos_remove_request(&qos_req->pm_qos_req_mif);
+ pm_qos_remove_request(&qos_req->pm_qos_req_int);
+ pm_qos_remove_request(&qos_req->pm_qos_req_cl0);
+ pm_qos_remove_request(&qos_req->pm_qos_req_cl1);
+
+ return 0;
+}
+#endif
+
+static void platform_mif_irq_default_handler(int irq, void *data)
+{
+ /* Avoid unused parameter error */
+ (void)irq;
+ (void)data;
+
+ /* int handler not registered */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT handler not registered\n");
+}
+
+static void platform_mif_irq_reset_request_default_handler(int irq, void *data)
+{
+ /* Avoid unused parameter error */
+ (void)irq;
+ (void)data;
+
+ /* int handler not registered */
+ SCSC_TAG_INFO_DEV(PLAT_MIF, NULL, "INT reset_request handler not registered\n");
+}
+
+irqreturn_t platform_mif_isr(int irq, void *data)
+{
+ struct platform_mif *platform = (struct platform_mif *)data;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "INT %pS\n", platform->r4_handler);
+ if (platform->r4_handler != platform_mif_irq_default_handler)
+ platform->r4_handler(irq, platform->irq_dev);
+ else
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MIF Interrupt Handler not registered\n");
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+irqreturn_t platform_alive_isr(int irq, void *data)
+{
+ struct platform_mif *platform = (struct platform_mif *)data;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+
+ return IRQ_HANDLED;
+}
+#endif
+
+irqreturn_t platform_wdog_isr(int irq, void *data)
+{
+ int ret = 0;
+ struct platform_mif *platform = (struct platform_mif *)data;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
+ if (platform->reset_request_handler != platform_mif_irq_reset_request_default_handler) {
+ disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ platform->reset_request_handler(irq, platform->irq_reset_request_dev);
+ } else {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG Interrupt reset_request_handler not registered\n");
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Disabling unhandled WDOG IRQ.\n");
+ disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+ atomic_inc(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt);
+ }
+
+ ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+ WLBT_RESET_REQ_CLR, WLBT_RESET_REQ_CLR);
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clearing WLBT_RESET_REQ\n");
+ if (ret < 0)
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Failed to Set WLBT_CTRL_NS[WLBT_RESET_REQ_CLR]: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Attached array contains the replacement PMU boot code which should
+ * be programmed using the CBUS during the config phase.
+ */
+uint32_t ka_patch[] = {
+ // Maxwell142 PMU+PROC combined boot ROM
+ // IP Version: 0xA3
+ // Major Version: 0xF, Minor Version: 0xF
+ // PMU ROM version: 0x4
+ // PROC ROM version: 0x0
+ // From excite 0605
+ 0xe8750002,
+ 0x1189113f,
+ 0x0e807558,
+ 0xe030b3e5,
+ 0x11a230fb,
+ 0xb1f50774,
+ 0x75fdb2b5,
+ 0x077400b1,
+ 0xadb5acf5,
+ 0x01b443fd,
+ 0xe020b3e5,
+ 0xfeb453fb,
+ 0xd880a3d2,
+ 0x83798278,
+ 0xa075d280,
+ 0x78377907,
+ 0x0754e6b0,
+ 0x800207b4,
+ 0x04f6d90b,
+ 0x7580f5c4,
+ 0x038000af,
+ 0x22098075,
+ 0x53f7f753,
+ 0x0479fece,
+ 0xce53fed9,
+ 0xd90c79fd,
+ 0xfbce53fe,
+ 0x53b79275,
+ 0x0074fd91,
+ 0xce5392f5,
+ 0xb79275f7,
+ 0x5302f943,
+ 0xf922fef9,
+ 0xfbd8fed9,
+ 0x019e7522,
+ 0x7501d275,
+ 0xc17580d5,
+ 0x74047880,
+ 0x838012c3,
+ 0x78c0c175,
+ 0x12827402,
+ 0x80758380,
+ 0x0291750d,
+ 0x75039375,
+ 0xa975029e,
+ 0x00002201,
+};
+
+//extern bool reset_failed;
+
/* CFG_REQ interrupt handler.
 *
 * WLBT asserts CFG_REQ during boot to ask the AP to: switch TZPC to
 * non-secure via SMC, program the WLBT_REMAP/DBUS_BAAW/PBUS_BAAW address
 * windows, download the PMU boot patch (ka_patch) through BOOT_CFG, and
 * acknowledge with BOOT_CFG_ACK. On success boot_state advances to
 * WLBT_BOOT_CFG_DONE and cfg_ack is completed to release the waiter;
 * on any register-write failure boot_state becomes WLBT_BOOT_CFG_ERROR.
 * The register programming order follows the HW boot sequence - do not
 * reorder.
 */
irqreturn_t platform_cfg_req_isr(int irq, void *data)
{
	struct platform_mif *platform = (struct platform_mif *)data;
	u64 ret64 = 0;
	const u64 EXYNOS_WLBT = 0x1;
	/*s32 ret = 0;*/
	unsigned int ka_addr = 0x1000;	/* BOOT_CFG offset where ka_patch is loaded */
	uint32_t *ka_patch_addr = ka_patch;
	//u32 id;

/* Any failing register write aborts the whole config sequence. */
#define CHECK(x) do { \
	int retval = (x); \
	if (retval < 0) {\
		pr_err("%s failed at L%d", __FUNCTION__, __LINE__); \
		goto cfg_error; \
	} \
} while (0)

	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INT received\n");
	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "disable_irq\n");

	/* mask the irq */
	disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);

	/* Was the CFG_REQ irq received from WLBT before we expected it?
	 * Typically this indicates an issue returning WLBT HW to reset.
	 */
	if (platform->boot_state != WLBT_BOOT_WAIT_CFG_REQ) {
		u32 val;

		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
				 "Spurious CFG_REQ IRQ from WLBT!\n");

		/* Dump PMU state to aid debugging of the spurious IRQ. */
		regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
		SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_NS 0x%x\n", val);

		regmap_read(platform->pmureg, WLBT_CTRL_S, &val);
		SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_S 0x%x\n", val);

		regmap_read(platform->pmureg, WLBT_DEBUG, &val);
		SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);

		//reset_failed = true; /* prevent further interaction with HW */

		return IRQ_HANDLED;
	}

	/* Set TZPC to non-secure mode */
	/* http://caeweb/~wshi/skylark/project-skylark.sim-basic/sky/regs/pmu_conf/pmu_conf/doc/main.html
	 */
	ret64 = exynos_smc(SMC_CMD_CONN_IF, (EXYNOS_WLBT << 32) | EXYNOS_SET_CONN_TZPC, 0, 0);
	if (ret64)
		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
				 "Failed to set TZPC to non-secure mode: %llu\n", ret64);
	else
		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
				  "SMC_CMD_CONN_IF run successfully : %llu\n", ret64);
#if 0
	/* WLBT_REMAP PMU_REMAP - PROC_RMP_BOOT_ADDR 0x14450400 */
	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP begin\n");
	CHECK(regmap_write(platform->wlbt_remap, 0x400, WLBT_DBUS_BAAW_0_START >> 12));
	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP end\n");
#endif

	/* update EMA parameters if required in future */

	/* BAAW1_P_WLBT */
#if 0
	/* Not used by DRAM boot */
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BAAW_P_WLBT begin\n");
	CHECK(regmap_write(platform->baaw_p_wlbt, 0x0, 0x0 >> 12));
	CHECK(regmap_write(platform->baaw_p_wlbt, 0x4, 0x1ffff + 1));
	CHECK(regmap_write(platform->baaw_p_wlbt, 0x8, 0x0 >> 12));
	CHECK(regmap_write(platform->baaw_p_wlbt, 0xC, WLBT_BAAW_ACCESS_CTRL));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BAAW_P_WLBT end\n");
#endif
	/* WLBT_REMAP PMU_REMAP - PROC_RMP_BOOT_ADDR 0x14450400 */
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP begin\n");
	CHECK(regmap_write(platform->wlbt_remap, 0x400, WLBT_DBUS_BAAW_0_START >> 12));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WLBT_REMAP end\n");

	/* DBUS_BAAW regions */
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW begin\n"); // PMU_DBUS_BAAW

	/* Shared DRAM mapping. The destination address is the location reserved
	 * by the kernel.
	 */
	CHECK(regmap_write(platform->dbus_baaw, 0x0, WLBT_DBUS_BAAW_0_START >> 12));
	CHECK(regmap_write(platform->dbus_baaw, 0x4, WLBT_DBUS_BAAW_0_END >> 12));
	CHECK(regmap_write(platform->dbus_baaw, 0x8, platform->mem_start >> 12)); // FW AP base addr >> 12
	CHECK(regmap_write(platform->dbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));
#if 0
	/* Additional DRAM mappings for future use */
	CHECK(regmap_write(platform->dbus_baaw, 0x10, 0x000C0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x14, 0x000D0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x18, 0x000D0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x1C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->dbus_baaw, 0x20, 0x000D0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x24, 0x000E0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x28, 0x000E0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x2C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->dbus_baaw, 0x30, 0x000E0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x34, 0x000F0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x38, 0x000F0000));
	CHECK(regmap_write(platform->dbus_baaw, 0x3C, WLBT_BAAW_ACCESS_CTRL));
#endif
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "DBUS_BAAW end\n");

	/* PBUS_BAAW regions */
	/* ref wlbt_if_S5E9630.c, updated for MX450 memory map */
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW begin\n");

	/* Range for CP2WLBT mailbox */
	CHECK(regmap_write(platform->pbus_baaw, 0x0, WLBT_CBUS_BAAW_0_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x4, WLBT_CBUS_BAAW_0_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x8, WLBT_PBUS_MBOX_CP2WLBT_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0xC, WLBT_BAAW_ACCESS_CTRL));

	/* Range includes AP2WLBT,APM2WLBT,GNSS2WLBT mailboxes */
	CHECK(regmap_write(platform->pbus_baaw, 0x10, WLBT_CBUS_BAAW_1_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x14, WLBT_CBUS_BAAW_1_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x18, WLBT_PBUS_MBOX_GNSS2WLBT_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x1C, WLBT_BAAW_ACCESS_CTRL));
#if 0
	/* These mappings are not yet used by WLBT FW */
	CHECK(regmap_write(platform->pbus_baaw, 0x20, WLBT_CBUS_BAAW_2_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x24, WLBT_CBUS_BAAW_2_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x28, WLBT_PBUS_GPIO_CMGP_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x2C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->pbus_baaw, 0x30, WLBT_CBUS_BAAW_3_START >>12));
	CHECK(regmap_write(platform->pbus_baaw, 0x34, WLBT_CBUS_BAAW_3_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x38, WLBT_PBUS_SYSREG_CMGP2WLBT_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x3C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->pbus_baaw, 0x40, WLBT_CBUS_BAAW_4_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x44, WLBT_CBUS_BAAW_4_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x48, WLBT_PBUS_USI_CMG00_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x4C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->pbus_baaw, 0x50, WLBT_CBUS_BAAW_5_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x54, WLBT_CBUS_BAAW_5_END >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x58, WLBT_PBUS_CHUB_USICHUB0_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x5C, WLBT_BAAW_ACCESS_CTRL));

	CHECK(regmap_write(platform->pbus_baaw, 0x60, WLBT_CBUS_BAAW_6_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x64, WLBT_CBUS_BAAW_6_START >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x68, WLBT_PBUS_CHUB_BASE >> 12));
	CHECK(regmap_write(platform->pbus_baaw, 0x6C, WLBT_BAAW_ACCESS_CTRL));
#endif
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "PBUS_BAAW end\n");

	/* PMU boot patch */
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT begin\n");
	CHECK(regmap_write(platform->boot_cfg, 0x0, 0x1));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT_WLBT done\n");

	/* Stream ka_patch word-by-word into BOOT_CFG starting at 0x1000. */
	while (ka_patch_addr < (ka_patch + ARRAY_SIZE(ka_patch))) {
		CHECK(regmap_write(platform->boot_cfg, ka_addr, *ka_patch_addr));
		ka_addr += sizeof(ka_patch[0]);
		ka_patch_addr++;
	}
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "KA patch done\n");

	/* Notify PMU of configuration done */
	CHECK(regmap_write(platform->boot_cfg, 0x0, 0x0));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT config done\n");

	/* BOOT_CFG_ACK */
	CHECK(regmap_write(platform->boot_cfg, 0x4, 0x1));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "BOOT_CFG_ACK done\n");

	/* Delay to allow HW to clear CFG_REQ and hence de-assert IRQ, which
	 * it does in response to CFG_ACK
	 */
	//udelay(100);

	/* Release ownership of MASK_PWR_REQ */
	/* See sequence in 9.6.6 */
	/*ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
				 MASK_PWR_REQ, 0);
	if (ret < 0) {
		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
				 "Failed to clear WLBT_CTRL_NS[MASK_PWR_REQ]: %d\n", ret);
		goto cfg_error;
	}*/

	/* Mark as CFQ_REQ handled, so boot may continue */
	platform->boot_state = WLBT_BOOT_CFG_DONE;

	/* Signal triggering function that the IRQ arrived and CFG was done */
	complete(&platform->cfg_ack);

	/* Re-enable IRQ here to allow spurious interrupt to be tracked */
	enable_irq(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);

	/* as per wlbt_if_S5E9630.c - end */
	return IRQ_HANDLED;
cfg_error:
	/* A CHECK() failed: flag the error and still complete cfg_ack so the
	 * waiting thread is not left blocked until timeout.
	 */
	platform->boot_state = WLBT_BOOT_CFG_ERROR;
	SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "ERROR: WLBT Config failed. WLBT will not work\n");
	complete(&platform->cfg_ack);
	return IRQ_HANDLED;
}
+
+/* Release all WLBT IRQs previously taken with devm_request_irq().
+ * Counterpart of platform_mif_register_irq(); called on unmap/teardown.
+ */
+static void platform_mif_unregister_irq(struct platform_mif *platform)
+{
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering IRQs\n");
+
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform);
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform);
+	/* Reset irq_disabled_cnt for WDOG IRQ since the IRQ itself is here
+	 * unregistered and disabled; a stale count would otherwise make the
+	 * next registration believe the line is still disabled.
+	 */
+	atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+	/* ALIVE irq is only requested when this option is enabled */
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform);
+#endif
+	devm_free_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform);
+}
+
+/* Request the MBOX, WDOG, (optionally ALIVE) and CFG_REQ interrupts.
+ * CFG_REQ is left disabled until platform_mif_pmu_reset_release() is
+ * ready to handle it. Returns 0 on success or -ENODEV on any failure.
+ *
+ * Fix: devm_request_irq() returns 0 or a negative errno, not an
+ * error-encoded pointer, so IS_ERR_VALUE() was the wrong predicate here
+ * (and the ALIVE branch even called it on a plain int without the
+ * (unsigned long) cast used elsewhere). Test the return value directly.
+ */
+static int platform_mif_register_irq(struct platform_mif *platform)
+{
+	int err;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering IRQs\n");
+
+	/* Register MBOX irq */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering MBOX irq: %d flag 0x%x\n",
+			  platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform->wlbt_irq[PLATFORM_MIF_MBOX].flags);
+
+	err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_MBOX].irq_num, platform_mif_isr,
+			       platform->wlbt_irq[PLATFORM_MIF_MBOX].flags, DRV_NAME, platform);
+	if (err) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to register MBOX handler: %d. Aborting.\n", err);
+		return -ENODEV;
+	}
+
+	/* Register WDOG irq */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering WDOG irq: %d flag 0x%x\n",
+			  platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform->wlbt_irq[PLATFORM_MIF_WDOG].flags);
+
+	err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num, platform_wdog_isr,
+			       platform->wlbt_irq[PLATFORM_MIF_WDOG].flags, DRV_NAME, platform);
+	if (err) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to register WDOG handler: %d. Aborting.\n", err);
+		return -ENODEV;
+	}
+
+#ifdef CONFIG_SCSC_ENABLE_ALIVE_IRQ
+	/* Register ALIVE irq */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering ALIVE irq: %d flag 0x%x\n",
+			  platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags);
+
+	err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_ALIVE].irq_num, platform_alive_isr,
+			       platform->wlbt_irq[PLATFORM_MIF_ALIVE].flags, DRV_NAME, platform);
+	if (err) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to register ALIVE handler: %d. Aborting.\n", err);
+		return -ENODEV;
+	}
+#endif
+
+	/* Mark as WLBT in reset before enabling IRQ to guard against spurious IRQ */
+	platform->boot_state = WLBT_BOOT_IN_RESET;
+	smp_wmb(); /* commit before irq */
+
+	/* Register WB2AP_CFG_REQ irq */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering CFG_REQ irq: %d flag 0x%x\n",
+			  platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags);
+
+	err = devm_request_irq(platform->dev, platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num, platform_cfg_req_isr,
+			       platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].flags, DRV_NAME, platform);
+	if (err) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				 "Failed to register CFG_REQ handler: %d. Aborting.\n", err);
+		return -ENODEV;
+	}
+
+	/* Leave disabled until ready to handle */
+	disable_irq_nosync(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+	return 0;
+}
+
+/* Tear down the interface; the only resources held are the IRQs. */
+static void platform_mif_destroy(struct scsc_mif_abs *interface)
+{
+	platform_mif_unregister_irq(platform_mif_from_mif_abs(interface));
+}
+
+/* Return the interface UID; a single instance is supported, so it is fixed. */
+static char *platform_mif_get_uid(struct scsc_mif_abs *interface)
+{
+	(void)interface; /* unused */
+
+	return "0";
+}
+
+/* Dump the WLBT-related PMU registers to the log; used to diagnose boot
+ * and CFG_REQ timeout problems. Read failures are ignored (val stays 0).
+ */
+static void wlbt_regdump(struct platform_mif *platform)
+{
+	u32 val = 0;
+
+	regmap_read(platform->pmureg, WLBT_CTRL_S, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_S 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_CONFIGURATION, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_CONFIGURATION 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_CTRL_NS 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_IN, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_IN 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_OUT, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_OUT 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_STATUS, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_STATUS 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_STATES, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_STATES 0x%x\n", val);
+
+	regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+
+#if 0
+	/* Delay to let PMU process the cfg_ack */
+	udelay(10000);
+	regmap_read(platform->boot_cfg, 0x0, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "BOOT_SOURCE 0x%x\n", val);
+
+	regmap_read(platform->boot_cfg, 0x4, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "BOOT_CFG_ACK 0x%x\n", val);
+#endif
+}
+
+/* WLBT START: wait for the CFG_REQ ISR to finish configuring WLBT/PMU. */
+static int platform_mif_start(struct scsc_mif_abs *interface, bool start)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	/* done as part of platform_mif_pmu_reset_release() init_done sequence */
+#if 0
+	//s32 ret = 0;
+	if (start)
+		val = WLBT_START;
+
+	/* See sequence in TODO update when available */
+	ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_S,
+		WLBT_START, val);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_CTRL_S[WLBT_START]: %d\n", ret);
+		return ret;
+	}
+	SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+		"update WIFI_CTRL_S[WLBT_START]: %d\n", ret);
+
+#endif
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "start %d\n", start);
+
+	/* The CFG_REQ ISR completes cfg_ack once WLBT is configured;
+	 * on a start request, block here until that happens.
+	 */
+	if (start) {
+		if (wait_for_completion_timeout(&platform->cfg_ack, WLBT_BOOT_TIMEOUT) == 0) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Timeout waiting for CFG_REQ IRQ\n");
+			wlbt_regdump(platform);
+			return -ETIMEDOUT;
+		}
+	}
+
+	wlbt_regdump(platform);
+
+	/* The ISR records failure in boot_state; refuse to continue on error */
+	if (platform->boot_state == WLBT_BOOT_CFG_ERROR) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "CFG_REQ failed to configure WLBT.\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Bring WLBT out of reset.
+ * First call runs the cold-boot init sequence (WLBT_START, power-up,
+ * bus-ready, ACTIVE interrupt setup); later calls run the shorter
+ * reset-release sequence. Both paths then enable CFG_REQ and wait for
+ * the PMU configuration via platform_mif_start().
+ * Returns 0 on success or a negative errno from regmap/start.
+ */
+static int platform_mif_pmu_reset_release(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	int ret = 0;
+	u32 val = 0;
+	u32 v = 0;
+	unsigned long timeout;
+	static bool init_done;	/* set once the cold-boot init sequence has run */
+
+	/* We're now ready for the IRQ */
+	platform->boot_state = WLBT_BOOT_WAIT_CFG_REQ;
+	smp_wmb(); /* commit before irq */
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "on boot_state = WLBT_BOOT_WAIT_CFG_REQ\n");
+
+	/* INIT SEQUENCE - First WLBT boot only
+	 * Cold reset wrt. AP power sequencer, cold reset for WLBT
+	 */
+	if (!init_done) {
+		/* init sequence from excite
+		 * SetBits((uinteger)REG_WLBT_CTRL_S, 3, 0x1, 0x1);
+		 * SetBits((uinteger)REG_WLBT_CONFIGURATION, 0, 0x1, 0x1);
+		 * while (GetBits((uinteger)REG_WLBT_STATUS, 0, 0x1) != 0x1);
+		 * wlbt_if_check_wlbt2ap_bus_ready();
+		 * SetBits((uinteger)REG_WLBT_CTRL_NS, 8, 0x1, 0x0);
+		 * SetBits((uinteger)REG_WLBT_CTRL_NS, 7, 0x1, 0x1);
+		 */
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "init\n");
+
+		/* setup TZPC */
+		/*SCSC_TAG_INFO(PLAT_MIF, "trying to set TZPC register via exynos_smc(SMC_CMD_CONN_IF, (EXYNOS_WLBT << 32) | EXYNOS_SET_CONN_TZPC, 0, 0)\n");
+		ret64 = exynos_smc(SMC_CMD_CONN_IF, (EXYNOS_WLBT << 32) | EXYNOS_SET_CONN_TZPC, 0, 0);
+		if (ret64)
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to set TZPC to non-secure mode: %llu\n", ret64);
+		else
+			SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			"SMC_CMD_CONN_IF run successfully : %llu\n", ret64);*/
+
+		/* WLBT_CTRL_S[WLBT_START] = 1 enable */
+		ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_S,
+			WLBT_START, WLBT_START);
+		if (ret < 0) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				"Failed to update WLBT_CTRL_S[WLBT_START]: %d\n", ret);
+			return ret;
+		}
+		regmap_read(platform->pmureg, WLBT_CTRL_S, &val);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			"updated successfully WLBT_CTRL_S[WLBT_START]: 0x%x\n", val);
+
+		/* WLBT_CONFIGURATION[LOCAL_PWR_CFG] = 1 Power On */
+		ret = regmap_update_bits(platform->pmureg, WLBT_CONFIGURATION,
+			LOCAL_PWR_CFG, 0x1);
+		if (ret < 0) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				"Failed to update WLBT_CONFIGURATION[LOCAL_PWR_CFG]: %d\n", ret);
+			return ret;
+		}
+		regmap_read(platform->pmureg, WLBT_CONFIGURATION, &val);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			"updated successfully WLBT_CONFIGURATION[LOCAL_PWR_CFG]: 0x%x\n", val);
+
+		/* Poll (up to 500ms) for power-up: WLBT_STATUS[STATUS] = 1 */
+		timeout = jiffies + msecs_to_jiffies(500);
+		do {
+			regmap_read(platform->pmureg, WLBT_STATUS, &val);
+			val &= STATUS;
+			if (val) {
+				/* Power On complete */
+				SCSC_TAG_INFO(PLAT_MIF, "Power On complete: WLBT_STATUS 0x%x\n", val);
+				/* re affirming power on by reading WLBT_STATES */
+				/* STATES[7:0] = 0x10 for Power Up */
+				regmap_read(platform->pmureg, WLBT_STATES, &v);
+				SCSC_TAG_INFO(PLAT_MIF, "Power On complete: WLBT_STATES 0x%x\n", v);
+				break;
+			}
+		} while (time_before(jiffies, timeout));
+
+		/* Timed out: log and carry on regardless (best effort) */
+		if (!val) {
+			regmap_read(platform->pmureg, WLBT_STATUS, &val);
+			SCSC_TAG_INFO(PLAT_MIF, "timeout waiting for power on time-out: "
+				"WLBT_STATUS 0x%x, WLBT_STATES 0x%x\n", val, v);
+		}
+
+		/* wait for WLBT_IN[BUS_READY] = 1 for BUS READY state */
+		timeout = jiffies + msecs_to_jiffies(500);
+		do {
+			regmap_read(platform->pmureg, WLBT_IN, &val);
+			val &= BUS_READY;
+			if (val) {
+				/* BUS ready indication signal -> 1: BUS READY state */
+				SCSC_TAG_INFO(PLAT_MIF, "Bus Ready: WLBT_IN 0x%x\n", val);
+
+				/* OK to break */
+				break;
+			}
+		} while (time_before(jiffies, timeout));
+
+		if (!val) {
+			regmap_read(platform->pmureg, WLBT_IN, &val);
+			SCSC_TAG_INFO(PLAT_MIF, "timeout waiting for Bus Ready: WLBT_IN 0x%x\n", val);
+		}
+
+		/* WLBT_CTRL_NS[WLBT_ACTIVE_CLR] = 0 Active interrupt clear */
+		ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+			WLBT_ACTIVE_CLR, 0x0);
+		if (ret < 0) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				"Failed to update WLBT_CTRL_NS[WLBT_ACTIVE_CLR]: %d\n", ret);
+			return ret;
+		}
+		regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			"updated successfully WLBT_CTRL_NS[WLBT_ACTIVE_CLR]: 0x%x\n", val);
+
+		/* WLBT_CTRL_NS[WLBT_ACTIVE_EN] = 1 Active interrupt enable */
+		ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+			WLBT_ACTIVE_EN, WLBT_ACTIVE_EN);
+		if (ret < 0) {
+			SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+				"Failed to update WLBT_CTRL_NS[WLBT_ACTIVE_EN]: %d\n", ret);
+			return ret;
+		}
+		regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+			"updated successfully WLBT_CTRL_NS[WLBT_ACTIVE_EN]: 0x%x\n", val);
+
+		init_done = true;
+
+		goto init_code_done;
+	}
+
+	/* RESET RELEASE - Subsequent WLBT reboots */
+	/* wlbt_if_reset_release - from excite code
+	 * SetBits((uinteger)REG_WLBT_CONFIGURATION, 0, 0x1, 0x1);
+	 * while (GetBits((uinteger)REG_WLBT_STATUS, 0, 0x1) != 0x1);
+	 * SetBits((uinteger)REG_WLBT_INT_EN, 3, 0x1, 0x1);
+	 * SetBits((uinteger)REG_WLBT_INT_EN, 5, 0x1, 0x1);
+	 * while (GetBits((uinteger)REG_WLBT_IN, 4, 0x1) != 0x1);
+	 * */
+
+	/* Warm reset wrt. AP power sequencer, but cold reset for WLBT */
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "release\n");
+
+	/* NOTE(review): the original label here was "Power Down", but this
+	 * writes LOCAL_PWR_CFG = 1 — the same power-up request as the init
+	 * path (matches the excite reference above). Confirm against the
+	 * PMU sequence documentation.
+	 */
+	ret = regmap_update_bits(platform->pmureg, WLBT_CONFIGURATION,
+		LOCAL_PWR_CFG, 0x1);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_CONFIGURATION[LOCAL_PWR_CFG]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_CONFIGURATION, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_CONFIGURATION[LOCAL_PWR_CFG]: 0x%x\n", val);
+
+	/* Poll (up to 500ms) for WLBT_STATUS[STATUS] to become 1.
+	 * NOTE(review): the original comment said "= 0 for power down", but
+	 * the loop breaks on non-zero, i.e. it waits for power-up complete.
+	 */
+	timeout = jiffies + msecs_to_jiffies(500);
+	do {
+		regmap_read(platform->pmureg, WLBT_STATUS, &val);
+		val &= STATUS;
+		if (val) {
+			/* Power up complete */
+			SCSC_TAG_INFO(PLAT_MIF, "Power up complete: WLBT_STATUS 0x%x\n", val);
+			/* re affirming power down by reading WLBT_STATES */
+			/* STATES[7:0] = 0x80 for Power Down */
+			regmap_read(platform->pmureg, WLBT_STATES, &v);
+			SCSC_TAG_INFO(PLAT_MIF, "Power up complete: WLBT_STATES 0x%x\n", v);
+			break;
+		}
+	} while (time_before(jiffies, timeout));
+
+	if (!val) {
+		regmap_read(platform->pmureg, WLBT_STATUS, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "Timeout waiting for Power up complete: "
+			"WLBT_STATUS 0x%x, WLBT_STATES 0x%x\n", val, v);
+	}
+
+	/* enable PWR_REQ_F and TCXO_REQ_F interrupts.
+	 * NOTE(review): regmap_update_bits() does not shift the value into
+	 * the mask, so 0x1 only affects bit 0 — if PWR_REQ_F/TCXO_REQ_F are
+	 * not bit 0 this would actually clear them. Confirm the mask
+	 * definitions (the excite reference sets bits 3 and 5).
+	 */
+	ret = regmap_update_bits(platform->pmureg, WLBT_INT_EN,
+		PWR_REQ_F, 0x1);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_INT_EN[PWR_REQ_F]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_INT_EN, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_INT_EN[PWR_REQ_F]: 0x%x\n", val);
+
+	ret = regmap_update_bits(platform->pmureg, WLBT_INT_EN,
+		TCXO_REQ_F, 0x1);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_INT_EN[TCXO_REQ_F]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_INT_EN, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_INT_EN[TCXO_REQ_F]: 0x%x\n", val);
+
+	/* Poll (up to 500ms) for WLBT_IN[BUS_READY].
+	 * NOTE(review): the original comment said "= 0 for BUS RESET
+	 * RELEASED", but the loop breaks when the bit is SET (= 1), which
+	 * matches the excite reference sequence. Confirm intended polarity.
+	 */
+	timeout = jiffies + msecs_to_jiffies(500);
+	do {
+		regmap_read(platform->pmureg, WLBT_IN, &val);
+		val &= BUS_READY;
+		if (val) {
+			/* BUS ready indication signal -> 1: BUS RESET RELEASED Normal state */
+			SCSC_TAG_INFO(PLAT_MIF, "Bus Ready: WLBT_IN 0x%x\n", val);
+
+			/* OK to break */
+			break;
+		}
+	} while (time_before(jiffies, timeout));
+
+	if (!val) {
+		regmap_read(platform->pmureg, WLBT_IN, &val);
+		SCSC_TAG_INFO(PLAT_MIF, "Timeout waiting for Bus Ready: WLBT_IN 0x%x != 0\n", val);
+	}
+
+init_code_done:
+	/* Now handle the CFG_REQ IRQ */
+	enable_irq(platform->wlbt_irq[PLATFORM_MIF_CFG_REQ].irq_num);
+
+	/* Wait for the CFG_REQ ISR to configure WLBT and complete cfg_ack */
+	ret = platform_mif_start(interface, true);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+/* Put WLBT back into reset: keep the TCXO buffer alive, disable the
+ * PWR_REQ_F/TCXO_REQ_F and ACTIVE interrupts, request power-down and
+ * poll for WLBT_STATUS[STATUS] = 0.
+ * Returns 0 on success, a negative errno on regmap failure, or -ETIME
+ * if WLBT never reports powered-down.
+ *
+ * Fix: the readback after the SYSTEM_OUT update re-read TCXO_BUF_CTRL
+ * (copy-paste), so the "SYSTEM_OUT[PWRRGTON_CON]" log showed the wrong
+ * register. Now reads SYSTEM_OUT.
+ */
+static int platform_mif_pmu_reset_assert(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	unsigned long timeout;
+	int ret;
+	u32 val;
+
+	/* wlbt_if_reset_assertion() - from wlbt_if_S5E9630.c
+	 * SetBits((uinteger)TCXO_BUF_CTRL, 2, 0x1, 0x1);
+	 * SetBits((uinteger)TCXO_BUF_CTRL, 3, 0x1, 0x1);
+	 * SetBits((uinteger)SYSTEM_OUT, 9, 0x1, 0x1);
+	 * udelay(1000);
+	 * SetBits((uinteger)REG_WLBT_INT_EN, 3, 0x1, 0x0);
+	 * SetBits((uinteger)REG_WLBT_INT_EN, 5, 0x1, 0x0);
+	 * SetBits((uinteger)REG_WLBT_CTRL_NS, 7, 0x1, 0x0);
+	 * SetBits((uinteger)REG_WLBT_CONFIGURATION, 0, 0x1, 0x0);
+	 * while (GetBits((uinteger)REG_WLBT_STATUS, 0, 0x1) != 0x0);
+	 */
+
+	ret = regmap_update_bits(platform->pmureg, TCXO_BUF_CTRL,
+		TCXO_BUF_BIAS_EN_WLBT, TCXO_BUF_BIAS_EN_WLBT);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update TCXO_BUF_CTRL[TCXO_BUF_BIAS_EN_WLBT]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, TCXO_BUF_CTRL, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully TCXO_BUF_CTRL[TCXO_BUF_BIAS_EN_WLBT]: 0x%x\n", val & TCXO_BUF_BIAS_EN_WLBT);
+
+	ret = regmap_update_bits(platform->pmureg, TCXO_BUF_CTRL,
+		TCXO_BUF_EN_WLBT, TCXO_BUF_EN_WLBT);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update TCXO_BUF_CTRL[TCXO_BUF_EN_WLBT]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, TCXO_BUF_CTRL, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully TCXO_BUF_CTRL[TCXO_BUF_EN_WLBT]: 0x%x\n", val & TCXO_BUF_EN_WLBT);
+
+	ret = regmap_update_bits(platform->pmureg, SYSTEM_OUT,
+		PWRRGTON_CON, PWRRGTON_CON);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update SYSTEM_OUT[PWRRGTON_CON]: %d\n", ret);
+		return ret;
+	}
+	/* Read back SYSTEM_OUT (was wrongly re-reading TCXO_BUF_CTRL) */
+	regmap_read(platform->pmureg, SYSTEM_OUT, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully SYSTEM_OUT[PWRRGTON_CON]: 0x%x\n", val & PWRRGTON_CON);
+
+	/* Settling delay taken from the reference sequence above */
+	udelay(1000);
+
+	/* disable PWR_REQ_F and TCXO_REQ_F interrupts */
+	ret = regmap_update_bits(platform->pmureg, WLBT_INT_EN,
+		PWR_REQ_F, 0x0);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_INT_EN[PWR_REQ_F]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_INT_EN, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_INT_EN[PWR_REQ_F]: 0x%x\n", val);
+
+	ret = regmap_update_bits(platform->pmureg, WLBT_INT_EN,
+		TCXO_REQ_F, 0x0);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_INT_EN[TCXO_REQ_F]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_INT_EN, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_INT_EN[TCXO_REQ_F]: 0x%x\n", val);
+
+	/* Active interrupt disable */
+	ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS,
+		WLBT_ACTIVE_EN, 0x0);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_CTRL_NS[WLBT_ACTIVE_EN]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_CTRL_NS, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_CTRL_NS[WLBT_ACTIVE_EN]: 0x%x\n", val);
+
+	/* Power Down: WLBT_CONFIGURATION[LOCAL_PWR_CFG] = 0 */
+	ret = regmap_write_bits(platform->pmureg, WLBT_CONFIGURATION,
+		LOCAL_PWR_CFG, 0x0);
+	if (ret < 0) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+			"Failed to update WLBT_CONFIGURATION[LOCAL_PWR_CFG]: %d\n", ret);
+		return ret;
+	}
+	regmap_read(platform->pmureg, WLBT_CONFIGURATION, &val);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+		"updated successfully WLBT_CONFIGURATION[LOCAL_PWR_CFG]: 0x%x\n", val);
+
+	/* Wait (up to 500ms) for power off: WLBT_STATUS[STATUS] = 0 */
+	timeout = jiffies + msecs_to_jiffies(500);
+	do {
+		regmap_read(platform->pmureg, WLBT_STATUS, &val);
+		val &= STATUS;
+		if (val == 0) {
+			SCSC_TAG_INFO(PLAT_MIF, "WLBT_STATUS 0x%x\n", val);
+			/* re affirming power down by reading WLBT_STATES */
+			/* STATES[7:0] = 0x80 for Power Down */
+			regmap_read(platform->pmureg, WLBT_STATES, &val);
+			SCSC_TAG_INFO(PLAT_MIF, "Power down complete: WLBT_STATES 0x%x\n", val);
+
+			return 0; /* OK - return */
+		}
+	} while (time_before(jiffies, timeout));
+
+	/* Timed out: dump the relevant registers for diagnosis */
+	SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Timeout waiting for WLBT_STATUS status\n");
+
+	regmap_read(platform->pmureg, WLBT_STATUS, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_STATUS 0x%x\n", val);
+	regmap_read(platform->pmureg, WLBT_DEBUG, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_DEBUG 0x%x\n", val);
+	regmap_read(platform->pmureg, WLBT_STATES, &val);
+	SCSC_TAG_INFO(PLAT_MIF, "WLBT_STATES 0x%x\n", val);
+	return -ETIME;
+}
+
+/* reset=0 - release from reset */
+/* reset=1 - hold reset */
+/* Returns 0 on success or a negative errno from the PMU helpers. */
+static int platform_mif_reset(struct scsc_mif_abs *interface, bool reset)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	/* Fix: was u32, which laundered the negative errno returned by the
+	 * PMU helpers through an unsigned type before the int return.
+	 */
+	int ret = 0;
+
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+	if (enable_platform_mif_arm_reset || !reset) {
+		if (!reset) { /* Release from reset */
+#ifdef CONFIG_ARCH_EXYNOS
+			SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				"SOC_VERSION: product_id 0x%x, rev 0x%x\n",
+				exynos_soc_info.product_id, exynos_soc_info.revision);
+#endif
+			power_supplies_on(platform);
+
+			ret = platform_mif_pmu_reset_release(interface);
+		} else {
+			/* Put back into reset */
+			ret = platform_mif_pmu_reset_assert(interface);
+		}
+	} else {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Not resetting ARM Cores - enable_platform_mif_arm_reset: %d\n",
+			enable_platform_mif_arm_reset);
+	}
+	return ret;
+}
+
+/* Map a physically contiguous region into kernel virtual space with
+ * write-combine (normal non-cacheable) attributes.
+ * Returns the mapping or NULL on failure; release with vunmap().
+ *
+ * Fix: the page-array allocation used an unchecked kmalloc(n * size)
+ * multiplication; kmalloc_array() performs the overflow check.
+ */
+static void __iomem *platform_mif_map_region(unsigned long phys_addr, size_t size)
+{
+	size_t i, num_pages;
+	struct page **pages;
+	void *vmem;
+
+	size = PAGE_ALIGN(size);
+	num_pages = size >> PAGE_SHIFT;
+
+	pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	/* Build the page list for the contiguous physical range */
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = phys_to_page(phys_addr);
+		phys_addr += PAGE_SIZE;
+	}
+
+	/* Map NORMAL_NC pages with kernel virtual space */
+	vmem = vmap(pages, num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+
+	kfree(pages);
+	return (void __iomem *)vmem;
+}
+
+/* Release a mapping created by platform_mif_map_region(). */
+static void platform_mif_unmap_region(void *vmem)
+{
+	vunmap(vmem);
+}
+
+/* Map the WLBT shared memory, reset the mailbox registers to documented
+ * defaults (all masked, all cleared) and register the driver IRQs.
+ * On success returns the shared-memory virtual address and stores its
+ * size in *allocated (if non-NULL); returns NULL on failure.
+ */
+static void *platform_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u8 i;
+
+	if (allocated)
+		*allocated = 0;
+
+	platform->mem =
+		platform_mif_map_region(platform->mem_start, platform->mem_size);
+
+	if (!platform->mem) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Error remaping shared memory\n");
+		return NULL;
+	}
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Map: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+
+	/* Initialise MIF registers with documented defaults */
+	/* MBOXes: zero every shared ISSR slot */
+	for (i = 0; i < NUM_MBOX_PLAT; i++)
+		platform_mif_reg_write(platform, MAILBOX_WLBT_REG(ISSR(i)), 0x00000000);
+
+	/* MRs */ /*1's - set all as Masked */
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+	/* CRs */ /* 1's - clear all the interrupts */
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	/* CHV test mode may run without interrupts: skip IRQ registration */
+	if (chv_disable_irq == true) {
+		if (allocated)
+			*allocated = platform->mem_size;
+		return platform->mem;
+	}
+#endif
+	/* register interrupts; on failure undo the mapping before bailing */
+	if (platform_mif_register_irq(platform)) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+		platform_mif_unmap_region(platform->mem);
+		return NULL;
+	}
+
+	if (allocated)
+		*allocated = platform->mem_size;
+	/* Set the CR4 base address in Mailbox??*/
+	return platform->mem;
+}
+
+/* Undo platform_mif_map(): mask and clear the mailbox interrupts,
+ * release the IRQs and tear down the shared-memory mapping.
+ * NOTE(review): 'mem' is unused — the mapping is tracked in 'platform';
+ * the parameter only exists to satisfy the scsc_mif_abs interface.
+ */
+static void platform_mif_unmap(struct scsc_mif_abs *interface, void *mem)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	/* Avoid unused parameter error */
+	(void)mem;
+
+	/* MRs */ /*1's - set all as Masked */
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), 0xffff0000);
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR1), 0x0000ffff);
+
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	/* IRQs were only registered when CHV did not disable them */
+	if (chv_disable_irq == false)
+		/* Unregister IRQs */
+		platform_mif_unregister_irq(platform);
+#else
+	platform_mif_unregister_irq(platform);
+#endif
+	/* CRs */ /* 1's - clear all the interrupts */
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), 0xffff0000);
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR1), 0x0000ffff);
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unmap: virt %p phys %lx\n", platform->mem, (uintptr_t)platform->mem_start);
+	platform_mif_unmap_region(platform->mem);
+	platform->mem = NULL;
+}
+
+/* Return the current interrupt mask bits (top half of INTMR0). */
+static u32 platform_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u32 mask;
+
+	mask = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)) >> 16;
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INTMR0: 0x%x\n", mask);
+	return mask;
+}
+
+/* Return the interrupts that are pending AND not masked (INTMSR0). */
+static u32 platform_mif_irq_get(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u32 pending;
+
+	/* INTMSR0 already reflects "generated AND not masked" */
+	pending = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)) >> 16;
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Getting INT-INTMSR0: 0x%x\n", pending);
+
+	return pending;
+}
+
+/* Raise interrupt 'bit_num' towards WLBT via INTGR1.
+ * 'target' is currently unused: all outgoing interrupts use INTGR1.
+ */
+static void platform_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	if (bit_num >= 16) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+		return;
+	}
+
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTGR1), (1 << bit_num));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTGR1: bit %d on target %d\n", bit_num, target);
+}
+
+/* Clear a pending incoming interrupt by writing 1 to its INTCR0 bit
+ * (the incoming bits occupy the top half of the register).
+ */
+static void platform_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	if (bit_num >= 16) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+		return;
+	}
+
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTCR0), ((1 << bit_num) << 16));
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTCR0: bit %d\n", bit_num);
+}
+
+/* Mask interrupt 'bit_num' by setting its bit in the top half of INTMR0.
+ * Read-modify-write is done under the MIF spinlock.
+ *
+ * Fix: the debug log printed "val | (1 << bit_num)" without the << 16
+ * shift, so it disagreed with the value actually written to the
+ * register (the unmask routine already logged the written value).
+ */
+static void platform_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	u32 val;
+	unsigned long flags;
+
+	if (bit_num >= 16) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+		return;
+	}
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	val = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+	/* WRITE : 1 = Mask Interrupt */
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val | ((1 << bit_num) << 16));
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Setting INTMR0: 0x%x bit %d\n", val | ((1 << bit_num) << 16), bit_num);
+}
+
+/* Unmask interrupt 'bit_num' by clearing its bit in the top half of
+ * INTMR0; the read-modify-write is protected by the MIF spinlock.
+ */
+static void platform_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	unsigned long flags;
+	u32 cur;
+
+	if (bit_num >= 16) {
+		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Incorrect INT number: %d\n", bit_num);
+		return;
+	}
+
+	/* WRITE : 0 = Unmask Interrupt */
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	cur = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), cur & ~((1 << bit_num) << 16));
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "UNMASK Setting INTMR0: 0x%x bit %d\n", cur & ~((1 << bit_num) << 16), bit_num);
+}
+
+/* Return the contents of the mask register (INTMR0 snapshot). */
+static u32 __platform_mif_irq_bit_mask_read(struct platform_mif *platform)
+{
+	unsigned long flags;
+	u32 mask;
+
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	mask = platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0));
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Read INTMR0: 0x%x\n", mask);
+	return mask;
+}
+
+/* Write the mask register wholesale, destroying previous contents. */
+static void __platform_mif_irq_bit_mask_write(struct platform_mif *platform, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	platform_mif_reg_write(platform, MAILBOX_WLBT_REG(INTMR0), val);
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+
+	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "Write INTMR0: 0x%x\n", val);
+}
+
+/* Install the R4 mailbox interrupt handler and its cookie; published
+ * under the MIF spinlock so the ISR sees a consistent pair.
+ */
+static void platform_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	unsigned long flags;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif int handler %pS in %p %p\n", handler, platform, interface);
+
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	platform->irq_dev = dev;
+	platform->r4_handler = handler;
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* Restore the default R4 handler and drop the cookie, under the MIF lock. */
+static void platform_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+	unsigned long flags;
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif int handler %pS\n", interface);
+
+	spin_lock_irqsave(&platform->mif_spinlock, flags);
+	platform->irq_dev = NULL;
+	platform->r4_handler = platform_mif_irq_default_handler;
+	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
+}
+
+/* Install the reset-request (WDOG) handler. If the default handler had
+ * disabled the WDOG IRQ because of a spurious interrupt, re-enable it
+ * now that a real handler is in place.
+ * Fix: corrected "spurios" typo in the log message.
+ */
+static void platform_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif reset_request int handler %pS in %p %p\n", handler, platform, interface);
+	platform->reset_request_handler = handler;
+	platform->irq_reset_request_dev = dev;
+	if (atomic_read(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt)) {
+		SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev,
+				  "Default WDOG handler disabled by spurious IRQ...re-enabling.\n");
+		enable_irq(platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_num);
+		atomic_set(&platform->wlbt_irq[PLATFORM_MIF_WDOG].irq_disabled_cnt, 0);
+	}
+}
+
+/* Revert to the default reset-request handler and drop the cookie. */
+static void platform_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "UnRegistering mif reset_request int handler %pS\n", interface);
+
+	platform->irq_reset_request_dev = NULL;
+	platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+}
+
+/* Record the suspend/resume callbacks and their shared cookie. */
+static void platform_mif_suspend_reg_handler(struct scsc_mif_abs *interface,
+					     int (*suspend)(struct scsc_mif_abs *abs, void *data),
+					     void (*resume)(struct scsc_mif_abs *abs, void *data),
+					     void *data)
+{
+	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Registering mif suspend/resume handlers in %p %p\n", platform, interface);
+
+	platform->suspendresume_data = data;
+	platform->suspend_handler = suspend;
+	platform->resume_handler = resume;
+}
+
+static void platform_mif_suspend_unreg_handler(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Unregistering mif suspend/resume handlers in %p %p\n", platform, interface);
+ platform->suspend_handler = NULL;
+ platform->resume_handler = NULL;
+ platform->suspendresume_data = NULL;
+}
+
+static u32 *platform_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+ u32 *addr;
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "mbox_index 0x%x\n", mbox_index);
+ addr = platform->base + MAILBOX_WLBT_REG(ISSR(mbox_index));
+ return addr;
+}
+
+static int platform_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return -ENOMEM;
+ }
+
+ /* Check limits! */
+ if (ptr >= (platform->mem + platform->mem_size)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Unable to get pointer reference\n");
+ return -ENOMEM;
+ }
+
+ *ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)platform->mem);
+
+ return 0;
+}
+
+static void *platform_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ /* Check limits */
+ if (ref >= 0 && ref < platform->mem_size)
+ return (void *)((uintptr_t)platform->mem + (uintptr_t)ref);
+ else
+ return NULL;
+}
+
+static void *platform_mif_get_mifram_phy_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");
+
+ if (!platform->mem_start) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "Memory unmmaped\n");
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)platform->mem_start + (uintptr_t)ref);
+}
+
/* Return the page-frame number of the first page of the shared-memory
 * vmalloc mapping (platform->mem).
 */
static uintptr_t platform_mif_get_mif_pfn(struct scsc_mif_abs *interface)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);

	return vmalloc_to_pfn(platform->mem);
}
+
/* Accessor: return the underlying struct device of this MIF instance. */
static struct device *platform_mif_get_mif_device(struct scsc_mif_abs *interface)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);

	SCSC_TAG_DEBUG_DEV(PLAT_MIF, platform->dev, "\n");

	return platform->dev;
}
+
/* irq_clear hook: intentionally a no-op on this platform. */
static void platform_mif_irq_clear(void)
{
	/* Implement if required */
}
+
+static int platform_mif_read_register(struct scsc_mif_abs *interface, u64 id, u32 *val)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (id == SCSC_REG_READ_WLBT_STAT) {
+ regmap_read(platform->pmureg, WLBT_STAT, val);
+ return 0;
+ }
+
+ return -EIO;
+}
+
/* Log the full MAILBOX_WLBT interrupt register set (generate/clear/mask/
 * status/masked-status for both directions) for debugging. Held under the
 * MIF spinlock so the snapshot is self-consistent.
 */
static void platform_mif_dump_register(struct scsc_mif_abs *interface)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
	unsigned long flags;

	spin_lock_irqsave(&platform->mif_spinlock, flags);

	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR0)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTGR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTGR1)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR0)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTCR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTCR1)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR0)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMR1)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR0)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTSR1)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR0 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR0)));
	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "INTMSR1 0x%08x\n", platform_mif_reg_read(platform, MAILBOX_WLBT_REG(INTMSR1)));

	spin_unlock_irqrestore(&platform->mif_spinlock, flags);
}
+
/* mif_cleanup hook: intentionally empty for this platform. */
static void platform_mif_cleanup(struct scsc_mif_abs *interface)
{
}
+
/* mif_restart hook: intentionally empty for this platform. */
static void platform_mif_restart(struct scsc_mif_abs *interface)
{
}
+
#ifdef CONFIG_OF_RESERVED_MEM
/* Early-boot hook for the "exynos,wifibt_if" reserved-memory region:
 * records base/size in file-scope globals consumed by platform_mif_create().
 */
static int __init platform_mif_wifibt_if_reserved_mem_setup(struct reserved_mem *remem)
{
	SCSC_TAG_DEBUG(PLAT_MIF, "memory reserved: mem_base=%#lx, mem_size=%zd\n",
		       (unsigned long)remem->base, (size_t)remem->size);

	sharedmem_base = remem->base;
	sharedmem_size = remem->size;
	return 0;
}
RESERVEDMEM_OF_DECLARE(wifibt_if, "exynos,wifibt_if", platform_mif_wifibt_if_reserved_mem_setup);
#endif
+
+struct scsc_mif_abs *platform_mif_create(struct platform_device *pdev)
+{
+ struct scsc_mif_abs *platform_if;
+ struct platform_mif *platform =
+ (struct platform_mif *)devm_kzalloc(&pdev->dev, sizeof(struct platform_mif), GFP_KERNEL);
+ int err = 0;
+ u8 i = 0;
+ struct resource *reg_res;
+
+#ifdef CONFIG_SCSC_SMAPPER
+ u32 smapper_banks = 0;
+#endif
+
+ if (!platform)
+ return NULL;
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, &pdev->dev, "Creating MIF platform device\n");
+
+ platform_if = &platform->interface;
+
+ /* initialise interface structure */
+ platform_if->destroy = platform_mif_destroy;
+ platform_if->get_uid = platform_mif_get_uid;
+ platform_if->reset = platform_mif_reset;
+ platform_if->map = platform_mif_map;
+ platform_if->unmap = platform_mif_unmap;
+ platform_if->irq_bit_set = platform_mif_irq_bit_set;
+ platform_if->irq_get = platform_mif_irq_get;
+ platform_if->irq_bit_mask_status_get = platform_mif_irq_bit_mask_status_get;
+ platform_if->irq_bit_clear = platform_mif_irq_bit_clear;
+ platform_if->irq_bit_mask = platform_mif_irq_bit_mask;
+ platform_if->irq_bit_unmask = platform_mif_irq_bit_unmask;
+ platform_if->irq_reg_handler = platform_mif_irq_reg_handler;
+ platform_if->irq_unreg_handler = platform_mif_irq_unreg_handler;
+ platform_if->irq_reg_reset_request_handler = platform_mif_irq_reg_reset_request_handler;
+ platform_if->irq_unreg_reset_request_handler = platform_mif_irq_unreg_reset_request_handler;
+ platform_if->suspend_reg_handler = platform_mif_suspend_reg_handler;
+ platform_if->suspend_unreg_handler = platform_mif_suspend_unreg_handler;
+ platform_if->get_mbox_ptr = platform_mif_get_mbox_ptr;
+ platform_if->get_mifram_ptr = platform_mif_get_mifram_ptr;
+ platform_if->get_mifram_ref = platform_mif_get_mifram_ref;
+ platform_if->get_mifram_pfn = platform_mif_get_mif_pfn;
+ platform_if->get_mifram_phy_ptr = platform_mif_get_mifram_phy_ptr;
+ platform_if->get_mif_device = platform_mif_get_mif_device;
+ platform_if->irq_clear = platform_mif_irq_clear;
+ platform_if->mif_dump_registers = platform_mif_dump_register;
+ platform_if->mif_read_register = platform_mif_read_register;
+ platform_if->mif_cleanup = platform_mif_cleanup;
+ platform_if->mif_restart = platform_mif_restart;
+#ifdef CONFIG_SCSC_SMAPPER
+ platform_if->mif_smapper_get_mapping = platform_mif_smapper_get_mapping;
+ platform_if->mif_smapper_get_bank_info = platform_mif_smapper_get_bank_info;
+ platform_if->mif_smapper_write_sram = platform_mif_smapper_write_sram;
+ platform_if->mif_smapper_configure = platform_mif_smapper_configure;
+ platform_if->mif_smapper_get_bank_base_address = platform_mif_smapper_get_bank_base_address;
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_if->mif_pm_qos_add_request = platform_mif_pm_qos_add_request;
+ platform_if->mif_pm_qos_update_request = platform_mif_pm_qos_update_request;
+ platform_if->mif_pm_qos_remove_request = platform_mif_pm_qos_remove_request;
+#endif
+ /* Update state */
+ platform->pdev = pdev;
+ platform->dev = &pdev->dev;
+
+ platform->r4_handler = platform_mif_irq_default_handler;
+ platform->irq_dev = NULL;
+ platform->reset_request_handler = platform_mif_irq_reset_request_default_handler;
+ platform->irq_reset_request_dev = NULL;
+ platform->suspend_handler = NULL;
+ platform->resume_handler = NULL;
+ platform->suspendresume_data = NULL;
+
+#ifdef CONFIG_OF_RESERVED_MEM
+ platform->mem_start = sharedmem_base;
+ platform->mem_size = sharedmem_size;
+#else
+ /* If CONFIG_OF_RESERVED_MEM is not defined, sharedmem values should be
+ * parsed from the scsc_wifibt binding
+ */
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-base", &sharedmem_base)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_start = sharedmem_base;
+
+ if (of_property_read_u32(pdev->dev.of_node, "sharedmem-size", &sharedmem_size)) {
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->mem_size = sharedmem_size;
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ platform->smapper = NULL;
+#endif
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->mem_start 0x%x platform->mem_size 0x%x\n",
+ (u32)platform->mem_start, (u32)platform->mem_size);
+ if (platform->mem_start == 0)
+ SCSC_TAG_WARNING_DEV(PLAT_MIF, platform->dev, "platform->mem_start is 0");
+
+ if (platform->mem_size == 0) {
+ /* We return return if mem_size is 0 as it does not make any sense.
+ * This may be an indication of an incorrect platform device binding.
+ */
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev, "platform->mem_size is 0");
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* Memory resource - Phys Address of MAILBOX_WLBT register map */
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!reg_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error getting mem resource for MAILBOX_WLBT\n");
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ platform->reg_start = reg_res->start;
+ platform->reg_size = resource_size(reg_res);
+
+ platform->base =
+ devm_ioremap_nocache(platform->dev, reg_res->start, resource_size(reg_res));
+
+ if (!platform->base) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "Error mapping register region\n");
+ err = -EBUSY;
+ goto error_exit;
+ }
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "platform->reg_start %lx size %x base %p\n",
+ (uintptr_t)platform->reg_start, (u32)platform->reg_size, platform->base);
+
+ /* Get the 4 IRQ resources */
+ for (i = 0; i < 4; i++) {
+ struct resource *irq_res;
+ int irqtag;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "No IRQ resource at index %d\n", i);
+ err = -ENOENT;
+ goto error_exit;
+ }
+
+ if (!strcmp(irq_res->name, "MBOX")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "MBOX irq %d flag 0x%x\n",
+ (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_MBOX;
+ } else if (!strcmp(irq_res->name, "ALIVE")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "ALIVE irq %d flag 0x%x\n",
+ (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_ALIVE;
+ } else if (!strcmp(irq_res->name, "WDOG")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WDOG irq %d flag 0x%x\n",
+ (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_WDOG;
+ } else if (!strcmp(irq_res->name, "CFG_REQ")) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "CFG_REQ irq %d flag 0x%x\n",
+ (u32)irq_res->start, (u32)irq_res->flags);
+ irqtag = PLATFORM_MIF_CFG_REQ;
+ } else {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, &pdev->dev, "Invalid irq res name: %s\n",
+ irq_res->name);
+ err = -EINVAL;
+ goto error_exit;
+ }
+ platform->wlbt_irq[irqtag].irq_num = irq_res->start;
+ platform->wlbt_irq[irqtag].flags = (irq_res->flags & IRQF_TRIGGER_MASK);
+ atomic_set(&platform->wlbt_irq[irqtag].irq_disabled_cnt, 0);
+ }
+
+ /* PMU reg map - syscon */
+ platform->pmureg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(platform->pmureg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "syscon regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->pmureg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* Completion event and state used to indicate CFG_REQ IRQ occurred */
+ init_completion(&platform->cfg_ack);
+ platform->boot_state = WLBT_BOOT_IN_RESET;
+
+ /* BAAW_P_WLBT */
+ platform->baaw_p_wlbt = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,baaw_p_wlbt-syscon-phandle");
+ if (IS_ERR(platform->baaw_p_wlbt)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "baaw_p_wlbt regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->baaw_p_wlbt));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* DBUS_BAAW */
+ platform->dbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,dbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->dbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "dbus_baaw regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->dbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* PBUS_BAAW */
+ platform->pbus_baaw = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,pbus_baaw-syscon-phandle");
+ if (IS_ERR(platform->pbus_baaw)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "pbus_baaw regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->pbus_baaw));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* WLBT_REMAP */
+ platform->wlbt_remap = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,wlbt_remap-syscon-phandle");
+ if (IS_ERR(platform->wlbt_remap)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "wlbt_remap regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->wlbt_remap));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+ /* BOOT_CFG */
+ platform->boot_cfg = syscon_regmap_lookup_by_phandle(platform->dev->of_node,
+ "samsung,boot_cfg-syscon-phandle");
+ if (IS_ERR(platform->boot_cfg)) {
+ SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
+ "boot_cfg regmap lookup failed. Aborting. %ld\n",
+ PTR_ERR(platform->boot_cfg));
+ err = -EINVAL;
+ goto error_exit;
+ }
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* SMAPPER parsing */
+ if (!of_property_read_u32(pdev->dev.of_node, "smapper_num_banks", &smapper_banks))
+ platform_mif_parse_smapper(platform, platform->dev->of_node, smapper_banks);
+
+#endif
+#ifdef CONFIG_SCSC_QOS
+ platform_mif_parse_qos(platform, platform->dev->of_node);
+#endif
+ /* Initialize spinlock */
+ spin_lock_init(&platform->mif_spinlock);
+
+ return platform_if;
+
+error_exit:
+ devm_kfree(&pdev->dev, platform);
+ return NULL;
+}
+
/* Teardown hook: intentionally empty (all resources are devm-managed). */
void platform_mif_destroy_platform(struct platform_device *pdev, struct scsc_mif_abs *interface)
{
}
+
+struct platform_device *platform_mif_get_platform_dev(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !platform);
+
+ return platform->pdev;
+}
+
+struct device *platform_mif_get_dev(struct scsc_mif_abs *interface)
+{
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ BUG_ON(!interface || !platform);
+
+ return platform->dev;
+}
+
/* Preserve MIF registers during suspend.
 * If all users of the MIF (AP, mx140, CP, etc) release it, the registers
 * will lose their values. Save the useful subset here.
 *
 * Assumption: the AP will not change the register values between the suspend
 * and resume handlers being called!
 */
static void platform_mif_reg_save(struct platform_mif *platform)
{
	/* Only the interrupt bit mask is preserved; see platform_mif_reg_restore() */
	platform->mif_preserve.irq_bit_mask = __platform_mif_irq_bit_mask_read(platform);
}
+
/* Restore MIF registers that may have been lost during suspend */
static void platform_mif_reg_restore(struct platform_mif *platform)
{
	/* Counterpart of platform_mif_reg_save(): re-write the saved IRQ mask */
	__platform_mif_irq_bit_mask_write(platform, platform->mif_preserve.irq_bit_mask);
}
+
+int platform_mif_suspend(struct scsc_mif_abs *interface)
+{
+ int r = 0;
+ struct platform_mif *platform = platform_mif_from_mif_abs(interface);
+
+ if (platform->suspend_handler)
+ r = platform->suspend_handler(interface, platform->suspendresume_data);
+
+ /* Save the MIF registers.
+ * This must be done last as the suspend_handler may use the MIF
+ */
+ platform_mif_reg_save(platform);
+
+ return r;
+}
+
/* Restore MIF state on system resume and notify the registered client. */
void platform_mif_resume(struct scsc_mif_abs *interface)
{
	struct platform_mif *platform = platform_mif_from_mif_abs(interface);
	s32 ret;

	/* Restore the MIF registers.
	 * This must be done first as the resume_handler may use the MIF.
	 */
	platform_mif_reg_restore(platform);

	SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "Clear WLBT_ACTIVE_CLR flag\n");
	/* Clear WLBT_ACTIVE_CLR flag in WLBT_CTRL_NS */
	/* NOTE(review): passing a literal 1 as the value only sets the field if
	 * WLBT_ACTIVE_CLR is bit 0; the common regmap idiom is to pass the mask
	 * itself as the value — verify against the PMU register layout.
	 */
	ret = regmap_update_bits(platform->pmureg, WLBT_CTRL_NS, WLBT_ACTIVE_CLR, 1);
	if (ret < 0) {
		SCSC_TAG_ERR_DEV(PLAT_MIF, platform->dev,
				 "Failed to Set WLBT_CTRL_NS[WLBT_ACTIVE_CLR]: %d\n", ret);
	}

	if (platform->resume_handler)
		platform->resume_handler(interface, platform->suspendresume_data);
}
+
+
+/* Temporary workaround to power up slave PMIC LDOs before FW APM/WLBT signalling
+ * is complete
+ */
+static void power_supplies_on(struct platform_mif *platform)
+{
+ struct i2c_client i2c;
+
+ /* HACK: Note only addr field is needed by s2mpu11_write_reg() */
+ i2c.addr = 0x1;
+
+ /* The APM IPC in FW will be used instead */
+ if (disable_apm_setup) {
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WLBT LDOs firmware controlled\n");
+ return;
+ }
+
+ SCSC_TAG_INFO_DEV(PLAT_MIF, platform->dev, "WLBT LDOs on (PMIC i2c_addr = 0x%x)\n", i2c.addr);
+
+ /* SLAVE PMIC
+ * echo 0x22 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xE0 > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ *
+ * echo 0x23 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xE8 > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ *
+ * echo 0x24 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xEC > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ *
+ * echo 0x25 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xEC > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ *
+ * echo 0x26 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xFC > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ *
+ * echo 0x27 > /sys/kernel/debug/s2mpu11-regs/i2caddr
+ * echo 0xFC > /sys/kernel/debug/s2mpu11-regs/i2cdata
+ */
+
+ s2mpu11_write_reg(&i2c, 0x22, 0xe0);
+ s2mpu11_write_reg(&i2c, 0x23, 0xe8);
+ s2mpu11_write_reg(&i2c, 0x24, 0xec);
+ s2mpu11_write_reg(&i2c, 0x25, 0xec);
+ s2mpu11_write_reg(&i2c, 0x26, 0xfc);
+ s2mpu11_write_reg(&i2c, 0x27, 0xfc);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <scsc/scsc_logring.h>
+#include "platform_mif_module.h"
+#include "platform_mif.h"
+
+/* Implements */
+#include "scsc_mif_abs.h"
+
+/* Variables */
/* One MIF abstraction instance (one per probed platform device). */
struct mif_abs_node {
	struct list_head list;
	struct scsc_mif_abs *mif_abs;
};

/* One registered upper-layer driver, probed against every MIF instance. */
struct mif_driver_node {
	struct list_head list;
	struct scsc_mif_abs_driver *driver;
};

/* One registered mmap/debug driver for a specific transport. */
struct mif_mmap_node {
	struct list_head list;
	struct scsc_mif_mmap_driver *driver;
};

/* Module-global registry linking MIF instances with registered drivers.
 * NOTE(review): the lists are not locked here; registration/probe/removal
 * appear to be serialised externally — confirm.
 */
static struct platform_mif_module {
	struct list_head mif_abs_list;
	struct list_head mif_driver_list;
	struct list_head mif_mmap_list;
} mif_module = {
	.mif_abs_list = LIST_HEAD_INIT(mif_module.mif_abs_list),
	.mif_driver_list = LIST_HEAD_INIT(mif_module.mif_driver_list),
	.mif_mmap_list = LIST_HEAD_INIT(mif_module.mif_mmap_list),
};
+
+/* Private Functions */
+
+static void platform_mif_module_probe_registered_clients(struct scsc_mif_abs *mif_abs)
+{
+ struct mif_driver_node *mif_driver_node, *next;
+ bool driver_registered = false;
+
+ /* Traverse Linked List for each mif_driver node */
+ list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
+ mif_driver_node->driver->probe(mif_driver_node->driver, mif_abs);
+ driver_registered = true;
+ }
+}
+
+static int platform_mif_module_probe(struct platform_device *pdev)
+{
+ struct mif_abs_node *mif_node;
+ struct scsc_mif_abs *mif_abs;
+
+ /* TODO: ADD EARLY BOARD INITIALIZATIONS IF REQUIRED */
+ /* platform_mif_init(); */
+
+ mif_node = kzalloc(sizeof(*mif_node), GFP_KERNEL);
+ if (!mif_node)
+ return -ENODEV;
+
+ mif_abs = platform_mif_create(pdev);
+ if (!mif_abs) {
+ SCSC_TAG_ERR(PLAT_MIF, "Error creating platform interface\n");
+ kfree(mif_node);
+ return -ENODEV;
+ }
+ /* Add node */
+ mif_node->mif_abs = mif_abs;
+ list_add_tail(&mif_node->list, &mif_module.mif_abs_list);
+ platform_mif_module_probe_registered_clients(mif_abs);
+
+ return 0;
+}
+
+static int platform_mif_module_remove(struct platform_device *pdev)
+{
+ struct mif_abs_node *mif_node, *next;
+ bool match = false;
+
+ /* Remove node */
+ list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
+ if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
+ match = true;
+ platform_mif_destroy_platform(pdev, mif_node->mif_abs);
+ list_del(&mif_node->list);
+ kfree(mif_node);
+ }
+ }
+ if (match == false)
+ SCSC_TAG_ERR(PLAT_MIF, "No match for given scsc_mif_abs\n");
+
+ return 0;
+}
+
+static int platform_mif_module_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mif_abs_node *mif_node, *next;
+ int r;
+
+ SCSC_TAG_INFO(PLAT_MIF, "\n");
+
+ /* Traverse mif_abs list for this platform_device to suspend */
+ list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
+ if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
+ /* Signal suspend, client can refuse */
+ r = platform_mif_suspend(mif_node->mif_abs);
+ if (r) {
+ SCSC_TAG_INFO(PLAT_MIF, "%d\n", r);
+ return r;
+ }
+ }
+ }
+ return 0;
+}
+
+static int platform_mif_module_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mif_abs_node *mif_node, *next;
+
+ SCSC_TAG_INFO(PLAT_MIF, "\n");
+
+ /* Traverse mif_abs list for this platform_device to resume */
+ list_for_each_entry_safe(mif_node, next, &mif_module.mif_abs_list, list) {
+ if (platform_mif_get_platform_dev(mif_node->mif_abs) == pdev) {
+ /* Signal resume */
+ platform_mif_resume(mif_node->mif_abs);
+ }
+ }
+ return 0;
+}
+
/* PM callbacks: forward system suspend/resume to each MIF instance. */
static const struct dev_pm_ops platform_mif_pm_ops = {
	.suspend = platform_mif_module_suspend,
	.resume = platform_mif_module_resume,
};

/* DT match table: binds this driver to "samsung,scsc_wifibt" nodes. */
static const struct of_device_id scsc_wifibt[] = {
	{ .compatible = "samsung,scsc_wifibt" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, scsc_wifibt);

static struct platform_driver platform_mif_driver = {
	.probe = platform_mif_module_probe,
	.remove = platform_mif_module_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &platform_mif_pm_ops,
		.of_match_table = of_match_ptr(scsc_wifibt),
	},
};

/* Choose when the driver should be probed */
#if 1
module_platform_driver(platform_mif_driver);
#else
/* Alternative: register earlier in boot via core_initcall instead of the
 * standard module init level.
 */
static int platform_mif_init(void)
{
	SCSC_TAG_INFO(PLAT_MIF, "register platform driver\n");
	return platform_driver_register(&platform_mif_driver);
}
core_initcall(platform_mif_init);
#endif
+
+/* Public Functions */
+void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver)
+{
+ struct mif_driver_node *mif_driver_node;
+ struct mif_abs_node *mif_node;
+
+ /* Add node in driver linked list */
+ mif_driver_node = kzalloc(sizeof(*mif_driver_node), GFP_KERNEL);
+ if (!mif_driver_node)
+ return;
+
+ mif_driver_node->driver = driver;
+ list_add_tail(&mif_driver_node->list, &mif_module.mif_driver_list);
+
+ /* Traverse Linked List for each mif_abs node */
+ list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
+ driver->probe(driver, mif_node->mif_abs);
+ }
+}
+EXPORT_SYMBOL(scsc_mif_abs_register);
+
+void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver)
+{
+ struct mif_driver_node *mif_driver_node, *next;
+
+ /* Traverse Linked List for each mif_driver node */
+ list_for_each_entry_safe(mif_driver_node, next, &mif_module.mif_driver_list, list) {
+ if (mif_driver_node->driver == driver) {
+ list_del(&mif_driver_node->list);
+ kfree(mif_driver_node);
+ }
+ }
+}
+EXPORT_SYMBOL(scsc_mif_abs_unregister);
+
+/* Register a mmap - debug driver - for this specific transport*/
+void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver)
+{
+ struct mif_mmap_node *mif_mmap_node;
+ struct mif_abs_node *mif_node;
+
+ /* Add node in driver linked list */
+ mif_mmap_node = kzalloc(sizeof(*mif_mmap_node), GFP_KERNEL);
+ if (!mif_mmap_node)
+ return;
+
+ mif_mmap_node->driver = mmap_driver;
+ list_add_tail(&mif_mmap_node->list, &mif_module.mif_mmap_list);
+
+ /* Traverse Linked List for each mif_abs node */
+ list_for_each_entry(mif_node, &mif_module.mif_abs_list, list) {
+ mmap_driver->probe(mmap_driver, mif_node->mif_abs);
+ }
+}
+EXPORT_SYMBOL(scsc_mif_mmap_register);
+
+/* Unregister a mmap - debug driver - for this specific transport*/
+void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver)
+{
+ struct mif_mmap_node *mif_mmap_node, *next;
+
+ /* Traverse Linked List for each mif_driver node */
+ list_for_each_entry_safe(mif_mmap_node, next, &mif_module.mif_mmap_list, list) {
+ if (mif_mmap_node->driver == mmap_driver) {
+ list_del(&mif_mmap_node->list);
+ kfree(mif_mmap_node);
+ }
+ }
+}
+EXPORT_SYMBOL(scsc_mif_mmap_unregister);
+
+MODULE_DESCRIPTION("SCSC Platform device Maxwell MIF abstraction");
+MODULE_AUTHOR("SCSC");
+MODULE_LICENSE("GPL");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
#ifndef __PLATFORM_MIF_MODULE_H
#define __PLATFORM_MIF_MODULE_H

/* Platform driver name registered with the driver core for WLBT. */
#define DRV_NAME "scsc_wlbt"

#endif /* __PLATFORM_MIF_MODULE_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_APP_MSG_H__
+#define __SCSC_APP_MSG_H__
+
+
+#define BCSP_CSTOPB_MASK 0x0001
+#define BCSP_PARENB_MASK 0x0002
+#define BCSP_PAREVEN_MASK 0x0004
+#define BCSP_CRTSCTS_MASK 0x0008
+
+enum {
+ SCSC_APP_MSG_TYPE_APP_STARTED_REPLY = 0,
+ SCSC_APP_MSG_TYPE_GET_DB,
+ SCSC_APP_MSG_TYPE_GET_DB_REPLY,
+ SCSC_APP_MSG_TYPE_LD_REGISTER_LOW_RATE,
+ SCSC_APP_MSG_TYPE_LD_REGISTER_HIGH_RATE,
+ SCSC_APP_MSG_TYPE_LD_REGISTER_REPLY,
+ SCSC_APP_MSG_TYPE_LD_UNREGISTER,
+ SCSC_APP_MSG_TYPE_LD_UNREGISTER_BREAK,
+ SCSC_APP_MSG_TYPE_LD_UNREGISTER_REPLY,
+ SCSC_APP_MSG_TYPE_APP_EXIT,
+ SCSC_APP_MSG_TYPE_APP_EXIT_REPLY,
+ SCSC_APP_MSG_TYPE_SET_FAST_RATE,
+ SCSC_APP_MSG_TYPE_SET_FAST_RATE_REPLY,
+};
+
+enum {
+ SCSC_APP_MSG_STATUS_OK = 0,
+ SCSC_APP_MSG_STATUS_FAILURE,
+};
+
+
+struct scsc_app_msg_req {
+ __u16 type;
+};
+
+struct scsc_app_msg_resp {
+ __u16 type;
+ __u16 status;
+ __u32 len;
+ __u8 data[0];
+};
+
+#endif /* __SCSC_APP_MSG_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h> /* Required for copy_to_user. */
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include <scsc/scsc_logring.h>
+
+#include "mxman.h"
+#include "mxmgmt_transport_format.h" /* Required for MXMGR_MESSAGE_PAYLOAD_SIZE. */
+
/* Character device identity: device "lerna" under class "scsc_config". */
#define DEVICE_NAME "lerna"
#define DEVICE_CLASS "scsc_config"
#define DEVICE_COUNT (1)

/* Firmware reply buffer pointer; NULL means "no reply pending". */
static const void *scsc_lerna_pending;
/* Milliseconds a reader blocks waiting for the firmware reply. */
#define SCSC_LERNA_WAIT_TIMEOUT (2000)
/* Completed by the firmware-reply path to wake a blocked reader. */
static DECLARE_COMPLETION(scsc_lerna_wait);

/**
 * MSMGR_MESSAGE_PAYLOAD_SIZE is not a nice power of 2, so use sizeof(msmgr_message)
 * just for something more aesthetically pleasing.
 */
#define SCSC_LERNA_BUFFER_SIZE (sizeof(struct mxmgr_message))
/* Staging buffers for userspace requests and firmware responses. */
static uint8_t scsc_lerna_request_buffer[SCSC_LERNA_BUFFER_SIZE];
static uint8_t scsc_lerna_response_buffer[SCSC_LERNA_BUFFER_SIZE];

/* Character device bookkeeping, set up in scsc_lerna_init(). */
static dev_t scsc_lerna_device_id;
static struct class *scsc_lerna_class_p;
static struct device *scsc_lerna_device_p;
static struct cdev scsc_lerna_cdev;

static int scsc_lerna_chardev_open(struct inode *inodep, struct file *filep);
static ssize_t scsc_lerna_chardev_read(struct file *filep, char *buffer, size_t len, loff_t *offset);
static ssize_t scsc_lerna_chardev_write(struct file *filep, const char *buffer, size_t len, loff_t *offset);
static int scsc_lerna_chardev_release(struct inode *inodep, struct file *filep);

/* NOTE(review): could likely be 'const struct file_operations' — confirm. */
static struct file_operations scsc_lerna_fops = {
	.open = scsc_lerna_chardev_open,
	.read = scsc_lerna_chardev_read,
	.write = scsc_lerna_chardev_write,
	.release = scsc_lerna_chardev_release,
};

/* 0 = device free, >0 = in use; enforces single-open semantics. */
static atomic_t scsc_lerna_atomic;

/* Wire header preceding every lerna request/reply payload. */
struct scsc_lerna_cmd_header {
	uint8_t magic_number; /* Set to 0x08. */
	uint8_t cid; /* Action command identifier. */
	uint16_t payload_length; /* Payload length. 0 for value query. */
	uint16_t psid; /* PSID to query. */
	uint8_t row_index; /* Row index, or 0 for non-table querying. */
	uint8_t group_index; /* Group index, or 0 for default (group not assigned). */
};
+
+static int scsc_lerna_chardev_open(struct inode *inodep, struct file *filep)
+{
+ (void)inodep;
+ (void)filep;
+
+ if (atomic_inc_return(&scsc_lerna_atomic) > 1) {
+ atomic_dec(&scsc_lerna_atomic);
+ /* Someone already has this open. Denied. */
+ SCSC_TAG_DEBUG(LERNA, "character device busy, try again later.\n");
+ return -EBUSY;
+ }
+
+ SCSC_TAG_DEBUG(LERNA, "opening lerna character device.\n");
+ return 0;
+}
+
+static ssize_t scsc_lerna_chardev_read(struct file *filep, char *buffer, size_t len, loff_t *offset)
+{
+ const struct scsc_lerna_cmd_header *header;
+ unsigned long wait_result;
+ ssize_t read_count;
+ int error_count;
+
+ (void)filep;
+ (void)offset;
+
+ wait_result = wait_for_completion_timeout(&scsc_lerna_wait, msecs_to_jiffies(SCSC_LERNA_WAIT_TIMEOUT));
+ if (wait_result == 0) {
+ SCSC_TAG_ERR(LERNA, "read timeout; firmware not responding, or read without write.\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!scsc_lerna_pending) {
+ /* Pointer is NULL, indicating that a reply hasn't been sent from firmware. */
+ SCSC_TAG_DEBUG(LERNA, "pending reply is null.\n");
+ return -ENOMSG;
+ }
+
+ header = (const struct scsc_lerna_cmd_header *)(scsc_lerna_pending);
+ read_count = sizeof(struct scsc_lerna_cmd_header) + header->payload_length;
+
+ /* Make sure there's enough space to read out the buffer. */
+ if (len < read_count) {
+ SCSC_TAG_ERR(LERNA, "insufficient buffer space supplied for read.\n");
+ return -ENOBUFS;
+ }
+
+ error_count = copy_to_user(buffer, scsc_lerna_pending, read_count);
+
+ if (error_count) {
+ SCSC_TAG_ERR(LERNA, "could not read from lerna character device.\n");
+ return -EFAULT;
+ }
+
+ SCSC_TAG_DEBUG(LERNA, "read buffer of size: %lu\n", read_count);
+ /* Value was read out, and is no longer considered valid. Need to write before another read. */
+ scsc_lerna_pending = NULL;
+ return read_count;
+}
+
+/**
+ * Forward a userspace request to the firmware via mxman_lerna_send().
+ *
+ * A request must be at least one command header and at most
+ * SCSC_LERNA_BUFFER_SIZE bytes.
+ *
+ * Returns @len on success, -EBADR for a short request, -EMSGSIZE for an
+ * oversized one, -EFAULT if the user buffer could not be copied.
+ */
+static ssize_t scsc_lerna_chardev_write(struct file *filep, const char *buffer, size_t len, loff_t *offset)
+{
+	/* %zu: len is size_t. */
+	SCSC_TAG_DEBUG(LERNA, "writing buffer of size: %zu\n", len);
+	/* At a minimum, any request (read or write) must include a command header. */
+	if (len < sizeof(struct scsc_lerna_cmd_header))
+		return -EBADR;
+	/* Message size too long, don't write anything. */
+	if (len > SCSC_LERNA_BUFFER_SIZE)
+		return -EMSGSIZE;
+	/* copy_from_user() returns the number of bytes NOT copied; previously
+	 * this result was ignored and a partially-copied buffer could be sent. */
+	if (copy_from_user(scsc_lerna_request_buffer, buffer, len))
+		return -EFAULT;
+	mxman_lerna_send(NULL, scsc_lerna_request_buffer, len);
+
+	return len;
+}
+
+/* Release hook: drop the single-open lock taken at open time. */
+static int scsc_lerna_chardev_release(struct inode *inodep, struct file *filep)
+{
+	(void)inodep;
+	(void)filep;
+
+	if (atomic_read(&scsc_lerna_atomic))
+		atomic_dec(&scsc_lerna_atomic); /* Done with the device; unlock it. */
+	else
+		SCSC_TAG_ALERT(LERNA, "character device release without open.\n");
+
+	SCSC_TAG_DEBUG(LERNA, "lerna character device closed.\n");
+	return 0;
+}
+
+
+/**
+ * scsc_lerna_init - create the lerna character device.
+ *
+ * Allocates a chrdev region, creates the device class and node, then
+ * registers the cdev. Any partial setup is undone on failure.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int scsc_lerna_init(void)
+{
+	int result;
+
+	/*
+	 * Reset important globals to some kind of sane value. This should be done
+	 * whenever the module is loaded explicitly to be sure global values haven't
+	 * been previously trashed.
+	 */
+	scsc_lerna_device_id = 0;
+	scsc_lerna_class_p = NULL;
+	scsc_lerna_device_p = NULL;
+
+	/* Make sure to initialise the atomic used to lock char device access. */
+	atomic_set(&scsc_lerna_atomic, 0);
+
+	/*
+	 * Allocate device id(s) for the character device. alloc_chrdev_region()
+	 * dynamically allocates a major number. Returns non-zero on failure.
+	 */
+	result = alloc_chrdev_region(&scsc_lerna_device_id, 0, DEVICE_COUNT, DEVICE_NAME);
+	if (result) {
+		/* Failure to register char dev, auto fail to initialise module. */
+		SCSC_TAG_ALERT(LERNA, "lerna failed to register character device.\n");
+		return result;
+	}
+
+	scsc_lerna_class_p = class_create(THIS_MODULE, DEVICE_CLASS);
+	if (IS_ERR(scsc_lerna_class_p)) {
+		/* Could not create class, failure, remember to unregister device id(s). */
+		unregister_chrdev_region(scsc_lerna_device_id, DEVICE_COUNT);
+		SCSC_TAG_ALERT(LERNA, "lerna failed to create character class.\n");
+		return PTR_ERR(scsc_lerna_class_p);
+	}
+
+	scsc_lerna_device_p = device_create(scsc_lerna_class_p, NULL, scsc_lerna_device_id, NULL, DEVICE_NAME);
+	if (IS_ERR(scsc_lerna_device_p)) {
+		class_destroy(scsc_lerna_class_p);
+		unregister_chrdev_region(scsc_lerna_device_id, DEVICE_COUNT);
+		SCSC_TAG_ALERT(LERNA, "lerna failed to create character device.\n");
+		return PTR_ERR(scsc_lerna_device_p);
+	}
+
+	/*
+	 * At this point, the device is registered, along with class definition. The
+	 * character device itself can now be initialised to provide the kernel with
+	 * callback information for various actions taken on the device.
+	 */
+	cdev_init(&scsc_lerna_cdev, &scsc_lerna_fops);
+	scsc_lerna_cdev.owner = THIS_MODULE;
+
+	result = cdev_add(&scsc_lerna_cdev, scsc_lerna_device_id, DEVICE_COUNT);
+	if (result) {
+		/* Failure to add character device to file system. Unwind the
+		 * device node created above (previously leaked here); cdev_del()
+		 * must not be called for a cdev that was never added. */
+		device_destroy(scsc_lerna_class_p, scsc_lerna_device_id);
+		class_destroy(scsc_lerna_class_p);
+		unregister_chrdev_region(scsc_lerna_device_id, DEVICE_COUNT);
+		SCSC_TAG_ALERT(LERNA, "lerna failed to add character device.\n");
+		return result;
+	}
+	/* At this point, the cdev is live and can be used. */
+
+	SCSC_TAG_INFO(LERNA, "lerna initialisation complete.\n");
+	return 0; /* 0 for module loaded, non-zero for module load failure. */
+}
+
+/**
+ * scsc_lerna_deinit - tear down the lerna character device, in reverse
+ * order of creation.
+ */
+void scsc_lerna_deinit(void)
+{
+	/* Character device needs deleting. */
+	cdev_del(&scsc_lerna_cdev);
+
+	/* Destroy device. */
+	device_destroy(scsc_lerna_class_p, scsc_lerna_device_id);
+
+	/*
+	 * class_destroy() already performs the class_unregister() step, so it
+	 * must not also be called explicitly beforehand (doing so unregistered
+	 * the class twice).
+	 */
+	class_destroy(scsc_lerna_class_p);
+
+	/*
+	 * Don't forget to unregister device id(s). Major number is dynamically
+	 * allocated, so the base id is remembered and passed along to the
+	 * unregister here.
+	 */
+	unregister_chrdev_region(scsc_lerna_device_id, DEVICE_COUNT);
+
+	SCSC_TAG_INFO(LERNA, "lerna shutdown complete.\n");
+}
+
+/*
+ * Callback invoked with the firmware's reply (or NULL on failure paths).
+ *
+ * Buffer the response from the firmware so that future messages from firmware
+ * don't overwrite this accidentally. This means async messages are allowed while
+ * waiting for the character device read from userspace, without impacting lerna's
+ * request/response communications. The completion releases any reader
+ * blocked in scsc_lerna_chardev_read().
+ */
+void scsc_lerna_response(const void *message)
+{
+	const struct scsc_lerna_cmd_header *header;
+	ssize_t read_count;
+
+	if (message != NULL) {
+		/* Total size = header + firmware-declared payload length. */
+		header = (const struct scsc_lerna_cmd_header *)(message);
+		read_count = sizeof(struct scsc_lerna_cmd_header) + header->payload_length;
+
+		if (read_count <= SCSC_LERNA_BUFFER_SIZE) {
+			memcpy(scsc_lerna_response_buffer, message, read_count);
+			scsc_lerna_pending = scsc_lerna_response_buffer;
+		} else {
+			SCSC_TAG_DEBUG(LERNA, "readout too large for response buffering.\n");
+			/* No response possible, let the userspace application deal with it. */
+			scsc_lerna_pending = NULL;
+		}
+	}
+	/* Wake the pending chardev read (if any), even when message is NULL. */
+	complete(&scsc_lerna_wait);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _SCSC_LERNA_H
+#define _SCSC_LERNA_H
+
+int scsc_lerna_init(void);
+
+void scsc_lerna_deinit(void);
+
+void scsc_lerna_response(const void *message);
+
+#endif /* _SCSC_LERNA_H */
--- /dev/null
+/********************************************************************************
+ *
+ * Copyright (c) 2016 - 2019 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ********************************************************************************/
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/limits.h>
+#include <linux/workqueue.h>
+
+#include <scsc/scsc_log_collector.h>
+#include "scsc_log_collector_proc.h"
+#include "scsc_log_collector_mmap.h"
+#include <scsc/scsc_mx.h>
+#include "mxlogger.h"
+
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#endif
+
+#define SCSC_NUM_CHUNKS_SUPPORTED 13
+
+#define TO_RAM 0
+#define TO_FILE 1
+/* Add-remove supported chunks on this kernel */
+static u8 chunk_supported_sbl[SCSC_NUM_CHUNKS_SUPPORTED] = {
+ SCSC_LOG_CHUNK_SYNC,
+ SCSC_LOG_CHUNK_IMP,
+ SCSC_LOG_CHUNK_MXL,
+ SCSC_LOG_CHUNK_UDI,
+ SCSC_LOG_CHUNK_BT_HCF,
+ SCSC_LOG_CHUNK_WLAN_HCF,
+ SCSC_LOG_CHUNK_HIP4_SAMPLER,
+ SCSC_LOG_RESERVED_COMMON,
+ SCSC_LOG_RESERVED_BT,
+ SCSC_LOG_RESERVED_WLAN,
+ SCSC_LOG_RESERVED_RADIO,
+ SCSC_LOG_MINIMOREDUMP,
+ SCSC_LOG_CHUNK_LOGRING,
+};
+
+static int scsc_log_collector_collect(enum scsc_log_reason reason, u16 reason_code);
+
+static atomic_t in_collection;
+
+/* Collect logs in an intermediate buffer to be collected at later time (mmap or wq) */
+static bool collect_to_ram = true;
+module_param(collect_to_ram, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(collect_to_ram, "Collect buffer in ram");
+
+static char collection_dir_buf[256] = "/data/vendor/log/wifi";
+module_param_string(collection_target_directory, collection_dir_buf, sizeof(collection_dir_buf), S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(collection_target_directory, "Specify collection target directory");
+
+static bool sable_collection_off;
+/* kernel_param_ops .set: parse a bool and propagate changes to mxlogger. */
+static int sable_collection_off_set_param_cb(const char *val,
+					     const struct kernel_param *kp)
+{
+	bool req;
+
+	if (!val || strtobool(val, &req))
+		return -EINVAL;
+
+	/* Only act on an actual state change. */
+	if (req != sable_collection_off) {
+		sable_collection_off = req;
+		mxlogger_set_enabled_status(!sable_collection_off);
+		pr_info("Sable Log Collection is now %sABLED.\n",
+			sable_collection_off ? "DIS" : "EN");
+	}
+	return 0;
+}
+
+/*
+ * kernel_param_ops .get: report the current state as 'Y'/'N'.
+ * Per struct kernel_param_ops, returns the length written or -errno;
+ * the destination buffer is 4k, so keep the output short.
+ */
+static int sable_collection_off_get_param_cb(char *buffer,
+					     const struct kernel_param *kp)
+{
+	return sprintf(buffer, "%c", sable_collection_off ? 'Y' : 'N');
+}
+
+static struct kernel_param_ops sable_collection_off_ops = {
+ .set = sable_collection_off_set_param_cb,
+ .get = sable_collection_off_get_param_cb,
+};
+module_param_cb(sable_collection_off, &sable_collection_off_ops, NULL, 0644);
+MODULE_PARM_DESC(sable_collection_off, "Disable SABLE Log Collection. This will inhibit also MXLOGGER");
+
+struct scsc_log_client {
+ struct list_head list;
+ struct scsc_log_collector_client *collect_client;
+};
+static struct scsc_log_collector_list { struct list_head list; } scsc_log_collector_list = {
+ .list = LIST_HEAD_INIT(scsc_log_collector_list.list)
+};
+
+struct scsc_log_status {
+ struct file *fp;
+ loff_t pos;
+ bool in_collection;
+ char fapi_ver[SCSC_LOG_FAPI_VERSION_SIZE];
+
+ unsigned char *buf;
+ struct workqueue_struct *collection_workq;
+ struct work_struct collect_work;
+ enum scsc_log_reason collect_reason;
+ u16 reason_code;
+ struct mutex collection_serial;
+ bool observer_present;
+} log_status;
+
+static DEFINE_MUTEX(log_mutex);
+
+/*
+ * Workqueue handler: runs the scheduled log collection, then clears the
+ * in_collection flag so a new request can be queued.
+ */
+static void collection_worker(struct work_struct *work)
+{
+	struct scsc_log_status *ls;
+
+	/* container_of() of a valid work pointer cannot yield NULL; this
+	 * check is defensive only. */
+	ls = container_of(work, struct scsc_log_status, collect_work);
+	if (!ls)
+		return;
+	pr_info("SCSC running scheduled Log Collection - collect reason:%d reason code:%d\n",
+		ls->collect_reason, ls->reason_code);
+	scsc_log_collector_collect(ls->collect_reason, ls->reason_code);
+	atomic_set(&in_collection, 0);
+}
+
+/*
+ * Module init: set up the collection workqueue, the intermediate RAM
+ * buffer, the /proc trigger interface and the mmap character device.
+ *
+ * Always returns 0; a failed workqueue or buffer allocation only degrades
+ * functionality (both are re-checked at their use sites).
+ */
+int __init scsc_log_collector(void)
+{
+	pr_info("Log Collector Init\n");
+
+	log_status.in_collection = false;
+	log_status.collection_workq = create_workqueue("log_collector");
+	if (log_status.collection_workq)
+		INIT_WORK(&log_status.collect_work, collection_worker);
+	/* Update mxlogger status on init.*/
+	pr_info("Sable Log Collection is now %sABLED.\n",
+		sable_collection_off ? "DIS" : "EN");
+	mxlogger_set_enabled_status(!sable_collection_off);
+
+	/* Create the buffer on the constructor */
+	log_status.buf = vzalloc(SCSC_LOG_COLLECT_MAX_SIZE);
+	if (IS_ERR_OR_NULL(log_status.buf)) {
+		/* NOTE(review): vzalloc() returns NULL on failure, never an
+		 * ERR_PTR, so PTR_ERR() here always prints 0. */
+		pr_err("open allocating memmory err = %ld\n", PTR_ERR(log_status.buf));
+		log_status.buf = NULL;
+	}
+
+	mutex_init(&log_status.collection_serial);
+
+	scsc_log_collect_proc_create();
+	scsc_log_collector_mmap_create();
+	return 0;
+}
+
+/*
+ * Module exit: release the collection buffer, the /proc interface, the
+ * mmap character device and the workqueue.
+ */
+void __exit scsc_log_collector_exit(void)
+{
+	if (log_status.buf)
+		vfree(log_status.buf);
+
+	scsc_log_collect_proc_remove();
+	/* Tear down the mmap chardev created during init; previously this was
+	 * never destroyed, leaking the chrdev region, class and device node
+	 * on module unload. */
+	scsc_log_collector_mmap_destroy();
+	if (log_status.collection_workq) {
+		flush_workqueue(log_status.collection_workq);
+		destroy_workqueue(log_status.collection_workq);
+		log_status.collection_workq = NULL;
+	}
+
+	pr_info("Log Collect Unloaded\n");
+}
+
+module_init(scsc_log_collector);
+module_exit(scsc_log_collector_exit);
+
+/* Return true when @type is one of the ids listed in chunk_supported_sbl. */
+static bool scsc_is_chunk_supported(u8 type)
+{
+	u8 idx;
+
+	for (idx = 0; idx < SCSC_NUM_CHUNKS_SUPPORTED; idx++)
+		if (chunk_supported_sbl[idx] == type)
+			return true;
+
+	return false;
+}
+
+/* list_sort() comparator: order registered clients by ascending chunk type. */
+static int scsc_log_collector_compare(void *priv, struct list_head *A, struct list_head *B)
+{
+	struct scsc_log_client *a = list_entry(A, typeof(*a), list);
+	struct scsc_log_client *b = list_entry(B, typeof(*b), list);
+
+	return (a->collect_client->type < b->collect_client->type) ? -1 : 1;
+}
+
+/**
+ * Register a chunk provider with the log collector.
+ *
+ * The client's type must be one of the ids in chunk_supported_sbl.
+ * Clients are kept sorted by ascending chunk type so collection emits
+ * chunks in a deterministic order.
+ *
+ * Returns 0 on success, -EIO for an unsupported chunk type, -ENOMEM on
+ * allocation failure.
+ */
+int scsc_log_collector_register_client(struct scsc_log_collector_client *collect_client)
+{
+	struct scsc_log_client *lc;
+
+	if (!scsc_is_chunk_supported(collect_client->type)) {
+		pr_info("Type not supported: %d\n", collect_client->type);
+		return -EIO;
+	}
+
+	mutex_lock(&log_mutex);
+	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
+	if (!lc) {
+		mutex_unlock(&log_mutex);
+		return -ENOMEM;
+	}
+
+	lc->collect_client = collect_client;
+	list_add_tail(&lc->list, &scsc_log_collector_list.list);
+
+	/* Sort the list */
+	list_sort(NULL, &scsc_log_collector_list.list, scsc_log_collector_compare);
+
+	pr_info("Registered client: %s\n", collect_client->name);
+	mutex_unlock(&log_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(scsc_log_collector_register_client);
+
+/**
+ * Remove a previously registered chunk provider.
+ *
+ * Taking log_mutex also blocks unregistration while a collection is in
+ * progress. Always returns 0, even when the client was never registered
+ * (that case is only logged).
+ */
+int scsc_log_collector_unregister_client(struct scsc_log_collector_client *collect_client)
+{
+	struct scsc_log_client *lc, *next;
+	bool match = false;
+
+	/* block any attempt of unregistering while a collection is in progress */
+	mutex_lock(&log_mutex);
+	list_for_each_entry_safe(lc, next, &scsc_log_collector_list.list, list) {
+		if (lc->collect_client == collect_client) {
+			match = true;
+			list_del(&lc->list);
+			kfree(lc);
+		}
+	}
+
+	if (match == false)
+		pr_err("FATAL, no match for given scsc_log_collector_client\n");
+
+	pr_info("Unregistered client: %s\n", collect_client->name);
+	mutex_unlock(&log_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(scsc_log_collector_unregister_client);
+
+
+/* Expose the intermediate collection buffer (NULL if allocation failed at init). */
+unsigned char *scsc_log_collector_get_buffer(void)
+{
+	return log_status.buf;
+}
+
+/*
+ * Append @count bytes to the in-RAM collection buffer, after rounding the
+ * write position up to @align (a power of two).
+ *
+ * The bound is now checked against the *aligned* position: checking the
+ * pre-alignment position (as previously done) could overflow the buffer
+ * by up to align-1 bytes.
+ */
+static int __scsc_log_collector_write_to_ram(char __user *buf, size_t count, u8 align)
+{
+	loff_t pos;
+
+	if (!log_status.in_collection || !log_status.buf)
+		return -EIO;
+
+	pos = (log_status.pos + align - 1) & ~(align - 1);
+	if (pos + count > SCSC_LOG_COLLECT_MAX_SIZE) {
+		pr_err("Write will exceed SCSC_LOG_COLLECT_MAX_SIZE. Abort write\n");
+		return -ENOMEM;
+	}
+	log_status.pos = pos;
+
+	/* Write buf to RAM */
+	memcpy(log_status.buf + log_status.pos, buf, count);
+
+	log_status.pos += count;
+
+	return 0;
+}
+
+/*
+ * Append @count bytes to the collection file at the (aligned) current
+ * position. Mirrors the RAM path: the size limit is checked against the
+ * aligned position, not the pre-alignment one.
+ */
+static int __scsc_log_collector_write_to_file(char __user *buf, size_t count, u8 align)
+{
+	int ret = 0;
+	loff_t pos;
+
+	if (!log_status.in_collection)
+		return -EIO;
+
+	pos = (log_status.pos + align - 1) & ~(align - 1);
+	if (pos + count > SCSC_LOG_COLLECT_MAX_SIZE) {
+		pr_err("Write will exceed SCSC_LOG_COLLECT_MAX_SIZE. Abort write\n");
+		return -ENOMEM;
+	}
+	log_status.pos = pos;
+
+	/* Write buf to file */
+	ret = vfs_write(log_status.fp, buf, count, &log_status.pos);
+	if (ret < 0) {
+		pr_err("write file error, err = %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+/* Dispatch a collection write to RAM or file depending on collect_to_ram. */
+int scsc_log_collector_write(char __user *buf, size_t count, u8 align)
+{
+	return collect_to_ram ?
+		__scsc_log_collector_write_to_ram(buf, count, align) :
+		__scsc_log_collector_write_to_file(buf, count, align);
+}
+EXPORT_SYMBOL(scsc_log_collector_write);
+
+#define align_chunk(ppos) (((ppos) + (SCSC_LOG_CHUNK_ALIGN - 1)) & \
+ ~(SCSC_LOG_CHUNK_ALIGN - 1))
+
+/**
+ * __scsc_log_collector_collect - perform a synchronous log collection.
+ * @reason: trigger id (enum scsc_log_reason).
+ * @reason_code: trigger-specific detail, stored in the SBL header.
+ * @buffer: TO_RAM fills the preallocated vzalloc buffer; TO_FILE writes
+ *          <collection_dir_buf>/<trigger>.sbl directly.
+ *
+ * Produces an SBL image: file header at offset 0, then one chunk per
+ * registered client (list order, i.e. sorted by chunk type), each
+ * preceded by a chunk header and aligned via align_chunk(). Serialised
+ * against client (un)registration by log_mutex.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int __scsc_log_collector_collect(enum scsc_log_reason reason, u16 reason_code, u8 buffer)
+{
+	struct scsc_log_client *lc, *next;
+	mm_segment_t old_fs;	/* only initialised/used on the TO_FILE path */
+	char memdump_path[128];
+	int ret = 0;
+	char version_fw[SCSC_LOG_FW_VERSION_SIZE] = {0};
+	char version_host[SCSC_LOG_HOST_VERSION_SIZE] = {0};
+	u32 mem_pos, temp_pos, chunk_size;
+	ktime_t start;
+	u8 num_chunks = 0;
+	u16 first_chunk_pos = SCSC_LOG_OFFSET_FIRST_CHUNK;
+	struct scsc_log_sbl_header sbl_header;
+	struct scsc_log_chunk_header chk_header;
+	u8 j;
+	bool sbl_is_valid = false;
+
+	mutex_lock(&log_mutex);
+
+	pr_info("Log collection triggered %s reason_code 0x%x\n",
+		scsc_get_trigger_str((int)reason), reason_code);
+
+	start = ktime_get();
+
+	if (buffer == TO_FILE) {
+		snprintf(memdump_path, sizeof(memdump_path), "%s/%s.sbl",
+			 collection_dir_buf, scsc_get_trigger_str((int)reason));
+
+		/* change to KERNEL_DS address limit */
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+
+		log_status.fp = filp_open(memdump_path, O_CREAT | O_WRONLY | O_SYNC | O_TRUNC, 0664);
+		if (IS_ERR(log_status.fp)) {
+			ret = PTR_ERR(log_status.fp);
+			pr_err("open file error, err = %ld\n", PTR_ERR(log_status.fp));
+			/* Restore the address limit changed above; previously
+			 * this early return leaked KERNEL_DS. */
+			set_fs(old_fs);
+			mutex_unlock(&log_mutex);
+			return ret;
+		}
+	} else if (!log_status.buf) {
+		pr_err("RAM buffer not created. Aborting dump\n");
+		mutex_unlock(&log_mutex);
+		return -ENOMEM;
+	}
+
+	log_status.in_collection = true;
+	/* Position index to start of the first chunk */
+	log_status.pos = SCSC_LOG_OFFSET_FIRST_CHUNK;
+
+	/* Call client init callbacks if any */
+	list_for_each_entry_safe(lc, next, &scsc_log_collector_list.list, list) {
+		if (lc->collect_client && lc->collect_client->collect_init)
+			lc->collect_client->collect_init(lc->collect_client);
+	}
+	/* Traverse all the clients from the list.. Those would start calling scsc_log_collector_write!!*/
+	/* Create chunk */
+	list_for_each_entry_safe(lc, next, &scsc_log_collector_list.list, list) {
+		if (lc->collect_client) {
+			num_chunks++;
+			/* Create Chunk */
+			/* Store current pos */
+			temp_pos = log_status.pos;
+			/* Make room for chunk header */
+			log_status.pos += SCSC_CHUNK_HEADER_SIZE;
+			/* Execute clients callbacks */
+			if (lc->collect_client->collect(lc->collect_client, 0))
+				goto exit;
+			/* Write chunk headers */
+			/* Align log_status.pos */
+			mem_pos = log_status.pos = align_chunk(log_status.pos);
+			chunk_size = log_status.pos - temp_pos - SCSC_CHUNK_HEADER_SIZE;
+			/* rewind pos */
+			log_status.pos = temp_pos;
+			/* Write chunk header */
+			memcpy(chk_header.magic, "CHK", 3);
+			chk_header.type = (char)lc->collect_client->type;
+			chk_header.chunk_size = chunk_size;
+			scsc_log_collector_write((char *)&chk_header, sizeof(struct scsc_log_chunk_header), 1);
+			/* restore position for next chunk */
+			log_status.pos = mem_pos;
+		}
+	}
+	/* Callbacks to clients have finished at this point. */
+	/* Write file header */
+	/* Move position to start of file */
+	log_status.pos = 0;
+	/* Write header */
+	memset(&sbl_header, 0, sizeof(sbl_header));
+	memcpy(sbl_header.magic, "SCSC", 4);
+	sbl_header.version_major = SCSC_LOG_HEADER_VERSION_MAJOR;
+	sbl_header.version_minor = SCSC_LOG_HEADER_VERSION_MINOR;
+	sbl_header.num_chunks = num_chunks;
+	sbl_header.trigger = reason;
+	sbl_header.reason_code = reason_code;
+	sbl_header.observer = log_status.observer_present;
+	sbl_header.offset_data = first_chunk_pos;
+	mxman_get_fw_version(version_fw, SCSC_LOG_FW_VERSION_SIZE);
+	memcpy(sbl_header.fw_version, version_fw, SCSC_LOG_FW_VERSION_SIZE);
+	mxman_get_driver_version(version_host, SCSC_LOG_HOST_VERSION_SIZE);
+	memcpy(sbl_header.host_version, version_host, SCSC_LOG_HOST_VERSION_SIZE);
+	memcpy(sbl_header.fapi_version, log_status.fapi_ver, SCSC_LOG_FAPI_VERSION_SIZE);
+
+	memset(sbl_header.supported_chunks, SCSC_LOG_CHUNK_INVALID, SCSC_SUPPORTED_CHUNKS_HEADER);
+	for (j = 0; j < SCSC_NUM_CHUNKS_SUPPORTED; j++)
+		sbl_header.supported_chunks[j] = chunk_supported_sbl[j];
+
+	scsc_log_collector_write((char *)&sbl_header, sizeof(struct scsc_log_sbl_header), 1);
+
+	if (buffer == TO_FILE) {
+		/* Sync file from filesystem to physical media */
+		ret = vfs_fsync(log_status.fp, 0);
+		if (ret < 0) {
+			pr_err("sync file error, error = %d\n", ret);
+			goto exit;
+		}
+	}
+
+	sbl_is_valid = true;
+exit:
+	if (buffer == TO_FILE) {
+		/* close file before return */
+		if (!IS_ERR(log_status.fp))
+			filp_close(log_status.fp, current->files);
+
+		/* restore previous address limit */
+		set_fs(old_fs);
+	}
+
+	log_status.in_collection = false;
+
+	/* Let clients release anything acquired in collect_init. */
+	list_for_each_entry_safe(lc, next, &scsc_log_collector_list.list, list) {
+		if (lc->collect_client && lc->collect_client->collect_end)
+			lc->collect_client->collect_end(lc->collect_client);
+	}
+
+	pr_info("Calling sable collection\n");
+
+#ifdef CONFIG_SCSC_WLBTD
+	if (sbl_is_valid)
+		call_wlbtd_sable((u8)reason, reason_code);
+#endif
+	pr_info("Log collection end. Took: %lld\n", ktime_to_ns(ktime_sub(ktime_get(), start)));
+
+	mutex_unlock(&log_mutex);
+
+	return ret;
+}
+
+/* Honour the sable_collection_off switch, then collect to RAM or file. */
+static int scsc_log_collector_collect(enum scsc_log_reason reason, u16 reason_code)
+{
+	if (sable_collection_off) {
+		pr_info("Sable Log collection is currently DISABLED (sable_collection_off=Y).\n");
+		pr_info("Ignoring incoming Sable Collection request with Reason=%d.\n", reason);
+		return -1;
+	}
+
+	return __scsc_log_collector_collect(reason, reason_code,
+					    collect_to_ram ? TO_RAM : TO_FILE);
+}
+
+/**
+ * Queue an asynchronous log collection on the collector workqueue.
+ *
+ * Non-panic requests are rejected while a collection is already pending.
+ * SCSC_LOG_FW_PANIC requests instead serialise with any in-flight work,
+ * and also wait for their own collection to finish before returning, so
+ * a moredump is complete when this call ends.
+ */
+void scsc_log_collector_schedule_collection(enum scsc_log_reason reason, u16 reason_code)
+{
+
+	if (log_status.collection_workq) {
+		mutex_lock(&log_status.collection_serial);
+		pr_info("Log collection Schedule");
+
+		/* Serialize with previous work if the reason is a FW panic */
+		if (reason == SCSC_LOG_FW_PANIC)
+			flush_work(&log_status.collect_work);
+		else if (atomic_read(&in_collection)) {
+			pr_info("Log collection %s reason_code 0x%x rejected. Collection already scheduled\n",
+				scsc_get_trigger_str((int)reason), reason_code);
+			mutex_unlock(&log_status.collection_serial);
+			return;
+		}
+		/* Parameters consumed by collection_worker(). */
+		log_status.collect_reason = reason;
+		log_status.reason_code = reason_code;
+		if (!queue_work(log_status.collection_workq, &log_status.collect_work)) {
+			pr_info("Log collection %s reason_code 0x%x queue_work error\n",
+				scsc_get_trigger_str((int)reason), reason_code);
+			mutex_unlock(&log_status.collection_serial);
+			return;
+		}
+		atomic_set(&in_collection, 1);
+		pr_info("Log collection Scheduled");
+
+		/* If dumping a FW panic (i.e. collecting a moredump), we need
+		 * to wait for the collection to finish before returning.
+		 */
+		if (reason == SCSC_LOG_FW_PANIC)
+			flush_work(&log_status.collect_work);
+
+		mutex_unlock(&log_status.collection_serial);
+
+	} else {
+		pr_err("Log Collection Workqueue NOT available...aborting scheduled collection.\n");
+	}
+}
+EXPORT_SYMBOL(scsc_log_collector_schedule_collection);
+
+/*
+ * Cache the FAPI version string (truncated to SCSC_LOG_FAPI_VERSION_SIZE)
+ * so it can be embedded in the SBL header of future collections.
+ *
+ * NOTE(review): @buf is annotated __user but is copied with memcpy();
+ * callers appear to pass kernel buffers — confirm the annotation, or use
+ * copy_from_user() if userspace pointers can reach here.
+ */
+void scsc_log_collector_write_fapi(char __user *buf, size_t len)
+{
+	if (len > SCSC_LOG_FAPI_VERSION_SIZE)
+		len = SCSC_LOG_FAPI_VERSION_SIZE;
+	memcpy(log_status.fapi_ver, buf, len);
+}
+EXPORT_SYMBOL(scsc_log_collector_write_fapi);
+
+/* Record for the SBL header whether an observer was present at collection time. */
+void scsc_log_collector_is_observer(bool observer)
+{
+	log_status.observer_present = observer;
+}
+EXPORT_SYMBOL(scsc_log_collector_is_observer);
+
+MODULE_DESCRIPTION("SCSC Log collector");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <scsc/scsc_log_collector.h>
+#include "scsc_log_collector_mmap.h"
+
+#define DEVICE_NAME "scsc_log_collector"
+#define N_MINORS 1
+
+struct class *scsc_log_collector_class;
+struct cdev scsc_log_collector_dev[N_MINORS];
+dev_t dev_num;
+
+/* open() hook: no per-open state to set up; the trace aids debugging. */
+static int scsc_log_collector_mmap_open(struct inode *inode, struct file *filp)
+{
+	pr_info("scsc_log_collector_mmap_open\n");
+	return 0;
+}
+
+/* release() hook: nothing to tear down; the trace aids debugging. */
+static int scsc_log_collector_release(struct inode *inode, struct file *filp)
+{
+	pr_info("scsc_log_collector_release\n");
+	return 0;
+}
+
+/*
+ * Map the vmalloc'ed collection buffer into userspace.
+ *
+ * The requested (offset, size) window must lie within
+ * SCSC_LOG_COLLECT_MAX_SIZE. Since a vmalloc area is not physically
+ * contiguous, each page is translated with vmalloc_to_pfn() and remapped
+ * individually.
+ */
+static int scsc_log_collector_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long page, pos;
+	unsigned char *buf;
+
+	/* Reject any window that extends past the collection buffer. */
+	if (size > SCSC_LOG_COLLECT_MAX_SIZE)
+		return -EINVAL;
+	if (offset > SCSC_LOG_COLLECT_MAX_SIZE - size)
+		return -EINVAL;
+
+	buf = scsc_log_collector_get_buffer();
+	if (!buf) {
+		pr_err("No buffer mapped\n");
+		return -ENOMEM;
+	}
+
+	pos = (unsigned long)buf + offset;
+
+	pr_info("scsc_log_collector_mmap size:%lu offset %ld\n", size, offset);
+
+	/* Remap one page at a time; a partial final page is mapped whole. */
+	while (size > 0) {
+		page = vmalloc_to_pfn((void *)pos);
+		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+			return -EAGAIN;
+
+		start += PAGE_SIZE;
+		pos += PAGE_SIZE;
+		if (size > PAGE_SIZE)
+			size -= PAGE_SIZE;
+		else
+			size = 0;
+	}
+
+	return 0;
+}
+
+static const struct file_operations scsc_log_collector_mmap_fops = {
+ .owner = THIS_MODULE,
+ .open = scsc_log_collector_mmap_open,
+ .mmap = scsc_log_collector_mmap,
+ .release = scsc_log_collector_release,
+};
+
+/**
+ * Create the scsc_log_collector character device used to mmap the
+ * collection buffer into userspace.
+ *
+ * Returns 0 on success or a negative errno. (Previously both error paths
+ * fell through to "return 0", silently reporting success.)
+ */
+int scsc_log_collector_mmap_create(void)
+{
+	struct device *dev;
+	int i;
+	int ret;
+	dev_t curr_dev;
+
+	/* Request the kernel for N_MINOR devices */
+	ret = alloc_chrdev_region(&dev_num, 0, N_MINORS, "scsc_log_collector");
+	if (ret) {
+		pr_err("alloc_chrdev_region failed");
+		return ret;
+	}
+
+	/* Create a class : appears at /sys/class */
+	scsc_log_collector_class = class_create(THIS_MODULE, "scsc_log_collector_class");
+	if (IS_ERR(scsc_log_collector_class)) {
+		ret = PTR_ERR(scsc_log_collector_class);
+		goto error_class;
+	}
+
+	/* Initialize and create each of the device(cdev) */
+	for (i = 0; i < N_MINORS; i++) {
+		/* Associate the cdev with a set of file_operations */
+		cdev_init(&scsc_log_collector_dev[i], &scsc_log_collector_mmap_fops);
+
+		ret = cdev_add(&scsc_log_collector_dev[i], dev_num, 1);
+		if (ret)
+			pr_err("cdev_add failed");
+
+		scsc_log_collector_dev[i].owner = THIS_MODULE;
+		/* Build up the current device number. To be used further */
+		dev = device_create(scsc_log_collector_class, NULL, dev_num, NULL, "scsc_log_collector_%d", i);
+		if (IS_ERR(dev)) {
+			/* A failed minor is skipped; overall init still succeeds. */
+			pr_err("device_create failed");
+			ret = PTR_ERR(dev);
+			cdev_del(&scsc_log_collector_dev[i]);
+			continue;
+		}
+		curr_dev = MKDEV(MAJOR(dev_num), MINOR(dev_num) + i);
+	}
+
+	return 0;
+
+error_class:
+	unregister_chrdev_region(dev_num, N_MINORS);
+	return ret;
+}
+
+/*
+ * Undo scsc_log_collector_mmap_create(): remove the device node, cdevs,
+ * class and chrdev region. Always returns 0.
+ */
+int scsc_log_collector_mmap_destroy(void)
+{
+	int i;
+
+	device_destroy(scsc_log_collector_class, dev_num);
+	for (i = 0; i < N_MINORS; i++)
+		cdev_del(&scsc_log_collector_dev[i]);
+	class_destroy(scsc_log_collector_class);
+	unregister_chrdev_region(dev_num, N_MINORS);
+	return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef SCSC_LOG_COLLECT_MMAP_H
+#define SCSC_LOG_COLLECT_MMAP_H
+
+int scsc_log_collector_mmap_create(void);
+int scsc_log_collector_mmap_destroy(void);
+#endif /* SCSC_LOG_COLLECT_MMAP_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_log_collector.h>
+#include "scsc_log_collector_proc.h"
+
+static struct proc_dir_entry *procfs_dir;
+
+/* Generic proc open: stash the PDE data pointer for the read/write handlers. */
+static int log_collect_procfs_open_file_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = LOG_COLLECT_PDE_DATA(inode);
+	return 0;
+}
+
+LOG_COLLECT_PROCFS_RW_FILE_OPS(trigger_collection);
+
+/* Reading the proc file just reports "OK" to confirm the node is alive. */
+static ssize_t log_collect_procfs_trigger_collection_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char out[128];
+	int written;
+
+	/* Avoid unused parameter error */
+	(void)file;
+
+	written = scnprintf(out, sizeof(out), "%s\n", "OK");
+
+	return simple_read_from_buffer(user_buf, count, ppos, out, written);
+}
+
+/*
+ * Userspace trigger: write '1' for a user-requested collection, '2' for a
+ * dumpstate/dumpsys collection.
+ *
+ * NOTE(review): a count other than 2 (digit + newline) is only logged,
+ * not rejected — the first byte is still examined. Confirm this leniency
+ * is intentional before tightening.
+ */
+static ssize_t log_collect_procfs_trigger_collection_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char val;
+
+	/* check that only one digit is passed */
+	if (count != 2)
+		pr_err("%s: Incorrect argument length\n", __func__);
+
+	if (copy_from_user(&val, user_buf, 1))
+		return -EFAULT;
+
+	if (val == '1') {
+		pr_info("%s: Userland has triggered log collection\n", __func__);
+		scsc_log_collector_schedule_collection(SCSC_LOG_USER, SCSC_LOG_USER_REASON_PROC);
+	} else if (val == '2') {
+		pr_info("%s: Dumpstate/dumpsys has triggered log collection\n", __func__);
+		scsc_log_collector_schedule_collection(SCSC_LOG_DUMPSTATE, SCSC_LOG_DUMPSTATE_REASON);
+	} else {
+		pr_err("%s: Incorrect argument\n", __func__);
+	}
+	return count;
+}
+
+static const char *procdir = "driver/scsc_log_collect";
+
+#define LOG_COLLECT_DIRLEN 128
+
+/*
+ * Create /proc/driver/scsc_log_collect and its trigger_collection file.
+ * Returns 0 on success, -EINVAL if the directory could not be created.
+ */
+int scsc_log_collect_proc_create(void)
+{
+	char dir[LOG_COLLECT_DIRLEN];
+	struct proc_dir_entry *parent;
+
+	(void)snprintf(dir, sizeof(dir), "%s", procdir);
+	parent = proc_mkdir(dir, NULL);
+	if (parent) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+		/* Older procfs exposes the pde directly; clear its private data. */
+		parent->data = NULL;
+#endif
+		procfs_dir = parent;
+
+		LOG_COLLECT_PROCFS_ADD_FILE(NULL, trigger_collection, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+	} else {
+		pr_err("failed to create /proc dir\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Remove the trigger file and the /proc/driver/scsc_log_collect directory. */
+void scsc_log_collect_proc_remove(void)
+{
+	if (procfs_dir) {
+		char dir[LOG_COLLECT_DIRLEN];
+
+		LOG_COLLECT_PROCFS_REMOVE_FILE(trigger_collection, procfs_dir);
+		(void)snprintf(dir, sizeof(dir), "%s", procdir);
+		remove_proc_entry(dir, NULL);
+		procfs_dir = NULL;
+	}
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/*
+ * Chip Manager /proc interface
+ */
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/seq_file.h>
+
+#ifndef SCSC_LOG_COLLECT_PROC_H
+#define SCSC_LOG_COLLECT_PROC_H
+
+#ifndef AID_MX
+#define AID_MX 0444
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define LOG_COLLECT_PDE_DATA(inode) PDE_DATA(inode)
+#else
+#define LOG_COLLECT_PDE_DATA(inode) (PDE(inode)->data)
+#endif
+
+#define LOG_COLLECT_PROCFS_RW_FILE_OPS(name) \
+ static ssize_t log_collect_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
+ static ssize_t log_collect_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
+ static const struct file_operations log_collect_procfs_ ## name ## _fops = { \
+ .read = log_collect_procfs_ ## name ## _read, \
+ .write = log_collect_procfs_ ## name ## _write, \
+ .open = log_collect_procfs_open_file_generic, \
+ .llseek = generic_file_llseek \
+ }
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define LOG_COLLECT_PROCFS_SET_UID_GID(_entry) \
+ do { \
+ kuid_t proc_kuid = KUIDT_INIT(AID_MX); \
+ kgid_t proc_kgid = KGIDT_INIT(AID_MX); \
+ proc_set_user(_entry, proc_kuid, proc_kgid); \
+ } while (0)
+#else
+#define LOG_COLLECT_PROCFS_SET_UID_GID(entry) \
+ do { \
+ (entry)->uid = AID_MX; \
+ (entry)->gid = AID_MX; \
+ } while (0)
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#define LOG_COLLECT_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &log_collect_procfs_ ## name ## _fops, _sdev); \
+ LOG_COLLECT_PROCFS_SET_UID_GID(entry); \
+ } while (0)
+#else
+#define LOG_COLLECT_PROCFS_ADD_FILE(_sdev, name, parent, mode) \
+ do { \
+ struct proc_dir_entry *entry; \
+ entry = create_proc_entry(# name, mode, parent); \
+ if (entry) { \
+ entry->proc_fops = &log_collect_procfs_ ## name ## _fops; \
+ entry->data = _sdev; \
+ LOG_COLLECT_PROCFS_SET_UID_GID(entry); \
+ } \
+ } while (0)
+#endif
+
+#define LOG_COLLECT_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)
+
+int scsc_log_collect_proc_create(void);
+void scsc_log_collect_proc_remove(void);
+
+#endif /* SCSC_LOG_COLLECT_PROC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_LOGRING_COMMON_H__
+#define __SCSC_LOGRING_COMMON_H__
+
+enum scsc_log_level {
+ SCSC_MIN_DBG = 0,
+ SCSC_EMERG = SCSC_MIN_DBG,
+ SCSC_ALERT,
+ SCSC_CRIT,
+ SCSC_ERR,
+ SCSC_WARNING,
+ SCSC_NOTICE,
+ SCSC_INFO,
+ SCSC_DEBUG,
+ SCSC_DBG1 = SCSC_DEBUG, /* 7 */
+ SCSC_DBG2,
+ SCSC_DBG3,
+ SCSC_DBG4, /* 10 */
+ SCSC_FULL_DEBUG
+};
+
+#define SCSC_SOH 0x01
+#define DEFAULT_DBGLEVEL SCSC_INFO /* KERN_INFO */
+#define DEFAULT_DROPLEVEL SCSC_FULL_DEBUG /* DBG4 + 1 */
+#define DEFAULT_ALL_DISABLED -1
+#define DEFAULT_DROP_ALL 0
+#define DEFAULT_REDIRECT_DROPLVL SCSC_DEBUG
+#define DEFAULT_NO_REDIRECT 0
+#define DEFAULT_TBUF_SZ 4096
+
+/**
+ * Nested macros needed to force expansion of 'defval'
+ * before stringification takes place. Allows for ONE level
+ * of indirection specifying params.
+ */
+#define SCSC_MODPARAM_DESC(kparam, descr, eff, defval) \
+ __SCSC_MODPARAM_DESC(kparam, descr, eff, defval)
+
+#define __SCSC_MODPARAM_DESC(kparam, descr, eff, defval) \
+ MODULE_PARM_DESC(kparam, " "descr " Effective @"eff " default=" # defval ".")
+
+
+#endif /* __SCSC_LOGRING_COMMON_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2016-2017 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+
+#include <scsc/scsc_mx.h>
+#include "scsc_logring_main.h"
+#include "scsc_logring_debugfs.h"
+
+/* Max whole records fetched from the ring in a single read() call;
+ * 0 means "as many as fit in the double buffer".
+ */
+static int scsc_max_records_per_read = SCSC_DEFAULT_MAX_RECORDS_PER_READ;
+module_param(scsc_max_records_per_read, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(scsc_max_records_per_read,
+		   "Number of records a reader can try to get in a shot. 0 is infinite",
+		   "run-time", SCSC_DEFAULT_MAX_RECORDS_PER_READ);
+
+/* Size of each reader's private double buffer, allocated at open() */
+static int scsc_double_buffer_sz = DEFAULT_TBUF_SZ;
+module_param(scsc_double_buffer_sz, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(scsc_double_buffer_sz,
+		   "Determines the size of the per-reader allocted double buffer.",
+		   "run-time", DEFAULT_TBUF_SZ);
+
+/**
+ * BIG NOTE on DOUBLE BUFFERING.
+ *
+ * In order to extract data from the ring buffer, protected by spinlocks,
+ * to user space we use a double buffer: data is so finally copied to
+ * userspace from a temporary double buffer, after having copied into it
+ * ALL the desired content and after all the spinlocks have been released.
+ * In order to avoid use of an additional mutex to protect such temporary
+ * buffer from multiple readers access we use a oneshot throwaway buffer
+ * dedicated to each reader and allocated at opening time.
+ * The most straightforward way to do this thing would have been to simply
+ * allocate such buffer inside the read method and throw it away on exit:
+ * this is what underlying printk mechanism does via a simple kmalloc.
+ * BUT we decided INSTEAD to use this buffer ALSO as a sort of caching
+ * area for each reader in order to cope with under-sized user read-request;
+ * basically no matter what the user has asked in term of size of the read
+ * request we'll ALWAYS RETRIEVE multiple of whole records from the ring,
+ * one record being the minimum internal ring-read-request this way.
+ * So no matter if the user ask for a few bytes, less than the next record
+ * size, we'll retrieve ONE WHOLE record from the ring into the double buffer:
+ * this way on the next read request we'll have already a cached copy of the
+ * record and we could deal with it inside the read callback without the
+ * need to access the ring anymore for such record.
+ * The main reason for this is that if we had instead accessed the ring and
+ * retrieved ONLY a fraction of the record, on the next request we could NOT
+ * be able to provide the remaining part of the record because, being the ring
+ * an overwriting buffer, it could have wrap in the meantime and we could have
+ * simply lost that data: this condition would have lead us to return to
+ * user partial truncated records when we hit this overwrap condition.
+ * Following instead the approach of WHOLE records retrieval we can instead be
+ * sure to always retrieve fully correct records, despite being vulnerable
+ * anyway to loss of data (whole records) while reading if fast writers
+ * overwrite our data. (since we'll never ever want to slow down and starve a
+ * writer.)
+ */
+static struct dentry *scsc_debugfs_root;
+static atomic_t scsc_debugfs_root_refcnt;
+static char *global_fmt_string = "%s";
+
+/**
+ * Generic open/close calls to use with every logring debugfs file.
+ * Any file in debugfs has an underlying associated ring buffer:
+ * opening ANY of these with O_TRUNC leads to ring_buffer truncated
+ * to zero len.
+ */
+/**
+ * debugfile_open - generic open for logring debugfs files.
+ *
+ * Allocates the per-reader state (struct scsc_ibox) plus its dedicated
+ * double buffer tbuf, storing everything in filp->private_data.
+ * Since scsc_double_buffer_sz is runtime-configurable the allocation
+ * falls back: kmalloc -> vmalloc -> kmalloc(DEFAULT_TBUF_SZ).
+ *
+ * Opening with O_TRUNC truncates the ring to zero length and fails the
+ * open with -EACCES (files are conceptually read-only); all per-reader
+ * state is released again on that path because ->release() is never
+ * called for a failed open.
+ */
+static int debugfile_open(struct inode *ino, struct file *filp)
+{
+	struct scsc_ibox *i = NULL;
+
+	if (!filp->private_data) {
+		i = kzalloc(sizeof(*i), GFP_KERNEL);
+		if (!i)
+			return -ENOMEM; /* was -EFAULT: allocation failure */
+		i->rb = ino->i_private;
+		filp->private_data = i;
+	} else {
+		i = filp->private_data;
+	}
+	/* tbuf sz is now runtime-configurable so try a few fallback methods */
+	i->tbuf = kmalloc(scsc_double_buffer_sz, GFP_KERNEL);
+	/* Making sure we fallback to a safe size DEFAULT_TBUF_SZ */
+	if (!i->tbuf) {
+		i->tbuf = vmalloc(scsc_double_buffer_sz);
+		pr_err("LogRing: FAILED tbuf allocation of %d bytes...retried vmalloc()...\n",
+		       scsc_double_buffer_sz);
+		if (!i->tbuf) {
+			scsc_double_buffer_sz = DEFAULT_TBUF_SZ;
+			pr_err("LogRing: FAILED tbuf vmalloc...using DEFAULT %d bytes size.\n",
+			       scsc_double_buffer_sz);
+			i->tbuf = kmalloc(scsc_double_buffer_sz, GFP_KERNEL);
+			if (!i->tbuf) {
+				pr_err("LogRing: FAILED DEFINITELY allocation...aborting\n");
+				/* don't leave a dangling pointer behind */
+				filp->private_data = NULL;
+				kfree(i);
+				return -ENOMEM;
+			}
+		} else {
+			i->tbuf_vm = true;
+		}
+	}
+	i->tsz = scsc_double_buffer_sz;
+	pr_info("LogRing: Allocated per-reader tbuf of %d bytes\n",
+		scsc_double_buffer_sz);
+	/* Truncate when attempting to write RO files samlog and samsg */
+	if (filp->f_flags & (O_WRONLY | O_RDWR) &&
+	    filp->f_flags & O_TRUNC) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&i->rb->lock, flags);
+		scsc_ring_truncate(i->rb);
+		raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+		pr_info("LogRing Truncated to zerolen\n");
+		/* Failing the open means ->release() never runs: free the
+		 * per-reader state here or it leaks on every O_TRUNC open.
+		 */
+		if (i->tbuf_vm)
+			vfree(i->tbuf);
+		else
+			kfree(i->tbuf);
+		filp->private_data = NULL;
+		kfree(i);
+		return -EACCES;
+	}
+	return 0;
+}
+
+/* Generic release: frees the per-reader double buffer (with the
+ * allocator that produced it), any ring snapshot taken at open time,
+ * and finally the scsc_ibox itself.
+ */
+static int debugfile_release(struct inode *ino, struct file *filp)
+{
+	struct scsc_ibox *i = filp->private_data;
+
+	if (!i)
+		return -EFAULT;
+	/* tbuf may come from kmalloc or vmalloc: match the free call */
+	if (i->tbuf_vm)
+		vfree(i->tbuf);
+	else
+		kfree(i->tbuf);
+	i->tbuf = NULL;
+
+	/* Were we using a snapshot ? Free it.*/
+	if (i->saved_live_rb) {
+		vfree(i->rb->buf);
+		kfree(i->rb);
+	}
+	/* Being paranoid... */
+	filp->private_data = NULL;
+	kfree(i);
+	return 0;
+}
+
+/**
+ * Initialize references for subsequent cached reads: in fact if
+ * data retrieved from the ring was more than the count-bytes required by
+ * the caller of this read, we can keep such data stored in tbuf and provide
+ * it to this same reader on its next read-call.
+ *
+ * @i: contains references useful to this reader
+ * @retrieved_bytes: how many bytes have been stored in tbuf
+ * @count: a pointer to the count bytes required by this reader
+ * for this call. We'll manipulate this to return an
+ * appropriate number of bytes.
+ */
+static inline
+size_t init_cached_read(struct scsc_ibox *i,
+			size_t retrieved_bytes, size_t *count)
+{
+	if (retrieved_bytes > *count) {
+		/* Ring gave us more than requested: stash the surplus in
+		 * tbuf so the next read() is served from cache.
+		 */
+		i->t_off = *count;
+		i->t_used = retrieved_bytes - *count;
+		i->cached_reads += *count;
+	} else {
+		/* Everything fits: shrink count to what was retrieved */
+		*count = retrieved_bytes;
+	}
+
+	/* A freshly filled tbuf is always consumed from offset zero */
+	return 0;
+}
+
+/**
+ * Here we'll serve to user space the next available chunk of
+ * record directly from the tbuf double buffer without
+ * accessing the ring anymore.
+ *
+ * @i: contains references useful to this reader
+ * @count: a pointer to the count bytes required by this reader
+ * for this call. We'll manipulate this to return an
+ * appropriate number of bytes.
+ */
+static inline
+size_t process_cached_read_data(struct scsc_ibox *i, size_t *count)
+{
+	size_t chunk_start = i->t_off;
+
+	if (*count < i->t_used) {
+		/* More cached data remains beyond this chunk */
+		i->t_off += *count;
+		i->t_used -= *count;
+		i->cached_reads += *count;
+	} else {
+		/* Last cached chunk: hand back what's left and reset */
+		*count = i->t_used;
+		i->t_off = 0;
+		i->t_used = 0;
+	}
+
+	return chunk_start;
+}
+
+/**
+ * This file operation read from the ring using common routines, starting its
+ * read from head: in other words it immediately blocks waiting for some data to
+ * arrive. As soon as some data arrives and head moves away, the freshly
+ * available data is returned to userspace up to the required size , and this
+ * call goes back to sleeping waiting for more data.
+ *
+ * NOTE
+ * ----
+ * The need to copy_to_user imposes the use of a temp buffer tbuf which is used
+ * as a double buffer: being allocated to this reader on open() we do NOT need
+ * any additional form of mutual exclusion.
+ * Moreover we use such buffer here as an area to cache the retrieved records:
+ * if the retrieved record size is bigger than the count bytes required by user
+ * we'll return less data at first and then deal with the following requests
+ * pumping data directly from the double buffer without accessing the ring.
+ */
+static ssize_t samsg_read(struct file *filp, char __user *ubuf,
+			  size_t count, loff_t *f_pos)
+{
+	unsigned long flags;
+	loff_t current_head = 0;
+	struct scsc_ibox *i = NULL;
+	size_t off = 0;
+	size_t retrieved_bytes = 0;
+
+	/* NOTE(review): -ENOMEM kept for userspace compatibility, though
+	 * -EFAULT would be the conventional errno for a bad user buffer.
+	 */
+	if (!filp->private_data || !access_ok(VERIFY_WRITE, ubuf, count))
+		return -ENOMEM;
+	if (filp->f_flags & O_NONBLOCK)
+		return -EAGAIN;
+	/* open() assures us that this private data is certainly non-NULL */
+	i = filp->private_data;
+	if (!i->t_used) {
+		raw_spin_lock_irqsave(&i->rb->lock, flags);
+		current_head = *f_pos ? i->f_pos : i->rb->head;
+		/* Sleep (lock dropped) until a writer moves the head */
+		while (current_head == i->rb->head) {
+			raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+			if (wait_event_interruptible(i->rb->wq,
+						     current_head != i->rb->head))
+				return -ERESTARTSYS;
+			raw_spin_lock_irqsave(&i->rb->lock, flags);
+		}
+		/* Fixed garbled "&curren;t_head" -> "&current_head" */
+		retrieved_bytes = read_next_records(i->rb,
+						    scsc_max_records_per_read,
+						    &current_head, i->tbuf, i->tsz);
+		/* We MUST keep track of the last known READ record
+		 * in order to keep going from the same place on the next
+		 * read call coming from the same userspace process...
+		 * ...this could NOT necessarily be the HEAD at the end of this
+		 * read if we asked for few records.
+		 * So we must annotate the really last read record got back,
+		 * returned in current_head, inside i->f_pos in order to have a
+		 * reference for the next read call by the same reader.
+		 */
+		i->f_pos = current_head;
+		raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+		/* ANYWAY we could have got back more data from the ring (ONLY
+		 * multiple of whole records) than required by userspace.
+		 */
+		off = init_cached_read(i, retrieved_bytes, &count);
+	} else {
+		/* Serve this read-request directly from cached data without
+		 * accessing the ring
+		 */
+		off = process_cached_read_data(i, &count);
+	}
+	if (copy_to_user(ubuf, i->tbuf + off, count))
+		return -EFAULT;
+	*f_pos += count;
+	return count;
+}
+
+/**
+ * This seek op assumes let userspace believe that it's dealing with a regular
+ * plain file, so f_pos is modified accordingly (linearly till the maximum
+ * number SCSC_LOGGED_BYTES is reached); in fact it's up to
+ * the read/write ops to properly 'cast' this value to a modulus value as
+ * required by the underlying ring buffer. This operates only on samlog.
+ */
+/* Linear llseek over the logged-bytes span; read/write ops translate
+ * the resulting f_pos into ring-modulus positions. Used by samlog only.
+ */
+loff_t debugfile_llseek(struct file *filp, loff_t off, int whence)
+{
+	struct scsc_ibox *i = filp->private_data;
+	loff_t target, limit;
+	unsigned long flags;
+
+	if (!i)
+		return -EFAULT;
+	/* Upper bound: last valid linear offset into the logged bytes */
+	raw_spin_lock_irqsave(&i->rb->lock, flags);
+	limit = SCSC_LOGGED_BYTES(i->rb) >= 1 ?
+		SCSC_LOGGED_BYTES(i->rb) - 1 : 0;
+	raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+	switch (whence) {
+	case 0: /* SEEK_SET */
+		target = off;
+		break;
+	case 1: /* SEEK_CUR */
+		target = filp->f_pos + off;
+		break;
+	case 2: /* SEEK_END */
+		target = limit;
+		break;
+	default: /* can't happen */
+		return -EINVAL;
+	}
+	/* Clamp forward seeks; reject seeks before the start */
+	if (target > limit)
+		target = limit;
+	if (target < 0)
+		return -EINVAL;
+	filp->f_pos = target;
+	return target;
+}
+
+/* samsg open: regular logring open plus (when MXLOGGER is built in)
+ * registering a "LOGRING" observer with the core service.
+ */
+static int samsg_open(struct inode *ino, struct file *filp)
+{
+	int ret = debugfile_open(ino, filp);
+
+#ifdef CONFIG_SCSC_MXLOGGER
+	if (ret == 0)
+		scsc_service_register_observer(NULL, "LOGRING");
+#endif
+	return ret;
+}
+
+/* samsg release: drop the observer registered at open time, then do
+ * the regular logring cleanup.
+ */
+static int samsg_release(struct inode *ino, struct file *filp)
+{
+#ifdef CONFIG_SCSC_MXLOGGER
+	scsc_service_unregister_observer(NULL, "LOGRING");
+#endif
+	return debugfile_release(ino, filp);
+}
+
+/* /sys/kernel/debug/scsc/<ring>/samsg: blocking live-follow reader */
+const struct file_operations samsg_fops = {
+	.owner = THIS_MODULE,
+	.open = samsg_open,
+	.read = samsg_read,
+	.release = samsg_release,
+};
+
+/**
+ * This is the samlog open and it is used by samlog read-process to grab
+ * a per-reader dedicated static snapshot of the ring, in order to be able
+ * then to fetch records from a static immutable image of the ring buffer,
+ * without the need to stop the ring in the meantime.
+ * This way samlog dumps exactly a snapshot at-a-point-in-time of the ring
+ * limiting at the same time the contention with the writers: ring is
+ * 'spinlocked' ONLY durng the snapshot phase.
+ * Being the snapshot buffer big as the ring we use a vmalloc to limit
+ * possibility of failures (especially on non-AOSP builds).
+ * If such vmalloc allocation fails we then quietly keep on using the old
+ * method that reads directly from the live buffer.
+ */
+/* samlog open: after the regular per-reader setup, try to take a
+ * point-in-time snapshot of the ring into a vmalloc'ed buffer so the
+ * dump does not contend with writers. On any snapshot failure we fall
+ * back quietly to reading the live ring.
+ */
+static int debugfile_open_snapshot(struct inode *ino, struct file *filp)
+{
+	int ret;
+
+	ret = debugfile_open(ino, filp);
+	/* if regular debug_file_open has gone through, attempt snapshot */
+	if (!ret) {
+		/* filp && filp->private_data NON-NULL by debugfile_open */
+		void *snap_buf;
+		size_t snap_sz;
+		struct scsc_ibox *i = filp->private_data;
+
+		/* This is read-only...no spinlocking needed */
+		snap_sz = i->rb->bsz + i->rb->ssz;
+		/* Allocate here to minimize lock time... */
+		snap_buf = vmalloc(snap_sz);
+		if (snap_buf) {
+			struct scsc_ring_buffer *snap_rb;
+			char snap_name[RNAME_SZ] = "snapshot";
+			unsigned long flags;
+
+			snprintf(snap_name, RNAME_SZ, "%s_snap", i->rb->name);
+			/* lock while snapshot is taken */
+			raw_spin_lock_irqsave(&i->rb->lock, flags);
+			snap_rb = scsc_ring_get_snapshot(i->rb, snap_buf, snap_sz, snap_name);
+			raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+			if (snap_rb) {
+				/* save real ring and swap into the snap_shot;
+				 * saved_live_rb also flags "using snapshot"
+				 * for release() and samlog_read().
+				 */
+				i->saved_live_rb = i->rb;
+				i->rb = snap_rb;
+			} else {
+				vfree(snap_buf);
+				snap_buf = NULL;
+			}
+		}
+
+		/* Warns when not possible to use a snapshot */
+		if (!snap_buf)
+			pr_warn("LogRing: no snapshot available, samlog dump from live ring.\n");
+	}
+
+	return ret;
+}
+
+/**
+ * samlog_read - Reads from the ring buffer the required number of bytes
+ * starting from the start of the ring. It is usually used to dump the
+ * whole ring buffer taking a snapshot at-a-point-in-time.
+ *
+ * If it had been possible at opening time to take a static snapshot of
+ * the ring, this routine will fetch records from such a snapshot without
+ * the need to lock the ring; if instead no snapshot was taken it reverts
+ * to the usual locked-access pattern.
+ *
+ * This function as a usual .read fops returns the number of bytes
+ * effectively read, and this could:
+ * - equal the required count bytes
+ * - be less than the required bytes if: less data WAS available
+ * (since we only GOT whole records at time from the ring)
+ * Returning less bytes usually triggers the userapp to reissue the syscall
+ * to complete the read up to the originaly required number of bytes.
+ * - be ZERO if NO more data available..this causes the reading userspace
+ * process to stop reading usually.
+ */
+/* See the block comment above: dumps whole records from the tail of
+ * either the per-reader snapshot (lock-free) or the live ring (locked),
+ * caching surplus bytes in tbuf between calls.
+ */
+static ssize_t samlog_read(struct file *filp, char __user *ubuf,
+			   size_t count, loff_t *f_pos)
+{
+	struct scsc_ibox *i = NULL;
+	size_t off = 0, retrieved_bytes = 0;
+
+	if (!filp->private_data)
+		return -EFAULT;
+	i = filp->private_data;
+	if (!i->t_used) {
+		unsigned long flags;
+
+		/* Lock ONLY if NOT using a snapshot; 'flags' is written and
+		 * read under the same condition, so it is never used
+		 * uninitialized.
+		 */
+		if (!i->saved_live_rb)
+			raw_spin_lock_irqsave(&i->rb->lock, flags);
+		/* On first read from userspace f_pos will be ZERO and in this
+		 * case we'll want to trigger a read from the very beginning of
+		 * ring (tail) and set i->f_pos accordingly.
+		 * Internal RING API returns in i->f_pos the next record to
+		 * read: when reading process has wrapped over you'll get back
+		 * an f_pos ZERO as next read.
+		 */
+		if (*f_pos == 0)
+			i->f_pos = i->rb->tail;
+		retrieved_bytes = read_next_records(i->rb,
+						    scsc_max_records_per_read,
+						    &i->f_pos, i->tbuf, i->tsz);
+		if (!i->saved_live_rb)
+			raw_spin_unlock_irqrestore(&i->rb->lock, flags);
+		/* ANYWAY we could have got back more data from the ring (ONLY
+		 * multiple of whole records) than required by userspace.
+		 */
+		off = init_cached_read(i, retrieved_bytes, &count);
+	} else {
+		/* Serve this read-request directly from cached data without
+		 * accessing the ring
+		 */
+		off = process_cached_read_data(i, &count);
+	}
+	if (copy_to_user(ubuf, i->tbuf + off, count))
+		return -EFAULT;
+	*f_pos += count;
+	return count;
+}
+
+/* /sys/kernel/debug/scsc/<ring>/samlog: seekable whole-ring dump,
+ * served from a point-in-time snapshot when available.
+ */
+const struct file_operations samlog_fops = {
+	.owner = THIS_MODULE,
+	.open = debugfile_open_snapshot,
+	.read = samlog_read,
+	.llseek = debugfile_llseek,
+	.release = debugfile_release,
+};
+
+/* stat open: stash the ring (inode private data) as file private data */
+static int statfile_open(struct inode *ino, struct file *filp)
+{
+	if (!filp->private_data)
+		filp->private_data = ino->i_private;
+	return filp->private_data ? 0 : -EFAULT;
+}
+
+/* stat release: nothing was allocated at open, just sanity-check */
+static int statfile_release(struct inode *ino, struct file *filp)
+{
+	if (!filp->private_data)
+		filp->private_data = ino->i_private;
+	return filp->private_data ? 0 : -EFAULT;
+}
+
+
+/* A simple read to dump some stats about the ring buffer. */
+static ssize_t statfile_read(struct file *filp, char __user *ubuf,
+			     size_t count, loff_t *f_pos)
+{
+	unsigned long flags;
+	size_t bsz = 0;
+	loff_t head = 0, tail = 0, used = 0, max_chunk = 0, logged = 0,
+	       last = 0;
+	int slen = 0, records = 0, wraps = 0, oos = 0;
+	u64 written = 0;
+	char statstr[STATSTR_SZ] = {};
+	struct scsc_ring_buffer *rb = filp->private_data;
+
+	/* Take a consistent copy of all counters under the ring lock */
+	raw_spin_lock_irqsave(&rb->lock, flags);
+	bsz = rb->bsz;
+	head = rb->head;
+	tail = rb->tail;
+	last = rb->last;
+	written = rb->written;
+	records = rb->records;
+	wraps = rb->wraps;
+	oos = rb->oos;
+	used = SCSC_USED_BYTES(rb);
+	max_chunk = SCSC_RING_FREE_BYTES(rb);
+	logged = SCSC_LOGGED_BYTES(rb);
+	raw_spin_unlock_irqrestore(&rb->lock, flags);
+
+	/* Format outside the lock, then serve positionally from statstr */
+	slen = snprintf(statstr, STATSTR_SZ,
+			"sz:%zd used:%lld free:%lld logged:%lld records:%d\nhead:%lld tail:%lld last:%lld written:%lld wraps:%d oos:%d\n",
+			bsz, used, max_chunk, logged, records,
+			head, tail, last, written, wraps, oos);
+	if (slen >= 0 && *f_pos < slen) {
+		/* Clamp to the unread remainder of the formatted string */
+		count = (count <= slen - *f_pos) ? count : (slen - *f_pos);
+		if (copy_to_user(ubuf, statstr + *f_pos, count))
+			return -EFAULT;
+		*f_pos += count;
+	} else
+		count = 0;	/* EOF once the whole string was consumed */
+	return count;
+}
+
+/* /sys/kernel/debug/scsc/<ring>/stat: one-shot ring statistics dump */
+const struct file_operations stat_fops = {
+	.owner = THIS_MODULE,
+	.open = statfile_open,
+	.read = statfile_read,
+	.release = statfile_release,
+};
+
+/**
+ * This implement samwrite interface to INJECT log lines into the ring from
+ * user space. The support, thought as an aid for testing mainly, is
+ * minimal, so the interface allows only for simple %s format string injection.
+ */
+/**
+ * samwritefile_open - allocate the per-opener write configuration.
+ *
+ * Sets up a zeroed write_config (so wc->buf is NUL-filled) with the
+ * fixed "%s" format string and the staging-buffer size.
+ */
+static int samwritefile_open(struct inode *ino, struct file *filp)
+{
+	if (!filp->private_data) {
+		struct write_config *wc =
+			kzalloc(sizeof(struct write_config), GFP_KERNEL);
+		if (!wc)
+			return -ENOMEM; /* was -EFAULT: allocation failure */
+		wc->fmt = global_fmt_string;
+		wc->buf_sz = SAMWRITE_BUFSZ;
+		filp->private_data = wc;
+	}
+	return 0;
+}
+
+
+/* Free the write_config allocated in samwritefile_open() */
+static int samwritefile_release(struct inode *ino, struct file *filp)
+{
+	struct write_config *wc = filp->private_data;
+
+	filp->private_data = NULL;
+	kfree(wc);
+	return 0;
+}
+
+/**
+ * User injected string content is pushed to the ring as simple %s fmt string
+ * content using the TEST_ME tag. Default debuglevel (6 - INFO)will be used.
+ */
+static ssize_t samwritefile_write(struct file *filp, const char __user *ubuf,
+				  size_t count, loff_t *f_pos)
+{
+	ssize_t written_bytes = 0;
+	struct write_config *wc = filp->private_data;
+
+	if (wc) {
+		/* wc->buf is null terminated as it's kzalloc'ed */
+		count = count < wc->buf_sz ? count : wc->buf_sz - 1;
+		/* -EFAULT (not -EINVAL) is the conventional errno for an
+		 * unreadable user buffer.
+		 */
+		if (copy_from_user(wc->buf, ubuf, count))
+			return -EFAULT;
+		/* Re-terminate: a previous, longer write would otherwise
+		 * leave stale trailing bytes in the message.
+		 */
+		wc->buf[count] = '\0';
+		written_bytes = scsc_printk_tag(NO_ECHO_PRK, TEST_ME,
+						wc->fmt, wc->buf);
+		/* Handle the case where the message is filtered out by
+		 * droplevel filters...zero is returned BUT we do NOT want
+		 * the applications to keep trying...it's NOT a transient
+		 * error...at least till someone changes droplevels.
+		 */
+		if (!written_bytes) {
+			pr_info("samwrite wrote 0 bytes...droplevels filtering ?\n");
+			return -EPERM;
+		}
+		/* Returned written bytes should be normalized since
+		 * lower level functions returns the number of bytes
+		 * effectively written including the prepended header
+		 * file... IF, when required to write n, we return n+X,
+		 * some applications could behave badly trying to access
+		 * file at *fpos=n+X next time, ending up in a regular
+		 * EFAULT error anyway.
+		 */
+		if (written_bytes > count)
+			written_bytes = count;
+		*f_pos += written_bytes;
+	}
+
+	return written_bytes;
+}
+
+/* /sys/kernel/debug/scsc/<ring>/samwrite: write-only log injection */
+const struct file_operations samwrite_fops = {
+	.owner = THIS_MODULE,
+	.open = samwritefile_open,
+	.write = samwritefile_write,
+	.release = samwritefile_release,
+};
+
+/**
+ * Initializes debugfs support build the proper debugfs file dentries:
+ * - entries in debugfs are created under /sys/kernel/debugfs/scsc/@name/
+ * - using the provided rb ring buffer as underlying ring buffer, storing it
+ * into inode ptr for future retrieval (via open)
+ * - registers the proper fops
+ */
+/* NOTE(review): the NULL-checks on debugfs_create_dir/file below are
+ * valid on the kernel versions this driver targets; newer kernels
+ * return ERR_PTR values instead — confirm against the target tree.
+ */
+void __init *samlog_debugfs_init(const char *root_name, void *rb)
+{
+	struct scsc_debugfs_info *di = NULL;
+
+	if (!rb || !root_name)
+		return NULL;
+	di = kmalloc(sizeof(*di), GFP_KERNEL);
+	if (!di)
+		return NULL;
+	if (!scsc_debugfs_root) {
+		/* I could have multiple rings debugfs entry all rooted at
+		 * the same /sys/kernel/debug/scsc/...so such entry could
+		 * already exist.
+		 */
+		scsc_debugfs_root = debugfs_create_dir(SCSC_DEBUGFS_ROOT, NULL);
+		if (!scsc_debugfs_root)
+			goto no_root;
+	}
+	di->rootdir = scsc_debugfs_root;
+	di->bufdir = debugfs_create_dir(root_name, di->rootdir);
+	if (!di->bufdir)
+		goto no_buf;
+	/* refcnt counts rings sharing the common scsc root dir */
+	atomic_inc(&scsc_debugfs_root_refcnt);
+	/* Saving ring ref @rb to Inode */
+	di->samsgfile = debugfs_create_file(SCSC_SAMSG_FNAME, 0444,
+					    di->bufdir, rb, &samsg_fops);
+	if (!di->samsgfile)
+		goto no_samsg;
+	/* Saving ring ref @rb to Inode */
+	di->samlogfile = debugfs_create_file(SCSC_SAMLOG_FNAME, 0444,
+					     di->bufdir, rb, &samlog_fops);
+	if (!di->samlogfile)
+		goto no_samlog;
+	di->statfile = debugfs_create_file(SCSC_STAT_FNAME, 0444,
+					   di->bufdir, rb, &stat_fops);
+	if (!di->statfile)
+		goto no_statfile;
+
+	di->samwritefile = debugfs_create_file(SCSC_SAMWRITE_FNAME, 0220,
+					       di->bufdir, NULL,
+					       &samwrite_fops);
+	if (!di->samwritefile)
+		goto no_samwrite;
+
+	pr_info("Samlog Debugfs Initialized\n");
+	return di;
+
+/* Unwind in strict reverse order of creation */
+no_samwrite:
+	debugfs_remove(di->statfile);
+no_statfile:
+	debugfs_remove(di->samlogfile);
+no_samlog:
+	debugfs_remove(di->samsgfile);
+no_samsg:
+	debugfs_remove(di->bufdir);
+	atomic_dec(&scsc_debugfs_root_refcnt);
+no_buf:
+	/* Drop the shared root only when no other ring still uses it */
+	if (!atomic_read(&scsc_debugfs_root_refcnt)) {
+		debugfs_remove(scsc_debugfs_root);
+		scsc_debugfs_root = NULL;
+	}
+no_root:
+	kfree(di);
+	return NULL;
+}
+
+/**
+ * samlog_debugfs_exit - tear down one ring's debugfs subtree.
+ * @priv: address of the pointer returned by samlog_debugfs_init().
+ */
+void __exit samlog_debugfs_exit(void **priv)
+{
+	struct scsc_debugfs_info **di = NULL;
+
+	if (!priv)
+		return;
+	di = (struct scsc_debugfs_info **)priv;
+	if (di && *di) {
+		/* Remove ONLY this ring's subtree: recursively removing
+		 * the whole scsc root here would also destroy other rings'
+		 * entries and lead to a double debugfs_remove() of the
+		 * root below.
+		 */
+		debugfs_remove_recursive((*di)->bufdir);
+		atomic_dec(&scsc_debugfs_root_refcnt);
+		/* Last ring out also drops the shared scsc root dir */
+		if (!atomic_read(&scsc_debugfs_root_refcnt)) {
+			debugfs_remove(scsc_debugfs_root);
+			scsc_debugfs_root = NULL;
+		}
+		kfree(*di);
+		*di = NULL;
+	}
+	pr_info("Debugfs Cleaned Up\n");
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2016-2017 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+
+#ifndef _SCSC_LOGRING_DEBUGFS_H_
+#define _SCSC_LOGRING_DEBUGFS_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <scsc/scsc_logring.h>
+#include "scsc_logring_ring.h"
+
+#define STATSTR_SZ 256
+#define SCSC_DEBUGFS_ROOT "scsc"
+#define SCSC_SAMSG_FNAME "samsg"
+#define SCSC_SAMLOG_FNAME "samlog"
+#define SCSC_STAT_FNAME "stat"
+#define SCSC_SAMWRITE_FNAME "samwrite"
+
+#define SAMWRITE_BUFSZ 2048
+#define SCSC_DEFAULT_MAX_RECORDS_PER_READ 1
+
+/* Per-reader state hung off filp->private_data by debugfile_open() */
+struct scsc_ibox {
+	struct scsc_ring_buffer *rb;            /* ring being read (live or snapshot) */
+	char *tbuf;                             /* per-reader double buffer */
+	size_t tsz;                             /* tbuf size in bytes */
+	bool tbuf_vm;                           /* true when tbuf came from vmalloc */
+	size_t t_off;                           /* offset of cached data inside tbuf */
+	size_t t_used;                          /* cached bytes not yet returned */
+	size_t cached_reads;                    /* bytes served from cache (accounting) */
+	loff_t f_pos;                           /* last/next record position in the ring */
+	struct scsc_ring_buffer *saved_live_rb; /* live ring saved while rb is a snapshot */
+};
+
+/* Dentries created by samlog_debugfs_init() for one ring buffer */
+struct scsc_debugfs_info {
+	struct dentry *rootdir;      /* shared /sys/kernel/debug/scsc root */
+	struct dentry *bufdir;       /* this ring's subdirectory */
+	struct dentry *samsgfile;    /* blocking live reader */
+	struct dentry *samlogfile;   /* whole-ring dump */
+	struct dentry *statfile;     /* ring statistics */
+	struct dentry *samwritefile; /* log-injection entry */
+};
+
+/* Per-opener state for the samwrite injection file */
+struct write_config {
+	char *fmt;                /* format passed to scsc_printk_tag ("%s") */
+	size_t buf_sz;            /* usable size of buf (SAMWRITE_BUFSZ) */
+	char buf[SAMWRITE_BUFSZ]; /* staging area for the user payload */
+};
+
+void *samlog_debugfs_init(const char *name, void *rb) __init;
+void samlog_debugfs_exit(void **priv) __exit;
+
+#endif /* _SCSC_LOGRING_DEBUGFS_H_ */
--- /dev/null
+/********************************************************************************
+ *
+ * Copyright (c) 2016 - 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ********************************************************************************/
+#include "scsc_logring_main.h"
+#include "scsc_logring_ring.h"
+#include "scsc_logring_debugfs.h"
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+/* Global module parameters */
+static int cached_enable;                 /* last 'enable' value observed */
+static int enable = DEFAULT_ENABLE_LOGRING;
+static bool initialized;                  /* set once the ring is up */
+#ifndef CONFIG_SCSC_STATIC_RING_SIZE
+static int ringsize = DEFAULT_RING_BUFFER_SZ; /* must be a power of two */
+#else
+static int ringsize = CONFIG_SCSC_STATIC_RING_SIZE;
+#endif
+static int prepend_header = DEFAULT_ENABLE_HEADER;
+static int default_dbglevel = DEFAULT_DBGLEVEL;
+/* Per-family/global droplevels: messages at level >= droplevel are dropped */
+static int scsc_droplevel_wlbt = DEFAULT_DROPLEVEL;
+static int scsc_droplevel_all = DEFAULT_ALL_DISABLED;
+static int scsc_droplevel_atomic = DEFAULT_DROPLEVEL;
+static int scsc_redirect_to_printk_droplvl = DEFAULT_REDIRECT_DROPLVL;
+static int scsc_reset_all_droplevels_to;
+
+/* The single ring instance owned by this module */
+struct scsc_ring_buffer *the_ringbuf;
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+static int logring_collect(struct scsc_log_collector_client *collect_client, size_t size);
+
+struct scsc_log_collector_client logring_collect_client = {
+ .name = "Logring",
+ .type = SCSC_LOG_CHUNK_LOGRING,
+ .collect_init = NULL,
+ .collect = logring_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+#endif
+
+/* Module init and ring buffer allocation */
+/* Module init and ring buffer allocation */
+int __init samlog_init(void)
+{
+	struct scsc_ring_buffer *rb = NULL;
+
+	pr_debug("Samlog Init\n");
+	if (!enable) {
+		pr_info("Samlog: module disabled...NOT starting.\n");
+		return 0;
+	}
+	if (the_ringbuf != NULL) {
+		pr_info("Samlog: Ring:%s already initialized...skipping.\n",
+			the_ringbuf->name);
+		return 0;
+	}
+	/* Check for power of two compliance with std Kernel func */
+	if (!is_power_of_2(ringsize)) {
+		ringsize = DEFAULT_RING_BUFFER_SZ;
+		pr_info("Samlog: scsc_logring.ringsize MUST be power-of-two. Using default: %d\n",
+			ringsize);
+	}
+	rb = alloc_ring_buffer(ringsize, BASE_SPARE_SZ, DEBUGFS_RING0_ROOT);
+
+	if (!rb)
+		goto tfail;
+	/* DebugFS failure is non-fatal: logging still works without it */
+	rb->private = samlog_debugfs_init(rb->name, rb);
+	if (!rb->private)
+		pr_info("Samlog: Cannot Initialize DebugFS.\n");
+#ifndef CONFIG_SCSC_STATIC_RING_SIZE
+	pr_info("scsc_logring:: Allocated ring buffer of size %zd bytes.\n",
+		rb->bsz);
+#else
+	pr_info("scsc_logring:: Allocated STATIC ring buffer of size %zd bytes.\n",
+		rb->bsz);
+#endif
+	the_ringbuf = rb;
+	initialized = true;
+	pr_info("Samlog Loaded.\n");
+	scsc_printk_tag(FORCE_PRK, NO_TAG, "Samlog Started.\n");
+	/* NOTE(review): %p is paired with virt_to_phys() results here
+	 * (phys_addr_t, not a pointer) — format/argument mismatch worth
+	 * confirming; %pa with &addr would be the canonical form.
+	 */
+	scsc_printk_tag(NO_ECHO_PRK, NO_TAG,
+			"Allocated ring buffer of size %zd bytes at %p - %p\n",
+			rb->bsz, virt_to_phys((const volatile void *)rb->buf),
+			virt_to_phys((const volatile void *)(rb->buf + rb->bsz)));
+	scsc_printk_tag(NO_ECHO_PRK, NO_TAG,
+			"Using THROWAWAY DYNAMIC per-reader buffer.\n");
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	scsc_log_collector_register_client(&logring_collect_client);
+#endif
+	return 0;
+
+tfail:
+	pr_err("Samlog Initialization Failed. LogRing disabled.\n");
+	return -ENODEV;
+}
+
+/**
+ * samlog_exit - module teardown.
+ *
+ * Order matters: the log-collector client is unregistered BEFORE the
+ * ring buffer is freed, so the collector cannot call back into memory
+ * that has just been released (the original code unregistered after
+ * free_ring_buffer(), leaving a use-after-free window).
+ */
+void __exit samlog_exit(void)
+{
+	if (!the_ringbuf) {
+		pr_err("Cannot UNLOAD ringbuf\n");
+		return;
+	}
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	scsc_log_collector_unregister_client(&logring_collect_client);
+#endif
+	if (the_ringbuf && the_ringbuf->private)
+		samlog_debugfs_exit(&the_ringbuf->private);
+	initialized = false;
+	free_ring_buffer(the_ringbuf);
+	the_ringbuf = NULL;
+	pr_info("Samlog Unloaded\n");
+}
+
+module_init(samlog_init);
+module_exit(samlog_exit);
+
+module_param(enable, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(enable, "Enable/Disable scsc_logring as a whole.",
+		   "load-time", DEFAULT_ENABLE_LOGRING);
+
+#ifndef CONFIG_SCSC_STATIC_RING_SIZE
+module_param(ringsize, int, S_IRUGO);
+SCSC_MODPARAM_DESC(ringsize,
+		   "Ring buffer size. Available ONLY if ring is NOT statically allocated.",
+		   "run-time", DEFAULT_RING_BUFFER_SZ);
+#endif
+
+module_param(prepend_header, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(prepend_header, "Enable/disable header prepending. ",
+		   "run-time", DEFAULT_ENABLE_HEADER);
+
+module_param(default_dbglevel, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(default_dbglevel,
+		   "The default debug level assigned to messages when NOT explicitly specified.",
+		   "run-time", DEFAULT_DBGLEVEL);
+
+module_param(scsc_droplevel_wlbt, int, S_IRUGO | S_IWUSR);
+/* defval fixed: the variable is initialized to DEFAULT_DROPLEVEL, not
+ * DEFAULT_DROP_ALL, so advertise the real default in the description.
+ */
+SCSC_MODPARAM_DESC(scsc_droplevel_wlbt,
+		   "Droplevels for the 'no_tag/wlbt' tag family.", "run-time",
+		   DEFAULT_DROPLEVEL);
+
+module_param(scsc_droplevel_all, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(scsc_droplevel_all,
+		   "This droplevel overrides any other, if set to a value >= 0",
+		   "run-time", DEFAULT_ALL_DISABLED);
+
+module_param(scsc_redirect_to_printk_droplvl, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(scsc_redirect_to_printk_droplvl,
+		   "Echoing messages up to the specified loglevel also to kernel standard ring buffer.",
+		   "run-time", DEFAULT_REDIRECT_DROPLVL);
+
+module_param(scsc_droplevel_atomic, int, S_IRUGO | S_IWUSR);
+/* defval fixed: the initializer is DEFAULT_DROPLEVEL; the previously
+ * advertised DEFAULT_KEEP_ALL did not match the actual default.
+ */
+SCSC_MODPARAM_DESC(scsc_droplevel_atomic,
+		   "This droplevel is applied to logmsg emitted in atomic context.",
+		   "run-time", DEFAULT_DROPLEVEL);
+
+/**
+ * This macro code has been freely 'inspired' (read copied) from the
+ * slsi_ original/old debug.c implementaion: it takes care to register
+ * a new TAG with the debug subsystem including a module param to
+ * dynamically configure the droplevel for the specified tag at runtime.
+ *
+ * ALL debug is ON by default at FULL DEBUG LEVEL.
+ * There are THREE intended exceptions to this that instead stick to
+ * level 7: PLAT_MIF MBULK and MXLOG_TRANS tags.
+ *
+ * NOTE_CREATING_TAGS: when adding a tag here REMEMBER to add it also
+ * where required, taking care to maintain the same ordering.
+ * (Search 4 NOTE_CREATING_TAGS)
+ */
+
+/**
+ * This macros define module parameters used to configure per-subsystem
+ * filtering, and assign a default DROPLEVEL.
+ *
+ * NOTE THAT the default DROPLEVEL indicates the default value FROM which
+ * the system will start to discard messages, so as an example:
+ *
+ * - if set to SCSC_DBG1 (7) every message with a dbglvl >= 7 will be discarded
+ * - if set to SCSC_FULL_DEBUG (11) every message is logged
+ *
+ * Name, Default DROPLEVEL , FilterTag
+ */
+ADD_DEBUG_MODULE_PARAM(binary, SCSC_FULL_DEBUG, BINARY);
+ADD_DEBUG_MODULE_PARAM(bin_wifi_ctrl_rx, SCSC_FULL_DEBUG, BIN_WIFI_CTRL_RX);
+ADD_DEBUG_MODULE_PARAM(bin_wifi_data_rx, SCSC_FULL_DEBUG, BIN_WIFI_DATA_RX);
+ADD_DEBUG_MODULE_PARAM(bin_wifi_ctrl_tx, SCSC_FULL_DEBUG, BIN_WIFI_CTRL_TX);
+ADD_DEBUG_MODULE_PARAM(bin_wifi_data_tx, SCSC_FULL_DEBUG, BIN_WIFI_DATA_TX);
+ADD_DEBUG_MODULE_PARAM(wifi_rx, SCSC_FULL_DEBUG, WIFI_RX);
+ADD_DEBUG_MODULE_PARAM(wifi_tx, SCSC_FULL_DEBUG, WIFI_TX);
+ADD_DEBUG_MODULE_PARAM(bt_common, SCSC_FULL_DEBUG, BT_COMMON);
+ADD_DEBUG_MODULE_PARAM(bt_h4, SCSC_FULL_DEBUG, BT_H4);
+ADD_DEBUG_MODULE_PARAM(bt_fw, SCSC_FULL_DEBUG, BT_FW);
+ADD_DEBUG_MODULE_PARAM(bt_rx, SCSC_FULL_DEBUG, BT_RX);
+ADD_DEBUG_MODULE_PARAM(bt_tx, SCSC_FULL_DEBUG, BT_TX);
+/* NOTE(review): besides the three exceptions listed above (PLAT_MIF,
+ * MBULK, MXLOG_TRANS), several other tags below also default to lower
+ * droplevels (SCSC_DBG1/SCSC_DBG4) -- verify against the intent of the
+ * block comment.
+ */
+ADD_DEBUG_MODULE_PARAM(cpktbuff, SCSC_DBG4, CPKTBUFF);
+ADD_DEBUG_MODULE_PARAM(fw_load, SCSC_FULL_DEBUG, FW_LOAD);
+ADD_DEBUG_MODULE_PARAM(fw_panic, SCSC_FULL_DEBUG, FW_PANIC);
+ADD_DEBUG_MODULE_PARAM(gdb_trans, SCSC_DBG1, GDB_TRANS);
+ADD_DEBUG_MODULE_PARAM(mif, SCSC_FULL_DEBUG, MIF);
+ADD_DEBUG_MODULE_PARAM(clk20, SCSC_FULL_DEBUG, CLK20);
+ADD_DEBUG_MODULE_PARAM(clk20_test, SCSC_FULL_DEBUG, CLK20_TEST);
+ADD_DEBUG_MODULE_PARAM(fm, SCSC_FULL_DEBUG, FM);
+ADD_DEBUG_MODULE_PARAM(fm_test, SCSC_FULL_DEBUG, FM_TEST);
+ADD_DEBUG_MODULE_PARAM(mx_file, SCSC_FULL_DEBUG, MX_FILE);
+ADD_DEBUG_MODULE_PARAM(mx_fw, SCSC_FULL_DEBUG, MX_FW);
+ADD_DEBUG_MODULE_PARAM(mx_sampler, SCSC_FULL_DEBUG, MX_SAMPLER);
+ADD_DEBUG_MODULE_PARAM(mxlog_trans, SCSC_DBG1, MXLOG_TRANS);
+ADD_DEBUG_MODULE_PARAM(mxman, SCSC_FULL_DEBUG, MXMAN);
+ADD_DEBUG_MODULE_PARAM(mxman_test, SCSC_FULL_DEBUG, MXMAN_TEST);
+ADD_DEBUG_MODULE_PARAM(mxmgt_trans, SCSC_FULL_DEBUG, MXMGT_TRANS);
+ADD_DEBUG_MODULE_PARAM(mx_mmap, SCSC_DBG1, MX_MMAP);
+ADD_DEBUG_MODULE_PARAM(mx_proc, SCSC_FULL_DEBUG, MX_PROC);
+ADD_DEBUG_MODULE_PARAM(panic_mon, SCSC_FULL_DEBUG, PANIC_MON);
+ADD_DEBUG_MODULE_PARAM(pcie_mif, SCSC_FULL_DEBUG, PCIE_MIF);
+ADD_DEBUG_MODULE_PARAM(plat_mif, SCSC_DBG1, PLAT_MIF);
+ADD_DEBUG_MODULE_PARAM(kic_common, SCSC_FULL_DEBUG, KIC_COMMON);
+ADD_DEBUG_MODULE_PARAM(wlbtd, SCSC_FULL_DEBUG, WLBTD);
+ADD_DEBUG_MODULE_PARAM(wlog, SCSC_DEBUG, WLOG);
+ADD_DEBUG_MODULE_PARAM(lerna, SCSC_FULL_DEBUG, LERNA);
+ADD_DEBUG_MODULE_PARAM(mxcfg, SCSC_FULL_DEBUG, MX_CFG);
+/* WLAN sub-tags, compiled in only with CONFIG_SCSC_DEBUG_COMPATIBILITY. */
+#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
+ADD_DEBUG_MODULE_PARAM(init_deinit, SCSC_FULL_DEBUG, SLSI_INIT_DEINIT);
+ADD_DEBUG_MODULE_PARAM(netdev, SCSC_DBG4, SLSI_NETDEV);
+ADD_DEBUG_MODULE_PARAM(cfg80211, SCSC_FULL_DEBUG, SLSI_CFG80211);
+ADD_DEBUG_MODULE_PARAM(mlme, SCSC_FULL_DEBUG, SLSI_MLME);
+ADD_DEBUG_MODULE_PARAM(summary_frames, SCSC_FULL_DEBUG, SLSI_SUMMARY_FRAMES);
+ADD_DEBUG_MODULE_PARAM(hydra, SCSC_FULL_DEBUG, SLSI_HYDRA);
+ADD_DEBUG_MODULE_PARAM(tx, SCSC_FULL_DEBUG, SLSI_TX);
+ADD_DEBUG_MODULE_PARAM(rx, SCSC_FULL_DEBUG, SLSI_RX);
+ADD_DEBUG_MODULE_PARAM(udi, SCSC_DBG4, SLSI_UDI);
+ADD_DEBUG_MODULE_PARAM(wifi_fcq, SCSC_DBG4, SLSI_WIFI_FCQ);
+ADD_DEBUG_MODULE_PARAM(hip, SCSC_FULL_DEBUG, SLSI_HIP);
+ADD_DEBUG_MODULE_PARAM(hip_init_deinit, SCSC_FULL_DEBUG, SLSI_HIP_INIT_DEINIT);
+ADD_DEBUG_MODULE_PARAM(hip_fw_dl, SCSC_FULL_DEBUG, SLSI_HIP_FW_DL);
+ADD_DEBUG_MODULE_PARAM(hip_sdio_op, SCSC_FULL_DEBUG, SLSI_HIP_SDIO_OP);
+ADD_DEBUG_MODULE_PARAM(hip_ps, SCSC_FULL_DEBUG, SLSI_HIP_PS);
+ADD_DEBUG_MODULE_PARAM(hip_th, SCSC_FULL_DEBUG, SLSI_HIP_TH);
+ADD_DEBUG_MODULE_PARAM(hip_fh, SCSC_FULL_DEBUG, SLSI_HIP_FH);
+ADD_DEBUG_MODULE_PARAM(hip_sig, SCSC_FULL_DEBUG, SLSI_HIP_SIG);
+ADD_DEBUG_MODULE_PARAM(func_trace, SCSC_FULL_DEBUG, SLSI_FUNC_TRACE);
+ADD_DEBUG_MODULE_PARAM(test, SCSC_FULL_DEBUG, SLSI_TEST);
+ADD_DEBUG_MODULE_PARAM(src_sink, SCSC_FULL_DEBUG, SLSI_SRC_SINK);
+ADD_DEBUG_MODULE_PARAM(fw_test, SCSC_DBG4, SLSI_FW_TEST);
+ADD_DEBUG_MODULE_PARAM(rx_ba, SCSC_FULL_DEBUG, SLSI_RX_BA);
+ADD_DEBUG_MODULE_PARAM(tdls, SCSC_FULL_DEBUG, SLSI_TDLS);
+ADD_DEBUG_MODULE_PARAM(gscan, SCSC_FULL_DEBUG, SLSI_GSCAN);
+ADD_DEBUG_MODULE_PARAM(mbulk, SCSC_DBG1, SLSI_MBULK);
+ADD_DEBUG_MODULE_PARAM(smapper, SCSC_DBG1, SLSI_SMAPPER);
+ADD_DEBUG_MODULE_PARAM(flowc, SCSC_FULL_DEBUG, SLSI_FLOWC);
+#endif
+/* Must remain last: scsc_droplevels[] below relies on this ordering. */
+ADD_DEBUG_MODULE_PARAM(test_me, SCSC_FULL_DEBUG, TEST_ME);
+
+/* Extend this list when you add ADD_DEBUG_MODULE_PARAM, above.
+ * You must also extend "enum scsc_logring_tags"
+ */
+/* Indexed by tag value (enum scsc_logring_tags); the order here must
+ * match that enum and the tagstr[] table in scsc_logring_ring.c
+ * (see NOTE_CREATING_TAGS).
+ */
+int *scsc_droplevels[] = {
+ &scsc_droplevel_binary,
+ &scsc_droplevel_bin_wifi_ctrl_rx,
+ &scsc_droplevel_bin_wifi_data_rx,
+ &scsc_droplevel_bin_wifi_ctrl_tx,
+ &scsc_droplevel_bin_wifi_data_tx,
+ &scsc_droplevel_wlbt,
+ &scsc_droplevel_wifi_rx,
+ &scsc_droplevel_wifi_tx,
+ &scsc_droplevel_bt_common,
+ &scsc_droplevel_bt_h4,
+ &scsc_droplevel_bt_fw,
+ &scsc_droplevel_bt_rx,
+ &scsc_droplevel_bt_tx,
+ &scsc_droplevel_cpktbuff,
+ &scsc_droplevel_fw_load,
+ &scsc_droplevel_fw_panic,
+ &scsc_droplevel_gdb_trans,
+ &scsc_droplevel_mif,
+ &scsc_droplevel_clk20,
+ &scsc_droplevel_clk20_test,
+ &scsc_droplevel_fm,
+ &scsc_droplevel_fm_test,
+ &scsc_droplevel_mx_file,
+ &scsc_droplevel_mx_fw,
+ &scsc_droplevel_mx_sampler,
+ &scsc_droplevel_mxlog_trans,
+ &scsc_droplevel_mxman,
+ &scsc_droplevel_mxman_test,
+ &scsc_droplevel_mxmgt_trans,
+ &scsc_droplevel_mx_mmap,
+ &scsc_droplevel_mx_proc,
+ &scsc_droplevel_panic_mon,
+ &scsc_droplevel_pcie_mif,
+ &scsc_droplevel_plat_mif,
+ &scsc_droplevel_kic_common,
+ &scsc_droplevel_wlbtd,
+ &scsc_droplevel_wlog,
+ /* NOTE(review): lerna and mxcfg have no matching strings in the
+ * tagstr[] table of scsc_logring_ring.c -- verify both tables agree
+ * with enum scsc_logring_tags before relying on tag names.
+ */
+ &scsc_droplevel_lerna,
+ &scsc_droplevel_mxcfg,
+#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
+ &scsc_droplevel_init_deinit,
+ &scsc_droplevel_netdev,
+ &scsc_droplevel_cfg80211,
+ &scsc_droplevel_mlme,
+ &scsc_droplevel_summary_frames,
+ &scsc_droplevel_hydra,
+ &scsc_droplevel_tx,
+ &scsc_droplevel_rx,
+ &scsc_droplevel_udi,
+ &scsc_droplevel_wifi_fcq,
+ &scsc_droplevel_hip,
+ &scsc_droplevel_hip_init_deinit,
+ &scsc_droplevel_hip_fw_dl,
+ &scsc_droplevel_hip_sdio_op,
+ &scsc_droplevel_hip_ps,
+ &scsc_droplevel_hip_th,
+ &scsc_droplevel_hip_fh,
+ &scsc_droplevel_hip_sig,
+ &scsc_droplevel_func_trace,
+ &scsc_droplevel_test,
+ &scsc_droplevel_src_sink,
+ &scsc_droplevel_fw_test,
+ &scsc_droplevel_rx_ba,
+ &scsc_droplevel_tdls,
+ &scsc_droplevel_gscan,
+ &scsc_droplevel_mbulk,
+ &scsc_droplevel_flowc,
+ &scsc_droplevel_smapper,
+#endif
+ &scsc_droplevel_test_me, /* Must be last */
+};
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+/* Log-collector callback: copy the whole ring out through the
+ * collector while temporarily muting logring so the dump itself does
+ * not generate new traffic into the buffer being copied.
+ */
+static int logring_collect(struct scsc_log_collector_client *collect_client, size_t size)
+{
+	int saved_all_lvl;
+	int ret;
+
+	if (!the_ringbuf)
+		return 0;
+
+	/* Override every filter via the global droplevel for the
+	 * duration of the copy, then restore the previous setting.
+	 */
+	saved_all_lvl = scsc_droplevel_all;
+	scsc_droplevel_all = DEFAULT_DROP_ALL;
+
+	ret = scsc_log_collector_write(the_ringbuf->buf, the_ringbuf->bsz, 1);
+
+	scsc_droplevel_all = saved_all_lvl;
+	return ret;
+}
+#endif
+
+/* module_param setter: parse a non-negative decimal level and apply it
+ * to the generic droplevel and to every registered per-tag droplevel.
+ * Returns 0 on success or a negative errno on bad input.
+ */
+static int scsc_reset_all_droplevels_to_set_param_cb(const char *val,
+						     const struct kernel_param *kp)
+{
+	long new_lvl = 0;
+	int err, idx;
+
+	if (!val)
+		return -EINVAL;
+	err = kstrtol(val, 10, &new_lvl);
+	if (err)
+		return err;
+	if (new_lvl < 0)
+		return -EINVAL;
+	scsc_droplevel_wlbt = (int)new_lvl;
+	for (idx = 0; idx < ARRAY_SIZE(scsc_droplevels); idx++)
+		*scsc_droplevels[idx] = (int)new_lvl;
+	scsc_reset_all_droplevels_to = (int)new_lvl;
+	scsc_printk_tag(FORCE_PRK, NO_TAG,
+			KERN_INFO"Reset ALL droplevels to %d\n",
+			scsc_reset_all_droplevels_to);
+	return 0;
+}
+
+/* module_param_cb() takes a pointer to const ops; declaring the table
+ * const also lets it live in rodata (checkpatch: "struct
+ * kernel_param_ops should normally be const").
+ */
+static const struct kernel_param_ops scsc_reset_droplvl_ops = {
+	.set = scsc_reset_all_droplevels_to_set_param_cb,
+	.get = NULL,
+};
+/* Write-only (0200) knob: echo N > .../scsc_reset_all_droplevels_to */
+module_param_cb(scsc_reset_all_droplevels_to, &scsc_reset_droplvl_ops,
+		NULL, 0200);
+MODULE_PARM_DESC(scsc_reset_all_droplevels_to,
+		 "Reset ALL droplevels to the requested value. Effective @run-time.");
+
+
+/* SCSC_PRINTK API and Helpers */
+static inline int get_debug_level(const char *fmt)
+{
+ int level;
+
+ if (fmt && *fmt == SCSC_SOH && *(fmt + 1))
+ level = *(fmt + 1) - '0';
+ else
+ level = default_dbglevel;
+ return level;
+}
+
+/* Point *msg at the message body, skipping the two-byte SOH+level
+ * prefix when one is present; otherwise *msg aliases @fmt unchanged.
+ */
+static inline void drop_message_level_macro(const char *fmt, char **msg)
+{
+	bool has_marker = fmt && fmt[0] == SCSC_SOH && fmt[1];
+
+	if (!msg)
+		return;
+	*msg = has_marker ? (char *)(fmt + 2) : (char *)fmt;
+}
+
+/**
+ * The workhorse function that, given a droplevel to enforce and a pair
+ * format_string/va_list, decides what to do:
+ * - drop
+ * OR
+ * - insert into ring buffer accounting for wrapping
+ *
+ * ... then wakes up any waiting reading process
+ */
+static inline int _scsc_printk(int level, int tag,
+			       const char *fmt, va_list args)
+{
+	char *body = NULL;
+	int effective_droplvl;
+
+	if (!initialized || !enable || !fmt)
+		return 0;
+	/* A non-negative scsc_droplevel_all overrides per-tag levels. */
+	effective_droplvl = (scsc_droplevel_all >= 0) ?
+			    scsc_droplevel_all : *scsc_droplevels[tag];
+	if (level >= effective_droplvl)
+		return 0;
+	/* Strip the optional SOH+level prefix, then push the record. */
+	drop_message_level_macro(fmt, &body);
+	return push_record_string(the_ringbuf, tag, level,
+				  prepend_header, body, args);
+}
+
+/**
+ * Embeds the filtering behaviour towards std kernel ring buffer for
+ * non binary stuff, and decides what to do based on current user config.
+ * Note that printk redirect droplevel filter is now completely disjoint
+ * from normal LogRing droplevel filters.
+ */
+static inline
+void handle_klogbuf_out_string(int level, struct device *dev, int tag,
+			       const char *fmt, va_list args, int force)
+{
+	if (!IS_PRINTK_REDIRECT_ALLOWED(force, level, tag))
+		return;
+	/* With a device context use the dev_* flavour for the prefix. */
+	if (dev)
+		dev_vprintk_emit(level, dev, fmt, args);
+	else
+		vprintk_emit(0, level, NULL, 0, fmt, args);
+}
+
+/* Maps SCSC numeric debug levels onto printk level prefixes; indexed
+ * by level in handle_klogbuf_out_binary(), which clamps out-of-range
+ * levels to the last (KERN_DEBUG) entry.
+ */
+const char *map2kern[] = {
+ KERN_EMERG,
+ KERN_ALERT,
+ KERN_CRIT,
+ KERN_ERR,
+ KERN_WARNING,
+ KERN_NOTICE,
+ KERN_INFO,
+ KERN_DEBUG
+};
+
+/**
+ * Embeds the filtering behaviour towards std kernel ring buffer for
+ * BINARY stuff, and decides what to do based on current user config.
+ * Note that printk redirect droplevel filter is now completely disjoint
+ * from normal LogRing droplevel filters.
+ */
+static inline
+void handle_klogbuf_out_binary(int level, int tag, const void *start,
+			       size_t len, int force)
+{
+	if (!IS_PRINTK_REDIRECT_ALLOWED(force, level, tag))
+		return;
+	/* Clamp unknown levels onto the last map2kern entry (KERN_DEBUG). */
+	if (level < SCSC_MIN_DBG || level >= ARRAY_SIZE(map2kern))
+		level = ARRAY_SIZE(map2kern) - 1;
+	print_hex_dump(map2kern[level], SCSC_PREFIX"SCSC_HEX->|",
+		       DUMP_PREFIX_NONE, 16, 1, start, len, false);
+}
+
+/*
+ * scsc_printk - it's main API entry to the event logging mechanism. Prints
+ * the specified format string to the underlying ring_buffer, injecting
+ * timestamp and context information at the start of the line while
+ * classifying and filtering the message using the specified TAG identifier.
+ *
+ * This function assumes that you'll never write a line longer than
+ * BASE_SPARE_SZ bytes; if this limit is obeyed any input string is correctly
+ * wrapped when placed at the end of the buffer. Any longer line will be
+ * truncated.
+ * This function recognizes Kernel style debug level KERN_, checking the FIRST
+ * byte for ASCII SOH in order to recognize if some printk style kernel debug
+ * level has been specified.
+ * SO you can use KERN_INFO KERN_ERR etc etc INLINE macros to specify the
+ * desired debug level: that will be checked against the droplevel specified.
+ * If NOT specified a default debug level is assigned following what specified
+ * in module parameter default_dbglevel.
+ *
+ * It's usually NOT used directly but through the means of utility macros that
+ * can be easily compiled out in production builds.
+ */
+int scsc_printk_tag(int force, int tag, const char *fmt, ...)
+{
+	va_list ap;
+	int level, ret = 0;
+
+	/* BINARY tags are not valid on the string-logging path. */
+	if (tag < NO_TAG || tag > MAX_TAG)
+		return 0;
+	level = get_debug_level(fmt);
+	if (in_interrupt() && level >= scsc_droplevel_atomic)
+		return 0;
+	/* Optional echo to the kernel ring buffer first... */
+	va_start(ap, fmt);
+	handle_klogbuf_out_string(level, NULL, tag, fmt, ap, force);
+	va_end(ap);
+	/* ...then into the logring; varargs must be restarted since the
+	 * first consumer has exhausted them.
+	 */
+	va_start(ap, fmt);
+	ret = _scsc_printk(level, tag, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+EXPORT_SYMBOL(scsc_printk_tag);
+
+
+/**
+ * This is a variation on the main API that allows to specify loglevel
+ * by number.
+ */
+int scsc_printk_tag_lvl(int tag, int level, const char *fmt, ...)
+{
+	va_list ap;
+	int ret = 0;
+
+	/* BINARY tags are not valid on the string-logging path. */
+	if (tag < NO_TAG || tag > MAX_TAG)
+		return 0;
+	if (in_interrupt() && level >= scsc_droplevel_atomic)
+		return 0;
+	/* Echo to the kernel log (never forced on this variant)... */
+	va_start(ap, fmt);
+	handle_klogbuf_out_string(level, NULL, tag, fmt, ap, NO_FORCE_PRK);
+	va_end(ap);
+	/* ...then into the logring; varargs must be restarted. */
+	va_start(ap, fmt);
+	ret = _scsc_printk(level, tag, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+EXPORT_SYMBOL(scsc_printk_tag_lvl);
+
+
+/**
+ * This is a variation on the main API that allows to specify a
+ * struct device reference.
+ */
+int scsc_printk_tag_dev(int force, int tag, struct device *dev,
+			const char *fmt, ...)
+{
+	va_list ap;
+	int level, ret = 0;
+
+	/* BINARY tags are not valid on the string-logging path. */
+	if (tag < NO_TAG || tag > MAX_TAG)
+		return 0;
+	level = get_debug_level(fmt);
+	if (in_interrupt() && level >= scsc_droplevel_atomic)
+		return 0;
+	/* Echo to the kernel log with the device prefix... */
+	va_start(ap, fmt);
+	handle_klogbuf_out_string(level, dev, tag, fmt, ap, force);
+	va_end(ap);
+	/* ...then into the logring; varargs must be restarted. */
+	va_start(ap, fmt);
+	ret = _scsc_printk(level, tag, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+EXPORT_SYMBOL(scsc_printk_tag_dev);
+
+/**
+ * This is a variation on the main API that allows to specify a
+ * struct device reference and an explicit numerical debug level.
+ */
+int scsc_printk_tag_dev_lvl(int force, int tag, struct device *dev,
+			    int level, const char *fmt, ...)
+{
+	va_list ap;
+	int ret = 0;
+
+	/* BINARY tags are not valid on the string-logging path. */
+	if (tag < NO_TAG || tag > MAX_TAG)
+		return 0;
+	if (in_interrupt() && level >= scsc_droplevel_atomic)
+		return 0;
+	/* Echo to the kernel log with the device prefix... */
+	va_start(ap, fmt);
+	handle_klogbuf_out_string(level, dev, tag, fmt, ap, force);
+	va_end(ap);
+	/* ...then into the logring; varargs must be restarted. */
+	va_start(ap, fmt);
+	ret = _scsc_printk(level, tag, fmt, ap);
+	va_end(ap);
+	return ret;
+}
+EXPORT_SYMBOL(scsc_printk_tag_dev_lvl);
+
+/**
+ * This is a variation on the main API used to push binary blob into the ring.
+ */
+int scsc_printk_bin(int force, int tag, int dlev, const void *start, size_t len)
+{
+	/* Only BINARY tags, carrying a real payload, are accepted. */
+	if (!start || !len || tag < FIRST_BIN_TAG || tag > LAST_BIN_TAG)
+		return 0;
+	if (dlev < 0)
+		dlev = default_dbglevel;
+	if (in_interrupt() && dlev >= scsc_droplevel_atomic)
+		return 0;
+	/* Optional echo (hexdump) to the kernel ring buffer. */
+	handle_klogbuf_out_binary(dlev, tag, start, len, force);
+	/* Honour the per-tag droplevel, or the global override when the
+	 * latter is non-negative.
+	 */
+	if (!initialized || !enable ||
+	    (scsc_droplevel_all < 0 && *scsc_droplevels[tag] <= dlev) ||
+	    (scsc_droplevel_all >= 0 && scsc_droplevel_all <= dlev))
+		return 0;
+	return push_record_blob(the_ringbuf, tag, dlev,
+				prepend_header, start, len);
+}
+EXPORT_SYMBOL(scsc_printk_bin);
+
+/*
+ * This is a very basic mechanism to implement the dynamic switch
+ * for one user (currently WLAN). If multiple users are
+ * required to use the dynamic logring switch, a new registration
+ * mechanism based on requests and use_count should be implemented to avoid one service
+ * re-enabling logring when some other has requested not to do so.
+ */
+int scsc_logring_enable(bool logging_enable)
+{
+	scsc_printk_tag(FORCE_PRK, NO_TAG, "scsc_logring %s\n", logging_enable ? "enable" : "disable");
+	if (!logging_enable && enable) {
+		/* Remember this request so only a matching enable from
+		 * the same mechanism switches logging back on.
+		 */
+		cached_enable = true;
+		enable = 0;
+		scsc_printk_tag(FORCE_PRK, NO_TAG, "Logring disabled\n");
+		return 0;
+	}
+	if (logging_enable && cached_enable) {
+		cached_enable = false;
+		enable = 1;
+		scsc_printk_tag(FORCE_PRK, NO_TAG, "Logring re-enabled\n");
+		return 0;
+	}
+	scsc_printk_tag(FORCE_PRK, NO_TAG, "Ignored\n");
+	return 0;
+}
+EXPORT_SYMBOL(scsc_logring_enable);
+
+
+MODULE_DESCRIPTION("SCSC Event Logger");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/********************************************************************************
+ *
+ * Copyright (c) 2016 - 2017 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ********************************************************************************/
+
+#ifndef _SCSC_LOGRING_MAIN_H_
+#define _SCSC_LOGRING_MAIN_H_
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/log2.h>
+
+#include "scsc_logring_common.h"
+
+/**
+ * ADD_DEBUG_MODULE_PARAM - define the droplevel variable for one tag
+ * (scsc_droplevel_<tagname>) and expose it as a writable module
+ * parameter with its description.
+ */
+#define ADD_DEBUG_MODULE_PARAM(tagname, default_level, filter) \
+ int scsc_droplevel_ ## tagname = default_level; \
+ module_param(scsc_droplevel_ ## tagname, int, S_IRUGO | S_IWUSR); \
+ SCSC_MODPARAM_DESC(scsc_droplevel_ ## tagname, \
+ "Droplevels for the '" # tagname "' family.", \
+ "run-time", default_level)
+
+/* A message may be echoed to the kernel ring buffer when echo is
+ * forced (FORCE_PRK), or when it is not suppressed (NO_ECHO_PRK) and
+ * its level beats the printk-redirect droplevel.
+ */
+#define IS_PRINTK_REDIRECT_ALLOWED(ff, level, tag) \
+ ((ff) == FORCE_PRK || \
+ ((ff) != NO_ECHO_PRK && (level) < scsc_redirect_to_printk_droplvl))
+
+#endif /* _SCSC_LOGRING_MAIN_H_ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016-2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include "scsc_logring_ring.h"
+
+#ifdef CONFIG_SCSC_STATIC_RING_SIZE
+/* Compile-time ring storage; BASE_SPARE_SZ extra bytes form the spare
+ * scratch area used to stage records before insertion.
+ */
+static char a_ring[CONFIG_SCSC_STATIC_RING_SIZE + BASE_SPARE_SZ] __aligned(4);
+#endif
+
+/* How many payload bytes of a binary record get decoded to ASCII on
+ * read; -1 decodes the whole record (see binary_hexdump()).
+ */
+static int scsc_decode_binary_len = DEFAULT_BIN_DECODE_LEN;
+module_param(scsc_decode_binary_len, int, S_IRUGO | S_IWUSR);
+SCSC_MODPARAM_DESC(scsc_decode_binary_len,
+ "When reading a binary record dump these bytes-len in ASCII human readable form when reading",
+ "run-time", DEFAULT_BIN_DECODE_LEN);
+
+/*
+ * NOTE_CREATING_TAGS: when adding a tag string here REMEMBER to add
+ * it also where required, taking care to maintain the same ordering.
+ * (Search 4 NOTE_CREATING_TAGS)
+ */
+/* Human-readable tag names, indexed by the record's tag value (used by
+ * build_header() as tagstr[r->tag]).
+ */
+const char *tagstr[] = {
+ "binary",
+ "bin_wifi_ctrl_rx",
+ "bin_wifi_data_rx",
+ "bin_wifi_ctrl_tx",
+ "bin_wifi_data_tx",
+ "wlbt", /* this is the generic one...NO_TAG */
+ "wifi_rx",
+ "wifi_tx",
+ "bt_common",
+ "bt_h4",
+ "bt_fw",
+ "bt_rx",
+ "bt_tx",
+ "cpktbuff",
+ "fw_load",
+ "fw_panic",
+ "gdb_trans",
+ "mif",
+ "clk20",
+ "clk20_test",
+ "fm",
+ "fm_test",
+ "mx_file",
+ "mx_fw",
+ "mx_sampler",
+ "mxlog_trans",
+ "mxman",
+ "mxman_test",
+ "mxmgt_trans",
+ "mx_mmap",
+ "mx_proc",
+ "panic_mon",
+ "pcie_mif",
+ "plat_mif",
+ "kic_common",
+ "wlbtd",
+ "wlog",
+ /* NOTE(review): scsc_droplevels[] in scsc_logring_main.c lists
+ * lerna and mxcfg right after wlog, but no matching strings exist
+ * here -- verify both tables against enum scsc_logring_tags.
+ */
+#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
+ "init_deinit",
+ "netdev",
+ "cfg80211",
+ "mlme",
+ "summary_frames",
+ "hydra",
+ "tx",
+ "rx",
+ "udi",
+ "wifi_fcq",
+ "hip",
+ "hip_init_deinit",
+ "hip_fw_dl",
+ "hip_sdio_op",
+ "hip_ps",
+ "hip_th",
+ "hip_fh",
+ "hip_sig",
+ "func_trace",
+ "test",
+ "src_sink",
+ "fw_test",
+ "rx_ba",
+ "tdls",
+ "gscan",
+ "mbulk",
+ "flowc",
+ "smapper",
+#endif
+ "test_me"
+};
+
+/**
+ * Calculate and returns the CRC32 for the provided record and record pos.
+ * Before calculating the CRC32 the crc field is temporarily substituted
+ * with the 32 LSB record relative starting position.
+ * Assumes the rec ptr area-validity has been checked upstream in the
+ * caller chain.
+ * We SKIP the fixed blob of the SYNC field that is placed ahead of
+ * CRC field.
+ * Assumes the related ring buffer is currently atomically accessed by
+ * caller. MUST NOT SLEEP.
+ */
+static inline uint32_t get_calculated_crc(struct scsc_ring_record *rec,
+					  loff_t pos)
+{
+	uint32_t orig_crc = rec->crc;
+	uint32_t crc;
+
+	/* Seed the crc field with the record position so the checksum is
+	 * position-dependent, compute over the record skipping the fixed
+	 * SYNC blob, then restore the field.
+	 */
+	rec->crc = (uint32_t)pos;
+	crc = crc32_le(~0, (unsigned char const *)&rec->crc,
+		       SCSC_CRC_RINGREC_SZ);
+	rec->crc = orig_crc;
+	return crc;
+}
+
+/**
+ * Checks for record CRC sanity.
+ * Assumes the related ring buffer is currently atomically accessed by
+ * caller. MUST NOT SLEEP.
+ */
+/* True when the stored CRC matches the one recomputed for @pos. */
+static inline bool is_record_crc_valid(struct scsc_ring_record *rec,
+				       loff_t pos)
+{
+	return get_calculated_crc(rec, pos) == rec->crc;
+}
+
+/**
+ * Calculate the proper CRC and set it into the crc field
+ * Assumes the related ring buffer is currently atomically accessed by
+ * caller. MUST NOT SLEEP.
+ */
+static inline void finalize_record_crc(struct scsc_ring_record *rec,
+				       loff_t pos)
+{
+	if (!rec)
+		return;
+	/* Seed the crc field with the record position, then overwrite it
+	 * with the computed checksum (mirrors get_calculated_crc()).
+	 */
+	rec->crc = (uint32_t)pos;
+	rec->crc = crc32_le(~0, (unsigned char const *)&rec->crc,
+			    SCSC_CRC_RINGREC_SZ);
+}
+
+/**
+ * This function analyzes the pos provided relative to the provided
+ * ring, just to understand if it can be safely dereferenced.
+ * Assumes RING is already locked.
+ */
+static inline bool is_ring_pos_safe(struct scsc_ring_buffer *rb,
+				    loff_t pos)
+{
+	if (!rb || pos < 0 || pos > rb->bsz)
+		return false;
+	if (rb->head > rb->tail) {
+		/* Not wrapped: nothing valid lives beyond head. */
+		if (pos > rb->head)
+			return false;
+	} else if (rb->head < rb->tail) {
+		/* Wrapped: the gap (head, tail) holds no valid record. */
+		if (pos > rb->head && pos < rb->tail)
+			return false;
+	}
+	return true;
+}
+
+/**
+ * This sanitizes record header before using it.
+ * It must be in the proper area related to head and tail and
+ * the CRC must fit the header.
+ */
+static inline bool is_ring_read_pos_valid(struct scsc_ring_buffer *rb,
+					  loff_t pos)
+{
+	/* Position sanity is checked FIRST so we never dereference an
+	 * unsafe pos; CRC is checked last. Checking SYNC before CRC
+	 * would not help: in the common in-sync case the CRC must be
+	 * verified anyway.
+	 */
+	if (is_ring_pos_safe(rb, pos) &&
+	    is_record_crc_valid(SCSC_GET_REC(rb, pos), pos))
+		return true;
+	/* Account the out-of-sync event on any failure. */
+	if (rb)
+		rb->oos++;
+	return false;
+}
+
+
+/**
+ * Build a header into the provided buffer,
+ * and append the optional trail string
+ */
+static inline
+int build_header(char *buf, int blen, struct scsc_ring_record *r,
+		 const char *trail)
+{
+	struct timeval tval = ns_to_timeval(r->nsec);
+
+	/* "<lev>[   sec.usec] [cN] [ctx] [tag] :: <trail>" */
+	return scnprintf(buf, blen,
+			 "<%d>[%6lu.%06ld] [c%d] [%c] [%s] :: %s",
+			 r->lev, tval.tv_sec, tval.tv_usec,
+			 r->core, (char)r->ctx, tagstr[r->tag],
+			 trail ? trail : "");
+}
+
+
+/**
+ * We're going to overwrite something writing from the head toward the tail
+ * so we must search for the next tail far enough from head in order not to be
+ * overwritten: that will be our new tail after the wrap over.
+ */
+static inline
+loff_t find_next_tail_far_enough_from_start(struct scsc_ring_buffer *rb,
+ loff_t start, int len)
+{
+ loff_t new_tail = rb->tail;
+
+ /* Walk tail forward, dropping records, until the incoming span
+ * [start, start + len) no longer reaches it or we hit the last
+ * record of the ring.
+ */
+ while (start + len >= new_tail && new_tail != rb->last) {
+ new_tail = SCSC_GET_NEXT_REC_ENTRY_POS(rb, new_tail);
+ rb->records--;
+ }
+ /* Ran past the last record: restart the tail from the beginning.
+ * NOTE(review): records is decremented here once more even when the
+ * loop already stopped at rb->last -- confirm this matches the
+ * callers' record accounting.
+ */
+ if (start + len >= new_tail) {
+ new_tail = 0;
+ rb->records--;
+ }
+ return new_tail;
+}
+
+/**
+ * This handles the just plain append of a record to head without
+ * any need of wrapping or overwriting current tail
+ * You can provide two buffer here: the second, hbuf, is optional
+ * and will be written first. This is to account for the binary case
+ * in which the record data are written at first into the spare area
+ * (like we do with var strings, BUT then the bulk of binary data is
+ * written directly in place into the ring without double copies.
+ */
+static inline
+void scsc_ring_buffer_plain_append(struct scsc_ring_buffer *rb,
+ const char *srcbuf, int slen,
+ const char *hbuf, int hlen)
+{
+ /* empty condition is special case */
+ if (rb->records)
+ rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
+ /* Optional descriptor (hbuf) first, then the payload right after. */
+ if (hbuf)
+ memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
+ else
+ hlen = 0;
+ memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
+ /* CRC is sealed only after the whole record is in place. */
+ finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
+ rb->head);
+ rb->records++;
+ /* Track the furthest record start ever written (used on wrap). */
+ if (rb->head > rb->last)
+ rb->last = rb->head;
+}
+
+
+/**
+ * This handles the case in which appending current record must account
+ * for overwriting: this situation can happen at the end of ring if we do NOT
+ * have enough space for the current record, or in any place when the buffer
+ * has wrapped, head is before tail and there's not enough space to write
+ * between current head and tail.
+ */
+static inline
+void scsc_ring_buffer_overlap_append(struct scsc_ring_buffer *rb,
+ const char *srcbuf, int slen,
+ const char *hbuf, int hlen)
+{
+ /* If the record still fits between head and the end of the buffer
+ * just advance head; otherwise wrap: restart head/tail from zero
+ * and remember the old head as the last record of this lap.
+ */
+ if (rb->head < rb->tail &&
+ slen + hlen < rb->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head))
+ rb->head += SCSC_GET_SLOT_LEN(rb, rb->head);
+ else {
+ rb->last = rb->head;
+ rb->head = 0;
+ rb->tail = 0;
+ rb->wraps++;
+ }
+ /* Push tail forward past every record the new one will overwrite. */
+ rb->tail =
+ find_next_tail_far_enough_from_start(rb, rb->head, slen + hlen);
+ /* Optional descriptor (hbuf) first, then the payload right after. */
+ if (hbuf)
+ memcpy(SCSC_GET_HEAD_PTR(rb), hbuf, hlen);
+ else
+ hlen = 0;
+ memcpy(SCSC_GET_HEAD_PTR(rb) + hlen, srcbuf, slen);
+ /* CRC is sealed only after the whole record is in place. */
+ finalize_record_crc((struct scsc_ring_record *)SCSC_GET_HEAD_PTR(rb),
+ rb->head);
+ rb->records++;
+ if (rb->head > rb->last)
+ rb->last = rb->head;
+}
+
+
+/**
+ * This uses the spare area to prepare the record descriptor and to expand
+ * the format string into the spare area in order to get the final length of
+ * the whole record+data. Data is pre-pended with a header representing the
+ * data hold in binary form in the record descriptor.
+ * This data duplication helps when we'll read back a record holding string
+ * data, we won't have to build the header on the fly during the read.
+ */
+static inline
+int tag_writer_string(char *spare, int tag, int lev,
+ int prepend_header, const char *msg_head, va_list args)
+{
+ int written;
+ char bheader[SCSC_HBUF_LEN] = {};
+ struct scsc_ring_record *rrec;
+
+ /* Fill record in place */
+ rrec = (struct scsc_ring_record *)spare;
+ SCSC_FILL_RING_RECORD(rrec, tag, lev);
+ /* Duplicate the record metadata as a textual header so readers do
+ * not need to rebuild it on the fly.
+ */
+ if (prepend_header)
+ build_header(bheader, SCSC_HBUF_LEN, rrec, NULL);
+ written = scnprintf(SCSC_GET_REC_BUF(spare),
+ BASE_SPARE_SZ - SCSC_RINGREC_SZ, "%s", bheader);
+ /**
+ * NOTE THAT
+ * ---------
+ * vscnprintf retvalue is the number of characters which have been
+ * written into the @buf NOT including the trailing '\0'.
+ * If @size is == 0 the function returns 0.
+ * Here we enforce a line length limit equal to
+ * BASE_SPARE_SZ - SCSC_RINGREC_SZ.
+ */
+ written += vscnprintf(SCSC_GET_REC_BUF(spare) + written,
+ BASE_SPARE_SZ - SCSC_RINGREC_SZ - written,
+ msg_head, args);
+ /* complete record metadata */
+ rrec->len = written;
+ return written;
+}
+
+/**
+ * A ring API function to push variable length format string into the buffer
+ * After the record has been created and pushed into the ring any process
+ * waiting on the related waiting queue is awakened.
+ */
+int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
+ int prepend_header, const char *msg_head, va_list args)
+{
+ int rec_len = 0;
+ loff_t free_bytes;
+ unsigned long flags;
+
+ /* Prepare ring_record and header if needed */
+ /* The spare area is shared scratch space, so it is staged under the
+ * same raw spinlock (IRQ-safe) that protects the ring itself.
+ */
+ raw_spin_lock_irqsave(&rb->lock, flags);
+ rec_len = tag_writer_string(rb->spare, tag, lev, prepend_header,
+ msg_head, args);
+ /* Line too long anyway drop */
+ if (rec_len >= BASE_SPARE_SZ - SCSC_RINGREC_SZ) {
+ raw_spin_unlock_irqrestore(&rb->lock, flags);
+ return 0;
+ }
+ free_bytes = SCSC_RING_FREE_BYTES(rb);
+ /**
+ * Evaluate if it's a trivial append or if we must account for
+ * any overwrap. Note that we do NOT truncate record across ring
+ * boundaries, if a record does NOT fit at the end of buffer,
+ * we'll write it from start directly.
+ */
+ if (rec_len + SCSC_RINGREC_SZ < free_bytes)
+ scsc_ring_buffer_plain_append(rb, rb->spare,
+ SCSC_RINGREC_SZ + rec_len,
+ NULL, 0);
+ else
+ scsc_ring_buffer_overlap_append(rb, rb->spare,
+ SCSC_RINGREC_SZ + rec_len,
+ NULL, 0);
+ rb->written += rec_len;
+ raw_spin_unlock_irqrestore(&rb->lock, flags);
+ /* WAKEUP EVERYONE WAITING ON THIS BUFFER */
+ wake_up_interruptible(&rb->wq);
+ return rec_len;
+}
+
+/* This simply builds up a record descriptor for a binary entry. */
+static inline
+int tag_writer_binary(char *spare, int tag, int lev, size_t hexlen)
+{
+ struct scsc_ring_record *rrec;
+
+ rrec = (struct scsc_ring_record *)spare;
+ SCSC_FILL_RING_RECORD(rrec, tag, lev);
+ rrec->len = hexlen;
+
+ return hexlen;
+}
+
+/**
+ * A ring API function to push binary data into the ring buffer. Binary data
+ * is copied from the start/len specified location.
+ * After the record has been created and pushed into the ring any process
+ * waiting on the related waiting queue is awakened.
+ */
+int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
+ int prepend_header, const void *start, size_t len)
+{
+ loff_t free_bytes;
+ unsigned long flags;
+
+ /* Oversized blobs are silently truncated to the configured max. */
+ if (len > SCSC_MAX_BIN_BLOB_SZ)
+ len = SCSC_MAX_BIN_BLOB_SZ;
+ /* Prepare ring_record and header if needed */
+ raw_spin_lock_irqsave(&rb->lock, flags);
+ memset(rb->spare, 0x00, rb->ssz);
+ /* Only the descriptor goes through the spare area; the payload is
+ * copied straight from @start into the ring (no double copy).
+ */
+ tag_writer_binary(rb->spare, tag, lev, len);
+ free_bytes = SCSC_RING_FREE_BYTES(rb);
+ if (len + SCSC_RINGREC_SZ < free_bytes)
+ scsc_ring_buffer_plain_append(rb, start, len,
+ rb->spare, SCSC_RINGREC_SZ);
+ else
+ scsc_ring_buffer_overlap_append(rb, start, len,
+ rb->spare, SCSC_RINGREC_SZ);
+ rb->written += len;
+ raw_spin_unlock_irqrestore(&rb->lock, flags);
+ /* WAKEUP EVERYONE WAITING ON THIS BUFFER */
+ wake_up_interruptible(&rb->wq);
+ return len;
+}
+
+/* A simple reader used to retrieve a string from the record
+ * It always return ONE WHOLE RECORD if it fits the provided tbuf OR NOTHING.
+ */
+/* Copy one whole string record into @tbuf; all-or-nothing: returns the
+ * record length on success, 0 when the record does not fit @tsz.
+ */
+static inline
+size_t tag_reader_string(char *tbuf, struct scsc_ring_buffer *rb,
+			 int start_rec, size_t tsz)
+{
+	size_t rec_len = SCSC_GET_REC_LEN(SCSC_GET_PTR(rb, start_rec));
+
+	if (rec_len > tsz)
+		return 0;
+	memcpy(tbuf, SCSC_GET_REC_BUF(rb->buf + start_rec), rec_len);
+	return rec_len;
+}
+
+/*
+ * Helper to dump binary data in ASCII readable form up to
+ * scsc_decode_binary_len bytes: when such modparam is set to -1
+ * this will dump all the available data. Data is dumped onto the
+ * output buffer with an endianity that conforms to the data as
+ * dumped by the print_hex_dump() kernel standard facility.
+ */
+/* See the block comment above: dumps up to @dlen payload bytes of
+ * @rrec as ASCII hex into @tbuf starting at offset @start, bounded by
+ * @tsz; returns the final write offset.
+ */
+static inline
+int binary_hexdump(char *tbuf, int tsz, struct scsc_ring_record *rrec,
+		   int start, int dlen)
+{
+	int i, j, bytepos;
+	unsigned char *blob = SCSC_GET_REC_BUF(rrec);
+	/* String literals are not writable in C: keep the nibble map in
+	 * const storage instead of a mutable char pointer.
+	 */
+	static const char hmap[] = "0123456789abcdef";
+
+	/**
+	 * Scan the buffer reversing endianity when appropriate and
+	 * producing ASCII human readable output while obeying the chosen
+	 * maximum decode length dlen.
+	 */
+	for (j = start, i = 0; j < tsz && i < rrec->len && i < dlen; i += 4) {
+		bytepos = (rrec->len - i - 1 >= 3) ? 3 : rrec->len - i - 1;
+		/* Reverse endianity to little only on 4-byte boundary */
+		if (bytepos == 3) {
+			for (; bytepos >= 0; bytepos--) {
+				if (i + bytepos >= dlen)
+					continue;
+				tbuf[j++] = hmap[blob[i + bytepos] >> 4 & 0x0f];
+				tbuf[j++] = hmap[blob[i + bytepos] & 0x0f];
+			}
+		} else {
+			int bb;
+
+			/**
+			 * Trailing bytes NOT aligned on a 4-byte boundary
+			 * should be decoded maintaining the original endianity.
+			 * This way we obtain a binary output perfectly equal
+			 * to the one generated by the original UDI tools.
+			 */
+			for (bb = 0; bb <= bytepos; bb++) {
+				if (i + bb >= dlen)
+					break;
+				tbuf[j++] = hmap[blob[i + bb] >> 4 & 0x0f];
+				tbuf[j++] = hmap[blob[i + bb] & 0x0f];
+			}
+		}
+	}
+	return j;
+}
+
+/**
+ * A reader used to dump binary records: this function first of all
+ * builds a proper human readable header to identify the record with the
+ * usual debuglevel and timestamps and then DUMPS some of the binary blob
+ * in ASCII human readable form: how much is dumped depends on the module
+ * param scsc_decode_binary_len (default 8 bytes).
+ * ANYWAY ONLY ONE WHOLE RECORD IS DUMPED OR NOTHING IF IT DOES NOT FIT
+ * THE PROVIDED DESTINATION BUFFER TBUF.
+ */
+static inline
+size_t tag_reader_binary(char *tbuf, struct scsc_ring_buffer *rb,
+ int start_rec, size_t tsz)
+{
+ size_t written;
+ int declen = scsc_decode_binary_len;
+ struct scsc_ring_record *rrec;
+ char bheader[SCSC_HBUF_LEN] = {};
+ char binfo[SCSC_BINFO_LEN] = {};
+ size_t max_chunk;
+
+ rrec = (struct scsc_ring_record *)SCSC_GET_PTR(rb, start_rec);
+ /* Negative or oversized modparam means "decode the whole blob". */
+ if (declen < 0 || declen > rrec->len)
+ declen = rrec->len;
+ if (declen)
+ snprintf(binfo, SCSC_BINFO_LEN, "HEX[%d/%d]: ",
+ declen, rrec->len);
+ written = build_header(bheader, SCSC_HBUF_LEN, rrec,
+ declen ? binfo : "");
+ /* Account for byte decoding: two ASCII char for each byte */
+ max_chunk = written + (declen * 2);
+ if (max_chunk <= tsz) {
+ memcpy(tbuf, bheader, written);
+ /* NOTE(review): the size passed below is tsz - written, yet
+ * binary_hexdump() starts writing at offset 'written' and
+ * bounds its cursor against that same size -- confirm this is
+ * the intended limit and that the trailing '\n' cannot land
+ * out of bounds when max_chunk == tsz.
+ */
+ if (declen)
+ written = binary_hexdump(tbuf, tsz - written,
+ rrec, written, declen);
+ tbuf[written] = '\n';
+ written++;
+ } else {
+ written = 0;
+ }
+ return written;
+}
+
+/**
+ * This is a utility function to read from the specified ring_buffer
+ * up to 'tsz' amount of data starting from position record 'start_rec'.
+ * This function reads ONLY UP TO ONE RECORD and returns the effective
+ * amount of data bytes read; it invokes the proper tag_reader_* helper
+ * depending on the specific record is handling.
+ * Data is copied to a TEMP BUFFER provided by user of this function,
+ * IF AND ONLY IF a whole record CAN fit into the space available in the
+ * destination buffer, otherwise record is NOT copied and 0 is returned.
+ * This function DOES NOT SLEEP.
+ * Caller IS IN CHARGE to SOLVE any sync issue on provided tbuf and
+ * underlying ring buffer.
+ *
+ * @tbuf: a temp buffer destination for the read data
+ * @rb: the ring_buffer to use.
+ * @start_rec: the record from which to start expressed as a record
+ * starting position.
+ * @tsz: the available space in tbuf
+ * @return size_t: returns the bytes effectively read.
+ */
+static inline size_t
+_read_one_whole_record(void *tbuf, struct scsc_ring_buffer *rb,
+		       int start_rec, size_t tsz)
+{
+	/*
+	 * Tags above LAST_BIN_TAG carry string payloads; everything else
+	 * goes through the binary-record reader.
+	 */
+	return (SCSC_GET_REC_TAG(SCSC_GET_PTR(rb, start_rec)) > LAST_BIN_TAG) ?
+	       tag_reader_string(tbuf, rb, start_rec, tsz) :
+	       tag_reader_binary(tbuf, rb, start_rec, tsz);
+}
+
+
+/**
+ * This just inject a string into the buffer to signal we've gone
+ * OUT OF SYNC due to Ring WRAPPING too FAST, noting how many bytes
+ * we resynced.
+ */
+static inline size_t mark_out_of_sync(char *tbuf, size_t tsz,
+				      int resynced_bytes)
+{
+	/* Timestamp from the same clock family used by printk */
+	struct timeval tval = ns_to_timeval(local_clock());
+
+	/* Emit the marker even if it ends up truncated. */
+	return scnprintf(tbuf, tsz,
+			 "<7>[%6lu.%06ld] [c%d] [P] [OOS] :: [[[ OUT OF SYNC -- RESYNC'ED BYTES %d ]]]\n",
+			 tval.tv_sec, tval.tv_usec, smp_processor_id(),
+			 resynced_bytes);
+}
+
+/**
+ * Attempt resync searching for SYNC pattern and verifying CRC.
+ * ASSUMES that the invalid_pos provided is anyway safe to access, since
+ * it should be checked by the caller in advance.
+ * The amount of resynced bytes are not necessarily the number of bytes
+ * effectively lost....they could be much more...imagine the ring had
+ * overwrap multiple times before detecting OUT OF SYNC.
+ */
+static inline loff_t reader_resync(struct scsc_ring_buffer *rb,
+				   loff_t invalid_pos, int *resynced_bytes)
+{
+	int bytes = 0;
+	/* Fallback position when no valid record is found: the ring head */
+	loff_t sync_pos = rb->head;
+	struct scsc_ring_record *candidate = SCSC_GET_REC(rb, invalid_pos);
+
+	*resynced_bytes = 0;
+	/* Walking through the ring in search of the sync one byte at time */
+	while (invalid_pos != rb->head &&
+	       !SCSC_IS_REC_SYNC_VALID(candidate)) {
+		/* Advance one byte, wrapping to ring start once past ->last */
+		invalid_pos = (invalid_pos < rb->last) ?
+			      (invalid_pos + sizeof(u8)) : 0;
+		bytes += sizeof(u8);
+		candidate = SCSC_GET_REC(rb, invalid_pos);
+	}
+	/* Accept the candidate only when we reached head, or when both the
+	 * SYNC pattern and the CRC check out (SYNC alone can be a false
+	 * positive inside payload bytes).
+	 */
+	if (invalid_pos == rb->head ||
+	    (SCSC_IS_REC_SYNC_VALID(candidate) &&
+	     is_record_crc_valid(candidate, invalid_pos))) {
+		sync_pos = invalid_pos;
+		*resynced_bytes = bytes;
+	}
+	return sync_pos;
+}
+
+/**
+ * An Internal API ring function to retrieve into the provided tbuf
+ * up to N WHOLE RECORDS starting from *next_rec.
+ * It STOPS collecting records if:
+ * - NO MORE RECORDS TO READ: last_read_record record is head
+ * - NO MORE SPACE: on provided destination tbuf to collect
+ * one more WHOLE record
+ * - MAX NUMBER OF REQUIRED RECORDS READ: if max_recs was passed in
+ * as ZERO it means read as much as you can till head is reached.
+ *
+ * If at start it detects and OUT OF SYNC, so that next_rec is
+ * NO MORE pointing to a valid record, it tries to RE-SYNC on next
+ * GOOD KNOWN record or to HEAD as last resource and injects into
+ * the user buffer an OUT OF SYNC marker record.
+ *
+ * ASSUMES proper locking and syncing ALREADY inplace...does NOT SLEEP.
+ */
+size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
+			 loff_t *last_read_rec, void *tbuf, size_t tsz)
+{
+	/* last_read starts at SIZE_MAX so the first loop pass never
+	 * mistakes "nothing read yet" for "no progress" */
+	size_t bytes_read = 0, last_read = -1;
+	int resynced_bytes = 0, records = 0;
+	loff_t next_rec = 0;
+
+	/* Nothing to read...simply return 0 causing reader to exit */
+	if (*last_read_rec == rb->head)
+		return bytes_read;
+	if (!is_ring_read_pos_valid(rb, *last_read_rec)) {
+		if (is_ring_pos_safe(rb, *last_read_rec)) {
+			/* Try to resync from *last_read_rec INVALID POS */
+			next_rec = reader_resync(rb, *last_read_rec,
+						 &resynced_bytes);
+		} else {
+			/* Skip to head...ONLY safe place known in this case. */
+			resynced_bytes = 0;
+			next_rec = rb->head;
+		}
+		bytes_read += mark_out_of_sync(tbuf, tsz, resynced_bytes);
+	} else {
+		/* next to read....we're surely NOT already at rb->head here */
+		next_rec = (*last_read_rec != rb->last) ?
+			   SCSC_GET_NEXT_SLOT_POS(rb, *last_read_rec) : 0;
+	}
+	/*
+	 * Collect whole records until head is reached, a record no longer
+	 * fits in tbuf (no progress), or the record budget is exhausted.
+	 * NOTE(review): the guard 'records <= max_recs' lets the loop
+	 * collect up to max_recs + 1 records; 'records < max_recs' would
+	 * match the stated contract -- confirm intent before changing.
+	 */
+	do {
+		/* Account for last read */
+		last_read = bytes_read;
+		bytes_read +=
+			_read_one_whole_record(tbuf + bytes_read, rb,
+					       next_rec, tsz - bytes_read);
+		/* Did a WHOLE record fit into available tbuf ? */
+		if (bytes_read != last_read) {
+			records++;
+			*last_read_rec = next_rec;
+			/* Compute next slot, wrapping past ->last to 0 */
+			if (*last_read_rec != rb->head)
+				next_rec = (next_rec != rb->last) ?
+					   SCSC_GET_NEXT_SLOT_POS(rb, next_rec) : 0;
+		}
+	} while (*last_read_rec != rb->head &&
+		 last_read != bytes_read &&
+		 (!max_recs || records <= max_recs));
+
+	return bytes_read;
+}
+
+/**
+ * This function returns a static snapshot of the ring that can be used
+ * for further processing using usual records operations.
+ *
+ * It returns a freshly allocated scsc_ring_buffer descriptor whose
+ * internal references are exactly the same as the original buffer
+ * being snapshot, and with all the sync machinery re-initialized.
+ * Even if the current use-case does NOT make any use of spinlocks and
+ * waitqueues in the snapshot image, we provide an initialized instance
+ * in order to be safe for future (mis-)usage.
+ *
+ * It also takes care to copy the content of original ring buffer into
+ * the new snapshot image (including the spare area) using the provided
+ * pre-allocated snap_buf.
+ *
+ * Assumes ring is already spinlocked.
+ *
+ * @rb: the original buffer to snapshot
+ * @snap_buf: the pre-allocated ring-buffer area to use for copying records
+ * @snap_sz: pre-allocated area including spare
+ * @snap_name: a human readable descriptor
+ */
+struct scsc_ring_buffer *scsc_ring_get_snapshot(const struct scsc_ring_buffer *rb,
+						void *snap_buf, size_t snap_sz,
+						char *snap_name)
+{
+	struct scsc_ring_buffer *snap_rb;
+
+	/* snap_sz must cover the ring AND its spare area exactly */
+	if (!rb || !snap_buf || !snap_name || snap_sz != rb->bsz + rb->ssz)
+		return NULL;
+
+	/* Here we hold a lock starving writers...try to be quick using
+	 * GFP_ATOMIC since scsc_ring_buffer is small enough (144 bytes)
+	 */
+	snap_rb = kzalloc(sizeof(*rb), GFP_ATOMIC);
+	if (!snap_rb)
+		return NULL;
+
+	/* Copy original buffer content onto the provided snap_buf.
+	 * memcpy() always returns its destination argument, so the former
+	 * "if (memcpy(...))" test could never fail and its error branch
+	 * was dead code: copy unconditionally.
+	 */
+	memcpy(snap_buf, rb->buf, snap_sz);
+	snap_rb->bsz = rb->bsz;
+	snap_rb->ssz = rb->ssz;
+	snap_rb->head = rb->head;
+	snap_rb->tail = rb->tail;
+	snap_rb->last = rb->last;
+	snap_rb->written = rb->written;
+	snap_rb->records = rb->records;
+	snap_rb->wraps = rb->wraps;
+	/* this is related to reads so must be re-init */
+	snap_rb->oos = 0;
+	/* kzalloc() zeroed the struct, so name stays NUL-terminated */
+	strncpy(snap_rb->name, snap_name, RNAME_SZ - 1);
+	/* Link the copies */
+	snap_rb->buf = snap_buf;
+	snap_rb->spare = snap_rb->buf + snap_rb->bsz;
+	/* cleanup spare */
+	memset(snap_rb->spare, 0x00, snap_rb->ssz);
+	/* Re-init snapshot copies of sync tools */
+	raw_spin_lock_init(&snap_rb->lock);
+	init_waitqueue_head(&snap_rb->wq);
+
+	return snap_rb;
+}
+
+/* Assumes ring is already spinlocked. */
+void scsc_ring_truncate(struct scsc_ring_buffer *rb)
+{
+	/* Wipe the first record slot so no stale descriptor survives
+	 * (head is reset to 0, so the slot sits at the start of buf). */
+	memset(rb->buf, 0x00, SCSC_RINGREC_SZ);
+	rb->head = 0;
+	rb->tail = 0;
+	rb->last = 0;
+	rb->records = 0;
+	rb->written = 0;
+	rb->wraps = 0;
+}
+
+/**
+ * alloc_ring_buffer - Allocates and initializes a basic ring buffer,
+ * including a basic spare area where to handle strings-splitting when
+ * buffer wraps. Basic spinlock/mutex init takes place here too.
+ *
+ * @bsz: the size of the ring buffer to allocate in bytes
+ * @ssz: the size of the spare area to allocate in bytes
+ * @name: a name for this ring buffer
+ */
+struct scsc_ring_buffer __init *alloc_ring_buffer(size_t bsz, size_t ssz,
+						  const char *name)
+{
+	struct scsc_ring_buffer *ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+
+	if (!ring)
+		return NULL;
+	ring->bsz = bsz;
+	ring->ssz = ssz;
+#ifndef CONFIG_SCSC_STATIC_RING_SIZE
+	ring->buf = kzalloc(ring->bsz + ring->ssz, GFP_KERNEL);
+	if (!ring->buf) {
+		kfree(ring);
+		return NULL;
+	}
+#else
+	/* Statically sized ring: storage comes from the global a_ring */
+	ring->buf = a_ring;
+#endif
+	/* The spare area lives right past the end of the ring proper */
+	ring->spare = ring->buf + ring->bsz;
+	ring->head = 0;
+	ring->tail = 0;
+	ring->last = 0;
+	ring->written = 0;
+	ring->records = 0;
+	ring->wraps = 0;
+	ring->oos = 0;
+	memset(ring->name, 0x00, RNAME_SZ);
+	strncpy(ring->name, name, RNAME_SZ - 1);
+	raw_spin_lock_init(&ring->lock);
+	init_waitqueue_head(&ring->wq);
+
+	return ring;
+}
+
+/*
+ * free_ring_buffer - Free the ring what else...
+ * ...does NOT account for spinlocks existence currently
+ *
+ * @rb: a pointer to the ring buffer to free
+ */
+void free_ring_buffer(struct scsc_ring_buffer *rb)
+{
+	if (rb) {
+#ifndef CONFIG_SCSC_STATIC_RING_SIZE
+		/* Storage is only owned when it was dynamically allocated */
+		kfree(rb->buf);
+#endif
+		kfree(rb);
+	}
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2016-2017 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+
+#ifndef _SCSC_LOGRING_RING_H_
+#define _SCSC_LOGRING_RING_H_
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/hardirq.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/crc32.h>
+
+#include <scsc/scsc_logring.h>
+
+#include "scsc_logring_common.h"
+
+#define SCSC_BINFO_LEN 32
+#define SCSC_HBUF_LEN 128
+/* A safe size to enforce on ingressing binary blobs; this accounts
+ * for possible binary expansion while reading, in order to fit the reader
+ * DEFAULT_TBUF_SZ in any possible case: this way we avoid to have truncated
+ * data also on read while dumping big binary blobs.
+ */
+#define SCSC_MAX_BIN_BLOB_SZ 1920
+/**
+ * This spare area is used to prepare a logging entry before pushing it into
+ * the ring and so it's the maximum length allowed for a log entry.
+ * When this change (hugely) you should check the size of len field
+ * in the following struct scsc_ring_record.
+ */
+#define BASE_SPARE_SZ 2048
+#define RNAME_SZ 16
+#define DEFAULT_RING_BUFFER_SZ 1048576
+#define DEFAULT_ENABLE_HEADER 1
+#define DEFAULT_ENABLE_LOGRING 1
+/* The default len, in bytes, of the binary blob to decode in ASCII
+ * Human readable form. -1 means DECODE EVERYTHING !
+ */
+#define DEFAULT_BIN_DECODE_LEN -1
+#define DEBUGFS_ROOT "/sys/kernel/debug"
+#define DEBUGFS_RING0_ROOT "ring0"
+
+/**
+ * Our ring buffer is allocated simply as a bunch of contiguous bytes.
+ * Data is stored as a contiguous stream of concatenated records, each one
+ * starting with a record descriptor of type scsc_ring_record: data content
+ * is then appended to the record descriptor; in this way we can account
+ * for different types of content, pushing the TAG describing the content
+ * into the record descriptor itself, being then able to operate differently
+ * on read depending on the type of content.
+ * The tail and head references 'points' to the start of the first (oldest)
+ * and the last (newest) record: any write will happen after the end
+ * of the current head: these references in fact points to the starting byte
+ * of the record descriptor modulus the ring size (they're NOT absolute
+ * pointers). Since our 'metadata' is embedded inside the ring itself (like
+ * printk does) we never write variable string content in place BUT use
+ * instead the spare area (allocated contiguously at the end of the ring)
+ * to expand the provided format string and then memcpy the content to
+ * the final position after having properly updated the record descriptors
+ * and eventually moved away the tail when overwriting.
+ * Moreover we never wrap a record across the ring boundary: if there's NOT
+ * enough space at the end of the ring, we simply place it at the start.
+ * Moreover this structs holds a kcache reference to allocate temporary
+ * buffers to use when double buffering is needed, a spinlock_t for
+ * protection and a wait_queue_t for blocking I/O.
+ *
+ * @buf: the ring-buffer itself starts here
+ * @spare: start of spare area (buf[bsz])
+ * @name: a simple named identifier
+ * @bsz: ring size
+ * @ssz: size of spare (fixed at BASE_SPARE_SZ)
+ * @head: newest record written (first byte)...next write after it
+ * @tail: oldest record written (first byte)...full dump read will start
+ * from here
+ * @last: the last record before the end of the ring.
+ * @records: the number of records
+ * @written: a general progressive counter of total bytes written into
+ * the ring
+ * @lock: a spinlock_t to protect concurrent access
+ * @wq: a wait queue where to put sleeping processes waiting for input.
+ * They're woken up at the end of scsc_printk().
+ * @refc: a reference counter...currently unused.
+ * @private: useful to hold some user provided data (used to hold debugfs
+ * initdata related to this ring)
+ * @kcache: a reference to a kmem_cache created at initialization time
+ * to get fresh temporary buffers on the fly when copying to user and in
+ * need of a double buffer
+ */
+struct scsc_ring_buffer {
+	char *buf;		/* ring storage: records live here */
+	char *spare;		/* staging area at buf[bsz] for wrapped writes */
+	char name[RNAME_SZ];	/* human readable identifier */
+	size_t bsz;		/* ring size in bytes */
+	size_t ssz;		/* spare area size (fixed at BASE_SPARE_SZ) */
+	loff_t head;		/* offset of newest record */
+	loff_t tail;		/* offset of oldest record */
+	loff_t last;		/* offset of last record before ring end */
+	int records;		/* number of records currently stored */
+	int wraps;		/* how many times the ring has wrapped */
+	int oos;		/* out-of-sync state seen by readers */
+	u64 written;		/* total bytes written since creation */
+	raw_spinlock_t lock;	/* protects concurrent ring access */
+	wait_queue_head_t wq;	/* blocked readers sleep here */
+	atomic_t refc;		/* reference counter...currently unused */
+	void *private;		/* user data (debugfs initdata for this ring) */
+};
+
+/**
+ * Our ring buffer is now built concatenating entries prepended by a record
+ * that describes the content itself. This will allow us to store different
+ * types of data (NOT only string) and to interpret it.
+ * Each record is described by this struct that is laid out in front of the
+ * effective content:
+ *
+ * | SYNC | CRC | tag | len | lev | ctx | core | nsec | <buffer len - - |
+ *
+ * @SYNC: a fixed pattern to search for when re-syncing after a reader
+ * has got lost.
+ * @CRC: CRC32 calculated, using kernel crc32_le, on the whole record header,
+ * taking care to substitute this field with the 32 LSB of this record
+ * relative starting position (relative to the absolute ring buffer
+ * start.
+ * @tag: type of this record...matters especially to identify binary data
+ * record
+ * @len: this is the length in bytes of buffer. All string content should
+ * be NULL terminated. This length will anyway NEVER exceed
+ * BASE_SPARE_SZ that's currently a few KB.
+ * @lev: the debuglevel associated to this message.
+ * @ctx: the execution context of the logged line:
+ * SoftIRQ / Interrupt / Process
+ * @core: the CPU core id
+ * @nsec: the timestamp in nanoseconds
+ */
+struct scsc_ring_record {
+	u32 sync;	/* fixed SYNC_MAGIC pattern used for reader re-sync */
+	u32 crc;	/* CRC32 of the header (see layout comment above) */
+	u8 tag;		/* content type of the payload */
+	u16 len;	/* payload length in bytes (bounded by BASE_SPARE_SZ) */
+	u8 lev;		/* debug level of this message */
+	u8 ctx;		/* execution context: 'S'oftirq/'I'nterrupt/'P'rocess */
+	u8 core;	/* CPU core id at logging time */
+	s64 nsec;	/* timestamp in nanoseconds */
+} __packed; /* should NOT be needed */
+
+#define SYNC_MAGIC 0xDEADBEEF
+/**
+ * Fill a scsc_ring_record descriptor
+ * local_clock() is from the same family of time-func used
+ * by printk returns nanoseconds
+ */
+#define SCSC_FILL_RING_RECORD(r, tag, lev) \
+ do { \
+ (r)->sync = SYNC_MAGIC; \
+ (r)->crc = 0; \
+ (r)->nsec = local_clock(); \
+ (r)->tag = tag; \
+ (r)->len = 0; \
+ (r)->lev = lev; \
+ (r)->ctx = ((in_interrupt()) ? \
+ ((in_softirq()) ? 'S' : 'I') : 'P'); \
+ (r)->core = smp_processor_id(); \
+ } while (0)
+
+
+#define SCSC_RINGREC_SZ (sizeof(struct scsc_ring_record))
+#define SCSC_CRC_RINGREC_SZ (SCSC_RINGREC_SZ - sizeof(SYNC_MAGIC))
+
+#define SCSC_IS_RING_IN_USE(ring) \
+ ((atomic_read(&((struct scsc_ring_buffer *)(ring))->refc)) != 0)
+
+#define SCSC_GET_RING_REFC(ring) \
+ atomic_inc(&((struct scsc_ring_buffer *)(ring))->refc)
+
+#define SCSC_PUT_RING_REFC(ring) \
+ atomic_dec(&((struct scsc_ring_buffer *)(ring))->refc)
+
+
+#define SCSC_GET_REC_BUF(p) (((char *)(p)) + SCSC_RINGREC_SZ)
+
+#define SCSC_GET_REC_LEN(recp) (((struct scsc_ring_record *)(recp))->len)
+
+#define SCSC_GET_REC_TAG(recp) (((struct scsc_ring_record *)(recp))->tag)
+
+#define SCSC_GET_REC_CRC(recp) (((struct scsc_ring_record *)(recp))->crc)
+
+#define SCSC_GET_PTR(ring, pos) ((ring)->buf + (pos))
+
+#define SCSC_GET_REC(ring, pos) \
+ ((struct scsc_ring_record *)(SCSC_GET_PTR((ring), (pos))))
+
+#define SCSC_IS_REC_SYNC_VALID(recp) ((recp)->sync == SYNC_MAGIC)
+
+#define SCSC_GET_HEAD_PTR(ring) SCSC_GET_PTR((ring), (ring)->head)
+
+#define SCSC_GET_NEXT_FREE_SLOT_PTR(ring) \
+ (SCSC_GET_HEAD_PTR((ring)) + SCSC_RINGREC_SZ + \
+ SCSC_GET_REC_LEN(SCSC_GET_HEAD_PTR(ring)))
+
+#define SCSC_GET_SLOT_LEN(ring, pos) \
+ (((SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (pos)))) != 0) ? \
+ (SCSC_RINGREC_SZ + SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (pos)))) : 0)
+
+#define SCSC_GET_NEXT_SLOT_POS(ring, pos) \
+ ((pos) + SCSC_GET_SLOT_LEN((ring), (pos)))
+
+#define SCSC_RING_FREE_BYTES(rb) \
+ (((rb)->head >= (rb)->tail) ? \
+ ((rb)->bsz - SCSC_GET_NEXT_SLOT_POS(rb, rb->head)) : \
+ ((rb)->tail - SCSC_GET_NEXT_SLOT_POS(rb, rb->head)))
+
+#define SCSC_USED_BYTES(rb) ((rb)->bsz - SCSC_RING_FREE_BYTES(rb))
+
+#define SCSC_LOGGED_BYTES(rb) (SCSC_USED_BYTES(rb) - \
+ ((rb)->records * SCSC_RINGREC_SZ))
+
+#define SCSC_GET_NEXT_REC_ENTRY_POS(ring, rpos) \
+ (rpos + SCSC_RINGREC_SZ + \
+ SCSC_GET_REC_LEN(SCSC_GET_PTR((ring), (rpos))))
+
+/* Ring buffer API */
+struct scsc_ring_buffer *alloc_ring_buffer(size_t bsz, size_t ssz,
+ const char *name) __init;
+void free_ring_buffer(struct scsc_ring_buffer *rb);
+void scsc_ring_truncate(struct scsc_ring_buffer *rb);
+int push_record_string(struct scsc_ring_buffer *rb, int tag, int lev,
+ int prepend_header, const char *msg_head, va_list args);
+int push_record_blob(struct scsc_ring_buffer *rb, int tag, int lev,
+ int prepend_header, const void *start, size_t len);
+size_t read_next_records(struct scsc_ring_buffer *rb, int max_recs,
+ loff_t *last_read_rec, void *tbuf, size_t tsz);
+struct scsc_ring_buffer *scsc_ring_get_snapshot(const struct scsc_ring_buffer *rb,
+ void *snap_buf, size_t snap_sz,
+ char *snap_name);
+#endif /* _SCSC_LOGRING_RING_H_ */
--- /dev/null
+/**
+ * Loopback Protocol (Implementation)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <hydra/trace.h>
+
+#include "scsc_loopback.h"
+
+/*****************************************************************************/
+
+/**
+ * Receive hook for the loopback port: echo the payload straight back.
+ */
+static void scsc_loopback_port_recv(struct scsc_mport *port,
+				    const unsigned char *data,
+				    size_t count)
+{
+	os_trace_dbg("%s: @%p, count %zu", __func__, port, count);
+	scsc_mport_emit(port, data, count);
+}
+
+static const struct scsc_mport_ops scsc_loopback_port_ops = {
+	/* positional init: assumes the recv hook is the first member of
+	 * struct scsc_mport_ops -- a designated initializer would be
+	 * safer; confirm against scsc_mport.h */
+	scsc_loopback_port_recv
+};
+
+/*****************************************************************************/
+
+/* Initialise the loopback: bind its port to the echoing ops table. */
+void scsc_loopback_init(struct scsc_loopback *loopback)
+{
+	os_trace_dbg("%s: @%p", __func__, loopback);
+
+	scsc_mport_init(&loopback->port, &scsc_loopback_port_ops);
+}
+
+void scsc_loopback_deinit(struct scsc_loopback *loopback)
+{
+	/* Intentionally empty: the loopback owns no resources to release. */
+}
+
+/* Return the single port embedded in this loopback instance. */
+struct scsc_mport *scsc_loopback_get_port(struct scsc_loopback *loopback)
+{
+	return &loopback->port;
+}
--- /dev/null
+#ifndef __HCI_LOOPBACK_H
+#define __HCI_LOOPBACK_H
+/**
+ * Loopback Protocol (Interface)
+ *
+ * Bounces anything send straight back.
+ */
+
+#include "scsc_mport.h"
+
+/*****************************************************************************/
+
+struct scsc_loopback {
+	struct scsc_mport port;	/* the single port data is bounced back on */
+};
+
+/*****************************************************************************/
+
+void scsc_loopback_init(struct scsc_loopback *loopback);
+void scsc_loopback_deinit(struct scsc_loopback *loopback);
+
+struct scsc_mport *scsc_loopback_get_port(struct scsc_loopback *loopback);
+
+#endif /* __HCI_LOOPBACK_H */
--- /dev/null
+/****************************************************************************
+*
+* Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+*
+****************************************************************************/
+
+#ifndef __SCSC_MIF_ABS_H
+#define __SCSC_MIF_ABS_H
+
+#ifdef CONFIG_SCSC_QOS
+#include <linux/pm_qos.h>
+#endif
+#include <linux/types.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/scsc_mx.h>
+
+struct device;
+
+/* To R4/M4 */
+enum scsc_mif_abs_target {
+ SCSC_MIF_ABS_TARGET_R4 = 0,
+ SCSC_MIF_ABS_TARGET_M4 = 1,
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+ SCSC_MIF_ABS_TARGET_M4_1 = 2
+#endif
+};
+
+#ifdef CONFIG_SCSC_SMAPPER
+#define SCSC_MIF_SMAPPER_MAX_BANKS 32
+
+struct scsc_mif_smapper_info {
+ u32 num_entries;
+ u32 mem_range_bytes;
+};
+
+enum scsc_mif_abs_bank_type {
+ SCSC_MIF_ABS_NO_BANK = 0,
+ SCSC_MIF_ABS_SMALL_BANK = 1,
+ SCSC_MIF_ABS_LARGE_BANK = 2
+};
+#endif
+
+#ifdef CONFIG_SCSC_QOS
+struct scsc_mifqos_request {
+ struct pm_qos_request pm_qos_req_mif;
+ struct pm_qos_request pm_qos_req_int;
+ struct pm_qos_request pm_qos_req_cl0;
+ struct pm_qos_request pm_qos_req_cl1;
+};
+#endif
+
+#define SCSC_REG_READ_WLBT_STAT 0
+
+/**
+ * Abstraction of the Maxwell "Memory Interface" aka MIF.
+ *
+ * There will be at least two implementations of this
+ * interface - The native AXI one and a PCIe based emulation.
+ *
+ * A reference to an interface will be passed to the
+ * scsc_mx driver when the system starts up.
+ */
+struct scsc_mif_abs {
+/**
+ * Destroy this interface.
+ *
+ * This should be called when the underlying device is
+ * removed.
+ */
+ void (*destroy)(struct scsc_mif_abs *interface);
+	/* Return a unique id for this host, preferably identifying the specific device (example pcie0, pcie1) */
+ char *(*get_uid)(struct scsc_mif_abs *interface);
+/**
+ * Controls the hardware "reset" state of the Maxwell
+ * subsystem.
+ *
+ * Setting reset=TRUE places the subsystem in its low
+ * power "reset" state. This function is called
+ * by the Maxwell Manager near the end of the subsystem
+ * shutdown process, before "unmapping" the interface.
+ *
+ * Setting reset=FALSE releases the subsystem reset state.
+ * The subsystem will then start its cold boot sequence. This
+ * function is called
+ * by the Subsystem Manager near the end of the subsystem
+ * startup process after installing the maxwell firmware and
+ * other resources in MIF RAM.
+ */
+ int (*reset)(struct scsc_mif_abs *interface, bool reset);
+/**
+ * This function maps the Maxwell interface hardware (MIF
+ * DRAM) into kernel memory space.
+ *
+ * Amount of memory allocated must be defined and returned
+ * on (*allocated) by the abstraction layer implementation.
+ *
+ * This returns kernel-space pointer to the start of the
+ * shared MIF DRAM. The Maxwell Manager will load firmware
+ * to this location and configure the MIF Heap Manager to
+ * manage any unused memory at the end of the DRAM region.
+ *
+ * The scsc_mx driver should call this when the Maxwell
+ * subsystem is required by any service client.
+ *
+ * The mailbox, irq and dram functions are only usable once
+ * this call has returned. HERE: Should we rename this to
+ * "open" and return a handle to these conditional methods?
+ */
+ void *(*map)(struct scsc_mif_abs *interface, size_t *allocated);
+/**
+ * The inverse of "map". Should be called once the maxwell
+ * subsystem is no longer required and has been placed into
+ * "reset" state (see reset method).
+ */
+ void (*unmap)(struct scsc_mif_abs *interface, void *mem);
+
+/**
+ * The Mailbox pointer returned can be used for direct access
+ * to the hardware register for efficiency.
+ * The pointer is guaranteed to remain valid between map and unmap calls.
+ * HERE: If we are not assuming AP v R4 same-endianess then this
+ * should be explicitly leu32 or u8[4] (or something equivalent).
+ */
+ u32 *(*get_mbox_ptr)(struct scsc_mif_abs *interface, u32 mbox_index);
+/**
+ * Incoming MIF Interrupt Hardware Controls
+ */
+ /** Get the incoming interrupt source mask */
+ u32 (*irq_bit_mask_status_get)(struct scsc_mif_abs *interface);
+
+ /** Get the incoming interrupt pending (waiting *AND* not masked) mask */
+ u32 (*irq_get)(struct scsc_mif_abs *interface);
+
+ void (*irq_bit_clear)(struct scsc_mif_abs *interface, int bit_num);
+ void (*irq_bit_mask)(struct scsc_mif_abs *interface, int bit_num);
+ void (*irq_bit_unmask)(struct scsc_mif_abs *interface, int bit_num);
+
+/**
+ * Outgoing MIF Interrupt Hardware Controls
+ */
+ void (*irq_bit_set)(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target);
+
+/**
+ * Register handler for the interrupt from the
+ * MIF Interrupt Hardware.
+ *
+ * This is used by the MIF Interrupt Manager to
+ * register a handler that demultiplexes the
+ * individual interrupt sources (MIF Interrupt Bits)
+ * to source-specific handlers.
+ */
+ void (*irq_reg_handler)(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev);
+ void (*irq_unreg_handler)(struct scsc_mif_abs *interface);
+
+ /* Clear HW interrupt line */
+ void (*irq_clear)(void);
+ void (*irq_reg_reset_request_handler)(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev);
+ void (*irq_unreg_reset_request_handler)(struct scsc_mif_abs *interface);
+
+/**
+ * Install suspend/resume handlers for the MIF abstraction driver
+ */
+ void (*suspend_reg_handler)(struct scsc_mif_abs *abs,
+ int (*suspend)(struct scsc_mif_abs *abs, void *data),
+ void (*resume)(struct scsc_mif_abs *abs, void *data),
+ void *data);
+ void (*suspend_unreg_handler)(struct scsc_mif_abs *abs);
+
+/**
+ * Return kernel-space pointer to MIF ram.
+ * The pointer is guaranteed to remain valid between map and unmap calls.
+ */
+ void *(*get_mifram_ptr)(struct scsc_mif_abs *interface, scsc_mifram_ref ref);
+/* Maps kernel-space pointer to MIF RAM to portable reference */
+ int (*get_mifram_ref)(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref);
+
+/* Return the physical page frame number corresponding to the physical address
+ * to which the virtual address is mapped. Needed in mmap file operations */
+ uintptr_t (*get_mifram_pfn)(struct scsc_mif_abs *interface);
+
+/**
+ * Return physical address from MIF ram address.
+ */
+ void *(*get_mifram_phy_ptr)(struct scsc_mif_abs *interface, scsc_mifram_ref ref);
+/** Return a kernel device associated 1:1 with the Maxwell instance.
+ * This is published only for the purpose of associating service drivers with a Maxwell instance
+ * for logging purposes. Clients should not make any assumptions about the device type.
+ * In some configurations this may be the associated host-interface device (AXI/PCIe),
+ * but this may change in future.
+ */
+ struct device *(*get_mif_device)(struct scsc_mif_abs *interface);
+
+
+ void (*mif_dump_registers)(struct scsc_mif_abs *interface);
+ void (*mif_cleanup)(struct scsc_mif_abs *interface);
+ void (*mif_restart)(struct scsc_mif_abs *interface);
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* SMAPPER */
+ int (*mif_smapper_get_mapping)(struct scsc_mif_abs *interface, u8 *phy_map, u16 *align);
+ int (*mif_smapper_get_bank_info)(struct scsc_mif_abs *interface, u8 bank, struct scsc_mif_smapper_info *bank_info);
+ int (*mif_smapper_write_sram)(struct scsc_mif_abs *interface, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr);
+ void (*mif_smapper_configure)(struct scsc_mif_abs *interface, u32 granularity);
+ u32 (*mif_smapper_get_bank_base_address)(struct scsc_mif_abs *interface, u8 bank);
+#endif
+#ifdef CONFIG_SCSC_QOS
+ int (*mif_pm_qos_add_request)(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config);
+ int (*mif_pm_qos_update_request)(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req, enum scsc_qos_config config);
+ int (*mif_pm_qos_remove_request)(struct scsc_mif_abs *interface, struct scsc_mifqos_request *qos_req);
+#endif
+ int (*mif_read_register)(struct scsc_mif_abs *interface, u64 id, u32 *val);
+};
+
+struct device;
+
+/* Descriptor registered (via scsc_mif_abs_register) by a MIF transport
+ * implementation; probe/remove are invoked as matching MIF devices
+ * appear and disappear. */
+struct scsc_mif_abs_driver {
+	char *name;	/* human readable transport name */
+	void (*probe)(struct scsc_mif_abs_driver *abs_driver, struct scsc_mif_abs *abs);
+	void (*remove)(struct scsc_mif_abs *abs);
+};
+
+extern void scsc_mif_abs_register(struct scsc_mif_abs_driver *driver);
+extern void scsc_mif_abs_unregister(struct scsc_mif_abs_driver *driver);
+
+/* mmap-debug driver */
+/* Same pattern as scsc_mif_abs_driver but for the mmap-debug driver
+ * (registered via scsc_mif_mmap_register). */
+struct scsc_mif_mmap_driver {
+	char *name;	/* human readable driver name */
+	void (*probe)(struct scsc_mif_mmap_driver *mmap_driver, struct scsc_mif_abs *abs);
+	void (*remove)(struct scsc_mif_abs *abs);
+};
+
+extern void scsc_mif_mmap_register(struct scsc_mif_mmap_driver *mmap_driver);
+extern void scsc_mif_mmap_unregister(struct scsc_mif_mmap_driver *mmap_driver);
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mx_impl.h"
+#include "mifintrbit.h"
+#include "miframman.h"
+#include "mifmboxman.h"
+#ifdef CONFIG_SCSC_SMAPPER
+#include "mifsmapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include "mifqos.h"
+#endif
+#include "mifproc.h"
+#include "mxman.h"
+#include "mxproc.h"
+#include "srvman.h"
+#include "mxmgmt_transport.h"
+#include "gdb_transport.h"
+#include "mxlog.h"
+#include "mxlogger.h"
+#include "panicmon.h"
+#include "mxlog_transport.h"
+#include "suspendmon.h"
+
+#include "scsc/api/bt_audio.h"
+#include "mxfwconfig.h"
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#endif
+
+/* Aggregate state for a single Maxwell instance: the MIF abstraction it
+ * runs on plus every per-instance manager and transport owned by the
+ * core driver. Accessor functions below hand out pointers to members. */
+struct scsc_mx {
+	struct scsc_mif_abs *mif_abs;	/* underlying memory interface */
+	struct mifintrbit intr;		/* interrupt-bit management */
+	struct miframman ram;		/* primary MIF RAM manager */
+	struct miframman ram2;		/* secondary MIF RAM manager */
+	struct mifmboxman mbox;		/* mailbox management */
+	struct mifabox mifabox;
+#ifdef CONFIG_SCSC_SMAPPER
+	struct mifsmapper smapper;
+#endif
+#ifdef CONFIG_SCSC_QOS
+	struct mifqos qos;
+#endif
+	struct mifproc proc;
+	struct mxman mxman;
+	struct srvman srvman;
+	struct mxmgmt_transport mxmgmt_transport;
+	struct gdb_transport gdb_transport_r4;
+	struct gdb_transport gdb_transport_m4;
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+	struct gdb_transport gdb_transport_m4_1;
+#endif
+	int users;
+	struct mxlog mxlog;
+	struct mxlogger mxlogger;
+	struct panicmon panicmon;
+	struct mxlog_transport mxlog_transport;
+	struct suspendmon suspendmon;
+	struct mxfwconfig mxfwconfig;
+};
+
+
+/**
+ * scsc_mx_create - allocate and initialize a Maxwell instance bound to
+ * the given MIF abstraction.
+ *
+ * Initializes the interrupt-bit, mailbox, suspend-monitor, Maxwell
+ * manager and service manager sub-states and creates the procfs dir.
+ * Returns the new instance, or NULL on allocation failure.
+ *
+ * NOTE(review): the smapper/qos members are deinitialized in
+ * scsc_mx_destroy() but are not initialized here -- presumably they are
+ * set up later outside this chunk; confirm before relying on them.
+ */
+struct scsc_mx *scsc_mx_create(struct scsc_mif_abs *mif)
+{
+	struct scsc_mx *mx;
+
+	/* kzalloc: all embedded sub-states start zeroed */
+	mx = kzalloc(sizeof(*mx), GFP_KERNEL);
+	if (!mx)
+		return NULL;
+
+	mx->mif_abs = mif;
+
+	mifintrbit_init(&mx->intr, mif);
+
+	mifmboxman_init(&mx->mbox);
+	suspendmon_init(&mx->suspendmon, mx);
+	mxman_init(&mx->mxman, mx);
+	srvman_init(&mx->srvman, mx);
+	mifproc_create_proc_dir(mx->mif_abs);
+#ifdef CONFIG_SCSC_WLBTD
+	scsc_wlbtd_init();
+#endif
+	SCSC_TAG_DEBUG(MXMAN, "Hurray Maxwell is here with %p\n", mx);
+	return mx;
+}
+
+/**
+ * scsc_mx_destroy - tear down a Maxwell instance created by
+ * scsc_mx_create() and free it.
+ *
+ * BUG_ON a NULL argument: callers must never pass an absent instance.
+ */
+void scsc_mx_destroy(struct scsc_mx *mx)
+{
+	SCSC_TAG_DEBUG(MXMAN, "\n");
+	BUG_ON(mx == NULL);
+	mifintrbit_deinit(&mx->intr);
+	mifmboxman_deinit(scsc_mx_get_mboxman(mx));
+#ifdef CONFIG_SCSC_SMAPPER
+	mifsmapper_deinit(scsc_mx_get_smapper(mx));
+#endif
+	suspendmon_deinit(scsc_mx_get_suspendmon(mx));
+	mifproc_remove_proc_dir();
+	srvman_deinit(&mx->srvman);
+	mxman_deinit(&mx->mxman);
+#ifdef CONFIG_SCSC_WLBTD
+	scsc_wlbtd_deinit();
+#endif
+	kfree(mx);
+	SCSC_TAG_DEBUG(MXMAN, "OK\n");
+}
+
+/* Trivial accessors: hand out pointers to the sub-module contexts embedded
+ * in struct scsc_mx. Lifetime of the returned pointers equals the lifetime
+ * of the owning scsc_mx instance.
+ */
+struct scsc_mif_abs *scsc_mx_get_mif_abs(struct scsc_mx *mx)
+{
+	return mx->mif_abs;
+}
+
+struct mifintrbit *scsc_mx_get_intrbit(struct scsc_mx *mx)
+{
+	return &mx->intr;
+}
+
+/* Generic shared-RAM pool */
+struct miframman *scsc_mx_get_ramman(struct scsc_mx *mx)
+{
+	return &mx->ram;
+}
+
+/* Logging shared-RAM pool */
+struct miframman *scsc_mx_get_ramman2(struct scsc_mx *mx)
+{
+	return &mx->ram2;
+}
+
+struct mifabox *scsc_mx_get_aboxram(struct scsc_mx *mx)
+{
+	return &mx->mifabox;
+}
+
+struct mifmboxman *scsc_mx_get_mboxman(struct scsc_mx *mx)
+{
+	return &mx->mbox;
+}
+
+#ifdef CONFIG_SCSC_SMAPPER
+struct mifsmapper *scsc_mx_get_smapper(struct scsc_mx *mx)
+{
+	return &mx->smapper;
+}
+#endif
+
+#ifdef CONFIG_SCSC_QOS
+struct mifqos *scsc_mx_get_qos(struct scsc_mx *mx)
+{
+	return &mx->qos;
+}
+#endif
+
+/* Underlying struct device, obtained via the MIF abstraction (e.g. for DMA
+ * or firmware-request APIs that need a device handle).
+ */
+struct device *scsc_mx_get_device(struct scsc_mx *mx)
+{
+	return mx->mif_abs->get_mif_device(mx->mif_abs);
+}
+EXPORT_SYMBOL_GPL(scsc_mx_get_device); /* TODO: export a top-level API for this */
+
+/* More embedded-context accessors; see the comment on the group above. */
+struct mxman *scsc_mx_get_mxman(struct scsc_mx *mx)
+{
+	return &mx->mxman;
+}
+
+struct srvman *scsc_mx_get_srvman(struct scsc_mx *mx)
+{
+	return &mx->srvman;
+}
+
+struct mxmgmt_transport *scsc_mx_get_mxmgmt_transport(struct scsc_mx *mx)
+{
+	return &mx->mxmgmt_transport;
+}
+
+struct gdb_transport *scsc_mx_get_gdb_transport_r4(struct scsc_mx *mx)
+{
+	return &mx->gdb_transport_r4;
+}
+
+struct gdb_transport *scsc_mx_get_gdb_transport_m4(struct scsc_mx *mx)
+{
+	return &mx->gdb_transport_m4;
+}
+
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+struct gdb_transport *scsc_mx_get_gdb_transport_m4_1(struct scsc_mx *mx)
+{
+	return &mx->gdb_transport_m4_1;
+}
+#endif
+
+struct mxlog *scsc_mx_get_mxlog(struct scsc_mx *mx)
+{
+	return &mx->mxlog;
+}
+
+struct panicmon *scsc_mx_get_panicmon(struct scsc_mx *mx)
+{
+	return &mx->panicmon;
+}
+
+struct mxlog_transport *scsc_mx_get_mxlog_transport(struct scsc_mx *mx)
+{
+	return &mx->mxlog_transport;
+}
+
+struct mxlogger *scsc_mx_get_mxlogger(struct scsc_mx *mx)
+{
+	return &mx->mxlogger;
+}
+
+struct suspendmon *scsc_mx_get_suspendmon(struct scsc_mx *mx)
+{
+	return &mx->suspendmon;
+}
+
+struct mxfwconfig *scsc_mx_get_mxfwconfig(struct scsc_mx *mx)
+{
+	return &mx->mxfwconfig;
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/firmware.h>
+#include "scsc_mif_abs.h"
+
+/* Forward declarations: the accessors below return pointers to these
+ * opaque per-subsystem contexts owned by struct scsc_mx.
+ */
+struct device;
+struct scsc_mx;
+struct mifintrbit;
+struct miframman;
+struct mifmboxman;
+struct mxman;
+struct srvman;
+struct mxmgmt_transport;
+struct mxproc;
+struct mxfwconfig;
+
+/* Instance lifecycle: one scsc_mx per probed MIF interface */
+struct scsc_mx *scsc_mx_create(struct scsc_mif_abs *mif);
+void scsc_mx_destroy(struct scsc_mx *mx);
+struct scsc_mif_abs *scsc_mx_get_mif_abs(struct scsc_mx *mx);
+struct mifintrbit *scsc_mx_get_intrbit(struct scsc_mx *mx);
+/* NOTE(review): no definition of scsc_mx_get_muxman() nor a declaration of
+ * 'struct mifmuxman' is visible in the implementation - this prototype
+ * looks stale; confirm before relying on it.
+ */
+struct mifmuxman *scsc_mx_get_muxman(struct scsc_mx *mx);
+struct miframman *scsc_mx_get_ramman(struct scsc_mx *mx);
+struct miframman *scsc_mx_get_ramman2(struct scsc_mx *mx);
+struct mifabox *scsc_mx_get_aboxram(struct scsc_mx *mx);
+struct mifmboxman *scsc_mx_get_mboxman(struct scsc_mx *mx);
+#ifdef CONFIG_SCSC_SMAPPER
+struct mifsmapper *scsc_mx_get_smapper(struct scsc_mx *mx);
+#endif
+#ifdef CONFIG_SCSC_QOS
+struct mifqos *scsc_mx_get_qos(struct scsc_mx *mx);
+#endif
+struct device *scsc_mx_get_device(struct scsc_mx *mx);
+struct mxman *scsc_mx_get_mxman(struct scsc_mx *mx);
+struct srvman *scsc_mx_get_srvman(struct scsc_mx *mx);
+/* NOTE(review): scsc_mx_get_mxproc() is not defined in scsc_mx_impl.c -
+ * verify it exists elsewhere in the tree.
+ */
+struct mxproc *scsc_mx_get_mxproc(struct scsc_mx *mx);
+struct mxmgmt_transport *scsc_mx_get_mxmgmt_transport(struct scsc_mx *mx);
+struct gdb_transport *scsc_mx_get_gdb_transport_r4(struct scsc_mx *mx);
+struct gdb_transport *scsc_mx_get_gdb_transport_m4(struct scsc_mx *mx);
+#ifdef CONFIG_SCSC_MX450_GDB_SUPPORT
+struct gdb_transport *scsc_mx_get_gdb_transport_m4_1(struct scsc_mx *mx);
+#endif
+struct mxlog *scsc_mx_get_mxlog(struct scsc_mx *mx);
+struct mxlog_transport *scsc_mx_get_mxlog_transport(struct scsc_mx *mx);
+struct mxlogger *scsc_mx_get_mxlogger(struct scsc_mx *mx);
+struct panicmon *scsc_mx_get_panicmon(struct scsc_mx *mx);
+struct suspendmon *scsc_mx_get_suspendmon(struct scsc_mx *mx);
+struct mxfwconfig *scsc_mx_get_mxfwconfig(struct scsc_mx *mx);
+
+/* mx140 firmware-file helpers (implemented in the fileops unit) */
+int mx140_file_download_fw(struct scsc_mx *mx, void *dest, size_t dest_size, u32 *fw_image_size);
+int mx140_request_file(struct scsc_mx *mx, char *path, const struct firmware **firmp);
+int mx140_release_file(struct scsc_mx *mx, const struct firmware *firmp);
+int mx140_basedir_file(struct scsc_mx *mx);
+int mx140_exe_path(struct scsc_mx *mx, char *path, size_t len, const char *bin);
+int mx140_file_select_fw(struct scsc_mx *mx, u32 suffix);
+bool mx140_file_supported_hw(struct scsc_mx *mx, u32 hw_ver);
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_release.h>
+#include <scsc/scsc_logring.h>
+#include "scsc_mif_abs.h"
+#include "scsc_mx_impl.h"
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#endif
+
+#define SCSC_MX_CORE_MODDESC "mx140 Core Driver"
+
+/* Bookkeeping node for one registered module client (service driver) */
+struct clients_node {
+	struct list_head list;
+	struct scsc_mx_module_client *module_client;
+};
+
+/* Bookkeeping node for one live Maxwell instance */
+struct mx_node {
+	struct list_head list;
+	struct scsc_mx *mx;
+};
+
+/* Module-global state: all registered clients and all probed instances.
+ * NOTE(review): both lists are accessed without a lock - presumably
+ * probe/remove and client (un)registration are serialised by the driver
+ * core / module load order; confirm.
+ */
+static struct mx_module {
+	struct list_head clients_list;
+	struct list_head mx_list;
+} mx_module = {
+	.clients_list = LIST_HEAD_INIT(mx_module.clients_list),
+	.mx_list = LIST_HEAD_INIT(mx_module.mx_list)
+};
+
+/* Offer a freshly-probed Maxwell instance to every client registered so
+ * far; log when nobody is listening yet.
+ */
+static void scsc_mx_module_probe_registered_clients(struct scsc_mx *new_mx)
+{
+	struct clients_node *node, *tmp;
+	bool any_probed = false;
+
+	list_for_each_entry_safe(node, tmp, &mx_module.clients_list, list) {
+		node->module_client->probe(node->module_client, new_mx, SCSC_MODULE_CLIENT_REASON_HW_PROBE);
+		any_probed = true;
+	}
+
+	if (!any_probed)
+		SCSC_TAG_INFO(MXMAN, "No clients registered\n");
+}
+
+/* MIF-layer probe callback: create a Maxwell instance on top of @mif_abs,
+ * track it in mx_module.mx_list, and offer it to all registered clients.
+ * Failures are logged and silently dropped (the callback returns void).
+ */
+static void scsc_mx_module_probe(struct scsc_mif_abs_driver *abs_driver, struct scsc_mif_abs *mif_abs)
+{
+	struct scsc_mx *new_mx;
+	struct mx_node *mx_node;
+
+	/* Avoid unused parm error */
+	(void)abs_driver;
+
+	mx_node = kzalloc(sizeof(*mx_node), GFP_KERNEL);
+	if (!mx_node) {
+		/* Fix: log this failure too, for consistency with the
+		 * scsc_mx_create() failure path below (was a silent return).
+		 */
+		SCSC_TAG_ERR(MXMAN, "Error allocating mx_node\n");
+		return;
+	}
+	/* Create new mx instance */
+	new_mx = scsc_mx_create(mif_abs);
+	if (!new_mx) {
+		kfree(mx_node);
+		SCSC_TAG_ERR(MXMAN, "Error allocating new_mx\n");
+		return;
+	}
+	/* Add instance in mx_node linked list */
+	mx_node->mx = new_mx;
+
+	list_add_tail(&mx_node->list, &mx_module.mx_list);
+
+	scsc_mx_module_probe_registered_clients(new_mx);
+}
+
+/* MIF-layer remove callback: destroy every Maxwell instance that was
+ * created on top of @abs and drop its bookkeeping node.
+ */
+static void scsc_mx_module_remove(struct scsc_mif_abs *abs)
+{
+	struct mx_node *node, *tmp;
+	bool found = false;
+
+	list_for_each_entry_safe(node, tmp, &mx_module.mx_list, list) {
+		if (scsc_mx_get_mif_abs(node->mx) != abs)
+			continue;
+		found = true;
+		scsc_mx_destroy(node->mx);
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	if (!found)
+		SCSC_TAG_ERR(MXMAN, "FATAL, no match for given scsc_mif_abs\n");
+}
+
+/* Hooks registered with the MIF abstraction layer: invoked when a Maxwell
+ * interface appears or disappears.
+ */
+static struct scsc_mif_abs_driver mx_module_mif_if = {
+	.name = "mx140 driver",
+	.probe = scsc_mx_module_probe,
+	.remove = scsc_mx_module_remove,
+};
+
+/* Module entry point: announce the release tag and register with the MIF
+ * abstraction layer. Always succeeds.
+ */
+static int __init scsc_mx_module_init(void)
+{
+	SCSC_TAG_INFO(MXMAN, SCSC_MX_CORE_MODDESC " scsc_release %d.%d.%d.%d\n",
+		      SCSC_RELEASE_PRODUCT, SCSC_RELEASE_ITERATION,
+		      SCSC_RELEASE_CANDIDATE, SCSC_RELEASE_POINT);
+
+	scsc_mif_abs_register(&mx_module_mif_if);
+	return 0;
+}
+
+/* Module exit point: tear down any instances still alive, then unregister
+ * from the MIF abstraction layer.
+ */
+static void __exit scsc_mx_module_exit(void)
+{
+	struct mx_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, &mx_module.mx_list, list) {
+		scsc_mx_destroy(node->mx);
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	scsc_mif_abs_unregister(&mx_module_mif_if);
+
+	SCSC_TAG_INFO(MXMAN, SCSC_MX_CORE_MODDESC " unloaded\n");
+}
+
+/**
+ * Reset all registered service drivers by first calling the remove callback
+ * and then the probe callback. Used during recovery operations, where the
+ * chip has been reset and the service drivers must follow suit.
+ */
+int scsc_mx_module_reset(void)
+{
+	struct mx_node *mnode, *mtmp;
+	struct clients_node *cnode;
+
+	/* Phase 1: remove() every client on every live instance */
+	list_for_each_entry_safe(mnode, mtmp, &mx_module.mx_list, list)
+		list_for_each_entry(cnode, &mx_module.clients_list, list)
+			cnode->module_client->remove(cnode->module_client, mnode->mx, SCSC_MODULE_CLIENT_REASON_RECOVERY);
+
+	/* Phase 2: probe() them again now the chip is back */
+	list_for_each_entry_safe(mnode, mtmp, &mx_module.mx_list, list)
+		list_for_each_entry(cnode, &mx_module.clients_list, list)
+			cnode->module_client->probe(cnode->module_client, mnode->mx, SCSC_MODULE_CLIENT_REASON_RECOVERY);
+
+	return 0;
+}
+EXPORT_SYMBOL(scsc_mx_module_reset);
+
+/* Register a new service-driver client and immediately probe it against
+ * every Maxwell instance already alive.
+ * Returns 0 on success or -ENOMEM.
+ */
+int scsc_mx_module_register_client_module(struct scsc_mx_module_client *module_client)
+{
+	struct clients_node *node;
+	struct mx_node *mx_entry;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->module_client = module_client;
+	list_add_tail(&node->list, &mx_module.clients_list);
+
+	list_for_each_entry(mx_entry, &mx_module.mx_list, list)
+		module_client->probe(module_client, mx_entry->mx, SCSC_MODULE_CLIENT_REASON_HW_PROBE);
+
+	return 0;
+}
+EXPORT_SYMBOL(scsc_mx_module_register_client_module);
+
+/* Unregister @module_client: detach it from every live instance via its
+ * remove() hook, then drop its bookkeeping node.
+ */
+void scsc_mx_module_unregister_client_module(struct scsc_mx_module_client *module_client)
+{
+	struct clients_node *cnode, *ctmp;
+	struct mx_node *mnode, *mtmp;
+
+	list_for_each_entry_safe(cnode, ctmp, &mx_module.clients_list, list) {
+		if (cnode->module_client != module_client)
+			continue;
+
+		list_for_each_entry_safe(mnode, mtmp, &mx_module.mx_list, list)
+			module_client->remove(module_client, mnode->mx, SCSC_MODULE_CLIENT_REASON_HW_REMOVE);
+
+		list_del(&cnode->list);
+		kfree(cnode);
+	}
+}
+EXPORT_SYMBOL(scsc_mx_module_unregister_client_module);
+
+/* Kernel module registration boilerplate */
+module_init(scsc_mx_module_init);
+module_exit(scsc_mx_module_exit);
+
+MODULE_DESCRIPTION(SCSC_MX_CORE_MODDESC);
+MODULE_AUTHOR("SCSC");
+MODULE_LICENSE("GPL");
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/firmware.h>
+#ifdef CONFIG_ANDROID
+#include <linux/wakelock.h>
+#endif
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_logring.h>
+
+#include "mxman.h"
+#include "scsc_mx_impl.h"
+#include "mifintrbit.h"
+#include "miframman.h"
+#include "mifmboxman.h"
+#ifdef CONFIG_SCSC_SMAPPER
+#include "mifsmapper.h"
+#endif
+#ifdef CONFIG_SCSC_QOS
+#include "mifqos.h"
+#endif
+#include "mxlogger.h"
+#include "srvman.h"
+#include "servman_messages.h"
+#include "mxmgmt_transport.h"
+
+/* Timeout (ms) waiting for firmware start/stop confirmations; 0 selects an
+ * infinite interruptible wait (development only). Runtime-writable module
+ * parameter.
+ */
+static ulong sm_completion_timeout_ms = 1000;
+module_param(sm_completion_timeout_ms, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sm_completion_timeout_ms, "Timeout Service Manager start/stop (ms) - default 1000. 0 = infinite");
+
+/* Marker stored in a scsc_mifram_ref when an allocation failed */
+#define SCSC_MIFRAM_INVALID_REF -1
+#define SCSC_MX_SERVICE_RECOVERY_TIMEOUT 20000 /* 20 seconds */
+
+/* Compatibility shim for kernels predating reinit_completion() */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+#define reinit_completion(completion) INIT_COMPLETION(*(completion))
+#endif
+
+/* One open service (WLAN, BT, ...) on a Maxwell instance. Linked into
+ * srvman->service_list; the completions are signalled by
+ * srv_message_handler() when the firmware confirms start/stop.
+ */
+struct scsc_service {
+	struct list_head list;
+	struct scsc_mx *mx;			/* owning instance */
+	enum scsc_service_id id;
+	struct scsc_service_client *client;	/* caller-provided callbacks */
+	struct completion sm_msg_start_completion;
+	struct completion sm_msg_stop_completion;
+};
+
+/* Prepare the service manager embedded in @srvman for use by instance @mx:
+ * empty service list, API/list mutexes, and (Android) the wakelock used to
+ * keep start/stop exchanges alive across suspend.
+ */
+void srvman_init(struct srvman *srvman, struct scsc_mx *mx)
+{
+	SCSC_TAG_INFO(MXMAN, "\n");
+	srvman->mx = mx;
+	INIT_LIST_HEAD(&srvman->service_list);
+	mutex_init(&srvman->service_list_mutex);
+	mutex_init(&srvman->api_access_mutex);
+
+#ifdef CONFIG_ANDROID
+	wake_lock_init(&srvman->sm_wake_lock, WAKE_LOCK_SUSPEND, "srvman_wakelock");
+#endif
+}
+
+/* Free any services still registered and destroy the srvman locks.
+ * NOTE(review): the list is walked without service_list_mutex - this
+ * assumes no concurrent API activity at deinit time (instance being
+ * destroyed); confirm against the callers of scsc_mx_destroy().
+ */
+void srvman_deinit(struct srvman *srvman)
+{
+	struct scsc_service *service, *next;
+
+	SCSC_TAG_INFO(MXMAN, "\n");
+	list_for_each_entry_safe(service, next, &srvman->service_list, list) {
+		list_del(&service->list);
+		kfree(service);
+	}
+	mutex_destroy(&srvman->api_access_mutex);
+	mutex_destroy(&srvman->service_list_mutex);
+
+#ifdef CONFIG_ANDROID
+	wake_lock_destroy(&srvman->sm_wake_lock);
+#endif
+}
+
+void srvman_set_error(struct srvman *srvman)
+{
+ struct scsc_service *service;
+
+ SCSC_TAG_INFO(MXMAN, "\n");
+ srvman->error = true;
+ mutex_lock(&srvman->service_list_mutex);
+ list_for_each_entry(service, &srvman->service_list, list) {
+ complete(&service->sm_msg_start_completion);
+ complete(&service->sm_msg_stop_completion);
+ }
+ mutex_unlock(&srvman->service_list_mutex);
+}
+
+/* Clear the manager-wide error flag once recovery has completed. */
+void srvman_clear_error(struct srvman *srvman)
+{
+	SCSC_TAG_INFO(MXMAN, "\n");
+	srvman->error = false;
+}
+
+/* Wait for the firmware's SM_MSG_START_CFM for @service.
+ * Returns 0 on success or -ETIMEDOUT. sm_completion_timeout_ms == 0
+ * selects an infinite interruptible wait (development only).
+ */
+static int wait_for_sm_msg_start_cfm(struct scsc_service *service)
+{
+	int r;
+
+	if (0 == sm_completion_timeout_ms) {
+		/* Zero implies infinite wait, for development use only.
+		 * r = -ERESTARTSYS if interrupted (e.g. Ctrl-C), 0 if completed
+		 */
+		r = wait_for_completion_interruptible(&service->sm_msg_start_completion);
+		if (r == -ERESTARTSYS) {
+			/* Paranoid sink of any pending event skipped by the interrupted wait */
+			if (wait_for_completion_timeout(&service->sm_msg_start_completion, HZ / 2) == 0) {
+				SCSC_TAG_ERR(MXMAN, "timed out\n");
+				return -ETIMEDOUT;
+			}
+			/* Fix: the completion did arrive, so report success.
+			 * Previously the positive remaining-jiffies count from
+			 * wait_for_completion_timeout() was returned, which
+			 * callers checking 'if (r)' treated as a failure.
+			 */
+			r = 0;
+		}
+		return r;
+	}
+	r = wait_for_completion_timeout(&service->sm_msg_start_completion, msecs_to_jiffies(sm_completion_timeout_ms));
+	if (r == 0) {
+		SCSC_TAG_ERR(MXMAN, "timeout\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* Wait for the firmware's SM_MSG_STOP_CFM for @service.
+ * Returns 0 on success or -ETIMEDOUT. sm_completion_timeout_ms == 0
+ * selects an infinite interruptible wait (development only).
+ */
+static int wait_for_sm_msg_stop_cfm(struct scsc_service *service)
+{
+	int r;
+
+	if (0 == sm_completion_timeout_ms) {
+		/* Zero implies infinite wait, for development use only.
+		 * r = -ERESTARTSYS if interrupted (e.g. Ctrl-C), 0 if completed
+		 */
+		r = wait_for_completion_interruptible(&service->sm_msg_stop_completion);
+		if (r == -ERESTARTSYS) {
+			/* Paranoid sink of any pending event skipped by the interrupted wait */
+			if (wait_for_completion_timeout(&service->sm_msg_stop_completion, HZ / 2) == 0) {
+				SCSC_TAG_ERR(MXMAN, "timed out\n");
+				return -ETIMEDOUT;
+			}
+			/* Fix: the completion did arrive, so report success.
+			 * Previously the positive remaining-jiffies count from
+			 * wait_for_completion_timeout() was returned, which
+			 * callers checking 'if (r)' treated as a failure.
+			 */
+			r = 0;
+		}
+		return r;
+	}
+	r = wait_for_completion_timeout(&service->sm_msg_stop_completion, msecs_to_jiffies(sm_completion_timeout_ms));
+	if (r == 0) {
+		SCSC_TAG_ERR(MXMAN, "timeout\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/* Send SM_MSG_START_REQ for @service (carrying @ref, the MIF RAM reference
+ * to the service's shared area) over the management transport, and block
+ * until the firmware confirms or the wait times out.
+ * Returns 0 on success or a negative error from the wait.
+ */
+static int send_sm_msg_start_blocking(struct scsc_service *service, scsc_mifram_ref ref)
+{
+	struct scsc_mx *mx = service->mx;
+	struct mxmgmt_transport *mxmgmt_transport = scsc_mx_get_mxmgmt_transport(mx);
+	int r;
+	struct sm_msg_packet message = { .service_id = service->id,
+					 .msg = SM_MSG_START_REQ,
+					 .optional_data = ref };
+
+	reinit_completion(&service->sm_msg_start_completion);
+
+	/* Send to FW in MM stream */
+	mxmgmt_transport_send(mxmgmt_transport, MMTRANS_CHAN_ID_SERVICE_MANAGEMENT, &message, sizeof(message));
+	r = wait_for_sm_msg_start_cfm(service);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "wait_for_sm_msg_start_cfm() failed: r=%d\n", r);
+
+		/* Report the error in order to get a moredump. Avoid auto-recovering this type of failure */
+		if (mxman_recovery_disabled())
+			scsc_mx_service_service_failed(service, "SM_MSG_START_CFM timeout");
+	}
+	return r;
+}
+
+/* Send SM_MSG_STOP_REQ for @service and block for the confirmation.
+ * Short-circuits to success when mxman is already in the FAILED state
+ * (the firmware is gone, so no confirmation can ever arrive).
+ * Returns 0 on success or a negative error from the wait.
+ */
+static int send_sm_msg_stop_blocking(struct scsc_service *service)
+{
+	struct scsc_mx *mx = service->mx;
+	struct mxman *mxman = scsc_mx_get_mxman(mx);
+	struct mxmgmt_transport *mxmgmt_transport = scsc_mx_get_mxmgmt_transport(mx);
+	int r;
+	struct sm_msg_packet message = { .service_id = service->id,
+					 .msg = SM_MSG_STOP_REQ,
+					 .optional_data = 0 };
+
+	if (mxman->mxman_state == MXMAN_STATE_FAILED)
+		return 0;
+
+	reinit_completion(&service->sm_msg_stop_completion);
+
+	/* Send to FW in MM stream */
+	mxmgmt_transport_send(mxmgmt_transport, MMTRANS_CHAN_ID_SERVICE_MANAGEMENT, &message, sizeof(message));
+	r = wait_for_sm_msg_stop_cfm(service);
+	if (r)
+		SCSC_TAG_ERR(MXMAN, "wait_for_sm_msg_stop_cfm() for service=%p service->id=%d failed: r=%d\n", service, service->id, r);
+	return r;
+}
+
+/*
+ * Receive handler for messages from the FW along the maxwell management
+ * transport. Looks up the target service by id and signals the matching
+ * start/stop completion. @data is the owning srvman.
+ */
+static void srv_message_handler(const void *message, void *data)
+{
+	struct srvman *srvman = (struct srvman *)data;
+	struct scsc_service *service;
+	const struct sm_msg_packet *msg = message;
+	bool found = false;
+
+	mutex_lock(&srvman->service_list_mutex);
+	list_for_each_entry(service, &srvman->service_list, list) {
+		if (service->id == msg->service_id) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		SCSC_TAG_ERR(MXMAN, "No service for msg->service_id=%d", msg->service_id);
+		mutex_unlock(&srvman->service_list_mutex);
+		return;
+	}
+	/* Forward the message to the applicable service to deal with */
+	switch (msg->msg) {
+	case SM_MSG_START_CFM:
+		SCSC_TAG_INFO(MXMAN, "Received SM_MSG_START_CFM message service=%p with service_id=%d from the firmware\n",
+			      service, msg->service_id);
+		complete(&service->sm_msg_start_completion);
+		break;
+	case SM_MSG_STOP_CFM:
+		SCSC_TAG_INFO(MXMAN, "Received SM_MSG_STOP_CFM message for service=%p with service_id=%d from the firmware\n",
+			      service, msg->service_id);
+		complete(&service->sm_msg_stop_completion);
+		break;
+	default:
+		/* HERE: Unknown message, raise fault.
+		 * Fix: the last two arguments were swapped relative to the
+		 * format string (service_id was printed as msg->msg and
+		 * vice versa).
+		 */
+		SCSC_TAG_WARNING(MXMAN, "Received unknown message for service=%p with service_id=%d from the firmware: msg->msg=%d\n",
+				 service, msg->service_id, msg->msg);
+		break;
+	}
+	mutex_unlock(&srvman->service_list_mutex);
+}
+
+/* Ask the firmware to start @service: sends SM_MSG_START_REQ carrying @ref
+ * and blocks until SM_MSG_START_CFM (or timeout).
+ * Returns 0 on success, -EILSEQ if an unhandled firmware failure is
+ * pending, or the error from the blocking send.
+ * Serialised by api_access_mutex; holds an Android wakelock so the
+ * exchange survives a host suspend.
+ */
+int scsc_mx_service_start(struct scsc_service *service, scsc_mifram_ref ref)
+{
+	struct scsc_mx *mx = service->mx;
+	struct srvman *srvman = scsc_mx_get_srvman(mx);
+	struct mxman *mxman = scsc_mx_get_mxman(service->mx);
+	int r;
+	struct timeval tval = {};
+
+	SCSC_TAG_INFO(MXMAN, "%d\n", service->id);
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	/* CHV test mode: firmware is driven externally, skip the handshake */
+	if (chv_run)
+		return 0;
+#endif
+	mutex_lock(&srvman->api_access_mutex);
+#ifdef CONFIG_ANDROID
+	wake_lock(&srvman->sm_wake_lock);
+#endif
+	if (srvman->error) {
+		/* A previous f/w panic is unhandled: refuse and log its record */
+		tval = ns_to_timeval(mxman->last_panic_time);
+		SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure scsc_panic_code=0x%x happened at [%6lu.%06ld]\n",
+			     mxman->scsc_panic_code, tval.tv_sec, tval.tv_usec);
+
+		/* Print the last panic record to help track ancient failures */
+		mxman_show_last_panic(mxman);
+
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+		mutex_unlock(&srvman->api_access_mutex);
+		return -EILSEQ;
+	}
+
+	r = send_sm_msg_start_blocking(service, ref);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "send_sm_msg_start_blocking() failed: r=%d\n", r);
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+		mutex_unlock(&srvman->api_access_mutex);
+		return r;
+	}
+
+#ifdef CONFIG_ANDROID
+	wake_unlock(&srvman->sm_wake_lock);
+#endif
+	mutex_unlock(&srvman->api_access_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(scsc_mx_service_start);
+
+/* Render a newline-separated list of the currently open service names into
+ * @buf (at most @bufsz bytes, scnprintf-bounded). Returns bytes written.
+ * NOTE(review): the list is walked without service_list_mutex, as in the
+ * original - verify callers serialise this against open/close.
+ */
+int scsc_mx_list_services(struct mxman *mxman_p, char *buf, const size_t bufsz)
+{
+	struct scsc_service *service;
+	int pos = 0;
+	struct srvman *srvman_p = scsc_mx_get_srvman(mxman_p->mx);
+
+	/* Plain iteration: nothing is removed while walking, so the _safe
+	 * variant used previously was unnecessary.
+	 */
+	list_for_each_entry(service, &srvman_p->service_list, list) {
+		switch (service->id) {
+		case SCSC_SERVICE_ID_NULL:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "null");
+			break;
+		case SCSC_SERVICE_ID_WLAN:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "wlan");
+			break;
+		case SCSC_SERVICE_ID_BT:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "bt");
+			break;
+		case SCSC_SERVICE_ID_ANT:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "ant");
+			break;
+		case SCSC_SERVICE_ID_R4DBG:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "r4dbg");
+			break;
+		case SCSC_SERVICE_ID_ECHO:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "echo");
+			break;
+		case SCSC_SERVICE_ID_DBG_SAMPLER:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "dbg sampler");
+			break;
+		case SCSC_SERVICE_ID_CLK20MHZ:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "clk20mhz");
+			break;
+		case SCSC_SERVICE_ID_FM:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "fm");
+			break;
+		case SCSC_SERVICE_ID_INVALID:
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "invalid");
+			break;
+		default:
+			/* Fix: be robust against ids added to the enum later;
+			 * previously unknown ids were silently omitted.
+			 */
+			pos += scnprintf(buf + pos, bufsz - pos, "%s\n", "unknown");
+			break;
+		}
+	}
+	return pos;
+}
+EXPORT_SYMBOL(scsc_mx_list_services);
+
+/* Ask the firmware to stop @service: sends SM_MSG_STOP_REQ and blocks for
+ * SM_MSG_STOP_CFM. Returns 0 on success; -EPERM when a prior firmware
+ * failure is pending and recovery is disabled (caller must not wait for
+ * recovery); -EILSEQ when a prior failure is pending but recovery will
+ * happen; -EIO when the stop exchange itself failed.
+ * Serialised by api_access_mutex; holds an Android wakelock throughout.
+ */
+int scsc_mx_service_stop(struct scsc_service *service)
+{
+	struct scsc_mx *mx = service->mx;
+	struct srvman *srvman = scsc_mx_get_srvman(mx);
+	struct mxman *mxman = scsc_mx_get_mxman(service->mx);
+	int r;
+	struct timeval tval = {};
+
+	SCSC_TAG_INFO(MXMAN, "%d\n", service->id);
+#ifdef CONFIG_SCSC_CHV_SUPPORT
+	/* CHV test mode: firmware is driven externally, skip the handshake */
+	if (chv_run)
+		return 0;
+#endif
+	mutex_lock(&srvman->api_access_mutex);
+#ifdef CONFIG_ANDROID
+	wake_lock(&srvman->sm_wake_lock);
+#endif
+	if (srvman->error) {
+		tval = ns_to_timeval(mxman->last_panic_time);
+		SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure scsc_panic_code=0x%x happened at [%6lu.%06ld]\n",
+			     mxman->scsc_panic_code, tval.tv_sec, tval.tv_usec);
+
+		/* Print the last panic record to help track ancient failures */
+		mxman_show_last_panic(mxman);
+
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+		mutex_unlock(&srvman->api_access_mutex);
+
+		/* Return a special status to allow caller recovery logic to know
+		 * that there will never be a recovery
+		 */
+		if (mxman_recovery_disabled()) {
+			SCSC_TAG_ERR(MXMAN, "recovery disabled, return -EPERM (%d)\n", -EPERM);
+			return -EPERM; /* failed due to prior failure, recovery disabled */
+		} else {
+			return -EILSEQ; /* operation rejected due to prior failure */
+		}
+	}
+
+	r = send_sm_msg_stop_blocking(service);
+	if (r) {
+		SCSC_TAG_ERR(MXMAN, "send_sm_msg_stop_blocking() failed: r=%d\n", r);
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+		mutex_unlock(&srvman->api_access_mutex);
+		return -EIO; /* operation failed */
+	}
+
+#ifdef CONFIG_ANDROID
+	wake_unlock(&srvman->sm_wake_lock);
+#endif
+	mutex_unlock(&srvman->api_access_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(scsc_mx_service_stop);
+
+
+/* Returns 0 if Suspend succeeded, otherwise return error */
+int srvman_suspend_services(struct srvman *srvman)
+{
+	int ret = 0;
+	struct scsc_service *service;
+
+	SCSC_TAG_INFO(MXMAN, "\n");
+	mutex_lock(&srvman->service_list_mutex);
+	list_for_each_entry(service, &srvman->service_list, list) {
+		if (service->client->suspend) {
+			ret = service->client->suspend(service->client);
+			/* If any service returns error message and call resume callbacks */
+			if (ret) {
+				/* NOTE(review): this rollback re-uses the outer
+				 * iterator and resumes ALL services - including
+				 * ones whose suspend hook was never reached and
+				 * the one that just failed. Confirm clients
+				 * tolerate resume-without-suspend; a reverse
+				 * walk from the failure point may be intended.
+				 */
+				list_for_each_entry(service, &srvman->service_list, list) {
+					if (service->client->resume)
+						service->client->resume(service->client);
+				}
+				SCSC_TAG_INFO(MXMAN, "Service client suspend failure ret: %d\n", ret);
+				mutex_unlock(&srvman->service_list_mutex);
+				return ret;
+			}
+		}
+	}
+
+	mutex_unlock(&srvman->service_list_mutex);
+	SCSC_TAG_INFO(MXMAN, "OK\n");
+	return 0;
+}
+
+/* Returns always 0. Extend API and return value if required */
+int srvman_resume_services(struct srvman *srvman)
+{
+	struct scsc_service *sv;
+
+	SCSC_TAG_INFO(MXMAN, "\n");
+
+	/* Invoke each client's optional resume hook under the list lock */
+	mutex_lock(&srvman->service_list_mutex);
+	list_for_each_entry(sv, &srvman->service_list, list)
+		if (sv->client->resume)
+			sv->client->resume(sv->client);
+	mutex_unlock(&srvman->service_list_mutex);
+
+	SCSC_TAG_INFO(MXMAN, "OK\n");
+	return 0;
+}
+
+/* Called on firmware failure: tell every client to stop via its
+ * stop_on_failure() hook, and re-arm the recovery completion that
+ * scsc_mx_service_open() waiters block on.
+ */
+void srvman_freeze_services(struct srvman *srvman)
+{
+	struct scsc_service *service;
+	struct mxman *mxman = scsc_mx_get_mxman(srvman->mx);
+
+	SCSC_TAG_INFO(MXMAN, "\n");
+	mutex_lock(&srvman->service_list_mutex);
+	list_for_each_entry(service, &srvman->service_list, list) {
+		service->client->stop_on_failure(service->client);
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+	reinit_completion(&mxman->recovery_completion);
+#else
+	INIT_COMPLETION(mxman->recovery_completion);
+#endif
+	mutex_unlock(&srvman->service_list_mutex);
+	SCSC_TAG_INFO(MXMAN, "OK\n");
+}
+
+/* Recovery is complete: notify every client via failure_reset() so it can
+ * restart its service, passing along the panic code of the failure.
+ */
+void srvman_unfreeze_services(struct srvman *srvman, u16 scsc_panic_code)
+{
+	struct scsc_service *sv;
+
+	SCSC_TAG_INFO(MXMAN, "\n");
+
+	mutex_lock(&srvman->service_list_mutex);
+	list_for_each_entry(sv, &srvman->service_list, list)
+		sv->client->failure_reset(sv->client, scsc_panic_code);
+	mutex_unlock(&srvman->service_list_mutex);
+
+	SCSC_TAG_INFO(MXMAN, "OK\n");
+}
+
+/** Signal a failure detected by the Client. This will trigger the systemwide
+ * failure handling procedure: _All_ Clients will be called back via
+ * their stop_on_failure() handler as a side-effect.
+ */
+void scsc_mx_service_service_failed(struct scsc_service *service, const char *reason)
+{
+	struct scsc_mx *mx = service->mx;
+	struct srvman *srvman = scsc_mx_get_srvman(mx);
+	u16 host_panic_code;
+
+	/* Synthesise a host-side panic code: HOST origin marker in the top
+	 * bit, failing service id in the syserr service field.
+	 */
+	host_panic_code = (SCSC_PANIC_CODE_HOST << 15) | (service->id << SCSC_SYSERR_HOST_SERVICE_SHIFT);
+
+	/* Latch the error first so concurrent API calls are refused */
+	srvman_set_error(srvman);
+	switch (service->id) {
+	case SCSC_SERVICE_ID_WLAN:
+		SCSC_TAG_INFO(MXMAN, "WLAN: %s\n", ((reason != NULL) ? reason : ""));
+		break;
+	case SCSC_SERVICE_ID_BT:
+		SCSC_TAG_INFO(MXMAN, "BT: %s\n", ((reason != NULL) ? reason : ""));
+		break;
+	default:
+		SCSC_TAG_INFO(MXMAN, "service id %d failed\n", service->id);
+		break;
+
+	}
+
+	SCSC_TAG_INFO(MXMAN, "Reporting host hang code 0x%02x\n", host_panic_code);
+
+	/* Escalate to mxman: triggers moredump/recovery handling */
+	mxman_fail(scsc_mx_get_mxman(mx), host_panic_code, reason);
+}
+EXPORT_SYMBOL(scsc_mx_service_service_failed);
+
+
+/* Close @service: remove it from the srvman list, free it, and drop the
+ * mxman user reference (which may power the chip down if this was the last
+ * user). When the last service closes, the SM channel handler is
+ * unregistered from the management transport.
+ * Returns 0 on success; -EPERM/-EIO when a prior firmware failure is
+ * pending (distinguishing recovery-disabled from recovery-pending).
+ * Serialised by api_access_mutex; holds an Android wakelock throughout.
+ */
+int scsc_mx_service_close(struct scsc_service *service)
+{
+	struct mxman *mxman = scsc_mx_get_mxman(service->mx);
+	struct scsc_mx *mx = service->mx;
+	struct srvman *srvman = scsc_mx_get_srvman(mx);
+	bool empty;
+	struct timeval tval = {};
+
+	SCSC_TAG_INFO(MXMAN, "%d\n", service->id);
+
+	mutex_lock(&srvman->api_access_mutex);
+#ifdef CONFIG_ANDROID
+	wake_lock(&srvman->sm_wake_lock);
+#endif
+
+	if (srvman->error) {
+		tval = ns_to_timeval(mxman->last_panic_time);
+		SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure scsc_panic_code=0x%x happened at [%6lu.%06ld]\n",
+			     mxman->scsc_panic_code, tval.tv_sec, tval.tv_usec);
+
+		/* Print the last panic record to help track ancient failures */
+		mxman_show_last_panic(mxman);
+
+		/* NOTE(review): this path releases the mutex before the
+		 * wakelock, the reverse of the other error paths in this
+		 * file - harmless, but inconsistent.
+		 */
+		mutex_unlock(&srvman->api_access_mutex);
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+
+		/* Return a special status when recovery is disabled, to allow
+		 * calling recovery logic to be aware that recovery is disabled,
+		 * hence not wait for recovery events.
+		 */
+		if (mxman_recovery_disabled()) {
+			SCSC_TAG_ERR(MXMAN, "recovery disabled, return -EPERM (%d)\n", -EPERM);
+			return -EPERM; /* rejected due to prior failure, recovery disabled */
+		} else {
+			return -EIO;
+		}
+	}
+
+	/* remove the service from the list and deallocate the service memory */
+	mutex_lock(&srvman->service_list_mutex);
+	list_del(&service->list);
+	empty = list_empty(&srvman->service_list);
+	mutex_unlock(&srvman->service_list_mutex);
+	if (empty) {
+		/* unregister channel handler */
+		mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_SERVICE_MANAGEMENT,
+							  NULL, NULL);
+	}
+
+	kfree(service);
+	mxman_close(mxman);
+#ifdef CONFIG_ANDROID
+	wake_unlock(&srvman->sm_wake_lock);
+#endif
+	mutex_unlock(&srvman->api_access_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(scsc_mx_service_close);
+
+/* Consider move to a public scsc_mx interface */
+/* Open service @id on instance @mx with callbacks @client: ensures the
+ * firmware is running (mxman_open), registers the SM channel handler when
+ * this is the first service, and links the new service into srvman.
+ * Returns the new service, or NULL with *status holding the error
+ * (-EILSEQ prior failure, -EIO recovery timeout, -ENOMEM, or mxman_open's
+ * error).
+ * NOTE(review): on the success path *status is left untouched - callers
+ * presumably only read it when NULL is returned; confirm.
+ */
+struct scsc_service *scsc_mx_service_open(struct scsc_mx *mx, enum scsc_service_id id, struct scsc_service_client *client, int *status)
+{
+	int ret;
+	struct scsc_service *service;
+	struct srvman *srvman = scsc_mx_get_srvman(mx);
+	struct mxman *mxman = scsc_mx_get_mxman(mx);
+	bool empty;
+	struct timeval tval = {};
+
+	SCSC_TAG_INFO(MXMAN, "%d\n", id);
+
+	mutex_lock(&srvman->api_access_mutex);
+#ifdef CONFIG_ANDROID
+	wake_lock(&srvman->sm_wake_lock);
+#endif
+	if (srvman->error) {
+		/* A previous f/w panic is unhandled: refuse and log its record */
+		tval = ns_to_timeval(mxman->last_panic_time);
+		SCSC_TAG_ERR(MXMAN, "error: refused due to previous f/w failure scsc_panic_code=0x%x happened at [%6lu.%06ld]\n",
+			     mxman->scsc_panic_code, tval.tv_sec, tval.tv_usec);
+		/* Print the last panic record to help track ancient failures */
+		mxman_show_last_panic(mxman);
+#ifdef CONFIG_ANDROID
+		wake_unlock(&srvman->sm_wake_lock);
+#endif
+		mutex_unlock(&srvman->api_access_mutex);
+		*status = -EILSEQ;
+		return NULL;
+	}
+
+	if (mxman->mxman_state == MXMAN_STATE_FAILED) {
+		int r;
+
+		/* Recovery in progress: drop the API lock and wait for it to
+		 * finish before retrying the open.
+		 */
+		mutex_unlock(&srvman->api_access_mutex);
+		r = wait_for_completion_timeout(&mxman->recovery_completion,
+						msecs_to_jiffies(SCSC_MX_SERVICE_RECOVERY_TIMEOUT));
+		if (r == 0) {
+			SCSC_TAG_ERR(MXMAN, "Recovery timeout\n");
+#ifdef CONFIG_ANDROID
+			wake_unlock(&srvman->sm_wake_lock);
+#endif
+			*status = -EIO;
+			return NULL;
+		}
+
+		mutex_lock(&srvman->api_access_mutex);
+	}
+
+	service = kmalloc(sizeof(struct scsc_service), GFP_KERNEL);
+	if (service) {
+		/* MaxwellManager Should allocate Mem and download FW */
+		ret = mxman_open(mxman);
+		if (ret) {
+			kfree(service);
+#ifdef CONFIG_ANDROID
+			wake_unlock(&srvman->sm_wake_lock);
+#endif
+			mutex_unlock(&srvman->api_access_mutex);
+			*status = ret;
+			return NULL;
+		}
+		/* Initialise service struct here */
+		service->mx = mx;
+		service->id = id;
+		service->client = client;
+		init_completion(&service->sm_msg_start_completion);
+		init_completion(&service->sm_msg_stop_completion);
+		/* First service to open registers the SM message handler */
+		mutex_lock(&srvman->service_list_mutex);
+		empty = list_empty(&srvman->service_list);
+		mutex_unlock(&srvman->service_list_mutex);
+		if (empty)
+			mxmgmt_transport_register_channel_handler(scsc_mx_get_mxmgmt_transport(mx), MMTRANS_CHAN_ID_SERVICE_MANAGEMENT,
+								  &srv_message_handler, srvman);
+		mutex_lock(&srvman->service_list_mutex);
+		list_add_tail(&service->list, &srvman->service_list);
+		mutex_unlock(&srvman->service_list_mutex);
+	} else
+		*status = -ENOMEM;
+
+#ifdef CONFIG_ANDROID
+	wake_unlock(&srvman->sm_wake_lock);
+#endif
+	mutex_unlock(&srvman->api_access_mutex);
+
+	return service;
+}
+EXPORT_SYMBOL(scsc_mx_service_open);
+
+/* Return the BT audio shared structure held in the instance's ABOX RAM. */
+struct scsc_bt_audio_abox *scsc_mx_service_get_bt_audio_abox(struct scsc_service *service)
+{
+	return scsc_mx_get_aboxram(service->mx)->aboxram;
+}
+
+/* Return the instance-wide ABOX RAM descriptor for this service's mx. */
+struct mifabox *scsc_mx_service_get_aboxram(struct scsc_service *service)
+{
+	return scsc_mx_get_aboxram(service->mx);
+}
+
+/**
+ * Allocate a contiguous block of SDRAM accessible to Client Driver
+ *
+ * @flags selects the backing pool (MIFRAMMAN_MEM_POOL_GENERIC or
+ * MIFRAMMAN_MEM_POOL_LOGGING). When allocation fails, beside returning
+ * -ENOMEM, the IN-param 'ref' is cleared to an INVALID value that can be
+ * safely fed to the companion function scsc_mx_service_mifram_free().
+ */
+int scsc_mx_service_mifram_alloc_extended(struct scsc_service *service, size_t nbytes, scsc_mifram_ref *ref, u32 align, uint32_t flags)
+{
+	struct scsc_mx *mx = service->mx;
+	void *mem;
+	int ret;
+	struct miframman *ramman;
+
+	/* Pick the RAM manager for the requested pool */
+	if (flags & MIFRAMMAN_MEM_POOL_GENERIC) {
+		ramman = scsc_mx_get_ramman(mx);
+	} else if (flags & MIFRAMMAN_MEM_POOL_LOGGING) {
+		ramman = scsc_mx_get_ramman2(mx);
+	} else {
+		/* Fix: %u - flags is unsigned (uint32_t), %d was a printk
+		 * format mismatch; also add the missing newline.
+		 */
+		SCSC_TAG_ERR(MXMAN, "Unsupported flags value: %u\n", flags);
+		*ref = SCSC_MIFRAM_INVALID_REF;
+		return -ENOMEM;
+	}
+
+	mem = miframman_alloc(ramman, nbytes, align, service->id);
+	if (!mem) {
+		SCSC_TAG_ERR(MXMAN, "miframman_alloc() failed\n");
+		*ref = SCSC_MIFRAM_INVALID_REF;
+		return -ENOMEM;
+	}
+
+	SCSC_TAG_DEBUG(MXMAN, "Allocated mem %p\n", mem);
+
+	/* Transform native pointer and get mifram_ref type */
+	ret = scsc_mx_service_mif_ptr_to_addr(service, mem, ref);
+	if (ret) {
+		SCSC_TAG_ERR(MXMAN, "scsc_mx_service_mif_ptr_to_addr() failed: ret=%d\n", ret);
+		miframman_free(ramman, mem);
+		*ref = SCSC_MIFRAM_INVALID_REF;
+	} else {
+		SCSC_TAG_DEBUG(MXMAN, "mem %p ref %d\n", mem, *ref);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(scsc_mx_service_mifram_alloc_extended);
+
+int scsc_mx_service_mifram_alloc(struct scsc_service *service, size_t nbytes, scsc_mifram_ref *ref, u32 align)
+{
+ return scsc_mx_service_mifram_alloc_extended(service, nbytes, ref, align, MIFRAMMAN_MEM_POOL_GENERIC);
+}
+EXPORT_SYMBOL(scsc_mx_service_mifram_alloc);
+
+/** Free a contiguous block of SDRAM */
+void scsc_mx_service_mifram_free_extended(struct scsc_service *service, scsc_mifram_ref ref, uint32_t flags)
+{
+ struct scsc_mx *mx = service->mx;
+ void *mem;
+ struct miframman *ramman;
+
+ if (flags & MIFRAMMAN_MEM_POOL_GENERIC) {
+ ramman = scsc_mx_get_ramman(mx);
+ } else if (flags & MIFRAMMAN_MEM_POOL_LOGGING) {
+ ramman = scsc_mx_get_ramman2(mx);
+ } else {
+ SCSC_TAG_ERR(MXMAN, "Unsupported flags value: %d", flags);
+ return;
+ }
+
+ mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+
+ SCSC_TAG_DEBUG(MXMAN, "**** Freeing %p\n", mem);
+
+ miframman_free(ramman, mem);
+}
+EXPORT_SYMBOL(scsc_mx_service_mifram_free_extended);
+
+void scsc_mx_service_mifram_free(struct scsc_service *service, scsc_mifram_ref ref)
+{
+ scsc_mx_service_mifram_free_extended(service, ref, MIFRAMMAN_MEM_POOL_GENERIC);
+}
+EXPORT_SYMBOL(scsc_mx_service_mifram_free);
+
+/* MIF ALLOCATIONS */
+bool scsc_mx_service_alloc_mboxes(struct scsc_service *service, int n, int *first_mbox_index)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifmboxman_alloc_mboxes(scsc_mx_get_mboxman(mx), n, first_mbox_index);
+}
+EXPORT_SYMBOL(scsc_mx_service_alloc_mboxes);
+
+void scsc_service_free_mboxes(struct scsc_service *service, int n, int first_mbox_index)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifmboxman_free_mboxes(scsc_mx_get_mboxman(mx), first_mbox_index, n);
+}
+EXPORT_SYMBOL(scsc_service_free_mboxes);
+
+u32 *scsc_mx_service_get_mbox_ptr(struct scsc_service *service, int mbox_index)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mifmboxman_get_mbox_ptr(scsc_mx_get_mboxman(mx), mif_abs, mbox_index);
+}
+EXPORT_SYMBOL(scsc_mx_service_get_mbox_ptr);
+
+int scsc_service_mifintrbit_bit_mask_status_get(struct scsc_service *service)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_bit_mask_status_get(mif_abs);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_bit_mask_status_get);
+
+int scsc_service_mifintrbit_get(struct scsc_service *service)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_get(mif_abs);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_get);
+
+void scsc_service_mifintrbit_bit_set(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_bit_set(mif_abs, which_bit, (enum scsc_mif_abs_target)dir);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_bit_set);
+
+void scsc_service_mifintrbit_bit_clear(struct scsc_service *service, int which_bit)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_bit_clear(mif_abs, which_bit);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_bit_clear);
+
+void scsc_service_mifintrbit_bit_mask(struct scsc_service *service, int which_bit)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_bit_mask(mif_abs, which_bit);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_bit_mask);
+
+void scsc_service_mifintrbit_bit_unmask(struct scsc_service *service, int which_bit)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->irq_bit_unmask(mif_abs, which_bit);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_bit_unmask);
+
+int scsc_service_mifintrbit_alloc_fromhost(struct scsc_service *service, enum scsc_mifintr_target dir)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifintrbit_alloc_fromhost(scsc_mx_get_intrbit(mx), (enum scsc_mif_abs_target)dir);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_alloc_fromhost);
+
+int scsc_service_mifintrbit_free_fromhost(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifintrbit_free_fromhost(scsc_mx_get_intrbit(mx), which_bit, (enum scsc_mif_abs_target)dir);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_free_fromhost);
+
+int scsc_service_mifintrbit_register_tohost(struct scsc_service *service, void (*handler)(int irq, void *data), void *data)
+{
+ struct scsc_mx *mx = service->mx;
+
+ SCSC_TAG_DEBUG(MXMAN, "Registering %pS\n", handler);
+
+ return mifintrbit_alloc_tohost(scsc_mx_get_intrbit(mx), handler, data);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_register_tohost);
+
+int scsc_service_mifintrbit_unregister_tohost(struct scsc_service *service, int which_bit)
+{
+ struct scsc_mx *mx = service->mx;
+
+ SCSC_TAG_DEBUG(MXMAN, "Deregistering int for bit %d\n", which_bit);
+ return mifintrbit_free_tohost(scsc_mx_get_intrbit(mx), which_bit);
+}
+EXPORT_SYMBOL(scsc_service_mifintrbit_unregister_tohost);
+
+void *scsc_mx_service_mif_addr_to_ptr(struct scsc_service *service, scsc_mifram_ref ref)
+{
+ struct scsc_mx *mx = service->mx;
+
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ return mif_abs->get_mifram_ptr(mif_abs, ref);
+}
+EXPORT_SYMBOL(scsc_mx_service_mif_addr_to_ptr);
+
+void *scsc_mx_service_mif_addr_to_phys(struct scsc_service *service, scsc_mifram_ref ref)
+{
+ struct scsc_mx *mx = service->mx;
+
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ if (mif_abs->get_mifram_phy_ptr)
+ return mif_abs->get_mifram_phy_ptr(mif_abs, ref);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(scsc_mx_service_mif_addr_to_phys);
+
+int scsc_mx_service_mif_ptr_to_addr(struct scsc_service *service, void *mem_ptr, scsc_mifram_ref *ref)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ /* Transform native pointer and get mifram_ref type */
+ if (mif_abs->get_mifram_ref(mif_abs, mem_ptr, ref)) {
+ SCSC_TAG_ERR(MXMAN, "ooops somethig went wrong");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsc_mx_service_mif_ptr_to_addr);
+
+int scsc_mx_service_mif_dump_registers(struct scsc_service *service)
+{
+ struct scsc_mx *mx = service->mx;
+ struct scsc_mif_abs *mif_abs;
+
+ mif_abs = scsc_mx_get_mif_abs(mx);
+
+ /* Dump registers */
+ mif_abs->mif_dump_registers(mif_abs);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsc_mx_service_mif_dump_registers);
+
+struct device *scsc_service_get_device(struct scsc_service *service)
+{
+ return scsc_mx_get_device(service->mx);
+}
+EXPORT_SYMBOL(scsc_service_get_device);
+
/* As scsc_service_get_device() but keyed directly by the mx instance. */
struct device *scsc_service_get_device_by_mx(struct scsc_mx *mx)
{
	return scsc_mx_get_device(mx);
}
EXPORT_SYMBOL(scsc_service_get_device_by_mx);
+
+/* Force a FW panic for test purposes only */
+int scsc_service_force_panic(struct scsc_service *service)
+{
+ struct mxman *mxman = scsc_mx_get_mxman(service->mx);
+
+ SCSC_TAG_INFO(MXMAN, "%d\n", service->id);
+
+ return mxman_force_panic(mxman);
+}
+EXPORT_SYMBOL(scsc_service_force_panic);
+
+#ifdef CONFIG_SCSC_SMAPPER
+u16 scsc_service_get_alignment(struct scsc_service *service)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_get_alignment(scsc_mx_get_smapper(mx));
+}
+
+int scsc_service_mifsmapper_alloc_bank(struct scsc_service *service, bool large_bank, u32 entry_size, u16 *entries)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_alloc_bank(scsc_mx_get_smapper(mx), large_bank, entry_size, entries);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_alloc_bank);
+
+void scsc_service_mifsmapper_configure(struct scsc_service *service, u32 granularity)
+{
+ struct scsc_mx *mx = service->mx;
+
+ mifsmapper_configure(scsc_mx_get_smapper(mx), granularity);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_configure);
+
+int scsc_service_mifsmapper_write_sram(struct scsc_service *service, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_write_sram(scsc_mx_get_smapper(mx), bank, num_entries, first_entry, addr);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_write_sram);
+
+int scsc_service_mifsmapper_get_entries(struct scsc_service *service, u8 bank, u8 num_entries, u8 *entries)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_get_entries(scsc_mx_get_smapper(mx), bank, num_entries, entries);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_get_entries);
+
+int scsc_service_mifsmapper_free_entries(struct scsc_service *service, u8 bank, u8 num_entries, u8 *entries)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_free_entries(scsc_mx_get_smapper(mx), bank, num_entries, entries);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_free_entries);
+
+int scsc_service_mifsmapper_free_bank(struct scsc_service *service, u8 bank)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_free_bank(scsc_mx_get_smapper(mx), bank);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_free_bank);
+
+u32 scsc_service_mifsmapper_get_bank_base_address(struct scsc_service *service, u8 bank)
+{
+ struct scsc_mx *mx = service->mx;
+
+ return mifsmapper_get_bank_base_address(scsc_mx_get_smapper(mx), bank);
+}
+EXPORT_SYMBOL(scsc_service_mifsmapper_get_bank_base_address);
+#endif
+
+#ifdef CONFIG_SCSC_QOS
+int scsc_service_pm_qos_add_request(struct scsc_service *service, enum scsc_qos_config config)
+{
+ struct scsc_mx *mx = service->mx;
+
+ mifqos_add_request(scsc_mx_get_qos(mx), service->id, config);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsc_service_pm_qos_add_request);
+
+int scsc_service_pm_qos_update_request(struct scsc_service *service, enum scsc_qos_config config)
+{
+ struct scsc_mx *mx = service->mx;
+
+ mifqos_update_request(scsc_mx_get_qos(mx), service->id, config);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsc_service_pm_qos_update_request);
+
+int scsc_service_pm_qos_remove_request(struct scsc_service *service)
+{
+ struct scsc_mx *mx = service->mx;
+
+ if (!mx)
+ return -EIO;
+
+ mifqos_remove_request(scsc_mx_get_qos(mx), service->id);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsc_service_pm_qos_remove_request);
+#endif
+#ifdef CONFIG_SCSC_MXLOGGER
/* If there is no service/mxman associated, register the observer as global (will affect all the mx instances) */
/* Users of these functions should ensure that the register/unregister calls are balanced (i.e. if an observer is registered as global,
 * it _has_ to unregister as global) */
+int scsc_service_register_observer(struct scsc_service *service, char *name)
+{
+ struct scsc_mx *mx;
+
+ if (!service)
+ return mxlogger_register_global_observer(name);
+
+ mx = service->mx;
+
+ if (!mx)
+ return -EIO;
+
+ return mxlogger_register_observer(scsc_mx_get_mxlogger(mx), name);
+}
+EXPORT_SYMBOL(scsc_service_register_observer);
+
/* If there is no service/mxman associated, unregister the observer as global (will affect all the mx instances) */
+int scsc_service_unregister_observer(struct scsc_service *service, char *name)
+{
+ struct scsc_mx *mx;
+
+ if (!service)
+ return mxlogger_unregister_global_observer(name);
+
+ mx = service->mx;
+
+ if (!mx)
+ return -EIO;
+
+ return mxlogger_unregister_observer(scsc_mx_get_mxlogger(mx), name);
+}
+EXPORT_SYMBOL(scsc_service_unregister_observer);
+#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef _SCSC_WIFILOGGER_H_
+#define _SCSC_WIFILOGGER_H_
+/**
+ * Internal Reference docs for WiFi-Logger subsystem
+ *
+ * SC-507043-SW -- Android Wi-Fi Logger architecture
+ * SC-507780-DD -- Android Enhanced Logging
+ * WiFiLogger Core Driver Requirements and Design
+ *
+ * This is the CONSUMER API as implemented by scsc_wifilogger driver:
+ * the framework and WiFi HAL are the final consumer of WiFi logger provided
+ * data but this API is directly used by our driver NetLink layer to
+ * configure and start/stop Android Enhanced Logging - WiFi Logger.
+ *
+ * Workflow is as follows:
+ *
+ * - framework invokes wifi_logger.h exported methods implemented by WiFi HAL
+ * - WiFi HAL wifi_logger module translates wifi_logger.h requests into
 *   NetLink vendor messages dispatched to our driver
 * - our SCSC netlink layer driver translates back NetLink received messages
 *   into invocations of methods exported by this driver in the current
 *   header file
+ * - this driver, manages all the basic ring operations, providing:
+ * + this consumer API used to configure and start/stop the data-consuming
+ * reader-process that pushes data up to the framework through the NetLink
+ * channel
+ * + a producer API that will be used to push data/record into the rings
+ * + all the machinery needed to create and manage multiple rings
+ *
 * As a consequence this file's types and method definitions closely resemble
+ * the interface and types defined in:
+ *
+ * hardware/libhardware_legacy/include/hardware_legacy/wifi_logger.h
+ * hardware/libhardware_legacy/include/hardware_legacy/wifi_hal.h
+ *
+ * Some function arguments, deemed un-needed in the driver layer, have been
+ * removed from function prototypes, and all the names have been prefixed
+ * with "scsc_".
+ *
 * Type definitions are split into the dedicated scsc_wifilogger_types.h
 * header since they will be used also by the core implementation.
+ *
+ */
+#include "scsc_wifilogger_types.h"
+
+/**
+ * API to collect a firmware version string.
+ * - Caller is responsible to allocate / free a buffer to retrieve firmware
+ * version info.
+ * - Max string will be at most 256 bytes.
+ */
+wifi_error scsc_wifi_get_firmware_version(char *buffer, int buffer_size);
+
+/**
+ * API to collect a driver version string.
+ * - Caller is responsible to allocate / free a buffer to retrieve driver
+ * version info.
+ * - Max string will be at most 256 bytes.
+ */
+wifi_error scsc_wifi_get_driver_version(char *buffer, int buffer_size);
+
+/**
+ * API to get the status of all ring buffers supported by driver.
+ * - Caller is responsible to allocate / free ring buffer status.
+ * - Maximum no of ring buffer would be 10.
+ */
+wifi_error scsc_wifi_get_ring_buffers_status(u32 *num_rings,
+ struct scsc_wifi_ring_buffer_status *status);
+
+/**
+ * API to retrieve the current supportive features.
+ * - An integer variable is enough to have bit mapping info by caller.
+ */
+wifi_error scsc_wifi_get_logger_supported_feature_set(unsigned int *support);
+
+
+/**
+ * API to set/reset the log handler for getting ring data
+ * - Only a single instance of log handler can be instantiated for each
+ * ring buffer.
+ */
+wifi_error scsc_wifi_set_log_handler(on_ring_buffer_data handler, void *ctx);
+wifi_error scsc_wifi_reset_log_handler(void);
+
+/**
+ * API to set/reset the alert handler for the alert case in Wi-Fi Chip
+ * - Only a single instance of alert handler can be instantiated.
+ */
+wifi_error scsc_wifi_set_alert_handler(on_alert handler, void *ctx);
+wifi_error scsc_wifi_reset_alert_handler(void);
+
+/* API for framework to indicate driver has to upload and drain all data
+ * of a given ring
+ */
+wifi_error scsc_wifi_get_ring_data(char *ring_name);
+
+/**
+ * API to trigger the debug collection.
 * Unless this API is invoked - logging is not triggered.
+ * - Verbose_level 0 corresponds to no collection,
+ * and it makes log handler stop by no more events from driver.
+ * - Verbose_level 1 correspond to normal log level, with minimal user impact.
+ * This is the default value.
+ * - Verbose_level 2 are enabled when user is lazily trying to reproduce
+ * a problem, wifi performances and power can be impacted but device should
+ * not otherwise be significantly impacted.
+ * - Verbose_level 3+ are used when trying to actively debug a problem.
+ *
+ * ring_name represent the name of the ring for which data
+ * collection shall start.
+ *
+ * flags: TBD parameter used to enable/disable specific events
+ * on a ring
+ * max_interval: maximum interval in seconds for driver to
+ * invoke on_ring_buffer_data,
+ * ignore if zero
+ * min_data_size: minimum data size in buffer for driver to
+ * invoke on_ring_buffer_data,
+ * ignore if zero
+ */
+wifi_error scsc_wifi_start_logging(u32 verbose_level, u32 flags, u32 max_interval_sec,
+ u32 min_data_size, char *ring_name);
+
+/**
+ * API to collect a firmware memory dump for a given iface by async memdump event.
+ * - Triggered by Alerthandler, esp. when FW problem or FW health check happens
+ * - Caller is responsible to store fw dump data into a local,
+ * e.g., /data/misc/wifi/alertdump-1.bin
+ */
+wifi_error scsc_wifi_get_firmware_memory_dump(on_firmware_memory_dump handler, void *ctx);
+
+/**
+ * API to collect driver state.
+ *
+ * Framework will call this API soon before or after (but not
+ * concurrently with) wifi_get_firmware_memory_dump(). Capturing
+ * firmware and driver dumps is intended to help identify
+ * inconsistent state between these components.
+ *
+ * - In response to this call, HAL implementation should make one or
+ * more calls to callbacks.on_driver_memory_dump(). Framework will
+ * copy data out of the received |buffer|s, and concatenate the
+ * contents thereof.
 * - HAL implementation will indicate completion of the driver memory
+ * dump by returning from this call.
+ */
+wifi_error scsc_wifi_get_driver_memory_dump(on_driver_memory_dump handler, void *ctx);
+
+/**
+ * API to start packet fate monitoring.
 * - Once started, monitoring should remain active until HAL is unloaded.
+ * - When HAL is unloaded, all packet fate buffers should be cleared.
+ */
+wifi_error scsc_wifi_start_pkt_fate_monitoring(void);
+
+/**
+ * API to retrieve fates of outbound packets.
+ * - HAL implementation should fill |tx_report_bufs| with fates of
+ * _first_ min(n_requested_fates, actual packets) frames
+ * transmitted for the most recent association. The fate reports
+ * should follow the same order as their respective packets.
+ * - HAL implementation may choose (but is not required) to include
+ * reports for management frames.
+ * - Packets reported by firmware, but not recognized by driver,
+ * should be included. However, the ordering of the corresponding
+ * reports is at the discretion of HAL implementation.
+ * - Framework may call this API multiple times for the same association.
+ * - Framework will ensure |n_requested_fates <= MAX_FATE_LOG_LEN|.
+ * - Framework will allocate and free the referenced storage.
+ */
+wifi_error scsc_wifi_get_tx_pkt_fates(wifi_tx_report *tx_report_bufs,
+ size_t n_requested_fates,
+ size_t *n_provided_fates);
+
+/**
+ * API to retrieve fates of inbound packets.
+ * - HAL implementation should fill |rx_report_bufs| with fates of
+ * _first_ min(n_requested_fates, actual packets) frames
+ * received for the most recent association. The fate reports
+ * should follow the same order as their respective packets.
+ * - HAL implementation may choose (but is not required) to include
+ * reports for management frames.
+ * - Packets reported by firmware, but not recognized by driver,
+ * should be included. However, the ordering of the corresponding
+ * reports is at the discretion of HAL implementation.
+ * - Framework may call this API multiple times for the same association.
+ * - Framework will ensure |n_requested_fates <= MAX_FATE_LOG_LEN|.
+ * - Framework will allocate and free the referenced storage.
+ */
+wifi_error scsc_wifi_get_rx_pkt_fates(wifi_rx_report *rx_report_bufs,
+ size_t n_requested_fates,
+ size_t *n_provided_fates);
+
+#endif /*_SCSC_WIFILOGGER_H_*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_wifilogger.h"
+#include "scsc_wifilogger_core.h"
+#include "scsc_wifilogger_internal.h"
+#include "scsc_wifilogger_ring_pktfate.h"
+
+wifi_error scsc_wifi_get_firmware_version(char *buffer, int buffer_size)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ mxman_get_fw_version(buffer, buffer_size);
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_firmware_version);
+
+wifi_error scsc_wifi_get_driver_version(char *buffer, int buffer_size)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ mxman_get_driver_version(buffer, buffer_size);
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_driver_version);
+
+wifi_error scsc_wifi_get_ring_buffers_status(u32 *num_rings,
+ struct scsc_wifi_ring_buffer_status *status)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ if (!scsc_wifilogger_get_rings_status(num_rings, status))
+ return WIFI_ERROR_UNINITIALIZED;
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_ring_buffers_status);
+
+wifi_error scsc_wifi_get_logger_supported_feature_set(unsigned int *support)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ if (!support)
+ return WIFI_ERROR_INVALID_ARGS;
+ *support = scsc_wifilogger_get_features();
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_logger_supported_feature_set);
+
+wifi_error scsc_wifi_set_log_handler(on_ring_buffer_data handler, void *ctx)
+{
+ struct scsc_wifi_logger *wl = NULL;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot register log_handler on UNINITIALIZED WiFi Logger.\n");
+ return WIFI_ERROR_UNINITIALIZED;
+ }
+ if (!handler) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot register NULL log_handler for WiFi Logger.\n");
+ return WIFI_ERROR_INVALID_ARGS;
+ }
+
+ mutex_lock(&wl->lock);
+ if (wl->on_ring_buffer_data_cb) {
+ SCSC_TAG_ERR(WLOG,
+ "Log handler already registered...request ignored.\n");
+ mutex_unlock(&wl->lock);
+ return WIFI_SUCCESS;
+ }
+ wl->on_ring_buffer_data_cb = handler;
+ wl->on_ring_buffer_ctx = ctx;
+ mutex_unlock(&wl->lock);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_set_log_handler);
+
+wifi_error scsc_wifi_reset_log_handler(void)
+{
+ struct scsc_wifi_logger *wl = NULL;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot reset log_handler on UNINITIALIZED WiFi Logger.\n");
+ return WIFI_ERROR_UNINITIALIZED;
+ }
+ mutex_lock(&wl->lock);
+ wl->on_ring_buffer_data_cb = NULL;
+ wl->on_ring_buffer_ctx = NULL;
+ mutex_unlock(&wl->lock);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_reset_log_handler);
+
+wifi_error scsc_wifi_set_alert_handler(on_alert handler, void *ctx)
+{
+ struct scsc_wifi_logger *wl = NULL;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot register alert_handler on UNINITIALIZED WiFi Logger.\n");
+ return WIFI_ERROR_UNINITIALIZED;
+ }
+ if (!handler) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot register NULL alert_handler for WiFi Logger.\n");
+ return WIFI_ERROR_INVALID_ARGS;
+ }
+
+ mutex_lock(&wl->lock);
+ if (wl->on_alert_cb) {
+ SCSC_TAG_ERR(WLOG,
+ "Alert handler already registered...request ignored.\n");
+ mutex_unlock(&wl->lock);
+ return WIFI_SUCCESS;
+ }
+ wl->on_alert_cb = handler;
+ wl->on_alert_ctx = ctx;
+ mutex_unlock(&wl->lock);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_set_alert_handler);
+
+wifi_error scsc_wifi_reset_alert_handler(void)
+{
+ struct scsc_wifi_logger *wl = NULL;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot reset alert_handler on UNINITIALIZED WiFi Logger.\n");
+ return WIFI_ERROR_UNINITIALIZED;
+ }
+ mutex_lock(&wl->lock);
+ wl->on_alert_cb = NULL;
+ wl->on_alert_ctx = NULL;
+ mutex_unlock(&wl->lock);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_reset_alert_handler);
+
+wifi_error scsc_wifi_get_ring_data(char *ring_name)
+{
+ struct scsc_wlog_ring *r;
+ struct scsc_wifi_logger *wl = NULL;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "Cannot drain ring %s on UNINITIALIZED WiFi Logger.\n",
+ ring_name);
+ return WIFI_ERROR_UNINITIALIZED;
+ }
+
+ mutex_lock(&wl->lock);
+ if (!wl->on_ring_buffer_data_cb)
+ SCSC_TAG_WARNING(WLOG,
+ "NO log-handler registered. Discarding data while draining ring: %s\n",
+ ring_name);
+ mutex_unlock(&wl->lock);
+
+ r = scsc_wifilogger_get_ring_from_name(ring_name);
+ if (!r) {
+ SCSC_TAG_ERR(WLOG,
+ "Ring %s NOT found. Cannot drain.\n",
+ ring_name);
+ return WIFI_ERROR_NOT_AVAILABLE;
+ }
+
+ scsc_wlog_drain_whole_ring(r);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_ring_data);
+
+wifi_error scsc_wifi_start_logging(u32 verbose_level, u32 flags, u32 max_interval_sec,
+ u32 min_data_size, char *ring_name)
+{
+ struct scsc_wlog_ring *r;
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ r = scsc_wifilogger_get_ring_from_name(ring_name);
+ if (!r) {
+ SCSC_TAG_ERR(WLOG,
+ "Ring %s NOT found. Cannot start logging\n",
+ ring_name);
+ return WIFI_ERROR_NOT_AVAILABLE;
+ }
+
+ return scsc_wlog_start_logging(r, verbose_level, flags,
+ max_interval_sec, min_data_size);
+}
+EXPORT_SYMBOL(scsc_wifi_start_logging);
+
+wifi_error scsc_wifi_get_firmware_memory_dump(on_firmware_memory_dump handler, void *ctx)
+{
+ char buf[] = "Full FW memory dump NOT available.\n";
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ handler(buf, sizeof(buf), ctx);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_firmware_memory_dump);
+
+wifi_error scsc_wifi_get_driver_memory_dump(on_driver_memory_dump handler, void *ctx)
+{
+ char buf[] = "Full DRIVER memory dump NOT available.\n";
+
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ handler(buf, sizeof(buf), ctx);
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_driver_memory_dump);
+
+wifi_error scsc_wifi_start_pkt_fate_monitoring(void)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ scsc_wifilogger_ring_pktfate_start_monitoring();
+
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_start_pkt_fate_monitoring);
+
+wifi_error scsc_wifi_get_tx_pkt_fates(wifi_tx_report *tx_report_bufs,
+ size_t n_requested_fates,
+ size_t *n_provided_fates)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ scsc_wifilogger_ring_pktfate_get_fates(TX_FATE, tx_report_bufs,
+ n_requested_fates, n_provided_fates);
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_tx_pkt_fates);
+
+wifi_error scsc_wifi_get_rx_pkt_fates(wifi_rx_report *rx_report_bufs,
+ size_t n_requested_fates,
+ size_t *n_provided_fates)
+{
+ SCSC_TAG_DEBUG(WLOG, "\n");
+ scsc_wifilogger_ring_pktfate_get_fates(RX_FATE, rx_report_bufs,
+ n_requested_fates, n_provided_fates);
+ return WIFI_SUCCESS;
+}
+EXPORT_SYMBOL(scsc_wifi_get_rx_pkt_fates);
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+/* Uses */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <scsc/scsc_logring.h>
+
+/* Implements */
+#include "scsc_wifilogger_core.h"
+
+static atomic_t next_ring_id;
+
+static void wlog_drain_worker(struct work_struct *work)
+{
+ struct scsc_wlog_ring *r;
+
+ r = container_of(work, struct scsc_wlog_ring, drain_work);
+
+ if (r && r->ops.drain_ring)
+ r->ops.drain_ring(r, r->flushing ? r->st.rb_byte_size : DEFAULT_DRAIN_CHUNK_SZ(r));
+}
+
+static void drain_timer_callback(unsigned long data)
+{
+ struct scsc_wlog_ring *r = (struct scsc_wlog_ring *)data;
+
+ SCSC_TAG_DBG4(WLOG, "TIMER DRAIN : %p\n", r);
+ /* we should kick the workqueue here...no sleep */
+ queue_work(r->drain_workq, &r->drain_work);
+
+ if (r->st.verbose_level && r->max_interval_sec) {
+ mod_timer(&r->drain_timer,
+ jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
+ SCSC_TAG_DBG4(WLOG, "TIMER RELOADED !!!\n");
+ }
+}
+
+static int wlog_ring_init(struct scsc_wlog_ring *r)
+{
+ /* Allocate buffer and spare area */
+ r->buf = kzalloc(r->st.rb_byte_size + MAX_RECORD_SZ, GFP_KERNEL);
+ if (!r->buf)
+ return -ENOMEM;
+ r->drain_sz = DRAIN_BUF_SZ;
+ r->drain_buf = kzalloc(r->drain_sz, GFP_KERNEL);
+ if (!r->drain_buf) {
+ kfree(r->buf);
+ return -ENOMEM;
+ }
+ mutex_init(&r->drain_lock);
+
+ r->drain_workq = create_workqueue("wifilogger");
+ INIT_WORK(&r->drain_work, wlog_drain_worker);
+ setup_timer(&r->drain_timer, drain_timer_callback, (unsigned long)r);
+
+ r->st.ring_id = atomic_read(&next_ring_id);
+ atomic_inc(&next_ring_id);
+
+ SCSC_TAG_DBG3(WLOG, "Workers initialized for ring[%p]: %s\n",
+ r, r->st.name);
+
+ return 0;
+}
+
+static void wlog_ring_finalize(struct scsc_wlog_ring *r)
+{
+ if (!r)
+ return;
+
+ cancel_work_sync(&r->drain_work);
+ del_timer_sync(&r->drain_timer);
+ destroy_workqueue(r->drain_workq);
+
+ r->initialized = false;
+ kfree(r->drain_buf);
+ kfree(r->buf);
+ r->buf = NULL;
+}
+
+static wifi_error wlog_get_ring_status(struct scsc_wlog_ring *r,
+ struct scsc_wifi_ring_buffer_status *status)
+{
+ if (!r || !status)
+ return WIFI_ERROR_INVALID_ARGS;
+ //TODO locking SRCU ?
+ *status = r->st;
+
+ return WIFI_SUCCESS;
+}
+
/* Consume whole records from ring 'r' into 'buf' (capacity 'blen' bytes).
 *
 * records: in/out, optional. On entry, max number of records to read
 *          (unlimited if NULL); on exit, number actually read.
 * status:  optional; receives a snapshot of the ring status taken while
 *          the read lock was still held.
 *
 * Returns the number of bytes copied into 'buf'. Only complete records
 * are copied; a record that would not fit in the remaining space stops
 * the read. Returns 0 immediately while the ring is being flushed.
 */
static int wlog_read_records(struct scsc_wlog_ring *r, u8 *buf,
			     size_t blen, u32 *records,
			     struct scsc_wifi_ring_buffer_status *status)
{
	u16 read_bytes = 0, rec_sz = 0;
	/* req_records deliberately wraps to U32_MAX: "no limit" by default */
	u32 got_records = 0, req_records = -1;

	if (scsc_wlog_ring_is_flushing(r))
		return 0;

	/**
	 * req_records has been loaded with a max u32 value by default
	 * on purpose...if a max number of records is provided in records
	 * update req_records accordingly
	 */
	if (records)
		req_records = *records;
	/**
	 * We have ONLY ONE READER at any time that consumes data, impersonated
	 * here by the drain_ring drainer callback, whose read-ops are ensured
	 * atomic by the drain_lock mutex: this will guard against races
	 * between the periodic-drain worker and the threshold-drain procedure
	 * triggered by the write itself.
	 *
	 * But we want also to guard against any direct read_record invokation
	 * like in test rings via debugfs so we add a read spinlock: this last
	 * won't lead to any contention here anyway most of the time in a
	 * real scenario so the same reason we don't need either any irqsave
	 * spinlock version....so latency also is not impacted.
	 */
	raw_spin_lock(&r->rlock);
	while (!scsc_wlog_is_ring_empty(r) && got_records < req_records) {
		/* REC_SZ/RPOS/REC_START are ring-layout macros; read position
		 * advances implicitly through r->st.read_bytes below */
		rec_sz = REC_SZ(r, RPOS(r));
		if (read_bytes + rec_sz > blen)
			break;
		/**
		 * Rollover is transparent on read...last written material in
		 * spare is still there...
		 */
		memcpy(buf + read_bytes, REC_START(r, RPOS(r)), rec_sz);
		read_bytes += rec_sz;
		r->st.read_bytes += rec_sz;
		got_records++;
	}
	/* Status snapshot must be taken under the lock to stay consistent */
	if (status)
		*status = r->st;
	raw_spin_unlock(&r->rlock);

	if (records)
		*records = got_records;
	SCSC_TAG_DBG4(WLOG, "BytesRead:%d -- RecordsRead:%d\n",
		      read_bytes, got_records);

	return read_bytes;
}
+
+/**
+ * wlog_default_ring_drainer - default drain_ring op.
+ * @r:        ring to drain
+ * @drain_sz: total number of bytes to drain in this pass
+ *
+ * Repeatedly reads chunks of at most @r->drain_sz bytes into the ring's
+ * drain buffer and pushes them to the registered on_ring_buffer_data_cb
+ * (if any), until @drain_sz bytes have been drained or the ring is empty.
+ * Serialized against other drain/flush paths by @r->drain_lock.
+ * If a flush was requested, the counters are reset at the end under the
+ * write lock so that concurrent writers are held off momentarily.
+ */
+static int wlog_default_ring_drainer(struct scsc_wlog_ring *r, size_t drain_sz)
+{
+ int rval = 0, drained_bytes = 0;
+ size_t chunk_sz = drain_sz <= r->drain_sz ? drain_sz : r->drain_sz;
+ struct scsc_wifi_ring_buffer_status ring_status = {};
+
+ /* An SRCU on callback here would better */
+ mutex_lock(&r->drain_lock);
+ do {
+ /* drain ... consumes data */
+ rval = r->ops.read_records(r, r->drain_buf, chunk_sz, NULL, &ring_status);
+ /* and push...if any callback defined (skipped while flushing:
+ * flushed data is discarded, not delivered upstream) */
+ if (!r->flushing) {
+ mutex_lock(&r->wl->lock);
+ if (rval > 0 && r->wl->on_ring_buffer_data_cb) {
+ SCSC_TAG_DEBUG(WLOG,
+ "Invoking registered log_handler:%p to drain %d bytes\n",
+ r->wl->on_ring_buffer_data_cb, rval);
+ r->wl->on_ring_buffer_data_cb(r->st.name, r->drain_buf, rval,
+ &ring_status, r->wl->on_ring_buffer_ctx);
+ SCSC_TAG_DBG4(WLOG, "Callback processed %d bytes\n", rval);
+ }
+ mutex_unlock(&r->wl->lock);
+ }
+ drained_bytes += rval;
+ } while (rval && drained_bytes <= drain_sz);
+ SCSC_TAG_DBG3(WLOG, "%s %d bytes\n", (r->flushing) ? "Flushed" : "Drained",
+ drained_bytes);
+
+ /* Execute flush if required... */
+ if (r->flushing) {
+ unsigned long flags;
+
+ /* Inhibit writers momentarily */
+ raw_spin_lock_irqsave(&r->wlock, flags);
+ r->dropped = 0;
+ r->st.written_records = 0;
+ r->st.read_bytes = r->st.written_bytes = 0;
+ r->flushing = false;
+ raw_spin_unlock_irqrestore(&r->wlock, flags);
+ SCSC_TAG_INFO(WLOG, "Ring '%s' flushed.\n", r->st.name);
+ }
+ mutex_unlock(&r->drain_lock);
+
+ return drained_bytes;
+}
+
+/**
+ * A generic write that takes care to build the final payload created
+ * concatenating:
+ * - the common record-header
+ * - an optionally provided ring_hdr
+ * - the provided payload buf
+ *
+ * The optional header is passed down as a separate parameters to avoid
+ * unnecessary intermediate copies: this function will copy all the bits
+ * in place directly into the proper calculated ring position.
+ *
+ * By design a read-end-point is always provided by the framework
+ * (in terms of netlink channels towards the WiFi-HAL) so we spawn a
+ * configurable reader-worker upon start of logging, and the same reader
+ * is also invoked when ring is running out of space: for these reasons
+ * the ring is meant NOT to overwrite itself ever.
+ *
+ * If NO periodic reader is spawned NOR a min_data_size threshold was
+ * specified to force kick the periodic drainer, we could just end-up
+ * filling up the ring: in that case we just drop and account for it.
+ *
+ * Data is drained and pushed periodically upstream using the
+ * on_ring_buffer_data_cb if any provided and periodic drain was
+ * configured.
+ *
+ * @r: the referenced ring
+ * @buf: payload
+ * @blen: payload_sz
+ * @ring_hdr: upper-layer-record-header
+ * @hlen: upper-layer-record-header length
+ * @verbose_level: loglevel for this message (to be checked against)
+ * @timestamp: a provided timestamp (if any). If zero a timestamp will be
+ * calculated.
+ *
+ * Final injected record will be composed as follows:
+ *
+ * |common_hdr|ring_hdr|buf|
+ *
+ * where the common header is computed and filled in by this function; the
+ * additional upper-layer header ring_hdr may be omitted.
+ *
+ * THIS BASIC RING OPERATION IS THE WORKHORSE USED BY THE PRODUCER API IMPLEMENTED
+ * BY REAL RINGS, AND AS SUCH COULD BE INVOKED FROM ANY CONTEXTS...SO IT MUST NOT SLEEP.
+ */
+static int wlog_write_record(struct scsc_wlog_ring *r, u8 *buf, size_t blen,
+ void *ring_hdr, size_t hlen, u32 verbose_level, u64 timestamp)
+{
+ u8 *start = NULL;
+ u16 chunk_sz;
+ unsigned long flags;
+
+ /* Writes are refused while a flush is in progress */
+ if (scsc_wlog_ring_is_flushing(r))
+ return 0;
+
+ /* Just drop messages above configured verbose level. 0 is disabled */
+ if (!scsc_wlog_is_message_allowed(r, verbose_level))
+ return 0;
+
+ //TODO Account for missing timestamp
+ /* Total on-ring footprint: common header + optional ring_hdr + payload.
+ * Capped at MAX_RECORD_SZ, the size of the rollover spare area. */
+ chunk_sz = sizeof(struct scsc_wifi_ring_buffer_entry) + hlen + blen;
+ if (chunk_sz > MAX_RECORD_SZ) {
+ SCSC_TAG_WARNING(WLOG, "Dropping record exceeding %d bytes\n",
+ chunk_sz);
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&r->wlock, flags);
+ /**
+ * Are there enough data to drain ?
+ * if so...drain...queueing work....
+ * if not (min_data_size == 0) just do nothing
+ */
+ if (!r->drop_on_full && r->min_data_size &&
+ AVAIL_BYTES(r) >= r->min_data_size)
+ queue_work(r->drain_workq, &r->drain_work);
+ /**
+ * If no min_data_size was specified, NOR a periodic read-worker
+ * was configured (i.e. max_interval_sec == 0), we could end up
+ * filling up the ring...in that case just drop...accounting for it.
+ *
+ * This is the case when packet_fate rings fills up...
+ */
+ if (!CAN_FIT(r, chunk_sz)) {
+ SCSC_TAG_DBG4(WLOG, "[%s]:: dropped %zd bytes\n",
+ r->st.name, blen + hlen);
+ r->dropped += blen + hlen;
+ raw_spin_unlock_irqrestore(&r->wlock, flags);
+ return 0;
+ }
+
+ /* Write the record contiguously at WPOS; it may spill past BUF_END
+ * into the spare area, which is handled below. */
+ start = REC_START(r, WPOS(r));
+ REC_HEADER_FILL(start, hlen + blen, timestamp, (u8)r->st.flags, r->type);
+ start += sizeof(struct scsc_wifi_ring_buffer_entry);
+ if (hlen) {
+ memcpy(start, ring_hdr, hlen);
+ start += hlen;
+ }
+ if (blen)
+ memcpy(start, buf, blen);
+ /* Account for rollover using spare area at end of ring:
+ * start + blen is the end of the whole record, so the expression
+ * below is the number of bytes that spilled past BUF_END and must
+ * be mirrored to the physical start of the buffer. */
+ if (start + blen > BUF_END(r))
+ memcpy(BUF_START(r), BUF_END(r), start + blen - BUF_END(r));
+ r->st.written_bytes += chunk_sz;
+ r->st.written_records++;
+ raw_spin_unlock_irqrestore(&r->wlock, flags);
+
+ return chunk_sz;
+}
+
+/**
+ * wlog_default_ring_config_change - apply new drain parameters to @r.
+ *
+ * Drives the SUSPEND/ACTIVE state machine off the ring's CURRENT
+ * r->st.verbose_level (the @verbose_level and @flags parameters are part
+ * of the op signature but are not consumed here; the caller applies the
+ * verbosity beforehand via scsc_wlog_ring_change_verbosity()).
+ */
+static int wlog_default_ring_config_change(struct scsc_wlog_ring *r,
+ u32 verbose_level, u32 flags,
+ u32 max_interval_sec,
+ u32 min_data_size)
+{
+ u32 old_interval_sec;
+
+ SCSC_TAG_DEBUG(WLOG, "Ring: %s -- configuration change.\n",
+ r->st.name);
+
+ r->min_data_size = min_data_size;
+ old_interval_sec = r->max_interval_sec;
+ r->max_interval_sec = max_interval_sec;
+
+ if (r->state == RING_STATE_SUSPEND && r->st.verbose_level) {
+ /* Restarting timer where required ...
+ * it will take care to queue_work back.
+ */
+ if (r->max_interval_sec)
+ mod_timer(&r->drain_timer,
+ jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
+ r->state = RING_STATE_ACTIVE;
+ SCSC_TAG_INFO(WLOG, "ACTIVATED ring: %s\n", r->st.name);
+ } else if (r->state == RING_STATE_ACTIVE && !r->st.verbose_level) {
+ /* Stop timer, cancel pending work */
+ del_timer_sync(&r->drain_timer);
+ cancel_work_sync(&r->drain_work);
+ r->state = RING_STATE_SUSPEND;
+ SCSC_TAG_INFO(WLOG, "SUSPENDED ring: %s\n", r->st.name);
+ } else if (r->state == RING_STATE_ACTIVE) {
+ /* Still active: retune (or stop) the periodic drain timer only
+ * when the interval actually changed. */
+ if (old_interval_sec != r->max_interval_sec) {
+ if (!r->max_interval_sec)
+ del_timer_sync(&r->drain_timer);
+ else
+ mod_timer(&r->drain_timer,
+ jiffies + msecs_to_jiffies(r->max_interval_sec * 1000));
+ }
+ SCSC_TAG_INFO(WLOG, "RECONFIGURED ring: %s\n", r->st.name);
+ }
+
+ return 0;
+}
+
+/**
+ * wlog_start_logging - default start_logging ring op.
+ *
+ * Applies the requested verbosity first, so that the state machine in
+ * the config-change helper observes the new level, then reconfigures
+ * the drain parameters. The call order matters.
+ */
+static wifi_error wlog_start_logging(struct scsc_wlog_ring *r,
+				     u32 verbose_level, u32 flags,
+				     u32 max_interval_sec,
+				     u32 min_data_size)
+{
+	if (unlikely(!r))
+		return WIFI_ERROR_INVALID_ARGS;
+
+	scsc_wlog_ring_change_verbosity(r, verbose_level);
+	wlog_default_ring_config_change(r, verbose_level, flags,
+					max_interval_sec, min_data_size);
+
+	return WIFI_SUCCESS;
+}
+
+/* Default method table copied into every ring by scsc_wlog_ring_create();
+ * init/finalize/loglevel_change are optional per-ring overrides. */
+static struct scsc_wlog_ring_ops default_ring_ops = {
+ .init = NULL,
+ .finalize = NULL,
+ .get_ring_status = wlog_get_ring_status,
+ .read_records = wlog_read_records,
+ .write_record = wlog_write_record,
+ .loglevel_change = NULL,
+ .drain_ring = wlog_default_ring_drainer,
+ .start_logging = wlog_start_logging,
+};
+
+/**
+ * scsc_wlog_ring_destroy - tear down and free a ring.
+ *
+ * Refuses to destroy a NULL or still-registered ring. The custom
+ * finalizer (if any, and if the ring completed initialization) runs
+ * first, i.e. in reverse order with respect to creation.
+ */
+void scsc_wlog_ring_destroy(struct scsc_wlog_ring *r)
+{
+	if (r && !r->registered) {
+		if (r->initialized && r->ops.finalize)
+			r->ops.finalize(r);
+		wlog_ring_finalize(r);
+		kfree(r);
+		return;
+	}
+
+	SCSC_TAG_ERR(WLOG, "Cannot destroy ring r:%p\n", r);
+}
+
+/**
+ * scsc_wlog_ring_create - allocate and initialize a Wi-Fi Logger ring.
+ * @ring_name:     ring name (truncated with a warning if too long)
+ * @flags:         ring status flags
+ * @type:          entry type tag stamped into every record header
+ * @size:          ring buffer size in bytes (expected power of two)
+ * @features_mask: WIFI_LOGGER_* feature bits advertised by this ring
+ * @init:          optional custom init hook (returns true on success)
+ * @fini:          optional custom finalize hook
+ * @priv:          opaque per-ring private data
+ *
+ * Returns the new ring, or NULL on failure.
+ *
+ * Fixes:
+ *  - truncation check: snprintf() with size RING_NAME_SZ returns the
+ *    would-be length, so truncation is exactly ">= RING_NAME_SZ"; the
+ *    previous code passed RING_NAME_SZ - 1 as the size while comparing
+ *    against RING_NAME_SZ, missing the one-off truncation case and
+ *    wasting the last usable byte of st.name.
+ *  - the read/write spinlocks are now initialized before the init
+ *    hooks run, so a custom init can safely exercise ring ops.
+ */
+struct scsc_wlog_ring *scsc_wlog_ring_create(char *ring_name, u32 flags,
+					     u8 type, u32 size,
+					     unsigned int features_mask,
+					     init_cb init, finalize_cb fini,
+					     void *priv)
+{
+	struct scsc_wlog_ring *r = NULL;
+
+	WARN_ON(!ring_name || !size);
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return r;
+	r->type = type;
+	r->st.flags = flags;
+	r->st.rb_byte_size = size;
+	if (snprintf(r->st.name, RING_NAME_SZ, "%s", ring_name) >= RING_NAME_SZ)
+		SCSC_TAG_WARNING(WLOG, "Ring name too long...truncated to: %s\n",
+				 r->st.name);
+	/* Setup defaults and configure init finalize if any provided */
+	memcpy(&r->ops, &default_ring_ops, sizeof(struct scsc_wlog_ring_ops));
+	r->ops.init = init;
+	r->ops.finalize = fini;
+	r->priv = priv;
+	raw_spin_lock_init(&r->rlock);
+	raw_spin_lock_init(&r->wlock);
+	/* Basic common initialization is called first */
+	if (wlog_ring_init(r)) {
+		SCSC_TAG_ERR(WLOG,
+			     "Wi-Fi Logger Ring %s basic initialization failed.\n",
+			     r->st.name);
+		kfree(r);
+		return NULL;
+	}
+	if (r->ops.init) {
+		if (r->ops.init(r)) {
+			SCSC_TAG_DBG4(WLOG,
+				      "Ring %s custom init completed\n",
+				      r->st.name);
+		} else {
+			SCSC_TAG_ERR(WLOG,
+				     "Ring %s custom init FAILED !\n",
+				     r->st.name);
+			scsc_wlog_ring_destroy(r);
+			return NULL;
+		}
+	}
+	r->features_mask = features_mask;
+	r->initialized = true;
+	SCSC_TAG_DEBUG(WLOG, "Ring '%s' initialized.\n", r->st.name);
+
+	return r;
+}
+
+/**
+ * scsc_wlog_register_loglevel_change_cb - (un)register a loglevel hook.
+ * @r:        target ring
+ * @callback: invoked on verbosity changes; NULL unregisters
+ *
+ * Always returns 0. The previous if/else was redundant: both branches
+ * assigned @callback (NULL included), so it collapses to one statement.
+ */
+int scsc_wlog_register_loglevel_change_cb(struct scsc_wlog_ring *r,
+					  int (*callback)(struct scsc_wlog_ring *r, u32 new_loglevel))
+{
+	r->ops.loglevel_change = callback;
+
+	return 0;
+}
+
+/**
+ * scsc_wlog_drain_whole_ring - synchronously drain @r's full capacity.
+ *
+ * Returns the number of bytes the ring's drainer op actually drained.
+ */
+int scsc_wlog_drain_whole_ring(struct scsc_wlog_ring *r)
+{
+	size_t whole_sz = r->st.rb_byte_size;
+
+	SCSC_TAG_INFO(WLOG, "Draining whole ring %s\n", r->st.name);
+	return r->ops.drain_ring(r, whole_sz);
+}
+
+/* Request an asynchronous flush: mark @r flushing and kick the drain
+ * worker, which resets the counters at the end of its pass (see
+ * wlog_default_ring_drainer()). */
+void scsc_wlog_flush_ring(struct scsc_wlog_ring *r)
+{
+ r->flushing = true;
+ /* kick the workq...which will take care of flushing */
+ queue_work(r->drain_workq, &r->drain_work);
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef _SCSC_WIFILOGGER_CORE_H_
+#define _SCSC_WIFILOGGER_CORE_H_
+/**
+ * @file
+ *
+ * Implements a basic ring abstraction to be used as a common foundation
+ * upon which all the Wi-Fi Logger real rings are built.
+ *
+ * It will provide:
+ *
+ * - the basic record header common to all rings' flavours as defined in
+ * scsc_wifilogger_types.h::struct scsc_wifi_ring_buffer_entry
+ *
+ * | entry | flags | type | timestamp |
+ *
+ * - a set of common basic rings' methods: default_ring_ops
+ *
+ * - a common periodic worker used to periodically drain the rings using
+ * one of the above operations (when configured to do so)
+ *
+ * General Ring Architecture
+ * -------------------------
+ * The ring is constituted by a buffer of contiguous memory of specified
+ * size followed by a spare area of MAX_RECORD_SZ; this latter area is used
+ * when a record would not fit the physical end of the ring buffer and would
+ * be going to wrap around: in a such a case we simply write down the record
+ * content and let it spill over into the spare area; we'll then take care
+ * to copy the overflown part from the spare area into the start of the
+ * physical buffer. For this reason a limit of MAX_RECORD_SZ length is
+ * enforced on write.
+ *
+ * Ring status is maintained inside <struct scsc_wifi_ring_buffer_status> that
+ * is a well known and defined structure defined in scsc_wifilogger_types.h;
+ * such structure format is expected by Framework itself when it queries for
+ * ring status using Wi-Fi HAL.
+ * Such structure contains also the @read_bytes and @written_bytes counters
+ * needed for all the ring mechanics based on modulo-ring-size arithmetic.
+ * Modulo arithmetic is achieved without using the % operator itself, so the
+ * ring is expected to be of power-of-two size.
+ *
+ * Ring's basic operations are defined as follows:
+ * - MULTIPLE concurrent writers are expected (and handled)
+ *
+ * - Only ONE active reader is expected at any time: such a reader could act
+ * out of the periodic reader worker or triggered by a write operation.
+ *
+ * - each ring read-behavior is configured by two params:
+ * + min_data_size: the minimum amount of available data that should trigger
+ * a read. Ignored if zero.
+ * + max_interval_sec: periodic-reader interval in seconds. Ignored if zero.
+ *
+ * NOTE THAT if both the above params are configured as zero, no periodic or
+ * threshold reading process will be performed and, in absence of any kind of
+ * polling-read mechanism, the ring would finally FILL-UP: in such a case all
+ * the data received once the ring is full will be DROPPED.
+ * This behavior fits the pkt_fate use case scenario.
+ */
+
+#include "scsc_wifilogger_types.h"
+#include "scsc_wifilogger_internal.h"
+
+/* Size of the per-ring scratch buffer used by the drain path */
+#define DRAIN_BUF_SZ 4096
+
+#define BUF_SZ(r) ((r)->st.rb_byte_size)
+#define BUF_START(r) ((r)->buf)
+#define BUF_END(r) ((r)->buf + BUF_SZ(r))
+/* Unread bytes; u32 counters make this wrap-around safe */
+#define AVAIL_BYTES(r) ((r)->st.written_bytes - (r)->st.read_bytes)
+
+/**
+ * Avoid % when calculating ring-relative position
+ * Ring SIZE MUST BE A POWER OF TWO....currently is enforced in
+ * WiFi-Logger code since there's no way (API) to set the rings' sizes.
+ */
+#define RPOS(r) \
+ ((r)->st.read_bytes & (BUF_SZ(r) - 1))
+
+#define WPOS(r) \
+ ((r)->st.written_bytes & (BUF_SZ(r) - 1))
+
+/* Write position after appending @bytes, modulo ring size */
+#define WPOS_INC(r, bytes) \
+ (((r)->st.written_bytes + (bytes)) & (BUF_SZ(r) - 1))
+
+/* The first u16 of a record is its payload size (entry_size field) */
+#define REC_PAYLOAD_SZ(r, pos) \
+ (*((u16 *)((r)->buf + (pos))))
+
+#define REC_SZ(r, pos) \
+ (REC_PAYLOAD_SZ(r, pos) + sizeof(struct scsc_wifi_ring_buffer_entry))
+
+#define REC_START(r, pos) ((r)->buf + (pos))
+
+#define IS_EMPTY(r) \
+ ((r)->st.read_bytes == (r)->st.written_bytes)
+
+/* True when @bytes fit without overwriting unread data; the ring is
+ * designed to never overwrite itself (writers drop instead) */
+#define CAN_FIT(r, bytes) \
+ (bytes < BUF_SZ(r) && \
+ (IS_EMPTY(r) || \
+ (WPOS(r) < RPOS(r) && WPOS(r) + (bytes) < RPOS(r)) || \
+ (WPOS(r) > RPOS(r) && (WPOS(r) + (bytes) < BUF_SZ(r) || WPOS_INC(r, bytes) < RPOS(r)))))
+
+/**
+ * Fill the common record header at @ptr; an @rtimestamp of zero means
+ * "stamp now" via local_clock().
+ *
+ * Fix: flags are assigned instead of OR-ed in. The header is written
+ * into ring memory that gets reused once the reader has advanced, so
+ * "h->flags |= ..." would accumulate stale flag bits left over from a
+ * previous record occupying the same bytes.
+ */
+#define REC_HEADER_FILL(ptr, payload_sz, rtimestamp, rflags, rtype) \
+	do { \
+		struct scsc_wifi_ring_buffer_entry *h = \
+			(struct scsc_wifi_ring_buffer_entry *)(ptr); \
+		\
+		h->entry_size = (payload_sz); \
+		h->flags = (rflags) | RING_BUFFER_ENTRY_FLAGS_HAS_TIMESTAMP; \
+		h->type = (rtype); \
+		h->timestamp = (rtimestamp) ?: local_clock();\
+	} while (0)
+
+/* Drain sizing: at most half the ring per worker pass, a quarter on a
+ * forced (threshold) drain. MAX_RECORD_SZ caps single records and is
+ * also the size of the rollover spare area past the ring end. */
+#define MINIMUM_DRAIN_CHUNK_BYTES 1024
+#define DEFAULT_DRAIN_CHUNK_SZ(r) ((r)->st.rb_byte_size / 2)
+#define FORCE_DRAIN_CHUNK_SZ(r) ((r)->st.rb_byte_size / 4)
+#define MAX_RECORD_SZ 8192
+
+/* Ring lifecycle: SUSPEND = verbosity 0, drain timer/work stopped */
+enum {
+ RING_STATE_SUSPEND,
+ RING_STATE_ACTIVE
+};
+
+struct scsc_wlog_ring;
+
+/* Optional per-ring hooks; both return true on success */
+typedef bool (*init_cb)(struct scsc_wlog_ring *r);
+typedef bool (*finalize_cb)(struct scsc_wlog_ring *r);
+
+/* Per-ring method table; populated from default_ring_ops at creation,
+ * with init/finalize/loglevel_change optionally overridden. */
+struct scsc_wlog_ring_ops {
+ init_cb init;
+ finalize_cb finalize;
+
+ wifi_error (*get_ring_status)(struct scsc_wlog_ring *r,
+ struct scsc_wifi_ring_buffer_status *status);
+ int (*read_records)(struct scsc_wlog_ring *r, u8 *buf, size_t blen,
+ u32 *records, struct scsc_wifi_ring_buffer_status *status);
+ int (*write_record)(struct scsc_wlog_ring *r, u8 *buf, size_t blen,
+ void *hdr, size_t hlen, u32 verbose_level, u64 timestamp);
+ int (*loglevel_change)(struct scsc_wlog_ring *r, u32 new_loglevel);
+ int (*drain_ring)(struct scsc_wlog_ring *r, size_t drain_sz);
+ wifi_error (*start_logging)(struct scsc_wlog_ring *r, u32 verbose_level,
+ u32 flags, u32 max_interval_sec,
+ u32 min_data_size);
+};
+
+/* One Wi-Fi Logger ring instance (see file header for the design) */
+struct scsc_wlog_ring {
+ /* lifecycle flags: set by create/register/flush paths respectively */
+ bool initialized;
+ bool registered;
+ bool flushing;
+ /* when set, threshold-drain kicks are suppressed on write */
+ bool drop_on_full;
+ u8 state;
+
+ /* ring storage (followed by the MAX_RECORD_SZ rollover spare) */
+ u8 *buf;
+ unsigned int features_mask;
+ u8 type;
+ /* drain triggers: byte threshold and periodic interval (0 = off) */
+ u32 min_data_size;
+ u32 max_interval_sec;
+ u32 dropped;
+ /* optional external mirror of st.verbose_level */
+ u32 *verbosity;
+ raw_spinlock_t rlock, wlock;
+ struct scsc_wifi_ring_buffer_status st;
+
+ /* drain machinery: scratch buffer, single-reader lock, timer/work */
+ u8 *drain_buf;
+ size_t drain_sz;
+ struct mutex drain_lock;
+ struct timer_list drain_timer;
+ struct work_struct drain_work;
+ struct workqueue_struct *drain_workq;
+
+ struct scsc_wlog_ring_ops ops;
+
+ void *priv;
+
+ /* back-reference to the owning logger instance */
+ struct scsc_wifi_logger *wl;
+};
+
+struct scsc_wlog_ring *scsc_wlog_ring_create(char *ring_name, u32 flags,
+ u8 type, u32 size,
+ unsigned int features_mask,
+ init_cb init, finalize_cb fini,
+ void *priv);
+
+void scsc_wlog_ring_destroy(struct scsc_wlog_ring *r);
+
+int scsc_wlog_register_loglevel_change_cb(struct scsc_wlog_ring *r,
+ int (*callback)(struct scsc_wlog_ring *r,
+ u32 new_loglevel));
+
+int scsc_wlog_drain_whole_ring(struct scsc_wlog_ring *r);
+
+/* Empty when the reader has consumed every byte ever written (u32
+ * counters; only equality matters, so wrap-around is harmless). */
+static inline bool scsc_wlog_is_ring_empty(struct scsc_wlog_ring *r)
+{
+	return !(r->st.written_bytes - r->st.read_bytes);
+}
+
+/* NULL-safe dispatcher to the ring's get_ring_status op */
+static inline wifi_error scsc_wlog_get_ring_status(struct scsc_wlog_ring *r,
+						   struct scsc_wifi_ring_buffer_status *status)
+{
+	return !r ? WIFI_ERROR_INVALID_ARGS : r->ops.get_ring_status(r, status);
+}
+
+/* Level 0 disables the ring entirely; otherwise a message passes when
+ * its verbosity does not exceed the ring's configured level. */
+static inline bool scsc_wlog_is_message_allowed(struct scsc_wlog_ring *r, u32 verbose_level)
+{
+	u32 curr_level = r->st.verbose_level;
+
+	if (!curr_level)
+		return false;
+
+	return verbose_level <= curr_level;
+}
+
+/* Read whole records into @buf, no record cap, no status snapshot */
+static inline int scsc_wlog_read_records(struct scsc_wlog_ring *r, u8 *buf, size_t blen)
+{
+ return r->ops.read_records(r, buf, blen, NULL, NULL);
+}
+
+/* As above but bounded by *max_records (updated with the count read) */
+static inline int scsc_wlog_read_max_records(struct scsc_wlog_ring *r, u8 *buf,
+ size_t blen, u32 *max_records)
+{
+ return r->ops.read_records(r, buf, blen, max_records, NULL);
+}
+
+/* Producer entry point: dispatch to the ring's write_record op */
+static inline int scsc_wlog_write_record(struct scsc_wlog_ring *r, u8 *buf, size_t blen,
+ void *hdr, size_t hlen, u32 verbose_level, u64 timestamp)
+{
+ return r->ops.write_record(r, buf, blen, hdr, hlen, verbose_level, timestamp);
+}
+
+/* Dispatch to the ring's start_logging op (no NULL check: caller's duty) */
+static inline wifi_error scsc_wlog_start_logging(struct scsc_wlog_ring *r,
+ u32 verbose_level, u32 flags,
+ u32 max_interval_sec, u32 min_data_size)
+{
+ return r->ops.start_logging(r, verbose_level, flags, max_interval_sec, min_data_size);
+}
+
+/* Sticky: suppresses threshold-drain kicks on the write path */
+static inline void scsc_wlog_ring_set_drop_on_full(struct scsc_wlog_ring *r)
+{
+ r->drop_on_full = true;
+}
+
+/* Register an external u32 kept in sync with st.verbose_level */
+static inline void scsc_wlog_register_verbosity_reference(struct scsc_wlog_ring *r, u32 *verbose_ref)
+{
+ r->verbosity = verbose_ref;
+}
+
+/* Used by read/write paths to bail out while a flush is in progress */
+static inline bool scsc_wlog_ring_is_flushing(struct scsc_wlog_ring *r)
+{
+	if (r->flushing) {
+		SCSC_TAG_DBG4(WLOG, "Ring is flushing..abort pending read/write\n");
+		return true;
+	}
+
+	return false;
+}
+
+/* Apply @verbose_level to @r if it differs: notify the optional
+ * loglevel_change op first, then update the status block and the
+ * externally registered verbosity mirror (if any). */
+static inline void scsc_wlog_ring_change_verbosity(struct scsc_wlog_ring *r, u32 verbose_level)
+{
+ if (r->st.verbose_level != verbose_level) {
+ if (r->ops.loglevel_change)
+ r->ops.loglevel_change(r, verbose_level);
+ r->st.verbose_level = verbose_level;
+ if (r->verbosity)
+ *r->verbosity = r->st.verbose_level;
+ SCSC_TAG_INFO(WLOG, "Ring: %s -- verbose_level changed to: %d\n",
+ r->st.name, r->st.verbose_level);
+ }
+}
+
+void scsc_wlog_flush_ring(struct scsc_wlog_ring *r);
+
+#endif /*_SCSC_WIFI_LOGGER_CORE_H_*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#include <linux/uaccess.h>
+#include "scsc_wifilogger_debugfs.h"
+#include "scsc_wifilogger_ring_pktfate.h"
+
+struct dentry *scsc_wlog_debugfs_global_root;
+
+/**
+ * dfs_open - common debugfs open: stash inode private data on the file.
+ *
+ * Exposes the ring-test object placed in inode->i_private at file
+ * creation time; fails with -EFAULT when none was attached.
+ */
+int dfs_open(struct inode *ino, struct file *filp)
+{
+	if (filp->private_data)
+		return 0;
+
+	filp->private_data = ino->i_private;
+
+	return filp->private_data ? 0 : -EFAULT;
+}
+
+/* Nothing to tear down: private_data is owned by the ring test object */
+int dfs_release(struct inode *ino, struct file *filp)
+{
+ return 0;
+}
+
+#define SCSC_RING_TEST_STAT_SZ 512
+
+/**
+ * dfs_stats_read - debugfs read() reporting ring config and counters.
+ *
+ * Formats a text report and serves it via simple_read_from_buffer(),
+ * which honours the caller's @count and @f_pos. This replaces the
+ * hand-rolled copy logic that mixed int/size_t/loff_t arithmetic, and
+ * additionally clamps snprintf()'s would-be length to what actually
+ * fits in the buffer.
+ */
+static ssize_t dfs_stats_read(struct file *filp, char __user *ubuf,
+			      size_t count, loff_t *f_pos)
+{
+	int slen;
+	char statstr[SCSC_RING_TEST_STAT_SZ] = {};
+	struct scsc_ring_test_object *rto = filp->private_data;
+
+	slen = snprintf(statstr, SCSC_RING_TEST_STAT_SZ,
+			"[%s]:: len:%d state:%d verbose:%d min_data_size:%d max_interval_sec:%d drop_on_full:%d\n"
+			"\tunread:%d written:%d read:%d written_records:%d dropped:%d buf:%p\n",
+			rto->r->st.name, rto->r->st.rb_byte_size, rto->r->state,
+			rto->r->st.verbose_level, rto->r->min_data_size,
+			rto->r->max_interval_sec, rto->r->drop_on_full,
+			rto->r->st.written_bytes - rto->r->st.read_bytes,
+			rto->r->st.written_bytes, rto->r->st.read_bytes,
+			rto->r->st.written_records, rto->r->dropped, rto->r->buf);
+	if (slen < 0)
+		return 0;
+	if (slen >= SCSC_RING_TEST_STAT_SZ)
+		slen = SCSC_RING_TEST_STAT_SZ - 1;
+
+	return simple_read_from_buffer(ubuf, count, f_pos, statstr, slen);
+}
+
+/* debugfs <ring>/stats: read-only ring status report */
+const struct file_operations stats_fops = {
+ .owner = THIS_MODULE,
+ .open = dfs_open,
+ .read = dfs_stats_read,
+ .release = dfs_release,
+};
+
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+/**
+ * dfs_read_record_open - open the test-only raw record reader.
+ *
+ * Enforces a single concurrent reader via readers_lock (trylock, so a
+ * second opener gets -EPERM instead of blocking), then raises the
+ * ring's verbosity to 1 so records start flowing.
+ */
+static int dfs_read_record_open(struct inode *ino, struct file *filp)
+{
+ int ret;
+ struct scsc_ring_test_object *rto;
+
+ ret = dfs_open(ino, filp);
+ if (ret)
+ return ret;
+
+ rto = filp->private_data;
+ if (!mutex_trylock(&rto->readers_lock)) {
+ SCSC_TAG_ERR(WLOG,
+ "Failed to get readers mutex...ONLY one reader allowed !!!\n");
+ dfs_release(ino, filp);
+ return -EPERM;
+ }
+ /* NO Log handler here...only raise verbosity */
+ scsc_wifi_start_logging(1, 0x00, 0, 8192, rto->r->st.name);
+
+ return ret;
+}
+
+/* Counterpart of dfs_read_record_open: stop logging (verbosity 0) and
+ * release the single-reader mutex. */
+static int dfs_read_record_release(struct inode *ino, struct file *filp)
+{
+ struct scsc_ring_test_object *rto = filp->private_data;
+
+ /* Stop logging ... verbosity 0 */
+ scsc_wifi_start_logging(0, 0x00, 0, 8192, rto->r->st.name);
+ mutex_unlock(&rto->readers_lock);
+ SCSC_TAG_DEBUG(WLOG, "Readers mutex released.\n");
+
+ return dfs_release(ino, filp);
+}
+
+/**
+ * dfs_read_record - blocking raw read of ring records (test harness).
+ *
+ * Sleeps until the ring has data, then consumes up to rto->bsz bytes of
+ * whole records and copies at most @count of them to userspace.
+ *
+ * NOTE(review): if the read returns more than @count, the surplus has
+ * already been consumed from the ring and is silently discarded --
+ * acceptable for test-only code, but worth confirming.
+ */
+static ssize_t dfs_read_record(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *f_pos)
+{
+ int ret;
+ struct scsc_ring_test_object *rto;
+
+ if (!filp->private_data)
+ return -EINVAL;
+ rto = filp->private_data;
+
+ while (scsc_wlog_is_ring_empty(rto->r)) {
+ if (wait_event_interruptible(rto->rw_wq,
+ !scsc_wlog_is_ring_empty(rto->r)))
+ return -ERESTARTSYS;
+ }
+ ret = scsc_wlog_read_records(rto->r, rto->rbuf, rto->bsz);
+ count = ret <= count ? ret : count;
+ if (copy_to_user(ubuf, rto->rbuf, count))
+ return -EFAULT;
+ *f_pos += count;
+
+ return count;
+}
+
+/* debugfs <ring>/read_record: raw single-reader record consumer */
+const struct file_operations read_record_fops = {
+ .owner = THIS_MODULE,
+ .open = dfs_read_record_open,
+ .read = dfs_read_record,
+ .release = dfs_read_record_release,
+};
+
+/**
+ * on_ring_test_data_cb - test log handler simulating an upstream channel.
+ *
+ * Copies each drained chunk into a freshly allocated node queued on the
+ * head object's list, then wakes any reader blocked in dfs_read().
+ * Allocation failures silently drop the chunk (best effort, test only).
+ *
+ * NOTE(review): the list is mutated here and in dfs_read()/dfs_read_release()
+ * without a shared lock -- looks racy if producer and consumer overlap;
+ * confirm the drain path serializes these invocations.
+ */
+static void on_ring_test_data_cb(char *ring_name, char *buf, int bsz,
+ struct scsc_wifi_ring_buffer_status *status,
+ void *ctx)
+{
+ struct scsc_ring_test_object *a_rto, *head_rto = ctx;
+
+ a_rto = kzalloc(sizeof(*a_rto), GFP_KERNEL);
+ if (!a_rto)
+ return;
+ a_rto->rbuf = kmalloc(bsz, GFP_KERNEL);
+ if (!a_rto->rbuf) {
+ kfree(a_rto);
+ return;
+ }
+ /* copy and pass over into a list to simulate a channel */
+ memcpy(a_rto->rbuf, buf, bsz);
+ a_rto->bsz = bsz;
+ list_add_tail(&a_rto->elem, &head_rto->elem);
+ wake_up_interruptible(&head_rto->drain_wq);
+}
+
+/**
+ * dfs_read_open - open the simulated-channel reader (test harness).
+ *
+ * Single reader enforced via readers_lock; installs the test log
+ * handler and starts logging with a 2s drain interval so drained data
+ * gets queued by on_ring_test_data_cb().
+ */
+static int dfs_read_open(struct inode *ino, struct file *filp)
+{
+ int ret;
+ struct scsc_ring_test_object *rto;
+
+ ret = dfs_open(ino, filp);
+ if (ret)
+ return ret;
+
+ /* Filp private data NOW contains rto */
+ rto = filp->private_data;
+ if (!mutex_trylock(&rto->readers_lock)) {
+ SCSC_TAG_ERR(WLOG,
+ "Failed to get readers mutex...ONLY one reader allowed !!!\n");
+ dfs_release(ino, filp);
+ return -EPERM;
+ }
+
+ SCSC_TAG_DEBUG(WLOG,
+ "DebugFS Read opened...setting handlers...starting logging on: %s\n",
+ rto->r->st.name);
+ scsc_wifi_set_log_handler(on_ring_test_data_cb, rto);
+ scsc_wifi_start_logging(1, 0x00, 2, 8192, rto->r->st.name);
+
+ return ret;
+}
+
+/**
+ * dfs_read - drain the items queued by on_ring_test_data_cb().
+ *
+ * Blocks until at least one item is queued, then copies as many whole
+ * items as fit in @count bytes to userspace, freeing each as it goes.
+ * An item that does not fit is dropped (test-only code keeps this
+ * simple instead of re-queueing it).
+ *
+ * Fix: return the number of bytes actually copied (ret_count) rather
+ * than the caller's @count, which over-reported the read and let
+ * userspace consume an uninitialized buffer tail.
+ */
+static ssize_t dfs_read(struct file *filp, char __user *ubuf,
+			size_t count, loff_t *f_pos)
+{
+	size_t ret_count = 0;
+	struct list_head *pos = NULL, *tlist = NULL;
+	struct scsc_ring_test_object *head_rto, *a_rto;
+
+	if (!filp->private_data)
+		return -EINVAL;
+	head_rto = filp->private_data;
+
+	/* Wait for the drain callback to queue some data */
+	while (list_empty(&head_rto->elem)) {
+		if (wait_event_interruptible(head_rto->drain_wq, !list_empty(&head_rto->elem)))
+			return -ERESTARTSYS;
+	}
+
+	list_for_each_safe(pos, tlist, &head_rto->elem) {
+		a_rto = list_entry(pos, struct scsc_ring_test_object, elem);
+		SCSC_TAG_DEBUG(WLOG, "Processing list item: %p\n", a_rto);
+		if (!a_rto || ret_count + a_rto->bsz >= count) {
+			SCSC_TAG_DEBUG(WLOG, "BREAK OUT on:%p\n", a_rto);
+			list_del(pos);
+			if (a_rto) {
+				kfree(a_rto->rbuf);
+				kfree(a_rto);
+			}
+			break;
+		}
+		if (copy_to_user(ubuf + ret_count, a_rto->rbuf, a_rto->bsz))
+			return -EFAULT;
+		ret_count += a_rto->bsz;
+		list_del(pos);
+		kfree(a_rto->rbuf);
+		kfree(a_rto);
+	}
+	*f_pos += ret_count;
+
+	return ret_count;
+}
+
+/**
+ * dfs_read_release - close the simulated-channel reader.
+ *
+ * Frees any items still queued, then delegates to
+ * dfs_read_record_release() (stops logging and drops readers_lock)
+ * and finally unregisters the test log handler.
+ */
+static int dfs_read_release(struct inode *ino, struct file *filp)
+{
+ int ret;
+ struct scsc_ring_test_object *head_rto, *a_rto;
+
+ head_rto = filp->private_data;
+ if (head_rto) {
+ struct list_head *pos = NULL, *tlist = NULL;
+
+ /* Drop whatever the reader did not consume */
+ list_for_each_safe(pos, tlist, &head_rto->elem) {
+ a_rto = list_entry(pos, struct scsc_ring_test_object, elem);
+ list_del(pos);
+ if (a_rto) {
+ kfree(a_rto->rbuf);
+ kfree(a_rto);
+ a_rto = NULL;
+ }
+ }
+ }
+
+ ret = dfs_read_record_release(ino, filp);
+ scsc_wifi_reset_log_handler();
+
+ return ret;
+}
+
+/* debugfs <ring>/read: consume the simulated upstream channel */
+const struct file_operations read_fops = {
+ .owner = THIS_MODULE,
+ .open = dfs_read_open,
+ .read = dfs_read,
+ .release = dfs_read_release,
+};
+
+#define BUF_LEN 16
+static ssize_t dfs_verbose_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *f_pos)
+{
+ char buf[BUF_LEN] = {};
+ struct scsc_ring_test_object *rto;
+
+ if (!filp->private_data)
+ return -EINVAL;
+ rto = filp->private_data;
+
+ count = snprintf(buf, BUF_LEN, "%d\n", rto->r->st.verbose_level);
+ if (copy_to_user(ubuf, buf, count))
+ return -EFAULT;
+ /* emit EOF after having spitted the value once */
+ count = !*f_pos ? count : 0;
+ *f_pos += count;
+
+ return count;
+}
+
+/**
+ * dfs_verbose_write - set verbosity; magic values trigger test actions.
+ *
+ * Parses a decimal level and applies it via
+ * scsc_wlog_ring_change_verbosity(). Specific levels double as test
+ * commands: 10 full drain, 20 flush, 30 set drop-on-full, 40 start
+ * pktfate monitoring (if supported), 50 dump ring statuses.
+ *
+ * NOTE(review): copy_from_user failure returns -EINVAL here; kernel
+ * convention would be -EFAULT -- left unchanged, flagging for review.
+ */
+static ssize_t dfs_verbose_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *f_pos)
+{
+ char buf[BUF_LEN] = {};
+ size_t written;
+ unsigned long verb;
+ struct scsc_ring_test_object *rto;
+
+ if (!filp->private_data)
+ return -EINVAL;
+ rto = filp->private_data;
+
+ /* Clamp so the NUL terminator always fits */
+ count = count < BUF_LEN ? count : BUF_LEN - 1;
+ if (copy_from_user(buf, ubuf, count))
+ return -EINVAL;
+ if (!kstrtoul((const char *)buf, 10, &verb))
+ scsc_wlog_ring_change_verbosity(rto->r, verb);
+ written = strlen(buf);
+ *f_pos += written;
+
+ SCSC_TAG_DEBUG(WLOG, "Changed verbosity on ring %s to %d\n",
+ rto->r->st.name, rto->r->st.verbose_level);
+
+ /* Magic levels drive test-only side effects */
+ switch (rto->r->st.verbose_level) {
+ case 10:
+ SCSC_TAG_DEBUG(WLOG, "Ring '%s' -- RING FULL DRAIN !\n",
+ rto->r->st.name);
+ scsc_wifi_get_ring_data(rto->r->st.name);
+ break;
+ case 20:
+ scsc_wlog_flush_ring(rto->r);
+ SCSC_TAG_DEBUG(WLOG, "Ring '%s' -- RING FLUSH !\n",
+ rto->r->st.name);
+ break;
+ case 30:
+ scsc_wlog_ring_set_drop_on_full(rto->r);
+ SCSC_TAG_DEBUG(WLOG, "Ring '%s' -- RING SET DROP ON FULL !\n",
+ rto->r->st.name);
+ break;
+ case 40:
+ if (rto->r->features_mask & WIFI_LOGGER_PACKET_FATE_SUPPORTED) {
+ scsc_wifilogger_ring_pktfate_start_monitoring();
+ SCSC_TAG_DEBUG(WLOG, "PKTFATE MONITORING STARTED !\n");
+ }
+ break;
+ case 50:
+ {
+ int i, num_rings = 10;
+ struct scsc_wifi_ring_buffer_status status[10];
+
+ scsc_wifilogger_get_rings_status(&num_rings, status);
+ SCSC_TAG_INFO(WLOG, "Returned rings: %d\n", num_rings);
+ for (i = 0; i < num_rings; i++)
+ SCSC_TAG_INFO(WLOG, "Retrieved ring: %s\n", status[i].name);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return written;
+}
+
+/* debugfs <ring>/verbose_level: get/set verbosity (plus test commands) */
+const struct file_operations verbosity_fops = {
+ .owner = THIS_MODULE,
+ .open = dfs_open,
+ .read = dfs_verbose_read,
+ .write = dfs_verbose_write,
+ .release = dfs_release,
+};
+
+#endif /* CONFIG_SCSC_WIFILOGGER_TEST */
+
+/*** Public ***/
+
+/**
+ * init_ring_test_object - allocate the per-ring debugfs context object.
+ * @r: ring this object will expose
+ *
+ * Returns the new object or NULL on allocation failure. With
+ * CONFIG_SCSC_WIFILOGGER_TEST the extra read/write buffers, the
+ * simulated-channel list and the waitqueues/mutex used by the test
+ * file ops are initialized too.
+ */
+struct scsc_ring_test_object *init_ring_test_object(struct scsc_wlog_ring *r)
+{
+ struct scsc_ring_test_object *rto;
+
+ rto = kzalloc(sizeof(*rto), GFP_KERNEL);
+ if (!rto)
+ return rto;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+ rto->bsz = MAX_RECORD_SZ;
+ rto->rbuf = kzalloc(rto->bsz, GFP_KERNEL);
+ if (!rto->rbuf) {
+ kfree(rto);
+ return NULL;
+ }
+ rto->wbuf = kzalloc(rto->bsz, GFP_KERNEL);
+ if (!rto->wbuf) {
+ kfree(rto->rbuf);
+ kfree(rto);
+ return NULL;
+ }
+ /* used by on_ring_data_cb simulation test */
+ INIT_LIST_HEAD(&rto->elem);
+ init_waitqueue_head(&rto->drain_wq);
+ init_waitqueue_head(&rto->rw_wq);
+ mutex_init(&rto->readers_lock);
+#endif
+ rto->r = r;
+
+ return rto;
+}
+
+/**
+ * scsc_wlog_register_debugfs_entry - create one debugfs file for a ring.
+ *
+ * Lazily creates the shared root dir and the per-ring subdir, then adds
+ * @fname with @rto stored in the inode's i_private (retrieved by
+ * dfs_open()). Returns @di on success, NULL on failure.
+ *
+ * NOTE(review): on newer kernels debugfs_create_dir() reports failure
+ * with ERR_PTR, not NULL, so the "!dir" checks may miss errors --
+ * confirm against the target kernel version.
+ */
+void *scsc_wlog_register_debugfs_entry(const char *ring_name,
+ const char *fname,
+ const struct file_operations *fops,
+ void *rto,
+ struct scsc_wlog_debugfs_info *di)
+{
+ if (!ring_name || !fname || !fops || !rto || !di)
+ return NULL;
+
+ /* create root debugfs dirs if they not already exists */
+ if (!di->rootdir) {
+ if (scsc_wlog_debugfs_global_root) {
+ di->rootdir = scsc_wlog_debugfs_global_root;
+ } else {
+ di->rootdir = debugfs_create_dir(SCSC_DEBUGFS_ROOT_DIRNAME, NULL);
+ scsc_wlog_debugfs_global_root = di->rootdir;
+ }
+ if (!di->rootdir)
+ goto no_rootdir;
+ }
+
+ if (!di->ringdir) {
+ di->ringdir = debugfs_create_dir(ring_name, di->rootdir);
+ if (!di->ringdir)
+ goto no_ringdir;
+ }
+
+ /* Saving ring ref @r to Inode */
+ debugfs_create_file(fname, 0664, di->ringdir, rto, fops);
+
+ return di;
+
+no_ringdir:
+no_rootdir:
+ SCSC_TAG_ERR(WLOG, "Failed WiFiLogger Debugfs basic initialization\n");
+ return NULL;
+}
+
+/**
+ * scsc_wifilogger_debugfs_remove_top_dir_recursive - tear down debugfs tree.
+ *
+ * Removes the shared wifilogger debugfs root and everything below it.
+ *
+ * Fix: NULL out scsc_wlog_debugfs_global_root afterwards -- leaving the
+ * stale dentry pointer meant a later scsc_wlog_register_debugfs_entry()
+ * would reuse a freed directory instead of re-creating it.
+ */
+void scsc_wifilogger_debugfs_remove_top_dir_recursive(void)
+{
+	if (!scsc_wlog_debugfs_global_root)
+		return;
+
+	debugfs_remove_recursive(scsc_wlog_debugfs_global_root);
+	scsc_wlog_debugfs_global_root = NULL;
+
+	SCSC_TAG_INFO(WLOG, "Wi-Fi Logger Debugfs Cleaned Up\n");
+}
+
+/* Register the standard debugfs files for @ring_name: "stats" always,
+ * plus the test-only verbose_level/read_record/read files when
+ * CONFIG_SCSC_WIFILOGGER_TEST is enabled. */
+void scsc_register_common_debugfs_entries(char *ring_name, void *rto,
+ struct scsc_wlog_debugfs_info *di)
+{
+ scsc_wlog_register_debugfs_entry(ring_name, "stats",
+ &stats_fops, rto, di);
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+ scsc_wlog_register_debugfs_entry(ring_name, "verbose_level",
+ &verbosity_fops, rto, di);
+ scsc_wlog_register_debugfs_entry(ring_name, "read_record",
+ &read_record_fops, rto, di);
+ scsc_wlog_register_debugfs_entry(ring_name, "read",
+ &read_fops, rto, di);
+#endif
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef _SCSC_WIFILOGGER_DEBUGFS_H_
+#define _SCSC_WIFILOGGER_DEBUGFS_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <asm/uaccess.h>
+
+#include <scsc/scsc_logring.h>
+
+#include "scsc_wifilogger_core.h"
+#include "scsc_wifilogger.h"
+
+#define SCSC_DEBUGFS_ROOT "/sys/kernel/debug/wifilogger"
+#define SCSC_DEBUGFS_ROOT_DIRNAME "wifilogger"
+
+extern struct dentry *scsc_wlog_debugfs_global_root;
+
+/* Per-ring debugfs directory handles (shared root + ring subdir) */
+struct scsc_wlog_debugfs_info {
+ struct dentry *rootdir;
+ struct dentry *ringdir;
+};
+
+/* Context object attached to each debugfs file via inode->i_private.
+ * The test-only fields double as a fake upstream channel: nodes of this
+ * same type are queued on @elem by on_ring_test_data_cb() and consumed
+ * by dfs_read(). */
+struct scsc_ring_test_object {
+ struct scsc_wlog_ring *r;
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+ char *rbuf;
+ char *wbuf;
+ size_t bsz;
+ struct list_head elem;
+ wait_queue_head_t drain_wq;
+ wait_queue_head_t rw_wq;
+ struct mutex readers_lock;
+#endif
+};
+
+void *scsc_wlog_register_debugfs_entry(const char *ring_name,
+ const char *fname,
+ const struct file_operations *fops,
+ void *rto,
+ struct scsc_wlog_debugfs_info *di);
+
+struct scsc_ring_test_object *init_ring_test_object(struct scsc_wlog_ring *r);
+
+void scsc_wifilogger_debugfs_remove_top_dir_recursive(void);
+
+void scsc_register_common_debugfs_entries(char *ring_name, void *rto,
+ struct scsc_wlog_debugfs_info *di);
+
+int dfs_open(struct inode *ino, struct file *filp);
+
+int dfs_release(struct inode *ino, struct file *filp);
+
+#endif /* _SCSC_WIFILOGGER_DEBUGFS_H_ */
+
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+
+#include "scsc_wifilogger_internal.h"
+
+static struct scsc_wifi_logger *wifi_logger;
+
+/* Return the singleton logger state (NULL until scsc_wifilogger_init()). */
+struct scsc_wifi_logger *scsc_wifilogger_get_handle(void)
+{
+ return wifi_logger;
+}
+
+/**
+ * scsc_wifilogger_init - Allocate and initialize the singleton logger state.
+ *
+ * Idempotent: a second call while already initialized is a no-op returning
+ * true. Previously a repeated call overwrote the wifi_logger pointer and
+ * leaked the first allocation.
+ *
+ * Return: true on success (or when already initialized), false on OOM.
+ */
+bool scsc_wifilogger_init(void)
+{
+	struct scsc_wifi_logger *wl;
+
+	/* Guard against double-init: it would leak the old wifi_logger. */
+	if (wifi_logger && wifi_logger->initialized)
+		return true;
+
+	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
+	if (!wl) {
+		SCSC_TAG_ERR(WLOG,
+			     "Failed to allocate scsc_wifilogger data. Abort.\n");
+		return false;
+	}
+
+	mutex_init(&wl->lock);
+	wl->initialized = true;
+	wifi_logger = wl;
+
+	return true;
+}
+
+/**
+ * Firmware event notifier callback.
+ *
+ * On SCSC_FW_EVENT_MOREDUMP_COMPLETE, @data is the textual panic record
+ * dump; it is forwarded to the framework alert handler (if registered)
+ * with a fixed PANIC_RECORD_DUMP_BUFFER_SZ length and err_code 0.
+ * SCSC_FW_EVENT_FAILURE is acknowledged with NOTIFY_DONE; unknown events
+ * yield NOTIFY_BAD, as does being called before the logger is initialized.
+ */
+static int scsc_wifilogger_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int ret = NOTIFY_DONE;
+ struct scsc_wifi_logger *wl = NULL;
+
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG, "WiFi Logger NOT initialized !\n");
+ return NOTIFY_BAD;
+ }
+
+ switch (event) {
+ case SCSC_FW_EVENT_FAILURE:
+ /* Nothing to do here: fall out with the default NOTIFY_DONE. */
+ break;
+ case SCSC_FW_EVENT_MOREDUMP_COMPLETE:
+ {
+ char *panic_record_dump = data;
+
+ SCSC_TAG_INFO(WLOG, "Notification received: MOREDUMP COMPLETED.\n");
+ SCSC_TAG_DEBUG(WLOG, "PANIC DUMP RX\n-------------------------\n\n%s\n",
+ panic_record_dump);
+ if (wl->on_alert_cb) {
+ wl->on_alert_cb(panic_record_dump,
+ PANIC_RECORD_DUMP_BUFFER_SZ, 0, wl->on_alert_ctx);
+ SCSC_TAG_DEBUG(WLOG, "Alert handler -- processed %d bytes @%p\n",
+ PANIC_RECORD_DUMP_BUFFER_SZ, panic_record_dump);
+ }
+ ret = NOTIFY_OK;
+ break;
+ }
+ default:
+ ret = NOTIFY_BAD;
+ break;
+ }
+
+ return ret;
+}
+
+/* Notifier block hooked into the mxman firmware event chain. */
+static struct notifier_block firmware_nb = {
+ .notifier_call = scsc_wifilogger_notifier,
+};
+
+/*
+ * Enable the FW-alert feature set and hook into the mxman firmware event
+ * notifier chain. Memory/driver dump support is always advertised; the
+ * watchdog-timer feature is advertised only when the notifier could
+ * actually be registered.
+ */
+bool scsc_wifilogger_fw_alert_init(void)
+{
+	struct scsc_wifi_logger *logger = scsc_wifilogger_get_handle();
+
+	if (!logger || !logger->initialized) {
+		SCSC_TAG_ERR(WLOG, "WiFi Logger NOT initialized !\n");
+		return false;
+	}
+
+	logger->features_mask |= WIFI_LOGGER_MEMORY_DUMP_SUPPORTED |
+				 WIFI_LOGGER_DRIVER_DUMP_SUPPORTED;
+	if (mxman_register_firmware_notifier(&firmware_nb) == 0)
+		logger->features_mask |= WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED;
+
+	return true;
+}
+
+/* Report the bitmask of supported features; 0 when not initialized. */
+unsigned int scsc_wifilogger_get_features(void)
+{
+	struct scsc_wifi_logger *logger = scsc_wifilogger_get_handle();
+
+	if (logger && logger->initialized)
+		return logger->features_mask;
+
+	SCSC_TAG_ERR(WLOG, "WiFi Logger NOT initialized !\n");
+	return 0;
+}
+
+/**
+ * scsc_wifilogger_get_rings_status - Snapshot the status of registered rings.
+ * @num_rings: in: capacity of @status array; out: number of entries filled
+ * @status:    caller-provided array receiving one entry per registered ring
+ *
+ * Return: true on success, false when the logger is not initialized
+ * (in which case *num_rings is set to 0).
+ */
+bool scsc_wifilogger_get_rings_status(u32 *num_rings,
+				      struct scsc_wifi_ring_buffer_status *status)
+{
+	int i, j = 0;
+	struct scsc_wifi_logger *wl;
+
+	wl = scsc_wifilogger_get_handle();
+	if (!wl || !wl->initialized) {
+		SCSC_TAG_ERR(WLOG, "WiFi Logger NOT initialized !\n");
+		*num_rings = 0;
+		return false;
+	}
+
+	/* Fix: walk wl->rings[] under wl->lock, consistently with
+	 * get_ring_from_name/register_ring/destroy, so a ring cannot be
+	 * torn down while we are reading its status.
+	 */
+	mutex_lock(&wl->lock);
+	for (i = 0; i < *num_rings && i < MAX_WIFI_LOGGER_RINGS; i++)
+		if (wl->rings[i] && wl->rings[i]->registered)
+			scsc_wlog_get_ring_status(wl->rings[i], &status[j++]);
+	mutex_unlock(&wl->lock);
+	*num_rings = j;
+
+	return true;
+}
+
+/**
+ * Look up a registered ring by name.
+ *
+ * The rings[] table is walked under wl->lock. The ring is returned only
+ * when it is both initialized and registered; NULL is returned otherwise
+ * (including when the logger itself is not initialized, or when the name
+ * matches a ring that is present but not yet usable).
+ */
+struct scsc_wlog_ring *scsc_wifilogger_get_ring_from_name(char *name)
+{
+ int i;
+ struct scsc_wlog_ring *r = NULL;
+ struct scsc_wifi_logger *wl = NULL;
+
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "WiFi Logger NOT initialized..cannot find ring !\n");
+ return r;
+ }
+
+ mutex_lock(&wl->lock);
+ for (i = 0; i < MAX_WIFI_LOGGER_RINGS; i++) {
+ if (wl->rings[i] &&
+ !strncmp(name, wl->rings[i]->st.name, RING_NAME_SZ)) {
+ if (wl->rings[i]->initialized &&
+ wl->rings[i]->registered)
+ r = wl->rings[i];
+ break;
+ }
+ }
+ mutex_unlock(&wl->lock);
+
+ return r;
+}
+
+/**
+ * Register a ring with the logger core.
+ *
+ * The slot in wl->rings[] is derived from the unique ring_id; the ring's
+ * feature bits are merged into the logger-wide features_mask and the ring
+ * is linked back to the logger. Fails (false) when the logger is not
+ * initialized or when the computed slot is already occupied.
+ */
+bool scsc_wifilogger_register_ring(struct scsc_wlog_ring *r)
+{
+ int pos;
+ struct scsc_wifi_logger *wl = NULL;
+
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "WiFi Logger NOT initialized..cannot register ring !\n");
+ return false;
+ }
+ /**
+ * Calculate ring position in array from unique ring_id:
+ * there can be multiple distinct rings supporting the same
+ * feature....like pkt_fate_tx/rx
+ */
+ pos = r->st.ring_id % MAX_WIFI_LOGGER_RINGS;
+ mutex_lock(&wl->lock);
+ if (wl->rings[pos]) {
+ SCSC_TAG_ERR(WLOG,
+ "Ring %s already registered on position %d. Abort\n",
+ wl->rings[pos]->st.name, pos);
+ mutex_unlock(&wl->lock);
+ return false;
+ }
+ SCSC_TAG_DEBUG(WLOG, "Registering ring %s as position %d\n",
+ r->st.name, pos);
+ wl->rings[pos] = r;
+ wl->features_mask |= r->features_mask;
+ r->wl = wl;
+ r->registered = true;
+ mutex_unlock(&wl->lock);
+ SCSC_TAG_INFO(WLOG, "Ring '%s' registered\n", r->st.name);
+
+ return true;
+}
+
+/**
+ * Tear down the whole WiFi Logger: unhook the firmware notifier and the
+ * debugfs tree first (so no new requests can arrive), then mute,
+ * unregister and destroy each ring under wl->lock, and finally free the
+ * singleton state.
+ */
+void scsc_wifilogger_destroy(void)
+{
+ int i;
+ struct scsc_wlog_ring *r = NULL;
+ struct scsc_wifi_logger *wl = NULL;
+
+ wl = scsc_wifilogger_get_handle();
+ if (!wl || !wl->initialized) {
+ SCSC_TAG_ERR(WLOG,
+ "WiFi Logger NOT initialized..cannot destroy!\n");
+ return;
+ }
+
+ mxman_unregister_firmware_notifier(&firmware_nb);
+ /* Remove DebufgFS hooks at first... */
+ scsc_wifilogger_debugfs_remove_top_dir_recursive();
+
+ mutex_lock(&wl->lock);
+ for (i = 0; i < MAX_WIFI_LOGGER_RINGS; i++) {
+ if (wl->rings[i]) {
+ r = wl->rings[i];
+ /* Silence the ring before destroying it. */
+ scsc_wlog_ring_change_verbosity(r, WLOG_NONE);
+ r->registered = false;
+ wl->rings[i] = NULL;
+ if (r->initialized)
+ scsc_wlog_ring_destroy(r);
+ }
+ }
+ wl->features_mask = 0;
+ mutex_unlock(&wl->lock);
+ kfree(wl);
+ wifi_logger = NULL;
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef _SCSC_WIFILOGGER_INTERNAL_H_
+#define _SCSC_WIFILOGGER_INTERNAL_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include "scsc_wifilogger_types.h"
+#include "scsc_wifilogger_core.h"
+#include "scsc_wifilogger_debugfs.h"
+
+/* Singleton state for the whole WiFi-Logger subsystem. */
+struct scsc_wifi_logger {
+ /* Set by scsc_wifilogger_init(); checked by every accessor. */
+ bool initialized;
+ /* Protects rings[] and features_mask. */
+ struct mutex lock;
+ /* OR of WIFI_LOGGER_* feature bits from all registered rings. */
+ unsigned int features_mask;
+ /* Slot index is ring_id % MAX_WIFI_LOGGER_RINGS. */
+ struct scsc_wlog_ring *rings[MAX_WIFI_LOGGER_RINGS];
+
+ /**
+ * There is only one log_handler and alert_handler registered
+ * to be used across all rings: moreover just one instance of
+ * these handlers can exist per-ring.
+ */
+
+ /* log_handler callback registered by framework */
+ void (*on_ring_buffer_data_cb)(char *ring_name, char *buffer,
+ int buffer_size,
+ struct scsc_wifi_ring_buffer_status *status,
+ void *ctx);
+ /* alert_handler callback registered by framework */
+ void (*on_alert_cb)(char *buffer, int buffer_size, int err_code, void *ctx);
+
+ void *on_ring_buffer_ctx;
+ void *on_alert_ctx;
+};
+
+bool scsc_wifilogger_init(void);
+void scsc_wifilogger_destroy(void);
+bool scsc_wifilogger_fw_alert_init(void);
+struct scsc_wifi_logger *scsc_wifilogger_get_handle(void);
+unsigned int scsc_wifilogger_get_features(void);
+bool scsc_wifilogger_register_ring(struct scsc_wlog_ring *r);
+struct scsc_wlog_ring *scsc_wifilogger_get_ring_from_name(char *name);
+bool scsc_wifilogger_get_rings_status(u32 *num_rings,
+ struct scsc_wifi_ring_buffer_status *status);
+
+#endif /* _SCSC_WIFILOGGER_INTERNAL_H_ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+/**
+ * Internal Reference docs for WiFi-Logger subsystem
+ *
+ * SC-507043-SW -- Android Wi-Fi Logger architecture
+ * SC-507780-DD -- Android Enhanced Logging
+ * WiFiLogger Core Driver Requirements and Design
+ */
+#include "scsc_wifilogger_module.h"
+
+/*
+ * Module entry point: bring up the core logger state, then the individual
+ * rings and the firmware-alert hook. Ring bring-up is best-effort; only
+ * failure of the core allocation aborts the load.
+ */
+static int __init scsc_wifilogger_module_init(void)
+{
+	if (!scsc_wifilogger_init()) {
+		SCSC_TAG_ERR(WLOG, "Module init failed\n");
+		return -ENOMEM;
+	}
+
+	scsc_wifilogger_ring_connectivity_init();
+	scsc_wifilogger_ring_wakelock_init();
+	scsc_wifilogger_ring_pktfate_init();
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+	scsc_wifilogger_ring_test_init();
+#endif
+	scsc_wifilogger_fw_alert_init();
+
+	SCSC_TAG_INFO(WLOG, "Wi-Fi Logger subsystem initialized.\n");
+
+	return 0;
+}
+
+/* Module exit point: tear down all rings and free the logger state. */
+static void __exit scsc_wifilogger_module_exit(void)
+{
+ scsc_wifilogger_destroy();
+
+ SCSC_TAG_INFO(WLOG, "Wi-Fi Logger subsystem unloaded.\n");
+}
+
+module_init(scsc_wifilogger_module_init);
+module_exit(scsc_wifilogger_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Samsung SLSI");
+MODULE_DESCRIPTION("Android Wi-Fi Logger module");
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+
+#ifndef _SCSC_WIFILOGGER_MODULE_H_
+#define _SCSC_WIFILOGGER_MODULE_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_wifilogger_internal.h"
+
+#include "scsc_wifilogger_ring_connectivity.h"
+#include "scsc_wifilogger_ring_wakelock.h"
+#include "scsc_wifilogger_ring_pktfate.h"
+
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+#include "scsc_wifilogger_ring_test.h"
+#endif
+
+#endif /* _SCSC_WIFILOGGER_MODULE_H_ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ *****************************************************************************/
+/* Implements */
+#include "scsc_wifilogger_ring_connectivity.h"
+
+/* Uses */
+#include <stdarg.h>
+#include "scsc_wifilogger_ring_connectivity_api.h"
+#include "scsc_wifilogger_internal.h"
+
+static struct scsc_wlog_ring *the_ring;
+
+u32 cring_lev;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+#include "scsc_wifilogger_debugfs.h"
+
+static struct scsc_wlog_debugfs_info di;
+
+#endif /* CONFIG_SCSC_WIFILOGGER_DEBUGFS */
+
+/**
+ * Create and register the "connectivity" ring (32768 * 8 bytes), hook up
+ * its local verbosity mirror (cring_lev) and, when debugfs support is
+ * enabled, expose the common per-ring debugfs entries. Returns false on
+ * ring creation or registration failure (the ring is destroyed in the
+ * latter case).
+ */
+bool scsc_wifilogger_ring_connectivity_init(void)
+{
+ struct scsc_wlog_ring *r = NULL;
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+ struct scsc_ring_test_object *rto;
+#endif
+
+ r = scsc_wlog_ring_create(WLOGGER_RCONNECT_NAME,
+ RING_BUFFER_ENTRY_FLAGS_HAS_BINARY,
+ ENTRY_TYPE_CONNECT_EVENT, 32768 * 8,
+ WIFI_LOGGER_CONNECT_EVENT_SUPPORTED,
+ NULL, NULL, NULL);
+
+ if (!r) {
+ SCSC_TAG_ERR(WLOG, "Failed to CREATE WiFiLogger ring: %s\n",
+ WLOGGER_RCONNECT_NAME);
+ return false;
+ }
+ scsc_wlog_register_verbosity_reference(r, &cring_lev);
+
+ if (!scsc_wifilogger_register_ring(r)) {
+ SCSC_TAG_ERR(WLOG, "Failed to REGISTER WiFiLogger ring: %s\n",
+ WLOGGER_RCONNECT_NAME);
+ scsc_wlog_ring_destroy(r);
+ return false;
+ }
+ the_ring = r;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+ /* Debugfs exposure is best-effort: a failed rto allocation is not fatal. */
+ rto = init_ring_test_object(the_ring);
+ if (rto)
+ scsc_register_common_debugfs_entries(the_ring->st.name, rto, &di);
+#endif
+
+ return true;
+}
+
+/**** Producer API ******/
+
+/**
+ * scsc_wifilogger_ring_connectivity_fw_event - Push an FW connectivity event.
+ * @lev:          verbosity level of this record
+ * @fw_event_id:  event id from MLME-EVENT-LOG.indication[Event]
+ * @fw_timestamp: TSF timestamp from the indication
+ * @fw_bulk_data: TLV-encoded payload from the MLME signal
+ * @fw_blen:      length in bytes of @fw_bulk_data
+ *
+ * Return: result of scsc_wlog_write_record(), or 0 when the ring is absent.
+ */
+int scsc_wifilogger_ring_connectivity_fw_event(wlog_verbose_level lev, u16 fw_event_id,
+					       u64 fw_timestamp, void *fw_bulk_data, size_t fw_blen)
+{
+	struct scsc_wifi_ring_buffer_driver_connectivity_event event_item;
+
+	if (!the_ring)
+		return 0;
+
+	/* Fix: fw_timestamp is u64, so use %llx (was %x: format mismatch). */
+	SCSC_TAG_DEBUG(WLOG, "EL -- RX MLME_EVENT_LOG_INFO - event_id[%d] @0x%llx\n",
+		       fw_event_id, fw_timestamp);
+	event_item.event = fw_event_id;
+
+	return scsc_wlog_write_record(the_ring, fw_bulk_data, fw_blen, &event_item,
+				      sizeof(event_item), lev, fw_timestamp);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_connectivity_fw_event);
+
+/**
+ * scsc_wifilogger_ring_connectivity_driver_event - Push a driver event.
+ * @lev:             verbosity level of this record
+ * @driver_event_id: id of the driver-originated event
+ * @tag_count:       number of TLV triplets (tag, length, value-pointer)
+ *                   that follow as variadic arguments
+ *
+ * Packs the TLV triplets into a bounded local buffer (truncating, with a
+ * warning, if they would exceed MAX_TLVS_SZ) and writes the record with a
+ * driver-side local_clock() timestamp.
+ *
+ * Return: result of scsc_wlog_write_record(), or 0 when the ring is absent.
+ */
+int scsc_wifilogger_ring_connectivity_driver_event(wlog_verbose_level lev,
+						   u16 driver_event_id, unsigned int tag_count, ...)
+{
+	unsigned int i;	/* unsigned to match tag_count (was signed int) */
+	u64 timestamp;
+	va_list ap;
+	u8 tlvs[MAX_TLVS_SZ];
+	size_t tlvs_sz = 0;
+	struct scsc_tlv_log *tlv = NULL;
+	struct scsc_wifi_ring_buffer_driver_connectivity_event event_item;
+
+	if (!the_ring)
+		return 0;
+
+	timestamp = local_clock();
+
+	/* Fix: timestamp is u64, so use %llx (was %x: format mismatch). */
+	SCSC_TAG_DEBUG(WLOG, "EL -- RX Driver CONNECTIVITY EVENT - event_id[%d] @0x%llx\n",
+		       driver_event_id, timestamp);
+
+	event_item.event = driver_event_id;
+	va_start(ap, tag_count);
+	for (i = 0; i < tag_count &&
+	     tlvs_sz + sizeof(*tlv) < MAX_TLVS_SZ; i++) {
+		tlv = (struct scsc_tlv_log *)(tlvs + tlvs_sz);
+		tlv->tag = (u16)va_arg(ap, int);
+		tlv->length = (u16)va_arg(ap, int);
+		/* Bounds check BEFORE copying the value bytes in. */
+		if (tlvs_sz + sizeof(*tlv) + tlv->length >= MAX_TLVS_SZ) {
+			WARN(true,
+			     "TLVs container too small [%d]....truncating event's tags !\n",
+			     MAX_TLVS_SZ);
+			break;
+		}
+		memcpy(&tlv->value, va_arg(ap, u8 *), tlv->length);
+		tlvs_sz += sizeof(*tlv) + tlv->length;
+	}
+	va_end(ap);
+
+	return scsc_wlog_write_record(the_ring, tlvs, tlvs_sz, &event_item,
+				      sizeof(event_item), lev, timestamp);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_connectivity_driver_event);
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RING_CONNECTIVITY_H__
+#define __SCSC_WIFILOGGER_RING_CONNECTIVITY_H__
+
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_wifilogger_core.h"
+#include "scsc_wifilogger.h"
+
+#define MAX_TLVS_SZ 1024
+#define WLOGGER_RCONNECT_NAME "connectivity"
+
+/**
+ * A local mirror for this ring's current verbose level:
+ * avoids func-call and minimizes impact when ring is disabled
+ */
+extern u32 cring_lev;
+
+bool scsc_wifilogger_ring_connectivity_init(void);
+
+int scsc_wifilogger_ring_connectivity_driver_event(wlog_verbose_level lev,
+ u16 driver_event_id, unsigned int tag_count, ...);
+
+int scsc_wifilogger_ring_connectivity_fw_event(wlog_verbose_level lev, u16 fw_event_id,
+ u64 fw_timestamp, void *fw_bulk_data, size_t fw_blen);
+
+#endif /* __SCSC_WIFILOGGER_RING_CONNECTIVITY_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RING_CONNECTIVITY_API_H__
+#define __SCSC_WIFILOGGER_RING_CONNECTIVITY_API_H__
+/** Android Enhanced Logging
+ *
+ * CONNECTIVITY RING -- Public Producer API
+ *
+ * This ring collects a number of events originated by FW and Driver; given the different
+ * payload formats provided by FW and Driver, the API is split into two classes:
+ * one to be invoked on FW events and the other on driver events.
+ */
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+
+#include "scsc_wifilogger_ring_connectivity.h"
+/**
+ * DRIVER-Produced Connectivity Events
+ *
+ * @lev: chosen verbosity level
+ * @driver_event_id: id of the event reported by the driver
+ * @tag_count: number of TLV-TRIPLETS constituting the variadic portion of the call.
+ * Provided TLV triplets are composed as follows:
+ * - Types(Tag) are defined in scsc_wifilogger_types.h
+ * - Length in bytes of value
+ * - Value is a POINTER to the area holding the Length bytes to copy
+ *
+ * This function will take care to build the needed inner record-header and push the
+ * log material to the related ring, adding also a proper driver-built timestamp.
+ *
+ * An example invocation on Association Request received at driver:
+ *
+ * SCSC_WLOG_DRIVER_EVENT(WLOG_NORMAL, WIFI_EVENT_ASSOCIATION_REQUESTED, 3,
+ * WIFI_TAG_BSSID, ETH_ALEN, sme->bssid,
+ * WIFI_TAG_SSID, sme->ssid_len, sme->ssid,
+ * WIFI_TAG_CHANNEL, sizeof(u16), &sme->channel->center_freq);
+ *
+ * BE AWARE THAT the tag_count parameter expects the NUMBER of TRIPLETS,
+ * NOT the number of variadic params.
+ */
+#define SCSC_WLOG_DRIVER_EVENT(lev, driver_event_id, tag_count, tag_args...) \
+ do { \
+ if (cring_lev && (lev) <= cring_lev) \
+ scsc_wifilogger_ring_connectivity_driver_event((lev), (driver_event_id), \
+ (tag_count), tag_args); \
+ } while (0)
+
+/**
+ * FW-Produced Connectivity Events
+ *
+ * @lev: chosen verbosity level
+ * @fw_event_id: id of the event as provided in the field
+ * MLME-EVENT-LOG.indication[Event]
+ * @fw_timestamp: timestamp of the event as provided in the field
+ * MLME-EVENT-LOG.indication[TSF Time]
+ * @fw_bulk_data: the bulk data contained in the MLME signal.
+ * "The bulk data shall contain TLV encoded parameters for that event"
+ * @fw_blen: the length of the above bulk_data
+ *
+ * This function will take care to build the needed inner record-header and push the
+ * log material to the related ring.
+ */
+#define SCSC_WLOG_FW_EVENT(lev, fw_event_id, fw_timestamp, fw_bulk_data, fw_blen) \
+ do { \
+ if (cring_lev && (lev) <= cring_lev) \
+ scsc_wifilogger_ring_connectivity_fw_event((lev), (fw_event_id), (fw_timestamp), \
+ (fw_bulk_data), (fw_blen)); \
+ } while (0)
+#else
+
+#define SCSC_WLOG_DRIVER_EVENT(lev, driver_event_id, tag_count, tag_args...) do {} while (0)
+#define SCSC_WLOG_FW_EVENT(lev, fw_event_id, fw_timestamp, fw_bulk_data, fw_blen) do {} while (0)
+
+#endif /* CONFIG_SCSC_WIFILOGGER */
+
+#endif /* __SCSC_WIFILOGGER_RING_CONNECTIVITY_API_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ *****************************************************************************/
+/* Implements */
+#include "scsc_wifilogger_ring_pktfate.h"
+
+/* Uses */
+#include <stdarg.h>
+#include "scsc_wifilogger_internal.h"
+
+static bool pktfate_monitor_started;
+
+static wifi_tx_report txr;
+static wifi_rx_report rxr;
+static struct scsc_wlog_ring *fate_ring_tx;
+static struct scsc_wlog_ring *fate_ring_rx;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+#include "scsc_wifilogger_debugfs.h"
+#include "scsc_wifilogger.h"
+
+static struct scsc_wlog_debugfs_info di_tx, di_rx;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+/*
+ * debugfs read hook returning collected packet fates as raw
+ * wifi_tx_report/wifi_rx_report records, as many as fit in @count bytes.
+ * The ring (TX vs RX) is selected by the name of the ring bound to the
+ * test object in filp->private_data.
+ */
+static ssize_t dfs_read_fates(struct file *filp, char __user *ubuf,
+			      size_t count, loff_t *f_pos)
+{
+	ssize_t ret;
+	size_t got_sz, n_provided_fates = 0, n_requested_fates;
+	struct scsc_ring_test_object *rto;
+	void *srcbuf = NULL;
+
+	if (!filp->private_data)
+		return -EINVAL;
+	rto = filp->private_data;
+
+	if (!strncmp(rto->r->st.name, WLOGGER_RFATE_TX_NAME, RING_NAME_SZ - 1)) {
+		wifi_tx_report *tx_report_bufs;
+
+		n_requested_fates = count / sizeof(wifi_tx_report);
+		tx_report_bufs = vmalloc(sizeof(wifi_tx_report) * n_requested_fates);
+		/* Fix: report OOM instead of silently returning 0 bytes. */
+		if (!tx_report_bufs)
+			return -ENOMEM;
+		scsc_wifi_get_tx_pkt_fates(tx_report_bufs, n_requested_fates,
+					   &n_provided_fates);
+		got_sz = sizeof(wifi_tx_report) * n_provided_fates;
+		srcbuf = tx_report_bufs;
+	} else {
+		wifi_rx_report *rx_report_bufs;
+
+		n_requested_fates = count / sizeof(wifi_rx_report);
+		rx_report_bufs = vmalloc(sizeof(wifi_rx_report) * n_requested_fates);
+		if (!rx_report_bufs)
+			return -ENOMEM;
+		scsc_wifi_get_rx_pkt_fates(rx_report_bufs, n_requested_fates,
+					   &n_provided_fates);
+		got_sz = sizeof(wifi_rx_report) * n_provided_fates;
+		srcbuf = rx_report_bufs;
+	}
+	/* %zu for size_t arguments (was %d: format mismatch). */
+	SCSC_TAG_DEBUG(WLOG, "Ring '%s'...asked for %zu fates....GOT %zu\n",
+		       rto->r->st.name, n_requested_fates, n_provided_fates);
+	if (copy_to_user(ubuf, srcbuf, got_sz)) {
+		ret = -EFAULT;
+	} else {
+		*f_pos += got_sz;
+		ret = got_sz;
+	}
+	/* Fix: the temporary fate buffer was previously leaked on every call. */
+	vfree(srcbuf);
+
+	return ret;
+}
+
+/* debugfs file_operations for the per-ring "get_fates" entry. */
+const struct file_operations get_fates_fops = {
+ .owner = THIS_MODULE,
+ .open = dfs_open,
+ .read = dfs_read_fates,
+ .release = dfs_release,
+};
+#endif /* CONFIG_SCSC_WIFILOGGER_TEST */
+
+#endif /* CONFIG_SCSC_WIFILOGGER_DEBUGFS */
+
+/* True once scsc_wifilogger_ring_pktfate_start_monitoring() has run. */
+bool is_pktfate_monitor_started(void)
+{
+ return pktfate_monitor_started;
+}
+
+/**
+ * Create and register both packet-fate rings (TX and RX, 32768 * 4 bytes
+ * each), set them to drop-on-full, and (with debugfs support) expose the
+ * common entries plus the TEST-only "get_fates" entry per ring.
+ * On any failure both rings are destroyed and false is returned.
+ */
+bool scsc_wifilogger_ring_pktfate_init(void)
+{
+ struct scsc_wlog_ring *r = NULL;
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+ struct scsc_ring_test_object *rto_tx, *rto_rx;
+#endif
+
+ r = scsc_wlog_ring_create(WLOGGER_RFATE_TX_NAME,
+ RING_BUFFER_ENTRY_FLAGS_HAS_BINARY,
+ ENTRY_TYPE_PKT, 32768 * 4,
+ WIFI_LOGGER_PACKET_FATE_SUPPORTED,
+ NULL, NULL, NULL);
+ if (!r) {
+ SCSC_TAG_ERR(WLOG, "Failed to CREATE WiFiLogger ring: %s\n",
+ WLOGGER_RFATE_TX_NAME);
+ return false;
+ }
+ fate_ring_tx = r;
+
+ r = scsc_wlog_ring_create(WLOGGER_RFATE_RX_NAME,
+ RING_BUFFER_ENTRY_FLAGS_HAS_BINARY,
+ ENTRY_TYPE_PKT, 32768 * 4,
+ WIFI_LOGGER_PACKET_FATE_SUPPORTED,
+ NULL, NULL, NULL);
+ if (!r) {
+ SCSC_TAG_ERR(WLOG, "Failed to CREATE WiFiLogger ring: %s\n",
+ WLOGGER_RFATE_RX_NAME);
+ scsc_wlog_ring_destroy(fate_ring_tx);
+ return false;
+ }
+ fate_ring_rx = r;
+
+ if (!scsc_wifilogger_register_ring(fate_ring_tx)) {
+ SCSC_TAG_ERR(WLOG, "Failed to REGISTER WiFiLogger ring: %s\n",
+ fate_ring_tx->st.name);
+ scsc_wlog_ring_destroy(fate_ring_tx);
+ scsc_wlog_ring_destroy(fate_ring_rx);
+ return false;
+ }
+ if (!scsc_wifilogger_register_ring(fate_ring_rx)) {
+ SCSC_TAG_ERR(WLOG, "Failed to REGISTER WiFiLogger ring: %s\n",
+ fate_ring_rx->st.name);
+ scsc_wlog_ring_destroy(fate_ring_tx);
+ scsc_wlog_ring_destroy(fate_ring_rx);
+ return false;
+ }
+
+ // Just in case framework invokes with min_data_size != 0
+ scsc_wlog_ring_set_drop_on_full(fate_ring_tx);
+ scsc_wlog_ring_set_drop_on_full(fate_ring_rx);
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+ /* The test object is shared between all the debugfs entries
+ * belonging to the same ring.
+ */
+ rto_tx = init_ring_test_object(fate_ring_tx);
+ if (rto_tx) {
+ scsc_register_common_debugfs_entries(fate_ring_tx->st.name, rto_tx, &di_tx);
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+ scsc_wlog_register_debugfs_entry(fate_ring_tx->st.name, "get_fates",
+ &get_fates_fops, rto_tx, &di_tx);
+#endif
+ }
+
+ rto_rx = init_ring_test_object(fate_ring_rx);
+ if (rto_rx) {
+ scsc_register_common_debugfs_entries(fate_ring_rx->st.name, rto_rx, &di_rx);
+#ifdef CONFIG_SCSC_WIFILOGGER_TEST
+ scsc_wlog_register_debugfs_entry(fate_ring_rx->st.name, "get_fates",
+ &get_fates_fops, rto_rx, &di_rx);
+#endif
+ }
+#endif
+
+ return true;
+}
+
+/*
+ * Arm both packet-fate rings: flush any stale content, then start logging
+ * at WLOG_DEBUG verbosity. No-op until both rings have been created.
+ */
+void scsc_wifilogger_ring_pktfate_start_monitoring(void)
+{
+	struct scsc_wlog_ring *fate_rings[] = { fate_ring_tx, fate_ring_rx };
+	int i;
+
+	if (!fate_ring_rx || !fate_ring_tx)
+		return;
+
+	/* Just in case */
+	for (i = 0; i < ARRAY_SIZE(fate_rings); i++) {
+		scsc_wlog_flush_ring(fate_rings[i]);
+		scsc_wlog_start_logging(fate_rings[i], WLOG_DEBUG, 0, 0, 0);
+	}
+	pktfate_monitor_started = true;
+
+	SCSC_TAG_INFO(WLOG, "PacketFate monitor started.\n");
+}
+
+/**** Producer API ******/
+/**
+ * scsc_wifilogger_ring_pktfate_new_assoc - Reset fate rings on a new association.
+ *
+ * Flushes both packet-fate rings so fates from the previous association
+ * are discarded.
+ */
+void scsc_wifilogger_ring_pktfate_new_assoc(void)
+{
+	/* Fix: this is an exported symbol -- guard NULL rings, consistently
+	 * with scsc_wifilogger_ring_pktfate_start_monitoring().
+	 */
+	if (!fate_ring_tx || !fate_ring_rx)
+		return;
+
+	SCSC_TAG_INFO(WLOG, "New Association started...flushing PacketFate rings.\n");
+	scsc_wlog_flush_ring(fate_ring_tx);
+	scsc_wlog_flush_ring(fate_ring_rx);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_pktfate_new_assoc);
+
+/**
+ * scsc_wifilogger_ring_pktfate_get_fates - Read back collected packet fates.
+ * @fate:              TX_FATE or RX_FATE ring selector
+ * @report_bufs:       destination array of wifi_tx_report/wifi_rx_report
+ * @n_requested_fates: capacity of @report_bufs in records
+ * @n_provided_fates:  out: number of records actually copied
+ */
+void scsc_wifilogger_ring_pktfate_get_fates(int fate, void *report_bufs,
+					    size_t n_requested_fates,
+					    size_t *n_provided_fates)
+{
+	struct scsc_wlog_ring *r;
+	u32 n_req_fates = (u32)n_requested_fates;
+	size_t blen;
+
+	r = (fate == TX_FATE) ? fate_ring_tx : fate_ring_rx;
+	/* Fix: exported symbol -- don't dereference a not-yet-created ring. */
+	if (!r) {
+		*n_provided_fates = 0;
+		return;
+	}
+	if (fate == TX_FATE)
+		blen = sizeof(wifi_tx_report) * n_req_fates;
+	else
+		blen = sizeof(wifi_rx_report) * n_req_fates;
+	scsc_wlog_read_max_records(r, report_bufs, blen, &n_req_fates);
+	*n_provided_fates = n_req_fates;
+
+	/* %zu for size_t arguments (%zd is for ssize_t). */
+	SCSC_TAG_INFO(WLOG, "[%s]:: GET %s pkt_fates -- Requested:%zu - Got:%zu\n",
+		      r->st.name,
+		      (fate == TX_FATE) ? "TX" : "RX", n_requested_fates, *n_provided_fates);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_pktfate_get_fates);
+
+/**
+ * Here we're Just saving the egressing ETH frame for now with the
+ * provided initial fate (which is fixed to TX_PKT_FATE_DRV_QUEUED).
+ * In a full pktfate implementation as required by WiFi-Logger we
+ * should track down this eth frame using the host_tag and then
+ * account for the final fate of the frame looking at Debugging
+ * Information Element provided in subsequent UnidataTx.confirm, BUT
+ * such confirm as of now does NOT provide any Debugging Element NOR
+ * any additional interesting information related to the packet fates
+ * defined.
+ */
+/**
+ * Record the fate of an egressing frame in the TX fate ring.
+ * @fate:        fate value to record (callers pass TX_PKT_FATE_DRV_QUEUED)
+ * @htag:        host tag of the frame (currently unused in this function)
+ * @frame:       frame bytes to snapshot
+ * @len:         frame length; rejected if above MAX_FRAME_LEN_ETHERNET
+ * @ma_unitdata: true for data frames, which are truncated to
+ *               MAX_UNITDATA_LOGGED_SZ bytes
+ *
+ * NOTE(review): txr is a file-scope buffer, so concurrent callers would
+ * race on it -- confirm callers are serialized.
+ */
+void scsc_wifilogger_ring_pktfate_log_tx_frame(wifi_tx_packet_fate fate,
+ u16 htag, void *frame,
+ size_t len, bool ma_unitdata)
+{
+ if (len > MAX_FRAME_LEN_ETHERNET) {
+ SCSC_TAG_WARNING(WLOG, "pktfate TX:: dropped unplausible length frame.\n");
+ return;
+ }
+
+ if (ma_unitdata)
+ len = len <= MAX_UNITDATA_LOGGED_SZ ? len : MAX_UNITDATA_LOGGED_SZ;
+
+ txr.fate = fate;
+ txr.frame_inf.payload_type = FRAME_TYPE_ETHERNET_II;
+ txr.frame_inf.frame_len = len;
+ txr.frame_inf.driver_timestamp_usec = ktime_to_ns(ktime_get_boottime());
+ txr.frame_inf.firmware_timestamp_usec = 0;
+ memcpy(&txr.frame_inf.frame_content, frame, len);
+ //TODO MD5 checksum using Kernel Crypto API
+ memset(&txr.md5_prefix, 0x00, MD5_PREFIX_LEN);
+ /**
+ * We have to waste a lot of space storing the frame in a full-sized
+ * frame_content array, even if the frame size is much smaller, because
+ * the wifi-logger API (get_tx/rx_pkt_fates) reports multiple of struct
+ * wifi_tx/rx_packet_fates and does NOT return the effectively read bytes.
+ * The real size (that we cannot use) being:
+ *
+ * real_txr_sz = sizeof(txr) - sizeof(txr.frame_inf.frame_content) + len;
+ *
+ * frame_len field is anyway provided to recognize the actual end of frame.
+ */
+ scsc_wlog_write_record(fate_ring_tx, NULL, 0, &txr, sizeof(txr), WLOG_DEBUG, 0);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_pktfate_log_tx_frame);
+
+/**
+ * Record the fate of an ingressing frame in the RX fate ring.
+ * @fate:        fate value to record
+ * @du_desc:     data-unit descriptor (SCSC_DUD_*), mapped to the report's
+ *               payload_type; also selects the length sanity bound
+ * @frame:       frame bytes to snapshot
+ * @len:         frame length
+ * @ma_unitdata: true for data frames, which are truncated to
+ *               MAX_UNITDATA_LOGGED_SZ bytes
+ *
+ * NOTE(review): rxr is a file-scope buffer, so concurrent callers would
+ * race on it -- confirm callers are serialized.
+ */
+void scsc_wifilogger_ring_pktfate_log_rx_frame(wifi_rx_packet_fate fate, u16 du_desc,
+ void *frame, size_t len, bool ma_unitdata)
+{
+ if ((du_desc == SCSC_DUD_ETHERNET_FRAME && len > MAX_FRAME_LEN_ETHERNET) ||
+ (du_desc == SCSC_DUD_80211_FRAME && len > MAX_FRAME_LEN_80211_MGMT)) {
+ SCSC_TAG_WARNING(WLOG, "pktfate RX:: dropped unplausible length frame.\n");
+ return;
+ }
+
+ if (ma_unitdata)
+ len = len <= MAX_UNITDATA_LOGGED_SZ ? len : MAX_UNITDATA_LOGGED_SZ;
+
+ rxr.fate = fate;
+ rxr.frame_inf.payload_type = du_desc == SCSC_DUD_ETHERNET_FRAME ? FRAME_TYPE_ETHERNET_II :
+ (du_desc == SCSC_DUD_80211_FRAME ? FRAME_TYPE_80211_MGMT : FRAME_TYPE_UNKNOWN);
+ rxr.frame_inf.frame_len = len;
+ rxr.frame_inf.driver_timestamp_usec = ktime_to_ns(ktime_get_boottime());
+ rxr.frame_inf.firmware_timestamp_usec = 0;
+ memcpy(&rxr.frame_inf.frame_content, frame, len);
+ //TODO MD5 checksum using Kernel Crypto API
+ memset(&rxr.md5_prefix, 0x00, MD5_PREFIX_LEN);
+ /**
+ * We have to waste a lot of space storing the frame in a full-sized
+ * frame_content array, even if the frame size is much smaller, because
+ * the wifi-logger API (get_tx/rx_pkt_fates) reports multiple of struct
+ * wifi_tx/rx_packet_fates and does NOT return the effectively read bytes.
+ * The real size (that we cannot use) being:
+ *
+ * real_txr_sz = sizeof(txr) - sizeof(txr.frame_inf.frame_content) + len;
+ *
+ * frame_len field is anyway provided to recognize the actual end of frame.
+ */
+ scsc_wlog_write_record(fate_ring_rx, NULL, 0, &rxr, sizeof(rxr), WLOG_DEBUG, 0);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_pktfate_log_rx_frame);
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RING_PKTFATE_H__
+#define __SCSC_WIFILOGGER_RING_PKTFATE_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ktime.h>
+#include <linux/timekeeping.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_wifilogger_core.h"
+#include "scsc_wifilogger.h"
+
+#define TX_FATE 0
+#define RX_FATE 1
+#define WLOGGER_RFATE_TX_NAME "pkt_fate_tx"
+#define WLOGGER_RFATE_RX_NAME "pkt_fate_rx"
+#define MAX_UNITDATA_LOGGED_SZ 100
+
+#define SCSC_DUD_ETHERNET_FRAME 0
+#define SCSC_DUD_80211_FRAME 1
+#define SCSC_DUD_MLME 2
+
+bool is_pktfate_monitor_started(void);
+bool scsc_wifilogger_ring_pktfate_init(void);
+void scsc_wifilogger_ring_pktfate_start_monitoring(void);
+void scsc_wifilogger_ring_pktfate_new_assoc(void);
+void scsc_wifilogger_ring_pktfate_log_tx_frame(wifi_tx_packet_fate fate,
+ u16 htag, void *pkt,
+ size_t len, bool ma_unitdata);
+void scsc_wifilogger_ring_pktfate_log_rx_frame(wifi_rx_packet_fate fate, u16 du_desc,
+ void *frame, size_t len, bool ma_unitdata);
+void scsc_wifilogger_ring_pktfate_get_fates(int fate, void *report_bufs,
+ size_t n_requested_fates,
+ size_t *n_provided_fates);
+#endif /* __SCSC_WIFILOGGER_RING_PKTFATE_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RING_PKTFATE_API_H__
+#define __SCSC_WIFILOGGER_RING_PKTFATE_API_H__
+
+/** Android Enhanced Logging
+ *
+ * PKTFATE RING -- Public Producer API
+ *
+ */
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+#include "scsc_wifilogger_ring_pktfate.h"
+
+#define SCSC_WLOG_PKTFATE_NEW_ASSOC() \
+ do { \
+ if (is_pktfate_monitor_started()) \
+ scsc_wifilogger_ring_pktfate_new_assoc(); \
+ } while (0)
+
+#define SCSC_WLOG_PKTFATE_LOG_TX_DATA_FRAME(htag, frame, flen) \
+ do { \
+ if (is_pktfate_monitor_started()) \
+ scsc_wifilogger_ring_pktfate_log_tx_frame(TX_PKT_FATE_DRV_QUEUED, (htag), \
+ (void *)(frame), (flen), true); \
+ } while (0)
+
+#define SCSC_WLOG_PKTFATE_LOG_RX_DATA_FRAME(du_desc, frame, flen) \
+ do { \
+ if (is_pktfate_monitor_started() && \
+ ((du_desc) == SCSC_DUD_ETHERNET_FRAME || (du_desc) == SCSC_DUD_80211_FRAME)) \
+ scsc_wifilogger_ring_pktfate_log_rx_frame(RX_PKT_FATE_DRV_QUEUED, (du_desc), \
+ (void *)(frame), (flen), true); \
+ } while (0)
+
+#define SCSC_WLOG_PKTFATE_LOG_TX_CTRL_FRAME(htag, frame, flen) \
+ do { \
+ if (is_pktfate_monitor_started()) \
+ scsc_wifilogger_ring_pktfate_log_tx_frame(TX_PKT_FATE_DRV_QUEUED, (htag), \
+ (void *)(frame), (flen), false); \
+ } while (0)
+
+#define SCSC_WLOG_PKTFATE_LOG_RX_CTRL_FRAME(frame, flen) \
+ do { \
+ if (is_pktfate_monitor_started()) \
+ scsc_wifilogger_ring_pktfate_log_rx_frame(RX_PKT_FATE_DRV_QUEUED, SCSC_DUD_MLME, \
+ (void *)(frame), (flen), false); \
+ } while (0)
+
+#else
+
+#define SCSC_WLOG_PKTFATE_NEW_ASSOC() do {} while (0)
+#define SCSC_WLOG_PKTFATE_LOG_TX_DATA_FRAME(htag, skb_mac, skb_hlen) do {} while (0)
+#define SCSC_WLOG_PKTFATE_LOG_TX_CTRL_FRAME(htag, skb_mac, skb_hlen) do {} while (0)
+#define SCSC_WLOG_PKTFATE_LOG_RX_DATA_FRAME(du_desc, skb_mac, skb_hlen) do {} while (0)
+#define SCSC_WLOG_PKTFATE_LOG_RX_CTRL_FRAME(frame, flen) do {} while (0)
+
+#endif /* CONFIG_SCSC_WIFILOGGER */
+
+#endif /* __SCSC_WIFILOGGER_RING_PKTFATE_API_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ *****************************************************************************/
+/* Implements */
+#include "scsc_wifilogger_ring_test.h"
+
+/* Uses */
+#include "scsc_wifilogger_internal.h"
+
+static u32 seq;
+static struct scsc_wlog_ring *the_ring;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+#include "scsc_wifilogger_debugfs.h"
+
+static struct scsc_wlog_debugfs_info di;
+#endif /* CONFIG_SCSC_WIFILOGGER_DEBUGFS */
+
+/* Custom loglevel-change hook for the test ring: just logs the change. */
+static int ring_test_loglevel_change_cb(struct scsc_wlog_ring *r,
+ u32 new_loglevel)
+{
+ SCSC_TAG_DEBUG(WLOG,
+ "==>> TEST RING SETTING CUSTOM LogLevel for ring: %s -- to:%d\n",
+ r->st.name, new_loglevel);
+
+ return 0;
+}
+
+/* Custom init hook passed to scsc_wlog_ring_create(): log-only, never fails. */
+bool ring_test_custom_init(struct scsc_wlog_ring *r)
+{
+ SCSC_TAG_DEBUG(WLOG, "Custom init for ring:%s\n", r->st.name);
+
+ return true;
+}
+
/* Custom fini callback passed to scsc_wlog_ring_create(); only logs.
 * Always reports success.
 */
bool ring_test_custom_fini(struct scsc_wlog_ring *r)
{
	SCSC_TAG_DEBUG(WLOG, "Custom fini for ring:%s\n", r->st.name);

	return true;
}
+
+/* Producer API */
+int scsc_wifilogger_ring_test_write(char *buf, size_t blen)
+{
+ struct tring_hdr thdr;
+
+ if (!the_ring)
+ return 0;
+
+ thdr.seq = seq++;
+ thdr.fake = 666 & seq;
+
+ return scsc_wlog_write_record(the_ring, buf, blen, &thdr, sizeof(thdr), WLOG_NORMAL, 0);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_test_write);
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+static ssize_t dfs_record_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t written;
+ struct scsc_ring_test_object *rto;
+
+ if (!filp->private_data)
+ return -EINVAL;
+ rto = filp->private_data;
+
+ count = count < rto->bsz ? count : rto->bsz;
+ if (copy_from_user(rto->wbuf, ubuf, count))
+ return -EINVAL;
+ written = scsc_wifilogger_ring_test_write(rto->wbuf, count);
+ if (!written && count)
+ return -EAGAIN;
+ wake_up_interruptible(&rto->rw_wq);
+ *f_pos += written;
+
+ return written;
+}
+
/* File operations for the per-ring "write_record" debugfs entry.
 * NOTE(review): only referenced inside this file (via
 * scsc_wlog_register_debugfs_entry() below) — looks like it could be
 * static; confirm no other translation unit uses it.
 */
const struct file_operations write_record_fops = {
	.owner = THIS_MODULE,
	.open = dfs_open,
	.write = dfs_record_write,
	.release = dfs_release,
};
+#endif
+
/**
 * Create and register the WiFi Logger "test" ring.
 *
 * Builds a 32 KiB binary ring, installs the custom init/fini and
 * loglevel-change callbacks, registers it with the wifilogger core and,
 * when debugfs support is compiled in, exposes the common debugfs
 * entries plus a ring-specific "write_record" file.
 *
 * Returns true on success, false if the ring could not be created or
 * registered (the ring is destroyed in the latter case).
 */
bool scsc_wifilogger_ring_test_init(void)
{
	struct scsc_wlog_ring *r = NULL;
#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
	struct scsc_ring_test_object *rto;
#endif

	r = scsc_wlog_ring_create(WLOGGER_RTEST_NAME,
				  RING_BUFFER_ENTRY_FLAGS_HAS_BINARY,
				  ENTRY_TYPE_DATA, 32768,
				  WIFI_LOGGER_SCSC_TEST_RING_SUPPORTED,
				  ring_test_custom_init, ring_test_custom_fini,
				  NULL);

	if (!r) {
		SCSC_TAG_ERR(WLOG, "Failed to CREATE WiFiLogger ring: %s\n",
			     WLOGGER_RTEST_NAME);
		return false;
	}
	/* Registering custom loglevel change callback */
	scsc_wlog_register_loglevel_change_cb(r, ring_test_loglevel_change_cb);

	if (!scsc_wifilogger_register_ring(r)) {
		SCSC_TAG_ERR(WLOG, "Failed to REGISTER WiFiLogger ring: %s\n",
			     WLOGGER_RTEST_NAME);
		/* Registration failed: release the ring we just created. */
		scsc_wlog_ring_destroy(r);
		return false;
	}
	the_ring = r;

#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
	/* This test object is shared between all the debugfs entries
	 * belonging to this ring.
	 */
	rto = init_ring_test_object(the_ring);
	if (rto) {
		scsc_register_common_debugfs_entries(the_ring->st.name, rto, &di);
		/* A write is specific to the ring...*/
		scsc_wlog_register_debugfs_entry(the_ring->st.name, "write_record",
						 &write_record_fops, rto, &di);
	}
#endif

	return true;
}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
#ifndef __SCSC_WIFILOGGER_RING_TEST_H__
#define __SCSC_WIFILOGGER_RING_TEST_H__

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <scsc/scsc_logring.h>

#include "scsc_wifilogger_core.h"
#include "scsc_wifilogger.h"

/* Name under which the test ring is registered with the core. */
#define WLOGGER_RTEST_NAME "test"

/* Per-record header prepended to every test-ring record. */
struct tring_hdr {
	u32 seq;	/* running record sequence number */
	u64 fake;	/* test filler value derived from seq by the producer */
} __packed;

/* Create and register the test ring; true on success. */
bool scsc_wifilogger_ring_test_init(void);

/* Push one record of blen bytes into the test ring; returns bytes
 * written, 0 when the ring is not initialised.
 */
int scsc_wifilogger_ring_test_write(char *buf, size_t blen);
#endif /* __SCSC_WIFILOGGER_RING_TEST_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ *****************************************************************************/
+/* Implements */
+#include "scsc_wifilogger_ring_wakelock.h"
+
+/* Uses */
+#include "scsc_wifilogger_ring_wakelock_api.h"
+#include "scsc_wifilogger_internal.h"
+
+static struct scsc_wlog_ring *the_ring;
+u32 wring_lev;
+
+#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
+#include "scsc_wifilogger_debugfs.h"
+
+static struct scsc_wlog_debugfs_info di;
+
+#endif /* CONFIG_SCSC_WIFILOGGER_DEBUGFS */
+
/**
 * Create and register the WiFi Logger "wakelock" ring.
 *
 * Builds a 64 KiB binary ring of ENTRY_TYPE_WAKE_LOCK records, hooks
 * its verbosity to the global wring_lev gate used by the
 * SCSC_WLOG_WAKELOCK() producer macro, registers it with the
 * wifilogger core and, when debugfs support is compiled in, exposes
 * the common debugfs entries.
 *
 * Returns true on success, false if creation or registration failed
 * (the ring is destroyed in the latter case).
 */
bool scsc_wifilogger_ring_wakelock_init(void)
{
	struct scsc_wlog_ring *r = NULL;
#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
	struct scsc_ring_test_object *rto = NULL;
#endif

	r = scsc_wlog_ring_create(WLOGGER_RWAKELOCK_NAME,
				  RING_BUFFER_ENTRY_FLAGS_HAS_BINARY,
				  ENTRY_TYPE_WAKE_LOCK, 65536,
				  WIFI_LOGGER_WAKE_LOCK_SUPPORTED,
				  NULL, NULL, NULL);

	if (!r) {
		SCSC_TAG_ERR(WLOG, "Failed to CREATE WiFiLogger ring: %s\n",
			     WLOGGER_RWAKELOCK_NAME);
		return false;
	}
	/* Export this ring's verbosity level through wring_lev. */
	scsc_wlog_register_verbosity_reference(r, &wring_lev);

	if (!scsc_wifilogger_register_ring(r)) {
		SCSC_TAG_ERR(WLOG, "Failed to REGISTER WiFiLogger ring: %s\n",
			     WLOGGER_RWAKELOCK_NAME);
		scsc_wlog_ring_destroy(r);
		return false;
	}
	the_ring = r;

#ifdef CONFIG_SCSC_WIFILOGGER_DEBUGFS
	/* Shared object backing all debugfs entries of this ring. */
	rto = init_ring_test_object(the_ring);
	if (rto)
		scsc_register_common_debugfs_entries(the_ring->st.name, rto, &di);
#endif

	return true;
}
+
+/**** Producer API ******/
+
+int scsc_wifilogger_ring_wakelock_action(u32 verbose_level, int status,
+ char *wl_name, int reason)
+{
+ u64 timestamp;
+ struct scsc_wake_lock_event wl_event;
+
+ if (!the_ring)
+ return 0;
+
+ timestamp = local_clock();
+ SCSC_TAG_DBG4(WLOG, "EL -- WAKELOCK[%s] - status:%d reason:%d @0x%x\n",
+ wl_name, status, reason, timestamp);
+
+ wl_event.status = status;
+ wl_event.reason = reason;
+ return scsc_wlog_write_record(the_ring, wl_name, strlen(wl_name), &wl_event,
+ sizeof(wl_event), verbose_level, timestamp);
+}
+EXPORT_SYMBOL(scsc_wifilogger_ring_wakelock_action);
+
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
#ifndef __SCSC_WIFILOGGER_RING_WAKELOCK_H__
#define __SCSC_WIFILOGGER_RING_WAKELOCK_H__

#include <linux/types.h>
#include <scsc/scsc_logring.h>

#include "scsc_wifilogger_core.h"
#include "scsc_wifilogger.h"

/* Name under which the wakelock ring is registered with the core. */
#define WLOGGER_RWAKELOCK_NAME "wakelock"

/* Current verbosity of the wakelock ring; producers (the
 * SCSC_WLOG_WAKELOCK macro) skip logging entirely when this is 0.
 */
extern u32 wring_lev;

/* Record one wakelock event (status/reason values defined in
 * scsc_wifilogger_ring_wakelock_api.h); returns bytes written,
 * 0 when the ring is not initialised.
 */
int scsc_wifilogger_ring_wakelock_action(u32 verbose_level, int status,
					 char *wl_name, int reason);


/* Create and register the wakelock ring; true on success. */
bool scsc_wifilogger_ring_wakelock_init(void);
#endif /* __SCSC_WIFILOGGER_RING_WAKELOCK_H__ */
+
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RING_WAKELOCK_API_H__
+#define __SCSC_WIFILOGGER_RING_WAKELOCK_API_H__
+
+/**
+ * Android Enhanced Logging
+ *
+ * WAKELOCK EVENTS RING -- Public Producer API
+ *
+ */
/* Wakelock status values (stored in scsc_wake_lock_event.status). */
enum {
	WL_TAKEN = 0,
	WL_RELEASED
};

/* Reason the wakelock was taken (scsc_wake_lock_event.reason). */
enum {
	WL_REASON_TX = 0,
	WL_REASON_RX,
	WL_REASON_ROAM
};
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+
+#include "scsc_wifilogger_ring_wakelock.h"
+
+#define SCSC_WLOG_WAKELOCK(lev, status, wl_name, reason) \
+ do { \
+ if (wring_lev && (lev) <= wring_lev) \
+ scsc_wifilogger_ring_wakelock_action((lev), (status), (wl_name), (reason)); \
+ } while (0)
+
+#else
+
+#define SCSC_WLOG_WAKELOCK(lev, status, wl_name, reason) do {} while (0)
+
+#endif
+
+#endif /* __SCSC_WIFILOGGER_RING_WAKELOCK_API_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef __SCSC_WIFILOGGER_RINGS_H__
+#define __SCSC_WIFILOGGER_RINGS_H__
+
+#include "scsc_wifilogger_types.h"
+/* This file groups and exports all rings Producer APIs */
+#include "scsc_wifilogger_ring_connectivity_api.h"
+#include "scsc_wifilogger_ring_wakelock_api.h"
+#include "scsc_wifilogger_ring_pktfate_api.h"
+
+#endif /* __SCSC_WIFILOGGER_RINGS_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ******************************************************************************/
+#ifndef _SCSC_WIFILOGGER_TYPES_H_
+#define _SCSC_WIFILOGGER_TYPES_H_
+/**
+ * These types are derived from definitions in:
+ *
+ * hardware/libhardware_legacy/include/hardware_legacy/wifi_logger.h
+ * hardware/libhardware_legacy/include/hardware_legacy/wifi_hal.h
+ *
+ * Descriptive comments are in wifi_logger.h original file.
+ * Here we avoided using typedef that are in contrast with Kernel
+ * coding style though.
+ */
+#include <linux/types.h>
+
+#define LOGGER_MAJOR_VERSION 1
+#define LOGGER_MINOR_VERSION 0
+#define LOGGER_MICRO_VERSION 0
+
/**
 * Be aware that there can be multiple distinct rings, registered
 * with different names but supporting the same feature: an example
 * being pkt_fate_tx and pkt_fate_rx. Rings are registered by ring_id.
 */
/* NOTE(review): the extra ring slot is gated on
 * CONFIG_SCSC_WIFILOGGER_TEST_RING, while the test-ring feature bit
 * later in this header is gated on CONFIG_SCSC_WIFILOGGER_TEST —
 * confirm these two config symbols are intentionally different.
 */
#ifndef CONFIG_SCSC_WIFILOGGER_TEST_RING
#define MAX_WIFI_LOGGER_RINGS 10
#else
#define MAX_WIFI_LOGGER_RINGS 11
#endif
+
+typedef enum {
+ WIFI_SUCCESS = 0,
+ WIFI_ERROR_NONE = 0,
+ WIFI_ERROR_UNKNOWN = -1,
+ WIFI_ERROR_UNINITIALIZED = -2,
+ WIFI_ERROR_NOT_SUPPORTED = -3,
+ WIFI_ERROR_NOT_AVAILABLE = -4, /* Not available right now, but try later */
+ WIFI_ERROR_INVALID_ARGS = -5,
+ WIFI_ERROR_INVALID_REQUEST_ID = -6,
+ WIFI_ERROR_TIMED_OUT = -7,
+ WIFI_ERROR_TOO_MANY_REQUESTS = -8, /* Too many instances of this request */
+ WIFI_ERROR_OUT_OF_MEMORY = -9,
+ WIFI_ERROR_BUSY = -10,
+} wifi_error;
+
+/* Verbosity */
+typedef enum {
+ WLOG_NONE = 0,
+ WLOG_NORMAL = 1,
+ WLOG_LAZY = 2,
+ WLOG_DEBUG = 3,
+} wlog_verbose_level;
+
/* Feature set */
/* Bitmask of capabilities a ring advertises at creation time. */
enum {
	WIFI_LOGGER_MEMORY_DUMP_SUPPORTED = (1 << (0)),
	WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)),
	WIFI_LOGGER_CONNECT_EVENT_SUPPORTED = (1 << (2)),
	WIFI_LOGGER_POWER_EVENT_SUPPORTED = (1 << (3)),
	WIFI_LOGGER_WAKE_LOCK_SUPPORTED = (1 << (4)),
	WIFI_LOGGER_VERBOSE_SUPPORTED = (1 << (5)),
	WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED = (1 << (6)),
	WIFI_LOGGER_DRIVER_DUMP_SUPPORTED = (1 << (7)),
	WIFI_LOGGER_PACKET_FATE_SUPPORTED = (1 << (8)),
/* NOTE(review): guarded by CONFIG_SCSC_WIFILOGGER_TEST, not the
 * CONFIG_SCSC_WIFILOGGER_TEST_RING symbol used for the ring count
 * earlier in this header — confirm this is intended.
 */
#ifdef CONFIG_SCSC_WIFILOGGER_TEST
	WIFI_LOGGER_SCSC_TEST_RING_SUPPORTED = (1 << (9)),
#endif
};
+
+enum {
+ RING_BUFFER_ENTRY_FLAGS_HAS_BINARY = (1 << (0)),
+ RING_BUFFER_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1))
+};
+
+#define RING_NAME_SZ 32
+
+struct scsc_wifi_ring_buffer_status {
+ u8 name[RING_NAME_SZ];
+ u32 flags;
+ int ring_id; /* unique integer representing the ring */
+ u32 rb_byte_size; /* total memory size allocated for the buffer */
+ u32 verbose_level; /* verbose level for ring buffer */
+ u32 written_bytes; /* number of bytes that was written to the
+ * buffer by driver, monotonously increasing
+ * integer
+ */
+ u32 read_bytes; /* number of bytes that was read from the buffer
+ * by user land, monotonously increasing integer
+ */
+ u32 written_records; /* number of records that was written to the
+ * buffer by driver, monotonously increasing
+ * integer
+ */
+};
+
+typedef void (*on_ring_buffer_data)(char *ring_name, char *buffer, int buffer_size,
+ struct scsc_wifi_ring_buffer_status *status, void *ctx);
+typedef void (*on_alert)(char *buffer, int buffer_size, int err_code, void *ctx);
+
+enum {
+ ENTRY_TYPE_CONNECT_EVENT = 1,
+ ENTRY_TYPE_PKT,
+ ENTRY_TYPE_WAKE_LOCK,
+ ENTRY_TYPE_POWER_EVENT,
+ ENTRY_TYPE_DATA
+};
+
+struct scsc_wifi_ring_buffer_entry {
+ u16 entry_size; /* the size of payload excluding the header. */
+ u8 flags;
+ u8 type;
+ u64 timestamp; /* present if has_timestamp bit is set. */
+} __packed;
+
+/* set if binary entries are present */
+#define WIFI_RING_BUFFER_FLAG_HAS_BINARY_ENTRIES 0x00000001
+/* set if ascii entries are present */
+#define WIFI_RING_BUFFER_FLAG_HAS_ASCII_ENTRIES 0x00000002
+
+/* Below events refer to the wifi_connectivity_event ring and shall be supported */
+#define WIFI_EVENT_ASSOCIATION_REQUESTED 0 // driver receives association command from kernel
+#define WIFI_EVENT_AUTH_COMPLETE 1
+#define WIFI_EVENT_ASSOC_COMPLETE 2
+#define WIFI_EVENT_FW_AUTH_STARTED 3 // fw event indicating auth frames are sent
+#define WIFI_EVENT_FW_ASSOC_STARTED 4 // fw event indicating assoc frames are sent
+#define WIFI_EVENT_FW_RE_ASSOC_STARTED 5 // fw event indicating reassoc frames are sent
+#define WIFI_EVENT_DRIVER_SCAN_REQUESTED 6
+#define WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND 7
+#define WIFI_EVENT_DRIVER_SCAN_COMPLETE 8
+#define WIFI_EVENT_G_SCAN_STARTED 9
+#define WIFI_EVENT_G_SCAN_COMPLETE 10
+#define WIFI_EVENT_DISASSOCIATION_REQUESTED 11
+#define WIFI_EVENT_RE_ASSOCIATION_REQUESTED 12
+#define WIFI_EVENT_ROAM_REQUESTED 13
+#define WIFI_EVENT_BEACON_RECEIVED 14 // received beacon from AP (event enabled
+ // only in verbose mode)
+#define WIFI_EVENT_ROAM_SCAN_STARTED 15 // firmware has triggered a roam scan (not g-scan)
+#define WIFI_EVENT_ROAM_SCAN_COMPLETE 16 // firmware has completed a roam scan (not g-scan)
+#define WIFI_EVENT_ROAM_SEARCH_STARTED 17 // firmware has started searching for roam
+ // candidates (with reason =xx)
+#define WIFI_EVENT_ROAM_SEARCH_STOPPED 18 // firmware has stopped searching for roam
+// candidates (with reason =xx)
#define WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT 20 // received channel switch announcement from AP
#define WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START 21 // fw start transmit eapol frame, with
                                                    // EAPOL index 1-4
#define WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP 22 // fw gives up eapol frame, with rate,
                                                    // success/failure and number retries
#define WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED 23 // kernel queue EAPOL for transmission
                                                    // in driver with EAPOL index 1-4
#define WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED 24 // with rate, regardless of the fact that
                                                    // EAPOL frame is accepted or rejected by fw
#define WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED 26 // with rate, and eapol index, driver has
                                                    // received EAPOL frame and will queue it up
                                                    // to wpa_supplicant
#define WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE 27 // with success/failure, parameters
#define WIFI_EVENT_BT_COEX_BT_SCO_START 28
#define WIFI_EVENT_BT_COEX_BT_SCO_STOP 29
#define WIFI_EVENT_BT_COEX_BT_SCAN_START 30 // for paging/scan etc., when BT starts transmitting
                                                    // twice per BT slot
#define WIFI_EVENT_BT_COEX_BT_SCAN_STOP 31
+#define WIFI_EVENT_BT_COEX_BT_HID_START 32
+#define WIFI_EVENT_BT_COEX_BT_HID_STOP 33
+#define WIFI_EVENT_ROAM_AUTH_STARTED 34 // fw sends auth frame in roaming to next candidate
+#define WIFI_EVENT_ROAM_AUTH_COMPLETE 35 // fw receive auth confirm from ap
+#define WIFI_EVENT_ROAM_ASSOC_STARTED 36 // firmware sends assoc/reassoc frame in
+ // roaming to next candidate
+#define WIFI_EVENT_ROAM_ASSOC_COMPLETE 37 // firmware receive assoc/reassoc confirm from ap
+#define WIFI_EVENT_G_SCAN_STOP 38 // firmware sends stop G_SCAN
+#define WIFI_EVENT_G_SCAN_CYCLE_STARTED 39 // firmware indicates G_SCAN scan cycle started
+#define WIFI_EVENT_G_SCAN_CYCLE_COMPLETED 40 // firmware indicates G_SCAN scan cycle completed
+#define WIFI_EVENT_G_SCAN_BUCKET_STARTED 41 // firmware indicates G_SCAN scan start
+ // for a particular bucket
+#define WIFI_EVENT_G_SCAN_BUCKET_COMPLETED 42 // firmware indicates G_SCAN scan completed for
+ // for a particular bucket
+#define WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE 43 // Event received from firmware about G_SCAN scan
+// results being available
+#define WIFI_EVENT_G_SCAN_CAPABILITIES 44 // Event received from firmware with G_SCAN
+ // capabilities
+#define WIFI_EVENT_ROAM_CANDIDATE_FOUND 45 // Event received from firmware when eligible
+ // candidate is found
+#define WIFI_EVENT_ROAM_SCAN_CONFIG 46 // Event received from firmware when roam scan
+ // configuration gets enabled or disabled
+#define WIFI_EVENT_AUTH_TIMEOUT 47 // firmware/driver timed out authentication
+#define WIFI_EVENT_ASSOC_TIMEOUT 48 // firmware/driver timed out association
+#define WIFI_EVENT_MEM_ALLOC_FAILURE 49 // firmware/driver encountered allocation failure
+#define WIFI_EVENT_DRIVER_PNO_ADD 50 // driver added a PNO network in firmware
+#define WIFI_EVENT_DRIVER_PNO_REMOVE 51 // driver removed a PNO network in firmware
+#define WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND 52 // driver received PNO networks
+ // found indication from firmware
+#define WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED 53 // driver triggered a scan for PNO networks
+#define WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND 54 // driver received scan results
+ // of PNO networks
+#define WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE 55 // driver updated scan results from
+ // PNO networks to cfg80211
+
+/**
+ * Parameters of wifi logger events are TLVs
+ * Event parameters tags are defined as:
+ */
+#define WIFI_TAG_VENDOR_SPECIFIC 0 // take a byte stream as parameter
+#define WIFI_TAG_BSSID 1 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_ADDR 2 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_SSID 3 // takes a 32 bytes SSID address as parameter
+#define WIFI_TAG_STATUS 4 // takes an integer as parameter
+#define WIFI_TAG_CHANNEL_SPEC 5 // takes one or more wifi_channel_spec as parameter
+#define WIFI_TAG_WAKE_LOCK_EVENT 6 // takes a wake_lock_event struct as parameter
+#define WIFI_TAG_ADDR1 7 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_ADDR2 8 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_ADDR3 9 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_ADDR4 10 // takes a 6 bytes MAC address as parameter
+#define WIFI_TAG_TSF 11 // take a 64 bits TSF value as parameter
+#define WIFI_TAG_IE 12 // take one or more specific 802.11 IEs parameter,
+ // IEs are in turn indicated in TLV format as per
+ // 802.11 spec
+#define WIFI_TAG_INTERFACE 13 // take interface name as parameter
+#define WIFI_TAG_REASON_CODE 14 // take a reason code as per 802.11 as parameter
+#define WIFI_TAG_RATE_MBPS 15 // take a wifi rate in 0.5 mbps
+#define WIFI_TAG_REQUEST_ID 16 // take an integer as parameter
+#define WIFI_TAG_BUCKET_ID 17 // take an integer as parameter
+#define WIFI_TAG_GSCAN_PARAMS 18 // takes a wifi_scan_cmd_params struct as parameter
+#define WIFI_TAG_GSCAN_CAPABILITIES 19 // takes a wifi_gscan_capabilities struct as parameter
+#define WIFI_TAG_SCAN_ID 20 // take an integer as parameter
+#define WIFI_TAG_RSSI 21 // take an integer as parameter
+#define WIFI_TAG_CHANNEL 22 // take an integer as parameter
+#define WIFI_TAG_LINK_ID 23 // take an integer as parameter
+#define WIFI_TAG_LINK_ROLE 24 // take an integer as parameter
+#define WIFI_TAG_LINK_STATE 25 // take an integer as parameter
+#define WIFI_TAG_LINK_TYPE 26 // take an integer as parameter
+#define WIFI_TAG_TSCO 27 // take an integer as parameter
+#define WIFI_TAG_RSCO 28 // take an integer as parameter
+#define WIFI_TAG_EAPOL_MESSAGE_TYPE 29 // take an integer as parameter
+ // M1-1, M2-2, M3-3, M4-4
+
+struct scsc_tlv_log {
+ u16 tag;
+ u16 length; // length of value
+ u8 value[0];
+} __packed;
+
+struct scsc_wifi_ring_buffer_driver_connectivity_event {
+ u16 event;
+ struct scsc_tlv_log tlvs[0];
+} __packed;
+
+/**
+ * Ring buffer name for power events ring. note that power event are extremely frequents
+ * and thus should be stored in their own ring/file so as not to clobber connectivity events.
+ */
+struct scsc_wake_lock_event {
+ int status; // 0 taken, 1 released
+ int reason; // reason why this wake lock is taken
+ char name[0]; // null terminated
+} __packed;
+
+struct scsc_wifi_power_event {
+ u16 event;
+ struct scsc_tlv_log tlvs[0];
+} __packed;
+
+#define PER_PACKET_ENTRY_FLAGS_DIRECTION_TX 1 /* 0: TX, 1: RX */
+#define PER_PACKET_ENTRY_FLAGS_TX_SUCCESS 2 /* pkt TX or RX/decrypt successfully */
+#define PER_PACKET_ENTRY_FLAGS_80211_HEADER 4 /* full 802.11 header or 802.3 header */
+#define PER_PACKET_ENTRY_FLAGS_PROTECTED 8 /* whether packet was encrypted */
+
+struct scsc_wifi_ring_per_packet_status_entry {
+ u8 flags;
+ u8 tid;
+ u16 MCS;
+ u8 rssi;
+ u8 num_retries;
+ u16 last_transmit_rate;
+ u16 link_layer_transmit_sequence;
+ u64 firmware_entry_timestamp;
+ u64 start_contention_timestamp;
+ u64 transmit_success_timestamp;
+ u8 data[0];
+} __packed;
+
+typedef void (*on_driver_memory_dump)(char *buffer, int buffer_size, void *ctx);
+typedef void (*on_firmware_memory_dump)(char *buffer, int buffer_size, void *ctx);
+
+/* packet fate logs */
+
+#define MD5_PREFIX_LEN 4
+#define MAX_FATE_LOG_LEN 32
+#define MAX_FRAME_LEN_ETHERNET 1518
+#define MAX_FRAME_LEN_80211_MGMT 2352 // 802.11-2012 Fig. 8-34
+
+typedef enum {
+ // Sent over air and ACKed.
+ TX_PKT_FATE_ACKED,
+
+ // Sent over air but not ACKed. (Normal for broadcast/multicast.)
+ TX_PKT_FATE_SENT,
+
+ // Queued within firmware, but not yet sent over air.
+ TX_PKT_FATE_FW_QUEUED,
+
+ // Dropped by firmware as invalid. E.g. bad source address, bad checksum,
+ // or invalid for current state.
+ TX_PKT_FATE_FW_DROP_INVALID,
+
+ // Dropped by firmware due to lack of buffer space.
+ TX_PKT_FATE_FW_DROP_NOBUFS,
+
+ // Dropped by firmware for any other reason. Includes frames that
+ // were sent by driver to firmware, but unaccounted for by
+ // firmware.
+ TX_PKT_FATE_FW_DROP_OTHER,
+
+ // Queued within driver, not yet sent to firmware.
+ TX_PKT_FATE_DRV_QUEUED,
+
+ // Dropped by driver as invalid. E.g. bad source address, or
+ // invalid for current state.
+ TX_PKT_FATE_DRV_DROP_INVALID,
+
+ // Dropped by driver due to lack of buffer space.
+ TX_PKT_FATE_DRV_DROP_NOBUFS,
+
+ // Dropped by driver for any other reason.
+ TX_PKT_FATE_DRV_DROP_OTHER,
+} wifi_tx_packet_fate;
+
+typedef enum {
+ // Valid and delivered to network stack (e.g., netif_rx()).
+ RX_PKT_FATE_SUCCESS,
+
+ // Queued within firmware, but not yet sent to driver.
+ RX_PKT_FATE_FW_QUEUED,
+
+ // Dropped by firmware due to host-programmable filters.
+ RX_PKT_FATE_FW_DROP_FILTER,
+
+ // Dropped by firmware as invalid. E.g. bad checksum, decrypt failed,
+ // or invalid for current state.
+ RX_PKT_FATE_FW_DROP_INVALID,
+
+ // Dropped by firmware due to lack of buffer space.
+ RX_PKT_FATE_FW_DROP_NOBUFS,
+
+ // Dropped by firmware for any other reason.
+ RX_PKT_FATE_FW_DROP_OTHER,
+
+ // Queued within driver, not yet delivered to network stack.
+ RX_PKT_FATE_DRV_QUEUED,
+
+ // Dropped by driver due to filter rules.
+ RX_PKT_FATE_DRV_DROP_FILTER,
+
+ // Dropped by driver as invalid. E.g. not permitted in current state.
+ RX_PKT_FATE_DRV_DROP_INVALID,
+
+ // Dropped by driver due to lack of buffer space.
+ RX_PKT_FATE_DRV_DROP_NOBUFS,
+
+ // Dropped by driver for any other reason.
+ RX_PKT_FATE_DRV_DROP_OTHER,
+} wifi_rx_packet_fate;
+
+typedef enum {
+ FRAME_TYPE_UNKNOWN,
+ FRAME_TYPE_ETHERNET_II,
+ FRAME_TYPE_80211_MGMT,
+} frame_type;
+
+struct scsc_frame_info {
+ // The type of MAC-layer frame that this frame_info holds.
+ // - For data frames, use FRAME_TYPE_ETHERNET_II.
+ // - For management frames, use FRAME_TYPE_80211_MGMT.
+ // - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN.
+ frame_type payload_type;
+
+ // The number of bytes included in |frame_content|. If the frame
+ // contents are missing (e.g. RX frame dropped in firmware),
+ // |frame_len| should be set to 0.
+ size_t frame_len;
+
+ // Host clock when this frame was received by the driver (either
+ // outbound from the host network stack, or inbound from the
+ // firmware).
+ // - The timestamp should be taken from a clock which includes time
+ // the host spent suspended (e.g. ktime_get_boottime()).
+ // - If no host timestamp is available (e.g. RX frame was dropped in
+ // firmware), this field should be set to 0.
+ u32 driver_timestamp_usec;
+
+ // Firmware clock when this frame was received by the firmware
+ // (either outbound from the host, or inbound from a remote
+ // station).
+ // - The timestamp should be taken from a clock which includes time
+ // firmware spent suspended (if applicable).
+ // - If no firmware timestamp is available (e.g. TX frame was
+ // dropped by driver), this field should be set to 0.
+ // - Consumers of |frame_info| should _not_ assume any
+ // synchronization between driver and firmware clocks.
+ u32 firmware_timestamp_usec;
+
+ // Actual frame content.
+ // - Should be provided for TX frames originated by the host.
+ // - Should be provided for RX frames received by the driver.
+ // - Optionally provided for TX frames originated by firmware. (At
+ // discretion of HAL implementation.)
+ // - Optionally provided for RX frames dropped in firmware. (At
+ // discretion of HAL implementation.)
+ // - If frame content is not provided, |frame_len| should be set
+ // to 0.
+ union {
+ char ethernet_ii_bytes[MAX_FRAME_LEN_ETHERNET];
+ char ieee_80211_mgmt_bytes[MAX_FRAME_LEN_80211_MGMT];
+ } frame_content;
+};
+
+typedef struct {
+ // Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+ // content is not provided, prefix of MD5 hash over the same data
+ // that would be in frame_content, if frame content were provided.
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_tx_packet_fate fate;
+ struct scsc_frame_info frame_inf;
+} __packed wifi_tx_report;
+
+typedef struct {
+ // Prefix of MD5 hash of |frame_inf.frame_content|. If frame
+ // content is not provided, prefix of MD5 hash over the same data
+ // that would be in frame_content, if frame content were provided.
+ char md5_prefix[MD5_PREFIX_LEN];
+ wifi_rx_packet_fate fate;
+ struct scsc_frame_info frame_inf;
+} __packed wifi_rx_report;
+#endif /* _SCSC_WIFILOGGER_TYPES_H_ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/mutex.h>
+#include <linux/wakelock.h>
+
+#include "scsc_wlbtd.h"
+
#define MAX_TIMEOUT		30000 /* in milliseconds */
#define WRITE_FILE_TIMEOUT	1000 /* in milliseconds */
+
+/* completion to indicate when EVENT_* is done */
+static DECLARE_COMPLETION(event_done);
+static DECLARE_COMPLETION(fw_panic_done);
+static DECLARE_COMPLETION(write_file_done);
+static DEFINE_MUTEX(write_file_lock);
+
+static DEFINE_MUTEX(build_type_lock);
+static char *build_type;
+static DEFINE_MUTEX(sable_lock);
+
+static struct wake_lock wlbtd_wakelock;
+
+/* module parameter controlling recovery handling */
+extern int disable_recovery_handling;
+
+const char *response_code_to_str(int response_code)
+{
+ switch (response_code) {
+ case SCSC_WLBTD_ERR_PARSE_FAILED:
+ return "SCSC_WLBTD_ERR_PARSE_FAILED";
+ case SCSC_WLBTD_FW_PANIC_TAR_GENERATED:
+ return "SCSC_WLBTD_FW_PANIC_TAR_GENERATED";
+ case SCSC_WLBTD_FW_PANIC_ERR_SCRIPT_FILE_NOT_FOUND:
+ return "SCSC_WLBTD_FW_PANIC_ERR_SCRIPT_FILE_NOT_FOUND";
+ case SCSC_WLBTD_FW_PANIC_ERR_NO_DEV:
+ return "SCSC_WLBTD_FW_PANIC_ERR_NO_DEV";
+ case SCSC_WLBTD_FW_PANIC_ERR_MMAP:
+ return "SCSC_WLBTD_FW_PANIC_ERR_MMAP";
+ case SCSC_WLBTD_FW_PANIC_ERR_SABLE_FILE:
+ return "SCSC_WLBTD_FW_PANIC_ERR_SABLE_FILE";
+ case SCSC_WLBTD_FW_PANIC_ERR_TAR:
+ return "SCSC_WLBTD_FW_PANIC_ERR_TAR";
+ case SCSC_WLBTD_OTHER_SBL_GENERATED:
+ return "SCSC_WLBTD_OTHER_SBL_GENERATED";
+ case SCSC_WLBTD_OTHER_TAR_GENERATED:
+ return "SCSC_WLBTD_OTHER_TAR_GENERATED";
+ case SCSC_WLBTD_OTHER_ERR_SCRIPT_FILE_NOT_FOUND:
+ return "SCSC_WLBTD_OTHER_ERR_SCRIPT_FILE_NOT_FOUND";
+ case SCSC_WLBTD_OTHER_ERR_NO_DEV:
+ return "SCSC_WLBTD_OTHER_ERR_NO_DEV";
+ case SCSC_WLBTD_OTHER_ERR_MMAP:
+ return "SCSC_WLBTD_OTHER_ERR_MMAP";
+ case SCSC_WLBTD_OTHER_ERR_SABLE_FILE:
+ return "SCSC_WLBTD_OTHER_ERR_SABLE_FILE";
+ case SCSC_WLBTD_OTHER_ERR_TAR:
+ return "SCSC_WLBTD_OTHER_ERR_TAR";
+ case SCSC_WLBTD_OTHER_IGNORE_TRIGGER:
+ return "SCSC_WLBTD_OTHER_IGNORE_TRIGGER";
+ default:
+ SCSC_TAG_ERR(WLBTD, "UNKNOWN response_code %d", response_code);
+ return "UNKNOWN response_code";
+ }
+}
+
+/**
+ * This callback runs whenever the socket receives messages.
+ */
/**
 * Generic netlink handler for plain wlbtd event messages.
 *
 * Logs the optional string attribute and the optional u32 status
 * (raw indices attrs[1]/attrs[2]; presumably ATTR_STR/ATTR_INT —
 * confirm against the attribute enum in scsc_wlbtd.h), then completes
 * event_done so the synchronous requester can proceed.
 */
static int msg_from_wlbtd_cb(struct sk_buff *skb, struct genl_info *info)
{
	int status = 0;

	if (info->attrs[1])
		SCSC_TAG_INFO(WLBTD, "ATTR_STR: %s\n",
			      (char *)nla_data(info->attrs[1]));

	if (info->attrs[2]) {
		status = *((__u32 *)nla_data(info->attrs[2]));
		/* A non-zero status from the daemon indicates an error. */
		if (status)
			SCSC_TAG_ERR(WLBTD, "ATTR_INT: %u\n", status);
	}

	complete(&event_done);

	return 0;
}
+
+static int msg_from_wlbtd_sable_cb(struct sk_buff *skb, struct genl_info *info)
+{
+ int status = 0;
+ const char *data = (const char *)nla_data(info->attrs[1]);
+
+ if (info->attrs[1])
+ SCSC_TAG_INFO(WLBTD, "%s\n", data);
+
+ if (info->attrs[2]) {
+ status = nla_get_u16(info->attrs[2]);
+ SCSC_TAG_ERR(WLBTD, "%s\n", response_code_to_str(status));
+ }
+
+ /* completion cases :
+ * 1) FW_PANIC_TAR_GENERATED
+ * for trigger scsc_log_fw_panic only one response from wlbtd when
+ * tar done
+ * ---> complete fw_panic_done
+ * 2) for all other triggers, we get 2 responses
+ * a) OTHER_SBL_GENERATED
+ * Once .sbl is written
+ * ---> complete event_done
+ * b) OTHER_TAR_GENERATED
+ * 2nd time when sable tar is done
+ * IGNORE this response and Don't complete
+ * 3) OTHER_IGNORE_TRIGGER
+ * When we get rapid requests for SABLE generation,
+ * to serialise while processing current request,
+ * we ignore requests other than "fw_panic" in wlbtd and
+ * send a msg "ignoring" back to kernel.
+ * ---> complete event_done
+ * 4) FW_PANIC_ERR_* and OTHER_ERR_*
+ * when something failed, file not found, mmap failed, etc.
+ * ---> complete the completion with waiter(s) based on if it was
+ * a fw_panic trigger or other trigger
+ * 5) ERR_PARSE_FAILED
+ * When msg parsing fails, wlbtd doesn't know the trigger type
+ * ---> complete the completion with waiter(s)
+ */
+
+ switch (status) {
+ case SCSC_WLBTD_ERR_PARSE_FAILED:
+ if (!completion_done(&fw_panic_done)) {
+ SCSC_TAG_INFO(WLBTD, "completing fw_panic_done\n");
+ complete(&fw_panic_done);
+ }
+ if (!completion_done(&event_done)) {
+ SCSC_TAG_INFO(WLBTD, "completing event_done\n");
+ complete(&event_done);
+ }
+ break;
+ case SCSC_WLBTD_FW_PANIC_TAR_GENERATED:
+ case SCSC_WLBTD_FW_PANIC_ERR_TAR:
+ case SCSC_WLBTD_FW_PANIC_ERR_SCRIPT_FILE_NOT_FOUND:
+ case SCSC_WLBTD_FW_PANIC_ERR_NO_DEV:
+ case SCSC_WLBTD_FW_PANIC_ERR_MMAP:
+ case SCSC_WLBTD_FW_PANIC_ERR_SABLE_FILE:
+ if (!completion_done(&fw_panic_done)) {
+ SCSC_TAG_INFO(WLBTD, "completing fw_panic_done\n");
+ complete(&fw_panic_done);
+ }
+ break;
+ case SCSC_WLBTD_OTHER_TAR_GENERATED:
+ /* ignore */
+ break;
+ case SCSC_WLBTD_OTHER_SBL_GENERATED:
+ case SCSC_WLBTD_OTHER_ERR_TAR:
+ case SCSC_WLBTD_OTHER_ERR_SCRIPT_FILE_NOT_FOUND:
+ case SCSC_WLBTD_OTHER_ERR_NO_DEV:
+ case SCSC_WLBTD_OTHER_ERR_MMAP:
+ case SCSC_WLBTD_OTHER_ERR_SABLE_FILE:
+ case SCSC_WLBTD_OTHER_IGNORE_TRIGGER:
+ if (!completion_done(&event_done)) {
+ SCSC_TAG_INFO(WLBTD, "completing event_done\n");
+ complete(&event_done);
+ }
+ break;
+ default:
+ SCSC_TAG_ERR(WLBTD, "UNKNOWN reponse from WLBTD\n");
+ }
+
+ return 0;
+}
+
+static int msg_from_wlbtd_build_type_cb(struct sk_buff *skb, struct genl_info *info)
+{
+ if (!info->attrs[1]) {
+ SCSC_TAG_WARNING(WLBTD, "info->attrs[1] = NULL\n");
+ return -1;
+ }
+
+ if (!nla_len(info->attrs[1])) {
+ SCSC_TAG_WARNING(WLBTD, "nla_len = 0\n");
+ return -1;
+ }
+
+ mutex_lock(&build_type_lock);
+ if (build_type) {
+ SCSC_TAG_WARNING(WLBTD, "ro.build.type = %s\n", build_type);
+ mutex_unlock(&build_type_lock);
+ return 0;
+ }
+ /* nla_len includes trailing zero. Tested.*/
+ build_type = kmalloc(info->attrs[1]->nla_len, GFP_KERNEL);
+ if (!build_type) {
+ SCSC_TAG_WARNING(WLBTD, "kmalloc failed: build_type = NULL\n");
+ mutex_unlock(&build_type_lock);
+ return -1;
+ }
+ memcpy(build_type, (char *)nla_data(info->attrs[1]), info->attrs[1]->nla_len);
+ SCSC_TAG_WARNING(WLBTD, "ro.build.type = %s\n", build_type);
+ mutex_unlock(&build_type_lock);
+ return 0;
+
+}
+
+static int msg_from_wlbtd_write_file_cb(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[3])
+ SCSC_TAG_INFO(WLBTD, "%s\n", (char *)nla_data(info->attrs[3]));
+
+ complete(&write_file_done);
+ return 0;
+}
+
+/**
+ * Attribute validation policies.  The generic netlink core validates
+ * incoming attributes against these before the .doit handlers run.
+ * Arrays are indexed by attribute id (ATTR_*).
+ */
+static struct nla_policy policies[] = {
+	[ATTR_STR] = { .type = NLA_STRING, },
+	[ATTR_INT] = { .type = NLA_U32, },
+};
+
+/* EVENT_SABLE: u16 reason code + u8 trigger code */
+static struct nla_policy policy_sable[] = {
+	[ATTR_INT] = { .type = NLA_U16, },
+	[ATTR_INT8] = { .type = NLA_U8, },
+};
+
+/* EVENT_SYSTEM_PROPERTY: property value string (e.g. ro.build.type) */
+static struct nla_policy policies_build_type[] = {
+	[ATTR_STR] = { .type = NLA_STRING, },
+};
+
+/* EVENT_WRITE_FILE: destination path + file content */
+static struct nla_policy policy_write_file[] = {
+	[ATTR_PATH] = { .type = NLA_STRING, },
+	[ATTR_CONTENT] = { .type = NLA_STRING, },
+};
+
+
+/**
+ * Generic netlink operations table: maps each EVENT_* command coming
+ * from the userspace wlbtd daemon to its handler and validation policy.
+ * None of the commands support dump mode.
+ */
+const struct genl_ops scsc_ops[] = {
+	{
+		.cmd = EVENT_SCSC,
+		.flags = 0,
+		.policy = policies,
+		.doit = msg_from_wlbtd_cb,
+		.dumpit = NULL,
+	},
+	{
+		.cmd = EVENT_SYSTEM_PROPERTY,
+		.flags = 0,
+		.policy = policies_build_type,
+		.doit = msg_from_wlbtd_build_type_cb,
+		.dumpit = NULL,
+	},
+	{
+		.cmd = EVENT_SABLE,
+		.flags = 0,
+		.policy = policy_sable,
+		.doit = msg_from_wlbtd_sable_cb,
+		.dumpit = NULL,
+	},
+	{
+		.cmd = EVENT_WRITE_FILE,
+		.flags = 0,
+		.policy = policy_write_file,
+		.doit = msg_from_wlbtd_write_file_cb,
+		.dumpit = NULL,
+	},
+
+};
+
+/* The generic netlink family the wlbtd daemon binds to; the kernel
+ * assigns the numeric id at registration, userspace resolves it by name.
+ */
+static struct genl_family scsc_nlfamily = {
+	.id = 0,          /* Don't bother with a hardcoded ID */
+	.name = "scsc_mdp_family",     /* Have users key off the name instead */
+	.hdrsize = 0,          /* No private header */
+	.version = 1,
+	.maxattr = __ATTR_MAX,
+	.module = THIS_MODULE,
+	.ops    = scsc_ops,
+	.n_ops = ARRAY_SIZE(scsc_ops),
+	.mcgrps = scsc_mcgrp,
+	.n_mcgrps = ARRAY_SIZE(scsc_mcgrp),
+};
+
+/**
+ * scsc_wlbtd_get_and_print_build_type() - query ro.build.type via wlbtd.
+ *
+ * Returns immediately if the value is already cached; otherwise
+ * multicasts an EVENT_SYSTEM_PROPERTY request.  The reply is cached
+ * asynchronously by msg_from_wlbtd_build_type_cb().  Returns 0 on
+ * success, the (negative) netlink error if the send fails, or -1 on
+ * message-construction failure.
+ *
+ * Fix: send failures other than -ESRCH previously went to the error
+ * path and called nlmsg_free() on an skb that
+ * genlmsg_multicast_allns() had already consumed - a double free.
+ * They now go through done:, matching the sibling senders.
+ */
+int scsc_wlbtd_get_and_print_build_type(void)
+{
+	struct sk_buff *skb;
+	void *msg;
+	int rc = 0;
+
+	SCSC_TAG_DEBUG(WLBTD, "start\n");
+	wake_lock(&wlbtd_wakelock);
+
+	/* check if the value wasn't cached yet */
+	mutex_lock(&build_type_lock);
+	if (build_type) {
+		SCSC_TAG_WARNING(WLBTD, "ro.build.type = %s\n", build_type);
+		SCSC_TAG_DEBUG(WLBTD, "sync end\n");
+		mutex_unlock(&build_type_lock);
+		goto done;
+	}
+	mutex_unlock(&build_type_lock);
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb) {
+		SCSC_TAG_ERR(WLBTD, "Failed to construct message\n");
+		goto error;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "create message\n");
+	msg = genlmsg_put(skb,
+			  0,                    /* PID is whatever */
+			  0,                    /* Sequence number (don't care) */
+			  &scsc_nlfamily,       /* Pointer to family struct */
+			  0,                    /* Flags */
+			  EVENT_SYSTEM_PROPERTY /* Generic netlink command */
+			  );
+	if (!msg) {
+		SCSC_TAG_ERR(WLBTD, "Failed to create message\n");
+		goto error;
+	}
+	rc = nla_put_string(skb, ATTR_STR, "ro.build.type");
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_string failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+	genlmsg_end(skb, msg);
+
+	SCSC_TAG_INFO(WLBTD, "finalize & send msg\n");
+	/* genlmsg_multicast_allns() frees skb, on success and on failure */
+	rc = genlmsg_multicast_allns(&scsc_nlfamily, skb, 0, 0, GFP_KERNEL);
+	if (rc) {
+		if (rc == -ESRCH) {
+			/* If no one registered to scsc_mdp_mcgrp (e.g. in case wlbtd
+			 * is not running) genlmsg_multicast_allns returns -ESRCH.
+			 * Ignore and return.
+			 */
+			SCSC_TAG_WARNING(WLBTD, "WLBTD not running ?\n");
+			goto done;
+		}
+		SCSC_TAG_ERR(WLBTD, "failed to send message. rc = %d\n", rc);
+		goto done;
+	}
+
+	SCSC_TAG_DEBUG(WLBTD, "async end\n");
+done:
+	wake_unlock(&wlbtd_wakelock);
+	return rc;
+
+error:
+	/* Only reached before the send: the skb is still ours to free */
+	nlmsg_free(skb);
+	wake_unlock(&wlbtd_wakelock);
+	return -1;
+}
+
+/**
+ * wlbtd_write_file() - ask the wlbtd daemon to write a file on our behalf.
+ * @file_path:    destination path, sent as ATTR_PATH.
+ * @file_content: NUL-terminated content, sent as ATTR_CONTENT.
+ *
+ * Multicasts an EVENT_WRITE_FILE message and waits up to
+ * WRITE_FILE_TIMEOUT ms for wlbtd to signal write_file_done.
+ * Serialised by write_file_lock; holds wlbtd_wakelock for the duration.
+ * Returns 0 on success (or when wlbtd is not running), the netlink
+ * error on send failure, or -1 on message-construction failure.
+ * Compiles to a stub returning 0 without
+ * CONFIG_SCSC_WRITE_INFO_FILE_WLBTD.
+ *
+ * Fixes: log text said "nla_put_u32" for the ATTR_PATH nla_put_string()
+ * call, and the elapsed time was printed with %d from an unsigned long
+ * (format mismatch) - now cast to int like the sibling senders.
+ */
+int wlbtd_write_file(const char *file_path, const char *file_content)
+{
+#ifdef CONFIG_SCSC_WRITE_INFO_FILE_WLBTD
+	struct sk_buff *skb;
+	void *msg;
+	int rc = 0;
+	unsigned long completion_jiffies = 0;
+	unsigned long max_timeout_jiffies = msecs_to_jiffies(WRITE_FILE_TIMEOUT);
+
+	SCSC_TAG_DEBUG(WLBTD, "start\n");
+
+	mutex_lock(&write_file_lock);
+	wake_lock(&wlbtd_wakelock);
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb) {
+		SCSC_TAG_ERR(WLBTD, "Failed to construct message\n");
+		goto error;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "create message to write %s\n", file_path);
+	msg = genlmsg_put(skb,
+			  0,               /* PID is whatever */
+			  0,               /* Sequence number (don't care) */
+			  &scsc_nlfamily,  /* Pointer to family struct */
+			  0,               /* Flags */
+			  EVENT_WRITE_FILE /* Generic netlink command */
+			  );
+	if (!msg) {
+		SCSC_TAG_ERR(WLBTD, "Failed to create message\n");
+		goto error;
+	}
+
+	SCSC_TAG_DEBUG(WLBTD, "add values to msg\n");
+	rc = nla_put_string(skb, ATTR_PATH, file_path);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_string failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	rc = nla_put_string(skb, ATTR_CONTENT, file_content);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_string failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	genlmsg_end(skb, msg);
+
+	SCSC_TAG_INFO(WLBTD, "finalize & send msg\n");
+	/* genlmsg_multicast_allns() frees skb */
+	rc = genlmsg_multicast_allns(&scsc_nlfamily, skb, 0, 0, GFP_KERNEL);
+
+	if (rc) {
+		if (rc == -ESRCH) {
+			/* If no one registered to scsc_mcgrp (e.g. in case
+			 * wlbtd is not running) genlmsg_multicast_allns
+			 * returns -ESRCH. Ignore and return.
+			 */
+			SCSC_TAG_WARNING(WLBTD, "WLBTD not running ?\n");
+			goto done;
+		}
+		SCSC_TAG_ERR(WLBTD, "Failed to send message. rc = %d\n", rc);
+		goto done;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "waiting for completion\n");
+	/* wait for script to finish */
+	completion_jiffies = wait_for_completion_timeout(&write_file_done,
+							 max_timeout_jiffies);
+
+	if (completion_jiffies == 0)
+		SCSC_TAG_ERR(WLBTD, "wait for completion timed out !\n");
+	else {
+		completion_jiffies = jiffies_to_msecs(max_timeout_jiffies - completion_jiffies);
+
+		/* report at least 1ms elapsed */
+		SCSC_TAG_INFO(WLBTD, "written %s in %dms\n", file_path,
+			      (int)completion_jiffies ? : 1);
+	}
+
+	/* reinit so completion can be re-used */
+	reinit_completion(&write_file_done);
+
+	SCSC_TAG_DEBUG(WLBTD, "end\n");
+done:
+	wake_unlock(&wlbtd_wakelock);
+	mutex_unlock(&write_file_lock);
+	return rc;
+
+error:
+	/* free skb */
+	nlmsg_free(skb);
+
+	wake_unlock(&wlbtd_wakelock);
+	mutex_unlock(&write_file_lock);
+	return -1;
+#else /* CONFIG_SCSC_WRITE_INFO_FILE_WLBTD */
+	return 0; /* stub */
+#endif
+}
+EXPORT_SYMBOL(wlbtd_write_file);
+
+/**
+ * call_wlbtd_sable() - ask wlbtd to generate a sable/log dump for a trigger.
+ * @trigger_code: SCSC_LOG_* trigger (SCSC_LOG_FW_PANIC waits on its own
+ *                completion so a panic dump cannot be stolen by another
+ *                concurrent trigger).
+ * @reason_code:  reason/panic code forwarded to wlbtd.
+ *
+ * Multicasts an EVENT_SABLE message and blocks up to MAX_TIMEOUT ms for
+ * wlbtd to signal fw_panic_done or event_done (completed by
+ * msg_from_wlbtd_sable_cb).  Serialised by sable_lock; holds
+ * wlbtd_wakelock throughout.  Returns 0 on success, the netlink error
+ * on send failure, or -1 on message-construction failure.
+ */
+int call_wlbtd_sable(u8 trigger_code, u16 reason_code)
+{
+	struct sk_buff *skb;
+	void *msg;
+	int rc = 0;
+	unsigned long completion_jiffies = 0;
+	unsigned long max_timeout_jiffies = msecs_to_jiffies(MAX_TIMEOUT);
+
+	mutex_lock(&sable_lock);
+	wake_lock(&wlbtd_wakelock);
+
+	SCSC_TAG_INFO(WLBTD, "start:trigger - %s\n",
+		      scsc_get_trigger_str((int)trigger_code));
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb) {
+		SCSC_TAG_ERR(WLBTD, "Failed to construct message\n");
+		goto error;
+	}
+
+	SCSC_TAG_DEBUG(WLBTD, "create message\n");
+	msg = genlmsg_put(skb,
+			  0,              /* PID is whatever */
+			  0,              /* Sequence number (don't care) */
+			  &scsc_nlfamily, /* Pointer to family struct */
+			  0,              /* Flags */
+			  EVENT_SABLE     /* Generic netlink command */
+			  );
+	if (!msg) {
+		SCSC_TAG_ERR(WLBTD, "Failed to create message\n");
+		goto error;
+	}
+	SCSC_TAG_DEBUG(WLBTD, "add values to msg\n");
+	rc = nla_put_u16(skb, ATTR_INT, reason_code);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_u16 failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	rc = nla_put_u8(skb, ATTR_INT8, trigger_code);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_u8 failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	genlmsg_end(skb, msg);
+
+	SCSC_TAG_DEBUG(WLBTD, "finalize & send msg\n");
+	/* genlmsg_multicast_allns() frees skb */
+	rc = genlmsg_multicast_allns(&scsc_nlfamily, skb, 0, 0, GFP_KERNEL);
+
+	if (rc) {
+		if (rc == -ESRCH) {
+			/* If no one registered to scsc_mcgrp (e.g. in case
+			 * wlbtd is not running) genlmsg_multicast_allns
+			 * returns -ESRCH. Ignore and return.
+			 */
+			SCSC_TAG_WARNING(WLBTD, "WLBTD not running ?\n");
+			goto done;
+		}
+		SCSC_TAG_ERR(WLBTD, "Failed to send message. rc = %d\n", rc);
+		goto done;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "waiting for completion\n");
+
+	/* wait for script to finish */
+	if (trigger_code == SCSC_LOG_FW_PANIC)
+		completion_jiffies = wait_for_completion_timeout(&fw_panic_done,
+								 max_timeout_jiffies);
+	else
+		completion_jiffies = wait_for_completion_timeout(&event_done,
+								 max_timeout_jiffies);
+
+	if (completion_jiffies) {
+		/* report elapsed time; at least 1ms */
+		completion_jiffies = max_timeout_jiffies - completion_jiffies;
+		SCSC_TAG_INFO(WLBTD, "sable generated in %dms\n",
+			      (int)jiffies_to_msecs(completion_jiffies) ? : 1);
+	} else
+		SCSC_TAG_ERR(WLBTD, "wait for completion timed out for %s\n",
+			     scsc_get_trigger_str((int)trigger_code));
+
+	/* reinit so completion can be re-used */
+	if (trigger_code == SCSC_LOG_FW_PANIC)
+		reinit_completion(&fw_panic_done);
+	else
+		reinit_completion(&event_done);
+
+	SCSC_TAG_INFO(WLBTD, " end:trigger - %s\n",
+		      scsc_get_trigger_str((int)trigger_code));
+
+done:
+	wake_unlock(&wlbtd_wakelock);
+	mutex_unlock(&sable_lock);
+	return rc;
+
+error:
+	/* free skb */
+	nlmsg_free(skb);
+	wake_unlock(&wlbtd_wakelock);
+	mutex_unlock(&sable_lock);
+
+	return -1;
+}
+EXPORT_SYMBOL(call_wlbtd_sable);
+
+/**
+ * call_wlbtd() - ask wlbtd to run a script and wait for it to finish.
+ * @script_path: path of the script, sent as ATTR_STR.
+ *
+ * Multicasts an EVENT_SCSC message and blocks up to MAX_TIMEOUT ms on
+ * event_done (completed by msg_from_wlbtd_cb).  Holds wlbtd_wakelock
+ * while waiting.  Returns 0 on success, the netlink error on send
+ * failure, or -1 on message-construction failure.
+ * NOTE(review): unlike call_wlbtd_sable() this takes no mutex, so
+ * concurrent callers share event_done - presumably serialised by the
+ * callers; confirm.
+ */
+int call_wlbtd(const char *script_path)
+{
+	struct sk_buff *skb;
+	void *msg;
+	int rc = 0;
+	unsigned long completion_jiffies = 0;
+	unsigned long max_timeout_jiffies = msecs_to_jiffies(MAX_TIMEOUT);
+
+	SCSC_TAG_DEBUG(WLBTD, "start\n");
+
+	wake_lock(&wlbtd_wakelock);
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb) {
+		SCSC_TAG_ERR(WLBTD, "Failed to construct message\n");
+		goto error;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "create message to run %s\n", script_path);
+	msg = genlmsg_put(skb,
+			  0,              /* PID is whatever */
+			  0,              /* Sequence number (don't care) */
+			  &scsc_nlfamily, /* Pointer to family struct */
+			  0,              /* Flags */
+			  EVENT_SCSC      /* Generic netlink command */
+			  );
+	if (!msg) {
+		SCSC_TAG_ERR(WLBTD, "Failed to create message\n");
+		goto error;
+	}
+
+	SCSC_TAG_DEBUG(WLBTD, "add values to msg\n");
+	/* NOTE(review): 9 looks like a protocol constant understood by
+	 * wlbtd - confirm against the daemon and name it.
+	 */
+	rc = nla_put_u32(skb, ATTR_INT, 9);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_u32 failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	rc = nla_put_string(skb, ATTR_STR, script_path);
+	if (rc) {
+		SCSC_TAG_ERR(WLBTD, "nla_put_string failed. rc = %d\n", rc);
+		genlmsg_cancel(skb, msg);
+		goto error;
+	}
+
+	genlmsg_end(skb, msg);
+
+	SCSC_TAG_INFO(WLBTD, "finalize & send msg\n");
+	/* genlmsg_multicast_allns() frees skb */
+	rc = genlmsg_multicast_allns(&scsc_nlfamily, skb, 0, 0, GFP_KERNEL);
+
+	if (rc) {
+		if (rc == -ESRCH) {
+			/* If no one registered to scsc_mcgrp (e.g. in case
+			 * wlbtd is not running) genlmsg_multicast_allns
+			 * returns -ESRCH. Ignore and return.
+			 */
+			SCSC_TAG_WARNING(WLBTD, "WLBTD not running ?\n");
+			goto done;
+		}
+		SCSC_TAG_ERR(WLBTD, "Failed to send message. rc = %d\n", rc);
+		goto done;
+	}
+
+	SCSC_TAG_INFO(WLBTD, "waiting for completion\n");
+
+	/* wait for script to finish */
+	completion_jiffies = wait_for_completion_timeout(&event_done,
+							 max_timeout_jiffies);
+
+	if (completion_jiffies) {
+
+		/* report elapsed time; at least 1ms */
+		completion_jiffies = max_timeout_jiffies - completion_jiffies;
+		SCSC_TAG_INFO(WLBTD, "done in %dms\n",
+			      (int)jiffies_to_msecs(completion_jiffies) ? : 1);
+	} else
+		SCSC_TAG_ERR(WLBTD, "wait for completion timed out !\n");
+
+	/* reinit so completion can be re-used */
+	reinit_completion(&event_done);
+
+	SCSC_TAG_DEBUG(WLBTD, "end\n");
+
+done:
+	wake_unlock(&wlbtd_wakelock);
+	return rc;
+
+error:
+	/* free skb */
+	nlmsg_free(skb);
+	wake_unlock(&wlbtd_wakelock);
+
+	return -1;
+}
+EXPORT_SYMBOL(call_wlbtd);
+
+/**
+ * scsc_wlbtd_init() - set up completions, wake lock and the netlink family.
+ *
+ * Returns 0 on success, -1 if the generic netlink family cannot be
+ * registered.
+ *
+ * Fix: on registration failure the wake lock is now destroyed again,
+ * so a later retry does not initialise it twice and the wakeup source
+ * is not leaked.
+ */
+int scsc_wlbtd_init(void)
+{
+	int r;
+
+	wake_lock_init(&wlbtd_wakelock, WAKE_LOCK_SUSPEND, "wlbtd_wl");
+	init_completion(&event_done);
+	init_completion(&fw_panic_done);
+	init_completion(&write_file_done);
+
+	/* register the family so that wlbtd can bind */
+	r = genl_register_family(&scsc_nlfamily);
+	if (r) {
+		SCSC_TAG_ERR(WLBTD, "Failed to register family. (%d)\n", r);
+		wake_lock_destroy(&wlbtd_wakelock);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Tear down the netlink family, drop the cached build-type string and
+ * destroy the wake lock.  Returns 0 on success, -1 if the family could
+ * not be unregistered (state is then left untouched).
+ */
+int scsc_wlbtd_deinit(void)
+{
+	int ret;
+
+	/* unregister family */
+	ret = genl_unregister_family(&scsc_nlfamily);
+	if (ret) {
+		SCSC_TAG_ERR(WLBTD, "genl_unregister_family failed (%d)\n",
+			     ret);
+		return -1;
+	}
+
+	kfree(build_type);
+	build_type = NULL;
+	wake_lock_destroy(&wlbtd_wakelock);
+
+	return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Fix: add an include guard (the header defines enums and an array, so
+ * double inclusion in one translation unit was a compile error).
+ */
+#ifndef SCSC_WLBTD_H__
+#define SCSC_WLBTD_H__
+
+#include <net/genetlink.h>
+#include <scsc/scsc_logring.h>
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_log_collector.h>
+
+/* module parameter value to indicate control of recovery via .memdump.info file */
+#define MEMDUMP_FILE_FOR_RECOVERY 2
+/* content of .memdump.info file indicating to panic kernel */
+#define MEMDUMP_FILE_KERNEL_PANIC 3
+
+/**
+ * Attributes are fields of data your messages will contain.
+ * The designers of Netlink really want you to use these instead of just dumping
+ * data to the packet payload.
+ */
+enum attributes {
+	/* The first one has to be a throwaway empty attribute */
+	ATTR_UNSPEC,
+
+	ATTR_STR,
+	ATTR_INT,
+	ATTR_PATH,
+	ATTR_CONTENT,
+	ATTR_INT8,
+
+	/* This must be last! */
+	__ATTR_MAX,
+};
+
+/**
+ * Message type codes.
+ */
+enum events {
+	/* must be first */
+	EVENT_UNSPEC,
+
+	EVENT_SCSC,
+	EVENT_SYSTEM_PROPERTY,
+	EVENT_WRITE_FILE,
+	EVENT_SABLE,
+	/* This must be last! */
+	__EVENT_MAX,
+};
+
+enum scsc_wlbtd_response_codes {
+	/* NOTE: keep the enum in sync with userspace wlbtd */
+	/* parse failed */
+	SCSC_WLBTD_ERR_PARSE_FAILED,
+
+	/* fw_panic trigger */
+	SCSC_WLBTD_FW_PANIC_TAR_GENERATED,
+	SCSC_WLBTD_FW_PANIC_ERR_SCRIPT_FILE_NOT_FOUND,
+	SCSC_WLBTD_FW_PANIC_ERR_NO_DEV,
+	SCSC_WLBTD_FW_PANIC_ERR_MMAP,
+	SCSC_WLBTD_FW_PANIC_ERR_SABLE_FILE,
+	SCSC_WLBTD_FW_PANIC_ERR_TAR,
+
+	/* other triggers */
+	SCSC_WLBTD_OTHER_SBL_GENERATED,
+	SCSC_WLBTD_OTHER_TAR_GENERATED,
+	SCSC_WLBTD_OTHER_ERR_SCRIPT_FILE_NOT_FOUND,
+	SCSC_WLBTD_OTHER_ERR_NO_DEV,
+	SCSC_WLBTD_OTHER_ERR_MMAP,
+	SCSC_WLBTD_OTHER_ERR_SABLE_FILE,
+	SCSC_WLBTD_OTHER_ERR_TAR,
+	SCSC_WLBTD_OTHER_IGNORE_TRIGGER,
+};
+
+/* NOTE(review): a static array in a header gives every includer its own
+ * copy - fine if only scsc_wlbtd.c uses it; confirm before relying on
+ * it elsewhere.
+ */
+static const struct genl_multicast_group scsc_mcgrp[] = {
+	{ .name = "scsc_mdp_grp", },
+};
+
+int scsc_wlbtd_init(void);
+int scsc_wlbtd_deinit(void);
+int call_wlbtd(const char *script_path);
+int wlbtd_write_file(const char *path, const char *content);
+int call_wlbtd_sable(u8 trigger_code, u16 reason_code);
+int scsc_wlbtd_get_and_print_build_type(void);
+
+#endif /* SCSC_WLBTD_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef SERVMAN_MESSAGES_H__
+#define SERVMAN_MESSAGES_H__
+
+#include <scsc/scsc_mifram.h>
+/**
+ * Maxwell Service Management Messages.
+ *
+ * TODO: common defn with host, generated.
+ */
+/* NOTE(review): "} sm_msg;" declares a VARIABLE of this anonymous enum
+ * type in every file that includes this header - a multiple-definition
+ * hazard with -fno-common.  Presumably only the enumerators are wanted;
+ * confirm nothing references sm_msg, then drop the name.
+ */
+enum {
+	SM_MSG_START_REQ,
+	SM_MSG_START_CFM,
+	SM_MSG_STOP_REQ,
+	SM_MSG_STOP_CFM,
+} sm_msg;
+
+/* Transport format for service management messages across the
+ * Maxwell management transport.
+ *
+ * TODO: common defn with host, generated.
+ */
+struct sm_msg_packet {
+	uint8_t         service_id;   /* which service the message targets */
+	uint8_t         msg;          /* one of the SM_MSG_* codes above */
+	scsc_mifram_ref optional_data; /* shared-memory reference, message-specific */
+} __packed;
+
+
+#endif /* SERVMAN_MESSAGES_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Service manager: tracks the services running on a Maxwell instance. */
+
+#ifndef _SRVMAN_H
+#define _SRVMAN_H
+
+#ifdef CONFIG_ANDROID
+#include <linux/wakelock.h>
+#endif
+
+struct srvman;
+
+void srvman_init(struct srvman *srvman, struct scsc_mx *mx);
+int srvman_suspend_services(struct srvman *srvman);
+int srvman_resume_services(struct srvman *srvman);
+void srvman_freeze_services(struct srvman *srvman);
+void srvman_unfreeze_services(struct srvman *srvman, u16 scsc_panic_code);
+void srvman_set_error(struct srvman *srvman);
+void srvman_clear_error(struct srvman *srvman);
+void srvman_deinit(struct srvman *srvman);
+
+struct srvman {
+	struct scsc_mx   *mx;                  /* owning Maxwell instance */
+	struct list_head service_list;         /* registered services */
+	struct mutex     service_list_mutex;   /* protects service_list */
+	struct mutex     api_access_mutex;     /* serialises public API calls */
+	bool             error;                /* set by srvman_set_error() */
+#ifdef CONFIG_ANDROID
+	struct wake_lock sm_wake_lock;
+#endif
+};
+
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+#include <scsc/scsc_logring.h>
+
+#include "suspendmon.h"
+#include "scsc_mif_abs.h"
+#include "mxman.h"
+
+/* MIF suspend callback: forward the suspend request to the mxman layer.
+ * A non-zero return from mxman_suspend() vetoes the suspend.
+ */
+static int suspendmon_suspend(struct scsc_mif_abs *mif, void *data)
+{
+	struct suspendmon *mon = data;
+
+	SCSC_TAG_DEBUG(MXMAN, "suspendmon=%p suspendmon->mx=%p mxman=%p\n",
+		       mon, mon->mx, scsc_mx_get_mxman(mon->mx));
+
+	return mxman_suspend(scsc_mx_get_mxman(mon->mx));
+}
+
+/* MIF resume callback: notify the mxman layer that the system resumed. */
+static void suspendmon_resume(struct scsc_mif_abs *mif, void *data)
+{
+	struct suspendmon *mon = data;
+
+	SCSC_TAG_DEBUG(MXMAN, "suspendmon=%p suspendmon->mx=%p mxman=%p\n",
+		       mon, mon->mx, scsc_mx_get_mxman(mon->mx));
+
+	mxman_resume(scsc_mx_get_mxman(mon->mx));
+}
+
+/* Bind the suspend monitor to a Maxwell instance and hook the
+ * suspend/resume callbacks into the MIF abstraction (when supported).
+ */
+void suspendmon_init(struct suspendmon *suspendmon, struct scsc_mx *mx)
+{
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(mx);
+
+	suspendmon->mx = mx;
+
+	/* register callbacks with mif abstraction */
+	if (mif->suspend_reg_handler)
+		mif->suspend_reg_handler(mif, suspendmon_suspend,
+					 suspendmon_resume, (void *)suspendmon);
+}
+
+/* Unhook the suspend/resume callbacks from the MIF abstraction. */
+void suspendmon_deinit(struct suspendmon *suspendmon)
+{
+	struct scsc_mif_abs *mif = scsc_mx_get_mif_abs(suspendmon->mx);
+
+	if (mif->suspend_unreg_handler)
+		mif->suspend_unreg_handler(mif);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ ****************************************************************************/
+
+/* Baroque suspend/resume handler registration interface */
+
+#ifndef _SUSPENDMON_H
+#define _SUSPENDMON_H
+
+#include "mxman.h"
+
+struct suspendmon;
+
+void suspendmon_init(struct suspendmon *suspendmon, struct scsc_mx *mx);
+void suspendmon_deinit(struct suspendmon *suspendmon);
+
+struct suspendmon {
+	struct scsc_mx *mx;   /* Maxwell instance this monitor is bound to */
+};
+
+#endif /* _SUSPENDMON_H */
--- /dev/null
+config SCSC_BT
+	tristate "SCSC MX BT support"
+	depends on SCSC_CORE
+
+config SCSC_BT_BLUEZ
+	default n
+	tristate "SCSC MX BlueZ support"
+	depends on SCSC_CORE
+
+config SCSC_ANT
+	default n
+	tristate "SCSC MX ANT support"
+	depends on SCSC_CORE
+	---help---
+	  This module adds support for using a ANT character device.
+
+config SCSC_BT_ADDRESS_IN_FILE
+	default n
+	tristate "SCSC MX BT address-in-file support"
+	depends on SCSC_CORE
+	---help---
+	  Allow BT address to be read from a file
+
+config SCSC_BT_ADDRESS_FILENAME
+	string "BT address filename"
+	depends on SCSC_CORE
+	default "/mnt/vendor/efs/bluetooth/bt_addr"
+	---help---
+	  Select the named BT address override file.
--- /dev/null
+# Needed since this subdir is symlinked in the main Kernel tree
+# without this our samsung subdir is NOT cleaned.
+clean-files := *.o *.ko
+
+#
+# Maxwell BT
+obj-$(CONFIG_SCSC_BT) += scsc_bt.o
+scsc_bt-y += scsc_bt_module.o scsc_shm.o scsc_avdtp_detect.o
+scsc_bt-$(CONFIG_SCSC_BT_BLUEZ) += scsc_bluez.o
+scsc_bt-$(CONFIG_SCSC_ANT) += scsc_ant.o
+
+ccflags-y += $(CONFIG_SAMSUNG_MAXWELL_EXTRA)
+## See sibling scsc/ Makefile for an explanation of the reasons of
+## the following ifeq/else
+# When the option is built as a module (=m), ccflags-$(m) would never
+# match ccflags-y, so define the macro explicitly in that case.
+ifeq ($(CONFIG_SCSC_LOGRING), m)
+ccflags-y += -DCONFIG_SCSC_PRINTK
+else
+ccflags-$(CONFIG_SCSC_LOGRING) += -DCONFIG_SCSC_PRINTK
+endif
+
+ifeq ($(CONFIG_SCSC_BT_BLUEZ), m)
+ccflags-y += -DCONFIG_SCSC_BT_BLUEZ
+else
+ccflags-$(CONFIG_SCSC_BT_BLUEZ) += -DCONFIG_SCSC_BT_BLUEZ
+endif
+
+ifeq ($(CONFIG_SCSC_ANT), m)
+ccflags-y += -DCONFIG_SCSC_ANT
+else
+ccflags-$(CONFIG_SCSC_ANT) += -DCONFIG_SCSC_ANT
+endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+/* MX BT shared memory interface */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <asm/io.h>
+#include <linux/wakelock.h>
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/api/bsmhcp.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_bt_priv.h"
+#include "scsc_shm.h"
+#include "scsc_bt_hci.h"
+
+/* Scratch buffer for assembling outgoing ANT packets.
+ * NOTE(review): all uses are outside this view - presumably serialised
+ * by the single-writer rule on the ANT device; confirm.
+ */
+static u8 ant_write_buffer[ASMHCP_BUFFER_SIZE];
+
+/* To-host interrupt handler for the ANT shared-memory protocol.
+ * Wakes readers when the controller has produced cmd/data entries (or an
+ * error/panic is pending), and releases the write wake lock once the
+ * controller has consumed everything the driver queued.
+ */
+static void scsc_ant_shm_irq_handler(int irqbit, void *data)
+{
+	/* Clear interrupt */
+	scsc_service_mifintrbit_bit_clear(ant_service.service, irqbit);
+
+	ant_service.interrupt_count++;
+
+	/* Wake the reader operation */
+	if (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
+	    ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
+	    ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
+	    ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read ||
+	    atomic_read(&ant_service.error_count) != 0 ||
+	    ant_service.asmhcp_protocol->header.panic_deathbed_confession) {
+		ant_service.interrupt_read_count++;
+
+		/* Hold a short wake lock so the reader can run before suspend */
+		wake_lock_timeout(&ant_service.read_wake_lock, HZ);
+		wake_up(&ant_service.read_wait);
+	}
+
+	/* Controller caught up with all driver->controller rings: writing done */
+	if (ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write ==
+	    ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_read &&
+	    ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write ==
+	    ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_read) {
+		ant_service.interrupt_write_count++;
+
+		if (wake_lock_active(&ant_service.write_wake_lock)) {
+			ant_service.write_wake_unlock_count++;
+			wake_unlock(&ant_service.write_wake_lock);
+		}
+	}
+}
+
+/* Assign firmware/host interrupts for the ANT shared-memory protocol and
+ * publish the chosen IRQ bits in the protocol header so the firmware can
+ * use them.
+ */
+static void scsc_ant_shm_init_interrupt(void)
+{
+	/* To-host f/w IRQ allocations and ISR registrations */
+	ant_service.asmhcp_protocol->header.bg_to_ap_int_src =
+		scsc_service_mifintrbit_register_tohost(ant_service.service, scsc_ant_shm_irq_handler, NULL);
+
+	/* From-host f/w IRQ allocations */
+	ant_service.asmhcp_protocol->header.ap_to_bg_int_src =
+		scsc_service_mifintrbit_alloc_fromhost(ant_service.service, SCSC_MIFINTR_TARGET_R4);
+
+	SCSC_TAG_DEBUG(BT_COMMON, "Registered to-host IRQ bit %d, from-host IRQ bit %d\n",
+		       ant_service.asmhcp_protocol->header.bg_to_ap_int_src,
+		       ant_service.asmhcp_protocol->header.ap_to_bg_int_src);
+}
+
+/* Queue an ANT command packet onto the driver->controller cmd transfer
+ * ring and kick the firmware.  Returns @count on success, 0 when the
+ * ring is full, -EIO on corrupt ring indices.
+ * NOTE(review): @count is copied into td->data without an explicit
+ * bound check here - presumably the caller limits it to the TD data
+ * size; confirm.
+ */
+static ssize_t scsc_shm_ant_cmd_write(const unsigned char *data, size_t count)
+{
+	/* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
+	uint32_t tr_read = ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_read;
+	uint32_t tr_write = ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write;
+
+	struct ASMHCP_TD_CONTROL *td = &ant_service.asmhcp_protocol->cmd_driver_controller_transfer_ring[tr_write];
+
+	SCSC_TAG_DEBUG(BT_H4, "ANT_COMMAND_PKT (len=%zu, read=%u, write=%u)\n",
+		       count, tr_read, tr_write);
+
+	/* Index out of bounds check */
+	if (tr_read >= ASMHCP_TRANSFER_RING_CMD_SIZE || tr_write >= ASMHCP_TRANSFER_RING_CMD_SIZE) {
+		SCSC_TAG_ERR(BT_H4,
+			     "ANT_COMMAND_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n",
+			     tr_read, tr_write);
+		atomic_inc(&ant_service.error_count);
+		return -EIO;
+	}
+
+	/* Does the transfer ring have room for an entry */
+	if (BSMHCP_HAS_ROOM(tr_write, tr_read, ASMHCP_TRANSFER_RING_CMD_SIZE)) {
+		/* Fill the transfer descriptor with the ANT command data */
+		memcpy(td->data, data, count);
+		td->length = (u16)count;
+
+		/* Ensure the wake lock is acquired */
+		if (!wake_lock_active(&ant_service.write_wake_lock)) {
+			ant_service.write_wake_lock_count++;
+			wake_lock(&ant_service.write_wake_lock);
+		}
+
+		/* Increase the write pointer */
+		BSMHCP_INCREASE_INDEX(tr_write, ASMHCP_TRANSFER_RING_CMD_SIZE);
+		ant_service.asmhcp_protocol->header.mailbox_cmd_driv_ctr_write = tr_write;
+
+		/* Memory barrier to ensure out-of-order execution is completed */
+		mmiowb();
+
+		/* Trigger the interrupt in the mailbox */
+		scsc_service_mifintrbit_bit_set(
+			ant_service.service,
+			ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
+			SCSC_MIFINTR_TARGET_R4);
+	} else {
+		/* Transfer ring full. Only happens if the user attempt to send more ANT command packets than
+		 * available credits
+		 */
+		count = 0;
+	}
+
+	return count;
+}
+
+/* Queue an ANT data packet onto the driver->controller data transfer
+ * ring and kick the firmware.  Returns @count on success, 0 when the
+ * ring is full, -EIO on corrupt ring indices.  Mirrors
+ * scsc_shm_ant_cmd_write() for the data ring.
+ * NOTE(review): @count is copied into td->data without an explicit
+ * bound check here - presumably the caller limits it; confirm.
+ */
+static ssize_t scsc_shm_ant_data_write(const unsigned char *data, size_t count)
+{
+	/* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
+	uint32_t tr_read = ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_read;
+	uint32_t tr_write = ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write;
+
+	/* Temp vars */
+	struct ASMHCP_TD_CONTROL *td = &ant_service.asmhcp_protocol->data_driver_controller_transfer_ring[tr_write];
+
+	SCSC_TAG_DEBUG(BT_H4, "ANT_DATA_PKT (len=%zu, read=%u, write=%u)\n",
+		       count, tr_read, tr_write);
+
+	/* Index out of bounds check */
+	if (tr_read >= ASMHCP_TRANSFER_RING_DATA_SIZE || tr_write >= ASMHCP_TRANSFER_RING_DATA_SIZE) {
+		SCSC_TAG_ERR(
+			BT_H4,
+			"ANT_DATA_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n",
+			tr_read, tr_write);
+		atomic_inc(&ant_service.error_count);
+		return -EIO;
+	}
+
+	/* Does the transfer ring have room for an entry */
+	if (BSMHCP_HAS_ROOM(tr_write, tr_read, ASMHCP_TRANSFER_RING_DATA_SIZE)) {
+		/* Fill the transfer descriptor with the ANT command data */
+		memcpy(td->data, data, count);
+		td->length = (u16)count;
+
+		/* Ensure the wake lock is acquired */
+		if (!wake_lock_active(&ant_service.write_wake_lock)) {
+			ant_service.write_wake_lock_count++;
+			wake_lock(&ant_service.write_wake_lock);
+		}
+
+		/* Increase the write pointer */
+		BSMHCP_INCREASE_INDEX(tr_write, ASMHCP_TRANSFER_RING_DATA_SIZE);
+		ant_service.asmhcp_protocol->header.mailbox_data_driv_ctr_write = tr_write;
+
+		/* Memory barrier to ensure out-of-order execution is completed */
+		mmiowb();
+
+		/* Trigger the interrupt in the mailbox */
+		scsc_service_mifintrbit_bit_set(
+			ant_service.service,
+			ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
+			SCSC_MIFINTR_TARGET_R4);
+	}
+	else
+		/* Transfer ring full */
+		count = 0;
+
+	return count;
+}
+
+/* Copy one transfer descriptor to the userspace buffer, possibly over
+ * several calls (ant_service.read_offset tracks progress; the header is
+ * copied first, then the payload).  Clears read_operation when the whole
+ * TD has been consumed.  Returns bytes copied, or a negative error.
+ *
+ * Fix: when the TD failed validation, ret was set to -EFAULT but then
+ * immediately overwritten by the copy_to_user() result, so corrupt
+ * firmware data was still handed to userspace.  Fail the read
+ * immediately instead.  Also use %zd (signed) for ssize_t in warnings.
+ */
+static ssize_t scsc_ant_copy_td_to_buffer(char __user *buf, size_t len, struct ASMHCP_TD_CONTROL *td)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+	size_t copy_len = 0;
+
+	SCSC_TAG_DEBUG(BT_H4, "td (length=%u), len=%zu, read_offset=%zu\n",
+		       td->length, len, ant_service.read_offset);
+
+	/* Has the header been copied to userspace (aka is this the start of the copy operation) */
+	if (ant_service.read_offset < ANT_HEADER_LENGTH) {
+		/* Calculate the amount of data that can be transferred */
+		copy_len = min(ANT_HEADER_LENGTH - ant_service.read_offset, len);
+
+		/* Validate the TD before anything is exposed to userspace */
+		if (td->data[1] + ANT_HEADER_LENGTH + 1 != td->length) {
+			SCSC_TAG_ERR(BT_H4, "Firmware sent invalid ANT cmd/data\n");
+			atomic_inc(&ant_service.error_count);
+			return -EFAULT;
+		}
+		/* Copy the ANT header to the userspace buffer */
+		ret = copy_to_user(buf, &td->data[ant_service.read_offset], copy_len);
+		if (ret == 0) {
+			/* All good - Update our consumed information */
+			consumed = copy_len;
+			ant_service.read_offset += copy_len;
+			SCSC_TAG_DEBUG(BT_H4,
+				       "copied header: read_offset=%zu, consumed=%zu, ret=%zd, len=%zu, copy_len=%zu\n",
+				       ant_service.read_offset, consumed, ret, len, copy_len);
+		} else {
+			SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zd\n", ret);
+			ret = -EACCES;
+		}
+	}
+
+	/* Can more data be put into the userspace buffer */
+	if (ret == 0 && ant_service.read_offset >= ANT_HEADER_LENGTH && (len - consumed)) {
+		/* Calculate the amount of data that can be transferred */
+		copy_len = min((td->length - ant_service.read_offset), (len - consumed));
+
+		/* Copy the data to the user buffer */
+		ret = copy_to_user(&buf[consumed], &td->data[ant_service.read_offset], copy_len);
+		if (ret == 0) {
+			/* All good - Update our consumed information */
+			ant_service.read_offset += copy_len;
+			consumed += copy_len;
+
+			/* Have all data been copied to the userspace buffer */
+			if (ant_service.read_offset == td->length) {
+				/* All good - read operation is completed */
+				ant_service.read_offset = 0;
+				ant_service.read_operation = ANT_READ_OP_NONE;
+			}
+		} else {
+			SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zd\n", ret);
+			ret = -EACCES;
+		}
+	}
+
+	SCSC_TAG_DEBUG(BT_H4, "read_offset=%zu, consumed=%zu, ret=%zd, len=%zu, copy_len=%zu\n",
+		       ant_service.read_offset, consumed, ret, len, copy_len);
+
+	return ret == 0 ? consumed : ret;
+}
+
+/* Copy the current cmd-ring TD (if any) to userspace; 0 when the
+ * controller->driver cmd ring is empty.
+ */
+static ssize_t scsc_ant_cmd_read(char __user *buf, size_t len)
+{
+	struct ASMHCP_PROTOCOL *ap;
+	struct ASMHCP_TD_CONTROL *td;
+
+	if (ant_service.mailbox_cmd_ctr_driv_read == ant_service.mailbox_cmd_ctr_driv_write)
+		return 0;
+
+	ap = ant_service.asmhcp_protocol;
+	td = &ap->cmd_controller_driver_transfer_ring
+		[ant_service.mailbox_cmd_ctr_driv_read];
+
+	return scsc_ant_copy_td_to_buffer(buf, len, td);
+}
+
+/* Copy the current data-ring TD (if any) to userspace; 0 when the
+ * controller->driver data ring is empty.
+ */
+static ssize_t scsc_ant_data_read(char __user *buf, size_t len)
+{
+	struct ASMHCP_PROTOCOL *ap;
+	struct ASMHCP_TD_CONTROL *td;
+
+	if (ant_service.mailbox_data_ctr_driv_read == ant_service.mailbox_data_ctr_driv_write)
+		return 0;
+
+	ap = ant_service.asmhcp_protocol;
+	td = &ap->data_controller_driver_transfer_ring
+		[ant_service.mailbox_data_ctr_driv_read];
+
+	return scsc_ant_copy_td_to_buffer(buf, len, td);
+}
+
+/* Drain as many data-ring TDs into the userspace buffer as fit, advancing
+ * the cached read index after each fully-copied TD.  Stops early when a TD
+ * is only partially copied (read_operation stays ANT_READ_OP_DATA so
+ * scsc_shm_ant_read_continue() can finish it).  Returns bytes consumed or
+ * a negative error.
+ */
+static ssize_t scsc_bt_shm_ant_read_data(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	while (ant_service.read_operation == ANT_READ_OP_NONE &&
+	       ret == 0 &&
+	       ant_service.mailbox_data_ctr_driv_read != ant_service.mailbox_data_ctr_driv_write) {
+		/* Start a data copy to userspace */
+		ant_service.read_operation = ANT_READ_OP_DATA;
+		ant_service.read_index = ant_service.mailbox_data_ctr_driv_read;
+		ret = scsc_ant_data_read(&buf[consumed], len - consumed);
+		if (ret > 0) {
+			/* All good - Update our consumed information */
+			consumed += ret;
+			ret = 0;
+
+			/* Update the index if all the data could be copied to the userspace buffer
+			 * otherwise stop processing the data
+			 */
+			if (ant_service.read_operation == ANT_READ_OP_NONE)
+				BSMHCP_INCREASE_INDEX(ant_service.mailbox_data_ctr_driv_read,
+						      ASMHCP_TRANSFER_RING_DATA_SIZE);
+			else
+				break;
+		}
+	}
+
+	return ret == 0 ? consumed : ret;
+}
+
+/* Drain as many cmd-ring TDs into the userspace buffer as fit; the cmd
+ * counterpart of scsc_bt_shm_ant_read_data().  A partially-copied TD
+ * leaves read_operation at ANT_READ_OP_CMD for the next read() call.
+ * Returns bytes consumed or a negative error.
+ */
+static ssize_t scsc_bt_shm_ant_read_cmd(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	while (ant_service.read_operation == ANT_READ_OP_NONE &&
+	       ret == 0 &&
+	       ant_service.mailbox_cmd_ctr_driv_read != ant_service.mailbox_cmd_ctr_driv_write) {
+		/* Start a cmd copy to userspace */
+		ant_service.read_operation = ANT_READ_OP_CMD;
+		ant_service.read_index = ant_service.mailbox_cmd_ctr_driv_read;
+		ret = scsc_ant_cmd_read(&buf[consumed], len - consumed);
+		if (ret > 0) {
+			/* All good - Update our consumed information */
+			consumed += ret;
+			ret = 0;
+
+			/* Update the index if all the data could be copied to the userspace buffer
+			 * otherwise stop processing the cmds
+			 */
+			if (ant_service.read_operation == ANT_READ_OP_NONE)
+				BSMHCP_INCREASE_INDEX(ant_service.mailbox_cmd_ctr_driv_read,
+						      ASMHCP_TRANSFER_RING_CMD_SIZE);
+			else
+				break;
+		}
+	}
+
+	return ret == 0 ? consumed : ret;
+}
+
+/* Finish a partially-copied TD from a previous read() call (cmd or data)
+ * and advance the matching ring index once the TD is fully consumed.
+ * Returns bytes copied or a negative error.
+ *
+ * Fix: the cmd branch tested
+ * "ant_service.read_operation == ANT_READ_OP_NONE" twice in a row
+ * (copy-paste duplication); the redundant nested check is removed.
+ */
+static ssize_t scsc_shm_ant_read_continue(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+
+	/* Is a cmd read operation ongoing */
+	if (ant_service.read_operation == ANT_READ_OP_CMD) {
+		SCSC_TAG_DEBUG(BT_H4, "ANT_READ_OP_CMD\n");
+
+		/* Copy data into the userspace buffer */
+		ret = scsc_ant_cmd_read(buf, len);
+		if (ant_service.read_operation == ANT_READ_OP_NONE)
+			/* All done - increase the read pointer and continue */
+			BSMHCP_INCREASE_INDEX(ant_service.mailbox_cmd_ctr_driv_read,
+					      ASMHCP_TRANSFER_RING_CMD_SIZE);
+	} else if (ant_service.read_operation == ANT_READ_OP_DATA) {
+		/* Is a data read operation ongoing */
+		SCSC_TAG_DEBUG(BT_H4, "ANT_READ_OP_DATA\n");
+
+		/* Copy data into the userspace buffer */
+		ret = scsc_ant_data_read(buf, len);
+		if (ant_service.read_operation == ANT_READ_OP_NONE)
+			/* All done - increase the read pointer and continue */
+			BSMHCP_INCREASE_INDEX(ant_service.mailbox_data_ctr_driv_read, ASMHCP_TRANSFER_RING_DATA_SIZE);
+	}
+
+	return ret;
+}
+
+/* Read HCI cmd and ANT data packets from the shared memory transfer rings
+ * into the supplied userspace buffer.
+ *
+ * Only a single concurrent reader is permitted. Unless O_NONBLOCK is set,
+ * the call blocks until a ring contains entries, an error is recorded or
+ * the wait is interrupted. Consumed entries are acknowledged to the
+ * firmware by publishing the read indexes and raising the AP-to-BG
+ * interrupt when anything was read.
+ *
+ * Returns the number of bytes copied or a negative error code.
+ *
+ * Fix: the wait below now uses ant_service.read_wait; it previously waited
+ * on bt_service.read_wait, which is never woken by ANT traffic (poll() and
+ * the ANT interrupt path use ant_service.read_wait), so a blocking reader
+ * could stall indefinitely.
+ */
+ssize_t scsc_shm_ant_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+ ssize_t consumed = 0;
+ ssize_t ret = 0;
+ ssize_t res;
+ bool gen_bg_int = false;
+
+ /* Only 1 reader is allowed */
+ if (atomic_inc_return(&ant_service.ant_readers) != 1) {
+ atomic_dec(&ant_service.ant_readers);
+ return -EIO;
+ }
+
+ /* Has an error been detected then just return with an error */
+ if (atomic_read(&ant_service.error_count) != 0) {
+ atomic_dec(&ant_service.ant_readers);
+ return -EIO;
+ }
+
+ /* Update the cached variables with the non-cached variables */
+ ant_service.mailbox_cmd_ctr_driv_write = ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write;
+ ant_service.mailbox_data_ctr_driv_write = ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write;
+
+ /* put the remaining data from the transfer ring into the available userspace buffer */
+ if (ant_service.read_operation != ANT_READ_OP_NONE) {
+ ret = scsc_shm_ant_read_continue(buf, len);
+ /* Update the consumed variable in case an operation was ongoing */
+ if (ret > 0) {
+ consumed = ret;
+ ret = 0;
+ }
+ }
+
+ /* Main loop - Can only be entered when no operation is present on entering this function
+ * or no hardware error has been detected. It loops until data has been placed in the
+ * userspace buffer or an error has been detected
+ */
+ while (atomic_read(&ant_service.error_count) == 0 && consumed == 0) {
+ /* Does any of the read/write pairs differs */
+ if (ant_service.mailbox_data_ctr_driv_read == ant_service.mailbox_data_ctr_driv_write &&
+ ant_service.mailbox_cmd_ctr_driv_read == ant_service.mailbox_cmd_ctr_driv_write &&
+ atomic_read(&ant_service.error_count) == 0 &&
+ ant_service.asmhcp_protocol->header.panic_deathbed_confession == 0) {
+ /* Don't wait if in NONBLOCK mode */
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ /* All read/write pairs are identical - wait for the firmware. The conditional
+ * check is used to verify that a read/write pair has actually changed
+ */
+ ret = wait_event_interruptible(ant_service.read_wait,
+ (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
+ ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read ||
+ atomic_read(&ant_service.error_count) != 0 ||
+ ant_service.asmhcp_protocol->header.panic_deathbed_confession));
+
+ /* Has an error been detected elsewhere in the driver then just return from this function */
+ if (atomic_read(&ant_service.error_count) != 0)
+ break;
+
+ /* Any failures is handled by the userspace application */
+ if (ret)
+ break;
+
+ /* Refresh our write indexes before starting to process the protocol */
+ ant_service.mailbox_cmd_ctr_driv_write =
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write;
+ ant_service.mailbox_data_ctr_driv_write =
+ ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write;
+ }
+
+ /* First: process any pending cmd that needs to be sent to userspace */
+ res = scsc_bt_shm_ant_read_cmd(&buf[consumed], len - consumed);
+ if (res > 0)
+ consumed += res;
+ else
+ ret = res;
+
+ /* Second: process any pending data that needs to be sent to userspace */
+ res = scsc_bt_shm_ant_read_data(&buf[consumed], len - consumed);
+ if (res > 0)
+ consumed += res;
+ else
+ ret = res;
+ }
+
+ /* If anything was read, generate the appropriate interrupt(s) */
+ if (ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read != ant_service.mailbox_cmd_ctr_driv_read ||
+ ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read != ant_service.mailbox_data_ctr_driv_read)
+ gen_bg_int = true;
+
+ /* Update the read index for all transfer rings */
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read = ant_service.mailbox_cmd_ctr_driv_read;
+ ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read = ant_service.mailbox_data_ctr_driv_read;
+
+ /* Ensure the data is updating correctly in memory */
+ mmiowb();
+
+ if (gen_bg_int)
+ scsc_service_mifintrbit_bit_set(ant_service.service,
+ ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
+ SCSC_MIFINTR_TARGET_R4);
+
+ /* Decrease the ant readers counter */
+ atomic_dec(&ant_service.ant_readers);
+
+ return ret == 0 ? consumed : ret;
+}
+
+/* Write ANT cmd/data packets from userspace into the shared memory
+ * transfer rings.
+ *
+ * Only a single concurrent writer is permitted. Partial packets are
+ * accumulated in ant_write_buffer (tracked by ant_write_offset) until a
+ * complete packet is available, which is then pushed to the cmd or data
+ * ring according to its header byte.
+ *
+ * Returns the number of bytes accepted or a negative error code.
+ *
+ * Fixes: the "only one reader allowed" log in this writer now says
+ * "writer"; ssize_t values are logged with %zd instead of %zu; the
+ * data-branch "got=" log now prints length + ant_write_offset (the bytes
+ * actually available, matching the completeness test and the cmd branch)
+ * instead of length - ant_write_offset.
+ */
+ssize_t scsc_shm_ant_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
+{
+ size_t length;
+ size_t ant_pkt_len;
+ ssize_t written = 0;
+ ssize_t ret = 0;
+ size_t pkt_count = 0;
+
+ SCSC_TAG_DEBUG(BT_H4, "enter\n");
+
+ UNUSED(file);
+ UNUSED(offset);
+
+ /* Only 1 writer is allowed */
+ if (atomic_inc_return(&ant_service.ant_writers) != 1) {
+ SCSC_TAG_DEBUG(BT_H4, "only one writer allowed\n");
+ atomic_dec(&ant_service.ant_writers);
+ return -EIO;
+ }
+
+ /* Has an error been detected then just return with an error */
+ if (atomic_read(&ant_service.error_count) != 0) {
+ SCSC_TAG_DEBUG(BT_H4, "error has occured\n");
+ atomic_dec(&ant_service.ant_writers);
+ return -EIO;
+ }
+
+ while (written != count && ret == 0) {
+ length = min(count - written, sizeof(ant_write_buffer) - ant_service.ant_write_offset);
+ SCSC_TAG_DEBUG(BT_H4, "count: %zu, length: %zu, ant_write_offset: %zu, written:%zd, size:%zu\n",
+ count, length, ant_service.ant_write_offset,
+ written - (pkt_count * 2), sizeof(ant_write_buffer));
+
+ /* Is there room in the temp buffer */
+ if (length == 0) {
+ SCSC_TAG_ERR(BT_H4, "no room in the buffer\n");
+ atomic_inc(&ant_service.error_count);
+ ret = -EIO;
+ break;
+ }
+
+ /* Copy the userspace data to the target buffer */
+ ret = copy_from_user(&ant_write_buffer[ant_service.ant_write_offset], &buf[written], length);
+
+ if (ret == 0) {
+ /* Is the message a data message? */
+ if (ant_write_buffer[0] == ANT_DATA_MSG) {
+ /* Extract the data packet length */
+ ant_pkt_len = ant_write_buffer[1] + ANT_HEADER_LENGTH + 1;
+
+ /* Is it a complete packet available */
+ if (ant_pkt_len <= (length + ant_service.ant_write_offset)) {
+ /* Transfer the packet to the ANT data transfer ring */
+ ret = scsc_shm_ant_data_write(&ant_write_buffer[2], ant_pkt_len - 2);
+ if (ret >= 0) {
+ written += (ant_pkt_len - ant_service.ant_write_offset);
+ pkt_count += 1;
+ ant_service.ant_write_offset = 0;
+ ret = 0;
+ }
+ } else {
+ /* Still needing data to have the complete packet */
+ SCSC_TAG_WARNING(BT_H4,
+ "missing data (need=%zu, got=%zu)\n",
+ ant_pkt_len, (length + ant_service.ant_write_offset));
+ written += length;
+ ant_service.ant_write_offset += (u32) length;
+ }
+ /* Is the message a command message? */
+ } else if (ant_write_buffer[0] == ANT_COMMAND_MSG) {
+ /* Extract the ANT command packet length */
+ ant_pkt_len = ant_write_buffer[1] + ANT_HEADER_LENGTH + 1;
+
+ /* Is it a complete packet available */
+ if ((ant_pkt_len) <= (length + ant_service.ant_write_offset)) {
+ /* Transfer the packet to the ANT command transfer ring */
+ ret = scsc_shm_ant_cmd_write(&ant_write_buffer[2], ant_pkt_len - 2);
+ if (ret >= 0) {
+ written += (ant_pkt_len - ant_service.ant_write_offset);
+ pkt_count += 1;
+ ant_service.ant_write_offset = 0;
+ ret = 0;
+ }
+ } else {
+ /* Still needing data to have the complete packet */
+ SCSC_TAG_WARNING(BT_H4,
+ "missing data (need=%zu, got=%zu)\n",
+ (ant_pkt_len), (length + ant_service.ant_write_offset));
+ written += length;
+ ant_service.ant_write_offset += (u32) length;
+ }
+ /* Is there less data than a header then just wait for more */
+ } else if (length <= ANT_HEADER_LENGTH) {
+ ant_service.ant_write_offset += length;
+ written += length;
+ /* Header is unknown - unable to proceed */
+ } else {
+ atomic_inc(&ant_service.error_count);
+ ret = -EIO;
+ }
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_from_user returned: %zd\n", ret);
+ ret = -EACCES;
+ }
+ }
+
+ SCSC_TAG_DEBUG(BT_H4, "ant_write_offset=%zu, ret=%zd, written=%zd\n",
+ ant_service.ant_write_offset, ret, written - (pkt_count * 2));
+
+ /* Decrease the ant writers counter */
+ atomic_dec(&ant_service.ant_writers);
+
+ return ret == 0 ? written : ret;
+}
+
+/* poll() handler for the ANT character device.
+ *
+ * Registers the caller on the ANT read wait queue and reports:
+ *  - POLLERR when a driver/firmware error has been recorded
+ *  - POLLIN | POLLRDNORM when either transfer ring holds entries the
+ *    host has not consumed yet
+ *  - POLLOUT otherwise (writing is considered always possible)
+ */
+unsigned int scsc_shm_ant_poll(struct file *file, poll_table *wait)
+{
+ /* Add the wait queue to the polling queue */
+ poll_wait(file, &ant_service.read_wait, wait);
+
+ if (atomic_read(&ant_service.error_count) != 0)
+ return POLLERR;
+
+ /* Does either transfer ring hold data for the host? */
+ if (ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_write !=
+ ant_service.asmhcp_protocol->header.mailbox_data_ctr_driv_read ||
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_write !=
+ ant_service.asmhcp_protocol->header.mailbox_cmd_ctr_driv_read) {
+ SCSC_TAG_DEBUG(BT_H4, "queue(s) changed\n");
+ return POLLIN | POLLRDNORM; /* readable */
+ }
+
+ SCSC_TAG_DEBUG(BT_H4, "no change\n");
+
+ /* error_count was zero above; re-check in case it changed meanwhile */
+ return (atomic_read(&ant_service.error_count) != 0) ? POLLERR : POLLOUT;
+}
+
+/* Initialise the shared memory interface for ANT.
+ *
+ * Maps the ASMHCP protocol structure from the shared memory reference,
+ * clears it, writes the protocol magic value, resets the cached ring
+ * indexes and registers the interrupt handlers.
+ *
+ * Returns 0 on success or -ENOMEM if the reference cannot be mapped.
+ */
+int scsc_ant_shm_init(void)
+{
+ /* Get kmem pointer to the shared memory ref */
+ ant_service.asmhcp_protocol = scsc_mx_service_mif_addr_to_ptr(ant_service.service, ant_service.asmhcp_ref);
+ if (ant_service.asmhcp_protocol == NULL) {
+ SCSC_TAG_ERR(BT_COMMON, "couldn't map kmem to shm_ref 0x%08x\n", (u32)ant_service.asmhcp_ref);
+ return -ENOMEM;
+ }
+
+ /* Clear the protocol shared memory area */
+ memset(ant_service.asmhcp_protocol, 0, sizeof(*ant_service.asmhcp_protocol));
+ ant_service.asmhcp_protocol->header.magic_value = ASMHCP_PROTOCOL_MAGICVALUE;
+ /* Reset the cached ring indexes to match the cleared protocol header */
+ ant_service.mailbox_data_ctr_driv_read = 0;
+ ant_service.mailbox_data_ctr_driv_write = 0;
+ ant_service.mailbox_cmd_ctr_driv_read = 0;
+ ant_service.mailbox_cmd_ctr_driv_write = 0;
+ ant_service.read_index = 0;
+
+ /* Initialise the interrupt handlers */
+ scsc_ant_shm_init_interrupt();
+
+ return 0;
+}
+
+/* Terminate the shared memory interface for ANT, stopping its thread.
+ *
+ * Releases the to-host and from-host interrupt bits registered during
+ * init, then drops the mapped protocol pointer.
+ *
+ * Note: The service must be stopped prior to calling this function.
+ * The shared memory can only be released after calling this function.
+ */
+void scsc_ant_shm_exit(void)
+{
+ /* Release IRQs */
+ if (ant_service.asmhcp_protocol != NULL) {
+ scsc_service_mifintrbit_unregister_tohost(
+ ant_service.service,
+ ant_service.asmhcp_protocol->header.bg_to_ap_int_src);
+
+ scsc_service_mifintrbit_free_fromhost(
+ ant_service.service,
+ ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
+ SCSC_MIFINTR_TARGET_R4);
+ }
+
+ /* Clear all control structures */
+ ant_service.asmhcp_protocol = NULL;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+/* Before submitting new changes to this file please make sure to run the module tests to verify
+ * that the change didn't break anything. Also, make sure to write new tests that captures the
+ * change. The module tests can be found in "vendor/samsung_slsi/scsc_tools/kernel_unit_test/"
+ * from where they are run with "make". If needed its git project name is:
+ * "Connectivity/Android/platform/vendor/samsung_slsi/scsc_tools/kernel_unit_test" */
+
+/* MX BT shared memory interface */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <asm/io.h>
+#include <linux/wakelock.h>
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/api/bsmhcp.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_bt_priv.h"
+#include "scsc_shm.h"
+/**
+ * Coex AVDTP detection.
+ *
+ * Strategy:
+ *
+ * - On the L2CAP signaling CID, look for connect requests with the AVDTP PSM
+ *
+ * - Assume the first AVDTP connection is the signaling channel.
+ * (AVDTP 1.3, section 5.4.6 "Transport and Signaling Channel Establishment")
+ *
+ * - If a signaling channel exists, assume the next connection is the streaming channel
+ *
+ * - If a streaming channel exists, look for AVDTP start, suspend, abort and close signals
+ * -- When one of these is found, signal the FW with updated acl_id and cid
+ *
+ * - If the ACL is torn down, make sure to clean up.
+ *
+ * */
+
+/* Match an L2CAP connect-response against the CID previously recorded for
+ * this detection (direction-dependent). 'avdtp' is a POINTER to a
+ * detection connection struct. */
+#define IS_VALID_CID_CONN_RESP(is_tx, avdtp, data) ((is_tx && avdtp->dst_cid == HCI_L2CAP_SOURCE_CID(data)) || \
+ (!is_tx && avdtp->src_cid == HCI_L2CAP_SOURCE_CID(data)))
+
+
+/* Match an L2CAP disconnect-request against both recorded CIDs.
+ * NOTE: unlike the other macros here, 'avdtp' is a STRUCT VALUE
+ * (member access via '.'), matching call sites that pass
+ * avdtp_hci->signal / avdtp_hci->stream directly. */
+#define IS_VALID_CID_DISCONNECT_REQ(is_tx, avdtp, data) ((is_tx && avdtp.src_cid == HCI_L2CAP_SOURCE_CID(data) && \
+ avdtp.dst_cid == HCI_L2CAP_RSP_DEST_CID(data)) || \
+ (!is_tx && avdtp.src_cid == HCI_L2CAP_RSP_DEST_CID(data) && \
+ avdtp.dst_cid == HCI_L2CAP_SOURCE_CID(data)))
+
+/* Record the source CID from a connect-request: our side (src_cid) when we
+ * transmitted the request, the peer's side (dst_cid) when we received it. */
+#define STORE_DETECTED_CID_CONN_REQ(is_tx, avdtp, data) \
+ do { \
+ if (is_tx) { \
+ avdtp->src_cid = HCI_L2CAP_SOURCE_CID(data); \
+ } else { \
+ avdtp->dst_cid = HCI_L2CAP_SOURCE_CID(data); \
+ } \
+ } while (0)
+
+/* Record the destination CID carried in a connect-response, again keyed on
+ * which side sent the response. */
+#define STORE_DETECTED_CID_CONN_RESP(is_tx, avdtp, data) \
+ do { \
+ if (is_tx) { \
+ avdtp->src_cid = HCI_L2CAP_RSP_DEST_CID(data); \
+ } else { \
+ avdtp->dst_cid = HCI_L2CAP_RSP_DEST_CID(data); \
+ } \
+ } while (0)
+
+/* When building the userspace module unit tests, route debug output to
+ * stdout instead of the kernel log ring. */
+#ifdef ENABLE_MODULE_TESTS_DEBUG
+#undef SCSC_TAG_DEBUG
+#define SCSC_TAG_DEBUG(tag, fmt, ...) \
+ fprintf(stdout, "%s:%d:%s(): " fmt, __FILE__, __LINE__, __func__, __VA_ARGS__)
+#endif
+
+/* Forward declarations */
+void scsc_avdtp_detect_reset(struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+ bool reset_signal,
+ bool reset_signal_ongoing,
+ bool reset_stream,
+ bool reset_stream_ongoing,
+ bool reset_local_seids,
+ bool reset_remote_seids);
+
+/* Walk the singly-linked list of tracked HCI connections and return the
+ * entry matching the supplied connection handle, or NULL when no
+ * detection exists for it.
+ */
+static struct scsc_bt_avdtp_detect_hci_connection *scsc_avdtp_detect_search_hci_connection(
+	u16 hci_connection_handle)
+{
+	struct scsc_bt_avdtp_detect_hci_connection *entry;
+
+	for (entry = bt_service.avdtp_detect.connections; entry; entry = entry->next)
+		if (entry->hci_connection_handle == hci_connection_handle)
+			break;
+
+	return entry;
+}
+
+/* Find existing detection for a given connection handle. If 'create' is
+ * true, a new detection will be created if it doesn't exist. The returned
+ * element is locked and must be unlocked before moving to another context;
+ * NULL is returned when nothing was found/created.
+ *
+ * Fix: removed a dead store ('avdtp_hci = NULL;' immediately overwritten
+ * by 'avdtp_hci = recheck_avdtp_hci;') in the lost-race path.
+ */
+static struct scsc_bt_avdtp_detect_hci_connection *scsc_avdtp_detect_find_or_create_hci_connection(
+ u16 hci_connection_handle,
+ bool create)
+{
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci;
+
+ spin_lock(&bt_service.avdtp_detect.lock);
+
+ avdtp_hci = scsc_avdtp_detect_search_hci_connection(hci_connection_handle);
+ if (avdtp_hci)
+ spin_lock(&avdtp_hci->lock);
+
+ /* Unlock the list again and check if a new element must be created. If that is the case,
+ * malloc the new memory and lock the list afterwards such that others are not spinning for
+ * some undefined amount of time. The trade-off in this construction is that the memory
+ * potentially is allocated twice in rare situations, and the list must therefore be locked
+ * and searched again before inserting.
+ */
+ spin_unlock(&bt_service.avdtp_detect.lock);
+
+ /* Check if the existing detection was found. If not create it */
+ if (!avdtp_hci && create) {
+ avdtp_hci = kmalloc(sizeof(struct scsc_bt_avdtp_detect_hci_connection), GFP_KERNEL);
+ if (avdtp_hci) {
+ struct scsc_bt_avdtp_detect_hci_connection *head;
+ struct scsc_bt_avdtp_detect_hci_connection *recheck_avdtp_hci;
+
+ memset(avdtp_hci, 0, sizeof(struct scsc_bt_avdtp_detect_hci_connection));
+ avdtp_hci->signal.type = BT_AVDTP_CONN_TYPE_SIGNAL;
+ avdtp_hci->stream.type = BT_AVDTP_CONN_TYPE_STREAM;
+ avdtp_hci->ongoing.incoming_signal.type = BT_AVDTP_CONN_TYPE_SIGNAL;
+ avdtp_hci->ongoing.outgoing_signal.type = BT_AVDTP_CONN_TYPE_SIGNAL;
+ avdtp_hci->ongoing.incoming_stream.type = BT_AVDTP_CONN_TYPE_STREAM;
+ avdtp_hci->ongoing.outgoing_stream.type = BT_AVDTP_CONN_TYPE_STREAM;
+ avdtp_hci->signal.state = BT_AVDTP_STATE_IDLE_SIGNALING;
+ avdtp_hci->signal.src_cid = 0;
+ avdtp_hci->signal.dst_cid = 0;
+ avdtp_hci->hci_connection_handle = hci_connection_handle;
+ scsc_avdtp_detect_reset(avdtp_hci, false, true, true, true, true, true);
+
+ /* The element is ready for insertion into the list. Recheck the list to make sure that
+ * the hci handle hasn't been detected meanwhile.
+ */
+ spin_lock(&bt_service.avdtp_detect.lock);
+
+ recheck_avdtp_hci = scsc_avdtp_detect_search_hci_connection(hci_connection_handle);
+ if (recheck_avdtp_hci == NULL) {
+ /* Insert into list */
+ spin_lock_init(&avdtp_hci->lock);
+ spin_lock(&avdtp_hci->lock);
+ head = bt_service.avdtp_detect.connections;
+ bt_service.avdtp_detect.connections = avdtp_hci;
+ avdtp_hci->next = head;
+ spin_unlock(&bt_service.avdtp_detect.lock);
+ } else {
+ /* The element was already present. Free the allocated memory and return the found
+ * element.
+ */
+ spin_lock(&recheck_avdtp_hci->lock);
+ spin_unlock(&bt_service.avdtp_detect.lock);
+ kfree(avdtp_hci);
+ avdtp_hci = recheck_avdtp_hci;
+ }
+ }
+ }
+ return avdtp_hci;
+}
+
+/* Return the l2cap tracking struct (signal or stream, per direction) that
+ * a connection request/response should be matched against. Works for both
+ * types since their internal structures are equal. Returns NULL when no
+ * hci detection was supplied.
+ */
+static struct scsc_bt_avdtp_detect_connection *scsc_avdtp_detect_find_l2cap_connection(struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+	enum scsc_bt_avdtp_detect_conn_req_direction_enum direction)
+{
+	bool outgoing = (direction == BT_AVDTP_CONN_REQ_DIR_OUTGOING);
+
+	if (!avdtp_hci)
+		return NULL;
+
+	/* Once the signaling channel is complete, the next connection is
+	 * assumed to be a stream channel */
+	if (avdtp_hci->signal.state == BT_AVDTP_STATE_COMPLETE_SIGNALING)
+		return outgoing ? &avdtp_hci->ongoing.outgoing_stream :
+				  &avdtp_hci->ongoing.incoming_stream;
+
+	/* Otherwise we are still negotiating the signaling channel */
+	return outgoing ? &avdtp_hci->ongoing.outgoing_signal :
+			  &avdtp_hci->ongoing.incoming_signal;
+}
+
+/* Handle CONNECTION_REQUEST and detect signal or stream connections. The function handles both RX and TX, where
+ * connect requests in the TX direction are mapped to "outgoing" and the RX direction to "incoming".
+ *
+ * Locking: find_or_create returns the detection with its per-connection
+ * lock held; it is released at the bottom of this function. */
+static void scsc_bt_avdtp_detect_connection_conn_req_handling(uint16_t hci_connection_handle,
+ const unsigned char *data,
+ uint16_t length,
+ bool is_tx)
+{
+ struct scsc_bt_avdtp_detect_connection *avdtp_l2cap = NULL;
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci = NULL;
+
+ /* Ignore everything else than the PSM */
+ if (HCI_L2CAP_CON_REQ_PSM(data) == L2CAP_AVDTP_PSM) {
+ // Check if there is already a detection for the given hci handle; create one on demand
+ avdtp_hci = scsc_avdtp_detect_find_or_create_hci_connection(
+ hci_connection_handle,
+ true);
+
+ avdtp_l2cap = scsc_avdtp_detect_find_l2cap_connection(avdtp_hci,
+ is_tx ? BT_AVDTP_CONN_REQ_DIR_OUTGOING : BT_AVDTP_CONN_REQ_DIR_INCOMING);
+
+ if (avdtp_hci && avdtp_l2cap) {
+ if (avdtp_l2cap->state == BT_AVDTP_STATE_IDLE_SIGNALING) {
+
+ /* AVDTP signal channel was detected - store dst_cid or src_cid depending on the transmit
+ * direction, and store the connection_handle. */
+ STORE_DETECTED_CID_CONN_REQ(is_tx, avdtp_l2cap, data);
+ avdtp_l2cap->state = BT_AVDTP_STATE_PENDING_SIGNALING;
+ SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X, aclid: 0x%04X (tx=%u)\n",
+ avdtp_l2cap->dst_cid,
+ avdtp_l2cap->src_cid,
+ avdtp_hci->hci_connection_handle,
+ is_tx);
+ } else if (avdtp_l2cap->state == BT_AVDTP_STATE_IDLE_STREAMING &&
+ avdtp_hci->signal.state == BT_AVDTP_STATE_COMPLETE_SIGNALING) {
+
+ /* AVDTP stream channel was detected - store dst_cid or src_cid depending on the transmit
+ * direction. */
+ STORE_DETECTED_CID_CONN_REQ(is_tx, avdtp_l2cap, data);
+ avdtp_l2cap->state = BT_AVDTP_STATE_PENDING_STREAMING;
+ SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X, aclid: 0x%04X (%u)\n",
+ avdtp_l2cap->dst_cid,
+ avdtp_l2cap->src_cid,
+ avdtp_hci->hci_connection_handle,
+ is_tx);
+ }
+ }
+ /* Release the per-connection lock taken by find_or_create */
+ if (avdtp_hci)
+ spin_unlock(&avdtp_hci->lock);
+ }
+}
+
+/* Handle CONNECTION_RESPONSE and detect signal or stream connections. The function handles both RX and TX, where
+ * connect responses in the TX direction are mapped to "incoming" and the RX direction to "outgoing"
+ * (a response travels opposite to the request that opened the channel).
+ *
+ * Locking: find_or_create (create=false) returns the detection with its
+ * per-connection lock held; it is released at the bottom. */
+static void scsc_bt_avdtp_detect_connection_conn_resp_handling(uint16_t hci_connection_handle,
+ const unsigned char *data,
+ uint16_t length,
+ bool is_tx)
+{
+
+ /* Check if there is already a signal connection */
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci =
+ scsc_avdtp_detect_find_or_create_hci_connection(hci_connection_handle, false);
+ struct scsc_bt_avdtp_detect_connection *avdtp_l2cap =
+ scsc_avdtp_detect_find_l2cap_connection(avdtp_hci,
+ is_tx ? BT_AVDTP_CONN_REQ_DIR_INCOMING : BT_AVDTP_CONN_REQ_DIR_OUTGOING);
+ /* Only consider RSP on expected connection handle */
+ if (avdtp_hci && avdtp_l2cap) {
+ if (HCI_L2CAP_CON_RSP_RESULT(data) == HCI_L2CAP_CON_RSP_RESULT_SUCCESS) {
+ if (IS_VALID_CID_CONN_RESP(is_tx, avdtp_l2cap, data) &&
+ avdtp_l2cap->state == BT_AVDTP_STATE_PENDING_SIGNALING) {
+
+ /* If we were waiting to complete an AVDTP signal detection - store the dst_cid or src_cid depending
+ * on the transmit direction */
+ STORE_DETECTED_CID_CONN_RESP(is_tx, avdtp_l2cap, data);
+ avdtp_l2cap->state = BT_AVDTP_STATE_COMPLETE_SIGNALING;
+
+ /* Switch to use "signal" and delete "ongoing" since the AVDTP signaling has now been
+ * detected */
+ avdtp_hci->signal = *avdtp_l2cap;
+ scsc_avdtp_detect_reset(avdtp_hci, false, true, false, false, false, false);
+ SCSC_TAG_DEBUG(BT_H4, "Signaling dst CID: 0x%04X, src CID: 0x%04X, aclid: 0x%04X (tx=%u)\n",
+ avdtp_hci->signal.dst_cid,
+ avdtp_hci->signal.src_cid,
+ avdtp_hci->hci_connection_handle,
+ is_tx);
+
+ } else if (IS_VALID_CID_CONN_RESP(is_tx, avdtp_l2cap, data) &&
+ avdtp_l2cap->state == BT_AVDTP_STATE_PENDING_STREAMING) {
+
+ /* If we were waiting to complete an AVDTP stream detection - store the dst_cid or src_cid depending
+ * on the transmit direction */
+ STORE_DETECTED_CID_CONN_RESP(is_tx, avdtp_l2cap, data);
+ avdtp_l2cap->state = BT_AVDTP_STATE_COMPLETE_STREAMING;
+
+ /* Switch to use "stream". If both an incoming and outgoing connection response was "expected"
+ * the first one wins. */
+ avdtp_hci->stream = *avdtp_l2cap;
+ scsc_avdtp_detect_reset(avdtp_hci, false, false, false, true, false, false);
+
+ SCSC_TAG_DEBUG(BT_H4, "Streaming dst CID: 0x%04X, src CID: 0x%04X, aclid: 0x%04X (tx=%u)\n",
+ avdtp_hci->stream.dst_cid,
+ avdtp_hci->stream.src_cid,
+ avdtp_hci->hci_connection_handle,
+ is_tx);
+ }
+ } else if (HCI_L2CAP_CON_RSP_RESULT(data) >= HCI_L2CAP_CON_RSP_RESULT_REFUSED) {
+ /* In case of a CONN_REFUSED the existing CIDs must be cleaned up such that the detection is ready
+ * for a new connection request */
+ if (IS_VALID_CID_CONN_RESP(is_tx, avdtp_l2cap, data) &&
+ avdtp_l2cap->state == BT_AVDTP_STATE_PENDING_SIGNALING) {
+ avdtp_l2cap->dst_cid = avdtp_l2cap->src_cid = 0;
+ avdtp_l2cap->state = BT_AVDTP_STATE_IDLE_SIGNALING;
+
+ } else if (IS_VALID_CID_CONN_RESP(is_tx, avdtp_l2cap, data) &&
+ avdtp_l2cap->state == BT_AVDTP_STATE_PENDING_STREAMING) {
+
+ /* Connection refused on streaming connect request. Reset dst_cid and src_cid, and
+ * reset the state to IDLE such that new connection requests can be detected */
+ avdtp_l2cap->dst_cid = avdtp_l2cap->src_cid = 0;
+ avdtp_l2cap->state = BT_AVDTP_STATE_IDLE_STREAMING;
+
+ }
+ }
+ }
+ /* Release the per-connection lock taken by find_or_create */
+ if (avdtp_hci)
+ spin_unlock(&avdtp_hci->lock);
+}
+
+/* Handle DISCONNECT_REQUEST and remove all current detections on the specific CIDs.
+ *
+ * Returns true when the FW should be signalled about the change (stream
+ * channel went away).
+ * NOTE(review): when the SIGNALING channel is disconnected, 'result' stays
+ * false even though any associated stream is reset too - confirm whether
+ * the FW should also be signalled in that branch. */
+static bool scsc_bt_avdtp_detect_connection_disconnect_req_handling(uint16_t hci_connection_handle,
+ const unsigned char *data,
+ uint16_t length,
+ bool is_tx)
+{
+ bool result = false;
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci =
+ scsc_avdtp_detect_find_or_create_hci_connection(hci_connection_handle, false);
+
+ if (avdtp_hci) {
+ if (avdtp_hci->signal.state == BT_AVDTP_STATE_COMPLETE_SIGNALING &&
+ IS_VALID_CID_DISCONNECT_REQ(is_tx, avdtp_hci->signal, data)) {
+
+ /* Disconnect the current registered signaling and streaming AVDTP connection */
+ scsc_avdtp_detect_reset(avdtp_hci, true, true, true, true, true, true);
+
+ /* The detection was removed and it can therefore not be unlocked */
+ avdtp_hci = NULL;
+
+ SCSC_TAG_DEBUG(BT_H4, "Signaling src CID disconnected (aclid: 0x%04X) (TX=%u)\n",
+ hci_connection_handle,
+ is_tx);
+ } else if (avdtp_hci->stream.state == BT_AVDTP_STATE_COMPLETE_STREAMING &&
+ IS_VALID_CID_DISCONNECT_REQ(is_tx, avdtp_hci->stream, data)) {
+
+ /* Disconnect the current registered streaming AVDTP connection */
+ scsc_avdtp_detect_reset(avdtp_hci, false, false, true, true, false, false);
+
+ SCSC_TAG_DEBUG(BT_H4, "Streaming src CID disconnected (aclid: 0x%04X) (TX=%u)\n",
+ hci_connection_handle,
+ is_tx);
+ result = true;
+ }
+ /* Release the per-connection lock unless the detection was torn down above */
+ if (avdtp_hci)
+ spin_unlock(&avdtp_hci->lock);
+ }
+ return result;
+}
+
+/* Inspect an L2CAP signaling packet and dispatch to the relevant connect,
+ * connect-response or disconnect handler. Returns true when the FW should
+ * be signalled about a changed AVDTP state (only possible for disconnect
+ * requests); short packets are ignored.
+ */
+static bool scsc_bt_avdtp_detect_connection_rxtx(uint16_t hci_connection_handle, const unsigned char *data, uint16_t length, bool is_tx)
+{
+	uint8_t code;
+
+	if (length < AVDTP_DETECT_MIN_DATA_LENGTH) {
+		SCSC_TAG_DEBUG(BT_H4, "Ignoring L2CAP signal, length %u)\n", length);
+		return false;
+	}
+
+	code = HCI_L2CAP_CODE(data);
+	if (code == L2CAP_CODE_CONNECT_REQ) {
+		/* Handle connection request */
+		scsc_bt_avdtp_detect_connection_conn_req_handling(hci_connection_handle, data, length, is_tx);
+	} else if (code == L2CAP_CODE_CONNECT_RSP) {
+		if (length < AVDTP_DETECT_MIN_DATA_LENGTH_CON_RSP) {
+			SCSC_TAG_WARNING(BT_H4, "Ignoring L2CAP CON RSP in short packet, length %u)\n", length);
+			return false;
+		}
+		/* Handle connection response */
+		scsc_bt_avdtp_detect_connection_conn_resp_handling(hci_connection_handle, data, length, is_tx);
+	} else if (code == L2CAP_CODE_DISCONNECT_REQ) {
+		/* Handle disconnect request */
+		return scsc_bt_avdtp_detect_connection_disconnect_req_handling(hci_connection_handle, data, length, is_tx);
+	}
+
+	return false;
+}
+
+/* Check if there are any SEIDs from the discover response that are SINK, and store them as SINK candidates.
+ *
+ * TX responses describe our local endpoints (look for TSEP == SNK); RX
+ * responses describe the remote's endpoints (TSEP == SRC there implies our
+ * side would be the SNK).
+ *
+ * NOTE(review): the per-connection lock is dropped before each kmalloc and
+ * re-acquired via find_or_create; if the detection disappears meanwhile,
+ * avdtp_hci becomes NULL, the SEID is freed and later iterations skip the
+ * unlock - confirm the caller's final unlock stays balanced in that path. */
+static void scsc_avdtp_detect_check_discover_for_snk_seids(uint16_t hci_connection_handle,
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+ const unsigned char *data,
+ uint16_t length,
+ bool is_tx)
+{
+ uint16_t i = 0;
+ uint16_t n_seid_info = (length - HCI_L2CAP_CONF_SEID_OFFSET) / HCI_L2CAP_CONF_SEID_INFO_SIZE;
+ struct scsc_bt_avdtp_detect_snk_seid *seid = NULL;
+
+ /* Remove potential existing SEID infos on either local or remote side */
+ scsc_avdtp_detect_reset(avdtp_hci, false, false, false, false, is_tx, !is_tx);
+ for (i = 0; i < n_seid_info; i++) {
+ /* Only consider SEID if it's type is SINK. This means that for TX, look for TSEP equal to SNK.
+ * For RX look for TSEP equal to SRC, since this would result in our side being SNK */
+ if ((is_tx && HCI_L2CAP_CONF_TSEP(data, i) == HCI_L2CAP_CONF_TSEP_SNK) ||
+ (!is_tx && HCI_L2CAP_CONF_TSEP(data, i) == HCI_L2CAP_CONF_TSEP_SRC)) {
+
+ /* Drop the lock while allocating to avoid sleeping with it held */
+ if (avdtp_hci)
+ spin_unlock(&avdtp_hci->lock);
+
+ seid = kmalloc(sizeof(struct scsc_bt_avdtp_detect_snk_seid), GFP_KERNEL);
+
+ /* Re-acquire the detection (and its lock) after the allocation */
+ avdtp_hci = scsc_avdtp_detect_find_or_create_hci_connection(hci_connection_handle,
+ false);
+
+ if (avdtp_hci && seid) {
+ memset(seid, 0, sizeof(struct scsc_bt_avdtp_detect_snk_seid));
+ seid->seid = HCI_L2CAP_CONF_SEID(data, i);
+ SCSC_TAG_DEBUG(BT_H4, "Storing seid=%u as candidate for SINK, aclid: 0x%04X\n",
+ seid->seid,
+ avdtp_hci->hci_connection_handle);
+ /* Store the information on either local or remote (push-front list) */
+ if (is_tx) {
+ if (avdtp_hci->tsep_detect.local_snk_seids)
+ seid->next = avdtp_hci->tsep_detect.local_snk_seids;
+ avdtp_hci->tsep_detect.local_snk_seids = seid;
+ } else {
+ if (avdtp_hci->tsep_detect.remote_snk_seids)
+ seid->next = avdtp_hci->tsep_detect.remote_snk_seids;
+ avdtp_hci->tsep_detect.remote_snk_seids = seid;
+ }
+ } else
+ kfree(seid);
+ }
+ }
+}
+
+/* Determine whether the configured stream endpoint is a SINK by matching
+ * the SEID from the set-configuration exchange against the SNK candidates
+ * collected during discovery. Updates avdtp_hci->tsep_detect.tsep and
+ * clears both candidate SEIDs afterwards.
+ */
+static void scsc_avdtp_detect_match_set_conf_seid_with_discover(struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+	const unsigned char *data,
+	bool is_tx)
+{
+	struct scsc_bt_avdtp_detect_snk_seid *node;
+	uint8_t wanted;
+
+	/* Default to SRC */
+	avdtp_hci->tsep_detect.tsep = BT_AVDTP_TSEP_SRC;
+
+	if (is_tx) {
+		node = avdtp_hci->tsep_detect.local_snk_seids;
+		wanted = avdtp_hci->tsep_detect.local_snk_seid_candidate;
+	} else {
+		node = avdtp_hci->tsep_detect.remote_snk_seids;
+		wanted = avdtp_hci->tsep_detect.remote_snk_seid_candidate;
+	}
+
+	for (; node; node = node->next) {
+		if (node->seid == wanted) {
+			/* SINK was detected */
+			avdtp_hci->tsep_detect.tsep = BT_AVDTP_TSEP_SNK;
+			break;
+		}
+	}
+
+	/* The candidate SEIDs have now been evaluated - clear them */
+	avdtp_hci->tsep_detect.local_snk_seid_candidate = 0;
+	avdtp_hci->tsep_detect.remote_snk_seid_candidate = 0;
+	SCSC_TAG_DEBUG(BT_H4, "TSEP for active stream, snk=%d, aclid=0x%04X\n",
+		       avdtp_hci->tsep_detect.tsep,
+		       avdtp_hci->hci_connection_handle);
+}
+
+/* Detects if the AVDTP signal leads to a state change that the FW should know.
+ *
+ * Returns AVDTP_DETECT_SIGNALING_ACTIVE / _OPEN / _INACTIVE for accepted
+ * START / OPEN / CLOSE-SUSPEND-ABORT responses respectively, and
+ * AVDTP_DETECT_SIGNALING_IGNORE for everything else. DISCOVER and
+ * SET_CONFIGURATION traffic is additionally used to track SNK/SRC role. */
+static uint8_t scsc_avdtp_detect_signaling_rxtx(uint16_t hci_connection_handle,
+ struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+ const unsigned char *data,
+ uint16_t length,
+ bool is_tx)
+{
+ u8 signal_id = AVDTP_SIGNAL_ID(data);
+ u8 message_type = AVDTP_MESSAGE_TYPE(data);
+
+ SCSC_TAG_DEBUG(BT_H4, "id: 0x%02X, type: 0x%02X)\n", signal_id, message_type);
+
+ if (message_type == AVDTP_MESSAGE_TYPE_RSP_ACCEPT) {
+ if (signal_id == AVDTP_SIGNAL_ID_START)
+ return AVDTP_DETECT_SIGNALING_ACTIVE;
+ else if (signal_id == AVDTP_SIGNAL_ID_OPEN)
+ return AVDTP_DETECT_SIGNALING_OPEN;
+ else if (signal_id == AVDTP_SIGNAL_ID_CLOSE || signal_id == AVDTP_SIGNAL_ID_SUSPEND ||
+ signal_id == AVDTP_SIGNAL_ID_ABORT)
+ return AVDTP_DETECT_SIGNALING_INACTIVE;
+ else if (signal_id == AVDTP_SIGNAL_ID_DISCOVER) {
+ /* Check the discover signal for potential SNK candidate SEIDs */
+ scsc_avdtp_detect_check_discover_for_snk_seids(hci_connection_handle,
+ avdtp_hci, data, length, is_tx);
+ } else if (signal_id == AVDTP_SIGNAL_ID_SET_CONF) {
+ /* Check if the SEID from set config matches a SNK SEID */
+ scsc_avdtp_detect_match_set_conf_seid_with_discover(avdtp_hci, data, is_tx);
+ }
+ } else if (message_type == AVDTP_MESSAGE_TYPE_CMD) {
+ if (signal_id == AVDTP_SIGNAL_ID_SET_CONF) {
+ /* Remember the ACP SEID until the response tells us whether the
+ * configuration was accepted. TX configures the remote side,
+ * RX configures our local side. */
+ if (is_tx)
+ avdtp_hci->tsep_detect.remote_snk_seid_candidate = HCI_L2CAP_SET_CONF_ACP_SEID(data);
+ else
+ avdtp_hci->tsep_detect.local_snk_seid_candidate = HCI_L2CAP_SET_CONF_ACP_SEID(data);
+ SCSC_TAG_DEBUG(BT_H4, "Set configuration was detected; local_seid_candidate=%u, remote_seid_candidate=%u (aclid: 0x%04X)\n",
+ avdtp_hci->tsep_detect.local_snk_seid_candidate,
+ avdtp_hci->tsep_detect.remote_snk_seid_candidate,
+ avdtp_hci->hci_connection_handle);
+ }
+ } else if (message_type == AVDTP_MESSAGE_TYPE_GENERAL_REJECT || message_type == AVDTP_MESSAGE_TYPE_RSP_REJECT) {
+ if (signal_id == AVDTP_SIGNAL_ID_SET_CONF) {
+ /* Configuration rejected - drop the pending candidate */
+ if (is_tx) {
+ if (avdtp_hci->tsep_detect.local_snk_seid_candidate)
+ avdtp_hci->tsep_detect.local_snk_seid_candidate = 0;
+ } else {
+ if (avdtp_hci->tsep_detect.remote_snk_seid_candidate)
+ avdtp_hci->tsep_detect.remote_snk_seid_candidate = 0;
+ }
+ }
+ }
+ return AVDTP_DETECT_SIGNALING_IGNORE;
+}
+
+/* Public hook called from scsc_shm.c for every ACL payload in both
+ * directions.  The data is run through the AVDTP connection/signalling
+ * detection, and when a state change relevant to the firmware is found the
+ * firmware is signalled - through the AVDTP transfer ring when the firmware
+ * advertises it, otherwise through the legacy avdtp_detect_stream_id word. */
+void scsc_avdtp_detect_rxtx(u16 hci_connection_handle, const unsigned char *data, uint16_t length, bool is_tx)
+{
+	/* Look for AVDTP connections */
+	bool avdtp_gen_bg_int = false;	/* set when the FW must be notified */
+	uint16_t cid_to_fw = 0;		/* stream CID to report to the FW */
+	bool is_sink = false;		/* detected local role is A2DP sink */
+	struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci = NULL;
+	bool avdtp_open = false;	/* an accepted AVDTP OPEN was seen */
+
+	/* Look for AVDTP connections */
+	if (HCI_L2CAP_RX_CID((const unsigned char *)(data)) == L2CAP_SIGNALING_CID) {
+		/* L2CAP signalling channel: may create/close AVDTP channels */
+		if (scsc_bt_avdtp_detect_connection_rxtx(hci_connection_handle, data, length, is_tx)) {
+			avdtp_gen_bg_int = true;
+
+			/* "false": look up only, do not create.  The element is
+			 * returned with its lock held (unlocked further below). */
+			avdtp_hci = scsc_avdtp_detect_find_or_create_hci_connection(
+				hci_connection_handle,
+				false);
+
+			if (avdtp_hci) {
+				cid_to_fw = avdtp_hci->stream.dst_cid;
+				is_sink = avdtp_hci->tsep_detect.tsep == BT_AVDTP_TSEP_SNK;
+			}
+		}
+	} else {
+		/* Check if we have detected any signal on the connection handle */
+		avdtp_hci = scsc_avdtp_detect_find_or_create_hci_connection(
+			hci_connection_handle,
+			false);
+
+		if (avdtp_hci) {
+			/* Only parse the payload as AVDTP signalling when the
+			 * signalling channel is fully established and the CID
+			 * matches the signalling channel for this direction */
+			if (avdtp_hci->signal.state == BT_AVDTP_STATE_COMPLETE_SIGNALING &&
+			    length >= AVDTP_DETECT_MIN_AVDTP_LENGTH &&
+			    ((is_tx && avdtp_hci->signal.dst_cid != 0 &&
+			      avdtp_hci->signal.dst_cid == HCI_L2CAP_RX_CID((const unsigned char *)(data))) ||
+			     (!is_tx && avdtp_hci->signal.src_cid != 0 &&
+			      avdtp_hci->signal.src_cid == HCI_L2CAP_RX_CID((const unsigned char *)(data))))) {
+				/* Signaling has been detected on the given CID and hci_connection_handle */
+				uint8_t result = scsc_avdtp_detect_signaling_rxtx(hci_connection_handle,
+									avdtp_hci, data, length, is_tx);
+
+				if (result != AVDTP_DETECT_SIGNALING_IGNORE) {
+					avdtp_gen_bg_int = true;
+					/* cid_to_fw stays 0 for INACTIVE results */
+					if (result != AVDTP_DETECT_SIGNALING_INACTIVE)
+						cid_to_fw = avdtp_hci->stream.dst_cid;
+					if (result == AVDTP_DETECT_SIGNALING_OPEN)
+						avdtp_open = true;
+					is_sink = avdtp_hci->tsep_detect.tsep == BT_AVDTP_TSEP_SNK;
+				}
+
+			}
+		}
+	}
+
+	/* Release the element lock taken by the find call above */
+	if (avdtp_hci)
+		spin_unlock(&avdtp_hci->lock);
+
+	if (avdtp_gen_bg_int) {
+		if (bt_service.bsmhcp_protocol->header.firmware_features & BSMHCP_FEATURE_AVDTP_TRANSFER_RING) {
+			uint32_t flags = 0;
+
+			/* NOTE(review): avdtp_hci->lock was released above; this
+			 * read of tsep assumes the element cannot be freed
+			 * concurrently - confirm against the removal paths */
+			if (avdtp_hci && avdtp_hci->tsep_detect.tsep == BT_AVDTP_TSEP_SNK)
+				flags |= AVDTP_SNK_FLAG_TD_MASK;
+			if (avdtp_open)
+				flags |= AVDTP_OPEN_FLAG_TD_MASK;
+			scsc_bt_shm_h4_avdtp_detect_write(flags, cid_to_fw, hci_connection_handle);
+		} else {
+			/* Legacy communication between driver and FW. This was replaced by a transfer ring but
+			 * the implementation is kept for backward compability
+			 */
+			/* 2-bit rolling message counter in the top bits of the word */
+			u8 msg_counter = AVDTP_GET_MESSAGE_COUNT(
+				bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id);
+			msg_counter++;
+			msg_counter &= 0x3;
+
+			bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id = cid_to_fw |
+				(hci_connection_handle << 16) |
+				(msg_counter << 28);
+			bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id |= AVDTP_SIGNAL_FLAG_MASK;
+			if (is_sink)
+				bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id |= AVDTP_SNK_FLAG_MASK;
+			SCSC_TAG_DEBUG(
+				BT_H4,
+				"Found AVDTP signal. msgid: 0x%02X, aclid: 0x%04X, cid: 0x%04X, streamid: 0x%08X\n",
+				msg_counter,
+				hci_connection_handle,
+				cid_to_fw,
+				bt_service.bsmhcp_protocol->header.avdtp_detect_stream_id);
+			/* Ensure the header write is posted before raising the interrupt */
+			mmiowb();
+			scsc_service_mifintrbit_bit_set(bt_service.service,
+							bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+							SCSC_MIFINTR_TARGET_R4);
+		}
+	}
+}
+
+/* Used to reset the different AVDTP detections of one HCI connection.
+ * Each flag selects which sub-state to clear.  Caller must hold
+ * avdtp_hci->lock.  When reset_signal is true the element itself is
+ * unlinked from bt_service.avdtp_detect.connections and freed - in that
+ * case the caller must not touch (or unlock) avdtp_hci afterwards. */
+void scsc_avdtp_detect_reset(struct scsc_bt_avdtp_detect_hci_connection *avdtp_hci,
+			     bool reset_signal,
+			     bool reset_signal_ongoing,
+			     bool reset_stream,
+			     bool reset_stream_ongoing,
+			     bool reset_local_seids,
+			     bool reset_remote_seids)
+{
+	if (reset_signal_ongoing) {
+		/* Forget half-completed signalling channel setups, both directions */
+		avdtp_hci->ongoing.outgoing_signal.state = BT_AVDTP_STATE_IDLE_SIGNALING;
+		avdtp_hci->ongoing.outgoing_signal.src_cid = 0;
+		avdtp_hci->ongoing.outgoing_signal.dst_cid = 0;
+		avdtp_hci->ongoing.incoming_signal.state = BT_AVDTP_STATE_IDLE_SIGNALING;
+		avdtp_hci->ongoing.incoming_signal.src_cid = 0;
+		avdtp_hci->ongoing.incoming_signal.dst_cid = 0;
+	}
+	if (reset_stream) {
+		/* Forget the established streaming channel */
+		avdtp_hci->stream.state = BT_AVDTP_STATE_IDLE_STREAMING;
+		avdtp_hci->stream.src_cid = 0;
+		avdtp_hci->stream.dst_cid = 0;
+	}
+	if (reset_stream_ongoing) {
+		/* Forget half-completed streaming channel setups, both directions */
+		avdtp_hci->ongoing.outgoing_stream.state = BT_AVDTP_STATE_IDLE_STREAMING;
+		avdtp_hci->ongoing.outgoing_stream.src_cid = 0;
+		avdtp_hci->ongoing.outgoing_stream.dst_cid = 0;
+		avdtp_hci->ongoing.incoming_stream.state = BT_AVDTP_STATE_IDLE_STREAMING;
+		avdtp_hci->ongoing.incoming_stream.src_cid = 0;
+		avdtp_hci->ongoing.incoming_stream.dst_cid = 0;
+	}
+	if (reset_local_seids) {
+		/* Free the cached linked list of local SNK SEIDs */
+		struct scsc_bt_avdtp_detect_snk_seid *seid = avdtp_hci->tsep_detect.local_snk_seids;
+
+		while (seid) {
+			struct scsc_bt_avdtp_detect_snk_seid *next = seid->next;
+
+			kfree(seid);
+			seid = next;
+		}
+		avdtp_hci->tsep_detect.local_snk_seids = NULL;
+		avdtp_hci->tsep_detect.local_snk_seid_candidate = 0;
+	}
+	if (reset_remote_seids) {
+		/* Free the cached linked list of remote SNK SEIDs */
+		struct scsc_bt_avdtp_detect_snk_seid *seid = avdtp_hci->tsep_detect.remote_snk_seids;
+
+		while (seid) {
+			struct scsc_bt_avdtp_detect_snk_seid *next = seid->next;
+
+			kfree(seid);
+			seid = next;
+		}
+		avdtp_hci->tsep_detect.remote_snk_seids = NULL;
+		avdtp_hci->tsep_detect.remote_snk_seid_candidate = 0;
+	}
+	/* Only when both SEID caches are dropped does the role fall back to SRC */
+	if (reset_local_seids && reset_remote_seids)
+		avdtp_hci->tsep_detect.tsep = BT_AVDTP_TSEP_SRC;
+
+	if (reset_signal) {
+		struct scsc_bt_avdtp_detect_hci_connection *prev;
+		/* Unlock the element before taking the list lock to keep the order
+		 * of lock/unlock between the connection list and the individual
+		 * elements
+		 */
+		spin_unlock(&avdtp_hci->lock);
+		spin_lock(&bt_service.avdtp_detect.lock);
+		/* NOTE(review): avdtp_hci is a local pointer, so this check is
+		 * always true and cannot detect a concurrent deletion as the
+		 * original comment intended; a list membership test would be
+		 * needed - confirm against the other removal paths. */
+		if (avdtp_hci) {
+			prev = bt_service.avdtp_detect.connections;
+
+			if (prev && prev != avdtp_hci) {
+				/* The element was not the head of the list. Search for the previous element */
+				while (prev) {
+					if (prev->next == avdtp_hci) {
+						/* Remove the element from the list */
+						prev->next = avdtp_hci->next;
+						break;
+					}
+					prev = prev->next;
+				}
+			} else {
+				bt_service.avdtp_detect.connections = avdtp_hci->next;
+			}
+			/* Lock to make sure that no-one reads from it. Since it has been removed from the list
+			 * unlocking it again will not make another thread read it since it cannot be found
+			 */
+			spin_lock(&avdtp_hci->lock);
+			spin_unlock(&bt_service.avdtp_detect.lock);
+			spin_unlock(&avdtp_hci->lock);
+			kfree(avdtp_hci);
+			avdtp_hci = NULL;
+		} else
+			spin_unlock(&bt_service.avdtp_detect.lock);
+	}
+}
+
+/* Drop every detection state (established and ongoing) associated with the
+ * given HCI connection handle, e.g. when the ACL link is lost.
+ * Returns true when a detection existed and was reset. */
+bool scsc_avdtp_detect_reset_connection_handle(uint16_t hci_connection_handle)
+{
+	struct scsc_bt_avdtp_detect_hci_connection *detection;
+
+	/* Look up only ("false" = do not create) */
+	detection = scsc_avdtp_detect_find_or_create_hci_connection(hci_connection_handle, false);
+	if (!detection)
+		return false;
+
+	/* Full reset removes and frees the element, so no unlock is needed */
+	scsc_avdtp_detect_reset(detection, true, true, true, true, true, true);
+	return true;
+}
+
+/* Service teardown: free every AVDTP detection element.  Uses hand-over-hand
+ * locking: the list lock is held while picking the head, the element lock is
+ * taken, the list head is advanced past the element, and the element is
+ * freed after its lock is dropped (it is unreachable by then). */
+void scsc_avdtp_detect_exit(void)
+{
+	struct scsc_bt_avdtp_detect_hci_connection *head;
+
+	/* Lock the detection list and find the head */
+	spin_lock(&bt_service.avdtp_detect.lock);
+	head = bt_service.avdtp_detect.connections;
+
+	while (head) {
+		spin_lock(&head->lock);
+		/* Clear the remote and local seids lists on head */
+		scsc_avdtp_detect_reset(head, false, false, false, false, true, true);
+
+		/* Update the head to bypass the current element */
+		bt_service.avdtp_detect.connections = head->next;
+
+		spin_unlock(&bt_service.avdtp_detect.lock);
+
+		/* Free the used memory; the element is no longer reachable */
+		spin_unlock(&head->lock);
+		kfree(head);
+		head = NULL;
+
+		/* Update the head variable */
+		spin_lock(&bt_service.avdtp_detect.lock);
+		head = bt_service.avdtp_detect.connections;
+	}
+
+	spin_unlock(&bt_service.avdtp_detect.lock);
+
+	/* The avdtp_detect has now been restored and doesn't contain other information
+	 * than its two locks
+	 */
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ * BT BlueZ interface
+ *
+ ****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/termios.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include "../../../bluetooth/h4_recv.h"
+
+#include <scsc/scsc_logring.h>
+
+/* Virtual HCI device registered with the BlueZ core (NULL when powered off) */
+static struct hci_dev *hdev;
+/* References handed over by the SCSC BT core at probe time */
+static struct device *dev_ref;
+static const struct file_operations *bt_fs;
+/* Single-threaded workqueue running the blocking read pump (read_work) */
+static struct workqueue_struct *wq;
+static struct work_struct open_worker;
+static struct work_struct close_worker;
+static struct work_struct read_work;
+static u8 receive_buffer[1024];
+/* Set to make the read pump exit; the SCSC error count / read wait queue
+ * are poked in slsi_bt_close() to force the blocking read to return */
+static bool terminate_read;
+static atomic_t *error_count_ref;
+static wait_queue_head_t *read_wait_ref;
+/* Placeholder file object passed to the SCSC read file operation */
+static struct file s_file;
+
+static struct platform_device *slsi_btz_pdev;
+static struct rfkill *btz_rfkill;
+
+/* H4 packet types accepted from the firmware: ACL data and HCI events */
+static const struct h4_recv_pkt scsc_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+/* Work item: pump data from the SCSC BT character interface into BlueZ.
+ * Blocks in bt_fs->read() and feeds received bytes through the shared H4
+ * de-framer, which delivers complete packets via hci_recv_frame().
+ * Exits when a read fails, a corrupted packet is seen, or terminate_read
+ * is set (slsi_bt_close() additionally forces the blocking read to return).
+ */
+static void slsi_bt_fs_read_func(struct work_struct *work)
+{
+	int ret;
+	struct sk_buff *skb = NULL;	/* partial-packet state carried by h4_recv_buf() */
+
+	while ((ret = bt_fs->read(&s_file, receive_buffer, sizeof(receive_buffer), NULL)) >= 0) {
+		if (terminate_read)
+			break;
+
+		if (ret > 0) {
+			skb = h4_recv_buf(hdev, skb, receive_buffer,
+					  ret, scsc_recv_pkts,
+					  ARRAY_SIZE(scsc_recv_pkts));
+
+			if (IS_ERR(skb)) {
+				SCSC_TAG_ERR(BT_COMMON, "corrupted event packet\n");
+				hdev->stat.err_rx++;
+				break;
+			}
+			hdev->stat.byte_rx += ret;
+		}
+	}
+
+	/* NOTE(review): a partially assembled skb is not freed on exit, and a
+	 * stream of ret == 0 reads would spin - presumably the SCSC read
+	 * blocks until data or error; confirm against its implementation. */
+	SCSC_TAG_INFO(BT_COMMON, "BT BlueZ: Exiting %s\n", __func__);
+}
+
+/* hci_dev open callback: open the underlying SCSC BT character interface
+ * and start the work item that pumps received data into the BlueZ stack.
+ * Returns 0 on success or a negative error code.
+ *
+ * Fix: a failed create_singlethread_workqueue() previously went unnoticed
+ * and led to queue_work(NULL, ...); it now releases the interface and
+ * returns -ENOMEM.
+ */
+static int slsi_bt_open(struct hci_dev *hdev)
+{
+	int err;
+
+	SCSC_TAG_INFO(BT_COMMON, "enter\n");
+
+	err = bt_fs->open(NULL, NULL);
+	if (err == 0) {
+		terminate_read = false;
+		if (wq == NULL) {
+			wq = create_singlethread_workqueue("slsi_bt_bluez_wq");
+			if (wq == NULL) {
+				SCSC_TAG_ERR(BT_COMMON, "failed to create workqueue\n");
+				/* Balance the successful open above */
+				bt_fs->release(NULL, NULL);
+				return -ENOMEM;
+			}
+			INIT_WORK(&read_work, slsi_bt_fs_read_func);
+		}
+		queue_work(wq, &read_work);
+	}
+
+	SCSC_TAG_INFO(BT_COMMON, "done\n");
+
+	return err;
+}
+
+/* hci_dev close callback: stop the read pump and release the SCSC BT
+ * character interface.  Returns the result of the release file operation. */
+static int slsi_bt_close(struct hci_dev *hdev)
+{
+	int ret;
+
+	SCSC_TAG_INFO(BT_COMMON, "terminating reader thread\n");
+
+	/* Ask the pump to stop, then force its blocking read to return by
+	 * bumping the SCSC error count and waking the read wait queue */
+	terminate_read = true;
+	if (error_count_ref)
+		atomic_inc(error_count_ref);
+	if (read_wait_ref)
+		wake_up(read_wait_ref);
+
+	cancel_work_sync(&read_work);
+
+	SCSC_TAG_INFO(BT_COMMON, "releasing service\n");
+	ret = bt_fs->release(NULL, NULL);
+
+	if (wq) {
+		destroy_workqueue(wq);
+		wq = NULL;
+	}
+
+	SCSC_TAG_INFO(BT_COMMON, "done\n");
+
+	return ret;
+}
+
+/* hci_dev flush callback: nothing buffered in this driver to discard */
+static int slsi_bt_flush(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+/* hci_dev send callback: prepend the H4 packet-type byte and hand the frame
+ * to the SCSC BT write interface.  Returns the write result (>= 0 bytes on
+ * success, negative error otherwise; the skb is consumed on success).
+ *
+ * Bug fix: the original freed the skb and then read skb->len and the packet
+ * type from it (use-after-free).  The statistics are now updated before the
+ * skb is released.
+ */
+static int slsi_bt_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	int ret;
+
+	SCSC_TAG_DEBUG(BT_H4, "sending frame(data=%p, len=%u)\n", skb->data, skb->len);
+
+	/* H4 framing: packet type byte precedes the HCI payload */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	ret = bt_fs->write(NULL, skb->data, skb->len, NULL);
+	if (ret >= 0) {
+		/* Update HCI stat counters before the skb is freed */
+		hdev->stat.byte_tx += skb->len;
+
+		switch (hci_skb_pkt_type(skb)) {
+		case HCI_COMMAND_PKT:
+			hdev->stat.cmd_tx++;
+			break;
+
+		case HCI_ACLDATA_PKT:
+			hdev->stat.acl_tx++;
+			break;
+
+		case HCI_SCODATA_PKT:
+			hdev->stat.sco_tx++;
+			break;
+		}
+
+		kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+/* Deferred worker: allocate and register the virtual HCI device with the
+ * BlueZ core.  A no-op when the device already exists. */
+static void slsi_bt_open_worker(struct work_struct *work)
+{
+	int ret;
+
+	if (hdev)
+		return;
+
+	hdev = hci_alloc_dev();
+	if (!hdev) {
+		SCSC_TAG_ERR(BT_COMMON, "failed to allocate hci device\n");
+		return;
+	}
+
+	hdev->bus = HCI_VIRTUAL;
+	hdev->dev_type = HCI_BREDR;
+
+	SET_HCIDEV_DEV(hdev, dev_ref);
+
+	/* Wire up the hci_dev callbacks implemented in this file */
+	hdev->open = slsi_bt_open;
+	hdev->close = slsi_bt_close;
+	hdev->flush = slsi_bt_flush;
+	hdev->send = slsi_bt_send_frame;
+
+	ret = hci_register_dev(hdev);
+	if (ret < 0) {
+		SCSC_TAG_ERR(BT_COMMON, "failed to register hci device (err: %d)\n", ret);
+		hci_free_dev(hdev);
+		hdev = NULL;
+	}
+}
+
+/* Deferred worker: unregister and free the virtual HCI device, if any */
+static void slsi_bt_close_worker(struct work_struct *work)
+{
+	if (!hdev)
+		return;
+
+	hci_unregister_dev(hdev);
+	hci_free_dev(hdev);
+	hdev = NULL;
+}
+
+/* Called by the SCSC BT core when the underlying service is probed; caches
+ * the references needed to drive the character interface from BlueZ. */
+void slsi_bt_notify_probe(struct device *dev,
+			  const struct file_operations *fs,
+			  atomic_t *error_count,
+			  wait_queue_head_t *read_wait)
+{
+	dev_ref = dev;
+	bt_fs = fs;
+	error_count_ref = error_count;
+	read_wait_ref = read_wait;
+
+	SCSC_TAG_INFO(BT_COMMON, "SLSI BT BlueZ probe\n");
+}
+
+/* Called by the SCSC BT core on service removal; drop the cached references */
+void slsi_bt_notify_remove(void)
+{
+	dev_ref = NULL;
+	error_count_ref = NULL;
+	read_wait_ref = NULL;
+}
+
+/* "power_control" module parameter setter: a non-zero value registers the
+ * BlueZ HCI device (only when the service has been probed), zero unregisters
+ * it.  The actual work is deferred to the system workqueue. */
+static int slsi_bt_power_control_set_param_cb(const char *buffer,
+					      const struct kernel_param *kp)
+{
+	u32 value;
+	int ret = kstrtou32(buffer, 0, &value);
+
+	if (ret)
+		return ret;
+
+	if (value && dev_ref) {
+		INIT_WORK(&open_worker, slsi_bt_open_worker);
+		schedule_work(&open_worker);
+	} else if (value == 0) {
+		INIT_WORK(&close_worker, slsi_bt_close_worker);
+		schedule_work(&close_worker);
+	}
+
+	return 0;
+}
+
+/* "power_control" getter: reports 1 while the HCI device is registered */
+static int slsi_bt_power_control_get_param_cb(char *buffer,
+					      const struct kernel_param *kp)
+{
+	return sprintf(buffer, "%u\n", hdev ? 1u : 0u);
+}
+
+static struct kernel_param_ops slsi_bt_power_control_ops = {
+ .set = slsi_bt_power_control_set_param_cb,
+ .get = slsi_bt_power_control_get_param_cb,
+};
+
+module_param_cb(power_control, &slsi_bt_power_control_ops, NULL, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(power_control,
+ "Enables/disable BlueZ registration");
+
+/* rfkill set_block callback: blocking tears the BlueZ device down,
+ * unblocking brings it up (only when the service has been probed).
+ * The work is deferred to the system workqueue. */
+static int btz_rfkill_set(void *data, bool blocked)
+{
+	if (blocked) {
+		INIT_WORK(&close_worker, slsi_bt_close_worker);
+		schedule_work(&close_worker);
+	} else if (dev_ref) {
+		INIT_WORK(&open_worker, slsi_bt_open_worker);
+		schedule_work(&open_worker);
+	}
+
+	return 0;
+}
+
+static const struct rfkill_ops btz_rfkill_ops = {
+ .set_block = btz_rfkill_set,
+};
+
+/* Module init: create the scsc-bluez platform device and register an rfkill
+ * switch (initially soft-blocked) that controls BlueZ registration.
+ *
+ * Bug fix: when rfkill_alloc() failed, the function returned the stale 0
+ * from platform_device_add(), reporting success for a half-initialized
+ * module; it now returns -ENOMEM.
+ */
+static int __init slsi_bluez_init(void)
+{
+	int ret;
+
+	slsi_btz_pdev = platform_device_alloc("scsc-bluez", -1);
+	if (!slsi_btz_pdev)
+		return -ENOMEM;
+
+	ret = platform_device_add(slsi_btz_pdev);
+	if (ret)
+		goto err_slsi_btz_pdev;
+
+	btz_rfkill = rfkill_alloc("scsc-bluez-rfkill", &slsi_btz_pdev->dev,
+				  RFKILL_TYPE_BLUETOOTH, &btz_rfkill_ops, NULL);
+	if (!btz_rfkill) {
+		ret = -ENOMEM;
+		goto err_btz_rfkill_alloc;
+	}
+
+	/* Start soft-blocked: userspace must unblock to register with BlueZ */
+	rfkill_init_sw_state(btz_rfkill, 1);
+
+	ret = rfkill_register(btz_rfkill);
+	if (ret)
+		goto err_btz_rfkill_reg;
+
+	return 0;
+
+err_btz_rfkill_reg:
+	rfkill_destroy(btz_rfkill);
+
+err_btz_rfkill_alloc:
+	platform_device_del(slsi_btz_pdev);
+
+err_slsi_btz_pdev:
+	platform_device_put(slsi_btz_pdev);
+
+	return ret;
+}
+
+/* Module exit: tear down in reverse order of creation.
+ *
+ * Bug fix: the rfkill device (whose parent is the platform device) is now
+ * unregistered before its parent platform device, not after.
+ */
+static void __exit slsi_bluez_exit(void)
+{
+	rfkill_unregister(btz_rfkill);
+	rfkill_destroy(btz_rfkill);
+	platform_device_unregister(slsi_btz_pdev);
+}
+
+module_init(slsi_bluez_init);
+module_exit(slsi_bluez_exit);
+
+MODULE_DESCRIPTION("SCSC BT Bluez");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/****************************************************************************
+ *
+ * Internal BT driver HCI decoder
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_BT_HCI_H
+#define __SCSC_BT_HCI_H
+
+#define HCI_EVENT_HEADER_LENGTH (2)
+
+#define HCI_EV_INQUIRY_COMPLETE ((u8)0x01)
+#define HCI_EV_INQUIRY_RESULT ((u8)0x02)
+#define HCI_EV_CONN_COMPLETE ((u8)0x03)
+#define HCI_EV_CONN_REQUEST ((u8)0x04)
+#define HCI_EV_DISCONNECT_COMPLETE ((u8)0x05)
+#define HCI_EV_AUTH_COMPLETE ((u8)0x06)
+#define HCI_EV_REMOTE_NAME_REQ_COMPLETE ((u8)0x07)
+#define HCI_EV_ENCRYPTION_CHANGE ((u8)0x08)
+#define HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE ((u8)0x09)
+#define HCI_EV_MASTER_LINK_KEY_COMPLETE ((u8)0x0A)
+#define HCI_EV_READ_REM_SUPP_FEATURES_COMPLETE ((u8)0x0B)
+#define HCI_EV_READ_REMOTE_VER_INFO_COMPLETE ((u8)0x0C)
+#define HCI_EV_QOS_SETUP_COMPLETE ((u8)0x0D)
+#define HCI_EV_COMMAND_COMPLETE ((u8)0x0E)
+#define HCI_EV_COMMAND_STATUS ((u8)0x0F)
+#define HCI_EV_HARDWARE_ERROR ((u8)0x10)
+#define HCI_EV_FLUSH_OCCURRED ((u8)0x11)
+#define HCI_EV_ROLE_CHANGE ((u8)0x12)
+#define HCI_EV_NUMBER_COMPLETED_PKTS ((u8)0x13)
+#define HCI_EV_MODE_CHANGE ((u8)0x14)
+#define HCI_EV_RETURN_LINK_KEYS ((u8)0x15)
+#define HCI_EV_PIN_CODE_REQ ((u8)0x16)
+#define HCI_EV_LINK_KEY_REQ ((u8)0x17)
+#define HCI_EV_LINK_KEY_NOTIFICATION ((u8)0x18)
+#define HCI_EV_LOOPBACK_COMMAND ((u8)0x19)
+#define HCI_EV_DATA_BUFFER_OVERFLOW ((u8)0x1A)
+#define HCI_EV_MAX_SLOTS_CHANGE ((u8)0x1B)
+#define HCI_EV_READ_CLOCK_OFFSET_COMPLETE ((u8)0x1C)
+#define HCI_EV_CONN_PACKET_TYPE_CHANGED ((u8)0x1D)
+#define HCI_EV_QOS_VIOLATION ((u8)0x1E)
+#define HCI_EV_PAGE_SCAN_MODE_CHANGE ((u8)0x1F)
+#define HCI_EV_PAGE_SCAN_REP_MODE_CHANGE ((u8)0x20)
+/* 1.2 Events */
+#define HCI_EV_FLOW_SPEC_COMPLETE ((u8)0x21)
+#define HCI_EV_INQUIRY_RESULT_WITH_RSSI ((u8)0x22)
+#define HCI_EV_READ_REM_EXT_FEATURES_COMPLETE ((u8)0x23)
+#define HCI_EV_FIXED_ADDRESS ((u8)0x24)
+#define HCI_EV_ALIAS_ADDRESS ((u8)0x25)
+#define HCI_EV_GENERATE_ALIAS_REQ ((u8)0x26)
+#define HCI_EV_ACTIVE_ADDRESS ((u8)0x27)
+#define HCI_EV_ALLOW_PRIVATE_PAIRING ((u8)0x28)
+#define HCI_EV_ALIAS_ADDRESS_REQ ((u8)0x29)
+#define HCI_EV_ALIAS_NOT_RECOGNISED ((u8)0x2A)
+#define HCI_EV_FIXED_ADDRESS_ATTEMPT ((u8)0x2B)
+#define HCI_EV_SYNC_CONN_COMPLETE ((u8)0x2C)
+#define HCI_EV_SYNC_CONN_CHANGED ((u8)0x2D)
+
+/* 2.1 Events */
+#define HCI_EV_SNIFF_SUB_RATE ((u8)0x2E)
+#define HCI_EV_EXTENDED_INQUIRY_RESULT ((u8)0x2F)
+#define HCI_EV_ENCRYPTION_KEY_REFRESH_COMPLETE ((u8)0x30)
+#define HCI_EV_IO_CAPABILITY_REQUEST ((u8)0x31)
+#define HCI_EV_IO_CAPABILITY_RESPONSE ((u8)0x32)
+#define HCI_EV_USER_CONFIRMATION_REQUEST ((u8)0x33)
+#define HCI_EV_USER_PASSKEY_REQUEST ((u8)0x34)
+#define HCI_EV_REMOTE_OOB_DATA_REQUEST ((u8)0x35)
+#define HCI_EV_SIMPLE_PAIRING_COMPLETE ((u8)0x36)
+#define HCI_EV_LST_CHANGE ((u8)0x38)
+#define HCI_EV_ENHANCED_FLUSH_COMPLETE ((u8)0x39)
+#define HCI_EV_USER_PASSKEY_NOTIFICATION ((u8)0x3B)
+#define HCI_EV_KEYPRESS_NOTIFICATION ((u8)0x3C)
+#define HCI_EV_REM_HOST_SUPPORTED_FEATURES ((u8)0x3D)
+#define HCI_EV_ULP ((u8)0x3E)
+
+/* TCC + CSB Events */
+#define HCI_EV_TRIGGERED_CLOCK_CAPTURE ((u8)0x4E)
+#define HCI_EV_SYNCHRONIZATION_TRAIN_COMPLETE ((u8)0x4F)
+#define HCI_EV_SYNCHRONIZATION_TRAIN_RECEIVED ((u8)0x50)
+#define HCI_EV_CSB_RECEIVE ((u8)0x51)
+#define HCI_EV_CSB_TIMEOUT ((u8)0x52)
+#define HCI_EV_TRUNCATED_PAGE_COMPLETE ((u8)0x53)
+#define HCI_EV_SLAVE_PAGE_RESPONSE_TIMEOUT ((u8)0x54)
+#define HCI_EV_CSB_CHANNEL_MAP_CHANGE ((u8)0x55)
+#define HCI_EV_INQUIRY_RESPONSE_NOTIFICATION ((u8)0x56)
+
+/* 4.1 Events */
+#define HCI_EV_AUTHENTICATED_PAYLOAD_TIMEOUT_EXPIRED ((u8)0x57)
+
+/* ULP Sub-opcodes */
+#define HCI_EV_ULP_CONNECTION_COMPLETE ((u8)0x01)
+#define HCI_EV_ULP_ADVERTISING_REPORT ((u8)0x02)
+#define HCI_EV_ULP_CONNECTION_UPDATE_COMPLETE ((u8)0x03)
+#define HCI_EV_ULP_READ_REMOTE_USED_FEATURES_COMPLETE ((u8)0x04)
+#define HCI_EV_ULP_LONG_TERM_KEY_REQUEST ((u8)0x05)
+#define HCI_EV_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST ((u8)0x06)
+#define HCI_EV_ULP_DATA_LENGTH_CHANGE ((u8)0x07)
+#define HCI_EV_ULP_READ_LOCAL_P256_PUB_KEY_COMPLETE ((u8)0x08)
+#define HCI_EV_ULP_GENERATE_DHKEY_COMPLETE ((u8)0x09)
+#define HCI_EV_ULP_ENHANCED_CONNECTION_COMPLETE ((u8)0x0A)
+#define HCI_EV_ULP_DIRECT_ADVERTISING_REPORT ((u8)0x0B)
+#define HCI_EV_ULP_PHY_UPDATE_COMPLETE ((u8)0x0C)
+/* The subevent code of ULP_USED_CHANNEL_SELECTION_EVENT shall be updated
+ when it is defined in the spec.
+ Assign it as 0x0D temporarily. */
+#define HCI_EV_ULP_USED_CHANNEL_SELECTION ((u8)0x0D)
+
+#define HCI_EV_DECODE(entry) case entry: ret = #entry; break
+
+#endif /* __SCSC_BT_HCI_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ * BT driver entry point
+ *
+ ****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/termios.h>
+#include <linux/wakelock.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_ARCH_EXYNOS
+#include <linux/soc/samsung/exynos-soc.h>
+#endif
+
+#include <scsc/scsc_logring.h>
+#include <scsc/kic/slsi_kic_lib.h>
+#include <scsc/kic/slsi_kic_bt.h>
+#include <scsc/kic/slsi_kic_ant.h>
+
+#include "scsc_bt_priv.h"
+#include "../scsc/scsc_mx_impl.h"
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#define SCSC_MODDESC "SCSC MX BT Driver"
+#define SCSC_MODAUTH "Samsung Electronics Co., Ltd"
+#define SCSC_MODVERSION "-devel"
+
+#define SLSI_BT_SERVICE_CLOSE_RETRY 60
+#define SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT 20000
+#define SLSI_BT_SERVICE_STOP_RECOVERY_DISABLED_TIMEOUT 2000
+
+#define SCSC_ANT_MAX_TIMEOUT (20*HZ)
+
+#ifdef CONFIG_SCSC_ANT
+static DECLARE_WAIT_QUEUE_HEAD(ant_recovery_complete_queue);
+#endif
+
+static DEFINE_MUTEX(bt_start_mutex);
+static DEFINE_MUTEX(bt_audio_mutex);
+#ifdef CONFIG_SCSC_ANT
+static DEFINE_MUTEX(ant_start_mutex);
+#endif
+
+static int bt_recovery_in_progress;
+#ifdef CONFIG_SCSC_ANT
+static int ant_recovery_in_progress;
+#endif
+
+static int recovery_timeout = SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT;
+
+struct scsc_common_service common_service;
+struct scsc_bt_service bt_service;
+#ifdef CONFIG_SCSC_ANT
+struct scsc_ant_service ant_service;
+#endif
+
+static int service_start_count;
+#ifdef CONFIG_SCSC_ANT
+static int ant_service_start_count;
+#endif
+
+static u64 bluetooth_address;
+#ifdef CONFIG_ARCH_EXYNOS
+static char bluetooth_address_fallback[] = "00:00:00:00:00:00";
+#endif
+static u32 bt_info_trigger;
+static u32 bt_info_interrupt;
+static u32 firmware_control;
+static bool firmware_control_reset = true;
+static u32 firmware_mxlog_filter;
+static bool disable_service;
+
+/* Audio */
+static struct device *audio_device;
+static bool audio_device_probed;
+static struct scsc_bt_audio bt_audio;
+
+module_param(bluetooth_address, ullong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bluetooth_address,
+ "Bluetooth address");
+
+#ifdef CONFIG_ARCH_EXYNOS
+module_param_string(bluetooth_address_fallback, bluetooth_address_fallback,
+ sizeof(bluetooth_address_fallback), 0444);
+MODULE_PARM_DESC(bluetooth_address_fallback,
+ "Bluetooth address as proposed by the driver");
+#endif
+
+module_param(service_start_count, int, S_IRUGO);
+MODULE_PARM_DESC(service_start_count,
+ "Track how many times the BT service has been started");
+#ifdef CONFIG_SCSC_ANT
+module_param(ant_service_start_count, int, 0444);
+MODULE_PARM_DESC(ant_service_start_count,
+ "Track how many times the ANT service has been started");
+#endif
+
+module_param(firmware_control, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(firmware_control, "Control how the firmware behaves");
+
+module_param(firmware_control_reset, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(firmware_control_reset,
+ "Controls the resetting of the firmware_control variable");
+
+module_param(disable_service, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_service,
+ "Disables service startup");
+
+/*
+ * Service event callbacks called from mx-core when things go wrong
+ */
+/* Service-client callback from mx-core on subsystem failure: flag recovery,
+ * record the error and scrub the A-Box shared audio buffers. */
+static void bt_stop_on_failure(struct scsc_service_client *client)
+{
+	UNUSED(client);
+
+	SCSC_TAG_ERR(BT_COMMON, "\n");
+
+	reinit_completion(&bt_service.recovery_probe_complete);
+	bt_recovery_in_progress = 1;
+	atomic_inc(&bt_service.error_count);
+
+	/* The A-Box does not stop using the shared memory immediately, so
+	 * zero it before it is freed to prevent noise during recovery */
+	mutex_lock(&bt_audio_mutex);
+	if (bt_service.abox_ref != 0 && bt_audio.abox_virtual) {
+		memset(bt_audio.abox_virtual->abox_to_bt_streaming_if_data,
+		       0, SCSC_BT_AUDIO_ABOX_DATA_SIZE);
+		memset(bt_audio.abox_virtual->bt_to_abox_streaming_if_data,
+		       0, SCSC_BT_AUDIO_ABOX_DATA_SIZE);
+	}
+	mutex_unlock(&bt_audio_mutex);
+}
+
+/* Service-client callback after a panic has been handled: wake any reader
+ * blocked on the BT character device so it can observe the error state. */
+static void bt_failure_reset(struct scsc_service_client *caller, u16 panic_code)
+{
+	UNUSED(caller);
+	UNUSED(panic_code);
+
+	SCSC_TAG_ERR(BT_COMMON, "\n");
+	wake_up(&bt_service.read_wait);
+}
+
+#ifdef CONFIG_SCSC_ANT
+static void ant_stop_on_failure(struct scsc_service_client *client)
+{
+ UNUSED(client);
+
+ SCSC_TAG_ERR(BT_COMMON, "\n");
+
+ reinit_completion(&ant_service.recovery_probe_complete);
+ ant_recovery_in_progress = 1;
+
+ atomic_inc(&ant_service.error_count);
+
+ /* Let the ANT stack call poll() to be notified about the reset asap */
+ wake_up(&ant_service.read_wait);
+}
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+/* ANT panic-handled callback: wake any blocked ANT reader */
+static void ant_failure_reset(struct scsc_service_client *caller, u16 panic_code)
+{
+	UNUSED(caller);
+	UNUSED(panic_code);
+
+	SCSC_TAG_ERR(BT_COMMON, "\n");
+	wake_up(&ant_service.read_wait);
+}
+#endif
+
+/* MIF interrupt handler for firmware-information interrupts: acknowledge
+ * the interrupt bit, count it, and wake waiters on info_wait. */
+static void scsc_bt_shm_irq_handler(int irqbit, void *data)
+{
+ /* Clear interrupt */
+ scsc_service_mifintrbit_bit_clear(bt_service.service, irqbit);
+
+ bt_info_interrupt++;
+
+ wake_up(&bt_service.info_wait);
+}
+
+/* Failure-handling callbacks registered with mx-core for the BT service */
+static struct scsc_service_client mx_bt_client = {
+ .stop_on_failure = bt_stop_on_failure,
+ .failure_reset = bt_failure_reset,
+};
+
+#ifdef CONFIG_SCSC_ANT
+/* Failure-handling callbacks registered with mx-core for the ANT service */
+static struct scsc_service_client mx_ant_client = {
+ .stop_on_failure = ant_stop_on_failure,
+ .failure_reset = ant_failure_reset,
+};
+#endif
+
+/* Unregister the firmware-information interrupt pair (to-host and
+ * from-host) recorded in the BSMHCP protocol header. */
+static void slsi_sm_bt_service_cleanup_interrupts(void)
+{
+	u16 tohost_irq = bt_service.bsmhcp_protocol->header.info_bg_to_ap_int_src;
+	u16 fromhost_irq = bt_service.bsmhcp_protocol->header.info_ap_to_bg_int_src;
+
+	SCSC_TAG_DEBUG(BT_COMMON,
+		       "unregister firmware information interrupts\n");
+
+	scsc_service_mifintrbit_unregister_tohost(bt_service.service, tohost_irq);
+	scsc_service_mifintrbit_free_fromhost(bt_service.service, fromhost_irq,
+					      SCSC_MIFINTR_TARGET_R4);
+}
+
+/* Stop the BT service in the Maxwell core; only afterwards is it safe to
+ * release the shared memory resources.  Returns 0 on success or -EIO when
+ * the stop failed (forcing a service failure if no error was recorded). */
+static int slsi_sm_bt_service_cleanup_stop_service(void)
+{
+	int ret = scsc_mx_service_stop(bt_service.service);
+
+	if (!ret)
+		return 0;
+
+	SCSC_TAG_ERR(BT_COMMON,
+		     "scsc_mx_service_stop failed err: %d\n", ret);
+
+	if (atomic_read(&bt_service.error_count) == 0) {
+		scsc_mx_service_service_failed(bt_service.service, "BT service stop failed");
+		SCSC_TAG_DEBUG(BT_COMMON,
+			       "force service fail complete\n");
+	}
+
+	return -EIO;
+}
+
+/* Map the A-Box shared audio region into the audio device's IOMMU.
+ * Returns 0 on success or a negative error code.
+ *
+ * Fix: the physical address (phys_addr_t) was printed with %p, which
+ * expects a pointer; use the kernel's %pap extension, which takes the
+ * phys_addr_t by reference.
+ */
+static int slsi_bt_audio_probe(void)
+{
+	phys_addr_t paddr;
+	size_t size;
+
+	if (audio_device == NULL || bt_audio.dev_iommu_map == NULL) {
+		SCSC_TAG_ERR(BT_COMMON, "failed audio_device %p bt_audio.dev_iommu_map %p\n",
+			     audio_device, bt_audio.dev_iommu_map);
+		return -EFAULT;
+	}
+
+	paddr = (phys_addr_t)bt_audio.abox_physical;
+	size = PAGE_ALIGN(sizeof(*bt_audio.abox_physical));
+
+	SCSC_TAG_DEBUG(BT_COMMON, "paddr %pap size %zu\n", &paddr, size);
+
+	return bt_audio.dev_iommu_map(audio_device, paddr, size);
+}
+
+/* Undo slsi_bt_audio_probe().  Note: the A-Box memory must only be
+ * unmapped once the A-Box driver is finished with it. */
+static void slsi_bt_audio_remove(void)
+{
+	if (!audio_device || !bt_audio.dev_iommu_unmap || !bt_audio.abox_physical)
+		return;
+
+	bt_audio.dev_iommu_unmap(audio_device,
+				 PAGE_ALIGN(sizeof(*bt_audio.abox_physical)));
+}
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+/* Log-collector callback: write the cached BT configuration file (HCF)
+ * into the collected log.  Returns 0 when nothing is cached. */
+static int bt_hcf_collect(struct scsc_log_collector_client *collect_client, size_t size)
+{
+	struct scsc_bt_hcf_collection *collection;
+
+	collection = (struct scsc_bt_hcf_collection *)collect_client->prv;
+	if (!collection)
+		return 0;
+
+	SCSC_TAG_DEBUG(BT_COMMON, "Collecting BT config file\n");
+	return scsc_log_collector_write(collection->hcf, collection->hcf_size, 1);
+}
+
+/* Log-collector registration for the BT configuration (HCF) chunk; .prv is
+ * pointed at the live scsc_bt_hcf_collection while the service runs and is
+ * cleared again in slsi_sm_bt_service_cleanup() */
+struct scsc_log_collector_client bt_collect_hcf_client = {
+ .name = "bt_hcf",
+ .type = SCSC_LOG_CHUNK_BT_HCF,
+ .collect_init = NULL,
+ .collect = bt_hcf_collect,
+ .collect_end = NULL,
+ .prv = NULL,
+};
+#endif
+
+/* Tear down the BT service: optionally stop it in the Maxwell core, then
+ * release interrupts, the protocol/shared memory, the audio references and
+ * finally close the service.  Returns 0 on success, or -EIO when the stop
+ * failed - in that case recovery is expected to finish the deinit later. */
+static int slsi_sm_bt_service_cleanup(bool allow_service_stop)
+{
+	SCSC_TAG_DEBUG(BT_COMMON, "enter (service=%p)\n", bt_service.service);
+
+	if (NULL != bt_service.service) {
+		SCSC_TAG_DEBUG(BT_COMMON, "stopping debugging thread\n");
+
+		/* If slsi_sm_bt_service_cleanup_stop_service fails, then let
+		   recovery do the rest of the deinit later. */
+		if (!bt_recovery_in_progress && allow_service_stop)
+			if (slsi_sm_bt_service_cleanup_stop_service() < 0) {
+				SCSC_TAG_DEBUG(BT_COMMON, "slsi_sm_bt_service_cleanup_stop_service failed. Recovery has been triggered\n");
+				goto done_error;
+			}
+
+		/* Service is stopped - ensure polling function is existed */
+		SCSC_TAG_DEBUG(BT_COMMON, "wake reader/poller thread\n");
+		wake_up_interruptible(&bt_service.read_wait);
+
+		/* Unregister firmware information interrupts */
+		if (bt_service.bsmhcp_protocol)
+			slsi_sm_bt_service_cleanup_interrupts();
+
+		/* Shut down the shared memory interface */
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"cleanup protocol structure and main interrupts\n");
+		scsc_bt_shm_exit();
+
+		/* Cleanup AVDTP detections */
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"cleanup ongoing avdtp detections\n");
+		scsc_avdtp_detect_exit();
+
+		/* Drop the audio references under the audio mutex so the
+		 * audio path cannot race with the teardown */
+		mutex_lock(&bt_audio_mutex);
+		if (audio_device) {
+			bt_audio.dev = NULL;
+			bt_audio.abox_virtual = NULL;
+			bt_audio.abox_physical = NULL;
+		}
+		mutex_unlock(&bt_audio_mutex);
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+		/* Deinit HCF log collection */
+		scsc_log_collector_unregister_client(&bt_collect_hcf_client);
+		bt_collect_hcf_client.prv = NULL;
+
+		if (bt_service.hcf_collection.hcf) {
+			/* Reset HCF pointer - memory will be freed later */
+			bt_service.hcf_collection.hcf_size = 0;
+			bt_service.hcf_collection.hcf = NULL;
+		}
+#endif
+
+		/* Release the shared memory */
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"free memory allocated in the shared DRAM pool\n");
+		if (bt_service.config_ref != 0) {
+			scsc_mx_service_mifram_free(bt_service.service,
+					bt_service.config_ref);
+			bt_service.config_ref = 0;
+		}
+		if (bt_service.bsmhcp_ref != 0) {
+			scsc_mx_service_mifram_free(bt_service.service,
+					bt_service.bsmhcp_ref);
+			bt_service.bsmhcp_ref = 0;
+		}
+		if (bt_service.bhcs_ref != 0) {
+			scsc_mx_service_mifram_free(bt_service.service,
+					bt_service.bhcs_ref);
+			bt_service.bhcs_ref = 0;
+		}
+
+		SCSC_TAG_DEBUG(BT_COMMON, "closing service...\n");
+		if (0 != scsc_mx_service_close(bt_service.service)) {
+			int retry_counter, r;
+
+			SCSC_TAG_DEBUG(BT_COMMON,
+				"scsc_mx_service_close failed\n");
+
+			/**
+			 * Error handling in progress - try and close again
+			 * later. The service close call shall remain blocked
+			 * until close service is successful. Will try up to
+			 * 30 seconds.
+			 */
+			for (retry_counter = 0;
+			     SLSI_BT_SERVICE_CLOSE_RETRY > retry_counter;
+			     retry_counter++) {
+				msleep(500);
+				r = scsc_mx_service_close(bt_service.service);
+				if (r == 0) {
+					SCSC_TAG_DEBUG(BT_COMMON,
+						"scsc_mx_service_close closed after %d attempts\n",
+						retry_counter + 1);
+					break;
+				}
+			}
+
+			if (retry_counter + 1 == SLSI_BT_SERVICE_CLOSE_RETRY)
+				SCSC_TAG_ERR(BT_COMMON, "scsc_mx_service_close failed %d times\n",
+					SLSI_BT_SERVICE_CLOSE_RETRY);
+		}
+		bt_service.service = NULL;
+
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"notify the KIC subsystem of the shutdown\n");
+		slsi_kic_system_event(
+			slsi_kic_system_event_category_deinitialisation,
+			slsi_kic_system_events_bt_off,
+			0);
+	}
+
+	atomic_set(&bt_service.error_count, 0);
+
+	/* Release write wake lock if held */
+	if (wake_lock_active(&bt_service.write_wake_lock)) {
+		bt_service.write_wake_unlock_count++;
+		wake_unlock(&bt_service.write_wake_lock);
+	}
+
+	SCSC_TAG_DEBUG(BT_COMMON, "complete\n");
+	return 0;
+
+done_error:
+	return -EIO;
+}
+
+#ifdef CONFIG_SCSC_ANT
+/*
+ * Stop the ANT firmware service prior to releasing its shared memory.
+ *
+ * Returns 0 when the service stopped (or when an error is already being
+ * handled, i.e. error_count is non-zero), -EIO when the stop failed and a
+ * service failure had to be forced to kick off recovery.
+ */
+static int slsi_sm_ant_service_cleanup_stop_service(void)
+{
+	/* Stop service first, then it's safe to release shared memory
+	 * resources
+	 */
+	int ret = scsc_mx_service_stop(ant_service.service);
+
+	if (ret == 0)
+		return 0;
+
+	SCSC_TAG_ERR(BT_COMMON,
+		     "scsc_mx_service_stop failed err: %d\n", ret);
+
+	/* An error already in flight means recovery will clean up for us. */
+	if (atomic_read(&ant_service.error_count) != 0)
+		return 0;
+
+	scsc_mx_service_service_failed(ant_service.service, "ANT service stop failed");
+	SCSC_TAG_DEBUG(BT_COMMON,
+		       "force service fail complete\n");
+	return -EIO;
+}
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+/*
+ * Tear down the ANT service and release every resource it owns.
+ *
+ * @allow_service_stop: when true (and no recovery is in progress) the
+ *                      firmware service is stopped first; when false the
+ *                      stop step is skipped and only local resources are
+ *                      released.
+ *
+ * Returns 0 on success, or -EIO when the service could not be stopped
+ * (recovery is then expected to finish the de-initialisation later).
+ */
+static int slsi_sm_ant_service_cleanup(bool allow_service_stop)
+{
+	SCSC_TAG_DEBUG(BT_COMMON, "enter (service=%p)\n", ant_service.service);
+
+	if (ant_service.service != NULL) {
+		SCSC_TAG_DEBUG(BT_COMMON, "stopping debugging thread\n");
+
+		/* If slsi_sm_ant_service_cleanup_stop_service fails, then let
+		 * recovery do the rest of the deinit later.
+		 */
+		if (!ant_recovery_in_progress && allow_service_stop)
+			if (slsi_sm_ant_service_cleanup_stop_service() < 0) {
+				SCSC_TAG_DEBUG(BT_COMMON,
+					"slsi_sm_ant_service_cleanup_stop_service failed. Recovery has been triggered\n");
+				goto done_error;
+			}
+
+		/* Service is stopped - ensure polling function is exited */
+		SCSC_TAG_DEBUG(BT_COMMON, "wake reader/poller thread\n");
+		wake_up_interruptible(&ant_service.read_wait);
+
+		/* Shut down the shared memory interface */
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"cleanup protocol structure and main interrupts\n");
+		scsc_ant_shm_exit();
+
+		/* Release the shared memory */
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"free memory allocated in the shared DRAM pool\n");
+		if (ant_service.config_ref != 0) {
+			scsc_mx_service_mifram_free(ant_service.service,
+					ant_service.config_ref);
+			ant_service.config_ref = 0;
+		}
+		if (ant_service.asmhcp_ref != 0) {
+			scsc_mx_service_mifram_free(ant_service.service,
+					ant_service.asmhcp_ref);
+			ant_service.asmhcp_ref = 0;
+		}
+		if (ant_service.bhcs_ref != 0) {
+			scsc_mx_service_mifram_free(ant_service.service,
+					ant_service.bhcs_ref);
+			ant_service.bhcs_ref = 0;
+		}
+
+		SCSC_TAG_DEBUG(BT_COMMON, "closing ant service...\n");
+		if (scsc_mx_service_close(ant_service.service) != 0) {
+			int retry_counter;
+			int r = -EIO;
+
+			SCSC_TAG_DEBUG(BT_COMMON,
+				"scsc_mx_service_close failed\n");
+
+			/*
+			 * Error handling in progress - try and close again
+			 * later. The service close call shall remain blocked
+			 * until close service is successful. Will try up to
+			 * 30 seconds.
+			 */
+			for (retry_counter = 0;
+			     retry_counter < SLSI_BT_SERVICE_CLOSE_RETRY;
+			     retry_counter++) {
+				msleep(500);
+				r = scsc_mx_service_close(ant_service.service);
+				if (r == 0) {
+					SCSC_TAG_DEBUG(BT_COMMON,
+						"scsc_mx_service_close closed after %d attempts\n",
+						retry_counter + 1);
+					break;
+				}
+			}
+
+			/* Report based on the last close result, not the loop
+			 * index: the previous "retry_counter + 1 ==
+			 * SLSI_BT_SERVICE_CLOSE_RETRY" test logged a failure
+			 * when the final retry actually succeeded, and stayed
+			 * silent when every retry failed.
+			 */
+			if (r != 0)
+				SCSC_TAG_ERR(BT_COMMON,
+					"scsc_mx_service_close failed %d times\n",
+					SLSI_BT_SERVICE_CLOSE_RETRY);
+		}
+		ant_service.service = NULL;
+
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"notify the KIC subsystem of the shutdown\n");
+		slsi_kic_system_event(
+			slsi_kic_system_event_category_deinitialisation,
+			slsi_kic_system_events_ant_off,
+			0);
+	}
+
+	atomic_set(&ant_service.error_count, 0);
+
+	SCSC_TAG_DEBUG(BT_COMMON, "complete\n");
+	return 0;
+
+done_error:
+	return -EIO;
+}
+#endif
+
+/*
+ * Populate the BHCS (boot/host configuration structure) handed to firmware.
+ *
+ * @service:         service whose MIF RAM the config is allocated from
+ * @bhcs:            mapped BHCS structure to fill in
+ * @protocol_ref:    MIF reference of the protocol structure
+ * @protocol_length: size of the protocol structure in bytes
+ * @config_ref:      out: MIF reference of the loaded .hcf config (0 if none)
+ * @bhcs_ref:        MIF reference of @bhcs (used only for diagnostics here)
+ *
+ * Loads the optional SCSC_BT_CONF firmware configuration into shared DRAM
+ * and resolves the Bluetooth device address (module parameter, SoC unique
+ * id on Exynos, or the optional SCSC_BT_ADDR file). A missing or empty
+ * configuration is silently ignored. Returns 0 on success or a negative
+ * errno on allocation/mapping failure.
+ */
+static int setup_bhcs(struct scsc_service *service,
+		      struct BHCS *bhcs,
+		      uint32_t protocol_ref,
+		      uint32_t protocol_length,
+		      scsc_mifram_ref *config_ref,
+		      scsc_mifram_ref *bhcs_ref)
+{
+	int err = 0;
+	unsigned char *conf_ptr;
+	const struct firmware *firm = NULL;
+
+	/* Fill the configuration information */
+	bhcs->version = BHCS_VERSION;
+	bhcs->bsmhcp_protocol_offset = protocol_ref;
+	bhcs->bsmhcp_protocol_length = protocol_length;
+	bhcs->configuration_offset = 0;
+	bhcs->configuration_length = 0;
+	bhcs->bluetooth_address_lap = 0;
+	bhcs->bluetooth_address_uap = 0;
+	bhcs->bluetooth_address_nap = 0;
+
+	/* Request the configuration file */
+	SCSC_TAG_DEBUG(BT_COMMON,
+		"loading configuration: " SCSC_BT_CONF "\n");
+	err = mx140_file_request_conf(common_service.maxwell_core,
+				      &firm, "bluetooth", SCSC_BT_CONF);
+	if (err) {
+		/* Not found - just silently ignore this */
+		SCSC_TAG_DEBUG(BT_COMMON, "configuration not found\n");
+		*config_ref = 0;
+	} else if (firm && firm->size) {
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"configuration size = %zu\n", firm->size);
+
+		/* Allocate a region for the data */
+		err = scsc_mx_service_mifram_alloc(service,
+						   firm->size,
+						   config_ref,
+						   BSMHCP_ALIGNMENT);
+		if (err) {
+			SCSC_TAG_WARNING(BT_COMMON, "mifram alloc failed\n");
+			mx140_file_release_conf(common_service.maxwell_core, firm);
+			return -EINVAL;
+		}
+
+		/* Map the region to a memory pointer */
+		conf_ptr = scsc_mx_service_mif_addr_to_ptr(service,
+							   *config_ref);
+		if (conf_ptr == NULL) {
+			/* It is the config region that failed to map, so
+			 * report config_ref (the old message wrongly printed
+			 * bhcs_ref here).
+			 */
+			SCSC_TAG_ERR(BT_COMMON,
+				"couldn't map kmem to config_ref 0x%08x\n",
+				(u32)*config_ref);
+			mx140_file_release_conf(common_service.maxwell_core, firm);
+			return -EINVAL;
+		}
+
+		/* Copy the configuration data to the shared memory area */
+		memcpy(conf_ptr, firm->data, firm->size);
+		bhcs->configuration_offset = *config_ref;
+		bhcs->configuration_length = firm->size;
+
+		/* Release the configuration information */
+		mx140_file_release_conf(common_service.maxwell_core, firm);
+		firm = NULL;
+	} else {
+		/* Empty configuration - just silently ignore this */
+		SCSC_TAG_DEBUG(BT_COMMON, "empty configuration\n");
+		*config_ref = 0;
+
+		/* Release the configuration information */
+		mx140_file_release_conf(common_service.maxwell_core, firm);
+		firm = NULL;
+	}
+
+#ifdef CONFIG_ARCH_EXYNOS
+	/* Derive a default address from the SoC unique id */
+	bhcs->bluetooth_address_nap =
+		(exynos_soc_info.unique_id & 0x000000FFFF00) >> 8;
+	bhcs->bluetooth_address_uap =
+		(exynos_soc_info.unique_id & 0x0000000000FF);
+	bhcs->bluetooth_address_lap =
+		(exynos_soc_info.unique_id & 0xFFFFFF000000) >> 24;
+#endif
+
+	/* A stack-supplied address (module parameter) overrides the default */
+	if (bluetooth_address) {
+		SCSC_TAG_INFO(BT_COMMON,
+			"using stack supplied Bluetooth address\n");
+		bhcs->bluetooth_address_nap =
+			(bluetooth_address & 0xFFFF00000000) >> 32;
+		bhcs->bluetooth_address_uap =
+			(bluetooth_address & 0x0000FF000000) >> 24;
+		bhcs->bluetooth_address_lap =
+			(bluetooth_address & 0x000000FFFFFF);
+	}
+
+#ifdef SCSC_BT_ADDR
+	/* Request the Bluetooth address file */
+	SCSC_TAG_DEBUG(BT_COMMON,
+		"loading Bluetooth address configuration file: "
+		SCSC_BT_ADDR "\n");
+	err = mx140_request_file(common_service.maxwell_core, SCSC_BT_ADDR, &firm);
+	if (err) {
+		/* Not found - just silently ignore this */
+		SCSC_TAG_DEBUG(BT_COMMON, "Bluetooth address not found\n");
+	} else if (firm && firm->size) {
+		u32 u[SCSC_BT_ADDR_LEN];
+
+		/* NOTE(review): sscanf assumes firm->data is NUL-terminated;
+		 * confirm the address file is always text with a terminator.
+		 */
+#ifdef CONFIG_SCSC_BT_BLUEZ
+		/* Convert the data into a native format */
+		if (sscanf(firm->data, "%04x %02X %06x",
+			   &u[0], &u[1], &u[2])
+		    == SCSC_BT_ADDR_LEN) {
+			bhcs->bluetooth_address_lap = u[2];
+			bhcs->bluetooth_address_uap = u[1];
+			bhcs->bluetooth_address_nap = u[0];
+		} else
+			SCSC_TAG_WARNING(BT_COMMON,
+				"data size incorrect = %zu\n", firm->size);
+#else
+		/* Convert the data into a native format */
+		if (sscanf(firm->data, "%02X:%02X:%02X:%02X:%02X:%02X",
+			   &u[0], &u[1], &u[2], &u[3], &u[4], &u[5])
+		    == SCSC_BT_ADDR_LEN) {
+			bhcs->bluetooth_address_lap =
+				(u[3] << 16) | (u[4] << 8) | u[5];
+			bhcs->bluetooth_address_uap = u[2];
+			bhcs->bluetooth_address_nap = (u[0] << 8) | u[1];
+		} else
+			SCSC_TAG_WARNING(BT_COMMON,
+				"data size incorrect = %zu\n", firm->size);
+#endif
+		/* Release the configuration information */
+		mx140_release_file(common_service.maxwell_core, firm);
+		firm = NULL;
+	} else {
+		SCSC_TAG_DEBUG(BT_COMMON, "empty Bluetooth address\n");
+		mx140_release_file(common_service.maxwell_core, firm);
+		firm = NULL;
+	}
+#endif
+
+#ifdef CONFIG_SCSC_DEBUG
+	SCSC_TAG_DEBUG(BT_COMMON, "Bluetooth address: %04X:%02X:%06X\n",
+		       bhcs->bluetooth_address_nap,
+		       bhcs->bluetooth_address_uap,
+		       bhcs->bluetooth_address_lap);
+
+	/* Always print Bluetooth Address in Kernel log */
+	printk(KERN_INFO "Bluetooth address: %04X:%02X:%06X\n",
+	       bhcs->bluetooth_address_nap,
+	       bhcs->bluetooth_address_uap,
+	       bhcs->bluetooth_address_lap);
+#endif /* CONFIG_SCSC_DEBUG */
+
+	return err;
+}
+
+/* Start the BT service.
+ *
+ * Opens the SCSC Maxwell BT service, allocates the shared-memory regions
+ * (BHCS boot config, BSMHCP protocol, optional A-Box audio region),
+ * populates the boot configuration via setup_bhcs() and finally starts the
+ * firmware service. Reference-counted through bt_service.service_users:
+ * only the first caller performs the bring-up. Returns 0 on success or a
+ * negative errno; on failure everything set up so far is torn down again.
+ */
+int slsi_sm_bt_service_start(void)
+{
+ int err = 0;
+ struct BHCS *bhcs;
+
+ ++service_start_count;
+
+ /* Lock the start/stop procedures to handle multiple application
+ * starting the service
+ */
+ mutex_lock(&bt_start_mutex);
+
+ if (disable_service) {
+ SCSC_TAG_WARNING(BT_COMMON, "service disabled\n");
+ mutex_unlock(&bt_start_mutex);
+ return -EBUSY;
+ }
+
+ /* Refuse to start while a recovery cycle is still running */
+ if (bt_recovery_in_progress) {
+ SCSC_TAG_WARNING(BT_COMMON, "recovery in progress\n");
+ mutex_unlock(&bt_start_mutex);
+ return -EFAULT;
+ }
+
+ /* Has probe been called */
+ if (common_service.maxwell_core == NULL) {
+ SCSC_TAG_WARNING(BT_COMMON, "service probe not arrived\n");
+ mutex_unlock(&bt_start_mutex);
+ return -EFAULT;
+ }
+
+ /* Is this the first service to enter */
+ if (atomic_inc_return(&bt_service.service_users) > 1) {
+ SCSC_TAG_WARNING(BT_COMMON, "service already opened\n");
+ mutex_unlock(&bt_start_mutex);
+ return 0;
+ }
+
+ /* Open service - will download FW - will set MBOX0 with Starting
+ * address
+ */
+ SCSC_TAG_DEBUG(BT_COMMON,
+ "open Bluetooth service id %d opened %d times\n",
+ SCSC_SERVICE_ID_BT, service_start_count);
+ wake_lock(&bt_service.service_wake_lock);
+ bt_service.service = scsc_mx_service_open(common_service.maxwell_core,
+ SCSC_SERVICE_ID_BT,
+ &mx_bt_client,
+ &err);
+ if (!bt_service.service) {
+ SCSC_TAG_WARNING(BT_COMMON, "service open failed %d\n", err);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Shorter completion timeout if autorecovery is disabled, as it will
+ * never be signalled.
+ */
+ if (mxman_recovery_disabled())
+ recovery_timeout = SLSI_BT_SERVICE_STOP_RECOVERY_DISABLED_TIMEOUT;
+ else
+ recovery_timeout = SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT;
+
+ /* Get shared memory region for the configuration structure from
+ * the MIF
+ */
+ SCSC_TAG_DEBUG(BT_COMMON, "allocate mifram regions\n");
+ err = scsc_mx_service_mifram_alloc(bt_service.service,
+ sizeof(struct BHCS),
+ &bt_service.bhcs_ref,
+ BSMHCP_ALIGNMENT);
+ if (err) {
+ SCSC_TAG_WARNING(BT_COMMON, "mifram alloc failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Get shared memory region for the protocol structure from the MIF */
+ err = scsc_mx_service_mifram_alloc(bt_service.service,
+ sizeof(struct BSMHCP_PROTOCOL),
+ &bt_service.bsmhcp_ref,
+ BSMHCP_ALIGNMENT);
+ if (err) {
+ SCSC_TAG_WARNING(BT_COMMON, "mifram alloc failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* The A-Box driver must have registered before reaching this point
+ * otherwise there is no audio routing
+ */
+ if (audio_device != NULL) {
+ /* Get shared memory region for the A-Box structure from the MIF.
+ * The allocated memory is aligned to 4kB, but this is going to work
+ * only if the physical start address of the 4MB region is aligned
+ * to 4kB (which maybe will be always the case).
+ */
+
+ /* On 9610, do not unmap previously mapped memory from IOMMU.
+ * It may still be used by A-Box.
+ */
+
+ err = scsc_mx_service_mif_ptr_to_addr(bt_service.service,
+ scsc_mx_service_get_bt_audio_abox(bt_service.service),
+ &bt_service.abox_ref);
+ if (err) {
+ SCSC_TAG_WARNING(BT_COMMON, "scsc_mx_service_mif_ptr_to_addr failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ /* irrespective of the technical definition of probe - wrt to memory allocation it has been */
+
+ bt_audio.abox_virtual = (struct scsc_bt_audio_abox *)
+ scsc_mx_service_mif_addr_to_ptr(
+ bt_service.service,
+ bt_service.abox_ref);
+
+ /* Zero the whole region before publishing header fields */
+ memset(bt_audio.abox_virtual, 0, sizeof(struct scsc_bt_audio_abox));
+
+ bt_audio.abox_virtual->magic_value = SCSC_BT_AUDIO_ABOX_MAGIC_VALUE;
+ bt_audio.abox_virtual->version_major = SCSC_BT_AUDIO_ABOX_VERSION_MAJOR;
+ bt_audio.abox_virtual->version_minor = SCSC_BT_AUDIO_ABOX_VERSION_MINOR;
+ bt_audio.abox_virtual->abox_to_bt_streaming_if_0_size = SCSC_BT_AUDIO_ABOX_IF_0_SIZE;
+ bt_audio.abox_virtual->bt_to_abox_streaming_if_0_size = SCSC_BT_AUDIO_ABOX_IF_0_SIZE;
+ bt_audio.abox_virtual->abox_to_bt_streaming_if_1_size = SCSC_BT_AUDIO_ABOX_IF_1_SIZE;
+ bt_audio.abox_virtual->abox_to_bt_streaming_if_1_offset = SCSC_BT_AUDIO_ABOX_IF_0_SIZE;
+ bt_audio.abox_virtual->bt_to_abox_streaming_if_1_size = SCSC_BT_AUDIO_ABOX_IF_1_SIZE;
+ bt_audio.abox_virtual->bt_to_abox_streaming_if_1_offset = SCSC_BT_AUDIO_ABOX_IF_0_SIZE;
+
+ /* Resolve the physical address of the structure */
+ bt_audio.abox_physical = (struct scsc_bt_audio_abox *)scsc_mx_service_mif_addr_to_phys(
+ bt_service.service,
+ bt_service.abox_ref);
+
+
+ bt_audio.dev = bt_service.dev;
+ }
+
+ /* Map the configuration pointer */
+ bhcs = (struct BHCS *) scsc_mx_service_mif_addr_to_ptr(
+ bt_service.service,
+ bt_service.bhcs_ref);
+ if (bhcs == NULL) {
+ SCSC_TAG_ERR(BT_COMMON,
+ "couldn't map kmem to bhcs_ref 0x%08x\n",
+ (u32)bt_service.bhcs_ref);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ SCSC_TAG_INFO(BT_COMMON,
+ "regions (bhcs_ref=0x%08x, bsmhcp_ref=0x%08x, config_ref=0x%08x, abox_ref=0x%08x)\n",
+ bt_service.bhcs_ref,
+ bt_service.bsmhcp_ref,
+ bt_service.config_ref,
+ bt_service.abox_ref);
+ SCSC_TAG_INFO(BT_COMMON, "version=%u\n", BHCS_VERSION);
+
+ /* Fill BHCS and load the optional firmware configuration file */
+ err = setup_bhcs(bt_service.service,
+ bhcs,
+ bt_service.bsmhcp_ref,
+ sizeof(struct BSMHCP_PROTOCOL),
+ &bt_service.config_ref,
+ &bt_service.bhcs_ref);
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ /* Save the binary BT config ref and register for
+ * log collector to collect the hcf file
+ */
+ if (bhcs->configuration_length > 0) {
+ bt_service.hcf_collection.hcf =
+ scsc_mx_service_mif_addr_to_ptr(bt_service.service,
+ bt_service.config_ref);
+ bt_service.hcf_collection.hcf_size = bhcs->configuration_length;
+ bt_collect_hcf_client.prv = &bt_service.hcf_collection;
+ scsc_log_collector_register_client(&bt_collect_hcf_client);
+ }
+#endif
+
+ /* setup_bhcs() signals hard failure with -EINVAL only */
+ if (err == -EINVAL)
+ goto exit;
+
+ /* Initialise the shared-memory interface */
+ err = scsc_bt_shm_init();
+ if (err) {
+ SCSC_TAG_ERR(BT_COMMON, "scsc_bt_shm_init err %d\n", err);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Wire up host<->firmware interrupt sources in the protocol header */
+ bt_service.bsmhcp_protocol->header.info_ap_to_bg_int_src =
+ scsc_service_mifintrbit_alloc_fromhost(bt_service.service,
+ SCSC_MIFINTR_TARGET_R4);
+ bt_service.bsmhcp_protocol->header.info_bg_to_ap_int_src =
+ scsc_service_mifintrbit_register_tohost(bt_service.service,
+ scsc_bt_shm_irq_handler, NULL);
+ bt_service.bsmhcp_protocol->header.mxlog_filter = firmware_mxlog_filter;
+ bt_service.bsmhcp_protocol->header.firmware_control = firmware_control;
+ bt_service.bsmhcp_protocol->header.abox_offset = bt_service.abox_ref;
+ bt_service.bsmhcp_protocol->header.abox_length = sizeof(struct scsc_bt_audio_abox);
+
+ SCSC_TAG_DEBUG(BT_COMMON,
+ "firmware_control=0x%08x, firmware_control_reset=%u\n",
+ firmware_control, firmware_control_reset);
+
+ /* firmware_control is a one-shot value when reset is requested */
+ if (firmware_control_reset)
+ firmware_control = 0;
+
+ /* Start service last - after setting up shared memory resources */
+ SCSC_TAG_DEBUG(BT_COMMON, "starting Bluetooth service\n");
+ err = scsc_mx_service_start(bt_service.service, bt_service.bhcs_ref);
+ if (err) {
+ SCSC_TAG_ERR(BT_COMMON, "scsc_mx_service_start err %d\n", err);
+ err = -EINVAL;
+ } else {
+ SCSC_TAG_DEBUG(BT_COMMON, "Bluetooth service running\n");
+ slsi_kic_system_event(
+ slsi_kic_system_event_category_initialisation,
+ slsi_kic_system_events_bt_on, 0);
+
+ mutex_lock(&bt_audio_mutex);
+ if (audio_device && !audio_device_probed) {
+ err = slsi_bt_audio_probe();
+
+ /* NOTE(review): probed flag is set even if
+ * slsi_bt_audio_probe() returned an error - confirm
+ * this is intentional (no re-probe on next start).
+ */
+ audio_device_probed = true;
+ }
+ mutex_unlock(&bt_audio_mutex);
+ }
+
+ if (bt_service.bsmhcp_protocol->header.firmware_features &
+ BSMHCP_FEATURE_M4_INTERRUPTS)
+ SCSC_TAG_DEBUG(BT_COMMON, "features enabled: M4_INTERRUPTS\n");
+
+exit:
+ /* On any failure, undo the partial bring-up and drop our user ref */
+ if (err) {
+ if (slsi_sm_bt_service_cleanup(false) == 0)
+ atomic_dec(&bt_service.service_users);
+ }
+
+ wake_unlock(&bt_service.service_wake_lock);
+ mutex_unlock(&bt_start_mutex);
+ return err;
+}
+
+#ifdef CONFIG_SCSC_ANT
+/* Start the ANT service.
+ *
+ * Mirrors slsi_sm_bt_service_start() for the ANT service: opens the SCSC
+ * Maxwell ANT service, allocates the BHCS and ASMHCP shared-memory regions,
+ * fills the boot configuration via setup_bhcs() and starts the firmware
+ * service. Reference-counted through ant_service.service_users. Returns 0
+ * on success or a negative errno; partial setup is undone on failure.
+ */
+int slsi_sm_ant_service_start(void)
+{
+ int err = 0;
+ struct BHCS *bhcs;
+
+ ++ant_service_start_count;
+
+ /* Lock the start/stop procedures to handle multiple application
+ * starting the service
+ */
+ mutex_lock(&ant_start_mutex);
+
+ if (disable_service) {
+ SCSC_TAG_WARNING(BT_COMMON, "service disabled\n");
+ mutex_unlock(&ant_start_mutex);
+ return -EBUSY;
+ }
+
+ /* Has probe been called */
+ if (common_service.maxwell_core == NULL) {
+ SCSC_TAG_WARNING(BT_COMMON, "service probe not arrived\n");
+ mutex_unlock(&ant_start_mutex);
+ return -EFAULT;
+ }
+
+ /* Is this the first service to enter */
+ if (atomic_inc_return(&ant_service.service_users) > 1) {
+ SCSC_TAG_WARNING(BT_COMMON, "service already opened\n");
+ mutex_unlock(&ant_start_mutex);
+ return 0;
+ }
+
+ /* Open service - will download FW - will set MBOX0 with Starting
+ * address
+ */
+ SCSC_TAG_DEBUG(BT_COMMON,
+ "open ANT service id %d opened %d times\n",
+ SCSC_SERVICE_ID_ANT, ant_service_start_count);
+
+ wake_lock(&ant_service.service_wake_lock);
+ ant_service.service = scsc_mx_service_open(common_service.maxwell_core,
+ SCSC_SERVICE_ID_ANT,
+ &mx_ant_client,
+ &err);
+ if (!ant_service.service) {
+ SCSC_TAG_WARNING(BT_COMMON, "ant service open failed %d\n", err);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Shorter completion timeout if autorecovery is disabled, as it will
+ * never be signalled.
+ */
+ if (mxman_recovery_disabled())
+ recovery_timeout = SLSI_BT_SERVICE_STOP_RECOVERY_DISABLED_TIMEOUT;
+ else
+ recovery_timeout = SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT;
+
+
+ /* Get shared memory region for the configuration structure from
+ * the MIF
+ */
+ SCSC_TAG_DEBUG(BT_COMMON, "allocate mifram regions\n");
+ err = scsc_mx_service_mifram_alloc(ant_service.service,
+ sizeof(struct BHCS),
+ &ant_service.bhcs_ref,
+ BSMHCP_ALIGNMENT);
+ if (err) {
+ SCSC_TAG_WARNING(BT_COMMON, "mifram alloc failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Get shared memory region for the protocol structure from the MIF */
+ err = scsc_mx_service_mifram_alloc(ant_service.service,
+ sizeof(struct ASMHCP_PROTOCOL),
+ &ant_service.asmhcp_ref,
+ BSMHCP_ALIGNMENT);
+ if (err) {
+ SCSC_TAG_WARNING(BT_COMMON, "mifram alloc failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Map the configuration pointer */
+ bhcs = (struct BHCS *) scsc_mx_service_mif_addr_to_ptr(
+ ant_service.service,
+ ant_service.bhcs_ref);
+ if (bhcs == NULL) {
+ SCSC_TAG_ERR(BT_COMMON,
+ "couldn't map kmem to bhcs_ref 0x%08x\n",
+ (u32)ant_service.bhcs_ref);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* NOTE(review): the log label says "bsmhcp_ref" but the value printed
+ * is asmhcp_ref - label is misleading, value is correct.
+ */
+ SCSC_TAG_INFO(BT_COMMON,
+ "regions (bhcs_ref=0x%08x, bsmhcp_ref=0x%08x, config_ref=0x%08x)\n",
+ ant_service.bhcs_ref,
+ ant_service.asmhcp_ref,
+ ant_service.config_ref);
+ SCSC_TAG_INFO(BT_COMMON, "version=%u\n", BHCS_VERSION);
+
+ /* Fill BHCS and load the optional firmware configuration file */
+ err = setup_bhcs(ant_service.service,
+ bhcs,
+ ant_service.asmhcp_ref,
+ sizeof(struct ASMHCP_PROTOCOL),
+ &ant_service.config_ref,
+ &ant_service.bhcs_ref);
+
+ /* setup_bhcs() signals hard failure with -EINVAL only */
+ if (err == -EINVAL)
+ goto exit;
+
+ /* Initialise the shared-memory interface */
+ err = scsc_ant_shm_init();
+ if (err) {
+ SCSC_TAG_ERR(BT_COMMON, "scsc_ant_shm_init err %d\n", err);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ ant_service.asmhcp_protocol->header.mxlog_filter = firmware_mxlog_filter;
+ ant_service.asmhcp_protocol->header.firmware_control = firmware_control;
+
+ SCSC_TAG_DEBUG(BT_COMMON,
+ "firmware_control=0x%08x, firmware_control_reset=%u\n",
+ firmware_control, firmware_control_reset);
+
+ /* firmware_control is a one-shot value when reset is requested */
+ if (firmware_control_reset)
+ firmware_control = 0;
+
+ /* Start service last - after setting up shared memory resources */
+ SCSC_TAG_DEBUG(BT_COMMON, "starting ANT service\n");
+ err = scsc_mx_service_start(ant_service.service, ant_service.bhcs_ref);
+ if (err) {
+ SCSC_TAG_ERR(BT_COMMON, "scsc_mx_service_start err %d\n", err);
+ err = -EINVAL;
+ } else {
+ SCSC_TAG_DEBUG(BT_COMMON, "Ant service running\n");
+ slsi_kic_system_event(
+ slsi_kic_system_event_category_initialisation,
+ slsi_kic_system_events_ant_on, 0);
+ }
+
+exit:
+ /* On any failure, undo the partial bring-up and drop our user ref */
+ if (err) {
+ if (slsi_sm_ant_service_cleanup(false) == 0)
+ atomic_dec(&ant_service.service_users);
+ }
+
+ wake_unlock(&ant_service.service_wake_lock);
+ mutex_unlock(&ant_start_mutex);
+ return err;
+}
+#endif
+
+/* Stop the BT service, dropping one user reference.
+ * Only the last user triggers the full cleanup; returns -EIO when that
+ * cleanup fails (recovery takes over), 0 otherwise.
+ */
+static int slsi_sm_bt_service_stop(void)
+{
+	SCSC_TAG_INFO(BT_COMMON, "bt service users %u\n", atomic_read(&bt_service.service_users));
+
+	if (atomic_read(&bt_service.service_users) > 1) {
+		atomic_dec(&bt_service.service_users);
+		return 0;
+	}
+
+	if (atomic_read(&bt_service.service_users) == 1) {
+		if (slsi_sm_bt_service_cleanup(true) != 0)
+			return -EIO;
+		atomic_dec(&bt_service.service_users);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_SCSC_ANT
+/* Stop the ANT service, dropping one user reference.
+ * Only the last user triggers the full cleanup; returns -EIO when that
+ * cleanup fails (recovery takes over), 0 otherwise.
+ */
+static int slsi_sm_ant_service_stop(void)
+{
+	SCSC_TAG_INFO(BT_COMMON, "ant service users %u\n", atomic_read(&ant_service.service_users));
+
+	if (atomic_read(&ant_service.service_users) > 1) {
+		atomic_dec(&ant_service.service_users);
+		return 0;
+	}
+
+	if (atomic_read(&ant_service.service_users) == 1) {
+		if (slsi_sm_ant_service_cleanup(true) != 0)
+			return -EIO;
+		atomic_dec(&ant_service.service_users);
+	}
+
+	return 0;
+}
+#endif
+
+/* open() handler for the H4 character device.
+ * Single-user device: a second open while h4_users is set fails with
+ * -EBUSY; the first open starts the BT service.
+ */
+static int scsc_bt_h4_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	SCSC_TAG_INFO(BT_COMMON, "(h4_users=%u)\n", bt_service.h4_users ? 1 : 0);
+
+	if (bt_service.h4_users)
+		return -EBUSY;
+
+	ret = slsi_sm_bt_service_start();
+	if (ret == 0)
+		bt_service.h4_users = true;
+
+	return ret;
+}
+
+/* release() handler for the H4 character device.
+ *
+ * Normal path: stop the service and reset the read/write bookkeeping.
+ * Recovery path (reached either because recovery was already in progress or
+ * via the goto from a failed/racing stop): signal
+ * recovery_release_complete so the remove callback can proceed, then wait -
+ * bounded by recovery_timeout - for the recovery probe to finish.
+ * Note the "recovery" label lives inside the else branch and is entered by
+ * goto from the if branch; statement order here is load-bearing.
+ */
+static int scsc_bt_h4_release(struct inode *inode, struct file *file)
+{
+ SCSC_TAG_INFO(BT_COMMON, "\n");
+
+ mutex_lock(&bt_start_mutex);
+ wake_lock(&bt_service.service_wake_lock);
+ if (!bt_recovery_in_progress) {
+ if (slsi_sm_bt_service_stop() == -EIO)
+ goto recovery;
+
+ /* Clear all control structures */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = 0;
+ bt_service.read_index = 0;
+ bt_service.h4_write_offset = 0;
+
+ bt_service.h4_users = false;
+
+ /* The recovery flag can be set in case of crossing release and
+ * recovery signaling. It's safe to check the flag here since
+ * the bt_start_mutex guarantees that the remove/probe callbacks
+ * will be called after the mutex is released. Jump to the
+ * normal recovery path.
+ */
+ if (bt_recovery_in_progress)
+ goto recovery;
+
+ wake_unlock(&bt_service.service_wake_lock);
+ mutex_unlock(&bt_start_mutex);
+ } else {
+ int ret;
+recovery:
+ complete_all(&bt_service.recovery_release_complete);
+ wake_unlock(&bt_service.service_wake_lock);
+ mutex_unlock(&bt_start_mutex);
+
+ /* Wait for the recovery probe; 0 means it timed out */
+ ret = wait_for_completion_timeout(&bt_service.recovery_probe_complete,
+ msecs_to_jiffies(recovery_timeout));
+ if (ret == 0)
+ SCSC_TAG_INFO(BT_COMMON, "recovery_probe_complete timeout\n");
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSC_ANT
+/* release() handler for the ANT character device.
+ *
+ * Mirrors scsc_bt_h4_release(): normal path stops the service and resets
+ * the read/write bookkeeping; recovery path (entered directly or via goto
+ * from a failed/racing stop) signals recovery_release_complete and waits -
+ * bounded by recovery_timeout - for the recovery probe to finish.
+ * The "recovery" label inside the else branch is entered by goto from the
+ * if branch; statement order here is load-bearing.
+ */
+static int scsc_ant_release(struct inode *inode, struct file *file)
+{
+ SCSC_TAG_INFO(BT_COMMON, "\n");
+
+ mutex_lock(&ant_start_mutex);
+ wake_lock(&ant_service.service_wake_lock);
+ if (!ant_recovery_in_progress) {
+ if (slsi_sm_ant_service_stop() == -EIO)
+ goto recovery;
+
+ /* Clear all control structures */
+ ant_service.read_offset = 0;
+ ant_service.read_operation = 0;
+ ant_service.read_index = 0;
+ ant_service.ant_write_offset = 0;
+
+ ant_service.ant_users = false;
+
+ /* The recovery flag can be set in case of crossing release and
+ * recovery signaling. It's safe to check the flag here since
+ * the bt_start_mutex guarantees that the remove/probe callbacks
+ * will be called after the mutex is released. Jump to the
+ * normal recovery path.
+ */
+ if (ant_recovery_in_progress)
+ goto recovery;
+
+ wake_unlock(&ant_service.service_wake_lock);
+ mutex_unlock(&ant_start_mutex);
+ } else {
+ int ret;
+recovery:
+ complete_all(&ant_service.recovery_release_complete);
+ wake_unlock(&ant_service.service_wake_lock);
+ mutex_unlock(&ant_start_mutex);
+
+ /* Wait for the recovery probe; 0 means it timed out */
+ ret = wait_for_completion_timeout(&ant_service.recovery_probe_complete,
+ msecs_to_jiffies(recovery_timeout));
+ if (ret == 0)
+ SCSC_TAG_INFO(BT_COMMON, "recovery_probe_complete timeout\n");
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+/* open() handler for the ANT character device.
+ * If a recovery cycle is running, wait (up to SCSC_ANT_MAX_TIMEOUT) for it
+ * to finish before starting. Single-user device: a second open while
+ * ant_users is set fails with -EBUSY.
+ */
+static int scsc_ant_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	SCSC_TAG_INFO(BT_COMMON, "(ant_users=%u)\n", ant_service.ant_users ? 1 : 0);
+
+	if (ant_recovery_in_progress) {
+		SCSC_TAG_WARNING(BT_COMMON, "recovery in progress\n");
+		wait_event_interruptible_timeout(ant_recovery_complete_queue,
+						 ant_recovery_in_progress == 0,
+						 SCSC_ANT_MAX_TIMEOUT);
+		if (ant_recovery_in_progress) {
+			SCSC_TAG_WARNING(BT_COMMON, "recovery timeout, aborting\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ant_service.ant_users)
+		return -EBUSY;
+
+	ret = slsi_sm_ant_service_start();
+	if (ret == 0)
+		ant_service.ant_users = true;
+
+	return ret;
+}
+#endif
+
+/*
+ * Ioctl handler for the virtual tty-like BT device node.
+ *
+ * Terminal ioctls (TCGETS/TCSETS) issued by user space against the H4 node
+ * are accepted and ignored; every other command is just logged. Always
+ * returns 0 so callers treating the node as a tty keep working.
+ *
+ * The stale UNUSED(cmd)/UNUSED(arg) markers were removed: both parameters
+ * are used by the switch below.
+ */
+static long scsc_default_ioctl(struct file *file,
+			       unsigned int cmd,
+			       unsigned long arg)
+{
+	UNUSED(file);
+
+	switch (cmd) {
+	case TCGETS:
+		SCSC_TAG_DEBUG(BT_COMMON, "TCGETS (arg=%lu)\n", arg);
+		break;
+	case TCSETS:
+		SCSC_TAG_DEBUG(BT_COMMON, "TCSETS (arg=%lu)\n", arg);
+		break;
+	default:
+		SCSC_TAG_DEBUG(BT_COMMON,
+			"trapped ioctl in virtual tty device, cmd %d arg %lu\n",
+			cmd, arg);
+		break;
+	}
+
+	return 0;
+}
+
+/* KIC test hook: deliberately provoke a BT firmware panic/recovery.
+ * With a running service the panic is signalled through the protocol
+ * header and a firmware interrupt; otherwise the request is staged in
+ * firmware_control for the next start. Returns 0, or -EFAULT for an
+ * unknown type when no service is running.
+ */
+static int scsc_bt_trigger_recovery(void *priv,
+			enum slsi_kic_test_recovery_type type)
+{
+	int err = 0;
+
+	SCSC_TAG_INFO(BT_COMMON, "forcing panic\n");
+
+	mutex_lock(&bt_start_mutex);
+
+	if (atomic_read(&bt_service.service_users) > 0 &&
+	    bt_service.bsmhcp_protocol) {
+		SCSC_TAG_INFO(BT_COMMON, "trashing magic value\n");
+
+		if (type == slsi_kic_test_recovery_type_service_stop_panic)
+			bt_service.bsmhcp_protocol->header.firmware_control =
+				BSMHCP_CONTROL_STOP_PANIC;
+		else if (type ==
+			 slsi_kic_test_recovery_type_service_start_panic)
+			firmware_control = BSMHCP_CONTROL_START_PANIC;
+		else
+			bt_service.bsmhcp_protocol->header.magic_value = 0;
+
+		scsc_service_mifintrbit_bit_set(bt_service.service,
+			bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+			SCSC_MIFINTR_TARGET_R4);
+	} else {
+		if (type == slsi_kic_test_recovery_type_service_stop_panic)
+			firmware_control = BSMHCP_CONTROL_STOP_PANIC;
+		else if (type ==
+			 slsi_kic_test_recovery_type_service_start_panic)
+			firmware_control = BSMHCP_CONTROL_START_PANIC;
+		else
+			err = -EFAULT;
+	}
+
+	mutex_unlock(&bt_start_mutex);
+
+	return err;
+}
+
+#ifdef CONFIG_SCSC_ANT
+/* KIC test hook: deliberately provoke an ANT firmware panic/recovery.
+ * With a running service the panic is signalled through the protocol
+ * header and a firmware interrupt; otherwise the request is staged in
+ * firmware_control for the next start. Returns 0, or -EFAULT for an
+ * unknown type when no service is running.
+ */
+static int scsc_ant_trigger_recovery(void *priv,
+			enum slsi_kic_test_recovery_type type)
+{
+	int err = 0;
+
+	SCSC_TAG_INFO(BT_COMMON, "forcing panic\n");
+
+	mutex_lock(&ant_start_mutex);
+
+	if (atomic_read(&ant_service.service_users) > 0 &&
+	    ant_service.asmhcp_protocol) {
+		SCSC_TAG_INFO(BT_COMMON, "trashing magic value\n");
+
+		if (type == slsi_kic_test_recovery_type_service_stop_panic)
+			ant_service.asmhcp_protocol->header.firmware_control =
+				BSMHCP_CONTROL_STOP_PANIC;
+		else if (type ==
+			 slsi_kic_test_recovery_type_service_start_panic)
+			firmware_control = BSMHCP_CONTROL_START_PANIC;
+		else
+			ant_service.asmhcp_protocol->header.magic_value = 0;
+
+		scsc_service_mifintrbit_bit_set(ant_service.service,
+			ant_service.asmhcp_protocol->header.ap_to_bg_int_src,
+			SCSC_MIFINTR_TARGET_R4);
+	} else {
+		if (type == slsi_kic_test_recovery_type_service_stop_panic)
+			firmware_control = BSMHCP_CONTROL_STOP_PANIC;
+		else if (type ==
+			 slsi_kic_test_recovery_type_service_start_panic)
+			firmware_control = BSMHCP_CONTROL_START_PANIC;
+		else
+			err = -EFAULT;
+	}
+
+	mutex_unlock(&ant_start_mutex);
+
+	return err;
+}
+#endif
+
+/* File operations for the BT H4 character device: open/release manage the
+ * BT service lifecycle, read/write/poll stream H4 traffic over shared
+ * memory, and unlocked_ioctl absorbs tty-style ioctls from user space.
+ */
+static const struct file_operations scsc_bt_shm_fops = {
+ .owner = THIS_MODULE,
+ .open = scsc_bt_h4_open,
+ .release = scsc_bt_h4_release,
+ .read = scsc_bt_shm_h4_read,
+ .write = scsc_bt_shm_h4_write,
+ .poll = scsc_bt_shm_h4_poll,
+ .unlocked_ioctl = scsc_default_ioctl,
+};
+
+#ifdef CONFIG_SCSC_ANT
+/* File operations for the ANT character device: open/release manage the
+ * ANT service lifecycle, read/write/poll stream ANT traffic over shared
+ * memory. No ioctl handler is provided for this node.
+ */
+static const struct file_operations scsc_ant_shm_fops = {
+ .owner = THIS_MODULE,
+ .open = scsc_ant_open,
+ .release = scsc_ant_release,
+ .read = scsc_shm_ant_read,
+ .write = scsc_shm_ant_write,
+ .poll = scsc_shm_ant_poll,
+};
+#endif
+
+/* KIC (Kernel Information and Control) callbacks for the BT service;
+ * currently only the test-recovery trigger.
+ */
+static struct slsi_kic_bt_ops scsc_bt_kic_ops = {
+ .trigger_recovery = scsc_bt_trigger_recovery
+};
+
+#ifdef CONFIG_SCSC_ANT
+/* KIC (Kernel Information and Control) callbacks for the ANT service;
+ * currently only the test-recovery trigger.
+ */
+static struct slsi_kic_ant_ops scsc_ant_kic_ops = {
+ .trigger_recovery = scsc_ant_trigger_recovery
+};
+#endif
+
+/* A new MX instance is available.
+ *
+ * Module-client probe callback: records the Maxwell core handle and device,
+ * and - when invoked as part of a recovery cycle - completes
+ * recovery_probe_complete and clears the recovery flag so blocked
+ * release() callers can continue. A recovery-reason probe with no recovery
+ * in progress is ignored.
+ */
+void slsi_bt_service_probe(struct scsc_mx_module_client *module_client,
+ struct scsc_mx *mx,
+ enum scsc_module_client_reason reason)
+{
+ /* Note: mx identifies the instance */
+ SCSC_TAG_INFO(BT_COMMON,
+ "BT service probe (%s %p)\n", module_client->name, mx);
+
+ mutex_lock(&bt_start_mutex);
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !bt_recovery_in_progress) {
+ SCSC_TAG_INFO(BT_COMMON,
+ "BT service probe recovery, but no recovery in progress\n");
+ goto done;
+ }
+
+ bt_service.dev = scsc_mx_get_device(mx);
+ common_service.maxwell_core = mx;
+
+ /* Hold a reference to the device for the lifetime of the service */
+ get_device(bt_service.dev);
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && bt_recovery_in_progress) {
+ complete_all(&bt_service.recovery_probe_complete);
+ bt_recovery_in_progress = 0;
+ }
+
+ slsi_bt_notify_probe(bt_service.dev,
+ &scsc_bt_shm_fops,
+ &bt_service.error_count,
+ &bt_service.read_wait);
+
+done:
+ mutex_unlock(&bt_start_mutex);
+}
+
+/* The MX instance is now unavailable.
+ *
+ * Module-client remove callback. On a recovery-reason removal it drops the
+ * start mutex, waits (unbounded - the completion always arrives, even with
+ * autorecovery disabled) for user space to release the device, then stops
+ * the service and resets the read/write bookkeeping before dropping the
+ * device reference. A recovery-reason removal with no recovery in progress
+ * is ignored.
+ */
+static void slsi_bt_service_remove(struct scsc_mx_module_client *module_client,
+ struct scsc_mx *mx,
+ enum scsc_module_client_reason reason)
+{
+ SCSC_TAG_INFO(BT_COMMON,
+ "BT service remove (%s %p)\n", module_client->name, mx);
+
+ mutex_lock(&bt_start_mutex);
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !bt_recovery_in_progress) {
+ SCSC_TAG_INFO(BT_COMMON,
+ "BT service remove recovery, but no recovery in progress\n");
+ goto done;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && bt_recovery_in_progress) {
+ /* Must not hold the mutex while waiting: release() needs it to
+ * signal recovery_release_complete.
+ */
+ mutex_unlock(&bt_start_mutex);
+
+ /* Wait forever for recovery_release_complete, as it will
+ * arrive even if autorecovery is disabled.
+ */
+ SCSC_TAG_INFO(BT_COMMON, "wait for recovery_release_complete\n");
+ wait_for_completion(&bt_service.recovery_release_complete);
+ reinit_completion(&bt_service.recovery_release_complete);
+
+ mutex_lock(&bt_start_mutex);
+ if (slsi_sm_bt_service_stop() == -EIO)
+ SCSC_TAG_INFO(BT_COMMON, "Service stop or close failed during recovery.\n");
+
+ bt_service.h4_users = false;
+
+ /* Clear all control structures */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = 0;
+ bt_service.read_index = 0;
+ bt_service.h4_write_offset = 0;
+ }
+
+ slsi_bt_notify_remove();
+ put_device(bt_service.dev);
+ common_service.maxwell_core = NULL;
+
+done:
+ mutex_unlock(&bt_start_mutex);
+
+ SCSC_TAG_INFO(BT_COMMON,
+ "BT service remove complete (%s %p)\n", module_client->name, mx);
+}
+
+/* BT service driver registration interface */
+/* Client structure handed to the MX manager; the probe/remove callbacks
+ * above are invoked when an MX instance appears or disappears. */
+static struct scsc_mx_module_client bt_driver = {
+ .name = "BT driver",
+ .probe = slsi_bt_service_probe,
+ .remove = slsi_bt_service_remove,
+};
+
+#ifdef CONFIG_SCSC_ANT
+/* A new MX instance is available: attach the ANT service to it.
+ *
+ * Given static linkage to match slsi_ant_service_remove(); it is only
+ * referenced through the ant_driver client structure below.
+ */
+static void slsi_ant_service_probe(struct scsc_mx_module_client *module_client,
+				   struct scsc_mx *mx,
+				   enum scsc_module_client_reason reason)
+{
+	/* Note: mx identifies the instance */
+	SCSC_TAG_INFO(BT_COMMON,
+		      "ANT service probe (%s %p)\n", module_client->name, mx);
+
+	mutex_lock(&ant_start_mutex);
+
+	/* A recovery probe that nobody is waiting for is ignored. */
+	if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY &&
+	    !ant_recovery_in_progress) {
+		SCSC_TAG_INFO(BT_COMMON,
+			      "ANT service probe recovery, but no recovery in progress\n");
+		goto done;
+	}
+
+	ant_service.dev = scsc_mx_get_device(mx);
+	common_service.maxwell_core = mx;
+
+	get_device(ant_service.dev);
+
+	/* Release any thread blocked waiting for the recovery probe. */
+	if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY &&
+	    ant_recovery_in_progress) {
+		complete_all(&ant_service.recovery_probe_complete);
+		ant_recovery_in_progress = 0;
+	}
+
+done:
+	mutex_unlock(&ant_start_mutex);
+	/* Wake pollers of the recovery queue (done outside the lock). */
+	wake_up_interruptible(&ant_recovery_complete_queue);
+}
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+/* The MX instance is now unavailable */
+/* Detach the ANT service.  Unlike the BT remove path, the wait for
+ * recovery_release_complete is bounded by
+ * SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT; the start mutex is dropped
+ * during the wait and re-taken before stopping the service.
+ */
+static void slsi_ant_service_remove(struct scsc_mx_module_client *module_client,
+ struct scsc_mx *mx,
+ enum scsc_module_client_reason reason)
+{
+ SCSC_TAG_INFO(BT_COMMON,
+ "ANT service remove (%s %p)\n", module_client->name, mx);
+
+ mutex_lock(&ant_start_mutex);
+ /* A recovery remove with no recovery pending is ignored. */
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !ant_recovery_in_progress) {
+ SCSC_TAG_INFO(BT_COMMON,
+ "ANT service remove recovery, but no recovery in progress\n");
+ goto done;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && ant_recovery_in_progress) {
+ int ret;
+
+ /* Drop the lock so the completing thread can take it. */
+ mutex_unlock(&ant_start_mutex);
+
+ /* Wait full duration for recovery_release_complete, as it will
+ * arrive even if autorecovery is disabled.
+ */
+ ret = wait_for_completion_timeout(&ant_service.recovery_release_complete,
+ msecs_to_jiffies(SLSI_BT_SERVICE_STOP_RECOVERY_TIMEOUT));
+ reinit_completion(&ant_service.recovery_release_complete);
+ if (ret == 0)
+ SCSC_TAG_INFO(BT_COMMON, "recovery_release_complete timeout\n");
+
+ mutex_lock(&ant_start_mutex);
+ if (slsi_sm_ant_service_stop() == -EIO)
+ SCSC_TAG_INFO(BT_COMMON, "Service stop or close failed during recovery.\n");
+
+ ant_service.ant_users = false;
+
+ /* Clear all control structures */
+ ant_service.read_offset = 0;
+ ant_service.read_operation = 0;
+ ant_service.read_index = 0;
+ ant_service.ant_write_offset = 0;
+ }
+
+ put_device(ant_service.dev);
+ common_service.maxwell_core = NULL;
+
+done:
+ mutex_unlock(&ant_start_mutex);
+
+ SCSC_TAG_INFO(BT_COMMON,
+ "ANT service remove complete (%s %p)\n", module_client->name, mx);
+}
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+/* ANT service driver registration interface */
+/* Client structure handed to the MX manager for the ANT service. */
+static struct scsc_mx_module_client ant_driver = {
+ .name = "ANT driver",
+ .probe = slsi_ant_service_probe,
+ .remove = slsi_ant_service_remove,
+};
+#endif
+
+/* Dump firmware-side statistics into the procfs seq_file.
+ *
+ * Rings the info doorbell towards the firmware, then waits up to 2s for
+ * the firmware to acknowledge (bt_info_interrupt catching up with
+ * bt_info_trigger).  The counters printed afterwards may be stale if the
+ * wait timed out; "result" at the end reports the wait outcome.
+ *
+ * Caller must hold bt_start_mutex and have verified bt_service.service
+ * is non-NULL (see slsi_bt_service_proc_show()).
+ */
+static void slsi_bt_service_proc_show_firmware(struct seq_file *m)
+{
+ struct BSMHCP_FW_INFO *info =
+ &bt_service.bsmhcp_protocol->information;
+ int res;
+ u32 index;
+ u32 user_defined_count = info->user_defined_count;
+
+ bt_info_trigger++;
+
+ /* Doorbell: ask the firmware to refresh the info block. */
+ scsc_service_mifintrbit_bit_set(bt_service.service,
+ bt_service.bsmhcp_protocol->header.info_ap_to_bg_int_src,
+ SCSC_MIFINTR_TARGET_R4);
+
+ res = wait_event_interruptible_timeout(bt_service.info_wait,
+ bt_info_trigger == bt_info_interrupt,
+ 2*HZ);
+
+ seq_printf(m, "  r4_from_ap_interrupt_count    = %u\n",
+ info->r4_from_ap_interrupt_count);
+ seq_printf(m, "  m4_from_ap_interrupt_count    = %u\n",
+ info->m4_from_ap_interrupt_count);
+ seq_printf(m, "  r4_to_ap_interrupt_count      = %u\n",
+ info->r4_to_ap_interrupt_count);
+ seq_printf(m, "  m4_to_ap_interrupt_count      = %u\n\n",
+ info->m4_to_ap_interrupt_count);
+ seq_printf(m, "  bt_deep_sleep_time_total      = %u\n",
+ info->bt_deep_sleep_time_total);
+ seq_printf(m, "  bt_deep_sleep_wakeup_duration = %u\n\n",
+ info->bt_deep_sleep_wakeup_duration);
+ seq_printf(m, "  sched_n_messages              = %u\n\n",
+ info->sched_n_messages);
+ seq_printf(m, "  user_defined_count            = %u\n\n",
+ info->user_defined_count);
+
+ /* Clamp a firmware-supplied count before indexing the array. */
+ if (user_defined_count > BSMHCP_FW_INFO_USER_DEFINED_COUNT)
+ user_defined_count = BSMHCP_FW_INFO_USER_DEFINED_COUNT;
+
+ for (index = 0; index < user_defined_count; index++)
+ seq_printf(m, "  user%02u                        = 0x%08x (%u)\n",
+ index, info->user_defined[index], info->user_defined[index]);
+
+ if (user_defined_count)
+ seq_puts(m, "\n");
+
+ seq_printf(m, "  bt_info_trigger               = %u\n",
+ bt_info_trigger);
+ seq_printf(m, "  bt_info_interrupt             = %u\n\n",
+ bt_info_interrupt);
+ seq_printf(m, "  result                        = %d\n", res);
+}
+
+/* procfs "stats" show callback: dump driver-side statistics, the AVDTP
+ * detection state, and (when the service is running and the firmware
+ * supports it) firmware-side statistics.
+ *
+ * Fix: processed_text is sized BSMHCP_TRANSFER_RING_EVT_SIZE + 1, but was
+ * NUL-terminated at index BSMHCP_DATA_BUFFER_TX_ACL_SIZE — an
+ * out-of-bounds write (or unterminated string) whenever the two constants
+ * differ.  Terminate it at its own size.
+ */
+static int slsi_bt_service_proc_show(struct seq_file *m, void *v)
+{
+	char allocated_text[BSMHCP_DATA_BUFFER_TX_ACL_SIZE + 1];
+	char processed_text[BSMHCP_TRANSFER_RING_EVT_SIZE + 1];
+	size_t index;
+	struct scsc_bt_avdtp_detect_hci_connection *cur = bt_service.avdtp_detect.connections;
+
+	seq_puts(m, "Driver statistics:\n");
+	seq_printf(m, "  write_wake_lock_count         = %zu\n",
+		bt_service.write_wake_lock_count);
+	seq_printf(m, "  write_wake_unlock_count       = %zu\n\n",
+		bt_service.write_wake_unlock_count);
+
+	seq_printf(m, "  mailbox_hci_evt_read          = %u\n",
+		bt_service.mailbox_hci_evt_read);
+	seq_printf(m, "  mailbox_hci_evt_write         = %u\n",
+		bt_service.mailbox_hci_evt_write);
+	seq_printf(m, "  mailbox_acl_rx_read           = %u\n",
+		bt_service.mailbox_acl_rx_read);
+	seq_printf(m, "  mailbox_acl_rx_write          = %u\n",
+		bt_service.mailbox_acl_rx_write);
+	seq_printf(m, "  mailbox_acl_free_read         = %u\n",
+		bt_service.mailbox_acl_free_read);
+	seq_printf(m, "  mailbox_acl_free_read_scan    = %u\n",
+		bt_service.mailbox_acl_free_read_scan);
+	seq_printf(m, "  mailbox_acl_free_write        = %u\n",
+		bt_service.mailbox_acl_free_write);
+
+	seq_printf(m, "  hci_event_paused              = %u\n",
+		bt_service.hci_event_paused);
+	seq_printf(m, "  acldata_paused                = %u\n\n",
+		bt_service.acldata_paused);
+
+	seq_printf(m, "  interrupt_count               = %zu\n",
+		bt_service.interrupt_count);
+	seq_printf(m, "  interrupt_read_count          = %zu\n",
+		bt_service.interrupt_read_count);
+	seq_printf(m, "  interrupt_write_count         = %zu\n",
+		bt_service.interrupt_write_count);
+
+	/* Render the TX ACL allocation map and the event-processed map as
+	 * '0'/'1' strings. */
+	for (index = 0; index < BSMHCP_DATA_BUFFER_TX_ACL_SIZE; index++)
+		allocated_text[index] = bt_service.allocated[index] ? '1' : '0';
+	allocated_text[BSMHCP_DATA_BUFFER_TX_ACL_SIZE] = 0;
+
+	for (index = 0; index < BSMHCP_TRANSFER_RING_EVT_SIZE; index++)
+		processed_text[index] = bt_service.processed[index] ? '1' : '0';
+	/* Was BSMHCP_DATA_BUFFER_TX_ACL_SIZE: wrong array bound. */
+	processed_text[BSMHCP_TRANSFER_RING_EVT_SIZE] = 0;
+
+	seq_printf(m, "  allocated_count               = %u\n",
+		bt_service.allocated_count);
+	seq_printf(m, "  freed_count                   = %u\n",
+		bt_service.freed_count);
+	seq_printf(m, "  allocated                     = %s\n",
+		allocated_text);
+	seq_printf(m, "  processed                     = %s\n\n",
+		processed_text);
+
+	/* Walk the AVDTP detection list. NOTE(review): traversed without
+	 * taking avdtp_detect.lock — confirm this is safe against
+	 * concurrent list updates. */
+	while (cur) {
+		seq_printf(m, "  avdtp_hci_connection_handle   = %u\n\n",
+			cur->hci_connection_handle);
+		seq_printf(m, "  avdtp_signaling_src_cid       = %u\n",
+			cur->signal.src_cid);
+		seq_printf(m, "  avdtp_signaling_dst_cid       = %u\n",
+			cur->signal.dst_cid);
+		seq_printf(m, "  avdtp_streaming_src_cid       = %u\n",
+			cur->stream.src_cid);
+		seq_printf(m, "  avdtp_streaming_dst_cid       = %u\n",
+			cur->stream.dst_cid);
+		cur = cur->next;
+	}
+	seq_puts(m, "Firmware statistics:\n");
+
+	mutex_lock(&bt_start_mutex);
+
+	if (bt_service.service != NULL) {
+		if (bt_service.bsmhcp_protocol->header.firmware_features &
+		    BSMHCP_FEATURE_FW_INFORMATION) {
+			slsi_bt_service_proc_show_firmware(m);
+		} else
+			seq_puts(m,
+				 "  Firmware does not provide this information\n");
+	} else
+		seq_puts(m,
+			 "  Error: bluetooth service is currently disabled\n");
+
+	mutex_unlock(&bt_start_mutex);
+
+	return 0;
+}
+
+/* Open callback for /proc/driver/scsc_bt/stats (single-shot seq_file). */
+static int slsi_bt_service_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, slsi_bt_service_proc_show, NULL);
+}
+
+/* File operations for the procfs stats entry. */
+static const struct file_operations scsc_bt_procfs_fops = {
+ .owner = THIS_MODULE,
+ .open = slsi_bt_service_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* "set" handler for the mxlog_filter module parameter.
+ *
+ * Parses the new filter value, caches it in firmware_mxlog_filter and,
+ * if the BT service is running, pushes it into the shared-memory
+ * protocol header and rings the firmware doorbell.
+ */
+static int scsc_mxlog_filter_set_param_cb(const char *buffer,
+					  const struct kernel_param *kp)
+{
+	u32 filter;
+	int err = kstrtou32(buffer, 0, &filter);
+
+	if (err)
+		return err;
+
+	firmware_mxlog_filter = filter;
+
+	mutex_lock(&bt_start_mutex);
+	if (bt_service.service) {
+		bt_service.bsmhcp_protocol->header.mxlog_filter =
+			firmware_mxlog_filter;
+
+		/* Trigger the interrupt in the mailbox */
+		scsc_service_mifintrbit_bit_set(bt_service.service,
+			bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+			SCSC_MIFINTR_TARGET_R4);
+	}
+	mutex_unlock(&bt_start_mutex);
+
+	return 0;
+}
+
+/* "get" handler for the mxlog_filter module parameter: report the cached
+ * filter value.  (Module param get buffers are PAGE_SIZE; this fixed
+ * width output always fits.)
+ */
+static int scsc_mxlog_filter_get_param_cb(char *buffer,
+					  const struct kernel_param *kp)
+{
+	return sprintf(buffer, "filter=0x%08x\n", firmware_mxlog_filter);
+}
+
+/* const: the param core only ever reads the ops table
+ * (struct kernel_param holds a const pointer), so keep it in rodata. */
+static const struct kernel_param_ops scsc_mxlog_filter_ops = {
+	.set = scsc_mxlog_filter_set_param_cb,
+	.get = scsc_mxlog_filter_get_param_cb,
+};
+
+module_param_cb(mxlog_filter, &scsc_mxlog_filter_ops, NULL, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mxlog_filter,
+ "Set the filter for MX log in the Bluetooth firmware");
+
+/* "set" handler for the force_crash module parameter.
+ *
+ * Writing the magic value 0xDEADDEAD bumps the service error count and
+ * wakes readers, which makes the driver behave as if the firmware
+ * crashed.  Any other value parses successfully but is ignored.
+ */
+static int scsc_force_crash_set_param_cb(const char *buffer,
+					 const struct kernel_param *kp)
+{
+	int ret;
+	u32 value;
+
+	ret = kstrtou32(buffer, 0, &value);
+	if (!ret && value == 0xDEADDEAD) {
+		mutex_lock(&bt_start_mutex);
+		if (bt_service.service) {
+			atomic_inc(&bt_service.error_count);
+			wake_up(&bt_service.read_wait);
+		}
+		mutex_unlock(&bt_start_mutex);
+	}
+
+	return ret;
+}
+
+/* const: the param core only ever reads the ops table (write-only param,
+ * so no .get handler). */
+static const struct kernel_param_ops scsc_force_crash_ops = {
+	.set = scsc_force_crash_set_param_cb,
+	.get = NULL,
+};
+
+module_param_cb(force_crash, &scsc_force_crash_ops, NULL, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(force_crash,
+ "Forces a crash of the Bluetooth driver");
+
+
+/* Return the physical address of the BT/A-Box streaming data buffer
+ * (TX: BT to A-Box, RX: A-Box to BT), or 0 if the A-Box shared area has
+ * not been mapped yet.
+ */
+phys_addr_t scsc_bt_audio_get_paddr_buf(bool tx)
+{
+	struct scsc_bt_audio_abox *abox;
+	void *buf;
+
+	abox = bt_audio.abox_physical;
+	if (!abox)
+		return 0;
+
+	buf = tx ? abox->bt_to_abox_streaming_if_data
+		 : abox->abox_to_bt_streaming_if_data;
+
+	return (phys_addr_t)buf;
+}
+EXPORT_SYMBOL(scsc_bt_audio_get_paddr_buf);
+
+/* Return the sample rate of streaming interface 0 or 1, or 0 when the
+ * A-Box shared area is unmapped or the id is out of range.
+ */
+unsigned int scsc_bt_audio_get_rate(int id)
+{
+	if (!bt_audio.abox_virtual)
+		return 0;
+
+	if (id == 0)
+		return bt_audio.abox_virtual->streaming_if_0_sample_rate;
+	if (id == 1)
+		return bt_audio.abox_virtual->streaming_if_1_sample_rate;
+
+	return 0;
+}
+EXPORT_SYMBOL(scsc_bt_audio_get_rate);
+
+/* Register the A-Box audio device and its IOMMU map/unmap callbacks.
+ *
+ * Rejects NULL arguments and double registration with -EINVAL.
+ * Serialised by bt_audio_mutex.
+ */
+int scsc_bt_audio_register(struct device *dev,
+			   int (*dev_iommu_map)(struct device *, phys_addr_t, size_t),
+			   void (*dev_iommu_unmap)(struct device *, size_t))
+{
+	int err = 0;
+
+	mutex_lock(&bt_audio_mutex);
+
+	if (!dev || !dev_iommu_map || !dev_iommu_unmap || audio_device) {
+		SCSC_TAG_ERR(BT_COMMON,
+			     "failed audio_device %p dev %p dev_iommu_map %p dev_iommu_unmap %p\n",
+			     audio_device, dev, dev_iommu_map, dev_iommu_unmap);
+		err = -EINVAL;
+	} else {
+		audio_device = dev;
+		bt_audio.dev_iommu_map = dev_iommu_map;
+		bt_audio.dev_iommu_unmap = dev_iommu_unmap;
+	}
+
+	mutex_unlock(&bt_audio_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL(scsc_bt_audio_register);
+
+/* Unregister the A-Box audio device previously passed to
+ * scsc_bt_audio_register().  Returns -EINVAL when dev does not match the
+ * registered device.  Serialised by bt_audio_mutex.
+ */
+int scsc_bt_audio_unregister(struct device *dev)
+{
+	int err = 0;
+
+	mutex_lock(&bt_audio_mutex);
+
+	if (!audio_device || dev != audio_device) {
+		err = -EINVAL;
+	} else {
+		/* Unmap ringbuffer IOMMU now that A-Box is finished with it,
+		 * but for safety don't allow this if BT is running.
+		 *
+		 * In practice, A-Box driver only unregisters if platform
+		 * driver unloads at shutdown, so it would be safe to leave
+		 * the memory mapped.
+		 */
+		if (audio_device_probed &&
+		    atomic_read(&bt_service.service_users) == 0)
+			slsi_bt_audio_remove();
+
+		bt_audio.dev = NULL;
+		bt_audio.abox_virtual = NULL;
+		bt_audio.abox_physical = NULL;
+		bt_audio.dev_iommu_map = NULL;
+		bt_audio.dev_iommu_unmap = NULL;
+		audio_device = NULL;
+		audio_device_probed = false;
+	}
+
+	mutex_unlock(&bt_audio_mutex);
+
+	return err;
+}
+EXPORT_SYMBOL(scsc_bt_audio_unregister);
+
+/******* Module entry/exit point ********/
+/* Module initialisation: set up wait queues and wake locks, create the
+ * procfs stats entry, allocate char device regions / class / devices for
+ * the H4 (and optionally ANT) interfaces, then register with KIC and the
+ * MX manager.
+ */
+static int __init scsc_bt_module_init(void)
+{
+ int ret;
+ struct proc_dir_entry *procfs_dir;
+
+ SCSC_TAG_INFO(BT_COMMON, "%s %s (C) %s\n",
+ SCSC_MODDESC, SCSC_MODVERSION, SCSC_MODAUTH);
+
+ memset(&bt_service, 0, sizeof(bt_service));
+#ifdef CONFIG_SCSC_ANT
+ memset(&ant_service, 0, sizeof(ant_service));
+#endif
+
+ init_waitqueue_head(&bt_service.read_wait);
+ init_waitqueue_head(&bt_service.info_wait);
+
+ wake_lock_init(&bt_service.read_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "bt_read_wake_lock");
+ wake_lock_init(&bt_service.write_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "bt_write_wake_lock");
+ wake_lock_init(&bt_service.service_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "bt_service_wake_lock");
+
+#ifdef CONFIG_SCSC_ANT
+ init_waitqueue_head(&ant_service.read_wait);
+
+ wake_lock_init(&ant_service.read_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "ant_read_wake_lock");
+ wake_lock_init(&ant_service.write_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "ant_write_wake_lock");
+ wake_lock_init(&ant_service.service_wake_lock,
+ WAKE_LOCK_SUSPEND,
+ "ant_service_wake_lock");
+#endif
+
+ /* procfs entry is best-effort: failure to create it is not fatal. */
+ procfs_dir = proc_mkdir("driver/scsc_bt", NULL);
+ if (NULL != procfs_dir) {
+ proc_create_data("stats", S_IRUSR | S_IRGRP,
+ procfs_dir, &scsc_bt_procfs_fops, NULL);
+ }
+
+ ret = alloc_chrdev_region(&bt_service.device, 0,
+ SCSC_TTY_MINORS, "scsc_char");
+ if (ret) {
+ SCSC_TAG_ERR(BT_COMMON, "error alloc_chrdev_region %d\n", ret);
+ return ret;
+ }
+
+ common_service.class = class_create(THIS_MODULE, "scsc_char");
+ if (IS_ERR(common_service.class)) {
+ ret = PTR_ERR(common_service.class);
+ goto error;
+ }
+
+ cdev_init(&bt_service.h4_cdev, &scsc_bt_shm_fops);
+ ret = cdev_add(&bt_service.h4_cdev,
+ MKDEV(MAJOR(bt_service.device), MINOR(0)), 1);
+ if (ret) {
+ SCSC_TAG_ERR(BT_COMMON,
+ "cdev_add failed for device %s\n",
+ SCSC_H4_DEVICE_NAME);
+ bt_service.h4_cdev.dev = 0;
+ goto error;
+ }
+
+ bt_service.h4_device = device_create(common_service.class,
+ NULL,
+ bt_service.h4_cdev.dev,
+ NULL,
+ SCSC_H4_DEVICE_NAME);
+ if (bt_service.h4_device == NULL) {
+ cdev_del(&bt_service.h4_cdev);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ init_completion(&bt_service.recovery_probe_complete);
+ init_completion(&bt_service.recovery_release_complete);
+
+#ifdef CONFIG_SCSC_ANT
+ ret = alloc_chrdev_region(&ant_service.device, 0,
+ SCSC_TTY_MINORS, "scsc_ant_char");
+ if (ret) {
+ SCSC_TAG_ERR(BT_COMMON, "error alloc_chrdev_region %d\n", ret);
+ /* NOTE(review): returning here leaks the BT chrdev region,
+ * cdev, device and class created above — a staged error path
+ * is needed; confirm no other teardown covers this before
+ * changing. */
+ return ret;
+ }
+
+ cdev_init(&ant_service.ant_cdev, &scsc_ant_shm_fops);
+ ret = cdev_add(&ant_service.ant_cdev,
+ MKDEV(MAJOR(ant_service.device), MINOR(0)), 1);
+ if (ret) {
+ SCSC_TAG_ERR(BT_COMMON,
+ "cdev_add failed for device %s\n",
+ SCSC_ANT_DEVICE_NAME);
+ ant_service.ant_cdev.dev = 0;
+ goto error;
+ }
+
+ ant_service.ant_device = device_create(common_service.class,
+ NULL,
+ ant_service.ant_cdev.dev,
+ NULL,
+ SCSC_ANT_DEVICE_NAME);
+ if (ant_service.ant_device == NULL) {
+ cdev_del(&ant_service.ant_cdev);
+ ret = -EFAULT;
+ goto error;
+ }
+
+ init_completion(&ant_service.recovery_probe_complete);
+ init_completion(&ant_service.recovery_release_complete);
+#endif
+
+ /* Register KIC interface */
+ slsi_kic_bt_ops_register(NULL, &scsc_bt_kic_ops);
+
+ /* Register with MX manager */
+ scsc_mx_module_register_client_module(&bt_driver);
+
+#ifdef CONFIG_SCSC_ANT
+/* Register KIC interface */
+ slsi_kic_ant_ops_register(NULL, &scsc_ant_kic_ops);
+ SCSC_TAG_DEBUG(BT_COMMON, "Register the KIC interface, %p\n",
+ &scsc_ant_kic_ops);
+
+ /* Register with MX manager */
+ scsc_mx_module_register_client_module(&ant_driver);
+#endif
+
+ SCSC_TAG_DEBUG(BT_COMMON, "dev=%u class=%p\n",
+ bt_service.device, common_service.class);
+
+ spin_lock_init(&bt_service.avdtp_detect.lock);
+ spin_lock_init(&bt_service.avdtp_detect.fw_write_lock);
+
+#ifdef CONFIG_ARCH_EXYNOS
+ /* Derive a fallback BD address from the SoC unique id. */
+ sprintf(bluetooth_address_fallback, "%02X:%02X:%02X:%02X:%02X:%02X",
+ (exynos_soc_info.unique_id & 0x000000FF0000) >> 16,
+ (exynos_soc_info.unique_id & 0x00000000FF00) >> 8,
+ (exynos_soc_info.unique_id & 0x0000000000FF) >> 0,
+ (exynos_soc_info.unique_id & 0xFF0000000000) >> 40,
+ (exynos_soc_info.unique_id & 0x00FF00000000) >> 32,
+ (exynos_soc_info.unique_id & 0x0000FF000000) >> 24);
+#endif
+
+#ifdef CONFIG_SCSC_ANT
+ SCSC_TAG_DEBUG(BT_COMMON, "dev=%u class=%p\n",
+ ant_service.device, common_service.class);
+#endif
+
+ return 0;
+
+error:
+ /* NOTE(review): this path does not destroy common_service.class nor
+ * remove the BT cdev/device, and it unregisters the ANT chrdev region
+ * even on paths where it was never allocated — verify and consider a
+ * staged unwind. */
+ SCSC_TAG_ERR(BT_COMMON, "error class_create bt device\n");
+ unregister_chrdev_region(bt_service.device, SCSC_TTY_MINORS);
+
+#ifdef CONFIG_SCSC_ANT
+ SCSC_TAG_ERR(BT_COMMON, "error class_create ant device\n");
+ unregister_chrdev_region(ant_service.device, SCSC_TTY_MINORS);
+#endif
+
+ return ret;
+}
+
+
+static void __exit scsc_bt_module_exit(void)
+{
+ SCSC_TAG_INFO(BT_COMMON, "\n");
+
+ wake_lock_destroy(&bt_service.write_wake_lock);
+ wake_lock_destroy(&bt_service.read_wake_lock);
+ wake_lock_destroy(&bt_service.service_wake_lock);
+ complete_all(&bt_service.recovery_probe_complete);
+ complete_all(&bt_service.recovery_release_complete);
+
+#ifdef CONFIG_SCSC_ANT
+ wake_lock_destroy(&ant_service.write_wake_lock);
+ wake_lock_destroy(&ant_service.read_wake_lock);
+ wake_lock_destroy(&ant_service.service_wake_lock);
+ complete_all(&ant_service.recovery_probe_complete);
+ complete_all(&ant_service.recovery_release_complete);
+#endif
+
+ slsi_kic_bt_ops_unregister(&scsc_bt_kic_ops);
+
+ /* Register with MX manager */
+ scsc_mx_module_unregister_client_module(&bt_driver);
+
+ if (bt_service.h4_device) {
+ device_destroy(common_service.class, bt_service.h4_cdev.dev);
+ bt_service.h4_device = NULL;
+ }
+
+ cdev_del(&bt_service.h4_cdev);
+
+ unregister_chrdev_region(bt_service.device, SCSC_TTY_MINORS);
+
+#ifdef CONFIG_SCSC_ANT
+ slsi_kic_ant_ops_unregister(&scsc_ant_kic_ops);
+
+ /* Register with MX manager */
+ scsc_mx_module_unregister_client_module(&ant_driver);
+
+ if (ant_service.ant_device) {
+ device_destroy(common_service.class, ant_service.ant_cdev.dev);
+ ant_service.ant_device = NULL;
+ }
+
+ cdev_del(&ant_service.ant_cdev);
+
+ unregister_chrdev_region(ant_service.device, SCSC_TTY_MINORS);
+#endif
+
+ SCSC_TAG_INFO(BT_COMMON, "exit, module unloaded\n");
+}
+
+module_init(scsc_bt_module_init);
+module_exit(scsc_bt_module_exit);
+
+MODULE_DESCRIPTION(SCSC_MODDESC);
+MODULE_AUTHOR(SCSC_MODAUTH);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SCSC_MODVERSION);
--- /dev/null
+/****************************************************************************
+ *
+ * Internal BT driver definitions
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_BT_PRIV_H
+#define __SCSC_BT_PRIV_H
+
+#include <scsc/scsc_mx.h>
+#include <scsc/api/bsmhcp.h>
+#include <scsc/api/bhcs.h>
+#include <scsc/api/bt_audio.h>
+
+#include "scsc_shm.h"
+
+#ifndef UNUSED
+#define UNUSED(x) ((void)(x))
+#endif
+
+/**
+ * Size of temporary buffer (on stack) for peeking at HCI/H4
+ * packet header held in FIFO.
+ *
+ * Must be big enough to decode the
+ * length of any HCI packet type.
+ *
+ * For ACL that is 1 h4 header + 2 ACL handle + 2 ACL data size
+ */
+#define H4DMUX_HEADER_HCI (1 + 3) /* CMD, SCO */
+#define H4DMUX_HEADER_ACL (1 + 4) /* ACL */
+
+#define HCI_COMMAND_PKT (1)
+#define HCI_ACLDATA_PKT (2)
+#define HCI_EVENT_PKT (4)
+
+#define ACLDATA_HEADER_SIZE (4)
+#define L2CAP_HEADER_SIZE (4)
+
+#define HCI_ACL_DATA_FLAGS(data) ((*(data + 1)) & 0xf0)
+#define HCI_ACL_DATA_CON_HDL(data) ((u16)(*(data + 0) | ((*(data + 1)) & 0x0f) << 8))
+#define HCI_ACL_DATA_LENGTH(data) ((u16)(*(data + 2) | (*(data + 3)) << 8))
+#define HCI_L2CAP_LENGTH(data) ((u16)(*(data + 4) | (*(data + 5)) << 8))
+#define HCI_L2CAP_CID(data) ((u16)(*(data + 6) | (*(data + 7)) << 8))
+
+#define HCI_EVENT_NUMBER_OF_COMPLETED_PACKETS_EVENT (0x13)
+#define HCI_EVENT_HARDWARE_ERROR_EVENT (0x10)
+
+#define SCSC_BT_CONF "bt.hcf"
+#ifdef CONFIG_SCSC_BT_BLUEZ
+#define SCSC_BT_ADDR "/csa/bluetooth/.bd_addr"
+#define SCSC_BT_ADDR_LEN (3)
+#elif defined CONFIG_SCSC_BT_ADDRESS_IN_FILE
+#define SCSC_BT_ADDR CONFIG_SCSC_BT_ADDRESS_FILENAME
+#define SCSC_BT_ADDR_LEN (6)
+#endif
+
+#define SCSC_H4_DEVICE_NAME "scsc_h4_0"
+#define SCSC_ANT_DEVICE_NAME "scsc_ant_0"
+
+#define ANT_COMMAND_MSG 0x0C
+#define ANT_DATA_MSG 0x0E
+#define ANT_HEADER_LENGTH 1
+
+#define SCSC_BT_CONNECTION_INFO_MAX (0x1000)
+#define SCSC_BT_ACL_RAW_MASK (0xF000)
+#define SCSC_BT_ACL_RAW (0x2000)
+#define SCSC_BT_ACL_HANDLE_MASK (0x0FFF)
+
+#define SCSC_TTY_MINORS (8)
+
+enum scsc_bt_shm_thread_flags;
+
+/* State machine for an in-progress read() on the H4 device: what kind of
+ * payload is currently being copied to userspace. */
+enum scsc_bt_read_op {
+ BT_READ_OP_NONE,
+ BT_READ_OP_HCI_EVT,
+ BT_READ_OP_HCI_EVT_ERROR,
+ BT_READ_OP_ACL_DATA,
+ BT_READ_OP_ACL_CREDIT,
+ BT_READ_OP_IQ_REPORT,
+ BT_READ_OP_STOP
+};
+
+/* State machine for an in-progress read() on the ANT device. */
+enum scsc_ant_read_op {
+ ANT_READ_OP_NONE,
+ ANT_READ_OP_CMD,
+ ANT_READ_OP_DATA
+};
+
+/* Per-handle ACL reassembly state, indexed by HCI connection handle
+ * (see connection_handle_list in struct scsc_bt_service). */
+struct scsc_bt_connection_info {
+ u8 state; /* CONNECTION_NONE/ACTIVE/DISCONNECTED below */
+ u16 length; /* remaining L2CAP payload length */
+ u16 l2cap_cid;
+};
+
+/* Values for scsc_bt_connection_info.state */
+#define CONNECTION_NONE (0)
+#define CONNECTION_ACTIVE (1)
+#define CONNECTION_DISCONNECTED (2)
+
+/* Baseband link types. */
+enum bt_link_type_enum {
+ BT_LINK_TYPE_SCO = 0,
+ BT_LINK_TYPE_ACL = 1,
+ BT_LINK_TYPE_SETUP_ID = 2,
+ BT_LINK_TYPE_SETUP_FHS = 3,
+ BT_LINK_TYPE_ESCO = 4,
+ BT_LINK_TYPE_ACL_23 = 5,
+ BT_LINK_TYPE_ESCO_23 = 6,
+ BT_LINK_TYPE_ANTPLUS = 7,
+ MAX_BT_LINK_TYPE = 7
+};
+
+/* L2CAP channel setup progress for AVDTP sniffing (signaling and
+ * streaming channels are tracked separately). */
+enum scsc_bt_avdtp_detect_state_enum {
+ BT_AVDTP_STATE_IDLE_SIGNALING,
+ BT_AVDTP_STATE_PENDING_SIGNALING,
+ BT_AVDTP_STATE_COMPLETE_SIGNALING,
+ BT_AVDTP_STATE_IDLE_STREAMING,
+ BT_AVDTP_STATE_PENDING_STREAMING,
+ BT_AVDTP_STATE_COMPLETE_STREAMING,
+};
+
+/* Direction of an observed L2CAP connection request. */
+enum scsc_bt_avdtp_detect_conn_req_direction_enum {
+ BT_AVDTP_CONN_REQ_DIR_INCOMING,
+ BT_AVDTP_CONN_REQ_DIR_OUTGOING,
+};
+
+/* Which AVDTP channel a detected L2CAP connection carries. */
+enum scsc_bt_avdtp_detect_type {
+ BT_AVDTP_CONN_TYPE_SIGNAL = 0,
+ BT_AVDTP_CONN_TYPE_STREAM = 1,
+};
+
+/* One tracked L2CAP channel (signal or stream) of an AVDTP session. */
+struct scsc_bt_avdtp_detect_connection {
+ enum scsc_bt_avdtp_detect_type type;
+ enum scsc_bt_avdtp_detect_state_enum state;
+ u16 src_cid;
+ u16 dst_cid;
+};
+
+/* Channels still being negotiated, split by type and direction; promoted
+ * into the hci connection's signal/stream slots once complete. */
+struct scsc_bt_avdtp_detect_ongoing {
+ struct scsc_bt_avdtp_detect_connection incoming_signal;
+ struct scsc_bt_avdtp_detect_connection outgoing_signal;
+ struct scsc_bt_avdtp_detect_connection incoming_stream;
+ struct scsc_bt_avdtp_detect_connection outgoing_stream;
+};
+
+/* AVDTP stream endpoint role (SRC or SNK), per the AVDTP TSEP field. */
+enum scsc_bt_avdtp_detect_tsep {
+ BT_AVDTP_TSEP_SRC = 0,
+ BT_AVDTP_TSEP_SNK = 1,
+};
+
+/* SNK endpoint discovery state used to work out which side is the sink. */
+struct scsc_bt_avdtp_detect_src_snk {
+ enum scsc_bt_avdtp_detect_tsep tsep;
+ struct scsc_bt_avdtp_detect_snk_seid *local_snk_seids;
+ struct scsc_bt_avdtp_detect_snk_seid *remote_snk_seids;
+ uint16_t local_snk_seid_candidate;
+ uint16_t remote_snk_seid_candidate;
+};
+
+/* Singly-linked list node holding one discovered SNK SEID. */
+struct scsc_bt_avdtp_detect_snk_seid {
+ uint8_t seid;
+ struct scsc_bt_avdtp_detect_snk_seid *next;
+};
+
+/* Per-HCI-connection AVDTP detection record (linked list, head in
+ * struct scsc_bt_avdtp_detect). */
+struct scsc_bt_avdtp_detect_hci_connection {
+ struct scsc_bt_avdtp_detect_ongoing ongoing;
+ u16 hci_connection_handle;
+ struct scsc_bt_avdtp_detect_connection signal;
+ struct scsc_bt_avdtp_detect_connection stream;
+ struct scsc_bt_avdtp_detect_src_snk tsep_detect;
+ struct scsc_bt_avdtp_detect_hci_connection *next;
+ spinlock_t lock;
+};
+
+/* Top-level AVDTP detection state embedded in struct scsc_bt_service. */
+struct scsc_bt_avdtp_detect {
+ struct scsc_bt_avdtp_detect_hci_connection *connections;
+ spinlock_t lock; /* protects the connections list */
+ spinlock_t fw_write_lock; /* serialises detection writes to FW */
+};
+
+/* State shared between the BT and ANT services: the MX core handle and
+ * the "scsc_char" device class. */
+struct scsc_common_service {
+ struct scsc_mx *maxwell_core;
+ struct class *class;
+};
+
+extern struct scsc_common_service common_service;
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+struct scsc_bt_hcf_collection {
+ void *hcf;
+ u32 hcf_size;
+};
+#endif
+
+/* Driver-global state for the BT (H4) service: char device plumbing,
+ * shared-memory references, read/write state machines, statistics and
+ * recovery synchronisation.  A single instance (bt_service) exists. */
+struct scsc_bt_service {
+ dev_t device;
+ struct scsc_service *service;
+ struct device *dev;
+
+ /* H4 character device exposed to the host stack */
+ struct cdev h4_cdev;
+ struct device *h4_device;
+ struct file *h4_file;
+ bool h4_users;
+ atomic_t h4_readers;
+ atomic_t h4_writers;
+ size_t h4_write_offset;
+
+ atomic_t error_count;
+ atomic_t service_users;
+
+ scsc_mifram_ref bhcs_ref; /* Bluetooth host configuration service reference */
+ scsc_mifram_ref bsmhcp_ref; /* Bluetooth shared memory host controller protocol reference */
+ scsc_mifram_ref config_ref; /* Bluetooth configuration reference */
+ scsc_mifram_ref abox_ref; /* A-Box reference */
+ struct BSMHCP_PROTOCOL *bsmhcp_protocol; /* Bluetooth shared memory host controller protocol pointer */
+
+ /* Current read() state machine (see enum scsc_bt_read_op) */
+ size_t read_offset;
+ enum scsc_bt_read_op read_operation;
+ u32 read_index;
+ wait_queue_head_t read_wait;
+
+ wait_queue_head_t info_wait;
+
+ /* TX ACL buffer bookkeeping */
+ int last_alloc; /* Cached previous alloc index to aid search */
+ u8 allocated[BSMHCP_DATA_BUFFER_TX_ACL_SIZE];
+ u32 allocated_count;
+ u32 freed_count;
+ bool processed[BSMHCP_TRANSFER_RING_EVT_SIZE];
+
+ struct scsc_bt_connection_info connection_handle_list[SCSC_BT_CONNECTION_INFO_MAX];
+ bool hci_event_paused;
+ bool acldata_paused;
+
+ /* Wake locks held across reads/writes/service transitions */
+ struct wake_lock read_wake_lock;
+ struct wake_lock write_wake_lock;
+ struct wake_lock service_wake_lock;
+ size_t write_wake_lock_count;
+ size_t write_wake_unlock_count;
+
+ size_t interrupt_count;
+ size_t interrupt_read_count;
+ size_t interrupt_write_count;
+
+ /* Cached mailbox read/write indices (mirrored in procfs stats) */
+ u32 mailbox_hci_evt_read;
+ u32 mailbox_hci_evt_write;
+ u32 mailbox_acl_rx_read;
+ u32 mailbox_acl_rx_write;
+ u32 mailbox_acl_free_read;
+ u32 mailbox_acl_free_read_scan;
+ u32 mailbox_acl_free_write;
+ u32 mailbox_iq_report_read;
+ u32 mailbox_iq_report_write;
+
+ struct scsc_bt_avdtp_detect avdtp_detect;
+ /* Recovery handshake with the MX probe/remove callbacks */
+ struct completion recovery_release_complete;
+ struct completion recovery_probe_complete;
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ struct scsc_bt_hcf_collection hcf_collection;
+#endif
+};
+
+extern struct scsc_bt_service bt_service;
+
+/* IQ reporting */
+#define HCI_IQ_REPORTING_MAX_NUM_SAMPLES (82)
+/**
+ * The driver supports both reception of LE Connection IQ Report
+ * and LE Connectionless IQ report event:
+ *
+ * The largest hci event is LE Connection IQ Report, which
+ * constitutes of:
+ *
+ * - Hci packet type : 1 octet
+ * - Event code : 1 octet
+ * - Param total Length : 1 octet
+ * - Subevent code : 1 octet
+ * - Connection hdl : 2 octets
+ * - RX_PHY : 1 octet
+ * - Data_channel_idx : 1 octet
+ * - RSSI : 2 octets
+ * - RSSI antenna id : 1 octet
+ * - cte type : 1 octet
+ * - Slot durations : 1 octet
+ * - packet_status : 1 octet
+ * - event_counter : 2 octets
+ * - Sample count : 1 octet
+ *********************************
+ * Total : 17 octets
+ *
+ * The maximum hci event size in bytes is:
+ * (17 + (number of samples * 2 (both I and Q)))
+ *
+ */
+#define HCI_IQ_REPORT_MAX_LEN (17 + (2 * HCI_IQ_REPORTING_MAX_NUM_SAMPLES))
+#define HCI_LE_CONNECTIONLESS_IQ_REPORT_EVENT_SUB_CODE (0x15)
+#define HCI_LE_CONNECTION_IQ_REPORT_EVENT_SUB_CODE (0x16)
+
+
+/* Driver-global state for the ANT service; structured like
+ * struct scsc_bt_service but using the ASMHCP shared-memory protocol.
+ * A single instance (ant_service) exists. */
+struct scsc_ant_service {
+ dev_t device;
+ struct scsc_service *service;
+ struct device *dev;
+
+ /* ANT character device exposed to userspace */
+ struct cdev ant_cdev;
+ struct device *ant_device;
+ struct file *ant_file;
+ bool ant_users;
+ atomic_t ant_readers;
+ atomic_t ant_writers;
+ size_t ant_write_offset;
+
+ atomic_t error_count;
+ atomic_t service_users;
+
+ /* Bluetooth host configuration service reference */
+ scsc_mifram_ref bhcs_ref;
+ /* Ant shared memory host controller protocol reference */
+ scsc_mifram_ref asmhcp_ref;
+ /* Bluetooth configuration reference */
+ scsc_mifram_ref config_ref;
+
+ /* Ant shared memory host controller protocol pointer */
+ struct ASMHCP_PROTOCOL *asmhcp_protocol;
+
+ /* Current read() state machine (see enum scsc_ant_read_op) */
+ size_t read_offset;
+ enum scsc_ant_read_op read_operation;
+ u32 read_index;
+ wait_queue_head_t read_wait;
+
+ struct wake_lock read_wake_lock;
+ struct wake_lock write_wake_lock;
+ struct wake_lock service_wake_lock;
+ size_t write_wake_lock_count;
+ size_t write_wake_unlock_count;
+
+ size_t interrupt_count;
+ size_t interrupt_read_count;
+ size_t interrupt_write_count;
+
+ /* Cached mailbox read/write indices */
+ u32 mailbox_data_ctr_driv_read;
+ u32 mailbox_data_ctr_driv_write;
+ u32 mailbox_cmd_ctr_driv_read;
+ u32 mailbox_cmd_ctr_driv_write;
+
+ /* Recovery handshake with the MX probe/remove callbacks */
+ struct completion recovery_release_complete;
+ struct completion recovery_probe_complete;
+};
+
+extern struct scsc_ant_service ant_service;
+/* Coex avdtp detection */
+
+/* The buffers passed for inspection begin at the L2CAP basic header, as does the length
+ * passed in the function calls */
+#define AVDTP_DETECT_MIN_DATA_LENGTH (12) /* We always want to look for the SRC CID */
+#define AVDTP_DETECT_MIN_DATA_LENGTH_CON_RSP (16) /* For CON RSP, we want the result, too */
+#define AVDTP_DETECT_MIN_AVDTP_LENGTH (6) /* Basic L2CAP header + 2 AVDTP octets as min */
+
+#define HCI_ACL_PACKET_BOUNDARY_START_FLUSH (2)
+
+/* Can't use HCI_L2CAP_CID(data), since that assumes 4 bytes of HCI header, which has been stripped
+ * for the calls to the avdtp detection functions */
+#define HCI_L2CAP_RX_CID(data) ((u16)(*(data + 2) | (*(data + 3)) << 8))
+
+#define HCI_L2CAP_CODE(data) ((u8)(*(data + 4)))
+#define HCI_L2CAP_CON_REQ_PSM(data) ((u16)(*(data + 8) | (*(data + 9)) << 8))
+/* Valid for at least connection request/response and disconnection request */
+#define HCI_L2CAP_SOURCE_CID(data) ((u16)(*(data + 10) | (*(data + 11)) << 8))
+/* Valid for at least connection and disconnection responses */
+#define HCI_L2CAP_RSP_DEST_CID(data) ((u16)(*(data + 8) | (*(data + 9)) << 8))
+#define HCI_L2CAP_CON_RSP_RESULT(data) ((u16)(*(data + 12) | (*(data + 13)) << 8))
+#define HCI_L2CAP_CON_RSP_RESULT_SUCCESS (0x0000)
+#define HCI_L2CAP_CON_RSP_RESULT_REFUSED (0x0002)
+
+
+#define HCI_L2CAP_CONF_SEID_OFFSET 6
+#define HCI_L2CAP_CONF_TSEP_OFFSET 7
+#define HCI_L2CAP_CONF_SEID_INFO_SIZE 2
+/* Extract the 6-bit SEID from an AVDTP SEID-info entry; entry stride and
+ * byte offset come from HCI_L2CAP_CONF_SEID_INFO_SIZE/_OFFSET (defined
+ * above, out of view here). */
+#define HCI_L2CAP_CONF_SEID(data, index) (((u8)(*(data + index * HCI_L2CAP_CONF_SEID_INFO_SIZE + HCI_L2CAP_CONF_SEID_OFFSET)) >> 2) & 0x3F)
+#define HCI_L2CAP_CONF_TSEP_SRC 0
+#define HCI_L2CAP_CONF_TSEP_SNK 1
+/* Extract the 1-bit TSEP (source/sink) from an AVDTP SEID-info entry */
+#define HCI_L2CAP_CONF_TSEP(data, index) (((u8)(*(data + index * HCI_L2CAP_CONF_SEID_INFO_SIZE + HCI_L2CAP_CONF_TSEP_OFFSET)) >> 3) & 0x1)
+#define HCI_L2CAP_SET_CONF_ACP_SEID_OFFSET 6
+/* Extract the acceptor SEID from an AVDTP SET_CONFIGURATION signal */
+#define HCI_L2CAP_SET_CONF_ACP_SEID(data) (((u8)(*(data + HCI_L2CAP_SET_CONF_ACP_SEID_OFFSET)) >> 2) & 0x3F)
+
+
+/* L2CAP constants used by the AVDTP detection logic */
+#define L2CAP_AVDTP_PSM 0x0019
+#define L2CAP_SIGNALING_CID 0x0001
+#define L2CAP_CODE_CONNECT_REQ 0x02
+#define L2CAP_CODE_CONNECT_RSP 0x03
+#define L2CAP_CODE_CONFIGURE_REQ 0x04
+#define L2CAP_CODE_DISCONNECT_REQ 0x06
+#define L2CAP_CODE_DISCONNECT_RSP 0x07
+
+/* AVDTP signalling header accessors (offsets are relative to the start of
+ * the L2CAP frame) */
+#define AVDTP_MESSAGE_TYPE_OFFSET 4 /* Assuming only single packet type */
+#define AVDTP_MESSAGE_TYPE_MASK 0x03
+#define AVDTP_MESSAGE_TYPE(data) ((u8)(*(data + AVDTP_MESSAGE_TYPE_OFFSET)) & AVDTP_MESSAGE_TYPE_MASK)
+#define AVDTP_MESSAGE_TYPE_CMD 0x00
+#define AVDTP_MESSAGE_TYPE_GENERAL_REJECT 0x01
+#define AVDTP_MESSAGE_TYPE_RSP_ACCEPT 0x02
+#define AVDTP_MESSAGE_TYPE_RSP_REJECT 0x03
+
+#define AVDTP_SIGNAL_ID_OFFSET 5 /* Assuming only single packet type */
+#define AVDTP_SIGNAL_ID_MASK 0x1F
+#define AVDTP_SIGNAL_ID(data) ((u8)(*(data + AVDTP_SIGNAL_ID_OFFSET)) & AVDTP_SIGNAL_ID_MASK)
+
+/* AVDTP signal identifiers (subset used by the detection logic) */
+#define AVDTP_SIGNAL_ID_DISCOVER 0x01
+#define AVDTP_SIGNAL_ID_SET_CONF 0x03
+#define AVDTP_SIGNAL_ID_OPEN 0x06
+#define AVDTP_SIGNAL_ID_START 0x07
+#define AVDTP_SIGNAL_ID_CLOSE 0x08
+#define AVDTP_SIGNAL_ID_SUSPEND 0x09
+#define AVDTP_SIGNAL_ID_ABORT 0x0A
+
+/* Flag bits packed into the u32 "flags" word passed to
+ * scsc_bt_shm_h4_avdtp_detect_write() */
+#define AVDTP_SIGNAL_FLAG_MASK (0x80000000)
+#define AVDTP_SNK_FLAG_MASK (0x40000000)
+#define AVDTP_MESSAGE_COUNT_MASK (0x30000000)
+#define AVDTP_GET_MESSAGE_COUNT(data) ((data & AVDTP_MESSAGE_COUNT_MASK) >> 28)
+
+#define AVDTP_SNK_FLAG_TD_MASK (0x00000001)
+#define AVDTP_OPEN_FLAG_TD_MASK (0x00000002)
+
+/* AVDTP channel state shared with the detection implementation */
+extern uint16_t avdtp_signaling_src_cid;
+extern uint16_t avdtp_signaling_dst_cid;
+extern uint16_t avdtp_streaming_src_cid;
+extern uint16_t avdtp_streaming_dst_cid;
+extern uint16_t avdtp_hci_connection_handle;
+
+/* AVDTP signalling-channel detection states */
+#define AVDTP_DETECT_SIGNALING_IGNORE 0
+#define AVDTP_DETECT_SIGNALING_ACTIVE 1
+#define AVDTP_DETECT_SIGNALING_INACTIVE 2
+#define AVDTP_DETECT_SIGNALING_OPEN 3
+
+/* Inspect an ACL payload (rx or tx) for AVDTP signalling traffic */
+void scsc_avdtp_detect_rxtx(u16 hci_connection_handle, const unsigned char *data, uint16_t length, bool is_tx);
+/* Drop any detection state tied to the given connection handle */
+bool scsc_avdtp_detect_reset_connection_handle(uint16_t hci_connection_handle);
+/* Queue an AVDTP detection message to the firmware; see scsc_shm.c */
+bool scsc_bt_shm_h4_avdtp_detect_write(uint32_t flags,
+ uint16_t l2cap_cid,
+ uint16_t hci_connection_handle);
+void scsc_avdtp_detect_exit(void);
+
+#ifdef CONFIG_SCSC_BT_BLUEZ
+void slsi_bt_notify_probe(struct device *dev,
+ const struct file_operations *fs,
+ atomic_t *error_count,
+ wait_queue_head_t *read_wait);
+void slsi_bt_notify_remove(void);
+#else
+/* No-ops when the BlueZ integration is not built in */
+#define slsi_bt_notify_probe(dev, fs, error_count, read_wait)
+#define slsi_bt_notify_remove()
+#endif
+
+#endif /* __SCSC_BT_PRIV_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+/* MX BT shared memory interface */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <asm/io.h>
+#include <linux/wakelock.h>
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/api/bsmhcp.h>
+#include <scsc/scsc_logring.h>
+
+#include "scsc_bt_priv.h"
+#include "scsc_shm.h"
+#include "scsc_bt_hci.h"
+
+/* One entry of the synthesised HCI Number-Of-Completed-Packets event:
+ * connection handle plus the number of returned ACL credits.
+ * NOTE(review): this struct overlays the raw event buffer below via
+ * h4_hci_credit_entries; it has no explicit __packed attribute — two u16
+ * members happen to need no padding, but confirm this is intentional. */
+struct hci_credit_entry {
+ u16 hci_connection_handle;
+ u16 credits;
+};
+
+/* Static scratch buffers for assembling H4 packets.  These are shared,
+ * unsynchronised state — presumably a single reader/writer is assumed;
+ * TODO confirm against the char-device open policy. */
+static u8 h4_write_buffer[BSMHCP_ACL_PACKET_SIZE + H4DMUX_HEADER_ACL];
+/* H4 type byte + 4-byte HCI ACL data header */
+static u8 h4_acl_header[5];
+/* 4-byte event header followed by up to one credit entry per ring slot */
+static u8 h4_hci_event_ncp_header[4 + BSMHCP_TRANSFER_RING_ACL_COUNT * sizeof(struct hci_credit_entry)];
+/* Current valid length of h4_hci_event_ncp_header (header + 1 entry) */
+static u32 h4_hci_event_ncp_header_len = 8;
+/* Credit entries start right after the 4-byte event header */
+static struct hci_credit_entry *h4_hci_credit_entries = (struct hci_credit_entry *) &h4_hci_event_ncp_header[4];
+/* Canned H4 Hardware Error event (code 0, 1 parameter byte) */
+static u8 h4_hci_event_hardware_error[4] = { HCI_EVENT_PKT, HCI_EVENT_HARDWARE_ERROR_EVENT, 1, 0 };
+/* Assembled LE IQ report event and its current length */
+static u8 h4_iq_report_evt[HCI_IQ_REPORT_MAX_LEN];
+static u32 h4_iq_report_evt_len;
+
+
+/* To-host interrupt handler shared by the BG and FG firmware IRQ sources.
+ * Wakes the blocked reader when any to-host transfer ring has pending
+ * entries (or an error/panic is flagged), and releases the write wake
+ * lock once the firmware has drained every from-host ring. */
+static void scsc_bt_shm_irq_handler(int irqbit, void *data)
+{
+ /* Clear interrupt */
+ scsc_service_mifintrbit_bit_clear(bt_service.service, irqbit);
+
+ /* Ensure irq bit is cleared before reading the mailbox indexes */
+ mb();
+
+ bt_service.interrupt_count++;
+
+ /* Wake the reader operation */
+ if (bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write !=
+ bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read ||
+ bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write !=
+ bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read ||
+ bt_service.bsmhcp_protocol->header.mailbox_acl_free_write !=
+ bt_service.bsmhcp_protocol->header.mailbox_acl_free_read ||
+ bt_service.bsmhcp_protocol->header.mailbox_iq_report_write !=
+ bt_service.bsmhcp_protocol->header.mailbox_iq_report_read ||
+ 0 != atomic_read(&bt_service.error_count) ||
+ bt_service.bsmhcp_protocol->header.panic_deathbed_confession) {
+ bt_service.interrupt_read_count++;
+
+ /* Hold the system awake long enough (1 s) for the reader to run */
+ wake_lock_timeout(&bt_service.read_wake_lock, HZ);
+ wake_up(&bt_service.read_wait);
+ }
+
+ /* All from-host rings drained: the writer no longer needs to keep
+ * the system awake */
+ if (bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_write ==
+ bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_read &&
+ bt_service.bsmhcp_protocol->header.mailbox_acl_tx_write ==
+ bt_service.bsmhcp_protocol->header.mailbox_acl_tx_read) {
+ bt_service.interrupt_write_count++;
+
+ if (wake_lock_active(&bt_service.write_wake_lock)) {
+ bt_service.write_wake_unlock_count++;
+ wake_unlock(&bt_service.write_wake_lock);
+ }
+ }
+}
+
+/* Assign firmware/host interrupts */
+static void scsc_bt_shm_init_interrupt(void)
+{
+ /* To-host f/w IRQ allocations and ISR registrations */
+ bt_service.bsmhcp_protocol->header.bg_to_ap_int_src = scsc_service_mifintrbit_register_tohost(bt_service.service, scsc_bt_shm_irq_handler, NULL);
+ bt_service.bsmhcp_protocol->header.fg_to_ap_int_src = scsc_service_mifintrbit_register_tohost(bt_service.service, scsc_bt_shm_irq_handler, NULL);
+
+ /* From-host f/w IRQ allocations */
+ bt_service.bsmhcp_protocol->header.ap_to_bg_int_src = scsc_service_mifintrbit_alloc_fromhost(bt_service.service, SCSC_MIFINTR_TARGET_R4);
+ bt_service.bsmhcp_protocol->header.ap_to_fg_int_src = scsc_service_mifintrbit_alloc_fromhost(bt_service.service, SCSC_MIFINTR_TARGET_R4);
+ bt_service.bsmhcp_protocol->header.ap_to_fg_m4_int_src = scsc_service_mifintrbit_alloc_fromhost(bt_service.service, SCSC_MIFINTR_TARGET_M4);
+
+ SCSC_TAG_DEBUG(BT_COMMON, "Registered to-host IRQ bits %d:%d:%d, from-host IRQ bits %d:%d\n",
+ bt_service.bsmhcp_protocol->header.bg_to_ap_int_src,
+ bt_service.bsmhcp_protocol->header.fg_to_ap_int_src,
+ bt_service.bsmhcp_protocol->header.ap_to_fg_m4_int_src,
+ bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+ bt_service.bsmhcp_protocol->header.ap_to_fg_int_src);
+}
+
+/* Queue one AVDTP detection message (flags + L2CAP CID + connection
+ * handle) onto the AVDTP transfer ring and interrupt the firmware.
+ * Returns true on success; false after an index sanity failure (error
+ * count bumped) or a full ring (firmware panic forced). */
+bool scsc_bt_shm_h4_avdtp_detect_write(uint32_t flags,
+ uint16_t l2cap_cid,
+ uint16_t hci_connection_handle)
+{
+ uint32_t tr_read;
+ uint32_t tr_write;
+ struct BSMHCP_TD_AVDTP *td;
+
+ spin_lock(&bt_service.avdtp_detect.fw_write_lock);
+
+ /* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
+ tr_read = bt_service.bsmhcp_protocol->header.mailbox_avdtp_read;
+ tr_write = bt_service.bsmhcp_protocol->header.mailbox_avdtp_write;
+
+ td = &bt_service.bsmhcp_protocol->avdtp_transfer_ring[tr_write];
+
+ SCSC_TAG_DEBUG(BT_H4,
+ "AVDTP_DETECT_PKT (flags: 0x%08X, cid: 0x%04X, handle: 0x%04X, read=%u, write=%u)\n",
+ flags,
+ l2cap_cid,
+ hci_connection_handle,
+ tr_read,
+ tr_write);
+
+ /* Index out of bounds check */
+ if (tr_read >= BSMHCP_TRANSFER_RING_AVDTP_SIZE || tr_write >= BSMHCP_TRANSFER_RING_AVDTP_SIZE) {
+ spin_unlock(&bt_service.avdtp_detect.fw_write_lock);
+ SCSC_TAG_ERR(BT_H4,
+ "AVDTP_DETECT_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n",
+ tr_read,
+ tr_write);
+ atomic_inc(&bt_service.error_count);
+ return false;
+ }
+
+ /* Does the transfer ring have room for an entry */
+ if (BSMHCP_HAS_ROOM(tr_write, tr_read, BSMHCP_TRANSFER_RING_AVDTP_SIZE)) {
+ /* Fill the transfer descriptor with the AVDTP data */
+ td->flags = flags;
+ td->l2cap_cid = l2cap_cid;
+ td->hci_connection_handle = hci_connection_handle;
+
+ /* Ensure the wake lock is acquired */
+ if (!wake_lock_active(&bt_service.write_wake_lock)) {
+ bt_service.write_wake_lock_count++;
+ wake_lock(&bt_service.write_wake_lock);
+ }
+
+ /* Increase the write pointer */
+ BSMHCP_INCREASE_INDEX(tr_write, BSMHCP_TRANSFER_RING_AVDTP_SIZE);
+ bt_service.bsmhcp_protocol->header.mailbox_avdtp_write = tr_write;
+
+ spin_unlock(&bt_service.avdtp_detect.fw_write_lock);
+
+ /* Memory barrier to ensure out-of-order execution is completed */
+ mmiowb();
+
+ /* Trigger the interrupt in the mailbox */
+ scsc_service_mifintrbit_bit_set(
+ bt_service.service,
+ bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+ SCSC_MIFINTR_TARGET_R4);
+ } else {
+ /* Transfer ring full */
+ spin_unlock(&bt_service.avdtp_detect.fw_write_lock);
+ SCSC_TAG_ERR(BT_H4,
+ "AVDTP_DETECT_PKT - No more room for messages (tr_read=%u, tr_write=%u)\n",
+ tr_read,
+ tr_write);
+ /* A full AVDTP ring is treated as a fatal protocol failure */
+ scsc_service_force_panic(bt_service.service);
+ return false;
+ }
+ return true;
+}
+
+
+/* Copy one complete H4 HCI command packet into the HCI command transfer
+ * ring and interrupt the firmware.  Returns the number of bytes consumed,
+ * 0 when the ring is full (caller exceeded its command credits), or -EIO
+ * on an index sanity failure.
+ * NOTE(review): no visible bound check of count against td->data capacity
+ * here — presumably the caller limits count to the ring element size;
+ * TODO confirm. */
+static ssize_t scsc_bt_shm_h4_hci_cmd_write(const unsigned char *data, size_t count)
+{
+ /* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
+ uint32_t tr_read = bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_read;
+ uint32_t tr_write = bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_write;
+#ifdef CONFIG_SCSC_PRINTK
+ /* op_code only exists for logging; SCSC_TAG_DEBUG below presumably
+ * compiles to a no-op without CONFIG_SCSC_PRINTK — TODO confirm */
+ uint16_t op_code = *(uint16_t *) data;
+#endif
+
+ /* Temp vars */
+ struct BSMHCP_TD_CONTROL *td = &bt_service.bsmhcp_protocol->hci_cmd_transfer_ring[tr_write];
+
+ SCSC_TAG_DEBUG(BT_H4, "HCI_COMMAND_PKT (op_code=0x%04x, len=%zu, read=%u, write=%u)\n", op_code, count, tr_read, tr_write);
+
+ /* Index out of bounds check */
+ if (tr_read >= BSMHCP_TRANSFER_RING_CMD_SIZE || tr_write >= BSMHCP_TRANSFER_RING_CMD_SIZE) {
+ SCSC_TAG_ERR(BT_H4, "HCI_COMMAND_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n", tr_read, tr_write);
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+
+ /* Does the transfer ring have room for an entry */
+ if (BSMHCP_HAS_ROOM(tr_write, tr_read, BSMHCP_TRANSFER_RING_CMD_SIZE)) {
+ /* Fill the transfer descriptor with the HCI command data */
+ memcpy(td->data, data, count);
+ td->length = (u16)count;
+
+ /* Ensure the wake lock is acquired */
+ if (!wake_lock_active(&bt_service.write_wake_lock)) {
+ bt_service.write_wake_lock_count++;
+ wake_lock(&bt_service.write_wake_lock);
+ }
+
+ /* Increase the write pointer */
+ BSMHCP_INCREASE_INDEX(tr_write, BSMHCP_TRANSFER_RING_CMD_SIZE);
+ bt_service.bsmhcp_protocol->header.mailbox_hci_cmd_write = tr_write;
+
+ /* Memory barrier to ensure out-of-order execution is completed */
+ mmiowb();
+
+ /* Trigger the interrupt in the mailbox */
+ scsc_service_mifintrbit_bit_set(bt_service.service, bt_service.bsmhcp_protocol->header.ap_to_bg_int_src, SCSC_MIFINTR_TARGET_R4);
+ } else
+ /* Transfer ring full. Only happens if the user attempt to send more HCI command packets than
+ * available credits */
+ count = 0;
+
+ return count;
+}
+
+/* Copy one H4 ACL data packet into a TX data buffer slot and queue a
+ * descriptor on the ACL TX transfer ring.  Tracks L2CAP reassembly state
+ * per connection handle so the firmware can be told when a frame ends
+ * (BSMHCP_ACL_L2CAP_FLAG_END).  Returns bytes consumed or -EIO.
+ * NOTE(review): the error paths below (-EIO after a slot was claimed)
+ * return without releasing bt_service.allocated[acldata_buf_index] —
+ * the slot appears to be leaked; verify against the error-recovery path. */
+static ssize_t scsc_bt_shm_h4_acl_write(const unsigned char *data, size_t count)
+{
+ /* Store the read/write pointer on the stack since both are placed in unbuffered/uncached memory */
+ uint32_t tr_read = bt_service.bsmhcp_protocol->header.mailbox_acl_tx_read;
+ uint32_t tr_write = bt_service.bsmhcp_protocol->header.mailbox_acl_tx_write;
+
+ /* Temp vars */
+ struct BSMHCP_TD_ACL_TX_DATA *td = &bt_service.bsmhcp_protocol->acl_tx_data_transfer_ring[tr_write];
+ int acldata_buf_index = -1;
+ u16 l2cap_length;
+ u32 i;
+ size_t payload_len = count - ACLDATA_HEADER_SIZE;
+
+ /* Index out of bounds check */
+ if (tr_read >= BSMHCP_TRANSFER_RING_ACL_SIZE || tr_write >= BSMHCP_TRANSFER_RING_ACL_SIZE) {
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - Index out of bounds (tr_read=%u, tr_write=%u)\n", tr_read, tr_write);
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+
+ /* Allocate a data slot (round-robin scan starting after the last
+ * allocation) */
+ for (i = 0; i < BSMHCP_DATA_BUFFER_TX_ACL_SIZE; i++) {
+ /* Wrap the offset index around the buffer max */
+ if (++bt_service.last_alloc == BSMHCP_DATA_BUFFER_TX_ACL_SIZE)
+ bt_service.last_alloc = 0;
+ /* Claim a free slot */
+ if (bt_service.allocated[bt_service.last_alloc] == 0) {
+ bt_service.allocated[bt_service.last_alloc] = 1;
+ acldata_buf_index = bt_service.last_alloc;
+ bt_service.allocated_count++;
+ break;
+ }
+ }
+
+ /* Is a buffer available to hold the data */
+ if (acldata_buf_index < 0) {
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - No buffers available\n");
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+
+ /* Does the transfer ring have room for an entry */
+ if (BSMHCP_HAS_ROOM(tr_write, tr_read, BSMHCP_TRANSFER_RING_ACL_SIZE)) {
+ /* Extract the ACL data header and L2CAP header and fill it into the transfer descriptor */
+ td->buffer_index = (uint8_t)acldata_buf_index;
+ td->flags = HCI_ACL_DATA_FLAGS(data);
+ td->hci_connection_handle = HCI_ACL_DATA_CON_HDL(data);
+ td->length = (u16)payload_len;
+
+ if ((td->flags & BSMHCP_ACL_BC_FLAG_BCAST_ACTIVE) ||
+ (td->flags & BSMHCP_ACL_BC_FLAG_BCAST_ALL)) {
+ SCSC_TAG_DEBUG(BT_H4,
+ "Setting broadcast handle (hci_connection_handle=0x%03x)\n",
+ td->hci_connection_handle);
+ bt_service.connection_handle_list[td->hci_connection_handle].state = CONNECTION_ACTIVE;
+ }
+
+ /* Is this a packet marked with the start flag */
+ if ((td->flags & BSMHCP_ACL_PB_FLAG_MASK) == BSMHCP_ACL_PB_FLAG_START_NONFLUSH ||
+ (td->flags & BSMHCP_ACL_PB_FLAG_MASK) == BSMHCP_ACL_PB_FLAG_START_FLUSH) {
+
+ /* Extract the L2CAP payload length and connection identifier */
+ td->l2cap_cid = HCI_L2CAP_CID(data);
+
+ /* data+4 to skip the HCI header, to align offsets with the rx detection. The "true" argument is to tell
+ * the detection that this is TX */
+ scsc_avdtp_detect_rxtx(td->hci_connection_handle, data+4, td->length, true);
+
+ l2cap_length = HCI_L2CAP_LENGTH(data);
+
+ SCSC_TAG_DEBUG(BT_TX, "ACL[START] (len=%u, buffer=%u, credits=%u, l2cap_cid=0x%04x, l2cap_length=%u)\n",
+ td->length, acldata_buf_index,
+ BSMHCP_DATA_BUFFER_TX_ACL_SIZE - (bt_service.allocated_count - bt_service.freed_count),
+ HCI_L2CAP_CID(data), l2cap_length);
+
+ if (l2cap_length == payload_len - L2CAP_HEADER_SIZE)
+ /* Mark it with the END flag if packet length matches the L2CAP payload length */
+ td->flags |= BSMHCP_ACL_L2CAP_FLAG_END;
+ else if (l2cap_length < payload_len - L2CAP_HEADER_SIZE) {
+ /* Mark it with the END flag if packet length is greater than the L2CAP payload length
+ and generate a warning notifying that this is incorrect according to the specification.
+ This is allowed to support the BITE tester. */
+ SCSC_TAG_WARNING(BT_H4, "ACL_DATA_PKT - H4 ACL payload length > L2CAP Length (payload_len=%zu, l2cap_length=%u)\n",
+ payload_len - L2CAP_HEADER_SIZE, l2cap_length);
+ td->flags |= BSMHCP_ACL_L2CAP_FLAG_END;
+ } else if (l2cap_length > (payload_len - L2CAP_HEADER_SIZE)) {
+ /* This is only a fragment of the packet. Save the remaining number of octets required
+ * to complete the packet */
+ bt_service.connection_handle_list[td->hci_connection_handle].length = (u16)(l2cap_length - payload_len + L2CAP_HEADER_SIZE);
+ bt_service.connection_handle_list[td->hci_connection_handle].l2cap_cid = HCI_L2CAP_CID(data);
+ } else {
+ /* NOTE(review): unreachable — the ==, < and > comparisons above
+ * are exhaustive, so this error branch can never run */
+ /* The packet is larger than the L2CAP payload length - protocol error */
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - L2CAP Length Error (l2cap_length=%u, payload_len=%zu)\n",
+ l2cap_length, payload_len - L2CAP_HEADER_SIZE);
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+ } else if ((td->flags & BSMHCP_ACL_PB_FLAG_MASK) == BSMHCP_ACL_PB_FLAG_CONT) {
+ /* Set the L2CAP connection identifer set by the start packet */
+ td->l2cap_cid = bt_service.connection_handle_list[td->hci_connection_handle].l2cap_cid;
+
+ SCSC_TAG_DEBUG(BT_TX, "ACL[CONT] (len=%u, buffer=%u, credits=%u, l2cap_cid=0x%04x, length=%u)\n",
+ td->length, acldata_buf_index,
+ BSMHCP_DATA_BUFFER_TX_ACL_SIZE - (bt_service.allocated_count - bt_service.freed_count),
+ bt_service.connection_handle_list[td->hci_connection_handle].l2cap_cid,
+ bt_service.connection_handle_list[td->hci_connection_handle].length);
+
+ /* Does this packet complete the L2CAP frame */
+ if (bt_service.connection_handle_list[td->hci_connection_handle].length == payload_len) {
+ /* The L2CAP frame is complete. mark it with the END flag */
+ td->flags |= BSMHCP_ACL_L2CAP_FLAG_END;
+
+ /* Set the remaining length to zero */
+ bt_service.connection_handle_list[td->hci_connection_handle].length = 0;
+ } else if (bt_service.connection_handle_list[td->hci_connection_handle].length < payload_len) {
+ /* Mark it with the END flag if packet length is greater than the L2CAP missing payload length
+ and generate a warning notifying that this is incorrect according to the specification.
+ This is allowed to support the BITE tester. */
+ SCSC_TAG_WARNING(BT_H4, "ACL_DATA_PKT - H4 ACL payload length > L2CAP Missing Length (payload_len=%zu, missing=%u)\n",
+ payload_len, bt_service.connection_handle_list[td->hci_connection_handle].length);
+ td->flags |= BSMHCP_ACL_L2CAP_FLAG_END;
+ /* Set the remaining length to zero */
+ bt_service.connection_handle_list[td->hci_connection_handle].length = 0;
+ } else if (bt_service.connection_handle_list[td->hci_connection_handle].length > payload_len)
+ /* This is another fragment of the packet. Save the remaining number of octets required
+ * to complete the packet */
+ bt_service.connection_handle_list[td->hci_connection_handle].length -= (u16)payload_len;
+ else if (bt_service.connection_handle_list[td->hci_connection_handle].length < payload_len) {
+ /* NOTE(review): dead code — this condition duplicates the
+ * second branch above and can never be reached */
+ /* The packet is larger than the L2CAP payload length - protocol error */
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - L2CAP Length Error (missing=%u, payload_len=%zu)\n",
+ bt_service.connection_handle_list[td->hci_connection_handle].length, payload_len);
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+ } else {
+ /* Reserved flags set - report it as an error */
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - Flag set to reserved\n");
+ atomic_inc(&bt_service.error_count);
+ return -EIO;
+ }
+
+ SCSC_TAG_DEBUG(BT_H4, "ACL_DATA_PKT (len=%zu, read=%u, write=%u, slot=%u, flags=0x%04x, handle=0x%03x, l2cap_cid=0x%04x, missing=%u)\n",
+ payload_len, tr_read, tr_write, acldata_buf_index, td->flags >> 4, td->hci_connection_handle, td->l2cap_cid,
+ bt_service.connection_handle_list[td->hci_connection_handle].length);
+
+ /* Ensure the wake lock is acquired */
+ if (!wake_lock_active(&bt_service.write_wake_lock)) {
+ bt_service.write_wake_lock_count++;
+ wake_lock(&bt_service.write_wake_lock);
+ }
+
+ /* Copy the ACL packet into the target buffer */
+ memcpy(&bt_service.bsmhcp_protocol->acl_tx_buffer[acldata_buf_index][0], &data[ACLDATA_HEADER_SIZE], payload_len);
+ /* Increase the write pointer */
+ BSMHCP_INCREASE_INDEX(tr_write, BSMHCP_TRANSFER_RING_ACL_SIZE);
+ bt_service.bsmhcp_protocol->header.mailbox_acl_tx_write = tr_write;
+
+ /* Memory barrier to ensure out-of-order execution is completed */
+ mmiowb();
+
+ if (bt_service.bsmhcp_protocol->header.firmware_features & BSMHCP_FEATURE_M4_INTERRUPTS)
+ /* Trigger the interrupt in the mailbox */
+ scsc_service_mifintrbit_bit_set(bt_service.service,
+ bt_service.bsmhcp_protocol->header.ap_to_fg_m4_int_src, SCSC_MIFINTR_TARGET_M4);
+ else
+ /* Trigger the interrupt in the mailbox */
+ scsc_service_mifintrbit_bit_set(bt_service.service,
+ bt_service.bsmhcp_protocol->header.ap_to_fg_int_src, SCSC_MIFINTR_TARGET_R4);
+ } else {
+ /* Transfer ring full. Only happens if the user attempt to send more ACL data packets than
+ * available credits */
+ SCSC_TAG_ERR(BT_H4, "ACL_DATA_PKT - No room in transfer ring (tr_write=%u, tr_read=%u)\n",
+ tr_write, tr_read);
+ atomic_inc(&bt_service.error_count);
+ count = -EIO;
+ }
+
+ return count;
+}
+
+#ifdef CONFIG_SCSC_PRINTK
+static const char *scsc_hci_evt_decode_event_code(u8 hci_event_code, u8 hci_ulp_sub_code)
+{
+ const char *ret = "NA";
+
+ switch (hci_event_code) {
+ HCI_EV_DECODE(HCI_EV_INQUIRY_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_INQUIRY_RESULT);
+ HCI_EV_DECODE(HCI_EV_CONN_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_CONN_REQUEST);
+ HCI_EV_DECODE(HCI_EV_DISCONNECT_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_AUTH_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_REMOTE_NAME_REQ_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ENCRYPTION_CHANGE);
+ HCI_EV_DECODE(HCI_EV_CHANGE_CONN_LINK_KEY_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_MASTER_LINK_KEY_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_READ_REM_SUPP_FEATURES_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_READ_REMOTE_VER_INFO_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_QOS_SETUP_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_COMMAND_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_COMMAND_STATUS);
+ HCI_EV_DECODE(HCI_EV_HARDWARE_ERROR);
+ HCI_EV_DECODE(HCI_EV_FLUSH_OCCURRED);
+ HCI_EV_DECODE(HCI_EV_ROLE_CHANGE);
+ HCI_EV_DECODE(HCI_EV_NUMBER_COMPLETED_PKTS);
+ HCI_EV_DECODE(HCI_EV_MODE_CHANGE);
+ HCI_EV_DECODE(HCI_EV_RETURN_LINK_KEYS);
+ HCI_EV_DECODE(HCI_EV_PIN_CODE_REQ);
+ HCI_EV_DECODE(HCI_EV_LINK_KEY_REQ);
+ HCI_EV_DECODE(HCI_EV_LINK_KEY_NOTIFICATION);
+ HCI_EV_DECODE(HCI_EV_LOOPBACK_COMMAND);
+ HCI_EV_DECODE(HCI_EV_DATA_BUFFER_OVERFLOW);
+ HCI_EV_DECODE(HCI_EV_MAX_SLOTS_CHANGE);
+ HCI_EV_DECODE(HCI_EV_READ_CLOCK_OFFSET_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_CONN_PACKET_TYPE_CHANGED);
+ HCI_EV_DECODE(HCI_EV_QOS_VIOLATION);
+ HCI_EV_DECODE(HCI_EV_PAGE_SCAN_MODE_CHANGE);
+ HCI_EV_DECODE(HCI_EV_PAGE_SCAN_REP_MODE_CHANGE);
+ HCI_EV_DECODE(HCI_EV_FLOW_SPEC_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_INQUIRY_RESULT_WITH_RSSI);
+ HCI_EV_DECODE(HCI_EV_READ_REM_EXT_FEATURES_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_FIXED_ADDRESS);
+ HCI_EV_DECODE(HCI_EV_ALIAS_ADDRESS);
+ HCI_EV_DECODE(HCI_EV_GENERATE_ALIAS_REQ);
+ HCI_EV_DECODE(HCI_EV_ACTIVE_ADDRESS);
+ HCI_EV_DECODE(HCI_EV_ALLOW_PRIVATE_PAIRING);
+ HCI_EV_DECODE(HCI_EV_ALIAS_ADDRESS_REQ);
+ HCI_EV_DECODE(HCI_EV_ALIAS_NOT_RECOGNISED);
+ HCI_EV_DECODE(HCI_EV_FIXED_ADDRESS_ATTEMPT);
+ HCI_EV_DECODE(HCI_EV_SYNC_CONN_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_SYNC_CONN_CHANGED);
+ HCI_EV_DECODE(HCI_EV_SNIFF_SUB_RATE);
+ HCI_EV_DECODE(HCI_EV_EXTENDED_INQUIRY_RESULT);
+ HCI_EV_DECODE(HCI_EV_ENCRYPTION_KEY_REFRESH_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_IO_CAPABILITY_REQUEST);
+ HCI_EV_DECODE(HCI_EV_IO_CAPABILITY_RESPONSE);
+ HCI_EV_DECODE(HCI_EV_USER_CONFIRMATION_REQUEST);
+ HCI_EV_DECODE(HCI_EV_USER_PASSKEY_REQUEST);
+ HCI_EV_DECODE(HCI_EV_REMOTE_OOB_DATA_REQUEST);
+ HCI_EV_DECODE(HCI_EV_SIMPLE_PAIRING_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_LST_CHANGE);
+ HCI_EV_DECODE(HCI_EV_ENHANCED_FLUSH_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_USER_PASSKEY_NOTIFICATION);
+ HCI_EV_DECODE(HCI_EV_KEYPRESS_NOTIFICATION);
+ HCI_EV_DECODE(HCI_EV_REM_HOST_SUPPORTED_FEATURES);
+ HCI_EV_DECODE(HCI_EV_TRIGGERED_CLOCK_CAPTURE);
+ HCI_EV_DECODE(HCI_EV_SYNCHRONIZATION_TRAIN_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_SYNCHRONIZATION_TRAIN_RECEIVED);
+ HCI_EV_DECODE(HCI_EV_CSB_RECEIVE);
+ HCI_EV_DECODE(HCI_EV_CSB_TIMEOUT);
+ HCI_EV_DECODE(HCI_EV_TRUNCATED_PAGE_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_SLAVE_PAGE_RESPONSE_TIMEOUT);
+ HCI_EV_DECODE(HCI_EV_CSB_CHANNEL_MAP_CHANGE);
+ HCI_EV_DECODE(HCI_EV_INQUIRY_RESPONSE_NOTIFICATION);
+ HCI_EV_DECODE(HCI_EV_AUTHENTICATED_PAYLOAD_TIMEOUT_EXPIRED);
+ case HCI_EV_ULP:
+ {
+ switch (hci_ulp_sub_code) {
+ HCI_EV_DECODE(HCI_EV_ULP_CONNECTION_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_ADVERTISING_REPORT);
+ HCI_EV_DECODE(HCI_EV_ULP_CONNECTION_UPDATE_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_READ_REMOTE_USED_FEATURES_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_LONG_TERM_KEY_REQUEST);
+ HCI_EV_DECODE(HCI_EV_ULP_REMOTE_CONNECTION_PARAMETER_REQUEST);
+ HCI_EV_DECODE(HCI_EV_ULP_DATA_LENGTH_CHANGE);
+ HCI_EV_DECODE(HCI_EV_ULP_READ_LOCAL_P256_PUB_KEY_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_GENERATE_DHKEY_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_ENHANCED_CONNECTION_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_DIRECT_ADVERTISING_REPORT);
+ HCI_EV_DECODE(HCI_EV_ULP_PHY_UPDATE_COMPLETE);
+ HCI_EV_DECODE(HCI_EV_ULP_USED_CHANNEL_SELECTION);
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
+/* Copy (part of) the assembled LE IQ report event to userspace, resuming
+ * from bt_service.read_offset.  Clears the read operation once the whole
+ * event has been delivered.  Returns bytes copied or -EACCES.
+ * Fix: printk format specifiers now match the argument types — read_offset
+ * is size_t (%zu), consumed and ret are ssize_t (%zd); the previous %u/%zu
+ * mismatches yield garbage on 64-bit builds. */
+static ssize_t scsc_iq_report_evt_read(char __user *buf, size_t len)
+{
+ ssize_t consumed = 0;
+ ssize_t ret = 0;
+
+ /* Calculate the amount of data that can be transferred */
+ len = min(h4_iq_report_evt_len - bt_service.read_offset, len);
+
+ SCSC_TAG_DEBUG(BT_H4, "SCSC_IQ_REPORT_EVT_READ: td(h4_iq_len=%u offset=%zu)\n",
+ h4_iq_report_evt_len,
+ bt_service.read_offset);
+
+ /* Copy the data to the user buffer */
+ ret = copy_to_user(buf, &h4_iq_report_evt[bt_service.read_offset], len);
+ if (ret == 0) {
+ /* All good - Update our consumed information */
+ bt_service.read_offset += len;
+ consumed = len;
+
+ SCSC_TAG_DEBUG(BT_H4, "SCSC_IQ_REPORT_EVT_READ: (offset=%zu consumed: %zd)\n",
+ bt_service.read_offset,
+ consumed);
+
+ /* Have all data been copied to the userspace buffer */
+ if (bt_service.read_offset == h4_iq_report_evt_len) {
+ /* All good - read operation is completed */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = BT_READ_OP_NONE;
+ }
+ } else {
+ SCSC_TAG_ERR(BT_H4, "copy_to_user returned: %zd\n", ret);
+ ret = -EACCES;
+ }
+
+ return ret == 0 ? consumed : ret;
+}
+
+/* Copy (part of) the current HCI event descriptor to userspace as an H4
+ * packet (type byte + event payload), resuming from bt_service.read_offset.
+ * Clears the read operation once the whole packet has been delivered.
+ * Returns bytes copied, -EFAULT on a malformed firmware event, or -EACCES
+ * when copy_to_user fails. */
+static ssize_t scsc_hci_evt_read(char __user *buf, size_t len)
+{
+ struct BSMHCP_TD_HCI_EVT *td = &bt_service.bsmhcp_protocol->hci_evt_transfer_ring[bt_service.read_index];
+ u8 h4_hci_event_header = HCI_EVENT_PKT;
+ ssize_t consumed = 0;
+ ssize_t ret = 0;
+
+ SCSC_TAG_DEBUG(BT_H4, "td (length=%u, hci_connection_handle=0x%03x, event_type=%u), len=%zu, read_offset=%zu\n",
+ td->length, td->hci_connection_handle, td->event_type, len, bt_service.read_offset);
+
+ /* Is this the start of the copy operation */
+ if (0 == bt_service.read_offset) {
+ SCSC_TAG_DEBUG(BT_RX, "HCI Event [type=%s (0x%02x), length=%u]\n",
+ scsc_hci_evt_decode_event_code(td->data[0], td->data[2]), td->data[0], td->data[1]);
+
+ /* Sanity: the parameter length byte must agree with the
+ * descriptor length */
+ if (td->data[1] + HCI_EVENT_HEADER_LENGTH != td->length) {
+ SCSC_TAG_ERR(BT_H4, "Firmware sent invalid HCI event\n");
+ atomic_inc(&bt_service.error_count);
+ /* Fix: bail out here.  Previously ret was set to -EFAULT but
+ * immediately overwritten by the copy_to_user() below, so the
+ * error was lost and the malformed event was still forwarded
+ * to userspace. */
+ return -EFAULT;
+ }
+
+ /* Store the H4 header in the user buffer */
+ ret = copy_to_user(buf, &h4_hci_event_header, sizeof(h4_hci_event_header));
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ consumed = sizeof(h4_hci_event_header);
+ bt_service.read_offset = sizeof(h4_hci_event_header);
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zd\n", ret);
+ ret = -EACCES;
+ }
+ }
+
+ /* Can more data be put into the userspace buffer */
+ if (0 == ret && (len - consumed)) {
+ /* Calculate the amount of data that can be transferred */
+ len = min((td->length - (bt_service.read_offset - sizeof(h4_hci_event_header))), (len - consumed));
+
+ /* Copy the data to the user buffer */
+ ret = copy_to_user(&buf[consumed], &td->data[bt_service.read_offset - sizeof(h4_hci_event_header)], len);
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ bt_service.read_offset += len;
+ consumed += len;
+
+ /* Have all data been copied to the userspace buffer */
+ if (bt_service.read_offset == (sizeof(h4_hci_event_header) + td->length)) {
+ /* All good - read operation is completed */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = BT_READ_OP_NONE;
+ }
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zd\n", ret);
+ ret = -EACCES;
+ }
+ }
+
+ return 0 == ret ? consumed : ret;
+}
+
+static ssize_t scsc_hci_evt_error_read(char __user *buf, size_t len)
+{
+ ssize_t ret;
+ ssize_t consumed = 0;
+
+ /* Calculate the amount of data that can be transferred */
+ len = min(sizeof(h4_hci_event_hardware_error) - bt_service.read_offset, len);
+
+ /* Copy the data to the user buffer */
+ ret = copy_to_user(buf, &h4_hci_event_hardware_error[bt_service.read_offset], len);
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ bt_service.read_offset += len;
+ consumed = len;
+
+ /* Have all data been copied to the userspace buffer */
+ if (bt_service.read_offset == sizeof(h4_hci_event_hardware_error)) {
+ /* All good - read operation is completed */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = BT_READ_OP_NONE;
+ }
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
+ ret = -EACCES;
+ }
+
+ return 0 == ret ? consumed : ret;
+}
+
+/* Copy (part of) the current ACL RX descriptor to userspace as an H4
+ * packet: a 5-byte header (H4 type + HCI ACL header, rebuilt in the
+ * static h4_acl_header) followed by the payload.  Resumes from
+ * bt_service.read_offset across partial reads.  Returns bytes copied or
+ * -EACCES. */
+static ssize_t scsc_acl_read(char __user *buf, size_t len)
+{
+ struct BSMHCP_TD_ACL_RX *td = &bt_service.bsmhcp_protocol->acl_rx_transfer_ring[bt_service.read_index];
+ ssize_t consumed = 0;
+ size_t copy_len = 0;
+ ssize_t ret = 0;
+
+ SCSC_TAG_DEBUG(BT_H4, "td (length=%u, hci_connection_handle=0x%03x, packet_boundary=%u, broadcast_flag=%u), len=%zu, read_offset=%zu\n",
+ td->length, td->hci_connection_handle, td->packet_boundary, td->broadcast_flag, len, bt_service.read_offset);
+
+ /* Has the header been copied to userspace */
+ if (bt_service.read_offset < sizeof(h4_acl_header)) {
+ /* Calculate the amount of data that can be transferred */
+ copy_len = min(sizeof(h4_acl_header) - bt_service.read_offset, len);
+
+ /* Fully generate the H4 header + ACL data header regardless of the available amount of user memory */
+ h4_acl_header[0] = HCI_ACLDATA_PKT;
+ h4_acl_header[1] = td->hci_connection_handle & 0x00ff;
+ /* Upper handle nibble + PB flag (bits 4-5) + BC flag (bits 6-7) */
+ h4_acl_header[2] = ((td->hci_connection_handle & 0x0f00) >> 8) | ((td->packet_boundary & 0x03) << 4) | ((td->broadcast_flag & 0x03) << 6);
+ h4_acl_header[3] = td->length & 0x00ff;
+ h4_acl_header[4] = (td->length & 0xff00) >> 8;
+
+ /* Copy the H4 header + ACL data header to the userspace buffer */
+ ret = copy_to_user(buf, &h4_acl_header[bt_service.read_offset], copy_len);
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ consumed = copy_len;
+ bt_service.read_offset += copy_len;
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
+ ret = -EACCES;
+ }
+ }
+
+ /* Can more data be put into the userspace buffer */
+ if (0 == ret && bt_service.read_offset >= sizeof(h4_acl_header) && (len - consumed)) {
+ /* Calculate the amount of data that can be transferred */
+ copy_len = min((td->length - (bt_service.read_offset - sizeof(h4_acl_header))), (len - consumed));
+
+ /* Copy the data to the user buffer */
+ ret = copy_to_user(&buf[consumed], &td->data[bt_service.read_offset - sizeof(h4_acl_header)], copy_len);
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ bt_service.read_offset += copy_len;
+ consumed += copy_len;
+
+ /* Have all data been copied to the userspace buffer */
+ if (bt_service.read_offset == (sizeof(h4_acl_header) + td->length)) {
+ /* All good - read operation is completed */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = BT_READ_OP_NONE;
+
+ /* Only supported on start packet*/
+ if (td->packet_boundary == HCI_ACL_PACKET_BOUNDARY_START_FLUSH)
+ /* The "false" argument is to tell the detection that this is RX */
+ scsc_avdtp_detect_rxtx(td->hci_connection_handle, td->data, td->length, false);
+ }
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
+ ret = -EACCES;
+ }
+ }
+
+ SCSC_TAG_DEBUG(BT_H4, "read_offset=%zu, consumed=%zu, ret=%zd, len=%zu, copy_len=%zu\n",
+ bt_service.read_offset, consumed, ret, len, copy_len);
+
+ return 0 == ret ? consumed : ret;
+}
+
+/* Copy (part of) the pre-built Number-Of-Completed-Packets event
+ * (h4_hci_event_ncp_header) to userspace, resuming from
+ * bt_service.read_offset.  Returns bytes copied or -EACCES. */
+static ssize_t scsc_acl_credit(char __user *buf, size_t len)
+{
+#ifdef CONFIG_SCSC_PRINTK
+ /* td is used only by the debug trace below; this relies on
+ * SCSC_TAG_DEBUG discarding its arguments when CONFIG_SCSC_PRINTK
+ * is unset — TODO confirm */
+ struct BSMHCP_TD_ACL_TX_FREE *td = &bt_service.bsmhcp_protocol->acl_tx_free_transfer_ring[bt_service.read_index];
+#endif
+ ssize_t consumed = 0;
+ ssize_t ret = 0;
+
+ SCSC_TAG_DEBUG(BT_H4, "td (hci_connection_handle=0x%03x, buffer_index=%u), len=%zu, read_offset=%zu\n",
+ td->hci_connection_handle & SCSC_BT_ACL_HANDLE_MASK,
+ td->buffer_index, len, bt_service.read_offset);
+
+ /* Calculate the amount of data that can be transferred */
+ len = min(h4_hci_event_ncp_header_len - bt_service.read_offset, len);
+
+ /* Copy the data to the user buffer */
+ ret = copy_to_user(buf, &h4_hci_event_ncp_header[bt_service.read_offset], len);
+ if (0 == ret) {
+ /* All good - Update our consumed information */
+ bt_service.read_offset += len;
+ consumed = len;
+
+ /* Have all data been copied to the userspace buffer */
+ if (bt_service.read_offset == h4_hci_event_ncp_header_len) {
+ /* All good - read operation is completed */
+ bt_service.read_offset = 0;
+ bt_service.read_operation = BT_READ_OP_NONE;
+ }
+ } else {
+ SCSC_TAG_WARNING(BT_H4, "copy_to_user returned: %zu\n", ret);
+ ret = -EACCES;
+ }
+
+ return 0 == ret ? consumed : ret;
+}
+
+/* Resume whichever read operation is in flight (HCI event, ACL data,
+ * ACL credits, IQ report or hardware-error event), advancing the matching
+ * mailbox read index once the operation completes.  Returns bytes copied,
+ * 0 when nothing is in flight, or -EIO once the stop state is reached. */
+static ssize_t scsc_bt_shm_h4_read_continue(char __user *buf, size_t len)
+{
+ ssize_t ret = 0;
+
+ /* Is a HCI event read operation ongoing */
+ if (BT_READ_OP_HCI_EVT == bt_service.read_operation) {
+ SCSC_TAG_DEBUG(BT_H4, "BT_READ_OP_HCI_EVT\n");
+
+ /* Copy data into the userspace buffer */
+ ret = scsc_hci_evt_read(buf, len);
+ /* Note: brace-less nested ifs — both conditions guard the single
+ * BSMHCP_INCREASE_INDEX statement below */
+ if (BT_READ_OP_NONE == bt_service.read_operation)
+ /* All done - increase the read pointer and continue
+ * unless this was an out-of-order read for the queue
+ * sync helper */
+ if (bt_service.read_index == bt_service.mailbox_hci_evt_read)
+ BSMHCP_INCREASE_INDEX(bt_service.mailbox_hci_evt_read, BSMHCP_TRANSFER_RING_EVT_SIZE);
+ /* Is a ACL data read operation ongoing */
+ } else if (BT_READ_OP_ACL_DATA == bt_service.read_operation) {
+ SCSC_TAG_DEBUG(BT_H4, "BT_READ_OP_ACL_DATA\n");
+
+ /* Copy data into the userspace buffer */
+ ret = scsc_acl_read(buf, len);
+ if (BT_READ_OP_NONE == bt_service.read_operation)
+ /* All done - increase the read pointer and continue */
+ BSMHCP_INCREASE_INDEX(bt_service.mailbox_acl_rx_read, BSMHCP_TRANSFER_RING_ACL_SIZE);
+ /* Is a ACL credit update operation ongoing */
+ } else if (BT_READ_OP_ACL_CREDIT == bt_service.read_operation) {
+ SCSC_TAG_DEBUG(BT_H4, "BT_READ_OP_ACL_CREDIT\n");
+
+ /* Copy data into the userspace buffer */
+ ret = scsc_acl_credit(buf, len);
+ } else if (bt_service.read_operation == BT_READ_OP_IQ_REPORT) {
+ SCSC_TAG_DEBUG(BT_H4, "BT_READ_OP_IQ_REPORT\n");
+
+ /* Copy data into the userspace buffer */
+ ret = scsc_iq_report_evt_read(buf, len);
+ if (bt_service.read_operation == BT_READ_OP_NONE)
+ /* All done - increase the read pointer and continue */
+ BSMHCP_INCREASE_INDEX(bt_service.mailbox_iq_report_read, BSMHCP_TRANSFER_RING_IQ_REPORT_SIZE);
+ } else if (BT_READ_OP_HCI_EVT_ERROR == bt_service.read_operation) {
+ SCSC_TAG_ERR(BT_H4, "BT_READ_OP_HCI_EVT_ERROR\n");
+
+ /* Copy data into the userspace buffer */
+ ret = scsc_hci_evt_error_read(buf, len);
+ if (BT_READ_OP_NONE == bt_service.read_operation)
+ /* All done - set the stop condition */
+ bt_service.read_operation = BT_READ_OP_STOP;
+ } else if (BT_READ_OP_STOP == bt_service.read_operation)
+ /* Terminal state: all further reads fail */
+ ret = -EIO;
+
+ return ret;
+}
+
+/* Build an HCI LE meta event (0x3E) carrying an IQ report from the next
+ * descriptor in the IQ report transfer ring, then start copying it to the
+ * userspace buffer.
+ *
+ * Only starts a new transfer when no other read operation is in progress.
+ * The ring read index is advanced only if the whole event fitted into the
+ * caller's buffer; otherwise the remainder is delivered by a later call to
+ * scsc_bt_shm_h4_read_continue().
+ *
+ * Returns the number of bytes copied, 0 if there was nothing to do, or a
+ * negative error code.
+ */
+static ssize_t scsc_bt_shm_h4_read_iq_report_evt(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	if (bt_service.read_operation == BT_READ_OP_NONE &&
+	    bt_service.mailbox_iq_report_read != bt_service.mailbox_iq_report_write) {
+		struct BSMHCP_TD_IQ_REPORTING_EVT *td =
+			&bt_service.bsmhcp_protocol->iq_reporting_transfer_ring[bt_service.mailbox_iq_report_read];
+		u32 index = 0;
+		u32 j = 0;
+		u32 i;
+
+		memset(h4_iq_report_evt, 0, sizeof(h4_iq_report_evt));
+		h4_iq_report_evt_len = 0;
+
+		/* H4 packet type byte followed by the HCI event header */
+		h4_iq_report_evt[index++] = HCI_EVENT_PKT;
+		h4_iq_report_evt[index++] = 0x3E;
+		index++; /* Leaving room for total length of params */
+		h4_iq_report_evt[index++] = td->subevent_code;
+
+		if (td->subevent_code == HCI_LE_CONNECTIONLESS_IQ_REPORT_EVENT_SUB_CODE) {
+			/* LE Connectionless IQ Report Event*/
+			h4_iq_report_evt[index++] = td->sync_handle & 0xFF;
+			h4_iq_report_evt[index++] = (td->sync_handle >> 8) & 0xFF;
+		} else if (td->subevent_code == HCI_LE_CONNECTION_IQ_REPORT_EVENT_SUB_CODE) {
+			/* LE connection IQ Report Event */
+			h4_iq_report_evt[index++] = td->connection_handle & 0xFF;
+			h4_iq_report_evt[index++] = (td->connection_handle >> 8) & 0xFF;
+			h4_iq_report_evt[index++] = td->rx_phy;
+
+		}
+		/* Fields common to both sub-events, little-endian where 16 bit */
+		h4_iq_report_evt[index++] = td->channel_index;
+		h4_iq_report_evt[index++] = td->rssi & 0xFF;
+		h4_iq_report_evt[index++] = (td->rssi >> 8) & 0xFF;
+		h4_iq_report_evt[index++] = td->rssi_antenna_id;
+		h4_iq_report_evt[index++] = td->cte_type;
+		h4_iq_report_evt[index++] = td->slot_durations;
+		h4_iq_report_evt[index++] = td->packet_status;
+		h4_iq_report_evt[index++] = td->event_count & 0xFF;
+		h4_iq_report_evt[index++] = (td->event_count >> 8) & 0xFF;
+		h4_iq_report_evt[index++] = td->sample_count;
+
+		/* Total length of hci event: header so far plus two bytes
+		 * (one sample pair) per reported sample
+		 * NOTE(review): assumes h4_iq_report_evt is dimensioned for the
+		 * maximum sample_count - confirm against the buffer definition */
+		h4_iq_report_evt_len = index + (2 * td->sample_count);
+
+		/* Total length of hci event parameters (everything after the
+		 * H4 type byte and the 2-byte event header) */
+		h4_iq_report_evt[2] = h4_iq_report_evt_len - 3;
+
+		/* De-interleave the sample pairs from td->data: the first byte
+		 * of each pair goes into the first contiguous run, the second
+		 * byte into the run that follows it */
+		for (i = 0; i < td->sample_count; i++) {
+			h4_iq_report_evt[index + i] = td->data[j++];
+			h4_iq_report_evt[(index + td->sample_count) + i] = td->data[j++];
+		}
+
+		/* Start the IQ report copy to userspace */
+		bt_service.read_operation = BT_READ_OP_IQ_REPORT;
+		bt_service.read_index = bt_service.mailbox_iq_report_read;
+
+		ret = scsc_iq_report_evt_read(&buf[consumed], len - consumed);
+		if (ret > 0) {
+			/* All good - Update our consumed information */
+			consumed += ret;
+			ret = 0;
+
+			/**
+			 * Update the index if all the data could be copied to the userspace
+			 * buffer otherwise stop processing the HCI events
+			 */
+			if (bt_service.read_operation == BT_READ_OP_NONE)
+				BSMHCP_INCREASE_INDEX(bt_service.mailbox_iq_report_read,
+						      BSMHCP_TRANSFER_RING_IQ_REPORT_SIZE);
+		}
+	}
+
+	return ret == 0 ? consumed : ret;
+}
+
+/* Drain the HCI event transfer ring into the userspace buffer.
+ *
+ * Runs until an event could only be partially copied, the ring is empty,
+ * event processing is paused (waiting for an ACL disconnect packet), or an
+ * error is detected. Also maintains the per-connection state table driven
+ * by firmware connect/disconnect indications embedded in the descriptors.
+ *
+ * Returns the total number of bytes copied, or a negative error code.
+ */
+static ssize_t scsc_bt_shm_h4_read_hci_evt(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	while (BT_READ_OP_NONE == bt_service.read_operation && 0 == ret && !bt_service.hci_event_paused && bt_service.mailbox_hci_evt_read != bt_service.mailbox_hci_evt_write) {
+		struct BSMHCP_TD_HCI_EVT *td = &bt_service.bsmhcp_protocol->hci_evt_transfer_ring[bt_service.mailbox_hci_evt_read];
+
+		/* This event has already been processed (out-of-order, by the
+		 * queue sync helper) - skip it */
+		if (bt_service.processed[bt_service.mailbox_hci_evt_read]) {
+			bt_service.processed[bt_service.mailbox_hci_evt_read] = false;
+			BSMHCP_INCREASE_INDEX(bt_service.mailbox_hci_evt_read, BSMHCP_TRANSFER_RING_EVT_SIZE);
+			continue;
+		}
+
+		/* A connection event has been detected by the firmware */
+		if (td->event_type & BSMHCP_EVENT_TYPE_CONNECTED) {
+			/* Sanity check of the HCI connection handle */
+			if (td->hci_connection_handle >= SCSC_BT_CONNECTION_INFO_MAX) {
+				SCSC_TAG_ERR(BT_H4, "connection handle is beyond max (hci_connection_handle=0x%03x)\n",
+					     td->hci_connection_handle);
+				atomic_inc(&bt_service.error_count);
+				break;
+			}
+
+			SCSC_TAG_DEBUG(BT_H4, "connected (hci_connection_handle=0x%03x, state=%u)\n",
+				       td->hci_connection_handle, bt_service.connection_handle_list[td->hci_connection_handle].state);
+
+			/* Update the connection table to mark it as active */
+			bt_service.connection_handle_list[td->hci_connection_handle].state = CONNECTION_ACTIVE;
+			bt_service.connection_handle_list[td->hci_connection_handle].length = 0;
+
+			/* ACL data processing can now continue */
+			bt_service.acldata_paused = false;
+
+		/* A disconnection event has been detected by the firmware */
+		} else if (td->event_type & BSMHCP_EVENT_TYPE_DISCONNECTED) {
+			SCSC_TAG_DEBUG(BT_H4, "disconnected (hci_connection_handle=0x%03x, state=%u)\n",
+				       td->hci_connection_handle, bt_service.connection_handle_list[td->hci_connection_handle].state);
+
+			/* If this ACL connection had an avdtp stream, mark it gone and interrupt the bg */
+			if (scsc_avdtp_detect_reset_connection_handle(td->hci_connection_handle))
+				mmiowb();
+
+			/* If the connection is marked as active the ACL disconnect packet hasn't yet arrived */
+			if (CONNECTION_ACTIVE == bt_service.connection_handle_list[td->hci_connection_handle].state) {
+				/* Pause the HCI event processing until the ACL disconnect packet arrives */
+				bt_service.hci_event_paused = true;
+				break;
+			}
+
+			/* Firmware does not have more ACL data - Mark the connection as inactive */
+			bt_service.connection_handle_list[td->hci_connection_handle].state = CONNECTION_NONE;
+		}
+
+		/* Start a HCI event copy to userspace */
+		bt_service.read_operation = BT_READ_OP_HCI_EVT;
+		bt_service.read_index = bt_service.mailbox_hci_evt_read;
+		ret = scsc_hci_evt_read(&buf[consumed], len - consumed);
+		if (ret > 0) {
+			/* All good - Update our consumed information */
+			consumed += ret;
+			ret = 0;
+
+			/* Update the index if all the data could be copied to the userspace buffer otherwise stop processing the HCI events */
+			if (BT_READ_OP_NONE == bt_service.read_operation)
+				BSMHCP_INCREASE_INDEX(bt_service.mailbox_hci_evt_read, BSMHCP_TRANSFER_RING_EVT_SIZE);
+			else
+				break;
+		}
+	}
+
+	return 0 == ret ? consumed : ret;
+}
+
+/**
+ * Kick off the copy of one ACL rx descriptor to userspace.
+ *
+ * ACL processing must stop when either the packet could only be partially
+ * copied (resumed by a later read) or an error occurred.
+ *
+ * @param[out] ret      result of the read operation is written here
+ * @param[in,out] consumed  incremented by the number of bytes copied
+ *
+ * @return true if ACL data processing should stop
+ */
+static bool scsc_bt_shm_h4_acl_start_copy(char __user *buf,
+					  size_t len,
+					  ssize_t *ret,
+					  ssize_t *consumed)
+{
+	ssize_t res;
+
+	/* Mark the ACL transfer as the operation in progress */
+	bt_service.read_operation = BT_READ_OP_ACL_DATA;
+	bt_service.read_index = bt_service.mailbox_acl_rx_read;
+
+	res = scsc_acl_read(&buf[*consumed], len - *consumed);
+	*ret = res;
+	if (res < 0)
+		return true;	/* Error - break the caller's loop */
+	if (res == 0)
+		return false;	/* Nothing copied - keep processing */
+
+	/* Account for the copied bytes */
+	*consumed += res;
+	*ret = 0;
+
+	/* Stop processing if only part of the packet reached userspace;
+	 * the remainder is delivered by a later read */
+	if (bt_service.read_operation != BT_READ_OP_NONE)
+		return true;
+
+	/* Whole packet delivered - advance the ring read index */
+	BSMHCP_INCREASE_INDEX(bt_service.mailbox_acl_rx_read, BSMHCP_TRANSFER_RING_ACL_SIZE);
+
+	return false;
+}
+
+/* Drain the ACL rx transfer ring into the userspace buffer.
+ *
+ * Runs until a packet could only be partially copied, the ring is empty,
+ * ACL processing is paused (waiting for the HCI connection complete event),
+ * or an error is detected.
+ *
+ * Returns the total number of bytes copied, or a negative error code.
+ */
+static ssize_t scsc_bt_shm_h4_read_acl_data(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	while (bt_service.read_operation == BT_READ_OP_NONE &&
+	       !bt_service.acldata_paused &&
+	       bt_service.mailbox_acl_rx_read != bt_service.mailbox_acl_rx_write) {
+		struct BSMHCP_TD_ACL_RX *td = &bt_service.bsmhcp_protocol->acl_rx_transfer_ring[bt_service.mailbox_acl_rx_read];
+
+		/* Bypass packet inspection and connection handling for data dump */
+		if (SCSC_BT_ACL_RAW == (td->hci_connection_handle & SCSC_BT_ACL_RAW_MASK)) {
+			if (scsc_bt_shm_h4_acl_start_copy(buf, len, &ret, &consumed))
+				break;
+			/* NOTE(review): on a successful raw copy, execution
+			 * falls through to the sanity check and connection
+			 * handling below for the same descriptor - confirm
+			 * this is intended and not a missing "continue" */
+		}
+
+		/* Sanity check of the HCI connection handle */
+		if (td->hci_connection_handle >= SCSC_BT_CONNECTION_INFO_MAX) {
+			SCSC_TAG_ERR(BT_H4, "connection handle is beyond max (hci_connection_handle=0x%03x)\n",
+				     td->hci_connection_handle);
+			atomic_inc(&bt_service.error_count);
+			break;
+		}
+
+		/* Only process ACL data if the connection is marked active aka a HCI connection complete event has arrived */
+		if (CONNECTION_ACTIVE == bt_service.connection_handle_list[td->hci_connection_handle].state) {
+			/* Is this the final packet for the indicated ACL connection */
+			if (td->disconnected) {
+				SCSC_TAG_DEBUG(BT_H4, "ACL disconnected (hci_connection_handle=0x%03x, state=%u)\n",
+					       td->hci_connection_handle, bt_service.connection_handle_list[td->hci_connection_handle].state);
+
+				/* Update the connection table to mark it as disconnected */
+				bt_service.connection_handle_list[td->hci_connection_handle].state = CONNECTION_DISCONNECTED;
+
+				/* Clear the HCI event processing to allow for the HCI disconnect event to be transferred to userspace */
+				bt_service.hci_event_paused = false;
+
+				/* Update the read pointer */
+				BSMHCP_INCREASE_INDEX(bt_service.mailbox_acl_rx_read, BSMHCP_TRANSFER_RING_ACL_SIZE);
+			} else {
+				if (scsc_bt_shm_h4_acl_start_copy(buf, len, &ret, &consumed))
+					break;
+			}
+		/* If the connection state is inactive the HCI connection complete information hasn't yet arrived. Stop processing ACL data */
+		} else if (CONNECTION_NONE == bt_service.connection_handle_list[td->hci_connection_handle].state) {
+			SCSC_TAG_DEBUG(BT_H4, "ACL empty (hci_connection_handle=0x%03x, state=%u)\n",
+				       td->hci_connection_handle, bt_service.connection_handle_list[td->hci_connection_handle].state);
+			bt_service.acldata_paused = true;
+		/* If the connection state is disconnected the firmware sent ACL after the ACL disconnect packet which is an FW error */
+		} else {
+			SCSC_TAG_ERR(BT_H4, "ACL data received after disconnected indication\n");
+			atomic_inc(&bt_service.error_count);
+			break;
+		}
+	}
+
+	return 0 == ret ? consumed : ret;
+}
+
+/* Aggregate freed ACL tx buffers into a single HCI Number Of Completed
+ * Packets event and start copying it to the userspace buffer.
+ *
+ * Credits for the same (sanitized) connection handle are folded into one
+ * entry of h4_hci_credit_entries; credits for connections that are no
+ * longer active are dropped with a warning.
+ *
+ * Returns the number of bytes copied, 0 if there was nothing to do, or a
+ * negative error code.
+ */
+static ssize_t scsc_bt_shm_h4_read_acl_credit(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	ssize_t consumed = 0;
+
+	/* Note: "0 == ret" is trivially true here (ret was just initialised);
+	 * kept for symmetry with the other reader functions */
+	if (BT_READ_OP_NONE == bt_service.read_operation && 0 == ret &&
+	    bt_service.mailbox_acl_free_read != bt_service.mailbox_acl_free_write) {
+		u32 entries = 0;
+		u32 index;
+
+		/* NOTE(review): only the event header buffer is cleared here;
+		 * h4_hci_credit_entries is assumed to be zeroed/consumed by
+		 * scsc_acl_credit - confirm, otherwise stale entries survive */
+		memset(h4_hci_event_ncp_header, 0, sizeof(h4_hci_event_ncp_header));
+
+		while (bt_service.mailbox_acl_free_read != bt_service.mailbox_acl_free_write) {
+			struct BSMHCP_TD_ACL_TX_FREE *td =
+				&bt_service.bsmhcp_protocol->acl_tx_free_transfer_ring[bt_service.mailbox_acl_free_read];
+			uint16_t sanitized_conn_handle = td->hci_connection_handle & SCSC_BT_ACL_HANDLE_MASK;
+
+			if (bt_service.connection_handle_list[sanitized_conn_handle].state == CONNECTION_ACTIVE) {
+				/* Find an empty slot or the slot already used
+				 * for this connection handle */
+				for (index = 0; index < BSMHCP_TRANSFER_RING_ACL_COUNT; index++) {
+					if (0 == h4_hci_credit_entries[index].hci_connection_handle) {
+						h4_hci_credit_entries[index].hci_connection_handle =
+							td->hci_connection_handle;
+						h4_hci_credit_entries[index].credits = 1;
+						entries++;
+						break;
+					} else if ((h4_hci_credit_entries[index].hci_connection_handle &
+						    SCSC_BT_ACL_HANDLE_MASK) == sanitized_conn_handle) {
+						h4_hci_credit_entries[index].hci_connection_handle =
+							td->hci_connection_handle;
+						h4_hci_credit_entries[index].credits++;
+						break;
+					}
+				}
+			} else
+				SCSC_TAG_WARNING(BT_H4,
+						 "No active connection ((hci_connection_handle=0x%03x)\n",
+						 sanitized_conn_handle);
+
+			BSMHCP_INCREASE_INDEX(bt_service.mailbox_acl_free_read, BSMHCP_TRANSFER_RING_ACL_SIZE);
+		}
+
+		if (entries) {
+			/* Fill the number of completed packets data into the temp buffer */
+			h4_hci_event_ncp_header[0] = HCI_EVENT_PKT;
+			h4_hci_event_ncp_header[1] = HCI_EVENT_NUMBER_OF_COMPLETED_PACKETS_EVENT;
+			h4_hci_event_ncp_header[2] = 1 + (4 * entries); /* Parameter length */
+			h4_hci_event_ncp_header[3] = entries; /* Number_of_Handles */
+			h4_hci_event_ncp_header_len = 4 + (4 * entries);
+
+			/* Start a ACL credit copy to userspace */
+			bt_service.read_operation = BT_READ_OP_ACL_CREDIT;
+			bt_service.read_index = bt_service.mailbox_acl_free_read;
+			ret = scsc_acl_credit(&buf[consumed], len - consumed);
+			if (ret > 0) {
+				/* Update our consumed information */
+				consumed += ret;
+				ret = 0;
+			}
+		}
+	}
+
+	return 0 == ret ? consumed : ret;
+}
+
+/* Deadlock breaker for the case where HCI event processing is paused
+ * (waiting for an ACL disconnect packet) while ACL processing is paused
+ * (waiting for a connection complete event).
+ *
+ * Scans the HCI event ring out-of-order for the connected indication that
+ * matches the head of the ACL rx ring, delivers it to userspace and marks
+ * it as processed so the normal in-order reader skips it later.
+ *
+ * Returns 0 if there was nothing to resolve, the number of bytes copied on
+ * success, or the (possibly negative) wait result on timeout/interrupt.
+ */
+ssize_t scsc_bt_shm_h4_queue_sync_helper(char __user *buf, size_t len)
+{
+	ssize_t ret = 0;
+	bool found = false;
+	u32 mailbox_hci_evt_read = bt_service.mailbox_hci_evt_read;
+
+	/* If both the HCI event transfer ring and ACL data transfer ring have been
+	 * paused the entire HCI event transfer ring is scanned for the presence
+	 * of the connected indication. Once present this is transferred to the host
+	 * stack and marked as processed. This will unlock the hci event processing */
+	while (bt_service.hci_event_paused && bt_service.acldata_paused) {
+		struct BSMHCP_TD_ACL_RX *acl_td = &bt_service.bsmhcp_protocol->acl_rx_transfer_ring[bt_service.mailbox_acl_rx_read];
+
+		while (mailbox_hci_evt_read != bt_service.mailbox_hci_evt_write) {
+			struct BSMHCP_TD_HCI_EVT *td = &bt_service.bsmhcp_protocol->hci_evt_transfer_ring[mailbox_hci_evt_read];
+
+			if (td->event_type & BSMHCP_EVENT_TYPE_CONNECTED && acl_td->hci_connection_handle == td->hci_connection_handle) {
+				/* Update the connection table to mark it as active */
+				bt_service.connection_handle_list[td->hci_connection_handle].state = CONNECTION_ACTIVE;
+				bt_service.connection_handle_list[td->hci_connection_handle].length = 0;
+
+				/* ACL data processing can now continue */
+				bt_service.acldata_paused = false;
+
+				/* Mark the event as processed so the in-order
+				 * reader skips it */
+				bt_service.processed[mailbox_hci_evt_read] = true;
+
+				/* Indicate the event have been found */
+				found = true;
+
+				/* Start a HCI event copy to userspace (out of
+				 * order - read_index differs from the ring's
+				 * read pointer) */
+				bt_service.read_operation = BT_READ_OP_HCI_EVT;
+				bt_service.read_index = mailbox_hci_evt_read;
+				ret = scsc_hci_evt_read(buf, len);
+				break;
+			}
+
+			BSMHCP_INCREASE_INDEX(mailbox_hci_evt_read, BSMHCP_TRANSFER_RING_EVT_SIZE);
+		}
+
+		if (!found) {
+			/* Wait up to 1 second (HZ jiffies) for the firmware to
+			 * produce more events; the condition reads the live
+			 * (non-cached) write index */
+			ret = wait_event_interruptible_timeout(bt_service.read_wait,
+							       ((mailbox_hci_evt_read != bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write ||
+								 0 != atomic_read(&bt_service.error_count) ||
+								 bt_service.bsmhcp_protocol->header.panic_deathbed_confession)), HZ);
+			if (0 == ret) {
+				SCSC_TAG_ERR(BT_H4, "firmware didn't send the connected event within the given timeframe\n");
+				atomic_inc(&bt_service.error_count);
+				break;
+			} else if (1 != ret) {
+				/* NOTE(review): wait_event_interruptible_timeout
+				 * returns the remaining jiffies (> 1) when the
+				 * condition became true early - that case is
+				 * logged as a user interrupt here and the
+				 * positive jiffies count is returned to the
+				 * caller as if it were a byte count. Confirm
+				 * this is intended */
+				SCSC_TAG_INFO(BT_H4, "user interrupt\n");
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+/* Main H4 read entry point: deliver HCI events, ACL data, ACL credits and
+ * IQ reports to the userspace buffer, blocking (unless O_NONBLOCK) until
+ * something is available.
+ *
+ * Only one concurrent reader is allowed. On return the shared-memory read
+ * indexes are published to the firmware and the appropriate mailbox
+ * interrupts are generated.
+ *
+ * Returns the number of bytes placed in @buf, or a negative error code.
+ */
+ssize_t scsc_bt_shm_h4_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
+{
+	ssize_t consumed = 0;
+	ssize_t ret = 0;
+	ssize_t res;
+	bool gen_bg_int = false;
+	bool gen_fg_int = false;
+
+	if (len == 0)
+		return 0;
+
+	/* Only 1 reader is allowed */
+	if (1 != atomic_inc_return(&bt_service.h4_readers)) {
+		atomic_dec(&bt_service.h4_readers);
+		return -EIO;
+	}
+
+	/* Update the cached variables with the non-cached (shared memory) variables */
+	bt_service.mailbox_hci_evt_write = bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write;
+	bt_service.mailbox_acl_rx_write = bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write;
+	bt_service.mailbox_acl_free_write = bt_service.bsmhcp_protocol->header.mailbox_acl_free_write;
+	bt_service.mailbox_iq_report_write = bt_service.bsmhcp_protocol->header.mailbox_iq_report_write;
+
+	/* Only generate the HCI hardware error event if any pending operation has been completed
+	 * and the event hasn't already been sent. This check assumes the main while loop will exit
+	 * on a completed operation in the next section */
+	if (0 != atomic_read(&bt_service.error_count) && BT_READ_OP_NONE == bt_service.read_operation)
+		bt_service.read_operation = BT_READ_OP_HCI_EVT_ERROR;
+
+	/* put the remaining data from the transfer ring into the available userspace buffer */
+	if (BT_READ_OP_NONE != bt_service.read_operation) {
+		ret = scsc_bt_shm_h4_read_continue(buf, len);
+		/* Update the consumed variable in case an operation was ongoing */
+		if (0 < ret) {
+			consumed = ret;
+			ret = 0;
+		}
+	}
+
+	/* Main loop - Can only be entered when no operation is present on entering this function
+	 * or no hardware error has been detected. It loops until data has been placed in the
+	 * userspace buffer or an error has been detected */
+	while (0 == atomic_read(&bt_service.error_count) && 0 == consumed) {
+		/* If both the HCI event processing and ACL data processing has been disabled this function
+		 * helps exit this condition by scanning the HCI event queue for the connection established
+		 * event and return it to userspace */
+		ret = scsc_bt_shm_h4_queue_sync_helper(buf, len);
+		if (ret > 0) {
+			consumed = ret;
+			break;
+		}
+
+		/* Does any of the read/write pairs differ */
+		if ((bt_service.mailbox_hci_evt_read == bt_service.mailbox_hci_evt_write || bt_service.hci_event_paused) &&
+		    (bt_service.mailbox_acl_rx_read == bt_service.mailbox_acl_rx_write || bt_service.acldata_paused) &&
+		    bt_service.mailbox_acl_free_read == bt_service.mailbox_acl_free_write &&
+		    bt_service.mailbox_iq_report_read == bt_service.mailbox_iq_report_write &&
+		    0 == atomic_read(&bt_service.error_count) &&
+		    0 == bt_service.bsmhcp_protocol->header.panic_deathbed_confession) {
+			/* Don't wait if in NONBLOCK mode */
+			if (file->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				break;
+			}
+
+			/* All read/write pairs are identical - wait for the firmware. The conditional
+			 * check is used to verify that a read/write pair has actually changed */
+			ret = wait_event_interruptible(
+				bt_service.read_wait,
+				((bt_service.mailbox_hci_evt_read !=
+				  bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write &&
+				  !bt_service.hci_event_paused) ||
+				 (bt_service.mailbox_acl_rx_read !=
+				  bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write &&
+				  !bt_service.acldata_paused) ||
+				 (bt_service.mailbox_acl_free_read !=
+				  bt_service.bsmhcp_protocol->header.mailbox_acl_free_write) ||
+				 (bt_service.mailbox_iq_report_read !=
+				  bt_service.bsmhcp_protocol->header.mailbox_iq_report_write) ||
+				 atomic_read(&bt_service.error_count) != 0 ||
+				 bt_service.bsmhcp_protocol->header.panic_deathbed_confession));
+
+			/* Has an error been detected elsewhere in the driver then just return from this function */
+			if (0 != atomic_read(&bt_service.error_count))
+				break;
+
+			/* Any failures is handled by the userspace application */
+			if (ret)
+				break;
+
+			/* Refresh our write indexes before starting to process the protocol */
+			bt_service.mailbox_hci_evt_write = bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write;
+			bt_service.mailbox_acl_rx_write = bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write;
+			bt_service.mailbox_acl_free_write = bt_service.bsmhcp_protocol->header.mailbox_acl_free_write;
+			bt_service.mailbox_iq_report_write = bt_service.bsmhcp_protocol->header.mailbox_iq_report_write;
+		}
+
+		SCSC_TAG_DEBUG(BT_H4, "hci_evt_read=%u, hci_evt_write=%u, acl_rx_read=%u,acl_rx_write=%u\n",
+			       bt_service.mailbox_hci_evt_read,
+			       bt_service.mailbox_hci_evt_write,
+			       bt_service.mailbox_acl_rx_read,
+			       bt_service.mailbox_acl_rx_write);
+
+		SCSC_TAG_DEBUG(BT_H4, "acl_free_read=%u, acl_free_write=%u, iq_report_read=%u iq_report_write=%u\n",
+			       bt_service.mailbox_acl_free_read,
+			       bt_service.mailbox_acl_free_write,
+			       bt_service.mailbox_iq_report_read,
+			       bt_service.mailbox_iq_report_write);
+
+		SCSC_TAG_DEBUG(BT_H4, "read_operation=%u, hci_event_paused=%u, acldata_paused=%u\n",
+			       bt_service.read_operation, bt_service.hci_event_paused,
+			       bt_service.acldata_paused);
+
+		/* Release freed ACL tx buffers back to the allocation table.
+		 * This scan pointer runs ahead of mailbox_acl_free_read, which
+		 * is only advanced once the credits have reached userspace */
+		while (bt_service.mailbox_acl_free_read_scan != bt_service.mailbox_acl_free_write) {
+			struct BSMHCP_TD_ACL_TX_FREE *td = &bt_service.bsmhcp_protocol->acl_tx_free_transfer_ring[bt_service.mailbox_acl_free_read_scan];
+
+			/* Free the buffer in the allocation table */
+			if (td->buffer_index < BSMHCP_DATA_BUFFER_TX_ACL_SIZE) {
+				bt_service.allocated[td->buffer_index] = 0;
+				bt_service.freed_count++;
+
+				SCSC_TAG_DEBUG(BT_TX, "ACL[CREDIT] (index=%u, buffer=%u, credits=%u)\n",
+					       bt_service.mailbox_acl_free_read_scan,
+					       td->buffer_index,
+					       BSMHCP_DATA_BUFFER_TX_ACL_SIZE - (bt_service.allocated_count - bt_service.freed_count));
+			}
+
+			BSMHCP_INCREASE_INDEX(bt_service.mailbox_acl_free_read_scan, BSMHCP_TRANSFER_RING_ACL_SIZE);
+		}
+
+		/* First: process any pending HCI event that needs to be sent to userspace */
+		res = scsc_bt_shm_h4_read_hci_evt(&buf[consumed], len - consumed);
+		if (res < 0) {
+			ret = res;
+			break;
+		}
+		consumed += res;
+
+		/* Second: process any pending ACL data that needs to be sent to userspace */
+		res = scsc_bt_shm_h4_read_acl_data(&buf[consumed], len - consumed);
+		if (res < 0) {
+			ret = res;
+			break;
+		}
+		consumed += res;
+
+		/* Third: process any pending ACL credits that need to be sent to userspace */
+		res = scsc_bt_shm_h4_read_acl_credit(&buf[consumed], len - consumed);
+		if (res < 0) {
+			ret = res;
+			break;
+		}
+		consumed += res;
+
+		/* Fourth: process any pending IQ report that needs to be sent to userspace */
+		res = scsc_bt_shm_h4_read_iq_report_evt(&buf[consumed], len - consumed);
+		if (res < 0) {
+			ret = res;
+			break;
+		}
+		consumed += res;
+	}
+
+	/* If nothing was delivered, report a detected error as a HCI hardware
+	 * error event exactly once, then stop */
+	if (0 == ret && 0 == consumed) {
+		if (0 != atomic_read(&bt_service.error_count) && BT_READ_OP_NONE == bt_service.read_operation)
+			bt_service.read_operation = BT_READ_OP_HCI_EVT_ERROR;
+
+		if (BT_READ_OP_HCI_EVT_ERROR == bt_service.read_operation) {
+			SCSC_TAG_DEBUG(BT_H4, "BT_READ_OP_HCI_EVT_ERROR\n");
+
+			/* Copy data into the userspace buffer */
+			ret = scsc_hci_evt_error_read(buf, len);
+			if (ret > 0) {
+				consumed += ret;
+				ret = 0;
+			}
+
+			if (BT_READ_OP_NONE == bt_service.read_operation)
+				/* All done - set the stop condition */
+				bt_service.read_operation = BT_READ_OP_STOP;
+		}
+	}
+
+	/* If anything was read, generate the appropriate interrupt(s) */
+	if (bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read !=
+	    bt_service.mailbox_hci_evt_read)
+		gen_bg_int = true;
+
+	if (bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read !=
+	    bt_service.mailbox_acl_rx_read ||
+	    bt_service.bsmhcp_protocol->header.mailbox_acl_free_read !=
+	    bt_service.mailbox_acl_free_read)
+		gen_fg_int = true;
+
+	if (bt_service.bsmhcp_protocol->header.mailbox_iq_report_read !=
+	    bt_service.mailbox_iq_report_read)
+		gen_fg_int = true;
+
+
+	/* Update the read index for all transfer rings */
+	bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read = bt_service.mailbox_hci_evt_read;
+	bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read = bt_service.mailbox_acl_rx_read;
+	bt_service.bsmhcp_protocol->header.mailbox_acl_free_read = bt_service.mailbox_acl_free_read;
+	bt_service.bsmhcp_protocol->header.mailbox_iq_report_read = bt_service.mailbox_iq_report_read;
+
+	/* Ensure the data is updating correctly in memory */
+	mmiowb();
+
+	if (gen_bg_int)
+		scsc_service_mifintrbit_bit_set(bt_service.service, bt_service.bsmhcp_protocol->header.ap_to_bg_int_src, SCSC_MIFINTR_TARGET_R4);
+
+	if (gen_fg_int) {
+		if (bt_service.bsmhcp_protocol->header.firmware_features & BSMHCP_FEATURE_M4_INTERRUPTS)
+			/* Trigger the interrupt in the mailbox */
+			scsc_service_mifintrbit_bit_set(bt_service.service,
+							bt_service.bsmhcp_protocol->header.ap_to_fg_m4_int_src, SCSC_MIFINTR_TARGET_M4);
+		else
+			/* Trigger the interrupt in the mailbox */
+			scsc_service_mifintrbit_bit_set(bt_service.service,
+							bt_service.bsmhcp_protocol->header.ap_to_fg_int_src, SCSC_MIFINTR_TARGET_R4);
+	}
+
+	if (BT_READ_OP_STOP != bt_service.read_operation)
+		SCSC_TAG_DEBUG(BT_H4, "hci_evt_read=%u, acl_rx_read=%u, acl_free_read=%u, read_operation=%u, consumed=%zd, ret=%zd\n",
+			       bt_service.mailbox_hci_evt_read, bt_service.mailbox_acl_rx_read, bt_service.mailbox_acl_free_read, bt_service.read_operation, consumed, ret);
+
+	/* Decrease the H4 readers counter */
+	atomic_dec(&bt_service.h4_readers);
+
+	return 0 == ret ? consumed : ret;
+}
+
+/* Main H4 write entry point: demultiplex the userspace byte stream into
+ * HCI command and ACL data packets and hand them to the firmware transfer
+ * rings.
+ *
+ * Partial packets are buffered in h4_write_buffer across calls, tracked by
+ * bt_service.h4_write_offset. Only one concurrent writer is allowed.
+ *
+ * Returns the number of bytes accepted from @buf, or a negative error code.
+ */
+ssize_t scsc_bt_shm_h4_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
+{
+	size_t length;
+	size_t hci_pkt_len;
+	ssize_t written = 0;
+	ssize_t ret = 0;
+
+	SCSC_TAG_DEBUG(BT_H4, "enter\n");
+
+	UNUSED(file);
+	UNUSED(offset);
+
+	/* Only 1 writer is allowed */
+	if (1 != atomic_inc_return(&bt_service.h4_writers)) {
+		atomic_dec(&bt_service.h4_writers);
+		return -EIO;
+	}
+
+	/* Has an error been detected then just return with an error */
+	if (0 != atomic_read(&bt_service.error_count)) {
+		/* SCSC_TAG_WARNING(BT_H4, "firmware panicked or protocol error (error_count=%u)\n", atomic_read(&bt_service.error_count));*/
+		atomic_dec(&bt_service.h4_writers);
+		return -EIO;
+	}
+
+	while (written != count && 0 == ret) {
+		/* Room left in the staging buffer for this iteration
+		 * (cast: h4_write_offset printed via size_t to keep the
+		 * varargs in step with %zu regardless of its storage type) */
+		length = min(count - written, sizeof(h4_write_buffer) - bt_service.h4_write_offset);
+		SCSC_TAG_DEBUG(BT_H4, "count: %zu, length: %zu, h4_write_offset: %zu, written:%zd, size:%zu\n",
+			       count, length, (size_t)bt_service.h4_write_offset, written, sizeof(h4_write_buffer));
+
+		/* Is there room in the temp buffer */
+		if (0 == length) {
+			SCSC_TAG_ERR(BT_H4, "no room in the buffer\n");
+			atomic_inc(&bt_service.error_count);
+			ret = -EIO;
+			break;
+		}
+
+		/* Copy the userspace data to the target buffer */
+		ret = copy_from_user(&h4_write_buffer[bt_service.h4_write_offset], &buf[written], length);
+		if (0 == ret) {
+			/* Is there enough data to include a HCI command header and is the type a HCI_COMMAND_PKT */
+			if ((length + bt_service.h4_write_offset) >= H4DMUX_HEADER_HCI && HCI_COMMAND_PKT == h4_write_buffer[0]) {
+				/* Extract the HCI command packet length:
+				 * opcode (2) + param length field (1) + params */
+				hci_pkt_len = h4_write_buffer[3] + 3;
+
+				/* Is a complete packet available (+1 for the H4 type byte) */
+				if ((hci_pkt_len + 1) <= (length + bt_service.h4_write_offset)) {
+					/* Transfer the packet to the HCI command transfer ring */
+					ret = scsc_bt_shm_h4_hci_cmd_write(&h4_write_buffer[1], hci_pkt_len);
+					if (ret >= 0) {
+						written += ((hci_pkt_len + 1) - bt_service.h4_write_offset);
+						bt_service.h4_write_offset = 0;
+						ret = 0;
+					}
+				} else {
+					/* Still needing data to have the complete packet */
+					SCSC_TAG_WARNING(BT_H4, "missing data (need=%zu, got=%zu)\n", (hci_pkt_len + 1), (length + bt_service.h4_write_offset));
+					written += length;
+					bt_service.h4_write_offset += (u32) length;
+				}
+			/* Is there enough data to include a ACL data header and is the type a HCI_ACLDATA_PKT */
+			} else if ((length + bt_service.h4_write_offset) >= H4DMUX_HEADER_ACL && HCI_ACLDATA_PKT == h4_write_buffer[0]) {
+				/* Extract the ACL data packet length (little-endian u16) */
+				hci_pkt_len = (h4_write_buffer[3] | (h4_write_buffer[4] << 8));
+
+				/* Sanity check on the packet length */
+				if (hci_pkt_len > BSMHCP_ACL_PACKET_SIZE) {
+					SCSC_TAG_ERR(BT_H4, "ACL packet length is larger than read buffer size specifies (%zu > %u)\n", hci_pkt_len, BSMHCP_ACL_PACKET_SIZE);
+					atomic_inc(&bt_service.error_count);
+					ret = -EIO;
+					break;
+				}
+
+				/* Is a complete packet available (+5 for the H4 type byte and ACL header) */
+				if ((hci_pkt_len + 5) <= (length + bt_service.h4_write_offset)) {
+					/* Transfer the packet to the ACL data transfer ring */
+					ret = scsc_bt_shm_h4_acl_write(&h4_write_buffer[1], hci_pkt_len + 4);
+					if (ret >= 0) {
+						written += ((hci_pkt_len + 5) - bt_service.h4_write_offset);
+						bt_service.h4_write_offset = 0;
+						ret = 0;
+					}
+				} else {
+					/* Still needing data to have the complete packet.
+					 * (fixed: "got" is the total buffered data - length PLUS
+					 * offset - matching the HCI command branch above) */
+					SCSC_TAG_WARNING(BT_H4, "missing data (need=%zu, got=%zu)\n", (hci_pkt_len + 5), (length + bt_service.h4_write_offset));
+					written += length;
+					bt_service.h4_write_offset += (u32) length;
+				}
+			/* Is there less data than a header then just wait for more */
+			} else if (length <= 5) {
+				bt_service.h4_write_offset += length;
+				written += length;
+			/* Header is unknown - unable to proceed */
+			} else {
+				atomic_inc(&bt_service.error_count);
+				ret = -EIO;
+			}
+		} else {
+			/* copy_from_user returns the number of bytes NOT copied */
+			SCSC_TAG_WARNING(BT_H4, "copy_from_user returned: %zd\n", ret);
+			ret = -EACCES;
+		}
+	}
+
+	SCSC_TAG_DEBUG(BT_H4, "h4_write_offset=%zu, ret=%zd, written=%zd\n",
+		       (size_t)bt_service.h4_write_offset, ret, written);
+
+	/* Decrease the H4 writers counter */
+	atomic_dec(&bt_service.h4_writers);
+
+	return 0 == ret ? written : ret;
+}
+
+/* H4 poll entry point: report POLLIN|POLLRDNORM when read() would make
+ * progress, POLLOUT otherwise */
+unsigned scsc_bt_shm_h4_poll(struct file *file, poll_table *wait)
+{
+	bool ring_update;
+	bool op_pending;
+	bool failure;
+
+	/* Register the read wait queue with the poll table */
+	poll_wait(file, &bt_service.read_wait, wait);
+
+	/* Any transfer ring whose read index trails its write index has
+	 * data waiting for userspace */
+	ring_update =
+		bt_service.bsmhcp_protocol->header.mailbox_hci_evt_write !=
+		bt_service.bsmhcp_protocol->header.mailbox_hci_evt_read ||
+		bt_service.bsmhcp_protocol->header.mailbox_acl_rx_write !=
+		bt_service.bsmhcp_protocol->header.mailbox_acl_rx_read ||
+		bt_service.bsmhcp_protocol->header.mailbox_acl_free_write !=
+		bt_service.bsmhcp_protocol->header.mailbox_acl_free_read ||
+		bt_service.bsmhcp_protocol->header.mailbox_iq_report_write !=
+		bt_service.bsmhcp_protocol->header.mailbox_iq_report_read;
+
+	/* A partially transferred packet must be completed by a read */
+	op_pending = bt_service.read_operation != BT_READ_OP_NONE &&
+		     bt_service.read_operation != BT_READ_OP_STOP;
+
+	/* Driver errors and firmware panics are also delivered through
+	 * read - unless the error event has already been sent (STOP) */
+	failure = bt_service.read_operation != BT_READ_OP_STOP &&
+		  (atomic_read(&bt_service.error_count) != 0 ||
+		   bt_service.bsmhcp_protocol->header.panic_deathbed_confession);
+
+	if (ring_update || op_pending || failure) {
+		SCSC_TAG_DEBUG(BT_H4, "queue(s) changed\n");
+		return POLLIN | POLLRDNORM; /* readable */
+	}
+
+	SCSC_TAG_DEBUG(BT_H4, "no change\n");
+
+	return POLLOUT; /* writeable */
+}
+
+/* Initialise the shared memory interface.
+ *
+ * Maps the BSMHCP protocol area, wipes it, stamps the magic value, resets
+ * all cached ring indexes and installs the interrupt handlers.
+ *
+ * Returns 0 on success or -ENOMEM if the reference could not be mapped.
+ */
+int scsc_bt_shm_init(void)
+{
+	/* Resolve the shared memory reference to a kernel virtual pointer */
+	bt_service.bsmhcp_protocol = scsc_mx_service_mif_addr_to_ptr(bt_service.service, bt_service.bsmhcp_ref);
+	if (!bt_service.bsmhcp_protocol) {
+		SCSC_TAG_ERR(BT_COMMON, "couldn't map kmem to shm_ref 0x%08x\n", (u32)bt_service.bsmhcp_ref);
+		return -ENOMEM;
+	}
+
+	/* Wipe the protocol area and stamp it with the magic value */
+	memset(bt_service.bsmhcp_protocol, 0, sizeof(*bt_service.bsmhcp_protocol));
+	bt_service.bsmhcp_protocol->header.magic_value = BSMHCP_PROTOCOL_MAGICVALUE;
+
+	/* Reset the cached read indexes and the allocation bookkeeping */
+	bt_service.mailbox_hci_evt_read = 0;
+	bt_service.mailbox_acl_rx_read = 0;
+	bt_service.mailbox_acl_free_read = 0;
+	bt_service.mailbox_acl_free_read_scan = 0;
+	bt_service.mailbox_iq_report_read = 0;
+	bt_service.read_index = 0;
+	bt_service.allocated_count = 0;
+
+	/* Hook up the interrupt handlers */
+	scsc_bt_shm_init_interrupt();
+
+	return 0;
+}
+
+/* Terminate the shared memory interface, stopping its thread.
+ *
+ * Note: The service must be stopped prior to calling this function.
+ * The shared memory can only be released after calling this function.
+ */
+void scsc_bt_shm_exit(void)
+{
+	/* Release the interrupt resources - but only if the protocol area
+	 * was ever mapped */
+	if (bt_service.bsmhcp_protocol != NULL) {
+		scsc_service_mifintrbit_unregister_tohost(bt_service.service,
+							  bt_service.bsmhcp_protocol->header.bg_to_ap_int_src);
+		scsc_service_mifintrbit_unregister_tohost(bt_service.service,
+							  bt_service.bsmhcp_protocol->header.fg_to_ap_int_src);
+
+		scsc_service_mifintrbit_free_fromhost(bt_service.service,
+						      bt_service.bsmhcp_protocol->header.ap_to_bg_int_src,
+						      SCSC_MIFINTR_TARGET_R4);
+		scsc_service_mifintrbit_free_fromhost(bt_service.service,
+						      bt_service.bsmhcp_protocol->header.ap_to_fg_int_src,
+						      SCSC_MIFINTR_TARGET_R4);
+		scsc_service_mifintrbit_free_fromhost(bt_service.service,
+						      bt_service.bsmhcp_protocol->header.ap_to_fg_m4_int_src,
+						      SCSC_MIFINTR_TARGET_M4);
+	}
+
+	/* Reset the control state so a subsequent init starts clean */
+	memset(bt_service.allocated, 0, sizeof(bt_service.allocated));
+	memset(bt_service.connection_handle_list, 0, sizeof(bt_service.connection_handle_list));
+	bt_service.last_alloc = 0;
+	bt_service.hci_event_paused = false;
+	bt_service.acldata_paused = false;
+	bt_service.bsmhcp_protocol = NULL;
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+/* Shared memory interface API */
+
+#ifndef __SCSC_SHM_H__
+#define __SCSC_SHM_H__
+#include <scsc/api/bsmhcp.h>
+#include <scsc/api/asmhcp.h>
+
+/* Bluetooth specific functions */
+int scsc_bt_shm_init(void);
+void scsc_bt_shm_exit(void);
+ssize_t scsc_bt_shm_h4_read(struct file *file,
+ char __user *buf,
+ size_t len,
+ loff_t *offset);
+ssize_t scsc_bt_shm_h4_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *offset);
+unsigned scsc_bt_shm_h4_poll(struct file *file, poll_table *wait);
+
+/* ANT specific functions */
+int scsc_ant_shm_init(void);
+void scsc_ant_shm_exit(void);
+ssize_t scsc_shm_ant_read(struct file *file,
+ char __user *buf,
+ size_t len,
+ loff_t *offset);
+ssize_t scsc_shm_ant_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *offset);
+unsigned int scsc_shm_ant_poll(struct file *file, poll_table *wait);
+#endif
--- /dev/null
+config SCSC_WLAN
+ tristate "Driver for Samsung SCSC Maxwell Wifi"
+ depends on SCSC_CORE
+ depends on CFG80211
+
+if SCSC_WLAN != n
+config SCSC_WLAN_KEY_MGMT_OFFLOAD
+ bool "Enable roam offload for SCSC WLAN"
+ default y
+ ---help---
+ This option enables roam offload for SCSC WLAN.
+
+config SCSC_WLAN_HIP4_PROFILING
+ bool "Enable HIP4 profiling"
+ default y
+ depends on SCSC_WLAN
+ ---help---
+ This option enables HIP4 profiling
+
+config SCSC_WLAN_DEBUG
+ bool "Enable debug output from the SCSC Wifi driver"
+ depends on SCSC_WLAN
+ select SCSC_WLAN_HIP4_PROFILING
+ ---help---
+ This option enables debug support for the SCSC wifi chipset.
+
+config SCSC_WLAN_SG
+ bool "Enable SCSC WLAN Scatter-gather and GSO support"
+ depends on SCSC_WLAN
+ default y
+ ---help---
+ This option enables scatter-gather and GSO feature
+
+config SCSC_WLAN_SKB_TRACKING
+ bool "Enable debug memory tracking of skb data in the driver"
+ ---help---
+ This option enables tracking of all skb data in the driver..
+ This will affect the performance!
+
+config SCSC_WLAN_RX_NAPI
+ bool "Enable use of net device napi rx polling api"
+ default y
+ ---help---
+ This option enables the drivers use of the napi api
+
+config SCSC_WLAN_RX_NAPI_GRO
+ bool "Enable use of net device napi rx GRO"
+ depends on SCSC_WLAN_RX_NAPI
+ default y
+ ---help---
+ This option enables the drivers use of the napi Generic Receive Offload
+
+config SCSC_WLAN_ANDROID
+ bool "Android specific support"
+ default y
+ ---help---
+ Enable support for Android specific aspects of the driver.
+
+config SCSC_WLAN_STA_ONLY
+ bool "Support WLAN STA only (no P2P/AP/IBSS)"
+ default n
+ ---help---
+ Enable only basic STA mode for debugging purposes
+
+config SCSC_WLAN_KIC_OPS
+ bool "Wi-Fi service driver provides OPS for KIC to use for various tasks"
+ depends on SAMSUNG_KIC
+ default y
+ ---help---
+ Provide Wi-Fi service driver OPS, which KIC can use for triggering Wi-Fi related events.
+
+config SCSC_WLAN_GSCAN_ENABLE
+ bool "Enable GSCAN in SCSC wlan driver"
+ default y
+ ---help---
+ This option enables GSCAN in SCSC wlan driver.
+
+config SCSC_WLAN_WES_NCHO
+ bool "Enable NCHO/WES feature"
+ default y
+ ---help---
+ This option enables the NCHO/WES feature
+
+config SCSC_WLAN_MUTEX_DEBUG
+ bool "Enable Mutex debugging mechanism in slsi wlan driver"
+ default y
+ ---help---
+ This option enables the Mutex debug reports in slsi wlan driver.
+
config SCSC_WLAN_BLOCK_IPV6
+ bool "Block IPv6"
+ default n
+ ---help---
+ This option blocks IPv6 packets.
+
config SCSC_WLAN_DISABLE_NAT_KA
+ bool "Disable NAT"
+ default n
+ ---help---
+ This option disables NAT.
+
+config SCSC_WLAN_HANG_TEST
+ bool "Test HANG Event"
+ default y
+ ---help---
+ This option facilitates triggering of HANG Event
+ to reset WLAN.
+
+config SCSC_WLAN_NAT_KEEPALIVE_DISABLE
+ bool "Disable NAT KeepAlive"
+ default n
+ ---help---
+ This option disables the NAT KeepAlive
+ Offload Feature.
+
+config SCSC_WLAN_ENHANCED_LOGGING
+ bool "Enable Enhanced Logging"
+ depends on SCSC_WIFILOGGER
+ default y
+ ---help---
+ This option enables the Enhanced Logging
+ Feature.
+
+config SCSC_WLAN_ACM_API
+ bool "Provide TSF read API"
+ default n
+ ---help---
+ This option provides API to read TSF.
+
+endif
+
+config SCSC_WLAN_MAC_ADDRESS_FILENAME
+ string "MAC address filename"
+ default "/efs/wifi/.mac.info"
+ ---help---
+ Select the programmed MAC address file.
+
+config SCSC_WLAN_MAX_INTERFACES
+ int "Max number of virtual interfaces supported"
+ range 1 3
+ default 3
+ ---help---
+ The driver structures are sized to support this
+ number of interfaces.
+
+config SCSC_WLAN_AP_INFO_FILE
+ bool "Create .softap.info file"
+ default n
+ ---help---
+ The option enables the driver to create .softap.info
+ file in user space and fill information in it.
+
+config SCSC_WLAN_WIFI_SHARING
+ bool "Wifi Sharing Support"
+ default n
+ ---help---
+ This option tells if wifi sharing is supported or not.
+
+config SCSC_WLAN_SAE_CONFIG
+ bool "Wpa3 Support"
+ default n
+ ---help---
+ This option tells if wpa3 is supported or not.
+
+config SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ bool "Wifi Mac Randomization Support"
+ default n
+ ---help---
+ This option tells if mac randomization is supported or not.
+
+config SCSC_WLAN_SINGLE_ANTENNA
+ depends on SCSC_WLAN_WIFI_SHARING
+ bool "Single Antenna Supported"
+ default y
+ ---help---
+ This option tells if there is support for single
+ antenna or dual antenna.
+
+config SCSC_AP_INTERFACE_NAME
+ string "AP net device interface name"
+ default "wlan1"
+ ---help---
+ AP net device interface name.
+
+config SCSC_WLAN_ACS_ENABLE
+ bool "ACS Support"
+ default y
+ ---help---
+ This option tells if automatic channel selection is
+ supported or not.
+
# Note: a duplicate "config SCSC_AP_INTERFACE_NAME" entry was removed here;
# the symbol is already defined above with the same type, default and help.
+
+config SCSC_WLAN_MHS_STATIC_INTERFACE
+ bool "Static AP Interface Support"
+ default n
+ ---help---
+ This option tells if AP interface has been created
	  statically or dynamically.
+
+config SCSC_WLAN_PRIORITISE_IMP_FRAMES
+ bool "Change priority of important frames such as DNS, MDNS, TCP SYN"
+ default y
+ ---help---
+ The option enables the driver to prioritise important frames
+ (such as DNS, MDNS, TCP SYN) so that they are sent ahead of others.
+
+config SCSC_WIFI_NAN_ENABLE
+ bool "Enable WiFi NAN"
+ default n
+ ---help---
+ This option tells whether WiFi NAN is enabled or not.
+
+config SCSC_WLAN_ENHANCED_PKT_FILTER
+ bool "Enable enhanced packet filtering in suspend"
+ default n
+ ---help---
+ This option tells if UDP packet filtering in suspend is
+ supported or not.
+
+config SCSC_WLAN_SET_NUM_ANTENNAS
+ bool "Enable configuring of number of antennas"
+ default n
+ ---help---
+ This option tells whether configuring of number of antennas is enabled or not.
+
+config SCSC_ENHANCED_PACKET_STATS
+ bool "Enable enhanced packet stats"
+ default n
+ ---help---
+ This option tells whether enhanced packet stats collection
+ is enabled or not.
+
+config SCSC_WLAN_LOW_LATENCY_MODE
+ bool "Support enabling/disabling roaming as per latency mode"
+ default n
+ ---help---
+ This option tells whether support is provided for enabling
+ or disabling roaming as per the latency mode.
+
+config SCSC_WLAN_SET_PREFERRED_ANTENNA
+ bool "Enable the driver to set preferred antenna"
+ default y
+ ---help---
+ This option enables the driver to set the preferred antenna.
+
+config SLSI_WLAN_STA_FWD_BEACON
+ bool "Forwarding beacon support"
+ default n
+ ---help---
+ This option tells if beacon forwarding in STA mode is enabled or not.
+ When it is enabled, Driver deliver beacon info of connected AP to Supplicant.
+
+config SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ bool "Enable enhanced arp detection"
+ default n
+ ---help---
+ This option tells whether enhanced arp detection
+ is enabled or not.
--- /dev/null
+# ----------------------------------------------------------------------------
+# FILE: Makefile
+#
+# PURPOSE:
+# Build instructions for SCSC Wlan driver.
+#
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved
+# ----------------------------------------------------------------------------
+
+# Needed since this subdir is symlinked in the main Kernel tree
+# without this our samsung subdir is NOT cleaned.
+clean-files := *.o *.ko
+
+obj-$(CONFIG_SCSC_WLAN) += scsc_wlan.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += hip4.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mbulk.o
+
+# ----------------------------------------------------------------------------
+# HIP4 sampler
+# ----------------------------------------------------------------------------
+ifeq ($(CONFIG_SCSC_WLAN_HIP4_PROFILING),y)
+scsc_wlan-$(CONFIG_SCSC_WLAN) += hip4_sampler.o
+endif
+
+scsc_wlan-$(CONFIG_SCSC_SMAPPER) += hip4_smapper.o
+
+# Upper driver
+scsc_wlan-$(CONFIG_SCSC_WLAN) += dev.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += cfg80211_ops.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += netif.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += rx.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += tx.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += ba.o
+
+# SAPs
+scsc_wlan-$(CONFIG_SCSC_WLAN) += sap_mlme.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += sap_ma.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += sap_dbg.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += sap_test.o
+
+# ----------------------------------------------------------------------------
+# Common Driver Files
+# ----------------------------------------------------------------------------
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mib.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mib_text_convert.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += debug.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += debug_frame.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += procfs.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mgt.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mlme.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += udi.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += log_clients.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += src_sink.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += fw_test.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += cac.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += scsc_wifi_fcq.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += ioctl.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += wakelock.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += traffic_monitor.o
+
+# ----------------------------------------------------------------------------
+# ACM
+# ----------------------------------------------------------------------------
+ifeq ($(CONFIG_SCSC_WLAN_ACM_API),y)
+scsc_wlan-$(CONFIG_SCSC_WLAN) += acm_api.o
+endif
+
+# ----------------------------------------------------------------------------
+# Building for Hardware
+# ----------------------------------------------------------------------------
+scsc_wlan-$(CONFIG_SCSC_WLAN) += hip.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += cm_if.o
+
+ifeq ($(CONFIG_SCSC_WLAN_KIC_OPS),y)
+scsc_wlan-$(CONFIG_SCSC_WLAN) += kic.o
+endif
+ccflags-$(CONFIG_SCSC_WLAN_KIC_OPS) += -DCONFIG_SCSC_WLAN_KIC_OPS
+
+ifeq ($(CONFIG_SCSC_WLAN_GSCAN_ENABLE),y)
+scsc_wlan-$(CONFIG_SCSC_WLAN) += nl80211_vendor.o
+ccflags-y += -DCONFIG_SCSC_WLAN_GSCAN_ENABLE
+endif
+ifeq ($(CONFIG_SCSC_WIFI_NAN_ENABLE),y)
+scsc_wlan-$(CONFIG_SCSC_WLAN) += nl80211_vendor_nan.o
+scsc_wlan-$(CONFIG_SCSC_WLAN) += mlme_nan.o
+endif
+
+ifeq ($(CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD),y)
+ccflags-y += -DCONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
+endif
+
+# ---------------------------------------------------------------------------
+# Include
+# ---------------------------------------------------------------------------
+# TODO: This is only required because scsc_wifilogger is in the wrong driver directory
+ccflags-y += -Idrivers/misc/samsung/scsc
+
+# ----------------------------------------------------------------------------
+# Wlan configuration
+# ----------------------------------------------------------------------------
+ccflags-$(CONFIG_SCSC_WLAN_DEBUG) += -DCONFIG_SCSC_WLAN_DEBUG
+ccflags-$(CONFIG_SCSC_WLAN_SKB_TRACKING) += -DCONFIG_SCSC_WLAN_SKB_TRACKING
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_DATA_PLANE_PROFILE_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_DATA_PLANE_PROFILE_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_TX_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_TX_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_HIP_PSCHED_AMSDU) += -DCONFIG_SCSC_WLAN_HIP_PSCHED_AMSDU
+ccflags-$(CONFIG_SCSC_WLAN_RX_NAPI) += -DCONFIG_SCSC_WLAN_RX_NAPI
+ccflags-$(CONFIG_SCSC_WLAN_RX_NAPI_GRO) += -DCONFIG_SCSC_WLAN_RX_NAPI_GRO
+ccflags-$(CONFIG_SCSC_WLAN_HIP_SUPPORT_SCATTER_GATHER_API) += -DCONFIG_SCSC_WLAN_HIP_SUPPORT_SCATTER_GATHER_API
+ccflags-$(CONFIG_SCSC_WLAN_WES_NCHO) += -DCONFIG_SCSC_WLAN_WES_NCHO
+ccflags-$(CONFIG_SCSC_WLAN_MUTEX_DEBUG) += -DCONFIG_SCSC_WLAN_MUTEX_DEBUG
+ccflags-$(CONFIG_SCSC_WLAN_BLOCK_IPV6) += -DCONFIG_SCSC_WLAN_BLOCK_IPV6
+ccflags-$(CONFIG_SCSC_WLAN_DISABLE_NAT_KA) += -DCONFIG_SCSC_WLAN_DISABLE_NAT_KA
+
+ccflags-y += $(CONFIG_SAMSUNG_MAXWELL_EXTRA)
+ccflags-y += -DCONFIG_SCSC_WLAN_MAX_INTERFACES=$(CONFIG_SCSC_WLAN_MAX_INTERFACES)
+
+# Android specific build options
+ccflags-$(CONFIG_SCSC_WLAN_ANDROID) += -DCONFIG_SCSC_WLAN_ANDROID
+
## See the drivers/misc/samsung/scsc/ Makefile for an explanation of why
## the following ifeq/else is needed when CONFIG_SCSC_LOGRING is built as a module.
+ifeq ($(CONFIG_SCSC_LOGRING), m)
+ccflags-y += -DCONFIG_SCSC_PRINTK
+else
+ccflags-$(CONFIG_SCSC_LOGRING) += -DCONFIG_SCSC_PRINTK
+endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/netdevice.h>
+#include "mlme.h"
+#include "mib.h"
+#include "debug.h"
+/* PSID Hdr(4) + VLDATA Hdr(1) + TSF(8) * + padding(1)*/
+#define TSF_RESP_SIZE (14)
+
+unsigned long long SDA_getTsf(const unsigned char if_id)
+{
+ struct net_device *dev = NULL;
+ struct netdev_vif *ndev_vif = NULL;
+ struct slsi_dev *sdev = NULL;
+ struct slsi_mib_data mibreq = { 0, NULL };
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ /* Alloc buffer in stack to avoid possible scheduling when malloc.*/
+ u8 res_payload[TSF_RESP_SIZE] = { 0 };
+ int r = 0;
+ int rx_length = 0;
+ unsigned long long ret = 0;
+
+ read_lock(&dev_base_lock);
+ dev = first_net_device(&init_net);
+ while (dev) {
+ if (if_id == 0 && memcmp(dev->name, "wlan0", 5) == 0)
+ break;
+ else if (if_id == 1 && memcmp(dev->name, "p2p0", 4) == 0)
+ break;
+ dev = next_net_device(dev);
+ }
+ read_unlock(&dev_base_lock);
+
+ if (!dev) {
+ SLSI_ERR(sdev, "dev is null\n");
+ return 0;
+ }
+ ndev_vif = netdev_priv(dev);
+ if (!ndev_vif) {
+ SLSI_ERR(sdev, "ndev_vif is null\n");
+ return 0;
+ }
+ sdev = ndev_vif->sdev;
+ if (!sdev) {
+ SLSI_ERR(sdev, "sdev is null\n");
+ return 0;
+ }
+
+ SLSI_DBG3(sdev, SLSI_MLME, "\n");
+ slsi_mib_encode_get(&mibreq, SLSI_PSID_UNIFI_CURRENT_TSF_TIME, 0);
+ mibrsp.dataLength = sizeof(res_payload);
+ mibrsp.data = res_payload;
+
+ r = slsi_mlme_get(sdev, dev, mibreq.data, mibreq.dataLength, mibrsp.data, mibrsp.dataLength, &rx_length);
+
+ kfree(mibreq.data);
+
+ if (r == 0) {
+ mibrsp.dataLength = rx_length;
+ if (rx_length == 0) {
+ SLSI_ERR(sdev, "Mib decode error\n");
+ return 0;
+ }
+ slsi_mib_decodeInt64(&mibrsp.data[4], &ret);
+ } else {
+ SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
+ return 0;
+ }
+ return ret;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "debug.h"
+#include "dev.h"
+#include "ba.h"
+#include "mgt.h"
+
+/* Age value for frames in MPDU reorder buffer */
+static int ba_mpdu_reorder_age_timeout = 150; /* 150 milli seconds */
+module_param(ba_mpdu_reorder_age_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ba_mpdu_reorder_age_timeout, "Timeout (in ms) before a BA frame in Reorder buffer is passed to upper layers");
+
/* Half the 12-bit sequence number space; used to decide whether one SN is
 * "ahead of" or "behind" another under modulo-4096 arithmetic. */
#define BA_WINDOW_BOUNDARY 2048

/* Map a 12-bit sequence number onto its reorder-buffer slot, relative to
 * the session's starting sequence number.  Arguments parenthesised so the
 * macro expands safely with expression arguments. */
#define SN_TO_INDEX(__ba_session_rx, __sn) ((((__sn) - (__ba_session_rx)->start_sn) & 0xFFF) % (__ba_session_rx)->buffer_size)

/* Multi-statement macros are wrapped in do { } while (0) so they expand
 * safely inside un-braced if/else bodies (kernel coding style). */
#define ADVANCE_EXPECTED_SN(__ba_session_rx) \
	do { \
		(__ba_session_rx)->expected_sn++; \
		(__ba_session_rx)->expected_sn &= 0xFFF; \
	} while (0)

#define FREE_BUFFER_SLOT(__ba_session_rx, __index) \
	do { \
		(__ba_session_rx)->occupied_slots--; \
		(__ba_session_rx)->buffer[__index].active = false; \
	} while (0)

/* True when sn1 precedes sn2 in modulo-4096 sequence space. */
#define IS_SN_LESS(sn1, sn2) ((((sn1) - (sn2)) & 0xFFF) > BA_WINDOW_BOUNDARY)
+
+void slsi_rx_ba_init(struct slsi_dev *sdev)
+{
+ int i;
+
+ for (i = 0; i < SLSI_MAX_RX_BA_SESSIONS; i++)
+ slsi_spinlock_create(&sdev->rx_ba_buffer_pool[i].ba_lock);
+
+ slsi_spinlock_create(&sdev->rx_ba_buffer_pool_lock);
+}
+
+static struct slsi_ba_session_rx *slsi_rx_ba_alloc_buffer(struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct slsi_ba_session_rx *buffer = NULL;
+ int i;
+
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "RX BA buffer pool status: %d,%d,%d,%d,%d,%d,%d,%d\n",
+ sdev->rx_ba_buffer_pool[0].used, sdev->rx_ba_buffer_pool[1].used, sdev->rx_ba_buffer_pool[2].used,
+ sdev->rx_ba_buffer_pool[3].used, sdev->rx_ba_buffer_pool[4].used, sdev->rx_ba_buffer_pool[5].used,
+ sdev->rx_ba_buffer_pool[6].used, sdev->rx_ba_buffer_pool[7].used);
+
+ slsi_spinlock_lock(&sdev->rx_ba_buffer_pool_lock);
+ for (i = 0; i < SLSI_MAX_RX_BA_SESSIONS; i++) {
+ if (!sdev->rx_ba_buffer_pool[i].used) {
+ sdev->rx_ba_buffer_pool[i].used = true;
+ buffer = &sdev->rx_ba_buffer_pool[i];
+ break;
+ }
+ }
+ slsi_spinlock_unlock(&sdev->rx_ba_buffer_pool_lock);
+
+ if (!buffer)
+ SLSI_NET_ERR(dev, "No free RX BA buffer\n");
+
+ return buffer;
+}
+
+static void slsi_rx_ba_free_buffer(struct net_device *dev, struct slsi_peer *peer, int tid)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+
+ slsi_spinlock_lock(&sdev->rx_ba_buffer_pool_lock);
+ if (peer && peer->ba_session_rx[tid]) {
+ peer->ba_session_rx[tid]->used = false;
+ peer->ba_session_rx[tid] = NULL;
+ }
+ slsi_spinlock_unlock(&sdev->rx_ba_buffer_pool_lock);
+
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "RX BA buffer pool status: %d,%d,%d,%d,%d,%d,%d,%d\n",
+ sdev->rx_ba_buffer_pool[0].used, sdev->rx_ba_buffer_pool[1].used, sdev->rx_ba_buffer_pool[2].used,
+ sdev->rx_ba_buffer_pool[3].used, sdev->rx_ba_buffer_pool[4].used, sdev->rx_ba_buffer_pool[5].used,
+ sdev->rx_ba_buffer_pool[6].used, sdev->rx_ba_buffer_pool[7].used);
+}
+
+/* This code - slsi_ba_process_complete()
+ * is called in the data workqueue context with the
+ * netdev_vif mutex held.
+ */
+void slsi_ba_process_complete(struct net_device *dev, bool from_ba_timer)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ while ((skb = slsi_skb_dequeue(&ndev_vif->ba_complete)) != NULL)
+ slsi_rx_data_deliver_skb(ndev_vif->sdev, dev, skb, from_ba_timer);
+}
+
/* Flag that completed frames are queued and kick the RX processing path.
 *
 * Sets ba_flush so the RX path knows the ba_complete queue needs
 * draining.  With RX NAPI compiled in, the rx_data work item is only
 * scheduled for HIP config version 5; for version 4 the aging timer path
 * drains inline instead (see slsi_ba_aging_timeout_handler).
 */
static void slsi_ba_signal_process_complete(struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
	struct slsi_dev *sdev = ndev_vif->sdev;
	u32 conf_hip4_ver = 0;
#endif

	atomic_set(&ndev_vif->ba_flush, 1);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
	conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
	if (conf_hip4_ver == 5)
		slsi_skb_schedule_work(&ndev_vif->rx_data);
#else
	slsi_skb_schedule_work(&ndev_vif->rx_data);
#endif
}
+
+static void ba_add_frame_to_ba_complete(struct net_device *dev, struct slsi_ba_frame_desc *frame_desc)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ slsi_skb_queue_tail(&ndev_vif->ba_complete, frame_desc->signal);
+}
+
+static void ba_update_expected_sn(struct net_device *dev,
+ struct slsi_ba_session_rx *ba_session_rx, u16 sn)
+{
+ u32 i, j;
+ u16 gap;
+
+ gap = (sn - ba_session_rx->expected_sn) & 0xFFF;
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "Proccess the frames up to new expected_sn = %d gap = %d\n", sn, gap);
+
+ for (j = 0; j < gap && j < ba_session_rx->buffer_size; j++) {
+ i = SN_TO_INDEX(ba_session_rx, ba_session_rx->expected_sn);
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "Proccess the slot index = %d\n", i);
+ if (ba_session_rx->buffer[i].active) {
+ ba_add_frame_to_ba_complete(dev, &ba_session_rx->buffer[i]);
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "Proccess the frame at index = %d expected_sn = %d\n", i, ba_session_rx->expected_sn);
+ FREE_BUFFER_SLOT(ba_session_rx, i);
+ } else {
+ SLSI_NET_DBG3(dev, SLSI_RX_BA, "Empty slot at index = %d\n", i);
+ }
+ ADVANCE_EXPECTED_SN(ba_session_rx);
+ }
+ ba_session_rx->expected_sn = sn;
+}
+
/* Release the in-order run of buffered frames starting at expected_sn.
 *
 * Moves every consecutively-occupied slot onto the ba_complete queue,
 * advancing expected_sn as it goes, and stops at the first hole
 * (inactive slot).  Callers in this file hold ba_session_rx->ba_lock.
 */
static void ba_complete_ready_sequence(struct net_device *dev,
				       struct slsi_ba_session_rx *ba_session_rx)
{
	int i;

	i = SN_TO_INDEX(ba_session_rx, ba_session_rx->expected_sn);
	while (ba_session_rx->buffer[i].active) {
		ba_add_frame_to_ba_complete(dev, &ba_session_rx->buffer[i]);
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "Completed stored frame (expected_sn=%d) at i = %d\n",
			      ba_session_rx->expected_sn, i);
		FREE_BUFFER_SLOT(ba_session_rx, i);
		ADVANCE_EXPECTED_SN(ba_session_rx);
		i = SN_TO_INDEX(ba_session_rx, ba_session_rx->expected_sn);
	}
}
+
+static void ba_scroll_window(struct net_device *dev,
+ struct slsi_ba_session_rx *ba_session_rx, u16 sn)
+{
+ if (((sn - ba_session_rx->expected_sn) & 0xFFF) <= BA_WINDOW_BOUNDARY) {
+ ba_update_expected_sn(dev, ba_session_rx, sn);
+ ba_complete_ready_sequence(dev, ba_session_rx);
+ }
+}
+
/* Classify an incoming MPDU against the reorder window.
 *
 * Returns the reorder-buffer slot index at which the caller should store
 * the frame, or -1 when the frame has been consumed here: either queued
 * on ba_complete directly (in-order / pre-SSN / still-in-window old
 * frame / TDLS old frame) or dropped (duplicate slot, stale frame).
 * *stop_timer is set when the caller should stop or restart the aging
 * timer.  Callers hold ba_session_rx->ba_lock.
 */
static int ba_consume_frame_or_get_buffer_index(struct net_device *dev, struct slsi_peer *peer,
	struct slsi_ba_session_rx *ba_session_rx, u16 sn, struct slsi_ba_frame_desc *frame_desc, bool *stop_timer)
{
	int i;
	u16 sn_temp;
#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
	struct netdev_vif *ndev_vif = netdev_priv(dev);
#endif

	*stop_timer = false;

	/* Is sn at or ahead of expected_sn (within half the SN space)? */
	if (((sn - ba_session_rx->expected_sn) & 0xFFF) <= BA_WINDOW_BOUNDARY) {
		/* Once we are in BA window, set the flag for BA trigger */
		if (!ba_session_rx->trigger_ba_after_ssn)
			ba_session_rx->trigger_ba_after_ssn = true;

		sn_temp = ba_session_rx->expected_sn + ba_session_rx->buffer_size;
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "New frame: sn=%d\n", sn);

		/* Frame lies beyond the current window: slide the window
		 * forward so that sn becomes its last entry, releasing
		 * everything that falls out of the window. */
		if (!(((sn - sn_temp) & 0xFFF) > BA_WINDOW_BOUNDARY)) {
			u16 new_expected_sn;

			SLSI_NET_DBG2(dev, SLSI_RX_BA, "Frame is out of window\n");
			sn_temp = (sn - ba_session_rx->buffer_size) & 0xFFF;
			if (ba_session_rx->timer_on)
				*stop_timer = true;
			new_expected_sn = (sn_temp + 1) & 0xFFF;
			ba_update_expected_sn(dev, ba_session_rx, new_expected_sn);
		}

		i = -1;
		if (sn == ba_session_rx->expected_sn) {
			/* In-order frame: consume immediately. */
			SLSI_NET_DBG4(dev, SLSI_RX_BA, "sn = ba_session_rx->expected_sn = %d\n", sn);
			if (ba_session_rx->timer_on)
				*stop_timer = true;
			ADVANCE_EXPECTED_SN(ba_session_rx);
			ba_add_frame_to_ba_complete(dev, frame_desc);
		} else {
			/* Out-of-order: store in its slot unless that slot
			 * already holds a frame (duplicate -> drop). */
			i = SN_TO_INDEX(ba_session_rx, sn);
			SLSI_NET_DBG4(dev, SLSI_RX_BA, "sn (%d) != ba_session_rx->expected_sn(%d), i = %d\n", sn, ba_session_rx->expected_sn, i);
			if (ba_session_rx->buffer[i].active) {
				SLSI_NET_DBG3(dev, SLSI_RX_BA, "free frame at i = %d\n", i);
				i = -1;
				slsi_kfree_skb(frame_desc->signal);
			}
		}
		/* Track the highest sequence number seen from the originator. */
		if (!IS_SN_LESS(sn, ba_session_rx->highest_received_sn))
			ba_session_rx->highest_received_sn = sn;
	} else {
		i = -1;
		if (!ba_session_rx->trigger_ba_after_ssn) {
			SLSI_NET_DBG3(dev, SLSI_RX_BA, "frame before ssn, pass it up: sn=%d\n", sn);
			ba_add_frame_to_ba_complete(dev, frame_desc);
		} else {
			if (slsi_is_tdls_peer(dev, peer)) {
				/* Don't drop old frames in TDLS AMPDU-reordering for interoperability with third party devices.
				 * When the TDLS link is established the peer sends few packets with AP's sequence number.
				 * BA reorder logic updates the expected sequence number. After that peer sends packets with
				 * starting sequence number negotiated in BA (0). But those frames are getting dropped here.
				 * Because of this TCP traffic does not work and TDLS link is getting disconnected.
				 */
				SLSI_NET_DBG1(dev, SLSI_RX_BA, "tdls: forward old frame: sn=%d, expected_sn=%d\n", sn, ba_session_rx->expected_sn);
				ba_add_frame_to_ba_complete(dev, frame_desc);
			} else {
				/* this frame is deemed as old. But it may so happen that the reorder process did not wait long
				 * enough for this frame and moved to new window. So check here that the current frame still lies in
				 * originators transmit window by comparing it with highest sequence number received from originator.
				 *
				 * If it lies in the window pass the frame to next process else discard the frame here.
				 */
				if (IS_SN_LESS(ba_session_rx->highest_received_sn, (((sn + ba_session_rx->buffer_size) & 0xFFF) - 1))) {
					SLSI_NET_DBG4(dev, SLSI_RX_BA, "old frame, but still in window: sn=%d, highest_received_sn=%d\n", sn, ba_session_rx->highest_received_sn);
					ba_add_frame_to_ba_complete(dev, frame_desc);
				} else {
					SLSI_NET_DBG1(dev, SLSI_RX_BA, "old frame, drop: sn=%d, expected_sn=%d\n", sn, ba_session_rx->expected_sn);
#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
					if (ndev_vif->enhanced_arp_detect_enabled)
						slsi_fill_enhanced_arp_out_of_order_drop_counter(ndev_vif,
												 frame_desc->signal);
#endif
					slsi_kfree_skb(frame_desc->signal);
				}
			}
		}
	}
	return i;
}
+
/* BA reorder-buffer aging timer handler.
 *
 * Fires ba_mpdu_reorder_age_timeout ms after a hole was left in the
 * reorder window.  Gives up waiting for the missing frame(s): skips past
 * the hole to the first occupied slot, releases it plus any in-order run
 * that follows, and re-arms the timer when further holes remain.
 * Finally kicks delivery of the completed frames.
 *
 * The pre-4.15 variant receives the session pointer via the timer's
 * "unsigned long data" argument; 4.15+ uses timer_list/from_timer().
 */
#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
static void slsi_ba_aging_timeout_handler(struct timer_list *t)
#else
static void slsi_ba_aging_timeout_handler(unsigned long data)
#endif
{
#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
	struct slsi_ba_session_rx *ba_session_rx = from_timer(ba_session_rx, t, ba_age_timer);
#else
	struct slsi_ba_session_rx *ba_session_rx = (struct slsi_ba_session_rx *)data;
#endif
	u8 i, j;
	u8 gap = 1;
	u16 temp_sn;
	struct net_device *dev = ba_session_rx->dev;
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;
	u32 conf_hip4_ver = 0;
#endif

	SLSI_NET_DBG3(dev, SLSI_RX_BA, "\n");

	slsi_spinlock_lock(&ba_session_rx->ba_lock);

	ba_session_rx->timer_on = false;

	if (ba_session_rx->active && ba_session_rx->occupied_slots) {
		/* expected sequence has not arrived so start searching from next
		 * sequence number until a frame is available and determine the gap.
		 * Release all the frames upto next hole from the reorder buffer.
		 */
		temp_sn = (ba_session_rx->expected_sn + 1) & 0xFFF;

		for (j = 0; j < ba_session_rx->buffer_size; j++) {
			i = SN_TO_INDEX(ba_session_rx, temp_sn);

			if (ba_session_rx->buffer[i].active) {
				/* Skip the `gap` missing frames preceding
				 * this occupied slot. */
				while (gap--)
					ADVANCE_EXPECTED_SN(ba_session_rx);

				SLSI_NET_DBG3(dev, SLSI_RX_BA, "Completed stored frame (expected_sn=%d) at i = %d\n", ba_session_rx->expected_sn, i);
				ba_add_frame_to_ba_complete(dev, &ba_session_rx->buffer[i]);
				FREE_BUFFER_SLOT(ba_session_rx, i);
				ADVANCE_EXPECTED_SN(ba_session_rx);
				ba_complete_ready_sequence(dev, ba_session_rx);
				break;
			}
			/* advance temp sequence number and frame gap */
			temp_sn = (temp_sn + 1) & 0xFFF;
			gap++;
		}

		/* Check for next hole in the buffer, if hole exists create the timer for next missing frame */
		if (ba_session_rx->occupied_slots) {
			SLSI_NET_DBG3(dev, SLSI_RX_BA, "Timer start\n");
			mod_timer(&ba_session_rx->ba_age_timer, jiffies + msecs_to_jiffies(ba_mpdu_reorder_age_timeout));
			ba_session_rx->timer_on = true;
		}
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
		/* Process the data now marked as completed */
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
		/* HIP config v4 drains inline here; other versions defer to
		 * the RX work item via the signal path. */
		conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
		if (conf_hip4_ver == 4)
			slsi_ba_process_complete(dev, true);
		else
			slsi_ba_signal_process_complete(dev);
#else
		slsi_ba_signal_process_complete(dev);
#endif
	} else {
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
	}
}
+
/* Entry point for BA-reordered RX MPDUs.
 *
 * Feeds one frame (skb with its 12-bit sequence number and tid) through
 * the reorder machinery: the frame is either consumed immediately
 * (in-order / old / duplicate) or stored in the reorder-buffer slot
 * returned by ba_consume_frame_or_get_buffer_index().  Frames that have
 * become in-order are queued on ba_complete and delivery is kicked via
 * slsi_ba_signal_process_complete().
 *
 * Returns 0 on success, -EINVAL for an invalid tid or when no active BA
 * session exists for the tid.
 */
int slsi_ba_process_frame(struct net_device *dev, struct slsi_peer *peer,
			  struct sk_buff *skb, u16 sequence_number, u16 tid)
{
	int i;
	struct slsi_ba_session_rx *ba_session_rx = peer->ba_session_rx[tid];
	struct slsi_ba_frame_desc frame_desc;
	bool stop_timer = false;

	SLSI_NET_DBG4(dev, SLSI_RX_BA, "Got frame(sn=%d)\n", sequence_number);

	if (WARN_ON(tid > FAPI_PRIORITY_QOS_UP7)) {
		SLSI_NET_ERR(dev, "tid=%d\n", tid);
		return -EINVAL;
	}

	if (!ba_session_rx)
		return -EINVAL;

	slsi_spinlock_lock(&ba_session_rx->ba_lock);

	if (!ba_session_rx->active) {
		SLSI_NET_ERR(dev, "No BA session exists\n");
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
		return -EINVAL;
	}

	frame_desc.signal = skb;
	frame_desc.sn = sequence_number;
	frame_desc.active = true;

	i = ba_consume_frame_or_get_buffer_index(dev, peer, ba_session_rx, sequence_number, &frame_desc, &stop_timer);
	if (i >= 0) {
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "Store frame(sn=%d) at i = %d\n", sequence_number, i);
		ba_session_rx->buffer[i] = frame_desc;
		ba_session_rx->occupied_slots++;
	} else {
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "Frame consumed - sn = %d\n", sequence_number);
	}

	ba_complete_ready_sequence(dev, ba_session_rx);

	/* Timer decision:
	 *
	 * If the timer is not running (timer_on=false)
	 *    Start the timer if there are holes (occupied_slots!=0)
	 *
	 * If the timer is running (timer_on=true)
	 *    Stop the timer if
	 *        There are no holes (occupied_slots=0)
	 *    Restart the timer if
	 *        stop_timer=true and there are holes (occupied_slots!=0)
	 *    Leave the timer running (do nothing) if
	 *        stop_timer=false and there are holes (occupied_slots!=0)
	 */

	if (!ba_session_rx->timer_on) {
		if (ba_session_rx->occupied_slots) {
			stop_timer = false;
			SLSI_NET_DBG4(dev, SLSI_RX_BA, "Timer start\n");
			mod_timer(&ba_session_rx->ba_age_timer, jiffies + msecs_to_jiffies(ba_mpdu_reorder_age_timeout));
			ba_session_rx->timer_on = true;
		}
	} else if (!ba_session_rx->occupied_slots) {
		stop_timer = true;
	} else if (stop_timer) {
		stop_timer = false;
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "Timer restart\n");
		mod_timer(&ba_session_rx->ba_age_timer, jiffies + msecs_to_jiffies(ba_mpdu_reorder_age_timeout));
		ba_session_rx->timer_on = true;
	}

	if (stop_timer) {
		/* Drop ba_lock before del_timer_sync(): the timer handler
		 * takes the same lock. */
		ba_session_rx->timer_on = false;
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
		SLSI_NET_DBG4(dev, SLSI_RX_BA, "Timer stop\n");
		del_timer_sync(&ba_session_rx->ba_age_timer);
	} else {
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
	}
	slsi_ba_signal_process_complete(dev);
	return 0;
}
+
+bool slsi_ba_check(struct slsi_peer *peer, u16 tid)
+{
+ if (tid > FAPI_PRIORITY_QOS_UP7)
+ return false;
+ if (!peer->ba_session_rx[tid])
+ return false;
+
+ return peer->ba_session_rx[tid]->active;
+}
+
/* Core worker to flush and deactivate a BA session.
 *
 * Walks the whole reorder buffer once, starting at the slot for the
 * current expected_sn and wrapping around, queueing every stored frame
 * on ba_complete, then marks the session inactive.  Callers (the
 * _lock_held/_lock_unheld wrappers) hold ba_session_rx->ba_lock.
 */
static void __slsi_rx_ba_stop(struct net_device *dev, struct slsi_ba_session_rx *ba_session_rx)
{
	u8 i, j;

	SLSI_NET_DBG1(dev, SLSI_RX_BA, "Stopping BA session: tid = %d\n", ba_session_rx->tid);

	if (WARN_ON(!ba_session_rx->active)) {
		SLSI_NET_ERR(dev, "No BA session exists\n");
		return;
	}

	for (i = SN_TO_INDEX(ba_session_rx, ba_session_rx->expected_sn), j = 0;
	     j < ba_session_rx->buffer_size; i++, j++) {
		i %= ba_session_rx->buffer_size;
		if (ba_session_rx->buffer[i].active) {
			ba_add_frame_to_ba_complete(dev, &ba_session_rx->buffer[i]);
			SLSI_NET_DBG3(dev, SLSI_RX_BA, "Completed stored frame at i = %d\n", i);
			FREE_BUFFER_SLOT(ba_session_rx, i);
		}
	}
	ba_session_rx->active = false;
}
+
/* Stop a BA session when the caller already holds ba_session_rx->ba_lock.
 *
 * ba_lock is released around del_timer_sync() because the aging timer
 * handler takes the same lock - synchronously waiting for it while
 * holding the lock could deadlock.  The lock is re-acquired before
 * returning so the caller's locking state is preserved.
 */
static void slsi_rx_ba_stop_lock_held(struct net_device *dev, struct slsi_ba_session_rx *ba_session_rx)
{
	__slsi_rx_ba_stop(dev, ba_session_rx);
	if (ba_session_rx->timer_on) {
		ba_session_rx->timer_on = false;
		slsi_spinlock_unlock(&ba_session_rx->ba_lock);
		del_timer_sync(&ba_session_rx->ba_age_timer);
		slsi_spinlock_lock(&ba_session_rx->ba_lock);
	}
}
+
/* Stop a BA session when the caller does NOT hold ba_lock.
 * Takes ba_lock for the flush, then drops it before del_timer_sync() for
 * the same deadlock-avoidance reason as slsi_rx_ba_stop_lock_held().
 * Returns with ba_lock released on both paths.
 */
static void slsi_rx_ba_stop_lock_unheld(struct net_device *dev, struct slsi_ba_session_rx *ba_session_rx)
{
    slsi_spinlock_lock(&ba_session_rx->ba_lock);
    __slsi_rx_ba_stop(dev, ba_session_rx);
    if (ba_session_rx->timer_on) {
        ba_session_rx->timer_on = false;
        slsi_spinlock_unlock(&ba_session_rx->ba_lock);
        del_timer_sync(&ba_session_rx->ba_age_timer);
    } else {
        slsi_spinlock_unlock(&ba_session_rx->ba_lock);
    }
}
+
+void slsi_rx_ba_stop_all(struct net_device *dev, struct slsi_peer *peer)
+{
+ int i;
+
+ for (i = 0; i < NUM_BA_SESSIONS_PER_PEER; i++)
+ if (peer->ba_session_rx[i] && peer->ba_session_rx[i]->active) {
+ slsi_rx_ba_stop_lock_unheld(dev, peer->ba_session_rx[i]);
+ slsi_rx_ba_free_buffer(dev, peer, i);
+ }
+}
+
/* Start (or restart) an RX block-ack session.
 * If an active session already has the same buffer_size and expected_sn the
 * existing session is kept; otherwise the old session is flushed (lock held)
 * and the state re-initialised from the new parameters.
 *
 * @tid:         traffic identifier for the session
 * @buffer_size: reorder window size; must be 1..SLSI_BA_BUFFER_SIZE_MAX
 * @start_sn:    first sequence number expected in the window
 * Returns 0 on success, -EINVAL for an invalid window size.
 */
static int slsi_rx_ba_start(struct net_device *dev,
                            struct slsi_ba_session_rx *ba_session_rx,
                            u16 tid, u16 buffer_size, u16 start_sn)
{
    SLSI_NET_DBG1(dev, SLSI_RX_BA, "Request to start a new BA session tid=%d buffer_size=%d start_sn=%d\n",
                  tid, buffer_size, start_sn);

    if (WARN_ON((!buffer_size) || (buffer_size > SLSI_BA_BUFFER_SIZE_MAX))) {
        SLSI_NET_ERR(dev, "Invalid window size: buffer_size=%d\n", buffer_size);
        return -EINVAL;
    }

    slsi_spinlock_lock(&ba_session_rx->ba_lock);

    if (ba_session_rx->active) {
        SLSI_NET_DBG1(dev, SLSI_RX_BA, "BA session already exists\n");

        /* Identical parameters: nothing to do, keep the session as-is. */
        if ((ba_session_rx->buffer_size == buffer_size) &&
            (ba_session_rx->expected_sn == start_sn)) {
            SLSI_NET_DBG1(dev, SLSI_RX_BA,
                          "BA session tid=%d already exists. The parameters match so keep the existing session\n",
                          tid);

            slsi_spinlock_unlock(&ba_session_rx->ba_lock);

            return 0;
        }
        SLSI_NET_DBG1(dev, SLSI_RX_BA, "Parameters don't match so stop the existing BA session: tid=%d\n", tid);
        slsi_rx_ba_stop_lock_held(dev, ba_session_rx);
    }

    ba_session_rx->dev = dev;
    ba_session_rx->buffer_size = buffer_size;
    ba_session_rx->start_sn = start_sn;
    ba_session_rx->expected_sn = start_sn;
    ba_session_rx->highest_received_sn = 0;
    ba_session_rx->trigger_ba_after_ssn = false;
    ba_session_rx->tid = tid;
    ba_session_rx->timer_on = false;
    /* timer_setup() replaced the function/data pair in kernel 4.15. */
#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
    timer_setup(&ba_session_rx->ba_age_timer, slsi_ba_aging_timeout_handler, 0);
#else
    ba_session_rx->ba_age_timer.function = slsi_ba_aging_timeout_handler;
    ba_session_rx->ba_age_timer.data = (unsigned long)ba_session_rx;
    init_timer(&ba_session_rx->ba_age_timer);
#endif

    ba_session_rx->active = true;
    SLSI_NET_DBG1(dev, SLSI_RX_BA, "Started a new BA session tid=%d buffer_size=%d start_sn=%d\n",
                  tid, buffer_size, start_sn);

    slsi_spinlock_unlock(&ba_session_rx->ba_lock);
    slsi_ba_signal_process_complete(dev);

    return 0;
}
+
+static void slsi_ba_process_error(struct net_device *dev,
+ struct slsi_ba_session_rx *ba_session_rx, u16 sequence_number)
+{
+ slsi_spinlock_lock(&ba_session_rx->ba_lock);
+
+ if (WARN_ON(!ba_session_rx->active)) {
+ SLSI_NET_ERR(dev, "No BA session exists\n");
+ slsi_spinlock_unlock(&ba_session_rx->ba_lock);
+ return;
+ }
+
+ ba_scroll_window(dev, ba_session_rx, sequence_number);
+
+ slsi_spinlock_unlock(&ba_session_rx->ba_lock);
+ slsi_ba_signal_process_complete(dev);
+}
+
/* Dispatch an MLME block-ack indication for @peer.
 * @parameter_set: block-ack parameter set; bits 2-5 are taken as the user
 *                 priority (TID) and bits 6-15 as the buffer size
 *                 (NOTE(review): matches the IEEE 802.11 ADDBA parameter-set
 *                 layout - confirm against the FAPI specification).
 * Only FAPI_DIRECTION_RECEIVE is acted on:
 *   START  - allocate (if needed) and start an RX BA session,
 *   END    - stop the session and free its buffer,
 *   UNSPECIFIED_REASON - scroll the reorder window to @sequence_number.
 * FAPI_DIRECTION_TRANSMIT is accepted but ignored.
 */
void slsi_handle_blockack(struct net_device *dev, struct slsi_peer *peer,
                          u16 vif, u8 *peer_qsta_address, u16 parameter_set, u16 sequence_number,
                          u16 reason_code, u16 direction)
{
    struct slsi_ba_session_rx *ba_session_rx;
    u16 user_priority = (parameter_set >> 2) & 0x000F;
    u16 buffer_size = (parameter_set >> 6) & 0x03FF;

    SLSI_UNUSED_PARAMETER(vif);
    SLSI_UNUSED_PARAMETER(peer_qsta_address);

    if (WARN_ON(user_priority > FAPI_PRIORITY_QOS_UP7)) {
        SLSI_NET_ERR(dev, "Invalid user_priority=%d\n", user_priority);
        return;
    }

    switch (direction) {
    case FAPI_DIRECTION_TRANSMIT:
        break;
    case FAPI_DIRECTION_RECEIVE:
        /* Snapshot taken before START may allocate a new session below. */
        ba_session_rx = peer->ba_session_rx[user_priority];

        switch (reason_code) {
        case FAPI_REASONCODE_START:
            if (!peer->ba_session_rx[user_priority])
                peer->ba_session_rx[user_priority] = slsi_rx_ba_alloc_buffer(dev);

            /* If starting fails, release the (possibly fresh) buffer. */
            if (peer->ba_session_rx[user_priority])
                if (slsi_rx_ba_start(dev, peer->ba_session_rx[user_priority], user_priority, buffer_size, sequence_number) != 0)
                    slsi_rx_ba_free_buffer(dev, peer, user_priority);
            break;
        case FAPI_REASONCODE_END:
            if (ba_session_rx) {
                slsi_rx_ba_stop_lock_unheld(dev, ba_session_rx);
                slsi_rx_ba_free_buffer(dev, peer, user_priority);
            }
            break;
        case FAPI_REASONCODE_UNSPECIFIED_REASON:
            if (ba_session_rx)
                slsi_ba_process_error(dev, ba_session_rx, sequence_number);
            break;
        default:
            SLSI_NET_ERR(dev, "Invalid value: reason_code=%d\n", reason_code);
            break;
        }
        break;
    default:
        SLSI_NET_ERR(dev, "Invalid value: direction=%d\n", direction);
        break;
    }
}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_BA_H__
+#define __SLSI_BA_H__
+
+#include "dev.h"
+
+void slsi_handle_blockack(struct net_device *dev, struct slsi_peer *peer,
+ u16 vif, u8 *peer_qsta_address, u16 parameter_set, u16 sequence_number,
+ u16 reason_code, u16 direction);
+
+int slsi_ba_process_frame(struct net_device *dev, struct slsi_peer *peer,
+ struct sk_buff *skb, u16 sequence_number, u16 tid);
+
+void slsi_ba_process_complete(struct net_device *dev, bool from_ba_timer);
+
+bool slsi_ba_check(struct slsi_peer *peer, u16 tid);
+
+void slsi_rx_ba_stop_all(struct net_device *dev, struct slsi_peer *peer);
+
+void slsi_rx_ba_init(struct slsi_dev *sdev);
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "cac.h"
+
+static struct cac_tspec *tspec_list;
+static int tspec_list_next_id;
+static u8 dialog_token_next;
+
+/* Define the meta-data info for tspec_fields */
/* Column meaning, inferred from cac_query_tspec_field()/cac_config_tspec():
 *   { name, read_only, is_tsinfo_field, size, offset }
 * - is_tsinfo_field != 0: 'size' is a bit-mask and 'offset' a bit position
 *   inside the 24-bit ts_info field;
 * - otherwise: 'size' is the byte width (1/2/4) and 'offset' the byte
 *   offset (OFFSETOF) into the TSPEC body.
 * NOTE(review): field order assumed from struct tspec_field - confirm.
 */
static struct tspec_field tspec_fields[] = {
    { "traffic_type", 0, 1, 0x1, 0 },
    { "tsid", 0, 1, 0xF, 1 },
    { "direction", 0, 1, 0x3, 5 },
    { "access_policy", 0, 1, 0x3, 7 }, /* WMM - always set to 1 */
    { "aggregation", 0, 1, 0x1, 9 }, /* WMM - not supported */
    { "psb", 0, 1, 0x1, 10 },
    { "user_priority", 0, 1, 0x7, 11 },
    { "tsinfo_ack_policy", 0, 1, 0x3, 14 }, /* WMM - not supported */
    { "schedule", 0, 1, 0x1, 16 }, /* WMM - not supported */
    { "nominal_msdu_size", 0, 0, 2, OFFSETOF(nominal_msdu_size) },
    { "max_msdu_size", 0, 0, 2, OFFSETOF(maximum_msdu_size) },
    { "min_service_interval", 0, 0, 4, OFFSETOF(minimum_service_interval) },
    { "max_service_interval", 0, 0, 4, OFFSETOF(maximum_service_interval) },
    { "inactivity_interval", 0, 0, 4, OFFSETOF(inactivity_interval) },
    { "suspension_interval", 0, 0, 4, OFFSETOF(suspension_interval) },
    { "service_start_time", 0, 0, 4, OFFSETOF(service_start_time) },
    { "min_data_rate", 0, 0, 4, OFFSETOF(minimum_data_rate) },
    { "mean_data_rate", 0, 0, 4, OFFSETOF(mean_data_rate) },
    { "peak_data_rate", 0, 0, 4, OFFSETOF(peak_data_rate) },
    { "max_burst_size", 0, 0, 4, OFFSETOF(maximum_burst_size) },
    { "delay_bound", 0, 0, 4, OFFSETOF(delay_bound) },
    { "min_phy_rate", 0, 0, 4, OFFSETOF(minimum_phy_rate) },
    { "surplus_bw_allowance", 0, 0, 2,
      OFFSETOF(surplus_bandwidth_allowance) },
    { "medium_time", 0, 0, 2, OFFSETOF(medium_time) },
};
+
+/* Define the OUI type data for the corresponding IE's */
+static const u8 TSRS_OUI_TYPE[] = { 0x00, 0x40, 0x96, 0x08 };
+static const u8 EBW_OUI_TYPE[] = { 0x00, 0x40, 0x96, 0x0F };
+
+static const int NUM_TSPEC_FIELDS = sizeof(tspec_fields) / sizeof(struct tspec_field);
+static u32 previous_msdu_lifetime = MAX_TRANSMIT_MSDU_LIFETIME_NOT_VALID;
+static u8 ccx_status = BSS_CCX_DISABLED;
+
+static void cac_set_ric_ie(struct slsi_dev *sdev, struct net_device *netdev);
+static int cac_get_rde_tspec_ie(struct slsi_dev *sdev, u8 *assoc_rsp_ie, int assoc_rsp_ie_len, const u8 **tspec_ie_arr);
+/* Name: strtoint
+ * Desc: Converts a string to a decimal or hexadecimal integer
+ * s: the string to be converted
+ * res: pointer to the calculated integer
+ * return: 0 (success), negative error code from kstrtoint() (failure)
+ */
+static int strtoint(const char *s, int *res)
+{
+ int base = 10;
+
+ if (strlen(s) > 2)
+ if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
+ base = 16;
+ return kstrtoint(s, base, res);
+}
+
+/* Name: find_tspec_entry
+ * Desc: Finds a tspec entry in the list of tspecs (tspec_list)
+ * according to tspec id and status (accepted or not accepted)
+ * id: the tspec id
+ * accepted: 1 : accepted by AP, 0: new or rejected by AP
+ * return: pointer to the tspec struct or NULL if a tspec doesn't exist
+ */
+static struct cac_tspec *find_tspec_entry(int id, int accepted)
+{
+ struct cac_tspec *itr;
+
+ itr = tspec_list;
+ while (itr != NULL) {
+ if ((itr->id == id) && (itr->accepted == accepted))
+ break;
+ itr = itr->next;
+ }
+ return itr;
+}
+
+/* Name: cac_query_tspec_field
+ * Desc: Get the value of a tspec's field.
+ * sdev: pointer to the slsi_dev struct
+ * entry: pointer to the tspec
+ * field: the field name
+ * value: poinet to the field value
+ * return: 0 (success), -1 (failure)
+ */
+static int cac_query_tspec_field(struct slsi_dev *sdev, struct cac_tspec *entry, const char *field, u32 *value)
+{
+ int i;
+ u32 tsinfo;
+ u8 mask;
+ u8 *pos;
+
+ if ((entry == NULL) || (field == NULL) || (value == NULL))
+ return -1;
+
+ for (i = 0; i < NUM_TSPEC_FIELDS; i++)
+ if (strcasecmp(field, tspec_fields[i].name) == 0)
+ break;
+ if (i >= NUM_TSPEC_FIELDS) {
+ SLSI_ERR(sdev, "CAC: Invalid TSPEC config field\n");
+ return -1;
+ }
+ if (tspec_fields[i].is_tsinfo_field) {
+ mask = tspec_fields[i].size;
+ tsinfo = CAC_GET_LE24(&entry->tspec.ts_info[0]) & TSINFO_MASK;
+ *value = (tsinfo >> tspec_fields[i].offset) & mask;
+ } else {
+ pos = (u8 *)(&entry->tspec) + tspec_fields[i].offset;
+ if (tspec_fields[i].size == 1)
+ *value = (*pos & 0xFF);
+ else if (tspec_fields[i].size == 2)
+ *value = CAC_GET_LE16(pos);
+ else
+ *value = CAC_GET_LE32(pos);
+ }
+
+ return 0;
+}
+
+/* Name: get_netdev_for_station
+ * Desc: Get the pointer to net_device struct with vif_type == FAPI_VIFTYPE_STATION
+ * sdev: pointer to the slsi_dev struct
+ * return: pointer to the net_device struct or NULL if the it doesn't exist
+ */
+static struct net_device *get_netdev_for_station(struct slsi_dev *sdev)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ s32 vif;
+
+ for (vif = 1; vif <= CONFIG_SCSC_WLAN_MAX_INTERFACES; vif++) {
+ dev = slsi_get_netdev_locked(sdev, vif);
+ if (!dev)
+ continue;
+ ndev_vif = netdev_priv(dev);
+ if (!ndev_vif)
+ continue;
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION &&
+ ndev_vif->iftype == NL80211_IFTYPE_STATION)
+ return dev;
+ }
+ return NULL;
+}
+
+/* Name: add_ebw_ie
+ * Desc: Add ebw ie
+ * buf: pointer to buf that the ie is going to added
+ * buf_len: the byte length of the ie
+ * tsid: tspec id
+ * return: length of bytes that were added
+ */
+static int add_ebw_ie(u8 *buf, size_t buf_len, u8 tsid)
+{
+ u8 *pos;
+
+ if ((buf == NULL) || (buf_len < 8))
+ return -1;
+
+ pos = buf;
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* element id */
+ *pos++ = 6; /* length */
+ memcpy(pos, EBW_OUI_TYPE, sizeof(EBW_OUI_TYPE));
+ pos += sizeof(EBW_OUI_TYPE);
+ *pos++ = tsid;
+ *pos++ = 0;
+
+ return pos - buf;
+}
+
+/* Name: add_tsrs_ie
+ * Desc: Add tsrs_ie
+ * buf: pointer to buf that the ie is going to added
+ * buf_len: the byte length of the ie
+ * tsid: tspec id
+ * rates: list of rates that are supported
+ * num_rates: number of rates that are supported
+ * return: length of bytes that were added
+ */
+static int add_tsrs_ie(u8 *buf, size_t buf_len, u8 tsid,
+ u8 rates[CCX_MAX_NUM_RATES], int num_rates)
+{
+ u8 *pos;
+ size_t ie_len = (size_t)(7 + num_rates);
+ int i;
+
+ if ((buf == NULL) || (buf_len < ie_len) || (rates == NULL) ||
+ (num_rates > CCX_MAX_NUM_RATES))
+ return -1;
+
+ pos = buf;
+ memset(pos, 0, ie_len);
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* element id */
+ *pos++ = ie_len - 2; /* length */
+ memcpy(pos, TSRS_OUI_TYPE, sizeof(TSRS_OUI_TYPE));
+ pos += sizeof(TSRS_OUI_TYPE);
+ *pos++ = tsid;
+ for (i = 0; i < num_rates; i++)
+ *pos++ = rates[i];
+
+ return pos - buf;
+}
+
+/* Name: bss_get_ie
+ * Desc: Get the buffer of an IE that is included in a bss
+ * bss: pointer to the cfg80211_bss struct
+ * ie: the IE id that is going to be extracted
+ * return: pointer to the start of the IE buffer
+ */
+static const u8 *bss_get_ie(struct cfg80211_bss *bss, u8 ie)
+{
+ const u8 *pos;
+ u8 ies_len, ies_cur_len;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ pos = (const u8 *)(bss->ies);
+ ies_len = (u8)bss->ies->len;
+#else
+ pos = (const u8 *)(bss->information_elements);
+ ies_len = (u8)bss->len_information_elements;
+#endif
+ ies_cur_len = 1;
+
+ while (ies_cur_len <= ies_len) {
+ if (pos[0] == ie)
+ return pos;
+
+ pos += 2 + pos[1];
+ ies_cur_len++;
+ }
+
+ return NULL;
+}
+
+/* Name: bss_get_bit_rates
+ * Desc: Get the buffer of an IE that is included in a bss
+ * bss: pointer to the cfg80211_bss struct
+ * rates: the rates that are supported
+ * return: 0 (succes), -1 (failure)
+ */
/* Name: bss_get_bit_rates
 * Desc: Collect all (extended) supported rates advertised by a BSS.
 *       Combines the Supported Rates and Extended Supported Rates IEs;
 *       the top bit of each rate octet (the "basic rate" flag) is masked
 *       off, leaving the rate in 500 kbit/s units.
 * bss: pointer to the cfg80211_bss struct
 * rates: on success, set to a kmalloc'd array the CALLER must kfree
 * return: number of rates (> 0) on success, -1 when no rates IE is present
 *         or allocation fails
 */
static int bss_get_bit_rates(struct cfg80211_bss *bss, u8 **rates)
{
    const u8 *ie, *ie2;
    int i, j;
    unsigned int len;
    u8 *r;

    ie = bss_get_ie(bss, WLAN_EID_SUPP_RATES);
    ie2 = bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES);

    /* ie[1] is the IE payload length == number of rate octets. */
    len = (ie ? ie[1] : 0) + (ie2 ? ie2[1] : 0);

    if (!len)
        return -1;

    r = kmalloc(len, GFP_KERNEL);
    if (!r)
        return -1;

    for (i = 0; ie && i < ie[1]; i++)
        r[i] = ie[i + 2] & 0x7f;

    for (j = 0; ie2 && j < ie2[1]; j++)
        r[i + j] = ie2[j + 2] & 0x7f;

    *rates = r;
    return len;
}
+
+/* Name: cac_send_addts
+ * Desc: Build and send the ADDTS action frame
+ * sdev: pointer to the slsi_dev struct
+ * id: the tspec id that is going to be included in the ADDTS action frame
+ * ebw: 1 (add ebw IE), 0 (don't add ebw IE)
+ * return: 0 (succes), -1 (failure)
+ */
+static int cac_send_addts(struct slsi_dev *sdev, int id, int ebw)
+{
+ struct action_addts_req *req;
+ size_t extra_ie_len = 50;
+ int ie_len = 0;
+ size_t req_len;
+ struct cac_tspec *entry;
+ u8 tsid, i;
+ u8 *rates;
+ u8 rate = 0;
+ u8 *pos;
+ int num_rates;
+ struct netdev_vif *ndev_vif;
+ struct net_device *netdev;
+ u16 host_tag = slsi_tx_mgmt_host_tag(sdev);
+ struct ieee80211_hdr *hdr;
+ u8 *buf = NULL;
+ u8 *bssid;
+ u8 r = 0;
+
+ entry = find_tspec_entry(id, 0);
+ if (entry == NULL) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Invalid TSPEC ID\n");
+ return -1;
+ }
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ netdev = get_netdev_for_station(sdev);
+ if (netdev == NULL) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ ndev_vif = netdev_priv(netdev);
+ if ((ndev_vif == NULL) || (ndev_vif->sta.sta_bss == NULL)) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
+ (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Not connected, can't send ADDTS\n");
+ r = -1;
+ goto exit;
+ }
+ bssid = ndev_vif->sta.sta_bss->bssid;
+ if (entry->accepted) {
+ SLSI_ERR(sdev, "CAC-ADDTS: TSPEC already accepted\n");
+ r = -1;
+ goto exit;
+ }
+
+ buf = kmalloc(IEEE80211_HEADER_SIZE + sizeof(*req) + extra_ie_len, GFP_KERNEL);
+ if (buf == NULL) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Failed to allocate ADDTS request\n");
+ r = -1;
+ goto exit;
+ }
+
+ hdr = (struct ieee80211_hdr *)buf;
+ hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_ACTION);
+ SLSI_ETHER_COPY(hdr->addr1, bssid);
+ SLSI_ETHER_COPY(hdr->addr2, sdev->hw_addr);
+ SLSI_ETHER_COPY(hdr->addr3, bssid);
+
+ req = (struct action_addts_req *)(buf + IEEE80211_HEADER_SIZE);
+ req->hdr.category = WLAN_CATEGORY_WMM;
+ req->hdr.action = WMM_ACTION_CODE_ADDTS_REQ;
+ if (dialog_token_next == 0)
+ dialog_token_next++;
+ req->hdr.dialog_token = dialog_token_next++;
+ req->hdr.status_code = 0;
+ tsid = (CAC_GET_LE24(req->tspec.ts_info) >> 1) & 0xF;
+
+ /* Find the value of PSB in TSPEC. If PSB is unspecified; fill the
+ * value from UAPSD value stored in Peer structure for the AC
+ */
+ if (entry->psb_specified == 0) {
+ struct slsi_peer *peer;
+ u32 priority;
+
+ peer = slsi_get_peer_from_qs(sdev, netdev, SLSI_STA_PEER_QUEUESET);
+ if (!peer) {
+ SLSI_ERR(sdev, "CAC-ADDTS: no Peer found\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+
+ cac_query_tspec_field(sdev, entry, "user_priority", &priority);
+ if (peer->uapsd & BIT(slsi_frame_priority_to_ac_queue(priority)))
+ entry->tspec.ts_info[1] |= 0x04;
+ }
+ memcpy(&req->tspec, &entry->tspec, sizeof(entry->tspec));
+ req_len = sizeof(*req);
+ pos = (u8 *)(req + 1);
+ entry->ebw = ebw ? 1 : 0;
+
+ if (ebw) {
+ ie_len += add_ebw_ie(pos, extra_ie_len, tsid);
+ if (ie_len <= 0)
+ SLSI_ERR(sdev, "CAC-ADDTS: Failed to add EBW IE\n");
+ }
+
+ /* Add tsrs IE in case of ccx enabled bss */
+ if (ccx_status == BSS_CCX_ENABLED) {
+ num_rates = bss_get_bit_rates(ndev_vif->sta.sta_bss, &rates);
+ if (num_rates <= 0)
+ rate = 12; /* Default to 6Mbps */
+ else {
+ for (i = 0; i < num_rates; i++)
+ if ((rates[i] > rate) && (rates[i] <= 48))
+ rate = rates[i];
+ kfree(rates);
+ }
+
+ do {
+ /* if the nominal rate is equal to minimum_phy_rate
+ * don't add the tsrs_ie
+ */
+ if ((rate * TSRS_RATE_PER_UNIT) == req->tspec.minimum_phy_rate)
+ break;
+
+ if ((rate * TSRS_RATE_PER_UNIT) > req->tspec.minimum_phy_rate) {
+ ie_len += add_tsrs_ie(pos + ie_len, extra_ie_len - ie_len,
+ tsid, &rate, 1);
+ if (ie_len <= 0) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Failed to add TSRS IE\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+ } else { /* only the "<" case is possible */
+ SLSI_ERR(sdev, "CAC-ADDTS: BSS rate too low\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+ } while (0);
+ }
+
+ if (slsi_mlme_send_frame_mgmt(sdev, netdev, buf, (IEEE80211_HEADER_SIZE + req_len + ie_len),
+ FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION,
+ host_tag, 0, sdev->fw_dwell_time, 0) != 0) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Failed to send ADDTS request\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+ entry->dialog_token = req->hdr.dialog_token;
+
+exit_free_buf:
+ kfree(buf);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return r;
+}
+
+/* Name: cac_send_delts
+ * Desc: Build and send the DELTS action frame
+ * sdev: pointer to the slsi_dev struct
+ * id: the tspec id that is going the DELTS action frame to send for
+ * return: 0 (succes), -1 (failure)
+ */
+static int cac_send_delts(struct slsi_dev *sdev, int id)
+{
+ struct action_delts_req *req;
+ struct cac_tspec *entry;
+ size_t req_len;
+ u32 priority;
+ int rc;
+ struct netdev_vif *ndev_vif;
+ struct net_device *netdev;
+ u16 host_tag = slsi_tx_mgmt_host_tag(sdev);
+ struct ieee80211_hdr *hdr;
+ u8 *buf = NULL;
+ u8 *bssid;
+ u8 r = 0;
+ struct slsi_peer *stapeer;
+
+ entry = find_tspec_entry(id , 1);
+ if (entry == NULL) {
+ SLSI_ERR(sdev, "CAC-DELTS: no TSPEC has been established for tsid=%d\n", id);
+ return -1;
+ }
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ netdev = get_netdev_for_station(sdev);
+ if (netdev == NULL) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ ndev_vif = netdev_priv(netdev);
+ if ((ndev_vif == NULL) || (ndev_vif->sta.sta_bss == NULL)) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
+ (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
+ SLSI_ERR(sdev, "CAC-DELTS: Not connected, can't send DELTS\n");
+ r = -1;
+ goto exit;
+ }
+
+ stapeer = slsi_get_peer_from_qs(sdev, netdev, SLSI_STA_PEER_QUEUESET);
+ if (WARN_ON(!stapeer)) {
+ r = -1;
+ goto exit;
+ }
+
+ bssid = ndev_vif->sta.sta_bss->bssid;
+ buf = kmalloc(24 + sizeof(*req), GFP_KERNEL);
+ if (buf == NULL) {
+ SLSI_ERR(sdev, "CAC-DELTS: Failed to allocate DELTS request\n");
+ r = -1;
+ goto exit;
+ }
+ hdr = (struct ieee80211_hdr *)buf;
+ hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_ACTION);
+ SLSI_ETHER_COPY(hdr->addr1, bssid);
+ SLSI_ETHER_COPY(hdr->addr2, sdev->hw_addr);
+ SLSI_ETHER_COPY(hdr->addr3, bssid);
+ req = (struct action_delts_req *)(buf + 24);
+ req_len = sizeof(*req);
+ req->hdr.category = WLAN_CATEGORY_WMM;
+ req->hdr.action = WMM_ACTION_CODE_DELTS;
+ req->hdr.dialog_token = 0;
+ req->hdr.status_code = 0;
+ memcpy(&req->tspec, &entry->tspec, sizeof(entry->tspec));
+
+ /* TODO_HARDMAC: If PMF is negotiated over the link, the host shall not
+ * issue this primitive before pairwise keys have been installed in F/W .
+ */
+ if (slsi_mlme_send_frame_mgmt(sdev, netdev, buf, (IEEE80211_HEADER_SIZE + req_len), FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, 0, 0) != 0) {
+ SLSI_ERR(sdev, "CAC-DELTS: Failed to send DELTS request\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+ rc = cac_query_tspec_field(sdev, entry, "user_priority", &priority);
+ if (rc != 0) {
+ SLSI_ERR(sdev, "CAC-DELTS: Error in reading priority from tspec!\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+
+ if (slsi_mlme_del_traffic_parameters(sdev, netdev, priority) != 0) {
+ SLSI_ERR(sdev, "CAC-DELTS: Failed to send DELTS request\n");
+ r = -1;
+ goto exit_free_buf;
+ }
+
+ /* BlockAck Control Req was previously used to enable blockack for VO & VI. This
+ * signal is removed and expected to be replaced with MIBs - not able to see
+ * through the haze yet!. Need to take approp. action when the cloud clears.
+ * Historical Data:
+ * if the DELTS request is for UP = 4 or 5 then generate a
+ * MLME-BLOCKACK-CONTROL.request so that no BlockAck is negotiated
+ * on AC_VI. And leave AC_BE enabled
+ */
+
+ entry->accepted = 0; /* DELTS sent successfully */
+ sdev->tspec_error_code = 0;
+ stapeer->tspec_established &= ~BIT(priority);
+ /* update RIC in add_info_elements for assoc req */
+ cac_set_ric_ie(sdev, netdev);
+
+ if (ccx_status == BSS_CCX_ENABLED && previous_msdu_lifetime != MAX_TRANSMIT_MSDU_LIFETIME_NOT_VALID)
+ if (slsi_send_max_transmit_msdu_lifetime(sdev, netdev, previous_msdu_lifetime) != 0) {
+ SLSI_ERR(sdev, "CAC-DELTS: slsi_send_max_msdu_lifetime failed");
+ goto exit_free_buf;
+ }
+exit_free_buf:
+ kfree(buf);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return r;
+}
+
+/* Name: cac_create_tspec
+ * Desc: Create a tspec entry and added it to the tspec list
+ * sdev: pointer to the slsi_dev struct
+ * id: the id of the tspec that is included in DELTS action frame
+ * return: 0 (succes), -1 (failure)
+ */
+static int cac_create_tspec(struct slsi_dev *sdev, char *args)
+{
+ struct cac_tspec *entry;
+ int id;
+ u8 tid_auto_done = 0;
+ struct netdev_vif *ndev_vif;
+ struct net_device *netdev;
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ netdev = get_netdev_for_station(sdev);
+ if (netdev == NULL) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ ndev_vif = netdev_priv(netdev);
+ if (ndev_vif == NULL) {
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
+ (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
+ SLSI_ERR(sdev, "CAC-ADDTS: Not connected, can't create TSPEC\n");
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return -1;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+ if (args == NULL) {
+ /* No input for tid, so we use the auto increment*/
+ if (tspec_list_next_id <= 7) {
+ id = tspec_list_next_id++;
+ } else {
+ id = 0;
+ tspec_list_next_id = 0;
+ tspec_list_next_id++;
+ }
+ tid_auto_done = 1;
+ }
+
+ if ((!tid_auto_done) && (strtoint(args, &id) < 0)) {
+ /* Invalid input for tid, so we use the auto increment*/
+ if (tspec_list_next_id <= 7) {
+ id = tspec_list_next_id++;
+ } else {
+ id = 0;
+ tspec_list_next_id = 0;
+ tspec_list_next_id++;
+ }
+ }
+
+ if (id < TSID_MIN || id > TSID_MAX) {
+ SLSI_ERR(sdev, "CAC: Invalid TSID =%d, must be in range 0-7\n", id);
+ return -1;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ SLSI_ERR(sdev, "CAC: Failed to allocate TSPEC\n");
+ return -1;
+ }
+
+ entry->id = id;
+ entry->tspec.eid = WLAN_EID_VENDOR_SPECIFIC;
+ entry->tspec.length = sizeof(entry->tspec) - sizeof(entry->tspec.eid) - sizeof(entry->tspec.length);
+ CAC_PUT_BE24(entry->tspec.oui, WLAN_OUI_MICROSOFT);
+ entry->tspec.oui_type = WLAN_OUI_TYPE_MICROSOFT_WMM;
+ entry->tspec.oui_subtype = WMM_OUI_SUBTYPE_TSPEC_ELEMENT;
+ entry->tspec.version = WMM_VERSION;
+ entry->accepted = 0;
+ entry->psb_specified = 0;
+ /* Setting the 7th bit of ts info to 1, as its a fixed reserved bit. */
+ entry->tspec.ts_info[0] = 0x80;
+
+ entry->next = tspec_list;
+ tspec_list = entry;
+ SLSI_DBG1(sdev, SLSI_MLME, "CAC: Created TSPEC entry for id =%d\n", id);
+
+ return entry->id;
+}
+
+/* Name: cac_delete_tspec
+ * Desc: delete a tspec from the list of the tspecs
+ * sdev: pointer to the slsi_dev struct
+ * id: the id of the tspec that will be deleted
+ * return: 0 (succes), -1 (failure)
+ */
+static int cac_delete_tspec(struct slsi_dev *sdev, int id)
+{
+ struct cac_tspec *itr;
+ struct cac_tspec *prev;
+
+ itr = tspec_list;
+ prev = NULL;
+ while (itr != NULL) {
+ if (itr->id == id) {
+ if (prev)
+ prev->next = itr->next;
+ else
+ tspec_list = itr->next;
+
+ if (itr->accepted)
+ cac_send_delts(sdev, itr->id);
+
+ SLSI_DBG3(sdev, SLSI_MLME, "CAC: TSPEC entry deleted for id =%d\n", id);
+ kfree(itr);
+
+ return 0;
+ }
+ prev = itr;
+ itr = itr->next;
+ }
+ SLSI_ERR(sdev, "CAC: Couldn't find TSPEC with id %d for deletion", id);
+
+ return -1;
+}
+
+/* Name: cac_delete_tspec_by_state
+ * Desc: delete a tspec from the list of the tspecs based on id and state
+ * sdev: pointer to the slsi_dev struct
+ * id: the id of the tspec that will be deleted
+ * accepted: 0 - not yet accepted by AP, 1- accepted by AP
+ * return: 0 (succes), -1 (failure)
+ */
+static int cac_delete_tspec_by_state(struct slsi_dev *sdev, int id, int accepted)
+{
+ struct cac_tspec *itr;
+ struct cac_tspec *prev;
+
+ itr = tspec_list;
+ prev = NULL;
+ while (itr != NULL) {
+ if ((itr->id == id) && (itr->accepted == accepted)) {
+ if (prev)
+ prev->next = itr->next;
+ else
+ tspec_list = itr->next;
+
+ SLSI_DBG3(sdev, SLSI_MLME, "CAC: Deleting TSPEC 0x%p with ID %d (accepted =%d)\n", itr, id, accepted);
+ kfree(itr);
+ return 0;
+ }
+ prev = itr;
+ itr = itr->next;
+ }
+ SLSI_ERR(sdev, "CAC: Couldn't find TSPEC with ID %d (accepted =%d)\n", id, accepted);
+
+ return -1;
+}
+
+/* Name: cac_config_tspec
+ * Desc: Set a field's value of a tspec
+ * sdev: pointer to the slsi_dev struct
+ * id: the id of the tspec that will be configured
+ * field: the field name that will be changed
+ * value: the value of the field
+ * return: 0 (succes), -1 (failure)
+ */
/* Name: cac_config_tspec
 * Desc: Set a field's value of a (not yet accepted) tspec
 * sdev: pointer to the slsi_dev struct
 * id: the id of the tspec that will be configured
 * field: the field name that will be changed
 * value: the value of the field; for ts_info fields it is masked into the
 *        24-bit ts_info, otherwise written little-endian at the field's
 *        byte offset/width
 * return: 0 (succes), -1 (failure)
 */
static int cac_config_tspec(struct slsi_dev *sdev, int id, const char *field, u32 value)
{
    struct cac_tspec *entry;
    int i;
    u32 max = 0xFFFFFFFF;
    u32 tsinfo;
    u8 mask;
    u8 *pos;

    if (field == NULL)
        return -1;

    /* Only not-yet-accepted TSPECs (accepted == 0) may be edited. */
    entry = find_tspec_entry(id, 0);
    if (entry == NULL) {
        SLSI_ERR(sdev, "CAC: Invalid TSPEC ID\n");
        return -1;
    }

    for (i = 0; i < NUM_TSPEC_FIELDS; i++)
        if (strcasecmp(field, tspec_fields[i].name) == 0)
            break;
    if (i >= NUM_TSPEC_FIELDS) {
        SLSI_ERR(sdev, "CAC: Invalid TSPEC config field\n");
        return -1;
    }
    if (tspec_fields[i].read_only) {
        SLSI_ERR(sdev, "CAC: TSPEC field is read-only\n");
        return -1;
    }
    if (tspec_fields[i].is_tsinfo_field) {
        /* For ts_info fields 'size' is the bit mask, 'offset' the bit pos. */
        mask = tspec_fields[i].size;
        if (strcasecmp(field, "psb") == 0) {
            /* In-range psb marks it as explicitly specified; out-of-range
             * psb is silently treated as "unspecified" and reports success
             * without writing anything.
             */
            if (value <= mask)
                entry->psb_specified = 1;
            else
                return 0;
        }
        if (value > mask) {
            SLSI_ERR(sdev, "CAC: TSPEC config value exceeded maximum for %s\n", tspec_fields[i].name);
            return -1;
        }

        /* Read-modify-write the 24-bit ts_info with the field's mask. */
        tsinfo = CAC_GET_LE24(&entry->tspec.ts_info[0]);
        tsinfo &= ~(u32)(mask << tspec_fields[i].offset);
        tsinfo |= (u32)((value & mask) << tspec_fields[i].offset);
        CAC_PUT_LE24(entry->tspec.ts_info, tsinfo);
    } else {
        /* Body fields: clamp to the field's byte width (1, 2 or 4). */
        if (tspec_fields[i].size < 4)
            max = ((1 << (tspec_fields[i].size * 8)) - 1);

        if (value > max) {
            SLSI_ERR(sdev, "CAC: TSPEC config value exceeded maximumfor %s\n", tspec_fields[i].name);
            return -1;
        }

        pos = (u8 *)(&entry->tspec) + tspec_fields[i].offset;
        if (tspec_fields[i].size == 1)
            *pos = (value & 0xFF);
        else if (tspec_fields[i].size == 2)
            CAC_PUT_LE16(pos, value);
        else
            CAC_PUT_LE32(pos, value);
    }

    return 0;
}
+
+/* Name: cac_ctrl_create_tspec
+ * Desc: public function to create tspec
+ * sdev: pointer to the slsi_dev struct
+ * return: tspec id
+ */
/* Name: cac_ctrl_create_tspec
 * Desc: public function to create tspec
 * sdev: pointer to the slsi_dev struct
 * args: optional string holding the requested TSID
 * return: tspec id (success), -1 (failure)
 */
int cac_ctrl_create_tspec(struct slsi_dev *sdev, char *args)
{
    const int id = cac_create_tspec(sdev, args);

    return (id < 0) ? -1 : id;
}
+
+/* Name: cac_ctrl_delete_tspec
+ * Desc: public function to delete tspec
+ * sdev: pointer to the slsi_dev struct
+ * args:pointer to a buffer that contains the agrs for deleting tspec from the list
+ * return: 0 (succes), -1 (failure)
+ */
+int cac_ctrl_delete_tspec(struct slsi_dev *sdev, char *args)
+{
+ int id;
+
+ if (strtoint(args, &id) < 0) {
+ SLSI_ERR(sdev, "CAC-DELETE-TSPEC: Invalid TSPEC ID\n");
+ return -1;
+ }
+
+ if (cac_delete_tspec(sdev, id) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Name: cac_ctrl_config_tspec
+ * Desc: public function to configure a tspec
+ * sdev: pointer to the slsi_dev struct
+ * args: pointer to a buffer that contains the agrs for tspec configuration
+ * return: 0 (succes), -1 (failure)
+ */
+int cac_ctrl_config_tspec(struct slsi_dev *sdev, char *args)
+{
+ char *id;
+ char *field;
+ char *value;
+ int tspec_id;
+ u32 val;
+
+ id = args;
+ field = strchr(id, ' ');
+ if (field == NULL) {
+ SLSI_ERR(sdev, "CAC: field string is NULL\n");
+ return -1;
+ }
+ *field++ = '\0';
+ value = strchr(field, ' ');
+ if (value == NULL) {
+ SLSI_ERR(sdev, "CAC: field value is NULL\n");
+ return -1;
+ }
+ *value++ = '\0';
+
+ if (strtoint(id, &tspec_id) < 0) {
+ SLSI_ERR(sdev, "CAC: Conversion error for tspecid\n");
+ return -1;
+ }
+
+ if (strtoint(value, &val) < 0) {
+ SLSI_ERR(sdev, "CAC: Conversion error for tspecid value\n");
+ return -1;
+ }
+
+ if (cac_config_tspec(sdev, tspec_id, field, val) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Name: cac_ctrl_send_addts
+ * Desc: public function to send ADDTS action frame
+ * sdev: pointer to the slsi_dev struct
+ * args: buffer that contains the agrs for ADDTS request
+ * return: 0 (succes), -1 (failure)
+ */
/* Name: cac_ctrl_send_addts
 * Desc: public function to send ADDTS action frame.  @args has the form
 *       "<tspec-id>[ ebw]" and is tokenized in place.
 * sdev: pointer to the slsi_dev struct
 * args: buffer that contains the args for ADDTS request
 * return: 0 (success), -1 (failure)
 */
int cac_ctrl_send_addts(struct slsi_dev *sdev, char *args)
{
    char *sep;
    int id;
    int ebw = 0;

    if (!args)
        return -1;

    sep = strchr(args, ' ');
    if (sep) {
        *sep++ = '\0';
        if (strncmp(sep, "ebw", 3) == 0)
            ebw = 1;
    }

    if (strtoint(args, &id) < 0) {
        SLSI_ERR(sdev, "CAC: Conversion error for tspecid value\n");
        return -1;
    }

    return (cac_send_addts(sdev, id, ebw) < 0) ? -1 : 0;
}
+
+/* Name: cac_ctrl_send_delts
+ * Desc: public function to send DELTS action frame
+ * sdev: pointer to the slsi_dev struct
+ * args: buffer that contains the agrs for DELTS request
+ * return: 0 (succes), -1 (failure)
+ */
/* Name: cac_ctrl_send_delts
 * Desc: public function to send DELTS action frame for the TSPEC id in @args.
 * sdev: pointer to the slsi_dev struct
 * args: buffer that contains the args for DELTS request
 * return: 0 (success), -1 (failure)
 */
int cac_ctrl_send_delts(struct slsi_dev *sdev, char *args)
{
    int id;

    if (!args)
        return -1;

    if (strtoint(args, &id) < 0) {
        SLSI_ERR(sdev, "CAC: Invalid TSPEC ID\n");
        return -1;
    }

    return (cac_send_delts(sdev, id) < 0) ? -1 : 0;
}
+
+/* Name: cac_process_delts_req
+ * Desc: process a DELTS request from the AP: remove the firmware traffic
+ *       parameters for the matching local TSPEC, mark it not admitted and
+ *       refresh the RIC elements used in (re)association requests
+ * sdev: pointer to the slsi_dev struct
+ * netdev: net_device the DELTS request arrived on
+ * req: buffer of the DELTS request
+ * return: none (errors are logged and the request is dropped)
+ */
+static void cac_process_delts_req(struct slsi_dev *sdev, struct net_device *netdev, struct action_delts_req *req)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(netdev);
+ struct cac_tspec *itr;
+ u32 priority;
+ int rc;
+ struct slsi_peer *stapeer;
+ u8 tid;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ /* Only meaningful on a connected STA vif with a known BSS */
+ if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
+ (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) || (ndev_vif->sta.sta_bss == NULL)) {
+ SLSI_ERR(sdev, "CAC: Not connected, Unexpected DELTS request\n");
+ return;
+ }
+
+ stapeer = slsi_get_peer_from_qs(sdev, netdev, SLSI_STA_PEER_QUEUESET);
+ if (WARN_ON(!stapeer))
+ return;
+
+ /* TID lives in bits 1-4 of the 24-bit TS Info field */
+ tid = (CAC_GET_LE24(req->tspec.ts_info) >> 1) & 0xF;
+ SLSI_DBG1(sdev, SLSI_MLME, "CAC: TID in delts request =%d\n", tid);
+
+ itr = find_tspec_entry(tid, 1);
+ if (itr == NULL) {
+ SLSI_ERR(sdev, "CAC: No matching TSPEC found\n");
+ return;
+ }
+
+ rc = cac_query_tspec_field(sdev, itr, "user_priority", &priority);
+ if (rc != 0) {
+ SLSI_ERR(sdev, "CAC: Missing priority from TSPEC!\n");
+ return;
+ }
+
+ if (slsi_mlme_del_traffic_parameters(sdev, netdev, priority) != 0) {
+ SLSI_ERR(sdev, "CAC: Failed to send DEL-TRAFFIC_PARAMETERS request\n");
+ return;
+ }
+
+ /* BlockAck Control Req was previously used to enable blockack for VO & VI. This
+ * signal is removed and expected to be replaced with MIBs - not able to see
+ * through the haze yet!. Need to take approp. action when the cloud clears.
+ * Historical Data:
+ * if the DELTS request is for UP = 4 or 5 then generate a
+ * MLME-BLOCKACK-CONTROL.request so that no BlockAck is negotiated
+ * on AC_VI. And leave AC_BE enabled
+ */
+
+ itr->accepted = 0; /* del traffic parameters sent successfully */
+ stapeer->tspec_established &= ~BIT(priority);
+ SLSI_DBG1(sdev, SLSI_MLME, "tspec_established =%x\n", stapeer->tspec_established);
+ /* update RIC in add_info_elements for assoc req */
+ cac_set_ric_ie(sdev, netdev);
+
+ /* CCX: restore the MSDU lifetime that was saved when the TS was admitted */
+ if (ccx_status == BSS_CCX_ENABLED && previous_msdu_lifetime != MAX_TRANSMIT_MSDU_LIFETIME_NOT_VALID)
+ if (slsi_send_max_transmit_msdu_lifetime(sdev, netdev, previous_msdu_lifetime) != 0) {
+ SLSI_ERR(sdev, "CAC: slsi_send_max_msdu_lifetime failed");
+ return;
+ }
+}
+
+/* Name: cac_find_edca_ie
+ * Desc: locate the Cisco EDCA-lifetime vendor IE inside an ADDTS response
+ * ie: buffer holding the IEs of the action frame
+ * ie_len: length of that buffer in bytes
+ * tsid: out - TSID carried in the EDCA IE
+ * lifetime: out - MSDU lifetime (little-endian u16) carried in the EDCA IE
+ * return: 0 (success), -1 (failure)
+ */
+static int cac_find_edca_ie(const u8 *ie, size_t ie_len, u8 *tsid, u16 *lifetime)
+{
+	const u8 *edca;
+
+	if (!ie || !tsid || !lifetime || ie_len < 9)
+		return -1;
+
+	edca = cfg80211_find_vendor_ie(WLAN_OUI_CISCO, WLAN_OUI_TYPE_CISCO_EDCA, ie, ie_len);
+	if (!edca || edca + 9 > ie + ie_len)
+		return -1;
+
+	/* Byte 6 of the IE holds the TSID, bytes 7-8 the MSDU lifetime */
+	*tsid = edca[6];
+	*lifetime = CAC_GET_LE16(&edca[7]);
+	return 0;
+}
+
+/* Name: cac_process_addts_rsp
+ * Desc: handle an ADDTS response: match it to the pending local TSPEC via
+ *       the dialog token, program the firmware traffic parameters with the
+ *       granted medium time (merging with an already-established TS on the
+ *       same AC if needed) and refresh the RIC elements for reassociation
+ * sdev: pointer to the slsi_dev struct
+ * netdev: net_device the response was received on
+ * rsp: the buffer of the ADDTS response received
+ * ie: IEs following the TSPEC element in the response
+ * ie_len: the length of the ie buffer
+ * return: none (errors are logged and the response is dropped)
+ */
+static void cac_process_addts_rsp(struct slsi_dev *sdev, struct net_device *netdev, struct action_addts_rsp *rsp, const u8 *ie, size_t ie_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(netdev);
+	struct cac_tspec *itr, *entry;
+	struct wmm_tspec_element *tspec;
+	u32 priority, prev_priority;
+	int rc;
+	u8 tsid;
+	u16 msdu_lifetime;
+	struct slsi_peer *peer;
+	u16 medium_time;
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	SLSI_DBG1(sdev, SLSI_MLME, "\n");
+
+	if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
+	    (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) || (ndev_vif->sta.sta_bss == NULL)) {
+		SLSI_ERR(sdev, "CAC: Not connected, INVALID state for ADDTS response\n");
+		return;
+	}
+
+	peer = slsi_get_peer_from_qs(sdev, netdev, SLSI_STA_PEER_QUEUESET);
+	if (WARN_ON(!peer))
+		return;
+
+	/* Match the response to the outstanding request via the dialog token */
+	itr = tspec_list;
+	while (itr != NULL) {
+		if (itr->dialog_token == rsp->hdr.dialog_token) {
+			itr->dialog_token = 0; /*reset the dialog token to avoid any incorrect matches if AP send incorrect value*/
+			break;
+		}
+		itr = itr->next;
+	}
+	if (itr == NULL) {
+		SLSI_ERR(sdev, "CAC: No matching TSPEC found for ADDTS response\n");
+		return;
+	}
+
+	if (rsp->hdr.status_code != ADDTS_STATUS_ACCEPTED) {
+		SLSI_ERR(sdev, "CAC: TSPEC rejected (status=0x%02X)", rsp->hdr.status_code);
+		cac_delete_tspec_by_state(sdev, itr->id, 0);
+		return;
+	}
+
+	if ((ccx_status == BSS_CCX_ENABLED) && cac_find_edca_ie(ie, ie_len, &tsid, &msdu_lifetime) != 0)
+		msdu_lifetime = MSDU_LIFETIME_DEFAULT;
+
+	tspec = (struct wmm_tspec_element *)(rsp + 1);
+	medium_time = tspec->medium_time;
+
+	/* Fix: bail out when the priority cannot be read. Previously the
+	 * return code was ignored and "priority" was used uninitialized
+	 * (undefined behavior) whenever the query failed.
+	 */
+	rc = cac_query_tspec_field(sdev, itr, "user_priority", &priority);
+	if (rc != 0) {
+		SLSI_ERR(sdev, "CAC: Missing priority from TSPEC!\n");
+		return;
+	}
+	SLSI_DBG1(sdev, SLSI_MLME, "CAC: Priority for current tspec id %d=%d\n", itr->id, priority);
+
+	if (peer->tspec_established == 0)
+		goto set_params;
+
+	SLSI_DBG1(sdev, SLSI_MLME, "TSPEC already established\n");
+
+	/* TSPEC is already established . Check if it is for same UP / UP mapping to same AC
+	 * If same UP (or UP mapping to same AC) : set params with modified values
+	 * If not, set traffic params for this priority (new AC)
+	 */
+	switch (priority) {
+	/*AC_BK*/
+	case FAPI_PRIORITY_QOS_UP1:
+	case FAPI_PRIORITY_QOS_UP2:
+		if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP1))
+			prev_priority = FAPI_PRIORITY_QOS_UP1;
+		else if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP2))
+			prev_priority = FAPI_PRIORITY_QOS_UP2;
+		else
+			goto set_params;
+		break;
+
+	/*AC_BE*/
+	case FAPI_PRIORITY_QOS_UP0:
+	case FAPI_PRIORITY_QOS_UP3:
+		if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP0))
+			prev_priority = FAPI_PRIORITY_QOS_UP0;
+		else if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP3))
+			prev_priority = FAPI_PRIORITY_QOS_UP3;
+		else
+			goto set_params;
+		break;
+
+	/*AC_VI*/
+	case FAPI_PRIORITY_QOS_UP4:
+	case FAPI_PRIORITY_QOS_UP5:
+		if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP4))
+			prev_priority = FAPI_PRIORITY_QOS_UP4;
+		else if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP5))
+			prev_priority = FAPI_PRIORITY_QOS_UP5;
+		else
+			goto set_params;
+		break;
+
+	/*AC_VO*/
+	case FAPI_PRIORITY_QOS_UP6:
+	case FAPI_PRIORITY_QOS_UP7:
+		if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP6))
+			prev_priority = FAPI_PRIORITY_QOS_UP6;
+		else if (peer->tspec_established & BIT(FAPI_PRIORITY_QOS_UP7))
+			prev_priority = FAPI_PRIORITY_QOS_UP7;
+		else
+			goto set_params;
+		break;
+	/* invalid*/
+	default:
+		SLSI_ERR(sdev, "CAC: Invalid UP in the request\n");
+		return;
+	}
+
+	/* Look for TSPEC entry for initial request */
+	entry = find_tspec_entry(itr->id, 1);
+	if (entry) { /*same TID*/
+		cac_query_tspec_field(sdev, entry, "user_priority", &prev_priority);
+		SLSI_DBG1(sdev, SLSI_MLME, "CAC: Modify TSPEC (prev_priority =%d)\n", prev_priority);
+		/* On receiving the new medium time (second ADDTS Response) , driver shall issue
+		 * mlme-set-traffic-parameters.request with the received medium time.
+		 * Use UP from old entry so FW can replace the medium time
+		 * Delete the old entry in host, and replace UP in new entry.
+		 */
+		cac_delete_tspec_by_state(sdev, entry->id, 1);
+		if (priority != prev_priority) {
+			itr->tspec.ts_info[1] &= ~(7 << 3); /*clear the value*/
+			itr->tspec.ts_info[1] |= prev_priority << 3; /*set the value*/
+			priority = prev_priority;
+		}
+	} else {
+		/* Two distinct TSes are being admitted, so the driver needs to add both allocated medium time
+		 * The UP must be set to the same value of the first mlme-set-traffic-parameters.request so that
+		 * the FW replaces the current medium time with the new medium time.
+		 */
+		SLSI_DBG1(sdev, SLSI_MLME, "CAC: Modify TSPEC for different TID\n");
+		entry = tspec_list;
+		while (entry != NULL) {
+			if ((entry->accepted) && ((entry->tspec.ts_info[1] >> 3 & 0x07) == prev_priority)) { /*initial TS entry for same priority*/
+				medium_time += entry->tspec.medium_time;
+				priority = prev_priority;
+				break;
+			}
+			entry = entry->next;
+		}
+		if (entry == NULL) {
+			SLSI_ERR(sdev, "CAC: Failed to find entry for prev established TSPEC!!\n");
+			return;
+		}
+	}
+
+set_params:
+	SLSI_DBG1(sdev, SLSI_MLME, "sending traffic params tid [%d]", itr->id);
+	if (slsi_mlme_set_traffic_parameters(sdev, netdev, priority, medium_time, tspec->minimum_data_rate, ndev_vif->sta.sta_bss->bssid) != 0) {
+		SLSI_ERR(sdev, "CAC: Failed to send SET_TRAFFIC_PARAMETERS request\n");
+		return;
+	}
+
+	/*update the TSPEC with medium_time allocated by AP*/
+	itr->tspec.medium_time = medium_time;
+
+	/* BlockAck Control Req was previously used to enable blockack for VO & VI. This
+	 * signal is removed and expected to be replaced with MIBs - not able to see
+	 * through the haze yet!. Need to take approp. action when the cloud clears.
+	 * Historical Data:
+	 * Currently the firmware autonomously negotiates BlockAck agreement for AC_BE.
+	 * It is required for WMM-AC certification to use BlockAck for AC_VI.
+	 * So if a TSPEC for AC_VI (UP = 5 0r 4) is successfully negotiated, the host
+	 * generates an MLME-BLOCKACK-CONTROL.request, identifying that a BlockAck for the
+	 * corresponding Priority (direction set to Any) should be enabled, i.e. the F/W
+	 * will accept a downlink requested BlockAck Request, and will try to set-up an
+	 * uplink BlockAck Request for that priority (TID).
+	 * Bits for AC_BE should always be set
+	 * For WMM-AC certification, if the EDCA parameters for both VO and VI are same
+	 * during association and both are ACM = 1, then don't use BlockAck for AC_VI.
+	 */
+
+	/* Add store in MIB the msdu_lifetime value in case of ccx enabled bss */
+	if (ccx_status == BSS_CCX_ENABLED) {
+		if ((slsi_read_max_transmit_msdu_lifetime(sdev, netdev, &previous_msdu_lifetime)) != 0) {
+			previous_msdu_lifetime = MAX_TRANSMIT_MSDU_LIFETIME_NOT_VALID;
+			SLSI_ERR(sdev, "CAC: slsi_read_max_msdu_lifetime failed");
+			return;
+		}
+
+		if (slsi_send_max_transmit_msdu_lifetime(sdev, netdev, msdu_lifetime) != 0) {
+			SLSI_ERR(sdev, "CAC: slsi_send_max_msdu_lifetime failed");
+			return;
+		}
+	}
+
+	itr->accepted = 1; /* add_tspec accepted by AP*/
+	sdev->tspec_error_code = 0; /* add_tspec response received */
+	peer->tspec_established |= BIT(priority);
+	/* update RIC in add_info_elements for assoc req */
+	cac_set_ric_ie(sdev, netdev);
+}
+
+/* Name: cac_rx_wmm_action
+ * Desc: dispatch a received WMM action frame to the ADDTS-response or
+ *       DELTS-request handler
+ * sdev: pointer to the slsi_dev struct
+ * netdev: net_device the frame was received on
+ * data: buffer of the received action frame
+ * len: the length in bytes of the action frame
+ */
+void cac_rx_wmm_action(struct slsi_dev *sdev, struct net_device *netdev, struct ieee80211_mgmt *data, size_t len)
+{
+	struct ieee80211_mgmt *mgmt = data;
+	struct action_addts_rsp *addts;
+
+	if ((sdev == NULL) || (data == NULL) || (netdev == NULL) || (len == 0))
+		return;
+
+	/* Fix: reject frames shorter than the WMM action header. Without this
+	 * guard "len - sizeof(*addts) + 1" below wraps around (size_t
+	 * underflow) and a huge bogus IE length is handed to the parser.
+	 */
+	if (len < sizeof(struct action_addts_rsp))
+		return;
+
+	if (mgmt->u.action.u.wme_action.action_code == WMM_ACTION_CODE_ADDTS_RESP) {
+		addts = (struct action_addts_rsp *)&mgmt->u.action;
+		cac_process_addts_rsp(sdev, netdev, addts, mgmt->u.action.u.wme_action.variable, len - sizeof(*addts) + 1);
+	} else if (mgmt->u.action.u.wme_action.action_code == WMM_ACTION_CODE_DELTS) {
+		cac_process_delts_req(sdev, netdev, (struct action_delts_req *)&mgmt->u.action);
+	}
+}
+
+/* Name: cac_get_active_tspecs
+ * Desc: allocate and fill an array with a copy of every accepted TSPEC
+ * tspecs: out - *tspecs receives a kmalloc'ed array (caller must kfree);
+ *         set to NULL when there are no active TSPECs or on failure
+ * return: number of active TSPECs (>= 0), -1 (failure)
+ */
+int cac_get_active_tspecs(struct cac_activated_tspec **tspecs)
+{
+	struct cac_tspec *itr = tspec_list;
+	int count = 0;
+	int i = 0;
+
+	if (tspecs == NULL)
+		return -1;
+
+	*tspecs = NULL;
+	while (itr != NULL) {
+		if (itr->accepted)
+			count++;
+		itr = itr->next;
+	}
+	if (count == 0)
+		return 0;
+
+	*tspecs = kmalloc_array((size_t)count, sizeof(struct cac_activated_tspec), GFP_KERNEL);
+	if (*tspecs == NULL)
+		return -1;
+
+	itr = tspec_list;
+	while (itr != NULL) {
+		if (itr->accepted) {
+			/* Fix: index the allocated array through *tspecs. The
+			 * previous "tspecs[i]->..." indexed the caller's single
+			 * pointer and wrote out of bounds for every i > 0.
+			 */
+			(*tspecs)[i].ebw = itr->ebw;
+			memcpy(&(*tspecs)[i].tspec, &itr->tspec, sizeof(itr->tspec));
+			i++;
+		}
+		itr = itr->next;
+	}
+
+	return count;
+}
+
+/*********************************************************
+ * call cac_delete_tspec_list to delete all tspecs
+ * when the device is disconnecting
+ */
+/* Name: cac_delete_tspec_list
+ * Desc: free every entry of the global TSPEC list and leave it empty
+ * sdev: pointer to the slsi_dev struct (unused)
+ * return: None
+ */
+void cac_delete_tspec_list(struct slsi_dev *sdev)
+{
+	struct cac_tspec *entry;
+
+	SLSI_UNUSED_PARAMETER(sdev);
+
+	/* Pop entries off the head until the list is empty */
+	while (tspec_list != NULL) {
+		entry = tspec_list;
+		tspec_list = entry->next;
+		entry->accepted = 0;
+		entry->dialog_token = 0;
+		kfree(entry);
+	}
+}
+
+/* Mark every TSPEC in the global list as not admitted (used e.g. after a
+ * roam, when TSes granted by the previous AP are no longer valid). The
+ * entries themselves are kept.
+ */
+void cac_deactivate_tspecs(struct slsi_dev *sdev)
+{
+	struct cac_tspec *entry;
+
+	SLSI_UNUSED_PARAMETER(sdev);
+
+	for (entry = tspec_list; entry; entry = entry->next) {
+		entry->accepted = 0;
+		entry->dialog_token = 0;
+	}
+}
+
+/* Rebuild the additional-information elements used in (re)association
+ * requests: the vif's stored IEs followed by one RIC Data element and one
+ * WMM TSPEC element per currently-accepted TSPEC (medium_time zeroed, as it
+ * is re-granted by the new AP). With no accepted TSPEC, only the stored IEs
+ * are programmed.
+ */
+static void cac_set_ric_ie(struct slsi_dev *sdev, struct net_device *netdev)
+{
+ struct cac_tspec *itr = tspec_list;
+ int tspec_count = 0;
+ int buf_len = 0;
+ u8 *buff, *add_info_ies;
+ struct wmm_tspec_element *tspec_ie;
+ int i = 0;
+ struct netdev_vif *ndev_vif = netdev_priv(netdev);
+
+ /* Count currently admitted TSPECs */
+ while (itr) {
+ if (itr->accepted)
+ tspec_count++;
+ itr = itr->next;
+ }
+
+ if (tspec_count == 0) {
+ slsi_mlme_add_info_elements(sdev, netdev, FAPI_PURPOSE_ASSOCIATION_REQUEST,
+ ndev_vif->sta.assoc_req_add_info_elem,
+ ndev_vif->sta.assoc_req_add_info_elem_len);
+ return;
+ }
+
+ /* RDE (6 bytes), WMM TSPEC * tspec_count bytes*/
+ buf_len = 6 + (sizeof(struct wmm_tspec_element) * tspec_count);
+ buf_len += ndev_vif->sta.assoc_req_add_info_elem_len;
+ add_info_ies = kmalloc(buf_len, GFP_KERNEL);
+ if (!add_info_ies) {
+ SLSI_ERR(sdev, "malloc fail. size:%d\n", buf_len);
+ return;
+ }
+ memcpy(add_info_ies, ndev_vif->sta.assoc_req_add_info_elem, ndev_vif->sta.assoc_req_add_info_elem_len);
+
+ /* Resource Information Container (RDE) header follows the stored IEs */
+ buff = add_info_ies + ndev_vif->sta.assoc_req_add_info_elem_len;
+ buff[0] = WLAN_EID_RIC_DATA;
+ buff[1] = 4;
+ buff[2] = 0; /* random identifier */
+ /* buff[3]: resource desc count update after filling TSPEC */
+ buff[4] = 0; /* buff[4]-buff[5] status code. set to success */
+ buff[5] = 0;
+
+ /* Append one TSPEC element per accepted TS, medium_time cleared */
+ itr = tspec_list;
+ i = 0;
+ while (itr) {
+ if (itr->accepted) {
+ tspec_ie = (struct wmm_tspec_element *)&buff[6 + i * sizeof(struct wmm_tspec_element)];
+ memcpy(tspec_ie, &itr->tspec, sizeof(struct wmm_tspec_element));
+ ((struct wmm_tspec_element *)tspec_ie)->medium_time = 0;
+ i++;
+ }
+ itr = itr->next;
+ }
+ buff[3] = i;
+ slsi_mlme_add_info_elements(sdev, netdev, FAPI_PURPOSE_ASSOCIATION_REQUEST, add_info_ies, buf_len);
+ kfree(add_info_ies);
+}
+
+/* Name: cac_get_rde_tspec_ie
+ * Desc: collect pointers to the WMM TSPEC IEs advertised through RIC Data
+ *       elements in a (re)association response
+ * sdev: pointer to the slsi_dev struct
+ * assoc_rsp_ie: buffer holding the association response IEs
+ * assoc_rsp_ie_len: length of that buffer in bytes
+ * tspec_ie_arr: out - array (TSID_MAX entries) filled with TSPEC IE pointers
+ * return: number of TSPEC IE pointers stored
+ */
+static int cac_get_rde_tspec_ie(struct slsi_dev *sdev, u8 *assoc_rsp_ie, int assoc_rsp_ie_len, const u8 **tspec_ie_arr)
+{
+	const u8 *ie;
+	u16 status;
+	int tspec_count = 0, i = 0;
+
+	ie = assoc_rsp_ie;
+
+	/* Find total number of RDE TSPEC */
+	while (ie && (assoc_rsp_ie_len > ie - assoc_rsp_ie)) {
+		ie = cfg80211_find_ie(WLAN_EID_RIC_DATA, ie, assoc_rsp_ie_len - (ie - assoc_rsp_ie));
+		if (!ie)
+			break;
+		status = CAC_GET_LE16(&ie[4]);
+		if (status == 0)
+			tspec_count += ie[3]; /* TSPEC descriptor count */
+		/* Fix: always step past the current element. The old code
+		 * "continue"d without advancing on a non-zero status, so
+		 * cfg80211_find_ie() kept returning the same RIC IE and the
+		 * loop never terminated. Also advance by ie[1] + 2: the IE
+		 * length byte does not include the 2-byte EID/length header.
+		 */
+		ie += ie[1] + 2;
+	}
+
+	/* limit WMM TSPEC count to TSID_MAX */
+	if (tspec_count > TSID_MAX) {
+		SLSI_DBG1(sdev, SLSI_MLME, "received %d TSPEC but can accommodate only %d\n", tspec_count, TSID_MAX);
+		tspec_count = TSID_MAX;
+	}
+
+	/* Get all WMM TSPEC IE pointers */
+	ie = cfg80211_find_ie(WLAN_EID_RIC_DATA, assoc_rsp_ie, assoc_rsp_ie_len);
+	while (i < tspec_count && ie) {
+		ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, ie,
+					     assoc_rsp_ie_len - (ie - assoc_rsp_ie));
+		if (!ie)
+			break;
+		/* re-assoc-res can contain wmm parameter IE and wmm TSPEC IE.
+		 * we want wmm TSPEC Element)
+		 */
+		if (ie[1] > 6 && ie[6] == WMM_OUI_SUBTYPE_TSPEC_ELEMENT) {
+			tspec_ie_arr[i] = ie;
+			i++;
+		}
+		/* Step over the whole element (header + payload) so the next
+		 * search cannot restart inside the element just matched.
+		 */
+		ie += ie[1] + 2;
+	}
+
+	return i;
+}
+
+/* After a roam: invalidate all locally-admitted TSPECs, then re-admit the
+ * ones the new AP granted through RIC elements in the (re)association
+ * response, programming the firmware traffic parameters for each.
+ */
+void cac_update_roam_traffic_params(struct slsi_dev *sdev, struct net_device *dev)
+{
+ const u8 *tspec_ie_arr[TSID_MAX];
+ int assoc_rsp_tspec_count, i;
+ u32 priority;
+ struct cac_tspec *itr;
+ struct wmm_tspec_element *assoc_rsp_tspec;
+ struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_DBG3(sdev, SLSI_MLME, "\n");
+
+ /* Roamed to new AP. TSPEC admitted to previous AP are no more valid.
+ * Set all TSPEC to not admitted
+ */
+ cac_deactivate_tspecs(sdev);
+
+ if (!peer) {
+ SLSI_ERR(sdev, "AP peer entry not found\n");
+ return;
+ }
+
+ /* Find all the admitted TSPECs in assoc resp. */
+ assoc_rsp_tspec_count = cac_get_rde_tspec_ie(sdev, peer->assoc_resp_ie->data,
+ peer->assoc_resp_ie->len, tspec_ie_arr);
+
+ SLSI_DBG3(sdev, SLSI_MLME, "assoc_rsp_tspec_count:%d\n", assoc_rsp_tspec_count);
+
+ if (!assoc_rsp_tspec_count)
+ return;
+
+ /* update the admitted TSPECs from assoc resp and set traffic params in FW.*/
+ for (i = 0; i < assoc_rsp_tspec_count; i++) {
+ assoc_rsp_tspec = (struct wmm_tspec_element *)tspec_ie_arr[i];
+ SLSI_DBG3(sdev, SLSI_MLME, "rsp_tspec:[%d] ts: [%x|%x|%x] medium time[%x]\n", i,
+ assoc_rsp_tspec->ts_info[0], assoc_rsp_tspec->ts_info[1], assoc_rsp_tspec->ts_info[2],
+ assoc_rsp_tspec->medium_time);
+
+ /* Match by TID (bits 1-4 of ts_info[0]) against non-accepted entries */
+ itr = find_tspec_entry((assoc_rsp_tspec->ts_info[0] & 0x1E) >> 1, 0);
+ if (!itr) {
+ SLSI_DBG3(sdev, SLSI_MLME, "tspec entry not found\n");
+ continue;
+ }
+
+ itr->tspec.medium_time = assoc_rsp_tspec->medium_time;
+ itr->tspec.minimum_data_rate = assoc_rsp_tspec->minimum_data_rate;
+ itr->accepted = 1;
+ /* NOTE(review): the return of this query is not checked; if it ever
+ * failed, "priority" below would be used uninitialized - confirm the
+ * "user_priority" field query cannot fail for a stored TSPEC.
+ */
+ cac_query_tspec_field(sdev, itr, "user_priority", &priority);
+ peer->tspec_established |= BIT(priority);
+ SLSI_DBG3(sdev, SLSI_MLME, "tspec admitted id[%d]\n", itr->id);
+ /* NOTE(review): ndev_vif->sta.sta_bss is dereferenced without a NULL
+ * check here - presumably guaranteed non-NULL right after a roam;
+ * verify against the caller.
+ */
+ slsi_mlme_set_traffic_parameters(sdev, dev, priority, assoc_rsp_tspec->medium_time,
+ assoc_rsp_tspec->minimum_data_rate, ndev_vif->sta.sta_bss->bssid);
+ }
+}
+
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef CAC_H
+#define CAC_H
+
+#include <linux/kernel.h>
+#include "dev.h"
+#include "debug.h"
+#include "mlme.h"
+#include "mgt.h"
+
+/* management */
+#define WLAN_OUI_CISCO 0x004096 /* Cisco systems OUI */
+#define WLAN_OUI_TYPE_CISCO_EDCA 0x09
+
+/* WMM vendor-IE OUI subtypes and WMM action frame codes */
+#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WMM_VERSION 1
+#define WMM_ACTION_CODE_ADDTS_REQ 0
+#define WMM_ACTION_CODE_ADDTS_RESP 1
+#define WMM_ACTION_CODE_DELTS 2
+#define WMM_ADDTS_STATUS_ADMISSION_ACCEPTED 0
+#define WMM_ADDTS_STATUS_INVALID_PARAMETERS 1
+/* 2 - Reserved */
+#define WMM_ADDTS_STATUS_REFUSED 3
+/* 4-255 - Reserved */
+
+/* WMM TSPEC Direction Field Values */
+#define WMM_TSPEC_DIRECTION_UPLINK 0
+#define WMM_TSPEC_DIRECTION_DOWNLINK 1
+/* 2 - Reserved */
+#define WMM_TSPEC_DIRECTION_BI_DIRECTIONAL 3
+
+/* WMM TSPEC PSB Field Values */
+#define WMM_TSPEC_PSB_UNSPECIFIED 2
+
+/* ADDTS response status codes carried in the WMM action header */
+#define ADDTS_STATUS_ACCEPTED 0x00
+#define ADDTS_STATUS_INVALID_PARAM 0x01
+#define ADDTS_STATUS_REFUSED 0x03
+#define ADDTS_STATUS_DELAY 0x2F
+#define ADDTS_STATUS_UNSPECIFIED 0xC8
+#define ADDTS_STATUS_POLICY_CONFIG 0xC9
+#define ADDTS_STATUS_ASSOC_DENIED 0xCA
+#define ADDTS_STATUS_INVALID_PARAM2 0xCB
+
+#define TSINFO_MASK 0x00FFFFFF
+
+#define CCX_MAX_NUM_RATES 8
+
+/* Valid TSID range (and the size of per-TSID lookup arrays) */
+#define TSID_MIN 0
+#define TSID_MAX 7
+
+#define TSRS_RATE_PER_UNIT 500000
+#define IEEE80211_HEADER_SIZE 24
+
+#define MAX_TRANSMIT_MSDU_LIFETIME_NOT_VALID -1
+#define BSS_CCX_DISABLED 0
+#define BSS_CCX_ENABLED 1
+
+/* Macros for handling unaligned memory accesses */
+#define CAC_GET_LE16(a) ((u16)(((a)[1] << 8) | (a)[0]))
+#define CAC_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16)(val)) >> 8; \
+ (a)[0] = ((u16)(val)) & 0xff; \
+ } while (0)
+#define CAC_PUT_BE24(a, val) \
+ do { \
+ (a)[0] = (u8)((((u32)(val)) >> 16) & 0xff); \
+ (a)[1] = (u8)((((u32)(val)) >> 8) & 0xff); \
+ (a)[2] = (u8)(((u32)(val)) & 0xff); \
+ } while (0)
+#define CAC_GET_LE24(a) ((((u32)(a)[2]) << 16) | (((u32)(a)[1]) << 8) | ((u32)(a)[0]))
+#define CAC_PUT_LE24(a, val) \
+ do { \
+ (a)[2] = (u8)((((u32)(val)) >> 16) & 0xff); \
+ (a)[1] = (u8)((((u32)(val)) >> 8) & 0xff); \
+ (a)[0] = (u8)(((u32)(val)) & 0xff); \
+ } while (0)
+#define CAC_GET_LE32(a) ((((u32)(a)[3]) << 24) | (((u32)(a)[2]) << 16) | \
+ (((u32)(a)[1]) << 8) | ((u32)(a)[0]))
+#define CAC_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8)((((u32)(val)) >> 24) & 0xff); \
+ (a)[2] = (u8)((((u32)(val)) >> 16) & 0xff); \
+ (a)[1] = (u8)((((u32)(val)) >> 8) & 0xff); \
+ (a)[0] = (u8)(((u32)(val)) & 0xff); \
+ } while (0)
+
+#define IEEE80211_FC(type, stype) (u16)(type | stype)
+
+/* WMM TSPEC Element */
+struct wmm_tspec_element {
+ char eid; /* 221 = 0xdd */
+ u8 length; /* 6 + 55 = 61 */
+ u8 oui[3]; /* 00:50:f2 */
+ u8 oui_type; /* 2 */
+ u8 oui_subtype; /* 2 */
+ u8 version; /* 1 */
+ /* WMM TSPEC body (55 octets): */
+ u8 ts_info[3];
+ u16 nominal_msdu_size;
+ u16 maximum_msdu_size;
+ u32 minimum_service_interval;
+ u32 maximum_service_interval;
+ u32 inactivity_interval;
+ u32 suspension_interval;
+ u32 service_start_time;
+ u32 minimum_data_rate;
+ u32 mean_data_rate;
+ u32 peak_data_rate;
+ u32 maximum_burst_size;
+ u32 delay_bound;
+ u32 minimum_phy_rate;
+ u16 surplus_bandwidth_allowance;
+ u16 medium_time;
+} __packed;
+
+/* Default MSDU lifetime used when the CCX EDCA IE does not provide one */
+#define MSDU_LIFETIME_DEFAULT 512
+
+/* One admitted TSPEC as reported by cac_get_active_tspecs() */
+struct cac_activated_tspec {
+ struct wmm_tspec_element tspec;
+ int ebw;
+};
+
+/* Descriptor for a single configurable/queryable TSPEC field */
+struct tspec_field {
+ const char *name; /* field name used by the config/query interface */
+ int read_only; /* non-zero: field cannot be modified */
+ int is_tsinfo_field; /* non-zero: field lives inside the ts_info bits */
+ u8 size; /* field width in bytes (or bits for ts_info fields) */
+ u32 offset; /* offset into struct wmm_tspec_element (or bit shift) */
+};
+
+/* Node of the driver-local singly-linked list of TSPECs */
+struct cac_tspec {
+ struct cac_tspec *next;
+ int id; /* local TSPEC id (TSID) */
+ struct wmm_tspec_element tspec;
+ u8 psb_specified; /* power-save behaviour explicitly configured */
+ int ebw; /* extra bandwidth requested in ADDTS */
+ int accepted; /* non-zero once the AP admitted this TS */
+ u8 dialog_token; /* token of the outstanding ADDTS request */
+};
+
+/* Byte offset of member @m within struct wmm_tspec_element. Implemented with
+ * the standard offsetof() (available via linux/kernel.h -> linux/stddef.h)
+ * instead of the hand-rolled null-pointer-dereference idiom, which is
+ * undefined behavior in C.
+ */
+#define OFFSETOF(m) offsetof(struct wmm_tspec_element, m)
+
+/* Common header of WMM action frames (category/action/token/status) */
+struct wmm_action_hdr {
+ u8 category;
+ u8 action;
+ u8 dialog_token;
+ u8 status_code;
+} __packed;
+
+/* ADDTS request: action header followed by the requested TSPEC */
+struct action_addts_req {
+ struct wmm_action_hdr hdr;
+ struct wmm_tspec_element tspec;
+} __packed;
+
+/* ADDTS response: action header; the granted TSPEC and further IEs follow
+ * the header in the frame body
+ */
+struct action_addts_rsp {
+ struct wmm_action_hdr hdr;
+} __packed;
+
+/* DELTS request: action header followed by the TSPEC being torn down */
+struct action_delts_req {
+ struct wmm_action_hdr hdr;
+ struct wmm_tspec_element tspec;
+} __packed;
+/* prototypes for public functions */
+int cac_ctrl_create_tspec(struct slsi_dev *sdev, char *args);
+int cac_ctrl_config_tspec(struct slsi_dev *sdev, char *args);
+int cac_ctrl_send_addts(struct slsi_dev *sdev, char *args);
+int cac_ctrl_send_delts(struct slsi_dev *sdev, char *args);
+int cac_update_local_tspec(struct slsi_dev *sdev, u16 msdu_lifetime, struct wmm_tspec_element *tspec);
+int cac_get_active_tspecs(struct cac_activated_tspec **tspecs);
+void cac_delete_tspec_list(struct slsi_dev *sdev);
+int cac_ctrl_delete_tspec(struct slsi_dev *sdev, char *args);
+void cac_rx_wmm_action(struct slsi_dev *sdev, struct net_device *netdev, struct ieee80211_mgmt *data, size_t len);
+void cac_update_roam_traffic_params(struct slsi_dev *sdev, struct net_device *dev);
+#endif /* CAC_H */
--- /dev/null
+/***************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/version.h>
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+#include "dev.h"
+#include "cfg80211_ops.h"
+#include "debug.h"
+#include "mgt.h"
+#include "mlme.h"
+#include "netif.h"
+#include "unifiio.h"
+#include "mib.h"
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+#include "nl80211_vendor.h"
+
+/* Number of channels in the 2.4 GHz band (channels 1-14) */
+#define SLSI_MAX_CHAN_2G_BAND 14
+
+/* P2P-GO keep-alive interval, tunable at module load / via sysfs (0644) */
+static uint keep_alive_period = SLSI_P2PGO_KEEP_ALIVE_PERIOD_SEC;
+module_param(keep_alive_period, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(keep_alive_period, "default is 10 seconds");
+
+/* Return true when the mobile-hotspot (AP) net_device exists and its vif is
+ * currently available; false otherwise. Takes the vif mutex only long enough
+ * to sample is_available.
+ */
+static bool slsi_is_mhs_active(struct slsi_dev *sdev)
+{
+	struct net_device *mhs_dev = sdev->netdev_ap;
+	struct netdev_vif *ndev_vif;
+	bool ret = false; /* initialized: also the no-AP-netdev result */
+
+	if (mhs_dev) {
+		ndev_vif = netdev_priv(mhs_dev);
+		SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+		ret = ndev_vif->is_available;
+		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	}
+
+	/* bool function: return false/true, not 0 */
+	return ret;
+}
+
+/* cfg80211 add_virtual_intf handler. The signature varies with the kernel
+ * version: >= 4.12 drops the flags pointer, >= 4.1 adds name_assign_type,
+ * >= 3.10.9 returns a wireless_dev, older kernels a net_device.
+ * Refuses to add an interface while the mobile hotspot is active.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+struct wireless_dev *slsi_add_virtual_intf(struct wiphy *wiphy,
+					   const char *name,
+					   unsigned char name_assign_type,
+					   enum nl80211_iftype type,
+					   struct vif_params *params)
+{
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+struct wireless_dev *slsi_add_virtual_intf(struct wiphy *wiphy,
+					   const char *name,
+					   unsigned char name_assign_type,
+					   enum nl80211_iftype type,
+					   u32 *flags,
+					   struct vif_params *params)
+{
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+struct wireless_dev *slsi_add_virtual_intf(struct wiphy *wiphy,
+					   const char *name,
+					   enum nl80211_iftype type,
+					   u32 *flags,
+					   struct vif_params *params)
+{
+#else
+/* Fix: the identifier contained a stray space ("slsi_add_ virtual_intf"),
+ * a syntax error whenever this pre-3.10.9 branch was compiled.
+ */
+struct net_device *slsi_add_virtual_intf(struct wiphy *wiphy,
+					 char *name,
+					 enum nl80211_iftype type,
+					 u32 *flags,
+					 struct vif_params *params)
+{
+#endif
+
+	struct net_device *dev = NULL;
+	struct netdev_vif *ndev_vif = NULL;
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+
+	/* "flags" exists in every signature variant below 4.12; the previous
+	 * "<= 4.11.0" bound left 4.11.x kernels with an unused-parameter warning.
+	 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	SLSI_UNUSED_PARAMETER(flags);
+#endif
+	SLSI_NET_DBG1(dev, SLSI_CFG80211, "Intf name:%s, type:%d, macaddr:%pM\n", name, type, params->macaddr);
+	if (slsi_is_mhs_active(sdev)) {
+		SLSI_ERR(sdev, "MHS is active. cannot add new interface\n");
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+	dev = slsi_dynamic_interface_create(wiphy, name, type, params);
+	if (!dev)
+		goto exit_with_error;
+	ndev_vif = netdev_priv(dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	return &ndev_vif->wdev;
+#else
+	return dev;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
+
+exit_with_error:
+	return ERR_PTR(-ENODEV);
+}
+
+/* cfg80211 del_virtual_intf handler: stop the net_device, remove it under
+ * the rtnl lock and clear the AP-netdev bookkeeping. Returns 0 on success.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+int slsi_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct net_device *dev = wdev->netdev;
+
+#else
+int slsi_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+{
+#endif
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Dev name:%s\n", dev->name);
+
+ slsi_stop_net_dev(sdev, dev);
+ slsi_netif_remove_rtlnl_locked(sdev, dev);
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ if (dev == sdev->netdev_ap)
+ rcu_assign_pointer(sdev->netdev_ap, NULL);
+ /* NOTE(review): when dev was the AP netdev, netdev_ap has just been set
+ * to NULL above, so this publishes NULL into the P2PX_SWLAN slot -
+ * confirm that ordering is intentional.
+ */
+ if (!sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN])
+ rcu_assign_pointer(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN], sdev->netdev_ap);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+ return 0;
+}
+
+/* cfg80211 change_virtual_intf handler: switch the interface type of a
+ * non-activated vif. Only the listed iftypes are accepted; the change is
+ * recorded on both the driver vif and the wireless_dev.
+ * Returns 0 on success or -EINVAL.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+int slsi_change_virtual_intf(struct wiphy *wiphy,
+			     struct net_device *dev,
+			     enum nl80211_iftype type,
+			     struct vif_params *params)
+{
+#else
+int slsi_change_virtual_intf(struct wiphy *wiphy,
+			     struct net_device *dev,
+			     enum nl80211_iftype type,
+			     u32 *flags,
+			     struct vif_params *params)
+{
+#endif
+
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int r = 0;
+
+	/* "flags" exists only in the pre-4.12 signature; match that split
+	 * exactly (the previous "<= 4.11.0" bound missed 4.11.x kernels).
+	 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	SLSI_UNUSED_PARAMETER(flags);
+#endif
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	SLSI_NET_DBG1(dev, SLSI_CFG80211, "type:%u, iftype:%d\n", type, ndev_vif->iftype);
+
+	/* Type changes are only allowed while the vif is down */
+	if (WARN_ON(ndev_vif->activated)) {
+		r = -EINVAL;
+		goto exit;
+	}
+
+	switch (type) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+	case NL80211_IFTYPE_ADHOC:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_GO:
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	case NL80211_IFTYPE_MONITOR:
+#endif
+		ndev_vif->iftype = type;
+		dev->ieee80211_ptr->iftype = type;
+		if (params)
+			dev->ieee80211_ptr->use_4addr = params->use_4addr;
+		break;
+	default:
+		r = -EINVAL;
+		break;
+	}
+
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+
+/* cfg80211 add_key handler: install a pairwise/group/WEP/IGTK key via
+ * MLME-SETKEYS, with special-case paths for PMK and WEP ciphers and
+ * post-install handling for WAPI, gratuitous ARP and AP peer completion.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_add_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = NULL;
+ int r = 0;
+ u16 key_type = FAPI_KEYTYPE_GROUP;
+
+ if (WARN_ON(pairwise && !mac_addr))
+ return -EINVAL;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "(key_index:%d, pairwise:%d, address:%pM, cipher:0x%.8X, key_len:%d,"
+ "vif_type:%d)\n", key_index, pairwise, mac_addr, params->cipher, params->key_len,
+ ndev_vif->vif_type);
+
+ if (!ndev_vif->activated) {
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "vif not active\n");
+ goto exit;
+ }
+
+ /* PMK "cipher": hand the PMK straight to the firmware, no key install */
+ if (params->cipher == WLAN_CIPHER_SUITE_PMK) {
+ r = slsi_mlme_set_pmk(sdev, dev, params->key, params->key_len);
+ goto exit;
+ }
+
+ /* Resolve the MAC address the key is installed against */
+ if (mac_addr && pairwise) {
+ /* All Pairwise Keys will have a peer record. */
+ peer = slsi_get_peer_from_mac(sdev, dev, mac_addr);
+ if (peer)
+ mac_addr = peer->address;
+ } else if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ /* Sta Group Key will use the peer address */
+ peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+ if (peer)
+ mac_addr = peer->address;
+ } else if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && !pairwise)
+ /* AP Group Key will use the Interface address */
+ mac_addr = dev->dev_addr;
+ else {
+ r = -EINVAL;
+ goto exit;
+ }
+
+ /*Treat WEP key as pairwise key*/
+ if ((ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ ((params->cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (params->cipher == WLAN_CIPHER_SUITE_WEP104)) && peer) {
+ u8 bc_mac_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "WEP Key: store key\n");
+ r = slsi_mlme_set_key(sdev, dev, key_index, FAPI_KEYTYPE_WEP, bc_mac_addr, params);
+ if (r == FAPI_RESULTCODE_SUCCESS) {
+ /* if static ip is set before connection, after setting keys enable powersave. */
+ if (ndev_vif->ipaddress)
+ slsi_mlme_powermgt(sdev, dev, ndev_vif->set_power_mode);
+ } else {
+ SLSI_NET_ERR(dev, "Error adding WEP key\n");
+ }
+ goto exit;
+ }
+
+ if (pairwise) {
+ key_type = FAPI_KEYTYPE_PAIRWISE;
+ if (WARN_ON(!peer)) {
+ r = -EINVAL;
+ goto exit;
+ }
+ } else if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ key_type = FAPI_KEYTYPE_IGTK;
+ }
+
+ if (WARN(!mac_addr, "mac_addr not defined\n")) {
+ r = -EINVAL;
+ goto exit;
+ }
+ /* NOTE(review): key_index 4 on an AP vif is deliberately not sent to the
+ * firmware here - presumably that slot is handled elsewhere; confirm.
+ */
+ if (!((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (key_index == 4))) {
+ r = slsi_mlme_set_key(sdev, dev, key_index, key_type, mac_addr, params);
+ if (r) {
+ SLSI_NET_ERR(dev, "error in adding key (key_type: %d)\n", key_type);
+ goto exit;
+ }
+ }
+
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ ndev_vif->sta.eap_hosttag = 0xFFFF;
+ /* if static IP is set before connection, after setting keys enable powersave. */
+ if (ndev_vif->ipaddress)
+ slsi_mlme_powermgt(sdev, dev, ndev_vif->set_power_mode);
+ }
+
+ /* Record what kind of key is now installed */
+ if (key_type == FAPI_KEYTYPE_GROUP) {
+ ndev_vif->sta.group_key_set = true;
+ ndev_vif->ap.cipher = params->cipher;
+ } else if (key_type == FAPI_KEYTYPE_PAIRWISE) {
+ if (peer)
+ peer->pairwise_key_set = true;
+ }
+
+ if (peer) {
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ if (pairwise) {
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4) { /*WAPI */
+ slsi_mlme_connect_resp(sdev, dev);
+ slsi_set_packet_filters(sdev, dev);
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+ }
+ }
+
+ if (ndev_vif->sta.gratuitous_arp_needed) {
+ ndev_vif->sta.gratuitous_arp_needed = false;
+ slsi_send_gratuitous_arp(sdev, dev);
+ }
+ } else if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && pairwise) {
+ /* Pairwise key on AP: the 4-way handshake is complete */
+ slsi_mlme_connected_resp(sdev, dev, peer->aid);
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+ peer->connected_state = SLSI_STA_CONN_STATE_CONNECTED;
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO)
+ ndev_vif->ap.p2p_gc_keys_set = true;
+ }
+ }
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+int slsi_del_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr)
+{
+ SLSI_UNUSED_PARAMETER(wiphy);
+ SLSI_UNUSED_PARAMETER(key_index);
+ SLSI_UNUSED_PARAMETER(pairwise);
+ SLSI_UNUSED_PARAMETER(mac_addr);
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_DELETEKEYS.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int slsi_get_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
+ void (*callback)(void *cookie, struct key_params *))
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct key_params params;
+
+#define SLSI_MAX_KEY_SIZE 8 /*used only for AP case, so WAPI not considered*/
+ u8 key_seq[SLSI_MAX_KEY_SIZE] = { 0 };
+ int r = 0;
+
+ SLSI_UNUSED_PARAMETER(mac_addr);
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_GET_KEY_SEQUENCE.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "(key_index:%d, pairwise:%d, mac_addr:%pM, vif_type:%d)\n", key_index,
+ pairwise, mac_addr, ndev_vif->vif_type);
+
+ if (!ndev_vif->activated) {
+ SLSI_NET_ERR(dev, "vif not active\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ /* The get_key call is expected only for AP vif with Group Key type */
+ if (FAPI_VIFTYPE_AP != ndev_vif->vif_type) {
+ SLSI_NET_ERR(dev, "Invalid vif type: %d\n", ndev_vif->vif_type);
+ r = -EINVAL;
+ goto exit;
+ }
+ if (pairwise) {
+ SLSI_NET_ERR(dev, "Invalid key type\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ memset(¶ms, 0, sizeof(params));
+ /* Update params with sequence number, key field would be updated NULL */
+ params.key = NULL;
+ params.key_len = 0;
+ params.cipher = ndev_vif->ap.cipher;
+ if (!((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (key_index == 4))) {
+ r = slsi_mlme_get_key(sdev, dev, key_index, FAPI_KEYTYPE_GROUP, key_seq, ¶ms.seq_len);
+
+ if (!r) {
+ params.seq = key_seq;
+ callback(cookie, ¶ms);
+ }
+ }
+#undef SLSI_MAX_KEY_SIZE
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+static size_t slsi_strip_wsc_p2p_ie(const u8 *src_ie, size_t src_ie_len, u8 *dest_ie, bool strip_wsc, bool strip_p2p)
+{
+ const u8 *ie;
+ const u8 *next_ie;
+ size_t dest_ie_len = 0;
+
+ if (!dest_ie || !(strip_p2p || strip_wsc))
+ return dest_ie_len;
+
+ for (ie = src_ie; (ie - src_ie) < src_ie_len; ie = next_ie) {
+ next_ie = ie + ie[1] + 2;
+
+ if (ie[0] == WLAN_EID_VENDOR_SPECIFIC && ie[1] > 4) {
+ int i;
+ unsigned int oui = 0;
+
+ for (i = 0; i < 4; i++)
+ oui = (oui << 8) | ie[5 - i];
+
+ if (strip_wsc && (oui == SLSI_WPS_OUI_PATTERN))
+ continue;
+ if (strip_p2p && (oui == SLSI_P2P_OUI_PATTERN))
+ continue;
+ }
+
+ if (next_ie - src_ie <= src_ie_len) {
+ memcpy(dest_ie + dest_ie_len, ie, ie[1] + 2);
+ dest_ie_len += ie[1] + 2;
+ }
+ }
+
+ return dest_ie_len;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+int slsi_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct net_device *dev = request->wdev->netdev;
+
+#else
+int slsi_scan(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_scan_request *request)
+{
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
+
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ u16 scan_type = FAPI_SCANTYPE_FULL_SCAN;
+ int r = 0;
+ u8 *scan_ie;
+ size_t scan_ie_len;
+ bool strip_wsc = false;
+ bool strip_p2p = false;
+ struct ieee80211_channel *channels[64];
+ int i, chan_count = 0;
+ struct cfg80211_scan_info info = {.aborted = false};
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ u8 mac_addr_mask[ETH_ALEN] = {0xFF};
+#endif
+
+ if (WARN_ON(!request->wdev))
+ return -EINVAL;
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_WARN(dev, "not supported in WlanLite mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Reject scan request if Group Formation is in progress */
+ if (sdev->p2p_state == P2P_ACTION_FRAME_TX_RX) {
+ SLSI_NET_INFO(dev, "Scan received in P2P Action Frame Tx/Rx state - Reject\n");
+ return -EBUSY;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
+ SLSI_NET_INFO(dev, "Rejecting scan request as last scan is still running\n");
+ r = -EBUSY;
+ goto exit;
+ }
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ if (ndev_vif->is_wips_running && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ int ret = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "Scan invokes DRIVER_BCN_ABORT\n");
+ ret = slsi_mlme_set_forward_beacon(sdev, dev, FAPI_ACTION_STOP);
+
+ if (!ret) {
+ ret = slsi_send_forward_beacon_abort_vendor_event(sdev,
+ SLSI_FORWARD_BEACON_ABORT_REASON_SCANNING);
+ }
+ }
+#endif
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "channels:%d, ssids:%d, ie_len:%d, vif_index:%d\n", request->n_channels,
+ request->n_ssids, (int)request->ie_len, ndev_vif->ifnum);
+
+ for (i = 0; i < request->n_channels; i++)
+ channels[i] = request->channels[i];
+ chan_count = request->n_channels;
+
+ if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+ if (sdev->initial_scan) {
+ sdev->initial_scan = false;
+ scan_type = FAPI_SCANTYPE_INITIAL_SCAN;
+ }
+ ndev_vif->unsync.slsi_p2p_continuous_fullscan = false;
+ }
+
+ /* Update scan timing for P2P social channels scan.*/
+ if ((request->ie) &&
+ cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, request->ie, request->ie_len) &&
+ request->ssids && SLSI_IS_P2P_SSID(request->ssids[0].ssid, request->ssids[0].ssid_len)) {
+ /* In supplicant during joining procedure the P2P GO scan
+ * with GO's operating channel comes on P2P device. Hence added the
+ * check for n_channels as 1
+ */
+ if (!ndev_vif->drv_in_p2p_procedure) {
+ if (delayed_work_pending(&ndev_vif->unsync.unset_channel_expiry_work)) {
+ cancel_delayed_work(&ndev_vif->unsync.unset_channel_expiry_work);
+ slsi_mlme_unset_channel_req(sdev, dev);
+ ndev_vif->driver_channel = 0;
+ }
+ }
+ if (request->n_channels == SLSI_P2P_SOCIAL_CHAN_COUNT || request->n_channels == 1) {
+ scan_type = FAPI_SCANTYPE_P2P_SCAN_SOCIAL;
+ ndev_vif->unsync.slsi_p2p_continuous_fullscan = false;
+ } else if (request->n_channels > SLSI_P2P_SOCIAL_CHAN_COUNT) {
+ if (!ndev_vif->unsync.slsi_p2p_continuous_fullscan) {
+ scan_type = FAPI_SCANTYPE_P2P_SCAN_FULL;
+ ndev_vif->unsync.slsi_p2p_continuous_fullscan = true;
+ } else {
+ int count = 0, chann = 0;
+
+ scan_type = FAPI_SCANTYPE_P2P_SCAN_SOCIAL;
+ ndev_vif->unsync.slsi_p2p_continuous_fullscan = false;
+ for (i = 0; i < request->n_channels; i++) {
+ chann = channels[i]->hw_value & 0xFF;
+ if (chann == 1 || chann == 6 || chann == 11) {
+ channels[count] = request->channels[i];
+ count++;
+ }
+ }
+ chan_count = count;
+ }
+ }
+ }
+
+ if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif) && (request->ie)) {
+ const u8 *ie;
+
+ /* check HS2 related bits in extended capabilties (interworking, WNM,QoS Map, BSS transition) and set in MIB*/
+ r = slsi_mlme_set_hs2_ext_cap(sdev, dev, request->ie, request->ie_len);
+ if (r)
+ goto exit;
+
+ /* Supplicant adds wsc and p2p in Station scan at the end of scan request ie.
+ * for non-wps case remove both wps and p2p IEs
+ * for wps case remove only p2p IE
+ */
+
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, request->ie, request->ie_len);
+ if (ie && ie[1] > SLSI_WPS_REQUEST_TYPE_POS &&
+ ie[SLSI_WPS_REQUEST_TYPE_POS] == SLSI_WPS_REQUEST_TYPE_ENROLEE_INFO_ONLY)
+ strip_wsc = true;
+
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, request->ie, request->ie_len);
+ if (ie)
+ strip_p2p = true;
+ }
+
+ if (strip_wsc || strip_p2p) {
+ scan_ie = kmalloc(request->ie_len, GFP_KERNEL);
+ if (!scan_ie) {
+ SLSI_NET_INFO(dev, "Out of memory for scan IEs\n");
+ r = -ENOMEM;
+ goto exit;
+ }
+ scan_ie_len = slsi_strip_wsc_p2p_ie(request->ie, request->ie_len, scan_ie, strip_wsc, strip_p2p);
+ } else {
+ scan_ie = (u8 *)request->ie;
+ scan_ie_len = request->ie_len;
+ }
+
+ /* Flush out any outstanding single scan timeout work */
+ cancel_delayed_work(&ndev_vif->scan_timeout_work);
+
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = false;
+ slsi_purge_scan_results(ndev_vif, SLSI_SCAN_HW_ID);
+
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ if (sdev->fw_mac_randomization_enabled) {
+ memcpy(sdev->scan_mac_addr, request->mac_addr, ETH_ALEN);
+ r = slsi_set_mac_randomisation_mask(sdev, request->mac_addr_mask);
+ if (!r)
+ sdev->scan_addr_set = 1;
+ } else {
+ SLSI_NET_INFO(dev, "Mac Randomization is not enabled in Firmware\n");
+ sdev->scan_addr_set = 0;
+ }
+ } else
+#endif
+ if (sdev->scan_addr_set) {
+ memset(mac_addr_mask, 0xFF, ETH_ALEN);
+ r = slsi_set_mac_randomisation_mask(sdev, mac_addr_mask);
+ sdev->scan_addr_set = 0;
+ }
+#endif
+
+ r = slsi_mlme_add_scan(sdev,
+ dev,
+ scan_type,
+ FAPI_REPORTMODE_REAL_TIME,
+ request->n_ssids,
+ request->ssids,
+ chan_count,
+ channels,
+ NULL,
+ scan_ie,
+ scan_ie_len,
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan);
+
+ if (r != 0) {
+ if (r > 0) {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Nothing to be done\n");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+ cfg80211_scan_done(request, &info);
+#else
+ cfg80211_scan_done(request, false);
+#endif
+ r = 0;
+ } else {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "add_scan error: %d\n", r);
+ r = -EIO;
+ }
+ } else {
+ ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req = request;
+
+ /* if delayed work is already scheduled, queue delayed work fails. So set
+ * requeue_timeout_work flag to enqueue delayed work in the timeout handler
+ */
+ if (queue_delayed_work(sdev->device_wq, &ndev_vif->scan_timeout_work,
+ msecs_to_jiffies(SLSI_FW_SCAN_DONE_TIMEOUT_MSEC)))
+ ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
+ else
+ ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = true;
+
+ /* Update State only for scan in Device role */
+ if (SLSI_IS_VIF_INDEX_P2P(ndev_vif) && (!SLSI_IS_P2P_GROUP_STATE(sdev))) {
+ if (scan_type == FAPI_SCANTYPE_P2P_SCAN_SOCIAL)
+ SLSI_P2P_STATE_CHANGE(sdev, P2P_SCANNING);
+ } else if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif) && scan_ie_len) {
+ kfree(ndev_vif->probe_req_ies);
+ ndev_vif->probe_req_ies = kmalloc(request->ie_len, GFP_KERNEL);
+ if (!ndev_vif->probe_req_ies) /* Don't fail, continue as it would still work */
+ ndev_vif->probe_req_ie_len = 0;
+ else {
+ ndev_vif->probe_req_ie_len = scan_ie_len;
+ memcpy(ndev_vif->probe_req_ies, scan_ie, scan_ie_len);
+ }
+ }
+ }
+ if (strip_p2p || strip_wsc)
+ kfree(scan_ie);
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+}
+
+int slsi_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int r;
+ u8 *scan_ie;
+ size_t scan_ie_len;
+ bool strip_wsc = false;
+ bool strip_p2p = false;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_ADD_SCAN.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Allow sched_scan only on wlan0. For P2PCLI interface, sched_scan might get requested following a
+ * wlan0 scan and its results being shared to sibling interfaces. Reject sched_scan for other interfaces.
+ */
+ if (!SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+ SLSI_NET_INFO(dev, "Scheduled scan req received on vif %d - Reject\n", ndev_vif->ifnum);
+ return -EINVAL;
+ }
+
+ /* Unlikely to get a schedule scan while Group formation is in progress.
+ * In case it is requested, it will be rejected.
+ */
+ if (sdev->p2p_state == P2P_ACTION_FRAME_TX_RX) {
+ SLSI_NET_INFO(dev, "Scheduled scan req received in P2P Action Frame Tx/Rx state - Reject\n");
+ return -EBUSY;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "channels:%d, ssids:%d, ie_len:%d, vif_index:%d\n", request->n_channels,
+ request->n_ssids, (int)request->ie_len, ndev_vif->ifnum);
+
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].sched_req) {
+ r = -EBUSY;
+ goto exit;
+ }
+
+ if (request->ie) {
+ const u8 *ie;
+ /* Supplicant adds wsc and p2p in Station scan at the end of scan request ie.
+ * Remove both wps and p2p IEs.
+ * Scheduled scan is not used for wsc, So no need to check for wsc request type
+ */
+
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, request->ie, request->ie_len);
+ if (ie)
+ strip_wsc = true;
+
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, request->ie, request->ie_len);
+ if (ie)
+ strip_p2p = true;
+ }
+
+ if (strip_wsc || strip_p2p) {
+ scan_ie = kmalloc(request->ie_len, GFP_KERNEL);
+ if (!scan_ie) {
+ SLSI_NET_INFO(dev, "Out of memory for scan IEs\n");
+ r = -ENOMEM;
+ goto exit;
+ }
+ scan_ie_len = slsi_strip_wsc_p2p_ie(request->ie, request->ie_len, scan_ie, strip_wsc, strip_p2p);
+ } else {
+ scan_ie = (u8 *)request->ie;
+ scan_ie_len = request->ie_len;
+ }
+
+ slsi_purge_scan_results(ndev_vif, SLSI_SCAN_SCHED_ID);
+ r = slsi_mlme_add_sched_scan(sdev, dev, request, scan_ie, scan_ie_len);
+
+ if (strip_p2p || strip_wsc)
+ kfree(scan_ie);
+
+ if (r != 0) {
+ if (r > 0) {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Nothing to be done\n");
+ cfg80211_sched_scan_stopped(wiphy, request->reqid);
+ r = 0;
+ } else {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "add_scan error: %d\n", r);
+ r = -EIO;
+ }
+ } else {
+ ndev_vif->scan[SLSI_SCAN_SCHED_ID].sched_req = request;
+ }
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+int slsi_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
+{
+#else
+int slsi_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+#endif
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int r = 0;
+
+ SLSI_UNUSED_PARAMETER(reqid);
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "vif_index:%d", ndev_vif->ifnum);
+ if (!ndev_vif->scan[SLSI_SCAN_SCHED_ID].sched_req) {
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "No sched scan req\n");
+ goto exit;
+ }
+
+ r = slsi_mlme_del_scan(sdev, dev, (ndev_vif->ifnum << 8 | SLSI_SCAN_SCHED_ID), false);
+
+ ndev_vif->scan[SLSI_SCAN_SCHED_ID].sched_req = NULL;
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+}
+
+static void slsi_abort_hw_scan(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Abort on-going scan, vif_index:%d,"
+ "ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req:%p\n", ndev_vif->ifnum,
+ ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req);
+
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
+ (void)slsi_mlme_del_scan(sdev, dev, ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID, false);
+ slsi_scan_complete(sdev, dev, SLSI_SCAN_HW_ID, false);
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+}
+
+int slsi_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct netdev_vif *ndev_p2p_vif;
+ u8 device_address[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ int r = 0;
+ u16 capability = WLAN_CAPABILITY_ESS;
+ struct slsi_peer *peer;
+ u16 prev_vif_type;
+ u32 action_frame_bmap;
+ struct net_device *p2p_dev;
+ const u8 *bssid;
+ struct ieee80211_channel *channel;
+ const u8 *connected_ssid = NULL;
+ u8 peer_address[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u16 center_freq = 0;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_CONNECT.request\n");
+ return -EOPNOTSUPP;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ capability = sme->privacy ? IEEE80211_PRIVACY_ON : IEEE80211_PRIVACY_OFF;
+#else
+ if (sme->privacy)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+#endif
+
+ SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+ if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+ SLSI_WARN(sdev, "device not started yet (device_state:%d)\n", sdev->device_state);
+ SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (ndev_vif->sta.sta_bss)
+ SLSI_ETHER_COPY(peer_address, ndev_vif->sta.sta_bss->bssid);
+
+ center_freq = sme->channel ? sme->channel->center_freq : 0;
+
+ SLSI_NET_INFO(dev, "%.*s Freq=%d vifStatus=%d CurrBssid:%pM NewBssid:%pM Qinfo:%d ieLen:%d\n",
+ (int)sme->ssid_len, sme->ssid, center_freq, ndev_vif->sta.vif_status,
+ peer_address, sme->bssid, sdev->device_config.qos_info, (int)sme->ie_len);
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+ SCSC_WLOG_PKTFATE_NEW_ASSOC();
+ if (sme->bssid) {
+ SCSC_WLOG_DRIVER_EVENT(WLOG_NORMAL, WIFI_EVENT_ASSOCIATION_REQUESTED, 3,
+ WIFI_TAG_BSSID, ETH_ALEN, sme->bssid,
+ WIFI_TAG_SSID, sme->ssid_len, sme->ssid,
+ WIFI_TAG_CHANNEL, sizeof(u16), ¢er_freq);
+ // ?? WIFI_TAG_VENDOR_SPECIFIC, sizeof(RSSE), RSSE);
+ }
+#endif
+
+ if (SLSI_IS_HS2_UNSYNC_VIF(ndev_vif)) {
+ slsi_wlan_unsync_vif_deactivate(sdev, dev, true);
+ } else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
+ SLSI_NET_ERR(dev, "Connect requested on incorrect vif\n");
+ goto exit_with_error;
+ }
+
+ if (WARN_ON(!sme->ssid))
+ goto exit_with_error;
+
+ if (WARN_ON(sme->ssid_len > IEEE80211_MAX_SSID_LEN))
+ goto exit_with_error;
+
+ if (sme->bssid) {
+ if ((SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) && (sdev->p2p_state == P2P_GROUP_FORMED_CLI)) {
+ p2p_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
+ if (p2p_dev) {
+ ndev_p2p_vif = netdev_priv(p2p_dev);
+ if (ndev_p2p_vif->sta.sta_bss) {
+ if (SLSI_ETHER_EQUAL(ndev_p2p_vif->sta.sta_bss->bssid, sme->bssid)) {
+ SLSI_NET_ERR(dev, "Connect Request Rejected\n");
+ goto exit_with_error;
+ }
+ }
+ }
+ }
+ }
+
+ if ((ndev_vif->vif_type == FAPI_VIFTYPE_STATION) && (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ /*reassociation*/
+ peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+ if (WARN_ON(!peer))
+ goto exit_with_error;
+
+ if (!sme->bssid) {
+ SLSI_NET_ERR(dev, "Require bssid in reassoc but received null\n");
+ goto exit_with_error;
+ }
+ if (!memcmp(peer->address, sme->bssid, ETH_ALEN)) { /*same bssid*/
+ r = slsi_mlme_reassociate(sdev, dev);
+ if (r) {
+ SLSI_NET_ERR(dev, "Failed to reassociate : %d\n", r);
+ } else {
+ ndev_vif->sta.vif_status = SLSI_VIF_STATUS_CONNECTING;
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+ }
+ goto exit;
+ } else { /*different bssid*/
+ connected_ssid = cfg80211_find_ie(WLAN_EID_SSID, ndev_vif->sta.sta_bss->ies->data, ndev_vif->sta.sta_bss->ies->len);
+
+ if (!connected_ssid) {
+ SLSI_NET_ERR(dev, "Require ssid in roam but received null\n");
+ goto exit_with_error;
+ }
+
+ if (!memcmp(&connected_ssid[2], sme->ssid, connected_ssid[1])) { /*same ssid*/
+ if (!sme->channel) {
+ SLSI_NET_ERR(dev, "Roaming has been rejected, as sme->channel is null\n");
+ goto exit_with_error;
+ }
+ r = slsi_mlme_roam(sdev, dev, sme->bssid, sme->channel->center_freq);
+ if (r) {
+ SLSI_NET_ERR(dev, "Failed to roam : %d\n", r);
+ goto exit_with_error;
+ }
+ goto exit;
+ } else {
+ SLSI_NET_ERR(dev, "Connected but received connect to new ESS, without disconnect");
+ goto exit_with_error;
+ }
+ }
+ }
+ /* Sta started case */
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ SLSI_NET_ERR(dev, "Iftype: %d\n", ndev_vif->iftype);
+ goto exit_with_error;
+ }
+#endif /*wifi sharing*/
+ if (WARN_ON(ndev_vif->activated)) {
+ SLSI_NET_ERR(dev, "Vif is activated: %d\n", ndev_vif->activated);
+ goto exit_with_error;
+ }
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION &&
+ ndev_vif->sta.vif_status != SLSI_VIF_STATUS_UNSPECIFIED) {
+ SLSI_NET_ERR(dev, "VIF status: %d\n", ndev_vif->sta.vif_status);
+ goto exit_with_error;
+ }
+ prev_vif_type = ndev_vif->vif_type;
+
+ prev_vif_type = ndev_vif->vif_type;
+ switch (ndev_vif->iftype) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ ndev_vif->iftype = NL80211_IFTYPE_STATION;
+ dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ action_frame_bmap = SLSI_STA_ACTION_FRAME_BITMAP;
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+ if (sdev->device_config.wes_mode)
+ action_frame_bmap |= SLSI_ACTION_FRAME_VENDOR_SPEC;
+#endif
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ slsi_p2p_group_start_remove_unsync_vif(sdev);
+ p2p_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2P);
+ if (p2p_dev)
+ SLSI_ETHER_COPY(device_address, p2p_dev->dev_addr);
+ action_frame_bmap = SLSI_ACTION_FRAME_PUBLIC;
+ break;
+ default:
+ SLSI_NET_ERR(dev, "Invalid Device Type: %d\n", ndev_vif->iftype);
+ goto exit_with_error;
+ }
+
+ /* Initial Roaming checks done - assign vif type */
+ ndev_vif->vif_type = FAPI_VIFTYPE_STATION;
+
+ channel = sme->channel;
+ bssid = sme->bssid;
+ ndev_vif->sta.sta_bss = cfg80211_get_bss(wiphy,
+ sme->channel,
+ sme->bssid,
+ sme->ssid,
+ sme->ssid_len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ IEEE80211_BSS_TYPE_ANY,
+#else
+ capability,
+#endif
+ capability);
+ if (!ndev_vif->sta.sta_bss) {
+ struct cfg80211_ssid ssid;
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "BSS info is not available - Perform scan\n");
+ ssid.ssid_len = sme->ssid_len;
+ memcpy(ssid.ssid, sme->ssid, ssid.ssid_len);
+ r = slsi_mlme_connect_scan(sdev, dev, 1, &ssid, sme->channel);
+ if (r) {
+ SLSI_NET_ERR(dev, "slsi_mlme_connect_scan failed\n");
+ goto exit;
+ }
+ ndev_vif->sta.sta_bss = cfg80211_get_bss(wiphy,
+ sme->channel,
+ sme->bssid,
+ sme->ssid,
+ sme->ssid_len,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ IEEE80211_BSS_TYPE_ANY,
+#else
+ capability,
+#endif
+ capability);
+ if (!ndev_vif->sta.sta_bss) {
+ SLSI_NET_ERR(dev, "cfg80211_get_bss(%.*s, %pM) Not found\n", (int)sme->ssid_len, sme->ssid, sme->bssid);
+ /*Set previous status in case of failure */
+ ndev_vif->vif_type = prev_vif_type;
+ r = -ENOENT;
+ goto exit;
+ }
+ channel = ndev_vif->sta.sta_bss->channel;
+ bssid = ndev_vif->sta.sta_bss->bssid;
+ } else {
+ channel = ndev_vif->sta.sta_bss->channel;
+ bssid = ndev_vif->sta.sta_bss->bssid;
+ }
+
+ ndev_vif->channel_type = NL80211_CHAN_NO_HT;
+ ndev_vif->chan = channel;
+
+ if (slsi_mlme_add_vif(sdev, dev, dev->dev_addr, device_address) != 0) {
+ SLSI_NET_ERR(dev, "slsi_mlme_add_vif failed\n");
+ goto exit_with_bss;
+ }
+ if (slsi_vif_activated(sdev, dev) != 0) {
+ SLSI_NET_ERR(dev, "slsi_vif_activated failed\n");
+ goto exit_with_vif;
+ }
+ if (slsi_mlme_register_action_frame(sdev, dev, action_frame_bmap, action_frame_bmap) != 0) {
+ SLSI_NET_ERR(dev, "Action frame registration failed for bitmap value %d\n", action_frame_bmap);
+ goto exit_with_vif;
+ }
+
+ r = slsi_set_boost(sdev, dev);
+ if (r != 0)
+ SLSI_NET_ERR(dev, "Rssi Boost set failed: %d\n", r);
+
+ /* add_info_elements with Probe Req IEs. Proceed even if confirm fails for add_info as it would
+ * still work if the fw pre-join scan does not include the vendor IEs
+ */
+ if (ndev_vif->probe_req_ies) {
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ if (sme->crypto.wpa_versions == 2)
+ ndev_vif->delete_probe_req_ies = true; /* Stored Probe Req can be deleted at vif
+ * deletion after WPA2 association
+ */
+ else
+ /* Retain stored Probe Req at vif deletion until WPA2 connection to allow Probe req */
+ ndev_vif->delete_probe_req_ies = false;
+ } else {
+ ndev_vif->delete_probe_req_ies = true; /* Delete stored Probe Req at vif deletion for STA */
+ }
+ (void)slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_REQUEST, ndev_vif->probe_req_ies,
+ ndev_vif->probe_req_ie_len);
+ }
+
+ /* Sometimes netif stack takes more time to initialize and any packet
+ * sent to stack would be dropped. This behavior is random in nature,
+ * so start the netif stack before sending out the connect req, it shall
+ * give enough time to netstack to initialize.
+ */
+ netif_carrier_on(dev);
+ ndev_vif->sta.vif_status = SLSI_VIF_STATUS_CONNECTING;
+
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+ if (sme->auth_type == NL80211_AUTHTYPE_SAE && (sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) {
+ const u8 *rsn;
+
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "N AKM Suites: : %1d\n", sme->crypto.n_akm_suites);
+ ndev_vif->sta.crypto.wpa_versions = 3;
+ rsn = cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len);
+ if (rsn) {
+ int pos;
+
+ pos = 7 + 2 + (rsn[8] * 4) + 2;
+ ndev_vif->sta.crypto.akm_suites[0] = ((rsn[pos + 4] << 24) | (rsn[pos + 3] << 16) | (rsn[pos + 2] << 8) | (rsn[pos + 1]));
+ }
+
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "RSN IE: : %1d\n", ndev_vif->sta.crypto.akm_suites[0]);
+ } else {
+ ndev_vif->sta.crypto.wpa_versions = 0;
+ }
+#endif
+
+ r = slsi_mlme_connect(sdev, dev, sme, channel, bssid);
+ if (r != 0) {
+ ndev_vif->sta.is_wps = false;
+ SLSI_NET_ERR(dev, "connect failed: %d\n", r);
+ netif_carrier_off(dev);
+ goto exit_with_vif;
+ }
+
+ peer = slsi_peer_add(sdev, dev, (u8 *)bssid, SLSI_STA_PEER_QUEUESET + 1);
+ ndev_vif->sta.resp_id = 0;
+
+ if (!peer)
+ goto exit_with_error;
+
+ goto exit;
+
+exit_with_vif:
+ slsi_mlme_del_vif(sdev, dev);
+ slsi_vif_deactivated(sdev, dev);
+exit_with_bss:
+ if (ndev_vif->sta.sta_bss) {
+ slsi_cfg80211_put_bss(wiphy, ndev_vif->sta.sta_bss);
+ ndev_vif->sta.sta_bss = NULL;
+ }
+exit_with_error:
+ r = -EINVAL;
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+ return r;
+}
+
+int slsi_disconnect(struct wiphy *wiphy, struct net_device *dev,
+ u16 reason_code)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer;
+ int r = 0;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "reason: %d, vif_index = %d, vif_type = %d\n", reason_code,
+ ndev_vif->ifnum, ndev_vif->vif_type);
+
+ /* Assuming that the time it takes the firmware to disconnect is not significant
+ * as this function holds the locks until the MLME-DISCONNECT-IND comes back.
+ * Unless the MLME-DISCONNECT-CFM fails.
+ */
+ if (!ndev_vif->activated) {
+ r = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+ cfg80211_disconnected(dev, reason_code, NULL, 0, false, GFP_KERNEL);
+#else
+ cfg80211_disconnected(dev, reason_code, NULL, 0, GFP_KERNEL);
+#endif
+ SLSI_NET_INFO(dev, "Vif is already Deactivated\n");
+ goto exit;
+ }
+
+ peer = ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET];
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+ SCSC_WLOG_DRIVER_EVENT(WLOG_NORMAL, WIFI_EVENT_DISASSOCIATION_REQUESTED, 2,
+ WIFI_TAG_BSSID, ETH_ALEN, peer->address,
+ WIFI_TAG_REASON_CODE, sizeof(u16), &reason_code);
+#endif
+
+ switch (ndev_vif->vif_type) {
+ case FAPI_VIFTYPE_STATION:
+ {
+ slsi_reset_throughput_stats(dev);
+ /* Disconnecting spans several host firmware interactions so track the status
+ * so that the Host can ignore connect related signaling eg. MLME-CONNECT-IND
+ * now that it has triggered a disconnect.
+ */
+ ndev_vif->sta.vif_status = SLSI_VIF_STATUS_DISCONNECTING;
+
+ netif_carrier_off(dev);
+ if (peer->valid)
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+
+ /* MLME-DISCONNECT_CFM only means that the firmware has accepted the request it has not yet
+ * disconnected. Completion of the disconnect is indicated by MLME-DISCONNECT-IND, so have
+ * to wait for that before deleting the VIF. Also any new activities eg. connect can not yet
+ * be started on the VIF until the disconnection is completed. So the MLME function also handles
+ * waiting for the MLME-DISCONNECT-IND (if the CFM is successful)
+ */
+
+ r = slsi_mlme_disconnect(sdev, dev, peer->address, reason_code, true);
+ if (r != 0)
+ SLSI_NET_ERR(dev, "Disconnection returned with failure\n");
+ /* Even if we fail to disconnect cleanly, tidy up. */
+ r = slsi_handle_disconnect(sdev, dev, peer->address, 0);
+
+ break;
+ }
+ default:
+ SLSI_NET_WARN(dev, "Invalid - vif type:%d, device type:%d)\n", ndev_vif->vif_type, ndev_vif->iftype);
+ r = -EINVAL;
+ break;
+ }
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+int slsi_set_default_key(struct wiphy *wiphy, struct net_device *dev,
+ u8 key_index, bool unicast, bool multicast)
+{
+ SLSI_UNUSED_PARAMETER(wiphy);
+ SLSI_UNUSED_PARAMETER(dev);
+ SLSI_UNUSED_PARAMETER(key_index);
+ SLSI_UNUSED_PARAMETER(unicast);
+ SLSI_UNUSED_PARAMETER(multicast);
+ /* Key is set in add_key. Nothing to do here */
+ return 0;
+}
+
+int slsi_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_index)
+{
+ SLSI_UNUSED_PARAMETER(wiphy);
+ SLSI_UNUSED_PARAMETER(key_index);
+ SLSI_UNUSED_PARAMETER(dev);
+
+ return 0;
+}
+
+int slsi_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int r = 0;
+
+ SLSI_DBG1(sdev, SLSI_CFG80211, "slsi_set_wiphy_parms Frag Threshold = %d, RTS Threshold = %d",
+ wiphy->frag_threshold, wiphy->rts_threshold);
+
+ if ((changed & WIPHY_PARAM_FRAG_THRESHOLD) && (wiphy->frag_threshold != -1)) {
+ r = slsi_set_uint_mib(sdev, NULL, SLSI_PSID_DOT11_FRAGMENTATION_THRESHOLD, wiphy->frag_threshold);
+ if (r != 0) {
+ SLSI_ERR(sdev, "Setting FRAG_THRESHOLD failed\n");
+ return r;
+ }
+ }
+
+ if ((changed & WIPHY_PARAM_RTS_THRESHOLD) && (wiphy->rts_threshold != -1)) {
+ r = slsi_set_uint_mib(sdev, NULL, SLSI_PSID_DOT11_RTS_THRESHOLD, wiphy->rts_threshold);
+ if (r != 0) {
+ SLSI_ERR(sdev, "Setting RTS_THRESHOLD failed\n");
+ return r;
+ }
+ }
+
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+int slsi_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+#else
+int slsi_set_tx_power(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type, int mbm)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int r = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ SLSI_UNUSED_PARAMETER(wdev);
+ SLSI_UNUSED_PARAMETER(type);
+#endif
+ SLSI_UNUSED_PARAMETER(mbm);
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ r = -EOPNOTSUPP;
+
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+int slsi_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm)
+#else
+int slsi_get_tx_power(struct wiphy *wiphy, int *dbm)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int r = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ SLSI_UNUSED_PARAMETER(wdev);
+#endif
+ SLSI_UNUSED_PARAMETER(dbm);
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ r = -EOPNOTSUPP;
+
+ return r;
+}
+
/* cfg80211 del_station handler - only valid on an AP vif.
 *
 * mac == NULL   : VLAN flush case - silently ignored.
 * broadcast mac : tears down the whole AP - every valid peer's controlled
 *                 port is closed, cached WPA/WMM IEs are freed, carrier is
 *                 dropped and the vif is deleted and deactivated.
 * unicast mac   : disconnects just that peer via mlme-disconnect-req
 *                 (with race/WPS exceptions noted inline).
 *
 * Returns 0 on success or a negative errno.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
int slsi_del_station(struct wiphy *wiphy, struct net_device *dev,
		     struct station_del_parameters *del_params)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
int slsi_del_station(struct wiphy *wiphy, struct net_device *dev,
		     const u8 *mac)
#else
int slsi_del_station(struct wiphy *wiphy, struct net_device *dev,
		     u8 *mac)
#endif
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer;
	int r = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
	/* From 3.19 onwards the MAC arrives inside station_del_parameters. */
	const u8 *mac = del_params->mac;
#endif

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG1(dev, SLSI_CFG80211, "%pM, vifType:%d, vifIndex:%d vifActivated:%d ap.p2p_gc_keys_set = %d\n",
		      mac, ndev_vif->vif_type, ndev_vif->ifnum, ndev_vif->activated, ndev_vif->ap.p2p_gc_keys_set);

	/* Function is called by cfg80211 before the VIF is added */
	if (!ndev_vif->activated)
		goto exit;

	if (FAPI_VIFTYPE_AP != ndev_vif->vif_type) {
		r = -EINVAL;
		goto exit;
	}
	/* MAC with NULL value will come in case of flushing VLANS . Ignore this.*/
	if (!mac) {
		goto exit;
	} else if (is_broadcast_ether_addr(mac)) {
		int i = 0;

		/* Close the controlled port for every valid peer record. */
		while (i < SLSI_PEER_INDEX_MAX) {
			peer = ndev_vif->peer_sta_record[i];
			if (peer && peer->valid) {
				slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
			}
			++i;
		}

		/* Note AP :: mlme_disconnect_request with broadcast mac address is
		 * not required. Other third party devices don't support this. Conclusively,
		 * BIP support is not present with AP
		 */

		/* Free WPA and WMM IEs if present */
		slsi_clear_cached_ies(&ndev_vif->ap.cache_wpa_ie, &ndev_vif->ap.wpa_ie_len);
		slsi_clear_cached_ies(&ndev_vif->ap.cache_wmm_ie, &ndev_vif->ap.wmm_ie_len);

		netif_carrier_off(dev);

		/* All STA related packets and info should already have been flushed */
		slsi_mlme_del_vif(sdev, dev);
		slsi_vif_deactivated(sdev, dev);
		ndev_vif->ipaddress = cpu_to_be32(0);

		/* Drop the wakelock that was held while P2P GC keys were set. */
		if (ndev_vif->ap.p2p_gc_keys_set) {
			slsi_wakeunlock(&sdev->wlan_wl);
			ndev_vif->ap.p2p_gc_keys_set = false;
		}
	} else {
		peer = slsi_get_peer_from_mac(sdev, dev, mac);
		if (peer) {  /* To handle race condition when disconnect_req is sent before procedure_strted_ind and before mlme-connected_ind*/
			if (peer->connected_state == SLSI_STA_CONN_STATE_CONNECTING) {
				SLSI_NET_DBG1(dev, SLSI_CFG80211, "SLSI_STA_CONN_STATE_CONNECTING : mlme-disconnect-req dropped at driver\n");
				goto exit;
			}
			if (peer->is_wps) {
				/* To inter-op with Intel STA in P2P cert need to discard the deauth after successful WPS handshake as a P2P GO */
				SLSI_NET_INFO(dev, "DISCONNECT after WPS : mlme-disconnect-req dropped at driver\n");
				goto exit;
			}
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
			r = slsi_mlme_disconnect(sdev, dev, peer->address, WLAN_REASON_DEAUTH_LEAVING, true);
			if (r != 0)
				SLSI_NET_ERR(dev, "Disconnection returned with failure\n");
			/* Even if we fail to disconnect cleanly, tidy up. */
			r = slsi_handle_disconnect(sdev, dev, peer->address, WLAN_REASON_DEAUTH_LEAVING);
		}
	}

exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return r;
}
+
/* cfg80211 get_station handler.
 *
 * Looks up the peer record for @mac on this vif and copies its cached
 * station_info into @sinfo.  For a STA vif that is not mid-roam (or a P2P
 * client vif) the counters are first refreshed from firmware MIBs via
 * slsi_mlme_get_sinfo_mib().
 *
 * Returns 0 on success, -EINVAL if the vif is not active or the peer is
 * unknown, or the error from the MIB read.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
int slsi_get_station(struct wiphy *wiphy, struct net_device *dev,
		     const u8 *mac, struct station_info *sinfo)
#else
int slsi_get_station(struct wiphy *wiphy, struct net_device *dev,
		     u8 *mac, struct station_info *sinfo)
#endif
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer;
	int r = 0;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	/* Peer information is only meaningful on an active vif. */
	if (!ndev_vif->activated) {
		r = -EINVAL;
		goto exit;
	}

	peer = slsi_get_peer_from_mac(sdev, dev, mac);
	if (!peer) {
		SLSI_NET_DBG1(dev, SLSI_CFG80211, "%pM : Not Found\n", mac);
		r = -EINVAL;
		goto exit;
	}

	/* Skip the firmware MIB refresh while a roam is in progress. */
	if (((ndev_vif->iftype == NL80211_IFTYPE_STATION && !(ndev_vif->sta.roam_in_progress)) ||
	     ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
		/*Read MIB and fill into the peer.sinfo*/
		r = slsi_mlme_get_sinfo_mib(sdev, dev, peer);
		if (r) {
			SLSI_NET_DBG1(dev, SLSI_CFG80211, "failed to read Station Info Error:%d\n", r);
			goto exit;
		}
	}

	*sinfo = peer->sinfo;
	sinfo->generation = ndev_vif->cfg80211_sinfo_generation;

	/* Byte counters take the %llu format only on >= 3.10.9 kernels. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	SLSI_NET_DBG1(dev, SLSI_CFG80211, "%pM, tx:%d, txbytes:%llu, rx:%d, rxbytes:%llu tx_fail:%d tx_retry:%d\n",
		      mac,
		      peer->sinfo.tx_packets,
		      peer->sinfo.tx_bytes,
		      peer->sinfo.rx_packets,
		      peer->sinfo.rx_bytes,
		      peer->sinfo.tx_failed,
		      peer->sinfo.tx_retries);
#else
	SLSI_NET_DBG1(dev, SLSI_CFG80211, "%pM, tx:%d, txbytes:%d, rx:%d, rxbytes:%d tx_fail:%d tx_retry:%d\n",
		      mac,
		      peer->sinfo.tx_packets,
		      peer->sinfo.tx_bytes,
		      peer->sinfo.rx_packets,
		      peer->sinfo.rx_bytes,
		      peer->sinfo.tx_failed,
		      peer->sinfo.tx_retries);
#endif

exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return r;
}
+
+int slsi_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int r = -EINVAL;
+ u16 pwr_mode = enabled ? FAPI_POWERMANAGEMENTMODE_POWER_SAVE : FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
+
+ SLSI_UNUSED_PARAMETER(timeout);
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_POWERMGT.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (slsi_is_rf_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, RF test does not support.\n");
+ return -EOPNOTSUPP;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "enabled:%d, vif_type:%d, vif_index:%d\n", enabled, ndev_vif->vif_type,
+ ndev_vif->ifnum);
+
+ if ((ndev_vif->activated) && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION)) {
+ ndev_vif->set_power_mode = pwr_mode;
+ r = slsi_mlme_powermgt(sdev, dev, pwr_mode);
+ } else {
+ r = 0;
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 18))
+int slsi_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+int slsi_tdls_oper(struct wiphy *wiphy, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper)
+#endif
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int r = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "oper:%d, vif_type:%d, vif_index:%d\n", oper, ndev_vif->vif_type,
+ ndev_vif->ifnum);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((!ndev_vif->activated) || SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif) ||
+ (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
+ r = -ENOTSUPP;
+ goto exit;
+ }
+
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "NL80211_TDLS_DISCOVERY_REQ\n");
+ r = slsi_mlme_tdls_action(sdev, dev, peer, FAPI_TDLSACTION_DISCOVERY, 0, 0);
+ break;
+ case NL80211_TDLS_SETUP:
+ r = slsi_mlme_tdls_action(sdev, dev, peer, FAPI_TDLSACTION_SETUP, 0, 0);
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ r = slsi_mlme_tdls_action(sdev, dev, peer, FAPI_TDLSACTION_TEARDOWN, 0, 0);
+ break;
+ default:
+ r = -EOPNOTSUPP;
+ goto exit;
+ }
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+int slsi_set_qos_map(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_qos_map *qos_map)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer;
+ int r = 0;
+
+ /* Cleaning up is inherently taken care by driver */
+ if (!qos_map)
+ return r;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (!ndev_vif->activated) {
+ r = -EINVAL;
+ goto exit;
+ }
+
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ r = -EINVAL;
+ goto exit;
+ }
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "Set QoS Map\n");
+ peer = ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET];
+ if (!peer || !peer->valid) {
+ r = -EINVAL;
+ goto exit;
+ }
+
+ memcpy(&peer->qos_map, qos_map, sizeof(struct cfg80211_qos_map));
+ peer->qos_map_set = true;
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
#ifdef CONFIG_SCSC_WLAN_DEBUG
/* Debug-only cfg80211 set_monitor_channel handler.
 *
 * Applies @chandef to monitor mode via slsi_test_sap_configure_monitor_mode()
 * on the primary WLAN netdev.  Returns 0 on success, -EINVAL if the netdev
 * is gone or the configure call fails.
 */
int slsi_set_monitor_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct net_device *dev;
	struct netdev_vif *ndev_vif;

	SLSI_DBG1(sdev, SLSI_CFG80211, "channel (freq:%u)\n", chandef->chan->center_freq);

	/* NOTE(review): only the netdev lookup is under RCU; ndev_vif is used
	 * after rcu_read_unlock(), which assumes the netdev cannot disappear
	 * for the duration of this call - confirm against netdev teardown path.
	 */
	rcu_read_lock();
	dev = slsi_get_netdev_rcu(sdev, SLSI_NET_INDEX_WLAN);
	if (!dev) {
		SLSI_ERR(sdev, "netdev No longer exists\n");
		rcu_read_unlock();
		return -EINVAL;
	}
	ndev_vif = netdev_priv(dev);
	rcu_read_unlock();

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	if (slsi_test_sap_configure_monitor_mode(sdev, dev, chandef) != 0) {
		SLSI_ERR(sdev, "set Monitor channel failed\n");
		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
		return -EINVAL;
	}
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return 0;
}
#endif
+#endif
/* cfg80211 suspend handler: nothing to do here - the WoWLAN configuration
 * is ignored and success is always reported.
 */
int slsi_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
{
	SLSI_UNUSED_PARAMETER(wow);

	return 0;
}
+
/* cfg80211 resume handler: currently a no-op.  The IO-thread kick is
 * intentionally disabled (see the commented-out call below); success is
 * always reported.
 */
int slsi_resume(struct wiphy *wiphy)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);

	/* Scheduling the IO thread */
/*	(void)slsi_hip_run_bh(sdev); */
	SLSI_UNUSED_PARAMETER(sdev);

	return 0;
}
+
/* cfg80211 set_pmksa handler: no PMKSA state is kept in the host driver,
 * so the entry is accepted and discarded.
 */
int slsi_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
		   struct cfg80211_pmksa *pmksa)
{
	SLSI_UNUSED_PARAMETER(wiphy);
	SLSI_UNUSED_PARAMETER(dev);
	SLSI_UNUSED_PARAMETER(pmksa);
	return 0;
}
+
/* cfg80211 del_pmksa handler: nothing is cached in the host driver, so the
 * delete request trivially succeeds.
 */
int slsi_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
		   struct cfg80211_pmksa *pmksa)
{
	SLSI_UNUSED_PARAMETER(wiphy);
	SLSI_UNUSED_PARAMETER(dev);
	SLSI_UNUSED_PARAMETER(pmksa);
	return 0;
}
+
/* cfg80211 flush_pmksa handler: nothing is cached in the host driver, so
 * flushing trivially succeeds.
 */
int slsi_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
	SLSI_UNUSED_PARAMETER(wiphy);
	SLSI_UNUSED_PARAMETER(dev);
	return 0;
}
+
/* cfg80211 remain-on-channel handler for the P2P unsync vif.
 *
 * In a P2P group state the request is satisfied immediately: ready and
 * expired indications are sent back-to-back without touching the radio.
 * Otherwise the unsync vif is activated (or retuned to @chan if already
 * active), the P2P state moves to P2P_LISTENING, and a driver-side expiry
 * work item is queued slightly before @duration elapses so it does not race
 * the supplicant's cancel.
 *
 * Returns 0 on success, -EINVAL on any failure (device not started, wrong
 * vif, bad P2P state, or MLME/IE programming failure).
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
int slsi_remain_on_channel(struct wiphy *wiphy,
			   struct wireless_dev *wdev,
			   struct ieee80211_channel *chan,
			   unsigned int duration,
			   u64 *cookie)
{
	struct net_device *dev = wdev->netdev;

#else
int slsi_remain_on_channel(struct wiphy *wiphy,
			   struct net_device *dev,
			   struct ieee80211_channel *chan,
			   enum nl80211_channel_type channel_type,
			   unsigned int duration,
			   u64 *cookie)
{
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	int r = 0;

	SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
	if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
		SLSI_WARN(sdev, "device not started yet (device_state:%d)\n", sdev->device_state);
		goto exit_with_error;
	}

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG2(dev, SLSI_CFG80211, "channel freq = %d, duration = %d, vif_type = %d, vif_index = %d,"
		      "sdev->p2p_state = %s\n", chan->center_freq, duration, ndev_vif->vif_type, ndev_vif->ifnum,
		      slsi_p2p_state_text(sdev->p2p_state));
	if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
		SLSI_NET_ERR(dev, "Invalid vif type\n");
		goto exit_with_error;
	}

	/* In a P2P group state, fake the ROC: report ready then expired at once. */
	if (SLSI_IS_P2P_GROUP_STATE(sdev)) {
		slsi_assign_cookie_id(cookie, &ndev_vif->unsync.roc_cookie);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
		cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
		cfg80211_remain_on_channel_expired(wdev, *cookie, chan, GFP_KERNEL);
#else
		cfg80211_ready_on_channel(dev, *cookie, chan, channel_type, duration, GFP_KERNEL);
		cfg80211_remain_on_channel_expired(dev, *cookie, chan, channel_type, GFP_KERNEL);
#endif
		goto exit;
	}

	/* Unsync vif will be required, cancel any pending work of its deletion */
	cancel_delayed_work(&ndev_vif->unsync.del_vif_work);

	/* Ideally, there should not be any ROC work pending. However, supplicant can send back to back ROC in a race scenario as below.
	 * If action frame is received while P2P social scan, the response frame tx is delayed till scan completes. After scan completion,
	 * frame tx is done and ROC is started. Upon frame tx status, supplicant sends another ROC without cancelling the previous one.
	 */
	cancel_delayed_work(&ndev_vif->unsync.roc_expiry_work);

	if (delayed_work_pending(&ndev_vif->unsync.unset_channel_expiry_work))
		cancel_delayed_work(&ndev_vif->unsync.unset_channel_expiry_work);

	/* If action frame tx is in progress and ROC comes, then it would mean action frame tx was done in ROC and
	 * frame tx ind is awaited, don't change state. Also allow back to back ROC in case it comes.
	 */
	if ((sdev->p2p_state == P2P_ACTION_FRAME_TX_RX) || (sdev->p2p_state == P2P_LISTENING)) {
		goto exit_with_roc;
	}

	/* Unsync vif activation: Possible P2P state at this point is P2P_IDLE_NO_VIF or P2P_IDLE_VIF_ACTIVE */
	if (sdev->p2p_state == P2P_IDLE_NO_VIF) {
		if (slsi_p2p_vif_activate(sdev, dev, chan, duration, true) != 0)
			goto exit_with_error;
	} else if (sdev->p2p_state == P2P_IDLE_VIF_ACTIVE) {
		/* Configure Probe Response IEs in firmware if they have changed */
		if (ndev_vif->unsync.ies_changed) {
			u16 purpose = FAPI_PURPOSE_PROBE_RESPONSE;

			if (slsi_mlme_add_info_elements(sdev, dev, purpose, ndev_vif->unsync.probe_rsp_ies, ndev_vif->unsync.probe_rsp_ies_len) != 0) {
				SLSI_NET_ERR(dev, "Probe Rsp IEs setting failed\n");
				goto exit_with_vif;
			}
			ndev_vif->unsync.ies_changed = false;
		}
		/* Channel Setting - Don't set if already on same channel */
		if (ndev_vif->driver_channel != chan->hw_value) {
			if (slsi_mlme_set_channel(sdev, dev, chan, SLSI_FW_CHANNEL_DURATION_UNSPECIFIED, 0, 0) != 0) {
				SLSI_NET_ERR(dev, "Channel setting failed\n");
				goto exit_with_vif;
			} else {
				ndev_vif->chan = chan;
				ndev_vif->driver_channel = chan->hw_value;
			}
		}
	} else {
		SLSI_NET_ERR(dev, "Driver in incorrect P2P state (%s)", slsi_p2p_state_text(sdev->p2p_state));
		goto exit_with_error;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 9))
	ndev_vif->channel_type = channel_type;
#endif

	SLSI_P2P_STATE_CHANGE(sdev, P2P_LISTENING);

exit_with_roc:
	/* Cancel remain on channel is sent to the supplicant 10ms before the duration
	 *This is to avoid the race condition of supplicant sending cancel remain on channel and
	 *drv sending cancel_remain on channel because of roc expiry.
	 *This race condition causes delay to the next p2p search
	 */
	/* NOTE(review): assumes duration > SLSI_P2P_ROC_EXTRA_MSEC; the unsigned
	 * subtraction wraps otherwise - confirm minimum ROC duration from callers.
	 */
	queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.roc_expiry_work,
			   msecs_to_jiffies(duration - SLSI_P2P_ROC_EXTRA_MSEC));

	slsi_assign_cookie_id(cookie, &ndev_vif->unsync.roc_cookie);
	SLSI_NET_DBG2(dev, SLSI_CFG80211, "Cookie = 0x%llx\n", *cookie);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
#else
	cfg80211_ready_on_channel(dev, *cookie, chan, channel_type, duration, GFP_KERNEL);
#endif

	goto exit;

exit_with_vif:
	slsi_p2p_vif_deactivate(sdev, dev, true);
exit_with_error:
	r = -EINVAL;
exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
	return r;
}
+
/* cfg80211 cancel-remain-on-channel handler (P2P unsync vif only).
 *
 * Clears any pending expected-action-frame state, cancels the driver's own
 * ROC expiry work, reports ROC-expired to cfg80211, queues channel-unset and
 * unsync-vif-delete work, and returns the P2P state to P2P_IDLE_VIF_ACTIVE.
 * The request is ignored (returns 0) unless the state is P2P_LISTENING or
 * P2P_ACTION_FRAME_TX_RX.  Returns -EINVAL for a non-P2P vif.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
int slsi_cancel_remain_on_channel(struct wiphy *wiphy,
				  struct wireless_dev *wdev,
				  u64 cookie)
{
	struct net_device *dev = wdev->netdev;

#else
int slsi_cancel_remain_on_channel(struct wiphy *wiphy,
				  struct net_device *dev,
				  u64 cookie)
{
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	int r = 0;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG2(dev, SLSI_CFG80211, "Cookie = 0x%llx, vif_type = %d, vif_index = %d, sdev->p2p_state = %s,"
		      "ndev_vif->ap.p2p_gc_keys_set = %d, ndev_vif->unsync.roc_cookie = 0x%llx\n", cookie,
		      ndev_vif->vif_type, ndev_vif->ifnum, slsi_p2p_state_text(sdev->p2p_state),
		      ndev_vif->ap.p2p_gc_keys_set, ndev_vif->unsync.roc_cookie);

	if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
		SLSI_NET_ERR(dev, "Invalid vif type\n");
		r = -EINVAL;
		goto exit;
	}

	/* Nothing to cancel unless a ROC (or frame exchange within one) is active. */
	if (!((sdev->p2p_state == P2P_LISTENING) || (sdev->p2p_state == P2P_ACTION_FRAME_TX_RX))) {
		goto exit;
	}

	if (sdev->p2p_state == P2P_ACTION_FRAME_TX_RX && ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID) {
		/* Reset the expected action frame as procedure got completed */
		SLSI_INFO(sdev, "Action frame (%s) was not received\n", slsi_p2p_pa_subtype_text(ndev_vif->mgmt_tx_data.exp_frame));
		ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
	}

	cancel_delayed_work(&ndev_vif->unsync.roc_expiry_work);

	/* Report expiry using the driver's stored cookie/channel, not the caller's. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	cfg80211_remain_on_channel_expired(&ndev_vif->wdev, ndev_vif->unsync.roc_cookie, ndev_vif->chan, GFP_KERNEL);
#else
	cfg80211_remain_on_channel_expired(ndev_vif->wdev.netdev, ndev_vif->unsync.roc_cookie,
					   ndev_vif->chan, ndev_vif->channel_type, GFP_KERNEL);
#endif
	if (!ndev_vif->drv_in_p2p_procedure) {
		if (delayed_work_pending(&ndev_vif->unsync.unset_channel_expiry_work))
			cancel_delayed_work(&ndev_vif->unsync.unset_channel_expiry_work);
		queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.unset_channel_expiry_work,
				   msecs_to_jiffies(SLSI_P2P_UNSET_CHANNEL_EXTRA_MSEC));
	}
	/* Queue work to delete unsync vif */
	slsi_p2p_queue_unsync_vif_del_work(ndev_vif, SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC);
	SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);

exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return r;
}
+
/* cfg80211 change_bss handler: no BSS parameter is acted upon by this
 * driver; the request is accepted as a no-op.
 */
int slsi_change_bss(struct wiphy *wiphy, struct net_device *dev,
		    struct bss_parameters *params)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);

	SLSI_UNUSED_PARAMETER(params);
	SLSI_UNUSED_PARAMETER(sdev);

	return 0;
}
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 5, 0))
+int slsi_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int r = 0;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "channel_type:%u, freq:%u, vif_index:%d, vif_type:%d\n", channel_type,
+ chan->center_freq, ndev_vif->ifnum, ndev_vif->vif_type);
+ if (WARN_ON(ndev_vif->activated)) {
+ r = -EINVAL;
+ goto exit;
+ }
+
+ ndev_vif->channel_type = channel_type;
+ ndev_vif->chan = chan;
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 5, 0)) */
+
/* Run a blocking OBSS scan on the AP's operating channel before AP start.
 *
 * Issues a single-channel, broadcast-SSID FAPI_SCANTYPE_OBSS_SCAN and waits
 * for the scan-done indication (is_blocking_scan = true), then forwards the
 * result via slsi_ap_obss_scan_done_ind().  The mlme_add_scan return value
 * is deliberately ignored - the AP start proceeds either way.
 */
static void slsi_ap_start_obss_scan(struct slsi_dev *sdev, struct net_device *dev, struct netdev_vif *ndev_vif)
{
	struct cfg80211_ssid ssids;
	struct ieee80211_channel *channel;
	int n_ssids = 1, n_channels = 1, i;

	SLSI_NET_DBG1(dev, SLSI_CFG80211, "channel %u\n", ndev_vif->chan->hw_value);

	SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);

	ssids.ssid_len = 0;
	for (i = 0; i < IEEE80211_MAX_SSID_LEN; i++)
		ssids.ssid[i] = 0x00; /* Broadcast SSID */

	channel = ieee80211_get_channel(sdev->wiphy, ndev_vif->chan->center_freq);

	ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = true;
	(void)slsi_mlme_add_scan(sdev,
				 dev,
				 FAPI_SCANTYPE_OBSS_SCAN,
				 FAPI_REPORTMODE_REAL_TIME,
				 n_ssids,
				 &ssids,
				 n_channels,
				 &channel,
				 NULL,
				 NULL, /* No IEs */
				 0,
				 ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan /* Wait for scan_done_ind */);

	slsi_ap_obss_scan_done_ind(dev, ndev_vif);
	ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = false;
	SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
}
+
/* Validate cfg80211_ap_settings before starting an AP.
 *
 * Rejects: AP start on the P2P unsync vif; missing SSID, beacon head or
 * beacon interval; missing channel; an already-activated vif; an iftype
 * other than AP/P2P-GO; and 40 MHz on 2.4 GHz when the firmware does not
 * support it.  On >= 3.10.9 kernels it also latches settings->chandef (and
 * its channel) onto the vif as a side effect.
 *
 * Returns 0 if the settings are acceptable, -EINVAL otherwise.
 */
static int slsi_ap_start_validate(struct net_device *dev, struct slsi_dev *sdev, struct cfg80211_ap_settings *settings)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
		SLSI_NET_ERR(dev, "AP start requested on incorrect vif\n");
		goto exit_with_error;
	}

	if (!settings->ssid_len || !settings->ssid) {
		SLSI_NET_ERR(dev, "SSID not provided\n");
		goto exit_with_error;
	}

	if (!settings->beacon.head_len || !settings->beacon.head) {
		SLSI_NET_ERR(dev, "Beacon not provided\n");
		goto exit_with_error;
	}

	if (!settings->beacon_interval) {
		SLSI_NET_ERR(dev, "Beacon Interval not provided\n");
		goto exit_with_error;
	}

	/* Side effect: record the requested chandef/channel on the vif. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	ndev_vif->chandef = &settings->chandef;
	ndev_vif->chan = ndev_vif->chandef->chan;
#endif
	if (WARN_ON(!ndev_vif->chan))
		goto exit_with_error;

	if (WARN_ON(ndev_vif->activated))
		goto exit_with_error;

	if (WARN_ON((ndev_vif->iftype != NL80211_IFTYPE_AP) && (ndev_vif->iftype != NL80211_IFTYPE_P2P_GO)))
		goto exit_with_error;

	/* 40 MHz on 2.4 GHz is only allowed when the firmware advertises it. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	if ((ndev_vif->chan->hw_value <= 14) && (!sdev->fw_2g_40mhz_enabled) &&
	    (ndev_vif->chandef->width == NL80211_CHAN_WIDTH_40)) {
		SLSI_NET_ERR(dev, "Configuration error: 40 MHz on 2.4 GHz is not supported. Channel_no: %d Channel_width: %d\n", ndev_vif->chan->hw_value, slsi_get_chann_info(sdev, ndev_vif->chandef));
		goto exit_with_error;
	}
#else
	if ((ndev_vif->chan->hw_value <= 14) && (!sdev->fw_2g_40mhz_enabled) &&
	    (ndev_vif->channel_type > NL80211_CHAN_HT20)) {
		SLSI_NET_ERR(dev, "Configuration error: 40 MHz on 2.4 GHz is not supported. Channel_no: %d Channel_width: %d\n", ndev_vif->chan->hw_value, slsi_get_chann_info(sdev, ndev_vif->channel_type));
		goto exit_with_error;
	}
#endif

	return 0;

exit_with_error:
	return -EINVAL;
}
+
+static int slsi_get_max_bw_mhz(struct slsi_dev *sdev, u16 prim_chan_cf)
+{
+ int i;
+ struct ieee80211_regdomain *regd = sdev->device_config.domain_info.regdomain;
+
+ if (!regd) {
+ SLSI_WARN(sdev, "NO regdomain info\n");
+ return 0;
+ }
+
+ for (i = 0; i < regd->n_reg_rules; i++) {
+ if ((regd->reg_rules[i].freq_range.start_freq_khz / 1000 <= prim_chan_cf - 10) &&
+ (regd->reg_rules[i].freq_range.end_freq_khz / 1000 >= prim_chan_cf + 10))
+ return regd->reg_rules[i].freq_range.max_bandwidth_khz / 1000;
+ }
+
+ SLSI_WARN(sdev, "Freq(%d) not found in regdomain\n", prim_chan_cf);
+ return 0;
+}
+
+int slsi_start_ap(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *settings)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct net_device *wlan_dev;
+ u8 device_address[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ int r = 0;
+ const u8 *wpa_ie_pos = NULL;
+ size_t wpa_ie_len = 0;
+ const u8 *wmm_ie_pos = NULL;
+ size_t wmm_ie_len = 0;
+ const u8 *country_ie = NULL;
+ char alpha2[SLSI_COUNTRY_CODE_LEN];
+ bool append_vht_ies = false;
+ const u8 *ie;
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ int wifi_sharing_channel_switched = 0;
+ struct netdev_vif *ndev_sta_vif;
+ int invalid_channel = 0;
+#endif
+ int skip_indoor_check_for_wifi_sharing = 0;
+ u8 *ds_params_ie = NULL;
+ struct ieee80211_mgmt *mgmt;
+ u16 beacon_ie_head_len;
+ u8 *ht_operation_ie = NULL;
+ struct ieee80211_channel *channel = NULL;
+ int indoor_channel = 0;
+ int i;
+ u32 chan_flags;
+ u16 center_freq;
+
+ SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+ if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+ SLSI_WARN(sdev, "device not started yet (device_state:%d)\n", sdev->device_state);
+ r = -EINVAL;
+ goto exit_with_start_stop_mutex;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ /* Abort any ongoing wlan scan. */
+ wlan_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ if (wlan_dev)
+ slsi_abort_hw_scan(sdev, wlan_dev);
+
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "AP frequency received: %d\n", settings->chandef.chan->center_freq);
+ mgmt = (struct ieee80211_mgmt *)settings->beacon.head;
+ beacon_ie_head_len = settings->beacon.head_len - ((u8 *)mgmt->u.beacon.variable - (u8 *)mgmt);
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ ndev_sta_vif = netdev_priv(wlan_dev);
+ if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif)) {
+ SLSI_MUTEX_LOCK(ndev_sta_vif->vif_mutex);
+ if ((ndev_sta_vif->activated) && (ndev_sta_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ (ndev_sta_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTING ||
+ ndev_sta_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ invalid_channel = slsi_select_wifi_sharing_ap_channel(wiphy, dev, settings, sdev,
+ &wifi_sharing_channel_switched);
+ skip_indoor_check_for_wifi_sharing = 1;
+ if (invalid_channel) {
+ SLSI_NET_ERR(dev, "Rejecting AP start req at host (invalid channel)\n");
+ SLSI_MUTEX_UNLOCK(ndev_sta_vif->vif_mutex);
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(ndev_sta_vif->vif_mutex);
+ }
+#endif
+
+ memset(&ndev_vif->ap, 0, sizeof(ndev_vif->ap));
+ /* Initialise all allocated peer structures to remove old data. */
+ /*slsi_netif_init_all_peers(sdev, dev);*/
+
+ /* Reg domain changes */
+ country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, settings->beacon.tail, settings->beacon.tail_len);
+ if (country_ie) {
+ country_ie += 2;
+ memcpy(alpha2, country_ie, SLSI_COUNTRY_CODE_LEN);
+ if (memcmp(sdev->device_config.domain_info.regdomain->alpha2, alpha2, SLSI_COUNTRY_CODE_LEN - 1) != 0) {
+ if (slsi_set_country_update_regd(sdev, alpha2, SLSI_COUNTRY_CODE_LEN) != 0) {
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+ }
+ }
+ if (!skip_indoor_check_for_wifi_sharing && sdev->band_5g_supported &&
+ ((settings->chandef.chan->center_freq / 1000) == 5)) {
+ channel = ieee80211_get_channel(sdev->wiphy, settings->chandef.chan->center_freq);
+ if (!channel) {
+ SLSI_ERR(sdev, "Invalid frequency %d used to start AP. Channel not found\n",
+ settings->chandef.chan->center_freq);
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+ if (ndev_vif->iftype != NL80211_IFTYPE_P2P_GO) {
+ if ((channel->flags) & (IEEE80211_CHAN_INDOOR_ONLY)) {
+ chan_flags = (IEEE80211_CHAN_INDOOR_ONLY | IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_DISABLED |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 10, 13)
+ IEEE80211_CHAN_PASSIVE_SCAN
+#else
+ IEEE80211_CHAN_NO_IR
+#endif
+ );
+
+ for (i = 0; i < wiphy->bands[NL80211_BAND_5GHZ]->n_channels; i++) {
+ if (!(wiphy->bands[NL80211_BAND_5GHZ]->channels[i].flags & chan_flags)) {
+ center_freq = wiphy->bands[NL80211_BAND_5GHZ]->channels[i].center_freq;
+ settings->chandef.chan = ieee80211_get_channel(wiphy, center_freq);
+ settings->chandef.center_freq1 = center_freq;
+ SLSI_DBG1(sdev, SLSI_CFG80211, "ap valid frequency:%d,chan_flags:%x\n",
+ center_freq,
+ wiphy->bands[NL80211_BAND_5GHZ]->channels[i].flags);
+ indoor_channel = 1;
+ break;
+ }
+ }
+ if (indoor_channel == 0) {
+ SLSI_ERR(sdev, "No valid channel found to start the AP");
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+ }
+ }
+ }
+
+ r = slsi_ap_start_validate(dev, sdev, settings);
+ if (r != 0)
+ goto exit_with_vif_mutex;
+
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO) {
+ slsi_p2p_group_start_remove_unsync_vif(sdev);
+ SLSI_ETHER_COPY(device_address, dev->dev_addr);
+ if (keep_alive_period != SLSI_P2PGO_KEEP_ALIVE_PERIOD_SEC)
+ if (slsi_set_uint_mib(sdev, NULL, SLSI_PSID_UNIFI_MLMEGO_KEEP_ALIVE_TIMEOUT,
+ keep_alive_period) != 0) {
+ SLSI_NET_ERR(dev, "P2PGO Keep Alive MIB set failed");
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ SLSI_NET_DBG1(dev, SLSI_MLME, "Channel: %d, Maximum bandwidth: %d\n", ndev_vif->chandef->chan->hw_value,
+ slsi_get_max_bw_mhz(sdev, ndev_vif->chandef->chan->center_freq));
+ /* 11ac configuration (5GHz and VHT) */
+ if ((ndev_vif->chandef->chan->hw_value >= 36) && (ndev_vif->chandef->chan->hw_value < 165) &&
+ (sdev->fw_vht_enabled) && sdev->allow_switch_80_mhz &&
+ (slsi_get_max_bw_mhz(sdev, ndev_vif->chandef->chan->center_freq) >= 80)) {
+ u16 oper_chan = ndev_vif->chandef->chan->hw_value;
+ append_vht_ies = true;
+ ndev_vif->chandef->width = NL80211_CHAN_WIDTH_80;
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "5 GHz- Include VHT\n");
+ if ((oper_chan >= 36) && (oper_chan <= 48))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(42, NL80211_BAND_5GHZ);
+ else if ((oper_chan >= 149) && (oper_chan <= 161))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(155, NL80211_BAND_5GHZ);
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ /* In wifi sharing case, AP can start on STA channel even though it is DFS channel*/
+ if (wifi_sharing_channel_switched == 1) {
+ if ((oper_chan >= 52) && (oper_chan <= 64))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(58,
+ NL80211_BAND_5GHZ);
+ else if ((oper_chan >= 100) && (oper_chan <= 112))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(106,
+ NL80211_BAND_5GHZ);
+ else if ((oper_chan >= 116) && (oper_chan <= 128))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(122,
+ NL80211_BAND_5GHZ);
+ else if ((oper_chan >= 132) && (oper_chan <= 144))
+ ndev_vif->chandef->center_freq1 = ieee80211_channel_to_frequency(138,
+ NL80211_BAND_5GHZ);
+ }
+#endif
+ } else if (sdev->fw_ht_enabled && sdev->allow_switch_40_mhz &&
+ slsi_get_max_bw_mhz(sdev, ndev_vif->chandef->chan->center_freq) >= 40 &&
+ ((ndev_vif->chandef->chan->hw_value < 165 && ndev_vif->chandef->chan->hw_value >= 36) ||
+ (ndev_vif->chandef->chan->hw_value < 12 && sdev->fw_2g_40mhz_enabled &&
+ ndev_vif->iftype == NL80211_IFTYPE_P2P_GO))) {
+ /* HT40 configuration (5GHz/2GHz and HT) */
+ u16 oper_chan = ndev_vif->chandef->chan->hw_value;
+ u8 bw_40_minus_channels[] = { 40, 48, 153, 161, 5, 6, 7, 8, 9, 10, 11 };
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ u8 bw_40_minus_dfs_channels[] = { 144, 136, 128, 120, 112, 104, 64, 56 };
+#endif
+ u8 ch;
+
+ ndev_vif->chandef->width = NL80211_CHAN_WIDTH_40;
+ ndev_vif->chandef->center_freq1 = ndev_vif->chandef->chan->center_freq + 10;
+ for (ch = 0; ch < ARRAY_SIZE(bw_40_minus_channels); ch++)
+ if (oper_chan == bw_40_minus_channels[ch]) {
+ ndev_vif->chandef->center_freq1 = ndev_vif->chandef->chan->center_freq - 10;
+ break;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ if (wifi_sharing_channel_switched == 1) {
+ for (ch = 0; ch < ARRAY_SIZE(bw_40_minus_dfs_channels); ch++)
+ if (oper_chan == bw_40_minus_dfs_channels[ch]) {
+ ndev_vif->chandef->center_freq1 = ndev_vif->chandef->chan->center_freq - 10;
+ break;
+ }
+ }
+#endif
+ }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ if (slsi_check_channelization(sdev, ndev_vif->chandef, wifi_sharing_channel_switched) != 0) {
+#else
+ if (slsi_check_channelization(sdev, ndev_vif->chandef, 0) != 0) {
+#endif
+#else
+ if (slsi_check_channelization(sdev, ndev_vif->channel_type) != 0) {
+#endif
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+
+ if (ndev_vif->iftype == NL80211_IFTYPE_AP) {
+ /* Legacy AP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ if (ndev_vif->chandef->width == NL80211_CHAN_WIDTH_20)
+#else
+ if (ndev_vif->channel_type == NL80211_CHAN_HT20)
+#endif
+ slsi_ap_start_obss_scan(sdev, dev, ndev_vif);
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ if (ndev_vif->chandef->width <= NL80211_CHAN_WIDTH_20) {
+ /* Enable LDPC, SGI20 and SGI40 for both SoftAP & P2PGO if firmware supports */
+ if (cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, settings->beacon.tail, settings->beacon.tail_len)) {
+ u8 enforce_ht_cap1 = sdev->fw_ht_cap[0] & (IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_SGI_20);
+ u8 enforce_ht_cap2 = sdev->fw_ht_cap[1] & (IEEE80211_HT_CAP_RX_STBC >> 8);
+
+ slsi_modify_ies(dev, WLAN_EID_HT_CAPABILITY, (u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, 2, enforce_ht_cap1);
+ slsi_modify_ies(dev, WLAN_EID_HT_CAPABILITY, (u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, 3, enforce_ht_cap2);
+ }
+ } else if (cfg80211_chandef_valid(ndev_vif->chandef)) {
+ u8 *ht_operation_ie;
+ u8 sec_chan_offset = 0;
+ u8 ch;
+ u8 bw_40_minus_channels[] = { 40, 48, 153, 161, 5, 6, 7, 8, 9, 10, 11 };
+
+ ht_operation_ie = (u8 *)cfg80211_find_ie(WLAN_EID_HT_OPERATION, settings->beacon.tail,
+ settings->beacon.tail_len);
+ if (!ht_operation_ie) {
+ SLSI_NET_ERR(dev, "HT Operation IE is not passed by wpa_supplicant");
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+
+ sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ for (ch = 0; ch < ARRAY_SIZE(bw_40_minus_channels); ch++)
+ if (bw_40_minus_channels[ch] == ndev_vif->chandef->chan->hw_value) {
+ sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ }
+
+ /* Change HT Information IE subset 1 */
+ ht_operation_ie += 3;
+ *(ht_operation_ie) |= sec_chan_offset;
+ *(ht_operation_ie) |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+
+ /* For 80MHz, Enable HT Capabilities : Support 40MHz Channel Width, SGI20 and SGI40
+ * for AP (both softAp as well as P2P GO), if firmware supports.
+ */
+ if (cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, settings->beacon.tail,
+ settings->beacon.tail_len)) {
+ u8 enforce_ht_cap1 = sdev->fw_ht_cap[0] & (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_LDPC_CODING);
+ u8 enforce_ht_cap2 = sdev->fw_ht_cap[1] & (IEEE80211_HT_CAP_RX_STBC >> 8);
+
+ slsi_modify_ies(dev, WLAN_EID_HT_CAPABILITY, (u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, 2, enforce_ht_cap1);
+ slsi_modify_ies(dev, WLAN_EID_HT_CAPABILITY, (u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, 3, enforce_ht_cap2);
+ }
+ }
+#endif
+
+ if (indoor_channel == 1
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ || (wifi_sharing_channel_switched == 1)
+#endif
+ ) {
+ slsi_modify_ies_on_channel_switch(dev, settings, ds_params_ie, ht_operation_ie, mgmt, beacon_ie_head_len);
+ }
+ ndev_vif->vif_type = FAPI_VIFTYPE_AP;
+
+ if (slsi_mlme_add_vif(sdev, dev, dev->dev_addr, device_address) != 0) {
+ SLSI_NET_ERR(dev, "slsi_mlme_add_vif failed\n");
+ r = -EINVAL;
+ goto exit_with_vif_mutex;
+ }
+
+ if (slsi_vif_activated(sdev, dev) != 0) {
+ SLSI_NET_ERR(dev, "slsi_vif_activated failed\n");
+ goto exit_with_vif;
+ }
+
+ /* Extract the WMM and WPA IEs from settings->beacon.tail - This is sent in add_info_elements and shouldn't be included in start_req
+ * Cache IEs to be used in later add_info_elements_req. The IEs would be freed during AP stop
+ */
+ wpa_ie_pos = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, settings->beacon.tail, settings->beacon.tail_len);
+ if (wpa_ie_pos) {
+ wpa_ie_len = (size_t)(*(wpa_ie_pos + 1) + 2); /* For 0xdd (1) and Tag Length (1) */
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "WPA IE found: Length = %zu\n", wpa_ie_len);
+ SLSI_EC_GOTO(slsi_cache_ies(wpa_ie_pos, wpa_ie_len, &ndev_vif->ap.cache_wpa_ie, &ndev_vif->ap.wpa_ie_len), r, exit_with_vif);
+ }
+
+ wmm_ie_pos = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, settings->beacon.tail, settings->beacon.tail_len);
+ if (wmm_ie_pos) {
+ wmm_ie_len = (size_t)(*(wmm_ie_pos + 1) + 2);
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "WMM IE found: Length = %zu\n", wmm_ie_len);
+ SLSI_EC_GOTO(slsi_cache_ies(wmm_ie_pos, wmm_ie_len, &ndev_vif->ap.cache_wmm_ie, &ndev_vif->ap.wmm_ie_len), r, exit_with_vif);
+ }
+
+ slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+
+ /* Set Vendor specific IEs (WPA, WMM, WPS, P2P) for Beacon, Probe Response and Association Response
+ * The Beacon and Assoc Rsp IEs can include Extended Capability (WLAN_EID_EXT_CAPAB) IE when supported.
+ * Some other IEs (like internetworking, etc) can also come if supported.
+ * The add_info should include only vendor specific IEs and other IEs should be removed if supported in future.
+ */
+ if ((wmm_ie_pos) || (wpa_ie_pos) || (settings->beacon.beacon_ies_len > 0 && settings->beacon.beacon_ies)) {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Add info elements for beacon\n");
+ SLSI_EC_GOTO(slsi_ap_prepare_add_info_ies(ndev_vif, settings->beacon.beacon_ies, settings->beacon.beacon_ies_len), r, exit_with_vif);
+ SLSI_EC_GOTO(slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_BEACON, ndev_vif->ap.add_info_ies, ndev_vif->ap.add_info_ies_len), r, exit_with_vif);
+ slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+ }
+
+ if ((wmm_ie_pos) || (wpa_ie_pos) || (settings->beacon.proberesp_ies_len > 0 && settings->beacon.proberesp_ies)) {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Add info elements for probe response\n");
+ SLSI_EC_GOTO(slsi_ap_prepare_add_info_ies(ndev_vif, settings->beacon.proberesp_ies, settings->beacon.proberesp_ies_len), r, exit_with_vif);
+ SLSI_EC_GOTO(slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_RESPONSE, ndev_vif->ap.add_info_ies, ndev_vif->ap.add_info_ies_len), r, exit_with_vif);
+ slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+ }
+
+ if ((wmm_ie_pos) || (wpa_ie_pos) || (settings->beacon.assocresp_ies_len > 0 && settings->beacon.assocresp_ies)) {
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Add info elements for assoc response\n");
+ SLSI_EC_GOTO(slsi_ap_prepare_add_info_ies(ndev_vif, settings->beacon.assocresp_ies, settings->beacon.assocresp_ies_len), r, exit_with_vif);
+ SLSI_EC_GOTO(slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_ASSOCIATION_RESPONSE, ndev_vif->ap.add_info_ies, ndev_vif->ap.add_info_ies_len), r, exit_with_vif);
+ slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+ }
+
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO) {
+ u32 af_bmap_active = SLSI_ACTION_FRAME_PUBLIC;
+ u32 af_bmap_suspended = SLSI_ACTION_FRAME_PUBLIC;
+
+ r = slsi_mlme_register_action_frame(sdev, dev, af_bmap_active, af_bmap_suspended);
+ if (r != 0) {
+ SLSI_NET_ERR(dev, "slsi_mlme_register_action_frame failed: resultcode = %d\n", r);
+ goto exit_with_vif;
+ }
+ }
+
+ if (append_vht_ies) {
+ ndev_vif->ap.mode = SLSI_80211_MODE_11AC;
+ } else if (cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, settings->beacon.tail, settings->beacon.tail_len) &&
+ cfg80211_find_ie(WLAN_EID_HT_OPERATION, settings->beacon.tail, settings->beacon.tail_len)) {
+ ndev_vif->ap.mode = SLSI_80211_MODE_11N;
+ } else {
+ ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, settings->beacon.tail, settings->beacon.tail_len);
+ if (ie)
+ ndev_vif->ap.mode = slsi_get_supported_mode(ie);
+ }
+
+ r = slsi_mlme_start(sdev, dev, dev->dev_addr, settings, wpa_ie_pos, wmm_ie_pos, append_vht_ies);
+
+ if ((indoor_channel == 1)
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ || (wifi_sharing_channel_switched == 1)
+#endif
+#ifdef CONFIG_SCSC_WLAN_ACS_ENABLE
+ || (sdev->acs_channel_switched == true)
+#endif
+ )
+ cfg80211_ch_switch_notify(dev, &settings->chandef);
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ if (r == 0)
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Soft Ap started on frequency: %d\n",
+ settings->chandef.chan->center_freq);
+ if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
+ ndev_vif->chan = settings->chandef.chan;
+#endif
+ if (r != 0) {
+ SLSI_NET_ERR(dev, "Start ap failed: resultcode = %d frequency = %d\n", r,
+ settings->chandef.chan->center_freq);
+ goto exit_with_vif;
+ } else if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO) {
+ SLSI_P2P_STATE_CHANGE(sdev, P2P_GROUP_FORMED_GO);
+ }
+#ifdef CONFIG_SCSC_WLAN_SET_NUM_ANTENNAS
+ if (ndev_vif->iftype == NL80211_IFTYPE_AP) {
+ /* Don't care results. */
+ slsi_set_num_antennas(dev, 1 /*SISO*/);
+ }
+#endif
+ ndev_vif->ap.beacon_interval = settings->beacon_interval;
+ ndev_vif->ap.ssid_len = settings->ssid_len;
+ memcpy(ndev_vif->ap.ssid, settings->ssid, settings->ssid_len);
+
+ netif_carrier_on(dev);
+
+ if (ndev_vif->ipaddress != cpu_to_be32(0))
+ /* Static IP is assigned already */
+ slsi_ip_address_changed(sdev, dev, ndev_vif->ipaddress);
+
+ r = slsi_read_disconnect_ind_timeout(sdev, SLSI_PSID_UNIFI_DISCONNECT_TIMEOUT);
+ if (r != 0)
+ sdev->device_config.ap_disconnect_ind_timeout = *sdev->sig_wait_cfm_timeout;
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "slsi_read_disconnect_ind_timeout: timeout = %d", sdev->device_config.ap_disconnect_ind_timeout);
+ goto exit_with_vif_mutex;
+exit_with_vif:
+ slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+ slsi_mlme_del_vif(sdev, dev);
+ slsi_vif_deactivated(sdev, dev);
+ r = -EINVAL;
+exit_with_vif_mutex:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit_with_start_stop_mutex:
+ SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+ return r;
+}
+
+int slsi_change_beacon(struct wiphy *wiphy, struct net_device *dev,
+		       struct cfg80211_beacon_data *info)
+{
+	/* Beacon content updates after start_ap are not supported by this
+	 * driver, so reject the request outright.
+	 */
+	SLSI_UNUSED_PARAMETER(wiphy);
+	SLSI_UNUSED_PARAMETER(dev);
+	SLSI_UNUSED_PARAMETER(info);
+
+	return -EOPNOTSUPP;
+}
+
+int slsi_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+{
+	/* This handler only resets the per-netdev throughput statistics;
+	 * it always reports success to cfg80211.
+	 */
+	SLSI_UNUSED_PARAMETER(wiphy);
+	slsi_reset_throughput_stats(dev);
+	return 0;
+}
+
+/* Transmit a public action frame on the P2P group (GO/CLI) VIF.
+ *
+ * Uses the SWLAN/P2PX netdev's VIF rather than @dev. If the target channel
+ * differs from the group's operating channel the frame is sent off-channel
+ * with a forced-schedule dwell time. For all subtypes except GO_NEG_RSP the
+ * expected peer response subtype is recorded in sdev->p2p_group_exp_frame.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int slsi_p2p_group_mgmt_tx(const struct ieee80211_mgmt *mgmt, struct wiphy *wiphy,
+				  struct net_device *dev, struct ieee80211_channel *chan,
+				  unsigned int wait, const u8 *buf, size_t len,
+				  bool dont_wait_for_ack, u64 *cookie)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif;
+	struct net_device *netdev;
+	int subtype = slsi_p2p_get_public_action_subtype(mgmt);
+	int r = 0;
+	u32 host_tag = slsi_tx_mgmt_host_tag(sdev);
+	u16 freq = 0;
+	u32 dwell_time = SLSI_FORCE_SCHD_ACT_FRAME_MSEC;
+	u16 data_unit_desc = FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME;
+
+	/* Only one group action frame exchange may be outstanding at a time */
+	if (sdev->p2p_group_exp_frame != SLSI_P2P_PA_INVALID) {
+		SLSI_NET_ERR(dev, "sdev->p2p_group_exp_frame : %d\n", sdev->p2p_group_exp_frame);
+		return -EINVAL;
+	}
+	netdev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
+	/* Fix: guard against a missing group netdev before netdev_priv() */
+	if (!netdev) {
+		SLSI_NET_ERR(dev, "P2P group netdev not found\n");
+		return -EINVAL;
+	}
+	ndev_vif = netdev_priv(netdev);
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	/* Fix: the final argument previously repeated ndev_vif->chan->hw_value;
+	 * the format string expects sdev->p2p_group_exp_frame here.
+	 */
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "Sending Action frame (%s) on p2p group vif (%d), vif_index = %d,"
+		      "vif_type = %d, chan->hw_value = %d, ndev_vif->chan->hw_value = %d, wait = %d,"
+		      "sdev->p2p_group_exp_frame = %d\n", slsi_p2p_pa_subtype_text(subtype), ndev_vif->activated,
+		      ndev_vif->ifnum, ndev_vif->vif_type, chan->hw_value, ndev_vif->chan->hw_value, wait,
+		      sdev->p2p_group_exp_frame);
+
+	if (!((ndev_vif->iftype == NL80211_IFTYPE_P2P_GO) || (ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT)))
+		goto exit_with_error;
+
+	/* Off-channel transmit: tell firmware the frequency and dwell there */
+	if (chan->hw_value != ndev_vif->chan->hw_value) {
+		freq = SLSI_FREQ_HOST_TO_FW(chan->center_freq);
+		dwell_time = wait;
+	}
+
+	/* In case of GO, don't wait for resp/cfm packets for go-negotiation. */
+	if (subtype != SLSI_P2P_PA_GO_NEG_RSP)
+		sdev->p2p_group_exp_frame = slsi_p2p_get_exp_peer_frame_subtype(subtype);
+
+	r = slsi_mlme_send_frame_mgmt(sdev, netdev, buf, len, data_unit_desc, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, freq, dwell_time * 1000, 0);
+	if (r)
+		goto exit_with_lock;
+	slsi_assign_cookie_id(cookie, &ndev_vif->mgmt_tx_cookie);
+	r = slsi_set_mgmt_tx_data(ndev_vif, *cookie, host_tag, buf, len); /* If error then it is returned in exit */
+	goto exit_with_lock;
+
+exit_with_error:
+	r = -EINVAL;
+exit_with_lock:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+
+/* Handle mgmt_tx callback for P2P modes (P2P Device / unsync VIF).
+ *
+ * Only action frames with a recognised public action subtype are accepted.
+ * The unsync VIF is activated (or retuned to the requested channel) on
+ * demand; the frame is handed to the firmware via mlme-send-frame. When a
+ * peer response is expected, the VIF is retained for the wait period via
+ * delayed work. Returns 0 on success or -EINVAL on any failure.
+ */
+static int slsi_p2p_mgmt_tx(const struct ieee80211_mgmt *mgmt, struct wiphy *wiphy,
+			    struct net_device *dev, struct netdev_vif *ndev_vif,
+			    struct ieee80211_channel *chan, unsigned int wait,
+			    const u8 *buf, size_t len, bool dont_wait_for_ack, u64 *cookie)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	int ret = 0;
+
+	if (ieee80211_is_action(mgmt->frame_control)) {
+		u16 host_tag = slsi_tx_mgmt_host_tag(sdev);
+		int subtype = slsi_p2p_get_public_action_subtype(mgmt);
+		u8 exp_peer_frame;
+		u32 dwell_time = 0;
+
+		SLSI_NET_DBG2(dev, SLSI_CFG80211, "Action frame (%s), unsync_vif_active (%d)\n", slsi_p2p_pa_subtype_text(subtype), ndev_vif->activated);
+
+		if (subtype == SLSI_P2P_PA_INVALID) {
+			SLSI_NET_ERR(dev, "Invalid Action frame subtype\n");
+			goto exit_with_error;
+		}
+
+		/* Check if unsync vif is available */
+		if (sdev->p2p_state == P2P_IDLE_NO_VIF)
+			if (slsi_p2p_vif_activate(sdev, dev, chan, wait, false) != 0)
+				goto exit_with_error;
+
+		/* Clear Probe Response IEs if vif was already present with a different channel */
+		if (ndev_vif->driver_channel != chan->hw_value) {
+			if (slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_RESPONSE, NULL, 0) != 0)
+				SLSI_NET_ERR(dev, "Clearing Probe Response IEs failed for unsync vif\n");
+			slsi_unsync_vif_set_probe_rsp_ie(ndev_vif, NULL, 0);
+
+			/* Retune the unsync VIF to the requested channel */
+			if (slsi_mlme_set_channel(sdev, dev, chan, SLSI_FW_CHANNEL_DURATION_UNSPECIFIED, 0, 0) != 0)
+				goto exit_with_vif;
+			else {
+				ndev_vif->chan = chan;
+				ndev_vif->driver_channel = chan->hw_value;
+			}
+		}
+
+		/* Check if peer frame response is expected */
+		exp_peer_frame = slsi_p2p_get_exp_peer_frame_subtype(subtype);
+
+		if (exp_peer_frame != SLSI_P2P_PA_INVALID) {
+			if ((subtype == SLSI_P2P_PA_GO_NEG_RSP) && (slsi_p2p_get_go_neg_rsp_status(dev, mgmt) != SLSI_P2P_STATUS_CODE_SUCCESS)) {
+				SLSI_NET_DBG1(dev, SLSI_CFG80211, "GO_NEG_RSP Tx, peer response not expected\n");
+				exp_peer_frame = SLSI_P2P_PA_INVALID;
+			} else {
+				SLSI_NET_DBG1(dev, SLSI_CFG80211, "Peer response expected with action frame (%s)\n",
+					      slsi_p2p_pa_subtype_text(exp_peer_frame));
+
+				/* Drop any previously stored tx data before overwriting it */
+				if (ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID)
+					(void)slsi_set_mgmt_tx_data(ndev_vif, 0, 0, NULL, 0);
+
+				/* Change Force Schedule Duration as peer response is expected */
+				if (wait)
+					dwell_time = wait;
+				else
+					dwell_time = SLSI_FORCE_SCHD_ACT_FRAME_MSEC;
+			}
+		}
+
+		slsi_assign_cookie_id(cookie, &ndev_vif->mgmt_tx_cookie);
+
+		/* Send the action frame, transmission status indication would be received later */
+		if (slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, dwell_time * 1000, 0) != 0)
+			goto exit_with_vif;
+		/* Track whether the driver is inside a P2P negotiation/provisioning procedure */
+		if (subtype == SLSI_P2P_PA_GO_NEG_CFM)
+			ndev_vif->drv_in_p2p_procedure = false;
+		else if ((subtype == SLSI_P2P_PA_GO_NEG_REQ) || (subtype == SLSI_P2P_PA_PROV_DISC_REQ))
+			ndev_vif->drv_in_p2p_procedure = true;
+		/* If multiple frames are requested for tx, only the info of first frame would be stored */
+		if (ndev_vif->mgmt_tx_data.host_tag == 0) {
+			unsigned int n_wait = 0;
+
+			SLSI_NET_DBG1(dev, SLSI_CFG80211, "Store mgmt frame tx data for cookie = 0x%llx\n", *cookie);
+
+			ret = slsi_set_mgmt_tx_data(ndev_vif, *cookie, host_tag, buf, len);
+			if (ret != 0)
+				goto exit_with_vif;
+			ndev_vif->mgmt_tx_data.exp_frame = exp_peer_frame;
+
+			SLSI_P2P_STATE_CHANGE(sdev, P2P_ACTION_FRAME_TX_RX);
+			if ((exp_peer_frame == SLSI_P2P_PA_GO_NEG_RSP) || (exp_peer_frame == SLSI_P2P_PA_GO_NEG_CFM))
+				/* Retain vif for larger duration that wpa_supplicant asks to wait,
+				 * during GO-Negotiation to allow peer to retry GO neg in bad radio condition.
+				 * Some of phones retry GO-Negotiation after 2 seconds
+				 */
+				n_wait = SLSI_P2P_NEG_PROC_UNSYNC_VIF_RETAIN_DURATION;
+			else if (exp_peer_frame != SLSI_P2P_PA_INVALID)
+				/* If a peer response is expected queue work to retain vif till wait time else the work will be handled in mgmt_tx_cancel_wait */
+				n_wait = wait + SLSI_P2P_MGMT_TX_EXTRA_MSEC;
+			if (n_wait) {
+				SLSI_NET_DBG2(dev, SLSI_CFG80211, "retain unsync vif for duration (%d) msec\n", n_wait);
+				slsi_p2p_queue_unsync_vif_del_work(ndev_vif, n_wait);
+			}
+		} else {
+			/* Already a frame Tx is in progress, send immediate tx_status as success. Sending immediate tx status should be ok
+			 * as supplicant is in another procedure and so these frames would be mostly only response frames.
+			 */
+			WARN_ON(sdev->p2p_state != P2P_ACTION_FRAME_TX_RX);
+
+			if (!dont_wait_for_ack) {
+				SLSI_NET_DBG1(dev, SLSI_CFG80211, "Send immediate tx_status (cookie = 0x%llx)\n", *cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+				cfg80211_mgmt_tx_status(&ndev_vif->wdev, *cookie, buf, len, true, GFP_KERNEL);
+#else
+				cfg80211_mgmt_tx_status(dev, *cookie, buf, len, true, GFP_KERNEL);
+#endif
+			}
+		}
+		goto exit;
+	}
+
+	/* Else send failure for unexpected management frame */
+	SLSI_NET_ERR(dev, "Drop Tx frame: Unexpected Management frame\n");
+	goto exit_with_error;
+
+exit_with_vif:
+	/* Tear down the unsync VIF unless it is being kept alive for listening */
+	if (sdev->p2p_state != P2P_LISTENING)
+		slsi_p2p_vif_deactivate(sdev, dev, true);
+exit_with_error:
+	ret = -EINVAL;
+exit:
+	return ret;
+}
+
+/* cfg80211 mgmt_tx_cancel_wait callback.
+ *
+ * Cancels the wait associated with a previous mgmt_tx identified by
+ * @cookie: clears stored tx data and reschedules the unsync-VIF deletion
+ * work according to the current P2P/HS2 state. Always returns 0.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+int slsi_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+			     struct wireless_dev *wdev,
+			     u64 cookie)
+{
+	struct net_device *dev = wdev->netdev;
+
+#else
+int slsi_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+			     struct net_device *dev,
+			     u64 cookie)
+{
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	/* Fix: the format string has eight conversions but only seven
+	 * arguments were supplied (nothing was passed for "vif_index"),
+	 * shifting every later argument one place. Pass ndev_vif->ifnum
+	 * for vif_index, matching the other trace messages in this file.
+	 */
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "iface_num = %d, cookie = 0x%llx, vif_index = %d, vif_type = %d,"
+		      "sdev->p2p_state = %d, ndev_vif->mgmt_tx_data.cookie = 0x%llx, sdev->p2p_group_exp_frame = %d,"
+		      "sdev->wlan_unsync_vif_state = %d\n", ndev_vif->ifnum, cookie, ndev_vif->ifnum,
+		      ndev_vif->vif_type, sdev->p2p_state, ndev_vif->mgmt_tx_data.cookie,
+		      sdev->p2p_group_exp_frame, sdev->wlan_unsync_vif_state);
+
+	/* If device was in frame tx_rx state, clear mgmt tx data and change state */
+	if ((sdev->p2p_state == P2P_ACTION_FRAME_TX_RX) && (ndev_vif->mgmt_tx_data.cookie == cookie)) {
+		if (ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID)
+			(void)slsi_mlme_reset_dwell_time(sdev, dev);
+
+		(void)slsi_set_mgmt_tx_data(ndev_vif, 0, 0, NULL, 0);
+		ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+
+		if (delayed_work_pending(&ndev_vif->unsync.roc_expiry_work)) {
+			SLSI_P2P_STATE_CHANGE(sdev, P2P_LISTENING);
+		} else {
+			slsi_p2p_queue_unsync_vif_del_work(ndev_vif, SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC);
+			/* Consistency: use the local sdev (same pointer as ndev_vif->sdev) */
+			SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
+		}
+	} else if ((SLSI_IS_P2P_GROUP_STATE(sdev)) && (sdev->p2p_group_exp_frame != SLSI_P2P_PA_INVALID)) {
+		/* acquire mutex lock if it is not group net dev */
+		slsi_clear_offchannel_data(sdev, (!SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif)) ? true : false);
+	} else if ((sdev->wlan_unsync_vif_state == WLAN_UNSYNC_VIF_TX) && (ndev_vif->mgmt_tx_data.cookie == cookie)) {
+		sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_ACTIVE;
+		cancel_delayed_work(&ndev_vif->unsync.hs2_del_vif_work);
+		queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.hs2_del_vif_work, msecs_to_jiffies(SLSI_HS2_UNSYNC_VIF_EXTRA_MSEC));
+	}
+
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return 0;
+}
+
+/* cfg80211 mgmt_frame_register callback.
+ *
+ * No-op apart from validating the net_device: no per-frame-type
+ * registration is forwarded to the firmware from here.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+void slsi_mgmt_frame_register(struct wiphy *wiphy,
+			      struct wireless_dev *wdev,
+			      u16 frame_type, bool reg)
+{
+	struct net_device *dev = wdev->netdev;
+
+#else
+void slsi_mgmt_frame_register(struct wiphy *wiphy,
+			      struct net_device *dev,
+			      u16 frame_type, bool reg)
+{
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9)) */
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	SLSI_UNUSED_PARAMETER(frame_type);
+	SLSI_UNUSED_PARAMETER(reg);
+#endif
+
+	if (WARN_ON(!dev))
+		return;
+
+	SLSI_UNUSED_PARAMETER(sdev);
+}
+
+/* Transmit a management frame on the WLAN (station) interface.
+ *
+ * If the VIF is inactive, a temporary unsync VIF is activated for the
+ * transmit and scheduled for deletion after @wait ms. If the VIF is
+ * active, the frame is sent on the current channel, or with an explicit
+ * off-channel frequency when the requested channel differs from the
+ * STA's operating channel. On success the cookie and tx data are stored
+ * for the later tx-status indication. Returns 0 or a negative error.
+ */
+static int slsi_wlan_mgmt_tx(struct slsi_dev *sdev, struct net_device *dev,
+			     struct ieee80211_channel *chan, unsigned int wait,
+			     const u8 *buf, size_t len, bool dont_wait_for_ack, u64 *cookie)
+{
+	u32 host_tag = slsi_tx_mgmt_host_tag(sdev);
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int r = 0;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+
+	/* Log the public action subtype for everything except auth frames */
+	if (!ieee80211_is_auth(mgmt->frame_control))
+		slsi_wlan_dump_public_action_subtype(sdev, mgmt, true);
+	if (!ndev_vif->activated) {
+		/* Bring up a temporary unsync VIF for the off-channel tx */
+		r = slsi_wlan_unsync_vif_activate(sdev, dev, chan, wait);
+		if (r)
+			return r;
+
+		r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, wait * 1000, 0);
+		if (r)
+			goto exit_with_vif;
+
+		sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_TX;
+		queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.hs2_del_vif_work, msecs_to_jiffies(wait));
+	} else {
+		/* vif is active*/
+		if (ieee80211_is_auth(mgmt->frame_control)) {
+			SLSI_NET_DBG1(dev, SLSI_CFG80211, "Transmit on the current frequency\n");
+			r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME,
+						      FAPI_MESSAGETYPE_IEEE80211_MGMT, host_tag, 0, wait * 1000, 0);
+			if (r)
+				return r;
+		} else if (ndev_vif->vif_type == FAPI_VIFTYPE_UNSYNCHRONISED) {
+			cancel_delayed_work(&ndev_vif->unsync.hs2_del_vif_work);
+			/*even if we fail to cancel the delayed work, we shall go ahead and send action frames*/
+			if (ndev_vif->driver_channel != chan->hw_value) {
+				r = slsi_mlme_set_channel(sdev, dev, chan, SLSI_FW_CHANNEL_DURATION_UNSPECIFIED, 0, 0);
+				if (r)
+					goto exit_with_vif;
+				else {
+					ndev_vif->driver_channel = chan->hw_value;
+				}
+			}
+			SLSI_NET_DBG1(dev, SLSI_CFG80211, "HS2 vif is active ,send GAS (ANQP) request on channel freq = %d\n", chan->center_freq);
+			r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, wait * 1000, 0);
+			if (r)
+				goto exit_with_vif;
+			sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_TX;
+			queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.hs2_del_vif_work, msecs_to_jiffies(wait));
+		} else if (ndev_vif->chan->hw_value == chan->hw_value) {
+			SLSI_NET_DBG1(dev, SLSI_CFG80211, "STA VIF is active on same channel, send GAS (ANQP) request on channel freq %d\n", chan->center_freq);
+			r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, wait * 1000, 0);
+			if (r)
+				return r;
+		} else {
+			SLSI_NET_DBG1(dev, SLSI_CFG80211, "STA VIF is active on a different channel, send GAS (ANQP) request on channel freq %d\n", chan->center_freq);
+			r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, SLSI_FREQ_HOST_TO_FW(chan->center_freq), wait * 1000, 0);
+			if (r)
+				return r;
+		}
+	}
+
+	/* Store the cookie/tx data so the later tx-status can be matched */
+	slsi_assign_cookie_id(cookie, &ndev_vif->mgmt_tx_cookie);
+	slsi_set_mgmt_tx_data(ndev_vif, *cookie, host_tag, buf, len);
+	return r;
+
+exit_with_vif:
+	slsi_wlan_unsync_vif_deactivate(sdev, dev, true);
+	return r;
+}
+
+/* cfg80211 mgmt_tx callback: top-level dispatcher for management frame
+ * transmission. Depending on the VIF, the frame is routed to the WLAN
+ * path (slsi_wlan_mgmt_tx), the P2P group path (slsi_p2p_group_mgmt_tx)
+ * or the P2P device path (slsi_p2p_mgmt_tx). Probe responses on P2P
+ * interfaces are dropped with an immediate (fake) tx-status.
+ * The three signatures below track cfg80211 API changes across kernels.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+int slsi_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 struct cfg80211_mgmt_tx_params *params,
+		 u64 *cookie)
+{
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+int slsi_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 struct ieee80211_channel *chan, bool offchan,
+		 unsigned int wait, const u8 *buf, size_t len, bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+	struct net_device *dev = wdev->netdev;
+
+#else
+int slsi_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+		 struct ieee80211_channel *chan, bool offchan,
+		 enum nl80211_channel_type channel_type,
+		 bool channel_type_valid, unsigned int wait,
+		 const u8 *buf, size_t len, bool no_cck,
+		 bool dont_wait_for_ack, u64 *cookie)
+{
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+
+	/* Note to explore for AP ::All public action frames which come to host should be handled properly
+	 * Additionally, if PMF is negotiated over the link, the host shall not issue "mlme-send-frame.request"
+	 * primitive for action frames before the pairwise keys have been installed in F/W. Presently, for
+	 * SoftAP with PMF support, there is no scenario in which slsi_mlme_send_frame will be called for
+	 * action frames for VIF TYPE = AP.
+	 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+	/* >= 3.13 packs all tx parameters into one struct; unpack for the
+	 * common code below.
+	 */
+	struct net_device *dev = wdev->netdev;
+	struct ieee80211_channel *chan = params->chan;
+	bool offchan = params->offchan;
+	unsigned int wait = params->wait;
+	const u8 *buf = params->buf;
+	size_t len = params->len;
+	bool no_cck = params->no_cck;
+	bool dont_wait_for_ack = params->dont_wait_for_ack;
+#endif
+
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	const struct ieee80211_mgmt *mgmt = (const struct ieee80211_mgmt *)buf;
+	int r = 0;
+
+	SLSI_UNUSED_PARAMETER(offchan);
+	SLSI_UNUSED_PARAMETER(no_cck);
+	SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+	if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+		SLSI_WARN(sdev, "device not started yet (device_state:%d)\n", sdev->device_state);
+		r = -EINVAL;
+		goto exit;
+	}
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	if (!(ieee80211_is_auth(mgmt->frame_control))) {
+		SLSI_NET_DBG2(dev, SLSI_CFG80211, "Mgmt Frame Tx: iface_num = %d, channel = %d, wait = %d, noAck = %d,"
+			      "offchannel = %d, mgmt->frame_control = %d, vif_type = %d\n", ndev_vif->ifnum, chan->hw_value,
+			      wait, dont_wait_for_ack, offchan, mgmt->frame_control, ndev_vif->vif_type);
+	} else {
+		SLSI_NET_DBG2(dev, SLSI_CFG80211, "Received Auth Frame");
+	}
+
+	/* Only management frames may be sent through this callback */
+	if (!(ieee80211_is_mgmt(mgmt->frame_control))) {
+		SLSI_NET_ERR(dev, "Drop Tx frame: Not a Management frame\n");
+		r = -EINVAL;
+		goto exit;
+	}
+	/* WLAN interface, or an auth frame on an AP VIF (e.g. external auth) */
+	if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif) || (ndev_vif->iftype == NL80211_IFTYPE_AP && (ieee80211_is_auth(mgmt->frame_control)))) {
+		r = slsi_wlan_mgmt_tx(SDEV_FROM_WIPHY(wiphy), dev, chan, wait, buf, len, dont_wait_for_ack, cookie);
+		goto exit;
+	}
+
+	/*P2P*/
+
+	/* Drop Probe Responses which can come in P2P Device and P2P Group role */
+	if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+		/* Ideally supplicant doesn't expect Tx status for Probe Rsp. Send tx status just in case it requests ack */
+		if (!dont_wait_for_ack) {
+			slsi_assign_cookie_id(cookie, &ndev_vif->mgmt_tx_cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+			cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_KERNEL);
+#else
+			cfg80211_mgmt_tx_status(dev, *cookie, buf, len, true, GFP_KERNEL);
+#endif
+		}
+		goto exit;
+	}
+
+	if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
+		struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+		/* Check whether STA scan is running or not. If yes, then abort the STA scan */
+		slsi_abort_sta_scan(sdev);
+		if (SLSI_IS_P2P_GROUP_STATE(sdev))
+			r = slsi_p2p_group_mgmt_tx(mgmt, wiphy, dev, chan, wait, buf, len, dont_wait_for_ack, cookie);
+		else
+			r = slsi_p2p_mgmt_tx(mgmt, wiphy, dev, ndev_vif, chan, wait, buf, len, dont_wait_for_ack, cookie);
+	} else if (SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif))
+		/* Group netdev path: only send when already on the group channel */
+		if (chan->hw_value == ndev_vif->chan->hw_value) {
+			struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+			u16 host_tag = slsi_tx_mgmt_host_tag(sdev);
+
+			r = slsi_mlme_send_frame_mgmt(sdev, dev, buf, len, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, 0, 0, 0);
+			if (r) {
+				SLSI_NET_ERR(dev, "Failed to send action frame, r = %d\n", r);
+				goto exit;
+			}
+			slsi_assign_cookie_id(cookie, &ndev_vif->mgmt_tx_cookie);
+			r = slsi_set_mgmt_tx_data(ndev_vif, *cookie, host_tag, buf, len);
+		}
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+	return r;
+}
+
+/* WMM encodes a contention window as an exponent n where cw = 2^n - 1;
+ * convert a cw value back into that exponent.
+ */
+u8 slsi_get_ecw(int cw)
+{
+	int shift_count = 0;
+	int v = cw + 1;
+
+	/* Count right-shifts until the value empties; the final 1 -> 0
+	 * shift is one too many, hence the -1 on return.
+	 */
+	do {
+		v >>= 1;
+		shift_count++;
+	} while (v != 0);
+
+	return shift_count - 1;
+}
+
+/* cfg80211 set_txq_params callback.
+ *
+ * Updates one access category of the cached WMM parameter element
+ * (ndev_vif->ap.wmm_ie). Assumes the last AC (ac == 3 after nl80211
+ * ordering, i.e. BK in the remapped index) is configured last: only then
+ * is the IE header filled in and the whole element pushed to firmware
+ * via mlme-add-info-elements with FAPI_PURPOSE_LOCAL.
+ */
+int slsi_set_txq_params(struct wiphy *wiphy, struct net_device *ndev,
+			struct ieee80211_txq_params *params)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif = netdev_priv(ndev);
+	struct slsi_wmm_parameter_element *wmm_ie = &ndev_vif->ap.wmm_ie;
+	int r = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+	int ac = params->ac;
+#else
+	int ac = params->queue;
+#endif
+	/* Index remapping for AC from nl80211_ac enum to slsi_ac_index_wmm enum (index to be used in the IE).
+	 * Kernel version less than 3.5.0 doesn't support nl80211_ac enum hence not using the nl80211_ac enum.
+	 * Eg. NL80211_AC_VO (index value 0) would be remapped to AC_VO (index value 3).
+	 * Don't change the order of array elements.
+	 */
+	u8 ac_index_map[4] = { AC_VO, AC_VI, AC_BE, AC_BK };
+	int ac_remapped = ac_index_map[ac];
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	SLSI_NET_DBG2(ndev, SLSI_CFG80211, " ac= %x, ac_remapped = %d aifs = %d, cmin=%x cmax = %x, txop = %x,"
+		      "vif_index = %d vif_type = %d", ac, ac_remapped, params->aifs, params->cwmin, params->cwmax,
+		      params->txop, ndev_vif->ifnum, ndev_vif->vif_type);
+
+	if (ndev_vif->activated) {
+		/* Pack ACI/AIFSN, ECWmin/ECWmax (as exponents) and TXOP for this AC */
+		wmm_ie->ac[ac_remapped].aci_aifsn = (ac_remapped << 5) | (params->aifs & 0x0f);
+		wmm_ie->ac[ac_remapped].ecw = ((slsi_get_ecw(params->cwmax)) << 4) | ((slsi_get_ecw(params->cwmin)) & 0x0f);
+		wmm_ie->ac[ac_remapped].txop_limit = cpu_to_le16(params->txop);
+		if (ac == 3) {
+			/* Last AC configured: finish the Microsoft/WMM vendor IE header
+			 * and send the complete element to the firmware.
+			 */
+			wmm_ie->eid = SLSI_WLAN_EID_VENDOR_SPECIFIC;
+			wmm_ie->len = 24;
+			wmm_ie->oui[0] = 0x00;
+			wmm_ie->oui[1] = 0x50;
+			wmm_ie->oui[2] = 0xf2;
+			wmm_ie->oui_type = WLAN_OUI_TYPE_MICROSOFT_WMM;
+			wmm_ie->oui_subtype = 1;
+			wmm_ie->version = 1;
+			wmm_ie->qos_info = 0;
+			wmm_ie->reserved = 0;
+			r = slsi_mlme_add_info_elements(sdev, ndev, FAPI_PURPOSE_LOCAL, (const u8 *)wmm_ie, sizeof(struct slsi_wmm_parameter_element));
+			if (r)
+				SLSI_NET_ERR(ndev, "Error sending TX Queue Parameters for AP error = %d", r);
+		}
+	}
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+/* cfg80211 external_auth callback: forward the supplicant's external
+ * (SAE) authentication result to the firmware via
+ * mlme-synchronised-response. Returns the mlme call's result.
+ */
+int slsi_synchronised_response(struct wiphy *wiphy, struct net_device *dev,
+			       struct cfg80211_external_auth_params *params)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int r;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	r = slsi_mlme_synchronised_response(sdev, dev, params);
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+/* cfg80211 update_ft_ies callback: push the supplicant-provided 802.11r
+ * FT IEs to the firmware with FAPI_PURPOSE_ASSOCIATION_REQUEST —
+ * presumably so they are carried in the next (re)association request.
+ */
+static int slsi_update_ft_ies(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int r = 0;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	/* Non-STA VIFs are silently ignored (returns 0) */
+	if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION)
+		r = slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_ASSOCIATION_REQUEST, ftie->ie, ftie->ie_len);
+
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+
+int slsi_set_mac_acl(struct wiphy *wiphy, struct net_device *dev,
+		     const struct cfg80211_acl_data *params)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int r = 0;
+
+	/* WlanLite (test-mode) firmware does not implement this signal */
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_SET_ACL.request\n");
+		return -EOPNOTSUPP;
+	}
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	/* A MAC ACL is only meaningful on an AP VIF */
+	if (ndev_vif->vif_type != FAPI_VIFTYPE_AP) {
+		SLSI_NET_ERR(dev, "Invalid vif type: %d\n", ndev_vif->vif_type);
+		r = -EINVAL;
+		goto exit;
+	}
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "ACL:: Policy: %d Number of stations: %d\n", params->acl_policy, params->n_acl_entries);
+	r = slsi_mlme_set_acl(sdev, dev, ndev_vif->ifnum, params);
+	if (r != 0)
+		SLSI_NET_ERR(dev, "mlme_set_acl_req returned with CFM failure\n");
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+#endif
+
+/* cfg80211 operations table registered with the wireless core.
+ * Entries wrapped in kernel-version / Kconfig guards are only present
+ * when the corresponding cfg80211 callback or driver feature exists.
+ */
+static struct cfg80211_ops slsi_ops = {
+	.add_virtual_intf = slsi_add_virtual_intf,
+	.del_virtual_intf = slsi_del_virtual_intf,
+	.change_virtual_intf = slsi_change_virtual_intf,
+
+	.scan = slsi_scan,
+	.connect = slsi_connect,
+	.disconnect = slsi_disconnect,
+
+	.add_key = slsi_add_key,
+	.del_key = slsi_del_key,
+	.get_key = slsi_get_key,
+	.set_default_key = slsi_set_default_key,
+	.set_default_mgmt_key = slsi_config_default_mgmt_key,
+
+	.set_wiphy_params = slsi_set_wiphy_params,
+
+	.del_station = slsi_del_station,
+	.get_station = slsi_get_station,
+	.set_tx_power = slsi_set_tx_power,
+	.get_tx_power = slsi_get_tx_power,
+	.set_power_mgmt = slsi_set_power_mgmt,
+
+	.suspend = slsi_suspend,
+	.resume = slsi_resume,
+
+	.set_pmksa = slsi_set_pmksa,
+	.del_pmksa = slsi_del_pmksa,
+	.flush_pmksa = slsi_flush_pmksa,
+
+	.remain_on_channel = slsi_remain_on_channel,
+	.cancel_remain_on_channel = slsi_cancel_remain_on_channel,
+
+	.change_bss = slsi_change_bss,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 5, 0))
+	.set_channel = slsi_set_channel,
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 5, 0)) */
+
+	.start_ap = slsi_start_ap,
+	.change_beacon = slsi_change_beacon,
+	.stop_ap = slsi_stop_ap,
+
+	.sched_scan_start = slsi_sched_scan_start,
+	.sched_scan_stop = slsi_sched_scan_stop,
+
+	.mgmt_frame_register = slsi_mgmt_frame_register,
+	.mgmt_tx = slsi_mgmt_tx,
+	.mgmt_tx_cancel_wait = slsi_mgmt_tx_cancel_wait,
+	.set_txq_params = slsi_set_txq_params,
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+	.external_auth = slsi_synchronised_response,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	.set_mac_acl = slsi_set_mac_acl,
+	.update_ft_ies = slsi_update_ft_ies,
+#endif
+	.tdls_oper = slsi_tdls_oper,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	.set_monitor_channel = slsi_set_monitor_channel,
+#endif
+	.set_qos_map = slsi_set_qos_map
+#endif
+};
+
/* Build an ieee80211_rate entry; _rate is in units of 100 kbps as required
 * by struct ieee80211_rate.bitrate.
 */
#define RATE_LEGACY(_rate, _hw_value, _flags) { \
	.bitrate = (_rate), \
	.hw_value = (_hw_value), \
	.flags = (_flags), \
}

/* Build a 2.4 GHz ieee80211_channel; max_power is in dBm. */
#define CHAN2G(_freq, _idx) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 17, \
}

/* Build a 5 GHz ieee80211_channel; max_power is in dBm. */
#define CHAN5G(_freq, _idx) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 17, \
}
+
/* 2.4 GHz channel list, channels 1-14 (hw_value = channel number). */
static struct ieee80211_channel slsi_2ghz_channels[] = {
	CHAN2G(2412, 1),
	CHAN2G(2417, 2),
	CHAN2G(2422, 3),
	CHAN2G(2427, 4),
	CHAN2G(2432, 5),
	CHAN2G(2437, 6),
	CHAN2G(2442, 7),
	CHAN2G(2447, 8),
	CHAN2G(2452, 9),
	CHAN2G(2457, 10),
	CHAN2G(2462, 11),
	CHAN2G(2467, 12),
	CHAN2G(2472, 13),
	CHAN2G(2484, 14),
};
+
/* 2.4 GHz rate set: the four 11b (CCK) rates first, then the 11g (OFDM)
 * rates.  Bitrates are in 100 kbps units; hw_values are firmware rate
 * indices, hence the non-monotonic ordering (presumably 6 = 11 Mbps CCK
 * in the firmware table - see fw_rate_idx comment before wifi_11a_rates).
 */
static struct ieee80211_rate slsi_11g_rates[] = {
	RATE_LEGACY(10, 1, 0),
	RATE_LEGACY(20, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE_LEGACY(55, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE_LEGACY(110, 6, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE_LEGACY(60, 4, 0),
	RATE_LEGACY(90, 5, 0),
	RATE_LEGACY(120, 7, 0),
	RATE_LEGACY(180, 8, 0),
	RATE_LEGACY(240, 9, 0),
	RATE_LEGACY(360, 10, 0),
	RATE_LEGACY(480, 11, 0),
	RATE_LEGACY(540, 12, 0),
};
+
/* 5 GHz channel list grouped by band segment (hw_value = channel number). */
static struct ieee80211_channel slsi_5ghz_channels[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 36),
	CHAN5G(5200, 40),
	CHAN5G(5220, 44),
	CHAN5G(5240, 48),
	/* UNII 2 */
	CHAN5G(5260, 52),
	CHAN5G(5280, 56),
	CHAN5G(5300, 60),
	CHAN5G(5320, 64),
	/* "Middle band" */
	CHAN5G(5500, 100),
	CHAN5G(5520, 104),
	CHAN5G(5540, 108),
	CHAN5G(5560, 112),
	CHAN5G(5580, 116),
	CHAN5G(5600, 120),
	CHAN5G(5620, 124),
	CHAN5G(5640, 128),
	CHAN5G(5660, 132),
	CHAN5G(5680, 136),
	CHAN5G(5700, 140),
	CHAN5G(5720, 144),
	/* UNII 3 */
	CHAN5G(5745, 149),
	CHAN5G(5765, 153),
	CHAN5G(5785, 157),
	CHAN5G(5805, 161),
	CHAN5G(5825, 165),
};
+
/* note fw_rate_idx_to_host_11a_idx[] below must change if this table changes */

/* 5 GHz (11a, OFDM-only) rate set; 100 kbps units, hw_value = firmware
 * rate index.
 */
static struct ieee80211_rate wifi_11a_rates[] = {
	RATE_LEGACY(60, 4, 0),
	RATE_LEGACY(90, 5, 0),
	RATE_LEGACY(120, 7, 0),
	RATE_LEGACY(180, 8, 0),
	RATE_LEGACY(240, 9, 0),
	RATE_LEGACY(360, 10, 0),
	RATE_LEGACY(480, 11, 0),
	RATE_LEGACY(540, 12, 0),
};
+
/* Default HT capabilities advertised at wiphy creation; overwritten with
 * firmware-reported values in slsi_cfg80211_update_wiphy().
 * rx_mask[0] = 0xff advertises a single spatial stream (MCS 0-7).
 */
static struct ieee80211_sta_ht_cap slsi_ht_cap = {
	.ht_supported = true,
	.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
	       IEEE80211_HT_CAP_LDPC_CODING |
	       IEEE80211_HT_CAP_RX_STBC |
	       IEEE80211_HT_CAP_GRN_FLD |
	       IEEE80211_HT_CAP_SGI_20 |
	       IEEE80211_HT_CAP_SGI_40,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_4,
	.mcs = {
		.rx_mask = { 0xff, 0, },
		.rx_highest = cpu_to_le16(0),
		.tx_params = 0,
	},
};
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
/* Default VHT capabilities; overwritten with firmware-reported values in
 * slsi_cfg80211_update_wiphy().  The MCS maps of 0xfffe advertise MCS 0-9
 * on one spatial stream, all other streams unsupported.
 */
struct ieee80211_sta_vht_cap slsi_vht_cap = {
	.vht_supported = true,
	.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
	       IEEE80211_VHT_CAP_SHORT_GI_80 |
	       IEEE80211_VHT_CAP_RXSTBC_1 |
	       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
	       (5 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT),
	.vht_mcs = {
		.rx_mcs_map = cpu_to_le16(0xfffe),
		.rx_highest = cpu_to_le16(0),
		.tx_mcs_map = cpu_to_le16(0xfffe),
		.tx_highest = cpu_to_le16(0),
	},
};
#endif
+
/* 2.4 GHz supported band; ht_cap is filled in at runtime from slsi_ht_cap. */
struct ieee80211_supported_band slsi_band_2ghz = {
	.channels = slsi_2ghz_channels,
	.band = NL80211_BAND_2GHZ,
	.n_channels = ARRAY_SIZE(slsi_2ghz_channels),
	.bitrates = slsi_11g_rates,
	.n_bitrates = ARRAY_SIZE(slsi_11g_rates),
};
+
/* 5 GHz supported band; ht_cap/vht_cap are filled in at runtime. */
struct ieee80211_supported_band slsi_band_5ghz = {
	.channels = slsi_5ghz_channels,
	.band = NL80211_BAND_5GHZ,
	.n_channels = ARRAY_SIZE(slsi_5ghz_channels),
	.bitrates = wifi_11a_rates,
	.n_bitrates = ARRAY_SIZE(wifi_11a_rates),
};
+
/* Cipher suite selectors advertised to cfg80211.  WLAN_CIPHER_SUITE_PMK is
 * the vendor selector defined in cfg80211_ops.h for PMK-based keys.
 */
static const u32 slsi_cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
	WLAN_CIPHER_SUITE_AES_CMAC,
	WLAN_CIPHER_SUITE_SMS4,
	WLAN_CIPHER_SUITE_PMK
};
+
/* Management frame subtypes that user space may TX (all) and will have
 * reported on RX, per interface type.  The rx bit positions are the frame
 * control subtype values (stype >> 4).
 */
static const struct ieee80211_txrx_stypes
	ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_AP] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		      BIT(IEEE80211_STYPE_AUTH >> 4)
	},
	[NL80211_IFTYPE_STATION] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
		      BIT(IEEE80211_STYPE_AUTH >> 4)
	},
	[NL80211_IFTYPE_P2P_GO] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
		      BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
	},
	[NL80211_IFTYPE_P2P_CLIENT] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
	},
};
+
/* Interface combinations supported by driver */
static struct ieee80211_iface_limit iface_limits[] = {
#ifdef CONFIG_SCSC_WLAN_STA_ONLY
	/* Basic STA-only */
	{
		.max = CONFIG_SCSC_WLAN_MAX_INTERFACES,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
#else
	/* AP mode: # AP <= 1 on channel = 1 */
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP),
	},
	/* STA and P2P mode: #STA <= 1, #{P2P-client,P2P-GO} <= 1 on two channels */
	/* For P2P, the device mode and group mode is first started as STATION and then changed.
	 * Similarly it is changed to STATION on group removal. Hence set maximum interfaces for STATION.
	 */
	{
		.max = CONFIG_SCSC_WLAN_MAX_INTERFACES,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO),
	},
	/* ADHOC mode: #ADHOC <= 1 on channel = 1 */
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
#endif
};
+
/* Custom regulatory domain attached to device_config.domain_info.regdomain
 * in slsi_cfg80211_new().  All 20 rules start zeroed.
 * NOTE(review): n_reg_rules is left at 0 here - presumably the rules and
 * count are populated at runtime when regulatory data arrives; confirm.
 */
static struct ieee80211_regdomain slsi_regdomain = {
	.reg_rules = {
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
		REG_RULE(0, 0, 0, 0, 0, 0),
	}
};
+
/* Single interface combination: the limits above, at most
 * CONFIG_SCSC_WLAN_MAX_INTERFACES interfaces on up to 2 distinct channels.
 */
static struct ieee80211_iface_combination iface_comb[] = {
	{
		.limits = iface_limits,
		.n_limits = ARRAY_SIZE(iface_limits),
		.num_different_channels = 2,
		.max_interfaces = CONFIG_SCSC_WLAN_MAX_INTERFACES,
	},
};
+
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
/* Default WoWLAN configuration: wake on any trigger. */
static struct cfg80211_wowlan slsi_wowlan_config = {
	.any = true,
};
#endif
#endif
+
/* Allocate a new wiphy with the slsi_dev driver context embedded in its
 * private area and set all static capabilities (flags, bands, cipher
 * suites, interface combinations, scan limits).
 *
 * @dev: parent device, attached via set_wiphy_dev().
 *
 * Returns the embedded slsi_dev on success or NULL if wiphy_new() fails.
 * The caller must later call slsi_cfg80211_register() and eventually
 * slsi_cfg80211_free().
 */
struct slsi_dev *slsi_cfg80211_new(struct device *dev)
{
	struct wiphy *wiphy;
	struct slsi_dev *sdev = NULL;

	SLSI_DBG1_NODEV(SLSI_CFG80211, "wiphy_new()\n");
	wiphy = wiphy_new(&slsi_ops, sizeof(struct slsi_dev));
	if (!wiphy) {
		SLSI_ERR_NODEV("wiphy_new() failed");
		return NULL;
	}

	/* The driver context lives in the wiphy private area. */
	sdev = (struct slsi_dev *)wiphy->priv;

	sdev->wiphy = wiphy;

	set_wiphy_dev(wiphy, dev);

	/* Allow changing of the netns, if NOT set then no changes are allowed */
	wiphy->flags |= WIPHY_FLAG_NETNS_OK;
	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;

	/* wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
	 *
	 * Whilst the firmware does support roaming the driver MUST NOT advertise it
	 * as the supplicant will NOT send the BSSID and frequency information in the
	 * connect cfg80211 op.
	 * If the driver advertises FW_ROAM then the supplicant expects it to perform
	 * any scans required to find an appropriate AP and will only pass the SSID
	 */

	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
			WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
			WIPHY_FLAG_AP_UAPSD;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	wiphy->max_acl_mac_addrs = SLSI_AP_PEER_CONNECTIONS_MAX;
#endif

	wiphy->privid = sdev;

	/* Interface modes advertised to user space. */
	wiphy->interface_modes =
#ifdef CONFIG_SCSC_WLAN_STA_ONLY
		BIT(NL80211_IFTYPE_STATION);
#else
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_SCSC_WLAN_DEBUG
		BIT(NL80211_IFTYPE_MONITOR) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);
#endif
	/* Default HT/VHT caps; refreshed later by slsi_cfg80211_update_wiphy(). */
	slsi_band_2ghz.ht_cap = slsi_ht_cap;
	slsi_band_5ghz.ht_cap = slsi_ht_cap;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	slsi_band_5ghz.vht_cap = slsi_vht_cap;
#endif
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;

	wiphy->bands[NL80211_BAND_2GHZ] = &slsi_band_2ghz;
	wiphy->bands[NL80211_BAND_5GHZ] = &slsi_band_5ghz;

	memset(&sdev->device_config, 0, sizeof(struct slsi_dev_config));
	sdev->device_config.band_5G = &slsi_band_5ghz;
	sdev->device_config.band_2G = &slsi_band_2ghz;
	sdev->device_config.domain_info.regdomain = &slsi_regdomain;

	wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	wiphy->max_remain_on_channel_duration = 5000; /* 5000 msec */

	wiphy->cipher_suites = slsi_cipher_suites;
	wiphy->n_cipher_suites = ARRAY_SIZE(slsi_cipher_suites);

	wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;

	/* Driver interface combinations */
	wiphy->n_iface_combinations = ARRAY_SIZE(iface_comb);
	wiphy->iface_combinations = iface_comb;

	/* Basic scan parameters */
	wiphy->max_scan_ssids = 10;
	wiphy->max_scan_ie_len = 2048;

	/* NOTE(review): at exactly KERNEL_VERSION(4, 11, 0) both this block
	 * and the next one are compiled in; the boundaries look like they
	 * were meant to be mutually exclusive (< / >=) - confirm against the
	 * kernels this driver targets.
	 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 0))
	/* Scheduled scanning support */
	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
	/* Parameters for Scheduled Scanning Support */
	wiphy->max_sched_scan_reqs = 1;
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI);
#endif

	/* Match the maximum number of SSIDs that could be requested from wpa_supplicant */
	wiphy->max_sched_scan_ssids = 16;

	/* To get a list of SSIDs rather than just the wildcard SSID need to support match sets */
	wiphy->max_match_sets = 16;

	wiphy->max_sched_scan_ie_len = 2048;

#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
	/* Default wake-on-any WoWLAN config; cleared again on unregister. */
	wiphy->wowlan = NULL;
	wiphy->wowlan_config = &slsi_wowlan_config;
#endif
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
	wiphy->regulatory_flags |= (REGULATORY_STRICT_REG |
				    REGULATORY_CUSTOM_REG |
				    REGULATORY_DISABLE_BEACON_HINTS);
#endif
#ifndef CONFIG_SCSC_WLAN_STA_ONLY
	/* P2P flags */
	wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;

	/* Enable Probe response offloading w.r.t WPS and P2P */
	wiphy->probe_resp_offload |=
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* TDLS support */
	wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
#endif
	/* Mac Randomization */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
	wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
#endif
#endif
#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
	wiphy->features |= NL80211_FEATURE_SAE;
#endif
	return sdev;
}
+
+int slsi_cfg80211_register(struct slsi_dev *sdev)
+{
+ SLSI_DBG1(sdev, SLSI_CFG80211, "wiphy_register()\n");
+ return wiphy_register(sdev->wiphy);
+}
+
+void slsi_cfg80211_unregister(struct slsi_dev *sdev)
+{
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ sdev->wiphy->wowlan = NULL;
+ sdev->wiphy->wowlan_config = NULL;
+#endif
+#endif
+ SLSI_DBG1(sdev, SLSI_CFG80211, "wiphy_unregister()\n");
+ wiphy_unregister(sdev->wiphy);
+}
+
+void slsi_cfg80211_free(struct slsi_dev *sdev)
+{
+ SLSI_DBG1(sdev, SLSI_CFG80211, "wiphy_free()\n");
+ wiphy_free(sdev->wiphy);
+}
+
/* Refresh the wiphy's band list and HT/VHT capabilities from the
 * firmware-reported support flags and capability bytes
 * (sdev->band_5g_supported, fw_ht_enabled/fw_ht_cap, fw_vht_enabled/fw_vht_cap).
 */
void slsi_cfg80211_update_wiphy(struct slsi_dev *sdev)
{
	/* update supported Bands */
	if (sdev->band_5g_supported) {
		sdev->wiphy->bands[NL80211_BAND_5GHZ] = &slsi_band_5ghz;
		sdev->device_config.band_5G = &slsi_band_5ghz;
	} else {
		sdev->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
		sdev->device_config.band_5G = NULL;
	}

	/* update HT features */
	if (sdev->fw_ht_enabled) {
		slsi_ht_cap.ht_supported = true;
		/* NOTE(review): casting the capability byte buffer to u16*
		 * assumes suitable alignment; get_unaligned_le16() would be
		 * safer if fw_ht_cap is a plain u8 array - confirm.
		 */
		slsi_ht_cap.cap = le16_to_cpu(*(u16 *)sdev->fw_ht_cap);
		slsi_ht_cap.ampdu_density = (sdev->fw_ht_cap[2] & IEEE80211_HT_AMPDU_PARM_DENSITY) >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
		slsi_ht_cap.ampdu_factor = sdev->fw_ht_cap[2] & IEEE80211_HT_AMPDU_PARM_FACTOR;
	} else {
		slsi_ht_cap.ht_supported = false;
	}
	slsi_band_2ghz.ht_cap = slsi_ht_cap;
	slsi_band_5ghz.ht_cap = slsi_ht_cap;

	/* update VHT features */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	if (sdev->fw_vht_enabled) {
		slsi_vht_cap.vht_supported = true;
		/* NOTE(review): same alignment concern as the HT cap above. */
		slsi_vht_cap.cap = le32_to_cpu(*(u32 *)sdev->fw_vht_cap);
	} else {
		slsi_vht_cap.vht_supported = false;
	}
	slsi_band_5ghz.vht_cap = slsi_vht_cap;
#endif

	SLSI_INFO(sdev, "BANDS SUPPORTED -> 2.4:'%c' 5:'%c'\n", sdev->wiphy->bands[NL80211_BAND_2GHZ] ? 'Y' : 'N',
		  sdev->wiphy->bands[NL80211_BAND_5GHZ] ? 'Y' : 'N');
	SLSI_INFO(sdev, "HT/VHT SUPPORTED -> HT:'%c' VHT:'%c'\n", sdev->fw_ht_enabled ? 'Y' : 'N',
		  sdev->fw_vht_enabled ? 'Y' : 'N');
	SLSI_INFO(sdev, "HT -> cap:0x%04x ampdu_density:%d ampdu_factor:%d\n", slsi_ht_cap.cap, slsi_ht_cap.ampdu_density, slsi_ht_cap.ampdu_factor);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
	SLSI_INFO(sdev, "VHT -> cap:0x%08x\n", slsi_vht_cap.cap);
#endif
}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
#ifndef __SLSI_CFG80211_OPS_H__
#define __SLSI_CFG80211_OPS_H__

#include <net/cfg80211.h>

struct slsi_dev;

/* Recover the driver context embedded in a wiphy's private area. */
#define SDEV_FROM_WIPHY(wiphy) ((struct slsi_dev *)(wiphy)->priv)
/* Vendor cipher suite selector used for PMK-based keys. */
#define WLAN_CIPHER_SUITE_PMK 0x00904C00

/* WPS/P2P IE parsing constants - presumably byte offsets and OUI+type
 * patterns used when inspecting vendor IEs; verify against the users.
 */
#define SLSI_WPS_REQUEST_TYPE_POS 15
#define SLSI_WPS_REQUEST_TYPE_ENROLEE_INFO_ONLY 0x00
#define SLSI_WPS_OUI_PATTERN 0x04F25000
#define SLSI_P2P_OUI_PATTERN 0x099a6f50
#define SLSI_VENDOR_OUI_AND_TYPE_LEN 4

/* Lifecycle: new -> register -> (update_wiphy) -> unregister -> free. */
struct slsi_dev *slsi_cfg80211_new(struct device *dev);
int slsi_cfg80211_register(struct slsi_dev *sdev);
void slsi_cfg80211_unregister(struct slsi_dev *sdev);
void slsi_cfg80211_free(struct slsi_dev *sdev);
void slsi_cfg80211_update_wiphy(struct slsi_dev *sdev);
#endif /*__SLSI_CFG80211_OPS_H__*/
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ * Chip Manager interface
+ *
+ ****************************************************************************/
+
+#include "mgt.h"
+#include "dev.h"
+#include "debug.h"
+#include "scsc_wifi_cm_if.h"
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+#include "hip4_sampler.h"
+#endif
+
+#include <scsc/scsc_mx.h>
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
/* Module parameter: run the driver in WlanLite (production test) mode;
 * queried via slsi_is_test_mode_enabled().
 */
static bool EnableTestMode;
module_param(EnableTestMode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(EnableTestMode, "Enable WlanLite test mode driver.");

/* Notifier chain fired on WLAN service suspend/resume/stop/failure-reset. */
static BLOCKING_NOTIFIER_HEAD(slsi_wlan_notifier);

/* Module parameter: RF test mode; also set at runtime by
 * slsi_check_rf_test_mode() from the persisted .psm.info file.
 */
static bool EnableRfTestMode;
module_param(EnableRfTestMode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(EnableRfTestMode, "Enable RF test mode driver.");

/* Serialises service probe/remove/start/stop state transitions. */
static struct mutex slsi_start_mutex;
static int recovery_in_progress;	/* set while a subsystem recovery is pending */
static u16 latest_scsc_panic_code;	/* panic code from the last failure_reset callback */


/* TODO: Would be good to get this removed - use module_client? */
struct slsi_cm_ctx {
	struct slsi_dev *sdev;
};

/* Only one wlan service instance is assumed for now. */
static struct slsi_cm_ctx cm_ctx;

static void slsi_hip_block_bh(struct slsi_dev *sdev);
+
+int slsi_wlan_service_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&slsi_wlan_notifier, nb);
+}
+
+int slsi_wlan_service_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&slsi_wlan_notifier, nb);
+}
+
+static int wlan_suspend(struct scsc_service_client *client)
+{
+ struct slsi_dev *sdev = container_of(client, struct slsi_dev, mx_wlan_client);
+
+ SLSI_INFO_NODEV("Nofity registered functions\n");
+ blocking_notifier_call_chain(&slsi_wlan_notifier, SCSC_WIFI_SUSPEND, sdev);
+
+ return 0;
+}
+
+static int wlan_resume(struct scsc_service_client *client)
+{
+ struct slsi_dev *sdev = container_of(client, struct slsi_dev, mx_wlan_client);
+
+ SLSI_INFO_NODEV("Nofity registered functions\n");
+ blocking_notifier_call_chain(&slsi_wlan_notifier, SCSC_WIFI_RESUME, sdev);
+
+ return 0;
+}
+
+static void wlan_stop_on_failure(struct scsc_service_client *client)
+{
+ int state;
+ struct slsi_dev *sdev = container_of(client, struct slsi_dev, mx_wlan_client);
+
+ SLSI_INFO_NODEV("\n");
+
+ mutex_lock(&slsi_start_mutex);
+ recovery_in_progress = 1;
+ sdev->recovery_status = 1;
+ state = atomic_read(&sdev->cm_if.cm_if_state);
+ if (state != SCSC_WIFI_CM_IF_STATE_STOPPED) {
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_BLOCKED);
+ sdev->fail_reported = true;
+
+ /* If next state is stopped, then don't signal recovery since
+ * the Android framework won't/shouldn't restart (supplicant
+ * stop and start).
+ */
+ if (sdev->recovery_next_state != SCSC_WIFI_CM_IF_STATE_STOPPING) {
+ slsi_hip_block_bh(sdev);
+
+ /* Stop wlan operations. Send event to registered parties */
+ mutex_unlock(&slsi_start_mutex);
+ SLSI_INFO_NODEV("Nofity registered functions\n");
+ blocking_notifier_call_chain(&slsi_wlan_notifier, SCSC_WIFI_STOP, sdev);
+ mutex_lock(&slsi_start_mutex);
+ }
+ } else {
+ SLSI_INFO_NODEV("Wi-Fi service driver not started\n");
+ }
+
+ mutex_unlock(&slsi_start_mutex);
+}
+
/* Service client callback after a subsystem reset: record the panic code so
 * the subsequent remove cycle can report it in the HANGED vendor event.
 */
static void wlan_failure_reset(struct scsc_service_client *client, u16 scsc_panic_code)
{
	SLSI_INFO_NODEV("\n");
	latest_scsc_panic_code = scsc_panic_code;
}
+
+int slsi_check_rf_test_mode(void)
+{
+ struct file *fp = NULL;
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+ char *filepath = "/data/vendor/conn/.psm.info";
+#else
+ char *filepath = "/data/misc/conn/.psm.info";
+#endif
+ char power_val = 0;
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp) || (!fp)) {
+ pr_err("%s is not exist.\n", filepath);
+ return -ENOENT; /* -2 */
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ kernel_read(fp, &power_val, 1, &fp->f_pos);
+#else
+ kernel_read(fp, fp->f_pos, &power_val, 1);
+#endif
+ /* if power_val is 0, it means rf_test mode by rf. */
+ if (power_val == '0') {
+ pr_err("*#rf# is enabled.\n");
+ EnableRfTestMode = 1;
+ } else {
+ pr_err("*#rf# is disabled.\n");
+ EnableRfTestMode = 0;
+ }
+
+ if (fp)
+ filp_close(fp, NULL);
+
+ return 0;
+}
+
+/* WLAN service driver registration
+ * ================================
+ */
/* Module client probe callback.  Runs both for first-time probe and after a
 * subsystem recovery: on recovery it re-arms the existing sdev (clears the
 * recovery flags and completes recovery_completed); on first probe it
 * attaches a new driver instance to the maxwell core.
 */
void slsi_wlan_service_probe(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
{
	struct slsi_dev *sdev;
	struct device *dev;
	struct scsc_service_client mx_wlan_client;
#ifdef CONFIG_SCSC_LOG_COLLECTION
	char buf[SCSC_LOG_FAPI_VERSION_SIZE];
#endif

	SLSI_UNUSED_PARAMETER(module_client);

	SLSI_INFO_NODEV("WLAN service probe\n");

	mutex_lock(&slsi_start_mutex);

	/* Recovery probe with no recovery pending: nothing to do. */
	if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress)
		goto done;

	if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY) {
		SLSI_INFO_NODEV("Probe recovery\n");
		sdev = cm_ctx.sdev;
		recovery_in_progress = 0;
		sdev->fail_reported = false;
		sdev->recovery_status = 0;
		sdev->mlme_blocked = false;
		complete_all(&sdev->recovery_completed);
	} else {
		/* Register callbacks */
		/* NOTE(review): mx_wlan_client is stack-local, so this relies
		 * on slsi_dev_attach() copying the callbacks rather than
		 * keeping the pointer - confirm.
		 */
		mx_wlan_client.stop_on_failure = wlan_stop_on_failure;
		mx_wlan_client.failure_reset = wlan_failure_reset;
		mx_wlan_client.suspend = wlan_suspend;
		mx_wlan_client.resume = wlan_resume;

		dev = scsc_service_get_device_by_mx(mx);

		/* The mutex must be released at this point since the attach
		 * process may call various functions including
		 * slsi_sm_wlan_service_start and slsi_sm_wlan_service_open, which will
		 * claim the same mutex.
		 */
		mutex_unlock(&slsi_start_mutex);
		sdev = slsi_dev_attach(dev, mx, &mx_wlan_client);
		mutex_lock(&slsi_start_mutex);
		if (!sdev) {
			SLSI_ERR_NODEV("WLAN attach failed - slsi_dev_attach\n");
			goto done;
		}

		cm_ctx.sdev = sdev; /* TODO: For now. */

		atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_PROBING);
		/* Hold a device reference; dropped in service_clean_up_locked(). */
		get_device(dev);

#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
		hip4_sampler_create(sdev, mx);
#endif
#ifdef CONFIG_SCSC_LOG_COLLECTION
		memset(buf, 0, SCSC_LOG_FAPI_VERSION_SIZE);
		/* Write FAPI VERSION to collector header */
		/* IMPORTANT - Do not change the formatting as User space tooling is parsing the string
		 * to read SAP fapi versions.
		 */
		snprintf(buf, SCSC_LOG_FAPI_VERSION_SIZE, "ma:%u.%u, mlme:%u.%u, debug:%u.%u, test:%u.%u",
			 FAPI_MAJOR_VERSION(FAPI_DATA_SAP_VERSION), FAPI_MINOR_VERSION(FAPI_DATA_SAP_VERSION),
			 FAPI_MAJOR_VERSION(FAPI_CONTROL_SAP_VERSION), FAPI_MINOR_VERSION(FAPI_CONTROL_SAP_VERSION),
			 FAPI_MAJOR_VERSION(FAPI_DEBUG_SAP_VERSION), FAPI_MINOR_VERSION(FAPI_DEBUG_SAP_VERSION),
			 FAPI_MAJOR_VERSION(FAPI_TEST_SAP_VERSION), FAPI_MINOR_VERSION(FAPI_TEST_SAP_VERSION));

		scsc_log_collector_write_fapi(buf, SCSC_LOG_FAPI_VERSION_SIZE);
#endif
	}

	if (reason != SCSC_MODULE_CLIENT_REASON_RECOVERY)
		atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_PROBED);

done:
	mutex_unlock(&slsi_start_mutex);
}
+
/* service_clean_up_locked expects the slsi_start_mutex mutex to be claimed when
 * service_clean_up_locked is called.
 */
static void service_clean_up_locked(struct slsi_dev *sdev)
{
	/* REMOVING -> detach -> REMOVED; drops the device reference taken at
	 * probe time.
	 */
	atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_REMOVING);
	put_device(sdev->dev);

	atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_REMOVED);

	sdev->maxwell_core = NULL;

	/* The mutex must be released at this point since the tear down
	 * process will call various functions including
	 * slsi_sm_wlan_service_stop and slsi_sm_wlan_service_close, which will
	 * claim the same mutex.
	 */
	mutex_unlock(&slsi_start_mutex);
	slsi_dev_detach(sdev);
	mutex_lock(&slsi_start_mutex);
}
+
+static void slsi_wlan_service_remove(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason)
+{
+ struct slsi_dev *sdev;
+ int state;
+
+ SLSI_UNUSED_PARAMETER(mx);
+ SLSI_UNUSED_PARAMETER(module_client);
+
+ sdev = cm_ctx.sdev;
+ if (!sdev) {
+ SLSI_INFO_NODEV("no sdev\n");
+ return;
+ }
+
+ if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && !recovery_in_progress) {
+ SLSI_INFO_NODEV("WLAN service remove - recovery. Service not active.\n");
+ } else if (reason == SCSC_MODULE_CLIENT_REASON_RECOVERY && recovery_in_progress) {
+ int r;
+
+ SLSI_INFO_NODEV("WLAN service remove - recovery\n");
+
+ /* Only indicate if the next state is not stopping. The recovery
+ * handling won't have any affect if the framework is closing
+ * anyway.
+ */
+ if (sdev->recovery_next_state != SCSC_WIFI_CM_IF_STATE_STOPPING) {
+ SLSI_INFO_NODEV("Nofity registered functions\n");
+ blocking_notifier_call_chain(&slsi_wlan_notifier, SCSC_WIFI_FAILURE_RESET, sdev);
+ }
+
+ mutex_lock(&slsi_start_mutex);
+ /**
+ * If there was a request to stop during the recovery, then do
+ * not sent a hang - just stop here. The Wi-Fi service driver is
+ * ready to be turned on again. Let the service_stop complete.
+ */
+ complete_all(&sdev->recovery_remove_completion);
+ if (sdev->recovery_next_state == SCSC_WIFI_CM_IF_STATE_STOPPING) {
+ SLSI_INFO_NODEV("Recovery - next state stopping\n");
+ } else {
+ SLSI_INFO_NODEV("Calling slsi_send_hanged_vendor_event with latest_scsc_panic_code=0x%x\n",
+ latest_scsc_panic_code);
+ if (slsi_send_hanged_vendor_event(sdev, latest_scsc_panic_code) < 0)
+ SLSI_ERR(sdev, "Failed to send hang event\n");
+
+ /* Complete any pending ctrl signals, which will prevent
+ * the hang event from being processed.
+ */
+ complete_all(&sdev->sig_wait.completion);
+ }
+
+ mutex_unlock(&slsi_start_mutex);
+ r = wait_for_completion_timeout(&sdev->recovery_stop_completion,
+ msecs_to_jiffies(sdev->recovery_timeout));
+ if (r == 0)
+ SLSI_INFO(sdev, "recovery_stop_completion timeout\n");
+
+ mutex_lock(&slsi_start_mutex);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ reinit_completion(&sdev->recovery_stop_completion);
+#else
+ /*This is how the macro is used in the older verion.*/
+ INIT_COMPLETION(sdev->recovery_stop_completion);
+#endif
+ mutex_unlock(&slsi_start_mutex);
+
+ } else {
+ SLSI_INFO_NODEV("WLAN service remove\n");
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ hip4_sampler_destroy(sdev, mx);
+#endif
+
+ mutex_lock(&slsi_start_mutex);
+ state = atomic_read(&sdev->cm_if.cm_if_state);
+ if (state != SCSC_WIFI_CM_IF_STATE_STARTED &&
+ state != SCSC_WIFI_CM_IF_STATE_PROBED &&
+ state != SCSC_WIFI_CM_IF_STATE_STOPPED &&
+ state != SCSC_WIFI_CM_IF_STATE_BLOCKED) {
+ mutex_unlock(&slsi_start_mutex);
+ SLSI_INFO_NODEV("state-event error %d\n", state);
+ return;
+ }
+
+ service_clean_up_locked(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ }
+}
+
/* Block future HIP runs through the hip_switch */
static void slsi_hip_block_bh(struct slsi_dev *sdev)
{
	/* Marking the HIP blocked stops further HIP processing. */
	SLSI_WARN(sdev, "HIP state set to #SLSI_HIP_STATE_BLOCKED#\n");
	atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_BLOCKED);
}
+
/* Module client registered with the SCSC maxwell core; probe/remove are
 * invoked on subsystem start-up, tear-down and recovery.
 */
struct scsc_mx_module_client wlan_driver = {
	.name = "WLAN driver",
	.probe = slsi_wlan_service_probe,
	.remove = slsi_wlan_service_remove,
};
+
+int slsi_sm_service_driver_register(void)
+{
+ struct slsi_cm_ctx *ctx = &cm_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ mutex_init(&slsi_start_mutex);
+ scsc_mx_module_register_client_module(&wlan_driver);
+
+ return 0;
+}
+
/* Unhook the WLAN module client from the maxwell core. */
void slsi_sm_service_driver_unregister(void)
{
	scsc_mx_module_unregister_client_module(&wlan_driver);
}
+
+/* start/stop wlan service
+ * =======================
+ */
+void slsi_sm_service_failed(struct slsi_dev *sdev, const char *reason)
+{
+ int state;
+
+ mutex_lock(&slsi_start_mutex);
+
+ state = atomic_read(&sdev->cm_if.cm_if_state);
+ if (state != SCSC_WIFI_CM_IF_STATE_STARTED &&
+ state != SCSC_WIFI_CM_IF_STATE_STOPPING) {
+ mutex_unlock(&slsi_start_mutex);
+ SLSI_INFO(sdev, "State %d - ignoring event\n", state);
+ return;
+ }
+
+ /* Limit the volume of error reports to the core */
+ if (!sdev->fail_reported) {
+ /* This log may be scraped by test systems */
+ SLSI_ERR(sdev, "scsc_wifibt: FATAL ERROR: %s\n", reason);
+
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_BLOCKED);
+ slsi_hip_block_bh(sdev);
+ scsc_mx_service_service_failed(sdev->service, reason);
+ scsc_mx_service_mif_dump_registers(sdev->service);
+ sdev->fail_reported = true;
+ }
+
+ mutex_unlock(&slsi_start_mutex);
+}
+
/* Is production test mode enabled? */
/* Returns the EnableTestMode (WlanLite) module parameter. */
bool slsi_is_test_mode_enabled(void)
{
	return EnableTestMode;
}
+
/* Is production rf test mode enabled? */
/* Returns EnableRfTestMode (module parameter, possibly updated by
 * slsi_check_rf_test_mode()).
 */
bool slsi_is_rf_test_mode_enabled(void)
{
	return EnableRfTestMode;
}
+
/* Timeouts (ms) used for the recovery remove/stop handshake, selected in
 * slsi_sm_wlan_service_open() depending on mxman_recovery_disabled().
 */
#define SLSI_SM_WLAN_SERVICE_RECOVERY_COMPLETED_TIMEOUT 20000
#define SLSI_SM_WLAN_SERVICE_RECOVERY_DISABLED_TIMEOUT 2000
+
+int slsi_sm_wlan_service_open(struct slsi_dev *sdev)
+{
+ int err = 0;
+ int state;
+
+ mutex_lock(&slsi_start_mutex);
+ state = atomic_read(&sdev->cm_if.cm_if_state);
+ if (state != SCSC_WIFI_CM_IF_STATE_PROBED &&
+ state != SCSC_WIFI_CM_IF_STATE_STOPPED) {
+ SLSI_INFO(sdev, "State-event error %d\n", state);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ sdev->recovery_timeout = mxman_recovery_disabled() ? SLSI_SM_WLAN_SERVICE_RECOVERY_DISABLED_TIMEOUT : SLSI_SM_WLAN_SERVICE_RECOVERY_COMPLETED_TIMEOUT;
+
+ /* Open service - will download FW - will set MBOX0 with Starting address */
+ SLSI_INFO(sdev, "Open WLAN service\n");
+ sdev->service = scsc_mx_service_open(sdev->maxwell_core, SCSC_SERVICE_ID_WLAN, &sdev->mx_wlan_client, &err);
+ if (!sdev->service) {
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_PROBED);
+ SLSI_WARN(sdev, "Service open failed\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+}
+
/* NOTE(review): && binds tighter than ||, so the condition groups as
 * SLUB || PREEMPT || RT_MUTEXES || (SPINLOCK && MUTEXES && LOCK_ALLOC) ||
 * (LOCK_ALLOC && ATOMIC_SLEEP && LIST) - confirm that grouping is intended.
 */
#if defined(CONFIG_SLUB_DEBUG_ON) || defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_DEBUG_RT_MUTEXES) || \
	defined(CONFIG_DEBUG_SPINLOCK) && defined(CONFIG_DEBUG_MUTEXES) && defined(CONFIG_DEBUG_LOCK_ALLOC) || \
	defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_DEBUG_ATOMIC_SLEEP) && defined(CONFIG_DEBUG_LIST)
#define KERNEL_DEBUG_OPTIONS_ENABLED
#endif
+
+int slsi_sm_wlan_service_start(struct slsi_dev *sdev)
+{
+ struct slsi_hip4 *hip = &sdev->hip4_inst;
+ scsc_mifram_ref ref;
+ int err = 0;
+ int err2 = 0;
+ int state;
+
+ mutex_lock(&slsi_start_mutex);
+ state = atomic_read(&sdev->cm_if.cm_if_state);
+ SLSI_INFO(sdev,
+ "Recovery -- Status:%d In_Progress:%d -- cm_if_state:%d\n",
+ sdev->recovery_status, recovery_in_progress, state);
+ if (state != SCSC_WIFI_CM_IF_STATE_PROBED &&
+ state != SCSC_WIFI_CM_IF_STATE_STOPPED) {
+ SLSI_INFO(sdev, "State-event error %d\n", state);
+ mutex_unlock(&slsi_start_mutex);
+ return -EINVAL;
+ }
+
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STARTING);
+
+#ifdef KERNEL_DEBUG_OPTIONS_ENABLED
+ SLSI_WARN(sdev, "Kernel config debug options are enabled. This might impact the throughput performance.\n");
+#endif
+
+ /* Get RAM from the MIF */
+ SLSI_INFO(sdev, "Allocate mifram\n");
+
+#ifdef CONFIG_SCSC_PCIE
+ err = scsc_mx_service_mifram_alloc(sdev->service, 1.5 * 1024 * 1024, &sdev->hip4_inst.hip_ref, 4096);
+#else
+ err = scsc_mx_service_mifram_alloc(sdev->service, 2 * 1024 * 1024, &sdev->hip4_inst.hip_ref, 4096);
+#endif
+ if (err) {
+ SLSI_WARN(sdev, "scsc_mx_service_mifram_alloc failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+
+ SLSI_INFO(sdev, "Start HIP\n");
+ err = slsi_hip_start(sdev);
+ if (err) {
+ SLSI_WARN(sdev, "slsi_hip_start failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ slsi_hip_stop(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+
+ err = scsc_mx_service_mif_ptr_to_addr(sdev->service, hip->hip_control, &ref);
+ if (err) {
+ SLSI_WARN(sdev, "scsc_mx_service_mif_ptr_to_addr failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ slsi_hip_stop(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+
+ SLSI_INFO(sdev, "Starting WLAN service\n");
+ err = scsc_mx_service_start(sdev->service, ref);
+ if (err) {
+ SLSI_WARN(sdev, "scsc_mx_service_start failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ slsi_hip_stop(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+ err = slsi_hip_setup(sdev);
+ if (err) {
+ SLSI_WARN(sdev, "slsi_hip_setup failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ SLSI_INFO_NODEV("Stopping WLAN service\n");
+ err2 = scsc_mx_service_stop(sdev->service);
+ if (err2)
+ SLSI_INFO(sdev, "scsc_mx_service_stop failed err2: %d\n", err2);
+ slsi_hip_stop(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+ /* Service has started, inform SAP versions to the registered SAPs */
+ err = slsi_hip_sap_setup(sdev);
+ if (err) {
+ SLSI_WARN(sdev, "slsi_hip_sap_setup failed err: %d\n", err);
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
+ SLSI_INFO_NODEV("Stopping WLAN service\n");
+ err2 = scsc_mx_service_stop(sdev->service);
+ if (err2)
+ SLSI_INFO(sdev, "scsc_mx_service_stop failed err2: %d\n", err2);
+ slsi_hip_stop(sdev);
+ mutex_unlock(&slsi_start_mutex);
+ return err;
+ }
+ atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STARTED);
+ mutex_unlock(&slsi_start_mutex);
+ return 0;
+}
+
/* Wait for the recovery "remove" phase to complete, then arm the next
 * stop transition.
 *
 * Must be entered with slsi_start_mutex held; returns with it held.
 * The mutex is dropped across the wait so the recovery path (which
 * signals recovery_remove_completion) can make progress; the wait is
 * bounded by sdev->recovery_timeout.
 */
static void __slsi_sm_wlan_service_stop_wait_locked(struct slsi_dev *sdev)
{
	int r;

	mutex_unlock(&slsi_start_mutex);
	r = wait_for_completion_timeout(&sdev->recovery_remove_completion,
					msecs_to_jiffies(sdev->recovery_timeout));
	if (r == 0)
		SLSI_INFO(sdev, "recovery_remove_completion timeout\n");

	mutex_lock(&slsi_start_mutex);
	sdev->recovery_next_state = SCSC_WIFI_CM_IF_STATE_STOPPED;

	/* Re-arm the completion for the next recovery cycle. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
	reinit_completion(&sdev->recovery_remove_completion);
#else
	/* INIT_COMPLETION() is the pre-3.13 spelling of reinit_completion(). */
	INIT_COMPLETION(sdev->recovery_remove_completion);
#endif
}
+
/* Stop the WLAN service.
 *
 * Serialised by slsi_start_mutex. Handles recovery corner cases:
 * a BLOCKED interface first waits for the recovery "remove" phase,
 * a stop racing with recovery (-EILSEQ) is retried after recovery
 * completes, and a stop with no firmware response (-EIO) triggers a
 * host-initiated panic and then waits for recovery.
 */
void slsi_sm_wlan_service_stop(struct slsi_dev *sdev)
{
	int cm_if_state;
	int err = 0;

	mutex_lock(&slsi_start_mutex);
	cm_if_state = atomic_read(&sdev->cm_if.cm_if_state);
	SLSI_INFO(sdev,
		  "Recovery -- Status:%d In_Progress:%d -- cm_if_state:%d\n",
		  sdev->recovery_status, recovery_in_progress, cm_if_state);

	if (cm_if_state == SCSC_WIFI_CM_IF_STATE_BLOCKED) {
		__slsi_sm_wlan_service_stop_wait_locked(sdev);

		/* If the wait hasn't timed out, the recovery remove completion
		 * will have completed properly and the cm_if_state will be
		 * set to stopped here. If the probe hasn't fired for some reason
		 * try and do a service_stop regardless, since that's all we can
		 * do in this situation; hence skip the state check.
		 */
		goto skip_state_check;
	}

	if (cm_if_state != SCSC_WIFI_CM_IF_STATE_STARTED &&
	    cm_if_state != SCSC_WIFI_CM_IF_STATE_REMOVED &&
	    cm_if_state != SCSC_WIFI_CM_IF_STATE_PROBED) {
		SLSI_INFO(sdev, "Service not started or incorrect state %d\n",
			  cm_if_state);
		goto exit;
	}

	/*
	 * Note that the SCSC_WIFI_CM_IF_STATE_STOPPING state will inhibit the
	 * auto-recovery mechanism, so be careful not to abuse it: as an
	 * example if a panic happens on start or stop we don't want to
	 * unnecessarily pass through STOPPING in order to have a successful
	 * recovery in such a situation.
	 */
	atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPING);
skip_state_check:
	SLSI_INFO_NODEV("Stopping WLAN service\n");
	err = scsc_mx_service_stop(sdev->service);
	if (err == -EILSEQ) {
		/* scsc_mx_service_stop failed since there's a recovery in
		 * progress, so just wait for it to complete and try again.
		 * NOTE(review): each wait is bounded by recovery_timeout but
		 * the number of retries is not, so this can loop indefinitely
		 * if recovery never completes — confirm this is intended.
		 */
		SLSI_INFO(sdev, "scsc_mx_service_stop failed err: %d\n", err);
		__slsi_sm_wlan_service_stop_wait_locked(sdev);
		goto skip_state_check;
	} else if (err == -EIO) {
		char reason[80];

		SLSI_INFO(sdev, "scsc_mx_service_stop failed err: %d\n", err);

		/* scsc_mx_service_stop failed because there was no response
		 * from firmware to the stop request. Generate a host
		 * initiated panic to reset the chip and wait for it to
		 * complete.
		 */
		sdev->recovery_next_state = SCSC_WIFI_CM_IF_STATE_STOPPING;
		snprintf(reason, sizeof(reason), "WLAN scsc_mx_service_stop failed");

		/* Drop the mutex across slsi_sm_service_failed() —
		 * presumably it needs slsi_start_mutex itself; confirm.
		 */
		mutex_unlock(&slsi_start_mutex);
		slsi_sm_service_failed(sdev, reason);
		mutex_lock(&slsi_start_mutex);

		__slsi_sm_wlan_service_stop_wait_locked(sdev);
	} else if (err == -EPERM) {
		/* Special case when recovery is disabled, otherwise the driver
		 * will wait forever for recovery that never comes
		 */
		SLSI_INFO(sdev, "refused due to previous failure, recovery is disabled: %d\n", err);
	} else if (err != 0) {
		SLSI_INFO(sdev, "scsc_mx_service_stop failed, unknown err: %d\n", err);
	}

	atomic_set(&sdev->cm_if.cm_if_state, SCSC_WIFI_CM_IF_STATE_STOPPED);
exit:
	mutex_unlock(&slsi_start_mutex);
}
+
+#define SLSI_SM_WLAN_SERVICE_CLOSE_RETRY 60
+void slsi_sm_wlan_service_close(struct slsi_dev *sdev)
+{
+ int cm_if_state, r;
+
+ mutex_lock(&slsi_start_mutex);
+ cm_if_state = atomic_read(&sdev->cm_if.cm_if_state);
+ if (cm_if_state != SCSC_WIFI_CM_IF_STATE_STOPPED) {
+ SLSI_INFO(sdev, "Service not stopped\n");
+ goto exit;
+ }
+
+ SLSI_INFO_NODEV("Closing WLAN service\n");
+ scsc_mx_service_mifram_free(sdev->service, sdev->hip4_inst.hip_ref);
+ r = scsc_mx_service_close(sdev->service);
+ if (r == -EIO) {
+ int retry_counter;
+
+ /**
+ * Error handling in progress - try and close again later.
+ * The service close call shall remain blocked until close
+ * service is successful. Try up to 30 seconds.
+ */
+ for (retry_counter = 0;
+ SLSI_SM_WLAN_SERVICE_CLOSE_RETRY > retry_counter;
+ retry_counter++) {
+ msleep(500);
+ r = scsc_mx_service_close(sdev->service);
+ if (r == 0) {
+ SLSI_INFO(sdev, "scsc_mx_service_close closed after %d attempts\n",
+ retry_counter + 1);
+ break;
+ }
+ }
+
+ if (retry_counter + 1 == SLSI_SM_WLAN_SERVICE_CLOSE_RETRY)
+ SLSI_ERR(sdev, "scsc_mx_service_close failed %d times\n",
+ SLSI_SM_WLAN_SERVICE_CLOSE_RETRY);
+ } else if (r == -EPERM) {
+ SLSI_ERR(sdev, "scsc_mx_service_close - recovery is disabled (%d)\n", r);
+ }
+
+ if (recovery_in_progress)
+ complete_all(&sdev->recovery_stop_completion);
+exit:
+ mutex_unlock(&slsi_start_mutex);
+}
--- /dev/null
/*****************************************************************************
 *
 * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
 *
 *****************************************************************************/

/* Driver-wide constants: per-VIF peer limits, peer index ranges,
 * block-ack limits, and compatibility defines for older kernels.
 */

#ifndef _SLSI_CONST_H__
#define _SLSI_CONST_H__

#include <linux/version.h>
#include <linux/types.h>
#include <linux/ieee80211.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Max number of peers */
#define SLSI_TDLS_PEER_CONNECTIONS_MAX 4
#define SLSI_AP_PEER_CONNECTIONS_MAX 10
/* When SLSI_ADHOC_PEER_CONNECTIONS_MAX is increased to 32, the method
 * of initialising peer_sta_record[] should be carefully changed from
 * the present way of static allocation to a dynamic allocation or a
 * VIF specific initialization or both.
 */
#define SLSI_ADHOC_PEER_CONNECTIONS_MAX 16

/* Peer-record index ranges (TDLS peers occupy a sub-range) */
#define SLSI_TDLS_PEER_INDEX_MIN 2
#define SLSI_TDLS_PEER_INDEX_MAX 15
#define SLSI_PEER_INDEX_MIN 1
#define SLSI_PEER_INDEX_MAX 16

/* Queue-set index used for the STA peer */
#define SLSI_STA_PEER_QUEUESET 0

/* Block-ack limits */
#define SLSI_BA_TRAFFIC_STREAM_MAX 8
#define SLSI_BA_BUFFER_SIZE_MAX 64

/* Until LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 1) the IE Value for
 * HT OPERATION was incorrectly defined as HT INFORMATION
 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
#ifndef WLAN_EID_HT_INFORMATION
#define WLAN_EID_HT_INFORMATION 61
#endif

#define WLAN_EID_HT_OPERATION WLAN_EID_HT_INFORMATION
#endif

#ifdef __cplusplus
}
#endif

#endif /* _SLSI_CONST_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/sysfs.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+
+#include "debug.h"
+#include "procfs.h"
+#include "utils.h"
+
+#ifndef CONFIG_SCSC_DEBUG_COMPATIBILITY
+const int SLSI_INIT_DEINIT;
+const int SLSI_NETDEV = 1;
+const int SLSI_CFG80211 = 2;
+const int SLSI_MLME = 3;
+const int SLSI_SUMMARY_FRAMES = 4;
+const int SLSI_HYDRA = 5;
+const int SLSI_TX = 6;
+const int SLSI_RX = 7;
+const int SLSI_UDI = 8;
+
+const int SLSI_WIFI_FCQ = 9;
+
+const int SLSI_HIP = 10;
+const int SLSI_HIP_INIT_DEINIT = 11;
+const int SLSI_HIP_FW_DL = 12;
+const int SLSI_HIP_SDIO_OP = 13;
+const int SLSI_HIP_PS = 14;
+const int SLSI_HIP_TH = 15;
+const int SLSI_HIP_FH = 16;
+const int SLSI_HIP_SIG = 17;
+
+const int SLSI_FUNC_TRACE = 18;
+const int SLSI_TEST = 19; /* Unit test logging */
+const int SLSI_SRC_SINK = 20;
+const int SLSI_FW_TEST = 21;
+const int SLSI_RX_BA = 22;
+
+const int SLSI_TDLS = 23;
+const int SLSI_GSCAN = 24;
+const int SLSI_MBULK = 25;
+const int SLSI_FLOWC = 26;
+const int SLSI_SMAPPER = 27;
+#endif
+
+static int slsi_dbg_set_param_cb(const char *val, const struct kernel_param *kp);
+static int slsi_dbg_get_param_cb(char *buffer, const struct kernel_param *kp);
+
+static struct kernel_param_ops param_ops_log = {
+ .set = slsi_dbg_set_param_cb,
+ .get = slsi_dbg_get_param_cb,
+};
+
+#define ADD_DEBUG_MODULE_PARAM(name, default_level, filter) \
+ int slsi_dbg_lvl_ ## name = default_level; \
+ module_param_cb(slsi_dbg_lvl_ ## name, ¶m_ops_log, (void *)&filter, S_IRUGO | S_IWUSR); \
+ MODULE_PARM_DESC(slsi_dbg_lvl_ ## name, " Debug levels (0~4) for the " # name " module (0 = off) default=" # default_level)
+
+#ifndef CONFIG_SCSC_DEBUG_COMPATIBILITY
+/* Name, Default, Filter */
+ADD_DEBUG_MODULE_PARAM(init_deinit, 3, SLSI_INIT_DEINIT);
+ADD_DEBUG_MODULE_PARAM(netdev, 2, SLSI_NETDEV);
+ADD_DEBUG_MODULE_PARAM(cfg80211, 1, SLSI_CFG80211);
+ADD_DEBUG_MODULE_PARAM(mlme, 2, SLSI_MLME);
+ADD_DEBUG_MODULE_PARAM(summary_frames, 0, SLSI_SUMMARY_FRAMES);
+ADD_DEBUG_MODULE_PARAM(hydra, 0, SLSI_HYDRA);
+ADD_DEBUG_MODULE_PARAM(tx, 0, SLSI_TX);
+ADD_DEBUG_MODULE_PARAM(rx, 0, SLSI_RX);
+ADD_DEBUG_MODULE_PARAM(udi, 2, SLSI_UDI);
+
+ADD_DEBUG_MODULE_PARAM(wifi_fcq, 0, SLSI_WIFI_FCQ);
+
+ADD_DEBUG_MODULE_PARAM(hip, 0, SLSI_HIP);
+ADD_DEBUG_MODULE_PARAM(hip_init_deinit, 0, SLSI_HIP_INIT_DEINIT);
+ADD_DEBUG_MODULE_PARAM(hip_fw_dl, 0, SLSI_HIP_FW_DL);
+ADD_DEBUG_MODULE_PARAM(hip_sdio_op, 0, SLSI_HIP_SDIO_OP);
+ADD_DEBUG_MODULE_PARAM(hip_ps, 0, SLSI_HIP_PS);
+ADD_DEBUG_MODULE_PARAM(hip_th, 0, SLSI_HIP_TH);
+ADD_DEBUG_MODULE_PARAM(hip_fh, 0, SLSI_HIP_FH);
+ADD_DEBUG_MODULE_PARAM(hip_sig, 0, SLSI_HIP_SIG);
+
+ADD_DEBUG_MODULE_PARAM(func_trace, 0, SLSI_FUNC_TRACE);
+ADD_DEBUG_MODULE_PARAM(test, 0, SLSI_TEST);
+ADD_DEBUG_MODULE_PARAM(src_sink, 0, SLSI_SRC_SINK);
+ADD_DEBUG_MODULE_PARAM(fw_test, 0, SLSI_FW_TEST);
+ADD_DEBUG_MODULE_PARAM(rx_ba, 0, SLSI_RX_BA);
+
+ADD_DEBUG_MODULE_PARAM(tdls, 2, SLSI_TDLS);
+ADD_DEBUG_MODULE_PARAM(gscan, 3, SLSI_GSCAN);
+ADD_DEBUG_MODULE_PARAM(mbulk, 0, SLSI_MBULK);
+ADD_DEBUG_MODULE_PARAM(flowc, 0, SLSI_FLOWC);
+ADD_DEBUG_MODULE_PARAM(smapper, 0, SLSI_SMAPPER);
+
+int slsi_dbg_lvl_all; /* Override all debug modules */
+
+int *slsi_dbg_filters[] = {
+ &slsi_dbg_lvl_init_deinit,
+ &slsi_dbg_lvl_netdev,
+ &slsi_dbg_lvl_cfg80211,
+ &slsi_dbg_lvl_mlme,
+ &slsi_dbg_lvl_summary_frames,
+ &slsi_dbg_lvl_hydra,
+ &slsi_dbg_lvl_tx,
+ &slsi_dbg_lvl_rx,
+ &slsi_dbg_lvl_udi,
+
+ &slsi_dbg_lvl_wifi_fcq,
+
+ &slsi_dbg_lvl_hip,
+ &slsi_dbg_lvl_hip_init_deinit,
+ &slsi_dbg_lvl_hip_fw_dl,
+ &slsi_dbg_lvl_hip_sdio_op,
+ &slsi_dbg_lvl_hip_ps,
+ &slsi_dbg_lvl_hip_th,
+ &slsi_dbg_lvl_hip_fh,
+ &slsi_dbg_lvl_hip_sig,
+
+ &slsi_dbg_lvl_func_trace,
+ &slsi_dbg_lvl_test,
+ &slsi_dbg_lvl_src_sink,
+ &slsi_dbg_lvl_fw_test,
+ &slsi_dbg_lvl_rx_ba,
+
+ &slsi_dbg_lvl_tdls,
+ &slsi_dbg_lvl_gscan,
+ &slsi_dbg_lvl_mbulk,
+ &slsi_dbg_lvl_flowc,
+ &slsi_dbg_lvl_smapper,
+};
+#else
+int slsi_dbg_lvl_compat_all;
+module_param(slsi_dbg_lvl_compat_all, int, S_IRUGO | S_IWUSR);
+
+int *slsi_dbg_filters[] = {
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+ &slsi_dbg_lvl_compat_all,
+};
+
+int slsi_dbg_lvl_all; /* Override all debug modules */
+#endif
+
+const int SLSI_DF_MAX = (sizeof(slsi_dbg_filters) / sizeof(slsi_dbg_filters[0]));
+
+const int SLSI_OVERRIDE_ALL_FILTER = -1; /* This is not a log module but merely a filter option */
+
+/* Convert a string containing a decimal value to an integer */
+static int slsi_decstr_to_int(const char *dec_str, int *res)
+{
+ int tmp_res = 0;
+ int sign = 0;
+ const char *tmp_char = dec_str;
+
+ sign = (*tmp_char == '-') ? -1 : ((*tmp_char == '+') ? 1 : 0);
+ if (sign != 0)
+ tmp_char++;
+
+ while (*tmp_char) {
+ if (*tmp_char == '\n')
+ break;
+ if ((*tmp_char < '0') || (*tmp_char > '9'))
+ return -1;
+ tmp_res = tmp_res * 10 + (*tmp_char - '0');
+ tmp_char++;
+ }
+
+ *res = (sign < 0) ? (-tmp_res) : tmp_res;
+ return 0;
+}
+
+static int slsi_dbg_set_param_cb(const char *val, const struct kernel_param *kp)
+{
+ int new_val;
+ int filter;
+
+ if (slsi_decstr_to_int(val, &new_val) < 0) {
+ pr_info("%s: failed to convert %s to int\n", __func__, val);
+ return -1;
+ }
+ filter = *((int *)(kp->arg));
+
+ if (filter < -1 || filter >= SLSI_DF_MAX) {
+ pr_info("%s: filter %d out of range\n", __func__, filter);
+ return -1;
+ }
+
+ if (filter == SLSI_OVERRIDE_ALL_FILTER) {
+ if (new_val == -1) {
+ pr_info("Override does not take effect because slsi_dbg_lvl_all=%d\n", new_val);
+ } else {
+ int i;
+
+ pr_info("Setting all debug modules to level %d\n", new_val);
+ for (i = 0; i < SLSI_DF_MAX; i++)
+ *slsi_dbg_filters[i] = new_val;
+
+ slsi_dbg_lvl_all = new_val;
+ }
+ } else {
+ pr_info("Setting debug module %d to level %d\n", filter, new_val);
+ *slsi_dbg_filters[filter] = new_val;
+ }
+
+ return 0;
+}
+
+static int slsi_dbg_get_param_cb(char *buffer, const struct kernel_param *kp)
+{
+#define KERN_PARAM_OPS_MAX_BUF_SIZE (4 * 1024)
+ int filter;
+ int val = 0;
+
+ filter = *((int *)(kp->arg));
+
+ if (filter == SLSI_OVERRIDE_ALL_FILTER)
+ val = slsi_dbg_lvl_all;
+ else if (filter < 0 || filter >= SLSI_DF_MAX)
+ pr_info("%s: filter %d out of range\n", __func__, filter);
+ else
+ val = *slsi_dbg_filters[filter];
+
+ return snprintf(buffer, KERN_PARAM_OPS_MAX_BUF_SIZE, "%i", val);
+}
+
+module_param_cb(slsi_dbg_lvl_all, ¶m_ops_log, (void *)&SLSI_OVERRIDE_ALL_FILTER, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(slsi_dbg_lvl_all, "Override debug level (0~4) for all the log modules (-1 = do not override) default=0");
+
+#ifdef CONFIG_SCSC_WLAN_SKB_TRACKING
/* Global bookkeeping for outstanding tracked SKBs. */
struct slsi_skb_tracker {
	spinlock_t lock;          /* protects all fields below */
	struct list_head tracked; /* list of struct slsi_tracked_skb */
	u32 tracked_count;        /* SKBs currently tracked */
	u32 tracked_count_max;    /* high-water mark since last reset */
};

/* One entry per tracked SKB, recording where it was last seen. */
struct slsi_tracked_skb {
	struct list_head entry;
	const char *file;    /* source file of the last track/marker call */
	int line;            /* source line of the last track/marker call */
	struct sk_buff *skb; /* the SKB; tracker holds one extra reference */
};

static struct slsi_skb_tracker skb_tracker;
+
/* One-time initialisation of the SKB tracker: zero the bookkeeping,
 * initialise the lock and start with an empty tracked list.
 */
void slsi_dbg_track_skb_init(void)
{
	SLSI_DBG4_NODEV(SLSI_TEST, "\n");
	memset(&skb_tracker, 0x00, sizeof(skb_tracker));
	spin_lock_init(&skb_tracker.lock);
	INIT_LIST_HEAD(&skb_tracker.tracked);
}
+
/* Reset only the counters; SKBs already tracked remain in the list. */
void slsi_dbg_track_skb_reset(void)
{
	SLSI_DBG4_NODEV(SLSI_TEST, "\n");
	skb_tracker.tracked_count = 0;
	skb_tracker.tracked_count_max = 0;
}
+
+bool slsi_dbg_track_skb_marker_f(struct sk_buff *skb, const char *file, int line)
+{
+ struct slsi_tracked_skb *t;
+ struct list_head *pos, *q;
+ bool r = true;
+ unsigned long flags;
+
+ if (!skb)
+ return r;
+
+ spin_lock_irqsave(&skb_tracker.lock, flags);
+ list_for_each_safe(pos, q, &skb_tracker.tracked) {
+ t = list_entry(pos, struct slsi_tracked_skb, entry);
+ if (t->skb == skb) {
+ SLSI_DBG4_NODEV(SLSI_TEST, "Marker 0x%p: %s:%d\n", skb, file, line);
+ t->file = file;
+ t->line = line;
+ goto exit;
+ }
+ }
+ WARN_ON(1);
+ SLSI_ERR_NODEV("SKB Not Tracked: %p: %s:%d\n", skb, file, line);
+ r = false;
+exit:
+ spin_unlock_irqrestore(&skb_tracker.lock, flags);
+ return r;
+}
+
/* Start tracking @skb: take an extra reference on it and record the
 * call site. @flags is the GFP allocation context for the bookkeeping
 * entry; tracking is silently skipped if the entry cannot be allocated.
 */
void slsi_dbg_track_skb_f(struct sk_buff *skb, gfp_t flags, const char *file, int line)
{
	struct slsi_tracked_skb *t = kmalloc(sizeof(*t), flags);
	unsigned long flags_irq;

	if (!t)
		return;

	t->file = file;
	t->line = line;
	t->skb = skb_get(skb); /* Add a reference to the skb */
	SLSI_DBG4_NODEV(SLSI_TEST, "track SKB: 0x%p: %s:%d\n", skb, file, line);
	spin_lock_irqsave(&skb_tracker.lock, flags_irq);
	list_add(&t->entry, &skb_tracker.tracked);
	skb_tracker.tracked_count++;
	/* Maintain the high-water mark reported by the leak report. */
	if (skb_tracker.tracked_count > skb_tracker.tracked_count_max)
		skb_tracker.tracked_count_max = skb_tracker.tracked_count;
	spin_unlock_irqrestore(&skb_tracker.lock, flags_irq);
}
+
/* Stop tracking @skb: remove its bookkeeping entry and release the
 * reference taken in slsi_dbg_track_skb_f(). Returns true if @skb is
 * NULL or was being tracked, false (with a WARN) otherwise.
 */
bool slsi_dbg_untrack_skb_f(struct sk_buff *skb, const char *file, int line)
{
	struct slsi_tracked_skb *t;
	struct list_head *pos, *q;
	bool r = true;
	unsigned long flags;

	if (!skb)
		return r;

	spin_lock_irqsave(&skb_tracker.lock, flags);
	/* _safe iterator: the matching entry is deleted inside the loop. */
	list_for_each_safe(pos, q, &skb_tracker.tracked) {
		t = list_entry(pos, struct slsi_tracked_skb, entry);
		if (t->skb == skb) {
			SLSI_DBG4_NODEV(SLSI_TEST, "un-track SKB: 0x%p: %s:%d\n", skb, file, line);
			list_del(pos);
			kfree_skb(t->skb); /* Free the reference we took */
			kfree(t);
			skb_tracker.tracked_count--;
			goto exit;
		}
	}
	WARN_ON(1);
	SLSI_ERR_NODEV("SKB Not Tracked: %p: %s:%d", skb, file, line);
	r = false;
exit:
	spin_unlock_irqrestore(&skb_tracker.lock, flags);
	return r;
}
+
/* Report and purge every SKB still tracked. Entries whose SKB is still
 * shared are reported as leaks; the rest were tracked but never
 * untracked. All tracker-held references are released.
 */
void slsi_dbg_track_skb_report(void)
{
	struct slsi_tracked_skb *t;
	struct list_head *pos, *q;
	unsigned long flags;

	spin_lock_irqsave(&skb_tracker.lock, flags);
	SLSI_INFO_NODEV("Tracked Count Current: %d\n", skb_tracker.tracked_count);
	SLSI_INFO_NODEV("Tracked Count Max : %d\n", skb_tracker.tracked_count_max);
	/* _safe iterator: every entry is deleted inside the loop. */
	list_for_each_safe(pos, q, &skb_tracker.tracked) {
		t = list_entry(pos, struct slsi_tracked_skb, entry);
		if (skb_shared(t->skb))
			SLSI_ERR_NODEV("SKB Leak: 0x%p: %s:%d\n", t->skb, t->file, t->line);
		else
			SLSI_ERR_NODEV("SKB Not Untracked: 0x%p: %s:%d\n", t->skb, t->file, t->line);
		list_del(pos);
		kfree_skb(t->skb); /* Free the reference we took */
		kfree(t);
		skb_tracker.tracked_count--;
	}
	/* The loop leaves the list empty; re-init is belt-and-braces. */
	INIT_LIST_HEAD(&skb_tracker.tracked);
	spin_unlock_irqrestore(&skb_tracker.lock, flags);
}
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/kernel.h>
+#include "dev.h"
+#include <scsc/scsc_logring.h>
+
+/* Logging modules
+ * =======================
+ */
+
+#ifndef CONFIG_SCSC_DEBUG_COMPATIBILITY
+extern const int SLSI_INIT_DEINIT;
+extern const int SLSI_NETDEV;
+extern const int SLSI_CFG80211;
+extern const int SLSI_MLME;
+extern const int SLSI_SUMMARY_FRAMES;
+extern const int SLSI_HYDRA;
+extern const int SLSI_TX;
+extern const int SLSI_RX;
+extern const int SLSI_UDI;
+
+extern const int SLSI_WIFI_FCQ;
+
+extern const int SLSI_HIP;
+extern const int SLSI_HIP_INIT_DEINIT;
+extern const int SLSI_HIP_FW_DL;
+extern const int SLSI_HIP_SDIO_OP;
+extern const int SLSI_HIP_PS;
+extern const int SLSI_HIP_TH;
+extern const int SLSI_HIP_FH;
+extern const int SLSI_HIP_SIG;
+
+extern const int SLSI_FUNC_TRACE;
+extern const int SLSI_TEST;
+extern const int SLSI_SRC_SINK;
+extern const int SLSI_FW_TEST;
+extern const int SLSI_RX_BA;
+
+extern const int SLSI_TDLS;
+extern const int SLSI_GSCAN;
+extern const int SLSI_MBULK;
+extern const int SLSI_FLOWC;
+extern const int SLSI_SMAPPER;
+#endif /* CONFIG_SCSC_DEBUG_COMPATIBILITY */
+
+extern int *slsi_dbg_filters[];
+
+#ifndef pr_warn
+#define pr_warn pr_warning
+#endif
+
+/*---------------------------*/
+
+/**
+ * debug logging functions
+ * =======================
+ */
+
/* Core logging helpers. SLSI_EWI_* resolve a struct device to log
 * against (NULL-safe for early init) and prepend the SCSC prefix, a
 * severity label and the calling function name.
 */
#define SLSI_EWI_NODEV_LABEL "ieee80211 phy.: "
#define SLSI_EWI_DEV(sdev) (likely((sdev) && ((sdev)->wiphy)) ? &((sdev)->wiphy->dev) : NULL)
#define SLSI_EWI_NET_DEV(ndev) (likely(ndev) ? SLSI_EWI_DEV(((struct netdev_vif *)netdev_priv(ndev))->sdev) : NULL)
#define SLSI_EWI_NET_NAME(ndev) (likely(ndev) ? netdev_name(ndev) : NULL)

#define SLSI_EWI(output, sdev, label, fmt, arg ...) output(SLSI_EWI_DEV(sdev), SCSC_PREFIX label ": %s: " fmt, __func__, ## arg)
#define SLSI_EWI_NET(output, ndev, label, fmt, arg ...) output(SLSI_EWI_NET_DEV(ndev), SCSC_PREFIX "%s: " label ": %s: " fmt, SLSI_EWI_NET_NAME(ndev), __func__, ## arg)
#define SLSI_EWI_NODEV(output, label, fmt, arg ...) output(SLSI_EWI_NODEV_LABEL SCSC_PREFIX label ": %s: " fmt, __func__, ## arg)

/* Variants that append a hex dump of @p (@len bytes) at log level @klevel. */
#define SLSI_EWI_HEX(output, klevel, sdev, label, p, len, fmt, arg ...) \
	do { \
		SLSI_EWI(output, sdev, label, fmt, ## arg); \
		print_hex_dump(klevel, SCSC_PREFIX, DUMP_PREFIX_OFFSET, 16, 1, p, len, 0); \
	} while (0)

#define SLSI_EWI_HEX_NET(output, klevel, dev, label, p, len, fmt, arg ...) \
	do { \
		SLSI_EWI_NET(output, dev, label, fmt, ## arg); \
		print_hex_dump(klevel, SCSC_PREFIX, DUMP_PREFIX_OFFSET, 16, 1, p, len, 0); \
	} while (0)

#define SLSI_EWI_HEX_NODEV(output, klevel, label, p, len, fmt, arg ...) \
	do { \
		SLSI_EWI_NODEV(output, label, fmt, ## arg); \
		print_hex_dump(klevel, SCSC_PREFIX, DUMP_PREFIX_OFFSET, 16, 1, p, len, 0); \
	} while (0)

#define SLSI_ERR(sdev, fmt, arg ...) SLSI_EWI(dev_err, sdev, "E", fmt, ## arg)
#define SLSI_WARN(sdev, fmt, arg ...) SLSI_EWI(dev_warn, sdev, "W", fmt, ## arg)
#define SLSI_INFO(sdev, fmt, arg ...) SLSI_EWI(dev_info, sdev, "I", fmt, ## arg)

#define SLSI_NET_ERR(ndev, fmt, arg ...) SLSI_EWI_NET(dev_err, ndev, "E", fmt, ## arg)
#define SLSI_NET_WARN(ndev, fmt, arg ...) SLSI_EWI_NET(dev_warn, ndev, "W", fmt, ## arg)
#define SLSI_NET_INFO(ndev, fmt, arg ...) SLSI_EWI_NET(dev_info, ndev, "I", fmt, ## arg)

#define SLSI_ERR_NODEV(fmt, arg ...) SLSI_EWI_NODEV(pr_err, "E", fmt, ## arg)
#define SLSI_WARN_NODEV(fmt, arg ...) SLSI_EWI_NODEV(pr_warn, "W", fmt, ## arg)
#define SLSI_INFO_NODEV(fmt, arg ...) SLSI_EWI_NODEV(pr_info, "I", fmt, ## arg)

/* The kernel log-level macro is KERN_WARNING; the original KERN_WARN
 * does not exist and broke compilation wherever SLSI_WARN_HEX* was used.
 */
#define SLSI_ERR_HEX(sdev, p, len, fmt, arg ...) SLSI_EWI_HEX(dev_err, KERN_ERR, sdev, "E", p, len, fmt, ## arg)
#define SLSI_WARN_HEX(sdev, p, len, fmt, arg ...) SLSI_EWI_HEX(dev_warn, KERN_WARNING, sdev, "W", p, len, fmt, ## arg)
#define SLSI_INFO_HEX(sdev, p, len, fmt, arg ...) SLSI_EWI_HEX(dev_info, KERN_INFO, sdev, "I", p, len, fmt, ## arg)

#define SLSI_ERR_HEX_NODEV(p, len, fmt, arg ...) SLSI_EWI_HEX_NODEV(pr_err, KERN_ERR, "E", p, len, fmt, ## arg)
#define SLSI_WARN_HEX_NODEV(p, len, fmt, arg ...) SLSI_EWI_HEX_NODEV(pr_warn, KERN_WARNING, "W", p, len, fmt, ## arg)
#define SLSI_INFO_HEX_NODEV(p, len, fmt, arg ...) SLSI_EWI_HEX_NODEV(pr_info, KERN_INFO, "I", p, len, fmt, ## arg)
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+
+#define SLSI_DBG(sdev, filter, dbg_lvl, fmt, arg ...) \
+ do { \
+ if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
+ SLSI_EWI(dev_info, sdev, # dbg_lvl, fmt, ## arg); \
+ } \
+ } while (0)
+
+#define SLSI_DBG_NET(ndev, filter, dbg_lvl, fmt, arg ...) \
+ do { \
+ if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
+ SLSI_EWI_NET(dev_info, ndev, # dbg_lvl, fmt, ## arg); \
+ } \
+ } while (0)
+
+#define SLSI_DBG_NODEV(filter, dbg_lvl, fmt, arg ...) \
+ do { \
+ if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
+ SLSI_EWI_NODEV(pr_info, # dbg_lvl, fmt, ## arg); \
+ } \
+ } while (0)
+
+#define SLSI_DBG1(sdev, filter, fmt, arg ...) SLSI_DBG(sdev, filter, 1, fmt, ## arg)
+#define SLSI_DBG2(sdev, filter, fmt, arg ...) SLSI_DBG(sdev, filter, 2, fmt, ## arg)
+#define SLSI_DBG3(sdev, filter, fmt, arg ...) SLSI_DBG(sdev, filter, 3, fmt, ## arg)
+#define SLSI_DBG4(sdev, filter, fmt, arg ...) SLSI_DBG(sdev, filter, 4, fmt, ## arg)
+
+#define SLSI_NET_DBG1(ndev, filter, fmt, arg ...) SLSI_DBG_NET(ndev, filter, 1, fmt, ## arg)
+#define SLSI_NET_DBG2(ndev, filter, fmt, arg ...) SLSI_DBG_NET(ndev, filter, 2, fmt, ## arg)
+#define SLSI_NET_DBG3(ndev, filter, fmt, arg ...) SLSI_DBG_NET(ndev, filter, 3, fmt, ## arg)
+#define SLSI_NET_DBG4(ndev, filter, fmt, arg ...) SLSI_DBG_NET(ndev, filter, 4, fmt, ## arg)
+
+#define SLSI_DBG1_NODEV(filter, fmt, arg ...) SLSI_DBG_NODEV(filter, 1, fmt, ## arg)
+#define SLSI_DBG2_NODEV(filter, fmt, arg ...) SLSI_DBG_NODEV(filter, 2, fmt, ## arg)
+#define SLSI_DBG3_NODEV(filter, fmt, arg ...) SLSI_DBG_NODEV(filter, 3, fmt, ## arg)
+#define SLSI_DBG4_NODEV(filter, fmt, arg ...) SLSI_DBG_NODEV(filter, 4, fmt, ## arg)
+
+/* Prints LOG_ENTRY if the condition evaluates to TRUE otherwise nothing is printed. */
+#define LOG_CONDITIONALLY(condition, LOG_ENTRY) \
+ do { \
+ if (unlikely(condition)) \
+ LOG_ENTRY; \
+ } while (0)
+
+/* Returns TRUE if the flag is set otherwise returns FALSE. */
+#define LOG_BOOL_FLAG(flag) \
+ (flag) ? "TRUE" : "FALSE"
+
#define SLSI_DBG_HEX_OUT(sdev, filter, dbg_lvl, p, len, fmt, arg ...) \
	do { \
		if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
			SLSI_EWI_HEX(dev_info, KERN_INFO, sdev, # dbg_lvl, p, len, fmt, ## arg); \
		} \
	} while (0)

/* NOTE(review): the original body expanded a literal "dev" instead of
 * the macro parameter, so it only compiled when the caller happened to
 * have a variable named "dev" in scope. Use the parameter instead.
 */
#define SLSI_DBG_HEX_OUT_NET(sdev, filter, dbg_lvl, p, len, fmt, arg ...) \
	do { \
		if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
			SLSI_EWI_HEX_NET(dev_info, KERN_INFO, sdev, # dbg_lvl, p, len, fmt, ## arg); \
		} \
	} while (0)

#define SLSI_DBG_HEX_OUT_NODEV(filter, dbg_lvl, p, len, fmt, arg ...) \
	do { \
		if (unlikely((dbg_lvl) <= *slsi_dbg_filters[filter])) { \
			SLSI_EWI_HEX_NODEV(pr_info, KERN_INFO, # dbg_lvl, p, len, fmt, ## arg); \
		} \
	} while (0)

/* Hex dumps are emitted at debug level 4 only. */
#define SLSI_DBG_HEX(sdev, filter, p, len, fmt, arg ...) SLSI_DBG_HEX_OUT(sdev, filter, 4, p, len, fmt, ## arg)
#define SLSI_NET_DBG_HEX(dev, filter, p, len, fmt, arg ...) SLSI_DBG_HEX_OUT_NET(dev, filter, 4, p, len, fmt, ## arg)
#define SLSI_DBG_HEX_NODEV(filter, p, len, fmt, arg ...) SLSI_DBG_HEX_OUT_NODEV(filter, 4, p, len, fmt, ## arg)
+
+#define FUNC_ENTER(sdev) SLSI_DBG4(sdev, SLSI_FUNC_TRACE, "--->\n")
+#define FUNC_EXIT(sdev) SLSI_DBG4(sdev, SLSI_FUNC_TRACE, "<---\n")
+
+#define FUNC_ENTER_NODEV() SLSI_DBG4_NODEV(SLSI_FUNC_TRACE, "--->\n")
+#define FUNC_EXIT_NODEV() SLSI_DBG4_NODEV(SLSI_FUNC_TRACE, "<---\n")
+
+void slsi_debug_frame(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, const char *prefix);
+#else /* CONFIG_SCSC_WLAN_DEBUG */
+
/* No-op stubs when CONFIG_SCSC_WLAN_DEBUG is disabled. */
#define SLSI_DBG1(sdev, filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG2(sdev, filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG3(sdev, filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG4(sdev, filter, fmt, arg ...) do {} while (0)
#define SLSI_NET_DBG1(dev, filter, fmt, arg ...) do {} while (0)
#define SLSI_NET_DBG2(dev, filter, fmt, arg ...) do {} while (0)
#define SLSI_NET_DBG3(dev, filter, fmt, arg ...) do {} while (0)
#define SLSI_NET_DBG4(dev, filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG1_NODEV(filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG2_NODEV(filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG3_NODEV(filter, fmt, arg ...) do {} while (0)
#define SLSI_DBG4_NODEV(filter, fmt, arg ...) do {} while (0)

#define LOG_CONDITIONALLY(condition, LOG_ENTRY) do {} while (0)
/* LOG_BOOL_FLAG is used as an expression (it yields a string for %s
 * format arguments in the debug build), so the stub must remain an
 * expression: the original do {} while (0) stub could not compile in
 * expression context.
 */
#define LOG_BOOL_FLAG(flag) \
	((flag) ? "TRUE" : "FALSE")

#define SLSI_DBG_HEX(sdev, filter, p, len, fmt, arg ...) do {} while (0)
#define SLSI_NET_DBG_HEX(dev, filter, p, len, fmt, arg ...) do {} while (0)
#define SLSI_DBG_HEX_NODEV(filter, p, len, fmt, arg ...) do {} while (0)

#define FUNC_ENTER(sdev)
#define FUNC_EXIT(sdev)

#define FUNC_ENTER_NODEV()
#define FUNC_EXIT_NODEV()

/* Inline no-op so call sites compile unchanged without the debug code. */
static inline void slsi_debug_frame(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, const char *prefix)
{
	(void)sdev;   /* unused */
	(void)dev;    /* unused */
	(void)skb;    /* unused */
	(void)prefix; /* unused */
}
+
+#endif /* CONFIG_SCSC_WLAN_DEBUG */
+
+#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
+
+#undef SLSI_ERR
+#undef SLSI_WARN
+#undef SLSI_INFO
+#define SLSI_ERR(sdev, fmt, arg ...) SCSC_ERR_SDEV(sdev, fmt, ## arg)
+#define SLSI_WARN(sdev, fmt, arg ...) SCSC_WARNING_SDEV(sdev, fmt, ## arg)
+#define SLSI_INFO(sdev, fmt, arg ...) SCSC_INFO_SDEV(sdev, fmt, ## arg)
+
+#undef SLSI_NET_ERR
+#undef SLSI_NET_WARN
+#undef SLSI_NET_INFO
+#define SLSI_NET_ERR(ndev, fmt, arg ...) SCSC_ERR_NDEV(ndev, fmt, ## arg)
+#define SLSI_NET_WARN(ndev, fmt, arg ...) SCSC_WARNING_NDEV(ndev, fmt, ## arg)
+#define SLSI_NET_INFO(ndev, fmt, arg ...) SCSC_INFO_NDEV(ndev, fmt, ## arg)
+
+#undef SLSI_ERR_NODEV
+#undef SLSI_WARN_NODEV
+#undef SLSI_INFO_NODEV
+#define SLSI_ERR_NODEV(fmt, arg ...) SCSC_ERR(fmt, ## arg)
+#define SLSI_WARN_NODEV(fmt, arg ...) SCSC_WARNING(fmt, ## arg)
+#define SLSI_INFO_NODEV(fmt, arg ...) SCSC_INFO(fmt, ## arg)
+
+#undef SLSI_DBG1
+#undef SLSI_DBG2
+#undef SLSI_DBG3
+#undef SLSI_DBG4
+#define SLSI_DBG1(sdev, filter, fmt, arg ...) SCSC_TAG_DBG1_SDEV(sdev, filter, fmt, ## arg)
+#define SLSI_DBG2(sdev, filter, fmt, arg ...) SCSC_TAG_DBG2_SDEV(sdev, filter, fmt, ## arg)
+#define SLSI_DBG3(sdev, filter, fmt, arg ...) SCSC_TAG_DBG3_SDEV(sdev, filter, fmt, ## arg)
+#define SLSI_DBG4(sdev, filter, fmt, arg ...) SCSC_TAG_DBG4_SDEV(sdev, filter, fmt, ## arg)
+
+#undef SLSI_NET_DBG1
+#undef SLSI_NET_DBG2
+#undef SLSI_NET_DBG3
+#undef SLSI_NET_DBG4
+#define SLSI_NET_DBG1(ndev, filter, fmt, arg ...) SCSC_TAG_DBG1_NDEV(ndev, filter, fmt, ## arg)
+#define SLSI_NET_DBG2(ndev, filter, fmt, arg ...) SCSC_TAG_DBG2_NDEV(ndev, filter, fmt, ## arg)
+#define SLSI_NET_DBG3(ndev, filter, fmt, arg ...) SCSC_TAG_DBG3_NDEV(ndev, filter, fmt, ## arg)
+#define SLSI_NET_DBG4(ndev, filter, fmt, arg ...) SCSC_TAG_DBG4_NDEV(ndev, filter, fmt, ## arg)
+
+#undef SLSI_DBG1_NODEV
+#undef SLSI_DBG2_NODEV
+#undef SLSI_DBG3_NODEV
+#undef SLSI_DBG4_NODEV
+#define SLSI_DBG1_NODEV(filter, fmt, arg ...) SCSC_TAG_DBG1(filter, fmt, ## arg)
+#define SLSI_DBG2_NODEV(filter, fmt, arg ...) SCSC_TAG_DBG2(filter, fmt, ## arg)
+#define SLSI_DBG3_NODEV(filter, fmt, arg ...) SCSC_TAG_DBG3(filter, fmt, ## arg)
+#define SLSI_DBG4_NODEV(filter, fmt, arg ...) SCSC_TAG_DBG4(filter, fmt, ## arg)
+
+#endif /* CONFIG_SCSC_DEBUG_COMPATIBILITY */
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/ieee80211.h>
+#include <linux/ratelimit.h>
+
+#include "debug.h"
+#include "fapi.h"
+#include "const.h"
+#include "mgt.h"
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+
+/* frame decoding debug level */
+static int slsi_debug_summary_frame = 3;
+module_param(slsi_debug_summary_frame, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(slsi_debug_summary_frame, "Debug level (0: disable, 1: mgmt only (no scan), 2: mgmt and imp frames, 3: all");
+
/* Decoder for a named frame type; writes a human-readable summary into
 * result. Note this variant returns void, unlike the size_t-returning
 * decode_fn in the two tables below.
 */
struct slsi_decode_entry {
	const char *name;
	void (*decode_fn)(u8 *frame, u16 frame_length, char *result, size_t result_length);
};

/* Decoder keyed on an 8-byte SNAP header; decode_fn returns the number
 * of characters written (snprintf convention).
 */
struct slsi_decode_snap {
	const u8 snap[8];
	const char *name;
	size_t (*decode_fn)(u8 *frame, u16 frame_length, char *result, size_t result_length);
};

/* Decoder keyed on a 16-bit value (exact meaning depends on the table
 * this struct is used in).
 */
struct slsi_value_name_decode {
	const u16 value;
	const char *name;
	size_t (*decode_fn)(u8 *frame, u16 frame_length, char *result, size_t result_length);
};
+
+static size_t slsi_decode_basic_ie_info(u8 *ies, u16 ies_length, char *result, size_t result_length)
+{
+ size_t size_written = 0;
+ const u8 *ssid = cfg80211_find_ie(WLAN_EID_SSID, ies, ies_length);
+ const u8 *rsn = cfg80211_find_ie(WLAN_EID_RSN, ies, ies_length);
+ const u8 *wpa = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, ies_length);
+ const u8 *wps = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, ies, ies_length);
+ const u8 *htop = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ies, ies_length);
+ const u8 *country = cfg80211_find_ie(WLAN_EID_COUNTRY, ies, ies_length);
+
+ if (htop && htop[1]) {
+ size_written += snprintf(result + size_written, result_length - size_written, " channel:%d", htop[2]);
+ } else {
+ const u8 *ds = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ies_length);
+
+ if (ds)
+ size_written += snprintf(result + size_written, result_length - size_written, " channel:%d", ds[2]);
+ }
+
+ if (ssid) {
+ if (ssid[1])
+ size_written += snprintf(result + size_written, result_length - size_written, " %.*s", ssid[1], (char *)&ssid[2]);
+ else
+ size_written += snprintf(result + size_written, result_length - size_written, " <HIDDEN>");
+ }
+
+ if (country)
+ size_written += snprintf(result + size_written, result_length - size_written, " country:%c%c%c", country[2], country[3], country[4]);
+ if (wpa)
+ size_written += snprintf(result + size_written, result_length - size_written, " wpa");
+ if (rsn)
+ size_written += snprintf(result + size_written, result_length - size_written, " wpa2");
+ if (wps)
+ size_written += snprintf(result + size_written, result_length - size_written, " wps");
+ return size_written;
+}
+
+static void slsi_decode_frame_ies_only(u8 offset, u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t str_len;
+
+ result[0] = '(';
+ str_len = slsi_decode_basic_ie_info(frame + offset,
+ frame_length - offset,
+ result + 1,
+ result_length - 1);
+ result[1 + str_len] = ')';
+ result[2 + str_len] = '\0';
+}
+
+static size_t slsi_decode_frame_leu16(u8 *frame, u16 frame_length, char *result, size_t result_length, const char *name)
+{
+ u16 value = frame[0] | frame[1] << 8;
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ return snprintf(result, result_length, "%s:%u", name, value);
+}
+
+#define SLSI_ASSOC_REQ_IE_OFFSET 4
+static void slsi_decode_assoc_req(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ slsi_decode_frame_ies_only(SLSI_ASSOC_REQ_IE_OFFSET, frame, frame_length, result, result_length);
+}
+
+#define SLSI_ASSOC_RSP_STATUS_OFFSET 2
+#define SLSI_ASSOC_RSP_IE_OFFSET 6
+static void slsi_decode_assoc_rsp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t str_len = 0;
+
+ result[str_len++] = '(';
+ str_len += slsi_decode_frame_leu16(frame + SLSI_ASSOC_RSP_STATUS_OFFSET,
+ frame_length - SLSI_ASSOC_RSP_STATUS_OFFSET,
+ result + str_len,
+ result_length - str_len,
+ "status");
+ str_len += slsi_decode_basic_ie_info(frame + SLSI_ASSOC_RSP_IE_OFFSET,
+ frame_length - SLSI_ASSOC_RSP_IE_OFFSET,
+ result + str_len,
+ result_length - str_len);
+ result[str_len++] = ')';
+ result[str_len] = '\0';
+}
+
+#define SLSI_DEAUTH_REASON_OFFSET 0
+static void slsi_decode_deauth(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t str_len = 0;
+
+ result[str_len++] = '(';
+ str_len += slsi_decode_frame_leu16(frame + SLSI_DEAUTH_REASON_OFFSET,
+ frame_length - SLSI_DEAUTH_REASON_OFFSET,
+ result + str_len,
+ result_length - str_len,
+ "reason_code");
+ result[str_len++] = ')';
+ result[str_len] = '\0';
+}
+
+#define SLSI_AUTH_ALGO_OFFSET 0
+#define SLSI_AUTH_SEQ_OFFSET 2
+#define SLSI_AUTH_STATUS_OFFSET 4
+static void slsi_decode_auth(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t str_len = 0;
+
+ result[str_len++] = '(';
+ str_len += slsi_decode_frame_leu16(frame + SLSI_AUTH_ALGO_OFFSET,
+ frame_length - SLSI_AUTH_ALGO_OFFSET,
+ result + str_len,
+ result_length - str_len,
+ "algo");
+ result[str_len++] = ' ';
+ str_len += slsi_decode_frame_leu16(frame + SLSI_AUTH_SEQ_OFFSET,
+ frame_length - SLSI_AUTH_SEQ_OFFSET,
+ result + str_len,
+ result_length - str_len,
+ "seq");
+ result[str_len++] = ' ';
+ str_len += slsi_decode_frame_leu16(frame + SLSI_AUTH_STATUS_OFFSET,
+ frame_length - SLSI_AUTH_STATUS_OFFSET,
+ result + str_len,
+ result_length - str_len,
+ "status");
+ result[str_len++] = ' ';
+ result[str_len++] = ')';
+ result[str_len] = '\0';
+}
+
+#define SLSI_REASSOC_IE_OFFSET 10
+static void slsi_decode_reassoc_req(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ slsi_decode_frame_ies_only(SLSI_REASSOC_IE_OFFSET, frame, frame_length, result, result_length);
+}
+
+#define SLSI_BEACON_IE_OFFSET 12
+static void slsi_decode_beacon(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ slsi_decode_frame_ies_only(SLSI_BEACON_IE_OFFSET, frame, frame_length, result, result_length);
+}
+
+#define SLSI_PROBEREQ_IE_OFFSET 0
+static void slsi_decode_probe_req(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ slsi_decode_frame_ies_only(SLSI_PROBEREQ_IE_OFFSET, frame, frame_length, result, result_length);
+}
+
+#define SLSI_ACTION_BLOCK_ACK_ADDBA_REQ 0
+#define SLSI_ACTION_BLOCK_ACK_ADDBA_RSP 1
+#define SLSI_ACTION_BLOCK_ACK_DELBA 2
+static size_t slsi_decode_action_blockack(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u8 action = frame[1];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ switch (action) {
+ case SLSI_ACTION_BLOCK_ACK_ADDBA_REQ:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->ADDBAReq(token:%u)", token);
+ }
+ case SLSI_ACTION_BLOCK_ACK_ADDBA_RSP:
+ {
+ u8 token = frame[2];
+ u16 status = frame[3] | frame[4] << 8;
+
+ return snprintf(result, result_length, "->ADDBARsp(token:%u, status:%u)", token, status);
+ }
+ case SLSI_ACTION_BLOCK_ACK_DELBA:
+ {
+ u16 reason_code = frame[4] | frame[5] << 8;
+
+ return snprintf(result, result_length, "->DELBA(reason_code:%u)", reason_code);
+ }
+ default:
+ return snprintf(result, result_length, "->Action(%u)", action);
+ }
+}
+
+#define SLSI_ACTION_PUBLIC_DISCOVERY_RSP 14
+
+static size_t slsi_decode_action_public(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u8 action = frame[1];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ switch (action) {
+ case SLSI_ACTION_PUBLIC_DISCOVERY_RSP:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->DiscoveryRsp(token:%u)", token);
+ }
+ default:
+ return snprintf(result, result_length, "->Action(%u)", action);
+ }
+}
+
+#define SLSI_ACTION_TDLS_SETUP_REQ 0
+#define SLSI_ACTION_TDLS_SETUP_RSP 1
+#define SLSI_ACTION_TDLS_SETUP_CFM 2
+#define SLSI_ACTION_TDLS_TEARDOWN 3
+#define SLSI_ACTION_TDLS_PEER_TRAFFIC_IND 4
+#define SLSI_ACTION_TDLS_CHANNEL_SWITCH_REQ 5
+#define SLSI_ACTION_TDLS_CHANNEL_SWITCH_RSP 6
+#define SLSI_ACTION_TDLS_PEER_PSM_REQ 7
+#define SLSI_ACTION_TDLS_PEER_PSM_RSP 8
+#define SLSI_ACTION_TDLS_PEER_TRAFFIC_RSP 9
+#define SLSI_ACTION_TDLS_DISCOVERY_REQ 10
+
+static size_t slsi_decode_action_tdls(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u8 action = frame[1];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ switch (action) {
+ case SLSI_ACTION_TDLS_SETUP_REQ:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->SetupReq(token:%u)", token);
+ }
+ case SLSI_ACTION_TDLS_SETUP_RSP:
+ {
+ u16 status = frame[2] | frame[3] << 8;
+ u8 token = frame[4];
+
+ return snprintf(result, result_length, "->SetupRsp(token:%u, status:%u)", token, status);
+ }
+ case SLSI_ACTION_TDLS_SETUP_CFM:
+ {
+ u16 status = frame[2] | frame[3] << 8;
+ u8 token = frame[4];
+
+ return snprintf(result, result_length, "->SetupCfm(token:%u, status:%u)", token, status);
+ }
+ case SLSI_ACTION_TDLS_TEARDOWN:
+ {
+ u16 reason = frame[2] | frame[3] << 8;
+
+ return snprintf(result, result_length, "->SetupCfm(reason:%u)", reason);
+ }
+ case SLSI_ACTION_TDLS_PEER_TRAFFIC_IND:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->PeerTrafficInd(token:%u)", token);
+ }
+ case SLSI_ACTION_TDLS_CHANNEL_SWITCH_REQ:
+ {
+ u8 channel = frame[2];
+
+ return snprintf(result, result_length, "->ChannelSwitchReq(channel:%u)", channel);
+ }
+ case SLSI_ACTION_TDLS_CHANNEL_SWITCH_RSP:
+ {
+ u16 status = frame[2] | frame[3] << 8;
+
+ return snprintf(result, result_length, "->ChannelSwitchRsp(status:%u)", status);
+ }
+ case SLSI_ACTION_TDLS_PEER_PSM_REQ:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->PeerPSMReq(token:%u)", token);
+ }
+ case SLSI_ACTION_TDLS_PEER_PSM_RSP:
+ {
+ u8 token = frame[2];
+ u16 status = frame[3] | frame[4] << 8;
+
+ return snprintf(result, result_length, "->PeerPSMRsp(token:%u, status:%u)", token, status);
+ }
+ case SLSI_ACTION_TDLS_PEER_TRAFFIC_RSP:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->PeerTrafficRsp(token:%u)", token);
+ }
+ case SLSI_ACTION_TDLS_DISCOVERY_REQ:
+ {
+ u8 token = frame[2];
+
+ return snprintf(result, result_length, "->DiscoveryReq(token:%u)", token);
+ }
+ default:
+ return snprintf(result, result_length, "->Action(%u)", action);
+ }
+}
+
/* 802.11 Action frame category codes; looked up linearly by value, so
 * order is irrelevant (all values are distinct).
 * NOTE(review): 132 looks like category 4 (Public) with the top bit
 * set, hence the "Public(error)" label — verify against the firmware's
 * error-reporting convention.
 */
static const struct slsi_value_name_decode action_categories[] = {
	{ 3, "BlockAck", slsi_decode_action_blockack },
	{ 0, "SpectrumManagement", NULL },
	{ 1, "QoS", NULL },
	{ 2, "DLS", NULL },
	{ 4, "Public", slsi_decode_action_public },
	{ 5, "RadioMeasurement", NULL },
	{ 6, "FastBSSTransition", NULL },
	{ 7, "HT", NULL },
	{ 8, "SAQuery", NULL },
	{ 9, "ProtectedDualOfPublicAction", NULL },
	{ 12, "TDLS", slsi_decode_action_tdls },
	{ 17, "ReservedWFA", NULL },
	{ 126, "VendorSpecificProtected", NULL },
	{ 127, "VendorSpecific", NULL },
	{ 132, "Public(error)", slsi_decode_action_public },
};
+
+#define SLSI_ACTION_CAT_BLOCK_ACK 3
+static void slsi_decode_action(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u8 category = frame[0];
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(action_categories); i++)
+ if (action_categories[i].value == category) {
+ int size_written = snprintf(result, result_length, "->%s", action_categories[i].name);
+
+ if (action_categories[i].decode_fn)
+ action_categories[i].decode_fn(frame, frame_length, result + size_written, result_length - size_written);
+ return;
+ }
+ snprintf(result, result_length, "->category:%u", category);
+}
+
/* ARP operation code names (IANA ARP parameters registry).  The array
 * is indexed directly by opcode, so entry order must track the opcode
 * numbering (0 is reserved).
 */
const char *slsi_arp_opcodes[] = {
	"Reserved",
	"REQUEST",
	"REPLY",
	"request Reverse",
	"reply Reverse",
	"DRARP-Request",
	"DRARP-Reply",
	"DRARP-Error",
	"InARP-Request",
	"InARP-Reply",
	"ARP-NAK",
	"MARS-Request",
	"MARS-Multi",
	"MARS-MServ",
	"MARS-Join",
	"MARS-Leave",
	"MARS-NAK",
	"MARS-Unserv",
	"MARS-SJoin",
	"MARS-SLeave",
	"MARS-Grouplist-Request",
	"MARS-Grouplist-Reply",
	"MARS-Redirect-Map",
	"MAPOS-UNARP",
	"OP_EXP1",
	"OP_EXP2",
};
+
+static size_t slsi_decode_arp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ /* u16 htype = frame[0] << 8 | frame[1];
+ * u16 proto = frame[2] << 8 | frame[3];
+ * u8 hlen = frame[4];
+ * u8 plen = frame[5];
+ */
+ u16 opcode = frame[6] << 8 | frame[7];
+ u8 *sha = &frame[8];
+ u8 *spa = &frame[14];
+ u8 *tha = &frame[18];
+ u8 *tpa = &frame[24];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ if (opcode < ARRAY_SIZE(slsi_arp_opcodes))
+ return snprintf(result, result_length, "->%s(sha:%.2X:%.2X:%.2X:%.2X:%.2X:%.2X, spa:%u.%u.%u.%u, tha:%.2X:%.2X:%.2X:%.2X:%.2X:%.2X, tpa:%u.%u.%u.%u)",
+ slsi_arp_opcodes[opcode],
+ sha[0], sha[1], sha[2], sha[3], sha[4], sha[5],
+ spa[0], spa[1], spa[2], spa[3],
+ tha[0], tha[1], tha[2], tha[3], tha[4], tha[5],
+ tpa[0], tpa[1], tpa[2], tpa[3]);
+ else
+ return snprintf(result, result_length, "->(opcode:%u)", opcode);
+}
+
/* EAP method type names (IANA EAP "Method Types" registry), used to
 * label EAP Request/Response packets.  Looked up linearly by value;
 * no sub-decoders are registered.
 * NOTE(review): both 23 and 50 are labelled "EAP-AKA" here — in the
 * IANA registry type 50 is EAP-AKA' — verify before relying on it.
 */
static const struct slsi_value_name_decode slsi_decode_eapol_packet_types[] = {
	{ 1, "Identity", NULL },
	{ 2, "Notification", NULL },
	{ 3, "Nak", NULL },
	{ 4, "MD5Challenge", NULL },
	{ 5, "OneTimePassword", NULL },
	{ 6, "GenericTokenCard", NULL },
	{ 9, "RSA Public Key Authentication", NULL },
	{ 10, "DSS Unilateral", NULL },
	{ 11, "KEA", NULL },
	{ 12, "KEA-VALIDATE", NULL },
	{ 13, "EAP-TLS", NULL },
	{ 14, "Defender Token (AXENT)", NULL },
	{ 15, "RSA Security SecurID EAP", NULL },
	{ 16, "Arcot Systems EAP", NULL },
	{ 17, "EAP-Cisco Wireless", NULL },
	{ 18, "EAP-SIM", NULL },
	{ 19, "SRP-SHA1 Part 1", NULL },
	{ 21, "EAP-TTLS", NULL },
	{ 22, "Remote Access Service", NULL },
	{ 23, "EAP-AKA", NULL },
	{ 24, "EAP-3Com Wireless", NULL },
	{ 25, "PEAP", NULL },
	{ 26, "MS-EAP-Authentication", NULL },
	{ 27, "MAKE", NULL },
	{ 28, "CRYPTOCard", NULL },
	{ 29, "EAP-MSCHAP-V2", NULL },
	{ 30, "DynamID", NULL },
	{ 31, "Rob EAP", NULL },
	{ 32, "EAP-POTP", NULL },
	{ 33, "MS-Authentication-TLV", NULL },
	{ 34, "SentriNET", NULL },
	{ 35, "EAP-Actiontec Wireless", NULL },
	{ 36, "Cogent Systems Biometrics Authentication EAP", NULL },
	{ 37, "AirFortress EAP", NULL },
	{ 38, "EAP-HTTP Digest", NULL },
	{ 39, "SecureSuite EAP", NULL },
	{ 40, "DeviceConnect EAP", NULL },
	{ 41, "EAP-SPEKE", NULL },
	{ 42, "EAP-MOBAC", NULL },
	{ 43, "EAP-FAST", NULL },
	{ 44, "ZLXEAP", NULL },
	{ 45, "EAP-Link", NULL },
	{ 46, "EAP-PAX", NULL },
	{ 47, "EAP-PSK", NULL },
	{ 48, "EAP-SAKE", NULL },
	{ 49, "EAP-IKEv2", NULL },
	{ 50, "EAP-AKA", NULL },
	{ 51, "EAP-GPSK", NULL },
	{ 52, "EAP-pwd", NULL },
	{ 53, "EAP-EKE V1", NULL },
	{ 254, "WPS", NULL }
};
+
+static size_t slsi_decode_eapol_packet(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ static const char *const slsi_decode_eapol_packet_codes[] = {
+ "",
+ "Request",
+ "Response",
+ "Success",
+ "Failure",
+ };
+
+ size_t size_written = 0;
+ u32 i;
+ u8 code = frame[0];
+ u8 id = frame[1];
+ u16 length = frame[2] << 8 | frame[3];
+ const char *code_str = "";
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ if (code >= 1 && code <= 4)
+ code_str = slsi_decode_eapol_packet_codes[code];
+
+ if (length > 4 && (code == 1 || code == 2)) {
+ u8 type = frame[4];
+
+ for (i = 0; i < ARRAY_SIZE(slsi_decode_eapol_packet_types); i++)
+ if (slsi_decode_eapol_packet_types[i].value == type) {
+ size_written += snprintf(result, result_length, ":%s:%s id:%u", slsi_decode_eapol_packet_types[i].name, code_str, id);
+ return size_written;
+ }
+ size_written += snprintf(result, result_length, ":type:%u: %s id:%u", type, code_str, id);
+ } else {
+ size_written += snprintf(result, result_length, ":%s id:%u length:%u", code_str, id, length);
+ }
+ return size_written;
+}
+
/* EAPOL (802.1X) packet types.  slsi_decode_eapol() indexes this array
 * directly by packet type, so entry order must match the protocol
 * values 0..3.
 */
static const struct slsi_value_name_decode slsi_eapol_packet_type[] = {
	{ 0, "EapPacket", slsi_decode_eapol_packet },
	{ 1, "EapolStart", NULL },
	{ 2, "EapolLogoff", NULL },
	{ 3, "EapolKey", NULL }
};
+
+static size_t slsi_decode_eapol(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t size_written = 0;
+ u8 packet_type = frame[1];
+ u16 length = frame[2] << 8 | frame[3];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ if (packet_type < ARRAY_SIZE(slsi_eapol_packet_type)) {
+ size_written += snprintf(result, result_length, "->%s", slsi_eapol_packet_type[packet_type].name);
+ if (slsi_eapol_packet_type[packet_type].decode_fn)
+ size_written += slsi_eapol_packet_type[packet_type].decode_fn(frame + 4, length, result + size_written, result_length - size_written);
+ return size_written;
+ } else {
+ return snprintf(result, result_length, "->packet_type:%u", packet_type);
+ }
+}
+
+static size_t slsi_decode_tdls(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u8 payload_type = frame[0];
+
+ if (payload_type == 2) {
+ slsi_decode_action(frame + 1, frame_length - 1, result, result_length);
+ return 0;
+ } else {
+ return snprintf(result, result_length, "->Unknown(payload:%u", payload_type);
+ }
+}
+
+static size_t slsi_decode_ipv4_icmp_echo(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u16 id = frame[0] << 8 | frame[1];
+ u16 seq = frame[2] << 8 | frame[3];
+
+ SLSI_UNUSED_PARAMETER(frame_length);
+
+ return snprintf(result, result_length, " id:%u seq:%u", id, seq);
+}
+
/* ICMP message type names (IANA ICMP parameters); looked up linearly
 * by the type byte.  Echo and EchoReply get an id/seq sub-decoder.
 */
static const struct slsi_value_name_decode slsi_ipv4_icmp_types[] = {
	{ 0, "EchoReply", slsi_decode_ipv4_icmp_echo },
	{ 8, "Echo ", slsi_decode_ipv4_icmp_echo },
	{ 3, "Destination Unreachable Ack", NULL },
	{ 4, "Source Quench", NULL },
	{ 5, "Redirect", NULL },
	{ 6, "Alternate Host Address", NULL },
	{ 9, "Router Advertisement", NULL },
	{ 10, "Router Selection", NULL },
	{ 11, "Time Exceeded", NULL },
	{ 12, "Parameter Problem", NULL },
	{ 13, "Timestamp", NULL },
	{ 14, "Timestamp Reply", NULL },
	{ 15, "Information Request", NULL },
	{ 16, "Information Reply", NULL },
	{ 17, "Address Mask Request", NULL },
	{ 18, "Address Mask Reply", NULL },
	{ 19, "Reserved (for Security)", NULL },
	{ 30, "Traceroute", NULL },
	{ 31, "Datagram Conversion Error", NULL },
	{ 32, "Mobile Host Redirect", NULL },
	{ 33, "IPv6 Where-Are-You", NULL },
	{ 34, "IPv6 I-Am-Here", NULL },
	{ 35, "Mobile Registration Request", NULL },
	{ 36, "Mobile Registration Reply", NULL },
	{ 39, "SKIP", NULL },
	{ 40, "Photuris", NULL },
	{ 253, "RFC3692-style Experiment 1", NULL },
	{ 254, "RFC3692-style Experiment 2", NULL }
};
+
+static size_t slsi_decode_ipv4_icmp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t size_written = 0;
+ u32 i;
+ u8 type = frame[0];
+ u8 code = frame[1];
+
+ for (i = 0; i < ARRAY_SIZE(slsi_ipv4_icmp_types); i++)
+ if (slsi_ipv4_icmp_types[i].value == type) {
+ size_written += snprintf(result, result_length, "->%s(code:%u)", slsi_ipv4_icmp_types[i].name, code);
+ if (slsi_ipv4_icmp_types[i].decode_fn)
+ size_written += slsi_ipv4_icmp_types[i].decode_fn(frame + 4, frame_length - 4, result + size_written, result_length - size_written);
+ return size_written;
+ }
+ return snprintf(result, result_length, "->type(%u)", type);
+}
+
/* DHCP message types carried in option 53 (RFC 2132 section 9.6). */
static const struct slsi_value_name_decode slsi_ipv4_udp_bootp_dhcp_option53[] = {
	{ 1, "DHCP_DISCOVER", NULL },
	{ 2, "DHCP_OFFER", NULL },
	{ 3, "DHCP_REQUEST", NULL },
	{ 4, "DHCP_DECLINE", NULL },
	{ 5, "DHCP_ACK", NULL },
	{ 6, "DHCP_NAK", NULL },
	{ 7, "DHCP_RELEASE", NULL },
	{ 8, "DHCP_INFORM", NULL },
};
+
+#define SLSI_IPV4_UDP_BOOTP_CIADDR_OFFSET 16
+#define SLSI_IPV4_UDP_BOOTP_YIADDR_OFFSET 20
+#define SLSI_IPV4_UDP_BOOTP_GIADDR_OFFSET 24
+#define SLSI_IPV4_UDP_BOOTP_MAGIC_OFFSET 236
+static size_t slsi_decode_ipv4_udp_bootp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u32 i;
+ u8 *ciaddr = &frame[SLSI_IPV4_UDP_BOOTP_CIADDR_OFFSET];
+ u8 *yiaddr = &frame[SLSI_IPV4_UDP_BOOTP_YIADDR_OFFSET];
+ u8 *giaddr = &frame[SLSI_IPV4_UDP_BOOTP_GIADDR_OFFSET];
+ u8 *magic = &frame[SLSI_IPV4_UDP_BOOTP_MAGIC_OFFSET];
+
+ if (magic[0] == 0x63 && magic[1] == 0x82 && magic[2] == 0x53 && magic[3] == 0x63) {
+ u8 *p = &frame[SLSI_IPV4_UDP_BOOTP_MAGIC_OFFSET + 4];
+
+ while (p < p + frame_length) {
+ u8 option = p[0];
+ u8 option_length = p[1];
+
+ if (option == 53 && option_length == 1) {
+ for (i = 0; i < ARRAY_SIZE(slsi_ipv4_udp_bootp_dhcp_option53); i++)
+ if (slsi_ipv4_udp_bootp_dhcp_option53[i].value == p[2])
+ return snprintf(result, result_length, "->%s(ci:%u.%u.%u.%u yi:%u.%u.%u.%u gi:%u.%u.%u.%u)",
+ slsi_ipv4_udp_bootp_dhcp_option53[i].name,
+ ciaddr[0], ciaddr[1], ciaddr[2], ciaddr[3],
+ yiaddr[0], yiaddr[1], yiaddr[2], yiaddr[3],
+ giaddr[0], giaddr[1], giaddr[2], giaddr[3]);
+ return snprintf(result, result_length, "->option53(%u ci:%u.%u.%u.%u yi:%u.%u.%u.%u gi:%u.%u.%u.%u)",
+ p[2],
+ ciaddr[0], ciaddr[1], ciaddr[2], ciaddr[3],
+ yiaddr[0], yiaddr[1], yiaddr[2], yiaddr[3],
+ giaddr[0], giaddr[1], giaddr[2], giaddr[3]);
+ }
+ if (option == 0)
+ break;
+ p = p + 2 + option_length;
+ }
+ }
+ return 0;
+}
+
/* UDP ports worth naming in the log; matched against both source and
 * destination port.  BOOTP/DHCP (67/68) gets a payload sub-decoder.
 */
static const struct slsi_value_name_decode slsi_ipv4_udp_ports[] = {
	{ 53, "DNS", NULL },
	{ 67, "Bootp", slsi_decode_ipv4_udp_bootp },
	{ 68, "Bootp", slsi_decode_ipv4_udp_bootp },
};
+
+static size_t slsi_decode_ipv4_udp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ /* 0 7 8 15 16 23 24 31
+ * +--------+--------+--------+--------+
+ * | Source | Destination |
+ * | Port | Port |
+ * +--------+--------+--------+--------+
+ * | | |
+ * | Length | Checksum |
+ * +--------+--------+--------+--------+
+ * |
+ * | data octets ...
+ * +--------------------- ...
+ */
+ size_t size_written = 0;
+ u32 i;
+ u16 sport = frame[0] << 8 | frame[1];
+ u16 dport = frame[2] << 8 | frame[3];
+ u16 length = frame[4] << 8 | frame[5];
+
+ /*u16 chksum = frame[6] << 8 | frame[7];*/
+
+ for (i = 0; i < ARRAY_SIZE(slsi_ipv4_udp_ports); i++)
+ if (slsi_ipv4_udp_ports[i].value == dport || slsi_ipv4_udp_ports[i].value == sport) {
+ size_written += snprintf(result, result_length, "->%s", slsi_ipv4_udp_ports[i].name);
+ if (slsi_ipv4_udp_ports[i].decode_fn)
+ size_written += slsi_ipv4_udp_ports[i].decode_fn(frame + 8, length, result + size_written, result_length - size_written);
+ else
+ size_written += snprintf(result + size_written, result_length - size_written, "(dport:%u, size:%u)", dport, frame_length - 8);
+ return size_written;
+ }
+ return snprintf(result, result_length, "(dport:%u, size:%u)", dport, frame_length - 8);
+}
+
+static size_t slsi_decode_ipv4_tcp(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ /* TCP Header Format
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Source Port | Destination Port |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Acknowledgment Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Data | |U|A|P|R|S|F| |
+ * | Offset| Reserved |R|C|S|S|Y|I| Window |
+ * | | |G|K|H|T|N|N| |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Checksum | Urgent Pointer |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Options | Padding |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | data |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ u16 dport = frame[2] << 8 | frame[3];
+ u8 flags = frame[13];
+ bool fin = flags & 0x01;
+ bool syn = flags & 0x02;
+ bool rst = flags & 0x04;
+ bool psh = flags & 0x08;
+ bool ack = flags & 0x10;
+ bool urg = flags & 0x20;
+
+ return snprintf(result, result_length, "(dport:%u%s%s%s%s%s%s size:%u)",
+ dport,
+ fin ? " FIN" : "",
+ syn ? " SYN" : "",
+ rst ? " RST" : "",
+ psh ? " PSH" : "",
+ ack ? " ACK" : "",
+ urg ? " URG" : "",
+ frame_length - 24);
+}
+
+#define SLSI_IPV4_PROTO_ICMP 1
+#define SLSI_IPV4_PROTO_IGMP 2
+#define SLSI_IPV4_PROTO_TCP 6
+#define SLSI_IPV4_PROTO_UDP 17
+static size_t slsi_decode_ipv4(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ size_t size_written = 0;
+ u16 ip_data_offset = 20;
+ /*u8 version = frame[0] >> 4; */
+ u8 hlen = frame[0] & 0x0F;
+ /*u8 tos = frame[1]; */
+ /*u16 len = frame[2] << 8 | frame[3]; */
+ /*u16 id = frame[4] << 8 | frame[5]; */
+ /*u16 flags_foff = frame[6] << 8 | frame[7]; */
+ /*u8 ttl = frame[8]; */
+ u8 proto = frame[9];
+ /*u16 cksum = frame[10] << 8 | frame[11]; */
+ u8 *src_ip = &frame[12];
+ u8 *dest_ip = &frame[16];
+
+ if (hlen > 5)
+ ip_data_offset += (hlen - 5) * 4;
+
+ size_written += snprintf(result + size_written, result_length - size_written, "(s:%u.%u.%u.%u d:%u.%u.%u.%u)",
+ src_ip[0], src_ip[1], src_ip[2], src_ip[3],
+ dest_ip[0], dest_ip[1], dest_ip[2], dest_ip[3]);
+
+ switch (proto) {
+ case SLSI_IPV4_PROTO_TCP:
+ size_written += snprintf(result + size_written, result_length - size_written, "->TCP");
+ size_written += slsi_decode_ipv4_tcp(frame + ip_data_offset,
+ frame_length - ip_data_offset,
+ result + size_written,
+ result_length - size_written);
+ break;
+ case SLSI_IPV4_PROTO_UDP:
+ size_written += snprintf(result + size_written, result_length - size_written, "->UDP");
+ size_written += slsi_decode_ipv4_udp(frame + ip_data_offset,
+ frame_length - ip_data_offset,
+ result + size_written,
+ result_length - size_written);
+ break;
+ case SLSI_IPV4_PROTO_ICMP:
+ size_written += snprintf(result + size_written, result_length - size_written, "->ICMP");
+ size_written += slsi_decode_ipv4_icmp(frame + ip_data_offset,
+ frame_length - ip_data_offset,
+ result + size_written,
+ result_length - size_written);
+ break;
+ case SLSI_IPV4_PROTO_IGMP:
+ size_written += snprintf(result + size_written, result_length - size_written, "->IGMP");
+ break;
+ default:
+ size_written += snprintf(result + size_written, result_length - size_written, "->proto:%u", proto);
+ break;
+ }
+ return size_written;
+}
+
/* Ethertype dispatch table: the first two bytes of the protocol field
 * select a name and an optional payload decoder.  Only two bytes of
 * .snap are ever compared (see slsi_decode_proto_data()).
 */
static const struct slsi_decode_snap snap_types[] = {
	{ { 0x08, 0x00 }, "IpV4", slsi_decode_ipv4 },
	{ { 0x08, 0x06 }, "Arp", slsi_decode_arp },
	{ { 0x88, 0x8e }, "Eapol", slsi_decode_eapol },
	{ { 0x89, 0x0d }, NULL, slsi_decode_tdls },	/* TDLS decoder prints its own label */
	{ { 0x86, 0xdd }, "IpV6", NULL },
	{ { 0x88, 0xb4 }, "Wapi", NULL },
};
+
+static void slsi_decode_proto_data(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(snap_types); i++)
+ if (memcmp(snap_types[i].snap, frame, 2) == 0) {
+ int slen = 0;
+
+ if (snap_types[i].name)
+ slen = snprintf(result, result_length, "->%s", snap_types[i].name);
+ if (snap_types[i].decode_fn)
+ slen += snap_types[i].decode_fn(frame + 2, frame_length - 2, result + slen, result_length - slen);
+ return;
+ }
+
+ snprintf(result, result_length, "(proto:0x%.2X%.2X)", frame[0], frame[1]);
+}
+
+static void slsi_decode_80211_data(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ return slsi_decode_proto_data(frame + 6, frame_length - 6, result, result_length);
+}
+
/* 802.11 frame decode dispatch table, indexed as
 * frame_types[type][subtype] using the frame control field (type in
 * bits 2-3, subtype in bits 4-7).  Row 0: management, row 1: control,
 * row 2: data, row 3: reserved/extension.
 */
static const struct slsi_decode_entry frame_types[4][16] = {
	{
		{ "AssocReq", slsi_decode_assoc_req },
		{ "AssocRsp", slsi_decode_assoc_rsp },
		{ "ReassocReq", slsi_decode_reassoc_req },
		{ "ReassocRsp", slsi_decode_assoc_rsp }, /* Same as Assoc Rsp Frame */
		{ "ProbeReq", slsi_decode_probe_req },
		{ "ProbeRsp", slsi_decode_beacon }, /* Same as Beacon Frame */
		{ "TimingAdv", NULL },
		{ "Reserved", NULL },
		{ "Beacon ", slsi_decode_beacon },
		{ "Atim", NULL },
		{ "Disassoc", slsi_decode_deauth }, /* Same as Deauth Frame */
		{ "Auth", slsi_decode_auth },
		{ "Deauth", slsi_decode_deauth },
		{ "Action", slsi_decode_action },
		{ "ActionNoAck", slsi_decode_action },
		{ "Reserved", NULL }
	},
	{
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "BlockAckReq", NULL },
		{ "BlockAck", NULL },
		{ "PsPoll", NULL },
		{ "RTS", NULL },
		{ "CTS", NULL },
		{ "Ack", NULL },
		{ "CF-End", NULL },
		{ "CF-End+Ack", NULL }
	},
	{
		{ "Data", slsi_decode_80211_data },
		{ "Data+CF-Ack", slsi_decode_80211_data },
		{ "Data+CF-Poll", slsi_decode_80211_data },
		{ "Data+CF-Ack+Poll", slsi_decode_80211_data },
		{ "Null", NULL },
		{ "CF-Ack", NULL },
		{ "CF-Poll", NULL },
		{ "CF-Ack+Poll", NULL },
		{ "QosData", slsi_decode_80211_data },
		{ "QosData+CF-Ack", slsi_decode_80211_data },
		{ "QosData+CF-Poll", slsi_decode_80211_data },
		{ "QosData+CF-Ack+Poll", slsi_decode_80211_data },
		{ "QosNull", NULL },
		{ "Reserved", NULL },
		{ "QosCF-Poll", NULL },
		{ "QosCF-Ack+Poll", NULL }
	},
	{
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
		{ "Reserved", NULL },
	}
};
+
+static bool slsi_decode_80211_frame(u8 *frame, u16 frame_length, char *result, size_t result_length)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame;
+ u16 fc_cpu = cpu_to_le16(hdr->frame_control);
+ int ftype_idx = (fc_cpu & 0xf) >> 2;
+ const struct slsi_decode_entry *entry;
+ int hdrlen;
+ int slen;
+
+ /* Only decode Management Frames at Level 1 */
+ if (slsi_debug_summary_frame == 1 && ftype_idx != 0)
+ return false;
+
+ /* Filter Scanning at the debug level 3 and above as it can be noisy with large scan results */
+ if (slsi_debug_summary_frame < 3 &&
+ (ieee80211_is_probe_req(fc_cpu) || ieee80211_is_probe_resp(fc_cpu) || ieee80211_is_beacon(fc_cpu)))
+ return false;
+
+ entry = &frame_types[ftype_idx][(fc_cpu >> 4) & 0xf];
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ slen = snprintf(result, result_length, entry->name);
+
+ if (entry->decode_fn)
+ entry->decode_fn(frame + hdrlen, frame_length - hdrlen, result + slen, result_length - slen);
+ return true;
+}
+
/* Summarise an Ethernet (802.3) framed payload into @result.
 * Returns false when the current debug level filters the frame out.
 */
static bool slsi_decode_l3_frame(u8 *frame, u16 frame_length, char *result, size_t result_length)
{
	int slen;

	/* Level 1 decodes management frames only — never data frames */
	if (slsi_debug_summary_frame == 1)
		return false;

	/* Level 2: only high-importance frames (EAPOL, WAPI, ARP, DHCP) */
	if (slsi_debug_summary_frame == 2) {
		struct ethhdr *ehdr = (struct ethhdr *)frame;
		u16 eth_type = be16_to_cpu(ehdr->h_proto);

		switch (eth_type) {
		case ETH_P_IP:
			/* IP traffic is only interesting when it is DHCP */
			if (slsi_is_dhcp_packet(frame) == SLSI_TX_IS_NOT_DHCP)
				return false;
			break;
		case ETH_P_PAE:		/* EAPOL, WAPI and ARP always pass at level 2 */
		case ETH_P_WAI:
		case ETH_P_ARP:
			break;
		default:
			/* return for all other frames */
			return false;
		}
	}
	slen = snprintf(result, result_length, "eth");
	/* skip the 12 bytes of destination + source MAC to the ethertype */
	slsi_decode_proto_data(frame + 12, frame_length - 12, result + slen, result_length - slen);
	return true;
}
+
/* Summarise an A-MSDU subframe into @result.
 * Returns false when the current debug level filters the frame out.
 */
static bool slsi_decode_amsdu_subframe(u8 *frame, u16 frame_length, char *result, size_t result_length)
{
	int slen;

	/* Level 1 decodes management frames only — never data frames */
	if (slsi_debug_summary_frame == 1)
		return false;

	/* Level 2: only high-importance frames (EAPOL, WAPI, ARP, DHCP) */
	if (slsi_debug_summary_frame == 2) {
		struct msduhdr *msdu_hdr = (struct msduhdr *)frame;
		u16 eth_type = be16_to_cpu(msdu_hdr->type);

		switch (eth_type) {
		case ETH_P_IP:
			/* slsi_is_dhcp_packet() decodes the frame as Ethernet frame so
			 * pass a offset (difference between MSDU header and ethernet header)
			 * to frames so it reads at the right offset
			 */
			if (slsi_is_dhcp_packet(frame + 8) == SLSI_TX_IS_NOT_DHCP)
				return false;
			break;
		case ETH_P_PAE:		/* EAPOL, WAPI and ARP always pass at level 2 */
		case ETH_P_WAI:
		case ETH_P_ARP:
			break;
		default:
			/* return for all other frames */
			return false;
		}
	}
	slen = snprintf(result, result_length, "eth");
	/* +20 skips the MSDU subframe header to the protocol field —
	 * presumably 6+6 address bytes, 2-byte length and 6-byte SNAP;
	 * TODO(review): confirm against struct msduhdr */
	slsi_decode_proto_data(frame + 20, frame_length - 20, result + slen, result_length - slen);
	return true;
}
+
+static inline bool slsi_debug_frame_ratelimited(void)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, (5 * HZ), 200);
+
+ if (__ratelimit(&_rs))
+ return true;
+ return false;
+}
+
/* NOTE: dev can be NULL */
/* Decode and log a one-line summary of the frame carried by a FAPI
 * signal (data path, MLME frame transfer or scan result).  Output is
 * gated by the slsi_debug_summary_frame level and, for data signals,
 * by a rate limiter.  Signals that carry no decodable frame are
 * silently ignored.
 */
void slsi_debug_frame(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, const char *prefix)
{
	char frame_info[384];	/* decoded summary text */
	u8 *frame = fapi_get_data(skb);
	u16 len = fapi_get_datalen(skb);
	u8 *dst = NULL;
	u8 *src = NULL;
	u16 frametype = 0xFFFF;	/* sentinel: framing not known yet */
	bool print = false;
	u16 id = fapi_get_sigid(skb);
	u16 vif = fapi_get_vif(skb);
	s16 rssi = 0;

	if (!slsi_debug_summary_frame)
		return;

	if (!len)
		return;

	switch (id) {
	case MA_UNITDATA_REQ:
	case MA_UNITDATA_IND:
		if (!slsi_debug_frame_ratelimited()) /* Limit the Data output to stop too much spam at high data rates */
			return;
		break;
	default:
		break;
	}

	frame_info[0] = '\0';
	/* work out how the payload is framed from the signal id */
	switch (id) {
	case MA_UNITDATA_REQ:
		frametype = fapi_get_u16(skb, u.ma_unitdata_req.data_unit_descriptor);
		break;
	case MA_UNITDATA_IND:
		frametype = fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor);
		break;
	case MLME_SEND_FRAME_REQ:
		frametype = fapi_get_u16(skb, u.mlme_send_frame_req.data_unit_descriptor);
		break;
	case MLME_RECEIVED_FRAME_IND:
		frametype = fapi_get_u16(skb, u.mlme_received_frame_ind.data_unit_descriptor);
		break;
	case MLME_SCAN_IND:
		/* scan results are always 802.11 frames and carry an RSSI;
		 * the scan id (high byte) is reported in place of the vif */
		frametype = FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME;
		rssi = fapi_get_s16(skb, u.mlme_scan_ind.rssi);
		vif = fapi_get_u16(skb, u.mlme_scan_ind.scan_id) >> 8;
		break;
	case MLME_CONNECT_CFM:
	case MLME_CONNECT_IND:
	case MLME_PROCEDURE_STARTED_IND:
	case MLME_CONNECTED_IND:
	case MLME_REASSOCIATE_IND:
	case MLME_ROAMED_IND:
		frametype = FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME;
		break;
	default:
		/* signal carries no decodable frame */
		return;
	}

	/* pick addresses and decoder according to the framing */
	switch (frametype) {
	case FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME:
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame;

		dst = hdr->addr1;
		src = hdr->addr2;
		print = slsi_decode_80211_frame(frame, len, frame_info, sizeof(frame_info));
		break;
	}
	case FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME:
	{
		struct ethhdr *ehdr = (struct ethhdr *)frame;

		dst = ehdr->h_dest;
		src = ehdr->h_source;
		print = slsi_decode_l3_frame(frame, len, frame_info, sizeof(frame_info));
		break;
	}
	case FAPI_DATAUNITDESCRIPTOR_AMSDU_SUBFRAME:
	{
		struct ethhdr *ehdr = (struct ethhdr *)frame;

		dst = ehdr->h_dest;
		src = ehdr->h_source;
		print = slsi_decode_amsdu_subframe(frame, len, frame_info, sizeof(frame_info));
		break;
	}
	default:
		return;
	}
	if (print) {
#ifdef CONFIG_SCSC_WLAN_SKB_TRACKING
		SLSI_DBG4(sdev, SLSI_SUMMARY_FRAMES, "%-5s: 0x%p %s(vif:%u rssi:%-3d, s:%pM d:%pM)->%s\n",
			  dev ? netdev_name(dev) : "", skb, prefix, vif, rssi, src, dst, frame_info);
#else
		SLSI_DBG4(sdev, SLSI_SUMMARY_FRAMES, "%-5s: %s(vif:%u rssi:%-3d, s:%pM d:%pM)->%s\n",
			  dev ? netdev_name(dev) : "", prefix, vif, rssi, src, dst, frame_info);
#endif
	}
}
+
+#endif /* CONFIG_SCSC_WLAN_DEBUG */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h"
+#include "hip.h"
+#include "mgt.h"
+#include "debug.h"
+#include "udi.h"
+#include "hip_bh.h"
+#include "cfg80211_ops.h"
+#include "netif.h"
+#include "procfs.h"
+#include "ba.h"
+#include "nl80211_vendor.h"
+
+#include "sap_mlme.h"
+#include "sap_ma.h"
+#include "sap_dbg.h"
+#include "sap_test.h"
+
+#ifdef CONFIG_SCSC_WLAN_KIC_OPS
+#include "kic.h"
+#endif
+
+char *slsi_mib_file = "wlan.hcf";
+module_param_named(mib_file, slsi_mib_file, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mib_file, "mib data filename");
+
+char *slsi_mib_file2 = "wlan_sw.hcf";
+module_param_named(mib_file2, slsi_mib_file2, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mib_file2, "sw mib data filename");
+
+static char *local_mib_file = "localmib.hcf";
+module_param(local_mib_file, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(local_mib_file, "local mib data filename (Optional extra mib values)");
+
+static char *maddr_file = "mac.txt";
+module_param(maddr_file, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(maddr_file, "mac address filename");
+
+static bool term_udi_users = true;
+module_param(term_udi_users, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(term_udi_users, "Try to terminate UDI user space users (applications) connected on the cdev (0, 1)");
+
+static int sig_wait_cfm_timeout = 6000;
+module_param(sig_wait_cfm_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sig_wait_cfm_timeout, "Signal wait timeout in milliseconds (default: 3000)");
+
+static bool lls_disabled;
+module_param(lls_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(lls_disabled, "Disable LLS: to disable LLS set 1");
+
+static bool gscan_disabled = 0;
+module_param(gscan_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gscan_disabled, "Disable gscan: to disable gscan set 1");
+
+static bool llslogs_disabled;
+module_param(llslogs_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(llslogs_disabled, "Disable llslogs: to disable llslogs set 1");
+
+static bool epno_disabled;
+module_param(epno_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(epno_disabled, "Disable ePNO: to disable ePNO set 1.\nNote: for ePNO to work gscan should be enabled");
+
+static bool vo_vi_block_ack_disabled;
+module_param(vo_vi_block_ack_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(vo_vi_block_ack_disabled, "Disable VO VI Block Ack logic added for WMM AC Cert : 5.1.4");
+
+static int max_scan_result_count = 200;
+module_param(max_scan_result_count, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_scan_result_count, "Max scan results to be reported");
+static bool rtt_disabled = 1;
+module_param(rtt_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rtt_disabled, "Disable rtt: to disable rtt set 1");
+
+static bool nan_disabled;
+module_param(nan_disabled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nan_disabled, "Disable NAN: to disable NAN set 1.");
+
+/* Module-parameter backed capability queries: each feature is reported
+ * as supported unless its corresponding "*_disabled" parameter is set.
+ */
+bool slsi_dev_gscan_supported(void)
+{
+ if (gscan_disabled)
+ return false;
+ return true;
+}
+
+bool slsi_dev_rtt_supported(void)
+{
+ if (rtt_disabled)
+ return false;
+ return true;
+}
+
+bool slsi_dev_llslogs_supported(void)
+{
+ if (llslogs_disabled)
+ return false;
+ return true;
+}
+
+bool slsi_dev_lls_supported(void)
+{
+ if (lls_disabled)
+ return false;
+ return true;
+}
+
+bool slsi_dev_epno_supported(void)
+{
+ if (epno_disabled)
+ return false;
+ return true;
+}
+
+/* Note: unlike the queries above, this reports the *disabled* flag directly */
+bool slsi_dev_vo_vi_block_ack(void)
+{
+ return vo_vi_block_ack_disabled;
+}
+
+/* Maximum number of scan results reported to upper layers (module param) */
+int slsi_dev_get_scan_result_count(void)
+{
+ return max_scan_result_count;
+}
+
+/* NAN requires a dedicated netdev slot, which only exists when the driver
+ * is built with at least 4 interfaces; otherwise it is never supported.
+ */
+int slsi_dev_nan_supported(struct slsi_dev *sdev)
+{
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+ if (!sdev)
+ return false;
+ return sdev->nan_enabled && !nan_disabled;
+#else
+ return false;
+#endif
+}
+
+/* inetaddr notifier callback: invoked on IPv4 address changes for any
+ * netdev in the system. Ignores devices that do not belong to this
+ * driver's wiphy, then pushes the new address to the firmware via
+ * slsi_ip_address_changed().
+ */
+static int slsi_dev_inetaddr_changed(struct notifier_block *nb, unsigned long data, void *arg)
+{
+ struct slsi_dev *sdev = container_of(nb, struct slsi_dev, inetaddr_notifier);
+ struct in_ifaddr *ifa = arg;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ /* Not a wireless device at all */
+ if (!wdev)
+ return NOTIFY_DONE;
+
+ /* Belongs to a different wireless driver/instance */
+ if (wdev->wiphy != sdev->wiphy)
+ return NOTIFY_DONE;
+
+ if (data == NETDEV_DOWN) {
+ SLSI_NET_DBG2(dev, SLSI_NETDEV, "Returning 0 for NETDEV_DOWN event\n");
+ return 0;
+ }
+
+ SLSI_NET_INFO(dev, "IP: %pI4\n", &ifa->ifa_address);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+ /* DHCP completed after a roam: the roaming wakelock can be dropped now */
+ if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif) && wake_lock_active(&sdev->wlan_wl_roam)) {
+ SLSI_NET_DBG2(dev, SLSI_NETDEV, "Releasing the roaming wakelock\n");
+ wake_unlock(&sdev->wlan_wl_roam);
+ /* If upper layers included wps ie in connect but the actually
+ * connection is not for wps, reset the wps flag.
+ */
+ if (ndev_vif->sta.is_wps) {
+ SLSI_NET_DBG1(dev, SLSI_NETDEV,
+ "is_wps set but not wps connection.\n");
+ ndev_vif->sta.is_wps = false;
+ }
+ }
+#endif
+ slsi_ip_address_changed(sdev, dev, ifa->ifa_address);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return 0;
+}
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+/* inet6addr notifier callback: caches the latest IPv6 address of this
+ * driver's netdevs in the per-vif state (under ipv6addr_lock). Unlike the
+ * IPv4 path, the address is not pushed to the firmware from here.
+ */
+static int slsi_dev_inet6addr_changed(struct notifier_block *nb, unsigned long data, void *arg)
+{
+ struct slsi_dev *sdev = container_of(nb, struct slsi_dev, inet6addr_notifier);
+ struct inet6_ifaddr *ifa = arg;
+ struct net_device *dev = ifa->idev->dev;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ (void)data; /* unused */
+
+ if (!wdev)
+ return NOTIFY_DONE;
+
+ if (wdev->wiphy != sdev->wiphy)
+ return NOTIFY_DONE;
+
+ SLSI_NET_INFO(dev, "IPv6: %pI6\n", &ifa->addr.s6_addr);
+
+ slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
+ memcpy(&ndev_vif->ipv6address, &ifa->addr, sizeof(struct in6_addr));
+ slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
+
+ return 0;
+}
+#endif
+
+/* Install the driver's custom world ("00") regulatory domain on the wiphy.
+ * The rule table is built on the stack and copied into the regdomain
+ * storage owned by sdev->device_config.
+ */
+void slsi_regd_init(struct slsi_dev *sdev)
+{
+ struct ieee80211_regdomain *slsi_world_regdom_custom = sdev->device_config.domain_info.regdomain;
+ struct ieee80211_reg_rule reg_rules[] = {
+ /* Channel 1 - 11*/
+ REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ /* Channel 12 - 13 NO_IR*/
+ REG_RULE(2467 - 10, 2472 + 10, 40, 0, 20, NL80211_RRF_NO_IR),
+#endif
+ /* Channel 36 - 48 */
+ REG_RULE(5180 - 10, 5240 + 10, 80, 0, 20, 0),
+ /* Channel 52 - 64 */
+ REG_RULE(5260 - 10, 5320 + 10, 80, 0, 20, NL80211_RRF_DFS),
+ /* Channel 100 - 140 */
+ REG_RULE(5500 - 10, 5700 + 10, 80, 0, 20, NL80211_RRF_DFS),
+ /* Channel 149 - 165 */
+ REG_RULE(5745 - 10, 5825 + 10, 80, 0, 20, 0),
+ };
+
+ int i;
+
+ SLSI_DBG1_NODEV(SLSI_INIT_DEINIT, "regulatory init\n");
+ /* Use the real table size: the ch12-13 NO_IR rule is only compiled in
+ * on kernels >= 3.14, so a hard-coded count of 6 would read one entry
+ * past the end of reg_rules[] on older kernels.
+ */
+ slsi_world_regdom_custom->n_reg_rules = ARRAY_SIZE(reg_rules);
+ for (i = 0; i < slsi_world_regdom_custom->n_reg_rules; i++)
+ slsi_world_regdom_custom->reg_rules[i] = reg_rules[i];
+
+ /* Country code '00' indicates world regulatory domain */
+ slsi_world_regdom_custom->alpha2[0] = '0';
+ slsi_world_regdom_custom->alpha2[1] = '0';
+
+ wiphy_apply_custom_regulatory(sdev->wiphy, slsi_world_regdom_custom);
+}
+
+/* Allocate and initialise the driver context (struct slsi_dev).
+ *
+ * On success returns the new sdev. On any failure, every resource acquired
+ * so far is released through the goto unwind ladder at the bottom and NULL
+ * is returned. NOTE(review): the error labels mirror the init order in
+ * reverse; keep both in sync when adding new init steps.
+ */
+struct slsi_dev *slsi_dev_attach(struct device *dev, struct scsc_mx *core, struct scsc_service_client *mx_wlan_client)
+{
+ struct slsi_dev *sdev;
+ int i;
+
+ SLSI_DBG1_NODEV(SLSI_INIT_DEINIT, "Add Device\n");
+
+ /* Allocates the wiphy and the embedded slsi_dev */
+ sdev = slsi_cfg80211_new(dev);
+ if (!sdev) {
+ SLSI_ERR_NODEV("No sdev\n");
+ return NULL;
+ }
+
+ sdev->mlme_blocked = false;
+
+ SLSI_MUTEX_INIT(sdev->netdev_add_remove_mutex);
+ mutex_init(&sdev->netdev_remove_mutex);
+ SLSI_MUTEX_INIT(sdev->start_stop_mutex);
+ SLSI_MUTEX_INIT(sdev->device_config_mutex);
+ SLSI_MUTEX_INIT(sdev->logger_mutex);
+
+ sdev->dev = dev;
+ sdev->maxwell_core = core;
+ memcpy(&sdev->mx_wlan_client, mx_wlan_client, sizeof(struct scsc_service_client));
+
+ sdev->fail_reported = false;
+ sdev->p2p_certif = false;
+ sdev->allow_switch_40_mhz = true;
+ sdev->allow_switch_80_mhz = true;
+ /* MIB file names come from the module parameters declared above */
+ sdev->mib[0].mib_file_name = slsi_mib_file;
+ sdev->mib[1].mib_file_name = slsi_mib_file2;
+ sdev->local_mib.mib_file_name = local_mib_file;
+ sdev->maddr_file_name = maddr_file;
+ sdev->device_config.qos_info = -1;
+ sdev->acs_channel_switched = false;
+ memset(&sdev->chip_info_mib, 0xFF, sizeof(struct slsi_chip_info_mib));
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ sdev->num_5g_restricted_channels = 0;
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+ sdev->device_config.okc_mode = 0;
+ sdev->device_config.wes_mode = 0;
+ sdev->device_config.roam_scan_mode = 0;
+#endif
+
+ slsi_log_clients_init(sdev);
+
+ slsi_wakelock_init(&sdev->wlan_wl, "wlan");
+ slsi_wakelock_init(&sdev->wlan_wl_mlme, "wlan_mlme");
+ slsi_wakelock_init(&sdev->wlan_wl_ma, "wlan_ma");
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+ wake_lock_init(&sdev->wlan_wl_roam, WAKE_LOCK_SUSPEND, "wlan_roam");
+#endif
+ sdev->recovery_next_state = 0;
+ init_completion(&sdev->recovery_remove_completion);
+ init_completion(&sdev->recovery_stop_completion);
+ init_completion(&sdev->recovery_completed);
+ sdev->recovery_status = 0;
+
+ sdev->term_udi_users = &term_udi_users;
+ sdev->sig_wait_cfm_timeout = &sig_wait_cfm_timeout;
+ slsi_sig_send_init(&sdev->sig_wait);
+
+ /* Seed per-AC host tags; the low 2 bits carry the AC index
+ * (see SLSI_HOST_TAG_TRAFFIC_QUEUE in dev.h)
+ */
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++)
+ atomic_set(&sdev->tx_host_tag[i], ((1 << 2) | i));
+
+ if (slsi_skb_work_init(sdev, NULL, &sdev->rx_dbg_sap, "slsi_wlan_rx_dbg_sap", slsi_rx_dbg_sap_work) != 0)
+ goto err_if;
+
+ if (slsi_netif_init(sdev) != 0) {
+ SLSI_ERR(sdev, "Can not create the network interface\n");
+ goto err_ctrl_wq_init;
+ }
+
+ if (slsi_hip_init(sdev, dev) != 0) {
+ SLSI_ERR(sdev, "slsi_hip_init() Failed\n");
+ goto err_netif_init;
+ }
+
+ if (slsi_udi_node_init(sdev, dev) != 0) {
+ SLSI_ERR(sdev, "failed to init UDI\n");
+ goto err_hip_init;
+ }
+
+ slsi_create_proc_dir(sdev);
+ slsi_traffic_mon_clients_init(sdev);
+
+ /* update regulatory domain */
+ slsi_regd_init(sdev);
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ slsi_nl80211_vendor_init(sdev);
+#endif
+
+ if (slsi_cfg80211_register(sdev) != 0) {
+ SLSI_ERR(sdev, "failed to register with cfg80211\n");
+ goto err_udi_proc_init;
+ }
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ sdev->inet6addr_notifier.notifier_call = slsi_dev_inet6addr_changed;
+ if (register_inet6addr_notifier(&sdev->inet6addr_notifier) != 0) {
+ SLSI_ERR(sdev, "failed to register inet6addr_notifier\n");
+ goto err_cfg80211_registered;
+ }
+#endif
+
+ sdev->inetaddr_notifier.notifier_call = slsi_dev_inetaddr_changed;
+ if (register_inetaddr_notifier(&sdev->inetaddr_notifier) != 0) {
+ SLSI_ERR(sdev, "failed to register inetaddr_notifier\n");
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ unregister_inet6addr_notifier(&sdev->inet6addr_notifier);
+#endif
+ goto err_cfg80211_registered;
+ }
+
+#ifdef SLSI_TEST_DEV
+ slsi_init_netdev_mac_addr(sdev);
+#endif
+ slsi_rx_ba_init(sdev);
+
+ /* Register the netdevs; which ones exist depends on the build config
+ * (STA only, Wi-Fi sharing, NAN), hence the nested ifdefs below.
+ */
+ if (slsi_netif_register(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]) != 0) {
+ SLSI_ERR(sdev, "failed to register with wlan netdev\n");
+ goto err_inetaddr_registered;
+ }
+#ifdef CONFIG_SCSC_WLAN_STA_ONLY
+ SLSI_ERR(sdev, "CONFIG_SCSC_WLAN_STA_ONLY: not registering p2p netdev\n");
+#else
+ if (slsi_netif_register(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]) != 0) {
+ SLSI_ERR(sdev, "failed to register with p2p netdev\n");
+ goto err_wlan_registered;
+ }
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+ if (slsi_netif_register(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]) != 0) {
+ SLSI_ERR(sdev, "failed to register with p2px_wlan1 netdev\n");
+ goto err_p2p_registered;
+ }
+ rcu_assign_pointer(sdev->netdev_ap, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
+#endif
+#endif
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+ if (slsi_netif_register(sdev, sdev->netdev[SLSI_NET_INDEX_NAN]) != 0) {
+ SLSI_ERR(sdev, "failed to register with NAN netdev\n");
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+ goto err_p2px_wlan_registered;
+#else
+ goto err_p2p_registered;
+#endif
+#else
+ goto err_p2p_registered;
+#endif
+ }
+#endif
+#endif
+#ifdef CONFIG_SCSC_WLAN_KIC_OPS
+ if (wifi_kic_register(sdev) < 0)
+ SLSI_ERR(sdev, "failed to register Wi-Fi KIC ops\n");
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ sdev->enhanced_pkt_filter_enabled = true;
+#endif
+ sdev->device_state = SLSI_DEVICE_STATE_STOPPED;
+ sdev->current_tspec_id = -1;
+ sdev->tspec_error_code = -1;
+
+ /* Driver workqueue used to queue work in different modes (STA/P2P/HS2) */
+ sdev->device_wq = alloc_ordered_workqueue("slsi_wlan_wq", 0);
+ if (!sdev->device_wq) {
+ SLSI_ERR(sdev, "Cannot allocate workqueue\n");
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+ goto err_nan_registered;
+#else
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+ goto err_p2px_wlan_registered;
+#else
+ goto err_p2p_registered;
+#endif
+#else
+ goto err_p2p_registered;
+#endif
+#endif
+ }
+ return sdev;
+
+ /* Error unwind: labels run in reverse order of the init steps above */
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+err_nan_registered:
+ slsi_netif_remove(sdev, sdev->netdev[SLSI_NET_INDEX_NAN]);
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+err_p2px_wlan_registered:
+ slsi_netif_remove(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
+ rcu_assign_pointer(sdev->netdev_ap, NULL);
+#endif
+#endif
+
+err_p2p_registered:
+ slsi_netif_remove(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
+
+err_wlan_registered:
+ slsi_netif_remove(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
+
+err_inetaddr_registered:
+ unregister_inetaddr_notifier(&sdev->inetaddr_notifier);
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ unregister_inet6addr_notifier(&sdev->inet6addr_notifier);
+#endif
+
+err_cfg80211_registered:
+ slsi_cfg80211_unregister(sdev);
+
+err_udi_proc_init:
+ slsi_traffic_mon_clients_deinit(sdev);
+ slsi_remove_proc_dir(sdev);
+ slsi_udi_node_deinit(sdev);
+
+err_hip_init:
+ slsi_hip_deinit(sdev);
+
+err_netif_init:
+ slsi_netif_deinit(sdev);
+
+err_ctrl_wq_init:
+ slsi_skb_work_deinit(&sdev->rx_dbg_sap);
+
+err_if:
+ slsi_wakelock_exit(&sdev->wlan_wl);
+ slsi_wakelock_exit(&sdev->wlan_wl_mlme);
+ slsi_wakelock_exit(&sdev->wlan_wl_ma);
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+ wake_lock_destroy(&sdev->wlan_wl_roam);
+#endif
+ slsi_cfg80211_free(sdev);
+ return NULL;
+}
+
+/* Tear down the driver context created by slsi_dev_attach().
+ * Stops the service first, then releases resources in reverse order of
+ * acquisition. Any waiters on the signal/recovery completions are woken
+ * up front so they cannot block the teardown.
+ */
+void slsi_dev_detach(struct slsi_dev *sdev)
+{
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Remove Device\n");
+
+ slsi_stop(sdev);
+
+#ifdef CONFIG_SCSC_WLAN_KIC_OPS
+ wifi_kic_unregister();
+#endif
+ /* Release anything blocked on signal or recovery completions */
+ complete_all(&sdev->sig_wait.completion);
+ complete_all(&sdev->recovery_remove_completion);
+ complete_all(&sdev->recovery_stop_completion);
+ complete_all(&sdev->recovery_completed);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Unregister inetaddr_notifier\n");
+ unregister_inetaddr_notifier(&sdev->inetaddr_notifier);
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Unregister inet6addr_notifier\n");
+ unregister_inet6addr_notifier(&sdev->inet6addr_notifier);
+#endif
+
+ WARN_ON(!sdev->device_wq);
+ if (sdev->device_wq)
+ flush_workqueue(sdev->device_wq);
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ slsi_nl80211_vendor_deinit(sdev);
+#endif
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Unregister netif\n");
+ slsi_netif_remove_all(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Unregister cfg80211\n");
+ slsi_cfg80211_unregister(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Remove proc entries\n");
+ slsi_remove_proc_dir(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "De-initialise the Traffic monitor\n");
+ slsi_traffic_mon_clients_deinit(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "De-initialise the UDI\n");
+ slsi_log_clients_terminate(sdev);
+ slsi_udi_node_deinit(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "De-initialise Hip\n");
+ slsi_hip_deinit(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "De-initialise netif\n");
+ slsi_netif_deinit(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "De-initialise Regulatory\n");
+ slsi_regd_deinit(sdev);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Stop Work Queues\n");
+ slsi_skb_work_deinit(&sdev->rx_dbg_sap);
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Clean up wakelock\n");
+ slsi_wakelock_exit(&sdev->wlan_wl);
+ slsi_wakelock_exit(&sdev->wlan_wl_mlme);
+ slsi_wakelock_exit(&sdev->wlan_wl_ma);
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+ wake_lock_destroy(&sdev->wlan_wl_roam);
+#endif
+
+ SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Free cfg80211\n");
+ slsi_cfg80211_free(sdev);
+
+ /* Report and reset skb-leak tracking (no-op unless tracking enabled) */
+ slsi_dbg_track_skb_report();
+ slsi_dbg_track_skb_reset();
+}
+
+/* Module init: register the service driver, UDI char device and all SAP
+ * (Service Access Point) handlers. Sub-init failures are logged but do
+ * not abort the load - this is deliberate best-effort behaviour.
+ */
+int __init slsi_dev_load(void)
+{
+ SLSI_INFO_NODEV("Loading Maxwell Wi-Fi driver\n");
+
+ slsi_dbg_track_skb_init();
+
+ if (slsi_udi_init())
+ SLSI_INFO_NODEV("Failed to init udi - continuing\n");
+
+ if (slsi_sm_service_driver_register())
+ SLSI_INFO_NODEV("slsi_sm_service_driver_register failed - continuing\n");
+
+ /* Register SAPs */
+ sap_mlme_init();
+ sap_ma_init();
+ sap_dbg_init();
+ sap_test_init();
+
+/* Always create the sysfs mac-address node on Android P (9) and later */
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+ slsi_create_sysfs_macaddr();
+#endif
+ SLSI_INFO_NODEV("--- Maxwell Wi-Fi driver loaded successfully ---\n");
+ return 0;
+}
+
+/* Module exit: undo slsi_dev_load() in reverse order. */
+void __exit slsi_dev_unload(void)
+{
+ SLSI_INFO_NODEV("Unloading Maxwell Wi-Fi driver\n");
+
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+ slsi_destroy_sysfs_macaddr();
+#endif
+ /* Unregister SAPs */
+ sap_mlme_deinit();
+ sap_ma_deinit();
+ sap_dbg_deinit();
+ sap_test_deinit();
+
+ slsi_sm_service_driver_unregister();
+
+ slsi_udi_deinit();
+
+ SLSI_INFO_NODEV("--- Maxwell Wi-Fi driver unloaded successfully ---\n");
+}
+
+module_init(slsi_dev_load);
+module_exit(slsi_dev_unload);
+
+MODULE_DESCRIPTION("mx140 Wi-Fi Driver");
+MODULE_AUTHOR("SLSI");
+MODULE_LICENSE("GPL and additional rights");
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_DEVICE_H__
+#define __SLSI_DEVICE_H__
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ratelimit.h>
+#include <linux/ip.h>
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+
+#include <linux/nl80211.h>
+#include <linux/wireless.h>
+#include <linux/proc_fs.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include <linux/nl80211.h>
+
+#include <scsc/scsc_mx.h>
+
+#include "fapi.h"
+#include "const.h"
+#include "utils.h"
+#include "hip.h"
+#include "log_clients.h"
+#include "src_sink.h"
+#include "scsc_wifi_fcq.h"
+#include "scsc_wifi_cm_if.h"
+#include "hip4.h"
+#include "nl80211_vendor.h"
+#include "traffic_monitor.h"
+
+#define FAPI_MAJOR_VERSION(v) (((v) >> 8) & 0xFF)
+#define FAPI_MINOR_VERSION(v) ((v) & 0xFF)
+
+/* Modes for CMDGETBSSINFO and CMDGETSTAINFO */
+#define SLSI_80211_MODE_11B 0
+#define SLSI_80211_MODE_11G 1
+#define SLSI_80211_MODE_11N 2
+#define SLSI_80211_MODE_11A 3
+#define SLSI_80211_MODE_11AC 4
+
+#define SLSI_FW_API_RATE_HT_SELECTOR_FIELD 0xc000
+#define SLSI_FW_API_RATE_NON_HT_SELECTED 0x4000
+#define SLSI_FW_API_RATE_HT_SELECTED 0x8000
+#define SLSI_FW_API_RATE_VHT_SELECTED 0xc000
+
+#define SLSI_FW_API_RATE_VHT_MCS_FIELD 0x000F
+#define SLSI_FW_API_RATE_HT_MCS_FIELD 0x003F
+#define SLSI_FW_API_RATE_INDEX_FIELD 0x1fff
+#define SLSI_FW_API_RATE_VHT_NSS_FIELD 0x0070
+#define SLSI_FW_API_RATE_HT_NSS_FIELD 0x0040
+
+#define SLSI_FW_API_RATE_BW_FIELD 0x0600
+#define SLSI_FW_API_RATE_BW_40MHZ 0x0200
+#define SLSI_FW_API_RATE_BW_20MHZ 0x0000
+
+#define SLSI_FW_API_RATE_SGI 0x0100
+#define SLSI_FW_API_RATE_GF 0x0080
+
+/* indices: 3= BW20->idx_0, BW40->idx_1, BW80->idx_2.
+ * 2= noSGI->idx_0, SGI->idx_1
+ * 10= mcs index
+ * rate units 100kbps
+ * This table is for single stream Nss=1 and does not include 160MHz BW and 80+80MHz BW.
+ */
+static const u16 slsi_rates_table[3][2][10] = {
+ { /* BW20 */
+ { /* no SGI */
+ 65, 130, 195, 260, 390, 520, 585, 650, 780, 0
+ },
+ { /* SGI */
+ 72, 144, 217, 289, 433, 578, 650, 722, 867, 0
+ }
+ },
+ { /* BW40 */
+ { /* no SGI */
+ 135, 270, 405, 540, 810, 1080, 1215, 1350, 1620, 1800
+ },
+ { /* SGI */
+ 150, 300, 450, 600, 900, 1200, 1350, 1500, 1800, 2000
+ }
+ },
+ { /* BW80 */
+ { /* no SGI */
+ 293, 585, 878, 1170, 1755, 2340, 2633, 2925, 3510, 3900
+ },
+ { /* SGI */
+ 325, 650, 975, 1300, 1950, 2600, 2925, 3250, 3900, 4333
+ }
+ }
+};
+/* MSDU subframe Header */
+/* MSDU subframe Header: 802.3 addresses followed by an LLC/SNAP header.
+ * Must be packed - it is copied verbatim over the frame in
+ * ethr_ii_to_subframe_msdu(). Uses the kernel's __packed macro for
+ * consistency with the other packed structs in this header.
+ */
+struct msduhdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ __be16 len; /* MSDU Subframe length */
+ unsigned char dsap; /* DSAP field - SNAP 0xaa */
+ unsigned char ssap; /* SSAP field - SNAP 0xaa */
+ unsigned char ui; /* Control field: U, func UI - 0x03 */
+ unsigned char oui[3]; /* Organization Code - 0x000000 */
+ __be16 type; /* Type - 0x0800 (IPv4)*/
+} __packed;
+
+/* Convert an Ethernet II frame in-place into an A-MSDU subframe:
+ * the Ethernet header is replaced by an MSDU header (same addresses,
+ * length field, LLC/SNAP). The skb must have enough headroom for the
+ * extra sizeof(msduhdr) - sizeof(ethhdr) bytes pushed at the front.
+ */
+static inline void ethr_ii_to_subframe_msdu(struct sk_buff *skb)
+{
+ struct ethhdr *ehdr;
+ struct msduhdr msduh;
+
+ ehdr = eth_hdr(skb);
+ ether_addr_copy(msduh.h_dest, ehdr->h_dest);
+ ether_addr_copy(msduh.h_source, ehdr->h_source);
+ /* adjust packet length: MSDU length field excludes addresses and the
+ * length field itself (hence the -6 after the later skb_push)
+ */
+ msduh.len = cpu_to_be16(skb->len - 6);
+ msduh.dsap = 0xaa;
+ msduh.ssap = 0xaa;
+ msduh.ui = 0x03;
+ memset(msduh.oui, 0x0, 3);
+ /* Preserve the original EtherType in the SNAP header */
+ msduh.type = ehdr->h_proto;
+ (void)skb_push(skb, sizeof(struct msduhdr) - sizeof(struct ethhdr));
+ /* update SKB mac_header to point to start of MSDU header */
+ skb->mac_header -= (sizeof(struct msduhdr) - sizeof(struct ethhdr));
+ memcpy(skb->data, &msduh, sizeof(struct msduhdr));
+}
+
+#define SLSI_TX_PROCESS_ID_MIN (0xC001)
+#define SLSI_TX_PROCESS_ID_MAX (0xCF00)
+#define SLSI_TX_PROCESS_ID_UDI_MIN (0xCF01)
+#define SLSI_TX_PROCESS_ID_UDI_MAX (0xCFFE)
+
+/* There are no wakelocks in kernel/supplicant/hostapd.
+ * So keep the platform active for some time after receiving any data packet.
+ * This timeout value can be fine-tuned based on the test results.
+ */
+#define SLSI_RX_WAKELOCK_TIME (200)
+#define MAX_BA_BUFFER_SIZE 64
+#define NUM_BA_SESSIONS_PER_PEER 8
+#define MAX_CHANNEL_LIST 20
+#define SLSI_MAX_RX_BA_SESSIONS (8)
+#define SLSI_STA_ACTION_FRAME_BITMAP (SLSI_ACTION_FRAME_PUBLIC | SLSI_ACTION_FRAME_WMM | SLSI_ACTION_FRAME_WNM |\
+ SLSI_ACTION_FRAME_QOS | SLSI_ACTION_FRAME_PROTECTED_DUAL |\
+ SLSI_ACTION_FRAME_RADIO_MEASUREMENT)
+
+/* Default value for MIB SLSI_PSID_UNIFI_DISCONNECT_TIMEOUT + 1 sec*/
+#define SLSI_DEFAULT_AP_DISCONNECT_IND_TIMEOUT 3000
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define WLAN_EID_VHT_CAPABILITY 191
+#define WLAN_EID_VHT_OPERATION 192
+#endif
+
+#define NUM_COUNTRY (300)
+
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+#define SLSI_MUTEX_INIT(slsi_mutex__) \
+ { \
+ (slsi_mutex__).owner = NULL; \
+ mutex_init(&(slsi_mutex__).mutex); \
+ (slsi_mutex__).valid = true; \
+ }
+
+#define SLSI_MUTEX_LOCK(slsi_mutex_to_lock) \
+ { \
+ (slsi_mutex_to_lock).line_no_before = __LINE__; \
+ (slsi_mutex_to_lock).file_name_before = __FILE__; \
+ mutex_lock(&(slsi_mutex_to_lock).mutex); \
+ (slsi_mutex_to_lock).owner = current; \
+ (slsi_mutex_to_lock).line_no_after = __LINE__; \
+ (slsi_mutex_to_lock).file_name_after = __FILE__; \
+ (slsi_mutex_to_lock).function = __func__; \
+ }
+
+#define SLSI_MUTEX_UNLOCK(slsi_mutex_to_unlock) \
+ { \
+ (slsi_mutex_to_unlock).owner = NULL; \
+ mutex_unlock(&(slsi_mutex_to_unlock).mutex); \
+ }
+#define SLSI_MUTEX_IS_LOCKED(slsi_mutex__) mutex_is_locked(&(slsi_mutex__).mutex)
+
+/* Debug wrapper around a kernel mutex (CONFIG_SCSC_WLAN_MUTEX_DEBUG):
+ * records who locked it and from where, for post-mortem deadlock analysis.
+ * Fields are filled in by the SLSI_MUTEX_LOCK/UNLOCK macros above.
+ */
+struct slsi_mutex {
+ bool valid; /* set true by SLSI_MUTEX_INIT */
+ u32 line_no_before; /* call site recorded just before mutex_lock() */
+ const u8 *file_name_before;
+ /* a std mutex */
+ struct mutex mutex;
+ u32 line_no_after; /* call site recorded after the lock is taken */
+ const u8 *file_name_after;
+ const u8 *function; /* function that currently holds the lock */
+ struct task_struct *owner; /* task holding the lock, NULL when unlocked */
+};
+
+#else
+#define SLSI_MUTEX_INIT(mutex__) mutex_init(&(mutex__))
+#define SLSI_MUTEX_LOCK(mutex_to_lock) mutex_lock(&(mutex_to_lock))
+#define SLSI_MUTEX_UNLOCK(mutex_to_unlock) mutex_unlock(&(mutex_to_unlock))
+#define SLSI_MUTEX_IS_LOCKED(mutex__) mutex_is_locked(&(mutex__))
+#endif
+
+#define OS_UNUSED_PARAMETER(x) ((void)(x))
+
+#define SLSI_HOST_TAG_TRAFFIC_QUEUE(htag) (htag & 0x00000003)
+
+/* For each mlme-req a mlme-cfm is expected to be received from the
+ * firmware. The host is not allowed to send another mlme-req until
+ * the mlme-cfm is received.
+ *
+ * However there are also instances where we need to wait for an mlme-ind
+ * following a mlme-req/cfm exchange. One example of this is the disconnect
+ * sequence:
+ * mlme-disconnect-req - host requests disconnection
+ * mlme-disconnect-cfm - firmware accepts disconnection request but hasn't
+ * disconnected yet.
+ * mlme-disconnect-ind - firmware reports final result of disconnection
+ *
+ * Assuming that waiting for the mlme-ind following on from the mlme-req/cfm
+ * is ok.
+ */
+/* State for one outstanding mlme-req/cfm(/ind) exchange (see the comment
+ * above): the mutex serialises senders, the completion is signalled when
+ * the expected cfm (and optionally ind) arrives.
+ */
+struct slsi_sig_send {
+ /* a std spinlock - protects the id/skb fields below */
+ spinlock_t send_signal_lock;
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex mutex;
+#else
+ /* a std mutex - one mlme-req in flight at a time */
+ struct mutex mutex;
+#endif
+ struct completion completion;
+
+ u16 process_id; /* cycles through SLSI_TX_PROCESS_ID_MIN..MAX */
+ u16 req_id; /* id of the request awaiting a response */
+ u16 cfm_id; /* expected confirm signal id */
+ u16 ind_id; /* expected indication signal id (0 if none) */
+ struct sk_buff *cfm; /* received confirm, handed back to the waiter */
+ struct sk_buff *ind; /* received indication, handed back to the waiter */
+ struct sk_buff *mib_error;
+};
+
+/* Initialise a slsi_sig_send before first use.
+ * NOTE(review): ind_id and the cfm/ind/mib_error skb pointers are not
+ * initialised here - presumably the containing structure is zeroed on
+ * allocation; confirm against the allocator of the embedding struct.
+ */
+static inline void slsi_sig_send_init(struct slsi_sig_send *sig_send)
+{
+ spin_lock_init(&sig_send->send_signal_lock);
+ sig_send->req_id = 0;
+ sig_send->cfm_id = 0;
+ sig_send->process_id = SLSI_TX_PROCESS_ID_MIN;
+ SLSI_MUTEX_INIT(sig_send->mutex);
+ init_completion(&sig_send->completion);
+}
+
+/* One slot in the block-ack reorder buffer: a buffered frame and its
+ * sequence number. 'active' marks the slot as occupied.
+ */
+struct slsi_ba_frame_desc {
+ bool active;
+ struct sk_buff *signal;
+ u16 sn; /* 802.11 sequence number of the buffered frame */
+};
+
+/* Receive-side block-ack session: reorder buffer plus window state for
+ * one TID of one peer. Protected by ba_lock; stale sessions are flushed
+ * by the aging timer.
+ */
+struct slsi_ba_session_rx {
+ bool active;
+ bool used;
+ void *vif;
+ struct slsi_ba_frame_desc buffer[MAX_BA_BUFFER_SIZE]; /* reorder window slots */
+ u16 buffer_size; /* negotiated window size (<= MAX_BA_BUFFER_SIZE) */
+ u16 occupied_slots;
+ u16 expected_sn; /* next in-order sequence number to release */
+ u16 start_sn;
+ u16 highest_received_sn;
+ bool trigger_ba_after_ssn;
+ u8 tid;
+
+ /* Aging timer parameters */
+ bool timer_on;
+ struct timer_list ba_age_timer;
+ struct slsi_spinlock ba_lock;
+ struct net_device *dev;
+};
+
+#define SLSI_TID_MAX (16)
+#define SLSI_AMPDU_F_INITIATED (0x0001)
+#define SLSI_AMPDU_F_CREATED (0x0002)
+#define SLSI_AMPDU_F_OPERATIONAL (0x0004)
+
+#define SLSI_SCAN_HW_ID 0
+#define SLSI_SCAN_SCHED_ID 1
+#define SLSI_SCAN_MAX 3
+
+#define SLSI_SCAN_SSID_MAP_MAX 10 /* Arbitrary value */
+#define SLSI_SCAN_SSID_MAP_EXPIRY_AGE 2 /* If hidden bss not found these many scan cycles, remove map. Arbitrary value*/
+#define SLSI_FW_SCAN_DONE_TIMEOUT_MSEC (15 * 1000)
+
+/* One BSS found during a scan; nodes form a singly-linked list headed by
+ * slsi_scan.scan_results. Both probe response and beacon may be held.
+ */
+struct slsi_scan_result {
+ u8 bssid[ETH_ALEN];
+ u8 hidden; /* non-zero if the SSID was hidden */
+ int rssi;
+ struct sk_buff *probe_resp;
+ struct sk_buff *beacon;
+ struct slsi_scan_result *next;
+ int band;
+};
+
+/* Per Interface Scan Data
+ * Access protected by: cfg80211_lock
+ */
+/* Per Interface Scan Data
+ * Access protected by: cfg80211_lock
+ */
+struct slsi_scan {
+ /* When a Scan is running this not NULL. */
+ struct cfg80211_scan_request *scan_req;
+ struct slsi_acs_request *acs_request; /* pending automatic-channel-selection request */
+ struct cfg80211_sched_scan_request *sched_req; /* scheduled (PNO) scan request */
+ bool requeue_timeout_work;
+
+ /* Indicates if the scan req is blocking. i.e, waiting until scan_done_ind received */
+ bool is_blocking_scan;
+
+ struct slsi_scan_result *scan_results; /* head for scan_results list*/
+};
+
+/* BSSID -> SSID mapping learned for hidden networks; 'age' counts scan
+ * cycles since last seen (entries expire after SLSI_SCAN_SSID_MAP_EXPIRY_AGE).
+ */
+struct slsi_ssid_map {
+ u8 bssid[ETH_ALEN];
+ u8 ssid[32];
+ u8 ssid_len;
+ u8 age;
+ int band;
+};
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+struct slsi_enhanced_arp_counters {
+ u16 arp_req_count_from_netdev;
+ u16 arp_req_count_to_lower_mac;
+ u16 arp_req_rx_count_by_lower_mac;
+ u16 arp_req_count_tx_success;
+ u16 arp_rsp_rx_count_by_lower_mac;
+ u16 arp_rsp_rx_count_by_upper_mac;
+ u16 arp_rsp_count_to_netdev;
+ u16 arp_rsp_count_out_of_order_drop;
+ u16 ap_link_active;
+ bool is_duplicate_addr_detected;
+};
+#endif
+
+/* Per-peer (station) record: association state, queues and statistics. */
+struct slsi_peer {
+ /* Flag MUST be set last when creating a record and immediately when removing.
+ * Otherwise another process could test the flag and start using the data.
+ */
+ bool valid;
+ u8 address[ETH_ALEN];
+
+ /* Presently connected_state is used only for AP/GO mode*/
+ u8 connected_state;
+ u16 aid; /* association id assigned by the AP */
+ /* Presently is_wps is used only in P2P GO mode */
+ bool is_wps;
+ u16 capabilities;
+ bool qos_enabled;
+ u8 queueset;
+ struct scsc_wifi_fcq_data_qset data_qs; /* per-peer data flow-control queues */
+ struct scsc_wifi_fcq_ctrl_q ctrl_q; /* per-peer control flow-control queue */
+
+ bool authorized; /* 802.1X port open - data allowed */
+ bool pairwise_key_set;
+
+ /* Needed for STA/AP VIF */
+ struct sk_buff *assoc_ie;
+ struct sk_buff_head buffered_frames; /* frames held until the peer is authorized */
+ /* Needed for STA VIF */
+ struct sk_buff *assoc_resp_ie;
+
+ /* bitmask that keeps the status of acm bit for each AC
+ * bit 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * vo vo vi vi be bk bk be
+ */
+ u8 wmm_acm;
+ /* bitmask that keeps the status of tspec establishment for each priority
+ * bit 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * p7 p6 p5 p4 p3 p2 p1 p0
+ */
+ u8 tspec_established;
+ u8 uapsd;
+
+ /* TODO_HARDMAC:
+ * Q: Can we obtain stats from the firmware?
+ * Yes - then this is NOT needed and we can just get from the firmware when requested.
+ * No - How much can we get from the PSCHED?
+ */
+ struct station_info sinfo;
+ /* rate limit for peer sinfo mib reads */
+ struct ratelimit_state sinfo_mib_get_rs;
+ struct slsi_ba_session_rx *ba_session_rx[NUM_BA_SESSIONS_PER_PEER];
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ /* qos map configured at peer end*/
+ bool qos_map_set;
+ struct cfg80211_qos_map qos_map;
+#endif
+};
+
+/* Used to update vif type on vif deactivation indicating vif is no longer available */
+#define SLSI_VIFTYPE_UNSPECIFIED 0xFFFF
+
+struct slsi_vif_mgmt_tx {
+ u64 cookie; /* Cookie assigned by Host for the tx mgmt frame */
+ u16 host_tag; /* Host tag for the tx mgmt frame */
+ const u8 *buf; /* Buffer - Mgmt frame requested for tx */
+ size_t buf_len; /* Buffer length */
+ u8 exp_frame; /* Next expected Public action frame subtype from peer */
+};
+
+/* One WMM Access Category record as carried inside the WMM parameter IE;
+ * wire format, hence __packed.
+ */
+struct slsi_wmm_ac {
+ u8 aci_aifsn;
+ u8 ecw;
+ u16 txop_limit;
+} __packed;
+
+/* struct slsi_wmm_parameter_element
+ *
+ * eid - Vendor Specific
+ * len - Remaining Length of IE
+ * oui - Microsoft OUI
+ * oui_type - WMM
+ * oui_subtype - Param IE
+ * version - 1
+ * qos_info - Qos
+ * reserved -
+ * ac - BE,BK,VI,VO
+ */
+/* On-air layout of the WMM Parameter IE (field meanings described above). */
+struct slsi_wmm_parameter_element {
+ u8 eid;
+ u8 len;
+ u8 oui[3];
+ u8 oui_type;
+ u8 oui_subtype;
+ u8 version;
+ u8 qos_info;
+ u8 reserved;
+ struct slsi_wmm_ac ac[4];
+} __packed;
+
+#define SLSI_MIN_FILTER_ID 0x80 /* Start of filter range reserved for host */
+
+/* for AP */
+#define SLSI_AP_ALL_IPV6_PKTS_FILTER_ID 0x80
+
+/* filter IDs for filters installed by driver */
+#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
+
+/* Filter IDs installed by the driver when IPv6 traffic is blocked.
+ * NOTE(review): the hex values in the trailing comments assume every optional
+ * entry is compiled in; the later IDs shift down when
+ * CONFIG_SCSC_WLAN_DISABLE_NAT_KA is set or
+ * CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER is unset.
+ */
+enum slsi_filter_id {
+ SLSI_LOCAL_ARP_FILTER_ID = SLSI_MIN_FILTER_ID, /* 0x80 */
+ SLSI_ALL_BC_MC_FILTER_ID, /* 0x81 */
+ SLSI_PROXY_ARP_FILTER_ID, /* 0x82 */
+ SLSI_ALL_IPV6_PKTS_FILTER_ID, /* 0x83 */
+#ifndef CONFIG_SCSC_WLAN_DISABLE_NAT_KA
+ SLSI_NAT_IPSEC_FILTER_ID, /* 0x84 */
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ SLSI_OPT_OUT_ALL_FILTER_ID, /* 0x85 */
+ SLSI_OPT_IN_TCP4_FILTER_ID, /* 0x86 */
+ SLSI_OPT_IN_TCP6_FILTER_ID, /* 0x87 */
+#endif
+ SLSI_REGD_MC_FILTER_ID, /* 0x88 */
+};
+#else
+
+/* for STA */
+/* Filter IDs installed by the driver when IPv6 is allowed (STA variant adds
+ * the NS/NA related entries).
+ * NOTE(review): the hex values in the trailing comments assume every optional
+ * entry is compiled in; the later IDs shift down when
+ * CONFIG_SCSC_WLAN_DISABLE_NAT_KA is set or
+ * CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER is unset.
+ */
+enum slsi_filter_id {
+ SLSI_LOCAL_ARP_FILTER_ID = SLSI_MIN_FILTER_ID, /* 0x80 */
+ SLSI_ALL_BC_MC_FILTER_ID, /* 0x81 */
+ SLSI_PROXY_ARP_FILTER_ID, /* 0x82 */
+ SLSI_LOCAL_NS_FILTER_ID, /* 0x83 */
+ SLSI_PROXY_ARP_NA_FILTER_ID, /* 0x84 */
+#ifndef CONFIG_SCSC_WLAN_DISABLE_NAT_KA
+ SLSI_NAT_IPSEC_FILTER_ID, /* 0x85 */
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ SLSI_OPT_OUT_ALL_FILTER_ID, /* 0x86 */
+ SLSI_OPT_IN_TCP4_FILTER_ID, /* 0x87 */
+ SLSI_OPT_IN_TCP6_FILTER_ID, /* 0x88 */
+#endif
+ SLSI_REGD_MC_FILTER_ID, /* 0x89 */
+};
+
+#endif
+
+#define SLSI_MAX_PKT_FILTERS 16
+
+#ifndef CONFIG_SCSC_WLAN_DISABLE_NAT_KA
+/* default config */
+#define SLSI_MC_ADDR_ENTRY_MAX (SLSI_MIN_FILTER_ID + SLSI_MAX_PKT_FILTERS - SLSI_REGD_MC_FILTER_ID)
+#else
+#define SLSI_MC_ADDR_ENTRY_MAX (SLSI_MIN_FILTER_ID + SLSI_MAX_PKT_FILTERS - SLSI_REGD_MC_FILTER_ID + 1)
+#endif
+
+/* Values for vif_status field
+ *
+ * Used to indicate the status of an activated VIF, to help resolve
+ * conflicting activities with indications from the firmware eg.
+ * cfg80211 triggers a disconnection before a STA completes its
+ * connection to an AP.
+ */
+#define SLSI_VIF_STATUS_UNSPECIFIED 0
+#define SLSI_VIF_STATUS_CONNECTING 1
+#define SLSI_VIF_STATUS_CONNECTED 2
+#define SLSI_VIF_STATUS_DISCONNECTING 3
+
+/*From wifi_offload.h (N_AVAIL_ID=3)*/
+#define SLSI_MAX_KEEPALIVE_ID 3
+
+/* Snapshot of the last connected BSS; kept valid even after the vif is
+ * deactivated (see slsi_vif_sta.last_connected_bss).
+ */
+struct slsi_last_connected_bss {
+ u8 address[ETH_ALEN];
+ int antenna_mode;
+ int rssi;
+ int mode;
+ int passpoint_version;
+ int snr;
+ int noise_level;
+ u16 bandwidth;
+ u16 roaming_count;
+ u16 channel_freq;
+ u16 tx_data_rate;
+ u8 roaming_akm;
+ u8 kv;
+ u32 kvie;
+ bool mimo_used;
+};
+
+/* STA-mode state for a VIF (connection progress, key/filter bookkeeping,
+ * roaming and TDLS tracking).
+ */
+struct slsi_vif_sta {
+ /* Only valid when the VIF is activated */
+ u8 vif_status; /* One of SLSI_VIF_STATUS_* */
+ bool is_wps;
+ u16 eap_hosttag;
+ u16 m4_host_tag;
+ u16 keepalive_host_tag[SLSI_MAX_KEEPALIVE_ID];
+
+ struct sk_buff *roam_mlme_procedure_started_ind;
+
+ /* This id is used to find out which response (connect resp/roamed resp/reassoc resp)
+ * is to be sent once M4 is transmitted successfully
+ */
+ u16 resp_id;
+ bool gratuitous_arp_needed;
+
+ /* regd multicast address*/
+ u8 regd_mc_addr_count;
+ u8 regd_mc_addr[SLSI_MC_ADDR_ENTRY_MAX][ETH_ALEN];
+ bool group_key_set;
+ struct sk_buff *mlme_scan_ind_skb;
+ bool roam_in_progress;
+ int tdls_peer_sta_records;
+ bool tdls_enabled;
+ struct cfg80211_bss *sta_bss;
+ u8 *assoc_req_add_info_elem;
+ u8 assoc_req_add_info_elem_len;
+
+ /* List of seen ESS and Freq associated with them */
+ struct list_head network_map;
+
+ struct slsi_wmm_ac wmm_ac[4];
+ bool nd_offload_enabled;
+
+ /*This structure is used to store last disconnected bss info and valid even when vif is deactivated. */
+ struct slsi_last_connected_bss last_connected_bss;
+ struct cfg80211_crypto_settings crypto;
+
+ /* Variable to indicate if roamed_ind needs to be dropped in driver, to maintain roam synchronization. */
+ atomic_t drop_roamed_ind;
+};
+
+/* State for the unsynchronised vif (remain-on-channel / listen / mgmt-tx
+ * procedures).
+ */
+struct slsi_vif_unsync {
+ struct delayed_work roc_expiry_work; /* Work on ROC duration expiry */
+ struct delayed_work del_vif_work; /* Work on unsync vif retention timeout */
+ struct delayed_work hs2_del_vif_work; /* Work on HS2 unsync vif retention timeout */
+ struct delayed_work unset_channel_expiry_work; /*unset channel after a timer */
+ u64 roc_cookie; /* Cookie id for ROC */
+ u8 *probe_rsp_ies; /* Probe response IEs to be configured in firmware */
+ size_t probe_rsp_ies_len; /* Probe response IE length */
+ bool ies_changed; /* To indicate if Probe Response IEs have changed from that previously stored */
+ bool listen_offload; /* To indicate if Listen Offload is started */
+ bool slsi_p2p_continuous_fullscan;
+};
+
+/* Snapshot of the most recently disconnected station in AP mode.
+ * NOTE(review): appears to be retained for post-disconnect stats reporting -
+ * confirm against callers.
+ */
+struct slsi_last_disconnected_sta {
+ u8 address[ETH_ALEN];
+ u32 rx_retry_packets;
+ u32 rx_bc_mc_packets;
+ u16 capabilities;
+ int bandwidth;
+ int antenna_mode;
+ int rssi;
+ int mode;
+ u16 tx_data_rate;
+ bool mimo_used;
+ u16 reason;
+ int support_mode;
+};
+
+/* AP/GO-mode state for a VIF (cached IEs, group queues, BSS parameters). */
+struct slsi_vif_ap {
+ struct slsi_wmm_parameter_element wmm_ie;
+ struct slsi_last_disconnected_sta last_disconnected_sta;
+ u8 *cache_wmm_ie;
+ u8 *cache_wpa_ie;
+ u8 *add_info_ies;
+ size_t wmm_ie_len;
+ size_t wpa_ie_len;
+ size_t add_info_ies_len;
+ bool p2p_gc_keys_set; /* Used in GO mode to identify that a CLI has connected after WPA2 handshake */
+ bool privacy; /* Used for port enabling based on the open/secured AP configuration */
+ bool qos_enabled;
+ int beacon_interval; /* Beacon interval in AP/GO mode */
+ int mode;
+ bool non_ht_bss_present; /* Non HT BSS observed in HT20 OBSS scan */
+ struct scsc_wifi_fcq_data_qset group_data_qs;
+ u32 cipher;
+ u16 channel_freq;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+};
+
+#define SLSI_NAN_MAX_PUBLISH_ID 16
+#define SLSI_NAN_MAX_SUBSCRIBE_ID 16
+
+/* NAN-mode state: bitmaps of in-use publish/subscribe session ids
+ * (ids 1..SLSI_NAN_MAX_PUBLISH_ID / SLSI_NAN_MAX_SUBSCRIBE_ID).
+ */
+struct slsi_vif_nan {
+ u32 publish_id_map;
+ u32 subscribe_id_map;
+};
+
+#define TCP_ACK_SUPPRESSION_RECORDS_MAX 16
+#define TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT 10 /* in seconds */
+
+#define TCP_ACK_SUPPRESSION_OPTIONS_OFFSET 20
+#define TCP_ACK_SUPPRESSION_OPTION_EOL 0
+#define TCP_ACK_SUPPRESSION_OPTION_NOP 1
+#define TCP_ACK_SUPPRESSION_OPTION_MSS 2
+#define TCP_ACK_SUPPRESSION_OPTION_WINDOW 3
+#define TCP_ACK_SUPPRESSION_OPTION_SACK 5
+
+#define SLSI_IS_VIF_CHANNEL_5G(ndev_vif) (((ndev_vif)->chan) ? ((ndev_vif)->chan->hw_value > 14) : 0)
+
+/* Per-flow TCP ACK suppression record, keyed by the address/port 4-tuple.
+ * NOTE(review): the 32-bit address fields imply IPv4-only flows - confirm.
+ */
+struct slsi_tcp_ack_s {
+ u32 daddr;
+ u32 dport;
+ u32 saddr;
+ u32 sport;
+ struct sk_buff_head list;
+ u8 window_multiplier;
+ u16 mss;
+ u32 ack_seq;
+ u16 slow_start_count;
+ u8 count;
+ u8 max;
+ u8 age;
+ struct timer_list timer;
+ struct slsi_spinlock lock;
+ bool state;
+ ktime_t last_sent;
+ bool tcp_slow_start;
+
+ /* TCP session throughput monitor */
+ u16 hysteresis;
+ u32 last_tcp_rate;
+ ktime_t last_sample_time;
+ u32 last_ack_seq;
+ u64 num_bytes;
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ u8 stream_id;
+ u8 rx_window_scale;
+#endif
+};
+
+/* Counters for the TCP ACK suppression feature (per netdev_vif). */
+struct slsi_tcp_ack_stats {
+ u32 tack_acks;
+ u32 tack_suppressed;
+ u32 tack_sent;
+ u32 tack_max;
+ u32 tack_timeout;
+ u32 tack_dacks;
+ u32 tack_sacks;
+ u32 tack_delay_acks;
+ u32 tack_low_window;
+ u32 tack_nocache;
+ u32 tack_norecord;
+ u32 tack_hasdata;
+ u32 tack_psh;
+ u32 tack_dropped;
+ u32 tack_ktime;
+ u32 tack_lastrecord;
+ u32 tack_searchrecord;
+ u32 tack_ece;
+};
+
+/* Per-interface (VIF) driver state. Holds the mode-specific sub-states
+ * (unsync/sta/ap/nan), RX work queues, scan state and traffic statistics.
+ */
+struct netdev_vif {
+ struct slsi_dev *sdev;
+ struct wireless_dev wdev;
+ atomic_t is_registered; /* Has the net dev been registered */
+ bool is_available; /* Has the net dev been opened AND is usable */
+ bool is_fw_test; /* Is the device in use as a test device via UDI */
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ bool is_wips_running;
+#endif
+ /* Structure can be accessed by cfg80211 ops, procfs/ioctls and as a result
+ * of receiving MLME indications e.g. MLME-CONNECT-IND that can affect the
+ * status of the interface eg. STA connect failure will delete the VIF.
+ */
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex vif_mutex;
+#else
+ /* a std mutex */
+ struct mutex vif_mutex;
+#endif
+ struct slsi_sig_send sig_wait;
+
+ struct slsi_skb_work rx_data;
+ struct slsi_skb_work rx_mlme;
+ u16 ifnum;
+ enum nl80211_iftype iftype;
+ enum nl80211_channel_type channel_type;
+ struct ieee80211_channel *chan;
+ u16 driver_channel;
+ bool drv_in_p2p_procedure;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ struct cfg80211_chan_def *chandef;
+#endif
+
+ /* NOTE: The Address is a __be32
+ * It needs converting to pass to the FW
+ * But not for the Arp or trace %pI4
+ */
+ __be32 ipaddress;
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ struct in6_addr ipv6address;
+ struct slsi_spinlock ipv6addr_lock;
+#endif
+ struct net_device_stats stats;
+ u32 rx_packets[SLSI_LLS_AC_MAX];
+ u32 tx_packets[SLSI_LLS_AC_MAX];
+ u32 tx_no_ack[SLSI_LLS_AC_MAX];
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex scan_mutex;
+ struct slsi_mutex scan_result_mutex;
+#else
+ /* a std mutex */
+ struct mutex scan_mutex;
+ struct mutex scan_result_mutex;
+
+#endif
+ struct slsi_scan scan[SLSI_SCAN_MAX];
+
+ struct slsi_src_sink_params src_sink_params;
+ u16 power_mode;
+ u16 set_power_mode;
+
+ bool activated; /* VIF is created in firmware and ready to use */
+ u16 vif_type; /* SLSI_VIFTYPE_UNSPECIFIED once the vif is deactivated */
+ struct slsi_spinlock peer_lock;
+ int peer_sta_records;
+ struct slsi_peer *peer_sta_record[SLSI_ADHOC_PEER_CONNECTIONS_MAX];
+
+ /* Used to populate the cfg80211 station_info structure generation variable.
+ * This number should increase every time the list of stations changes
+ * i.e. when a station is added or removed, so that userspace can tell
+ * whether it got a consistent snapshot.
+ */
+ int cfg80211_sinfo_generation;
+
+ /* Block Ack MPDU Re-order */
+ struct sk_buff_head ba_complete;
+ atomic_t ba_flush;
+
+ u64 mgmt_tx_cookie; /* Cookie id for mgmt tx */
+ struct slsi_vif_mgmt_tx mgmt_tx_data;
+ struct delayed_work scan_timeout_work; /* Work on scan timeout */
+ bool delete_probe_req_ies; /* Delete probe request stored at probe_req_ies, if
+ * connected for WAP2 at mlme_del_vif or in all cases
+ * if STA
+ */
+ u8 *probe_req_ies;
+ size_t probe_req_ie_len;
+
+ struct slsi_vif_unsync unsync;
+ struct slsi_vif_sta sta;
+ struct slsi_vif_ap ap;
+ struct slsi_vif_nan nan;
+
+ /* TCP ack suppression. */
+ struct slsi_tcp_ack_s *last_tcp_ack;
+ struct slsi_tcp_ack_s ack_suppression[TCP_ACK_SUPPRESSION_RECORDS_MAX];
+ struct slsi_tcp_ack_stats tcp_ack_stats;
+ /* traffic monitor */
+ ktime_t last_timer_time;
+ u32 report_time;
+ u32 num_bytes_tx_per_timer;
+ u32 num_bytes_rx_per_timer;
+ u32 num_bytes_tx_per_sec;
+ u32 num_bytes_rx_per_sec;
+ u32 throughput_tx;
+ u32 throughput_rx;
+ u32 throughput_tx_bps;
+ u32 throughput_rx_bps;
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ bool enhanced_arp_detect_enabled;
+ struct slsi_enhanced_arp_counters enhanced_arp_stats;
+ u8 target_ip_addr[4];
+ int enhanced_arp_host_tag[5];
+#endif
+};
+
+/* 802.11d regulatory domain state: country list and the regdomain built
+ * from it.
+ */
+struct slsi_802_11d_reg_domain {
+ u8 *countrylist;
+ struct ieee80211_regdomain *regdomain;
+ int country_len;
+};
+
+/* Packet-filter (APF) capabilities reported by firmware - see
+ * slsi_dev_config.fw_apf_supported. max_length is presumably the maximum
+ * filter program size in bytes - confirm against firmware docs.
+ */
+struct slsi_apf_capabilities {
+ u16 version;
+ u16 max_length;
+};
+
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+/* WES-mode roam scan channel list: n valid entries in channels[]. */
+struct slsi_wes_mode_roam_scan_channels {
+ int n;
+ u8 channels[MAX_CHANNEL_LIST];
+};
+#endif
+
+/* Runtime device configuration, mostly driven via procfs entries.
+ * NOTE(review): presumably guarded by slsi_dev.device_config_mutex - confirm.
+ */
+struct slsi_dev_config {
+ /* Supported Freq Band (Dynamic)
+ * Set via the freq_band procfs
+ */
+#define SLSI_FREQ_BAND_AUTO 0
+#define SLSI_FREQ_BAND_5GHZ 1
+#define SLSI_FREQ_BAND_2GHZ 2
+ int supported_band;
+
+ struct ieee80211_supported_band *band_5G;
+ struct ieee80211_supported_band *band_2G;
+
+ /* current user suspend mode
+ * Set via the suspend_mode procfs
+ * 0 : not suspended
+ * 1 : suspended
+ */
+ int user_suspend_mode;
+
+ /* Rx filtering rule
+ * Set via the rx_filter_num procfs
+ * 0: Unicast, 1: Broadcast, 2:Multicast IPv4, 3: Multicast IPv6
+ */
+ int rx_filter_num;
+
+ /* Rx filter rule enabled
+ * Set via the rx_filter_start & rx_filter_stop procfs
+ */
+ bool rx_filter_rule_started;
+
+ /* AP Auto channel Selection */
+#define SLSI_NO_OF_SCAN_CHANLS_FOR_AUTO_CHAN_MAX 14
+ int ap_auto_chan;
+
+ /*QoS capability for a non-AP Station*/
+ int qos_info;
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+ /* NCHO OKC mode */
+ int okc_mode;
+
+ /*NCHO WES mode */
+ int wes_mode;
+
+ int roam_scan_mode;
+
+ /*WES mode roam scan channels*/
+ struct slsi_wes_mode_roam_scan_channels wes_roam_scan_list;
+#endif
+ struct slsi_802_11d_reg_domain domain_info;
+
+ int ap_disconnect_ind_timeout;
+
+ u8 host_state;
+
+ int rssi_boost_5g;
+ int rssi_boost_2g;
+ bool disable_ch12_ch13;
+ bool fw_enhanced_arp_detect_supported;
+ bool fw_apf_supported;
+ struct slsi_apf_capabilities apf_cap;
+};
+
+#define SLSI_DEVICE_STATE_ATTACHING 0
+#define SLSI_DEVICE_STATE_STOPPED 1
+#define SLSI_DEVICE_STATE_STARTING 2
+#define SLSI_DEVICE_STATE_STARTED 3
+#define SLSI_DEVICE_STATE_STOPPING 4
+
+#define SLSI_NET_INDEX_WLAN 1
+#define SLSI_NET_INDEX_P2P 2
+#define SLSI_NET_INDEX_P2PX_SWLAN 3
+#define SLSI_NET_INDEX_NAN 4
+
+/* States used during P2P operations */
+/* Keep this enum in sync with slsi_p2p_state_text() below. */
+enum slsi_p2p_states {
+ P2P_IDLE_NO_VIF, /* Initial state - Unsync vif is not present */
+ P2P_IDLE_VIF_ACTIVE, /* Unsync vif is present but no P2P procedure in progress */
+ P2P_SCANNING, /* P2P SOCIAL channel (1,6,11) scan in progress. Not used for P2P full scan */
+ P2P_LISTENING, /* P2P Listen (ROC) in progress */
+ P2P_ACTION_FRAME_TX_RX, /* P2P Action frame Tx in progress or waiting for a peer action frame Rx (i.e. in response to the Tx frame) */
+ P2P_GROUP_FORMED_CLI, /* P2P Group Formed - CLI role */
+ P2P_GROUP_FORMED_GO, /* P2P Group Formed - GO role */
+ /* NOTE: In P2P_LISTENING state if frame transmission is requested to driver then a peer response is ideally NOT expected.
+ * This is an assumption based on the fact that FIND would be stopped prior to group formation/connection.
+ * If driver were to receive a peer frame in P2P_LISTENING state then it would most probably be a REQUEST frame and the supplicant would respond to it.
+ * Hence the driver should get only RESPONSE frames for transmission in P2P_LISTENING state.
+ */
+};
+
+/* Values for slsi_dev.wlan_unsync_vif_state */
+enum slsi_wlan_state {
+ WLAN_UNSYNC_NO_VIF = 0, /* Initial state - Unsync vif is not present */
+ WLAN_UNSYNC_VIF_ACTIVE, /* Unsync vif is activated but no wlan procedure in progress */
+ WLAN_UNSYNC_VIF_TX /* Unsync vif is activated and wlan procedure in progress */
+};
+
+/* Wakelock timeouts */
+#define SLSI_WAKELOCK_TIME_MSEC_EAPOL (1000)
+
+/* Chip information read via MIB. */
+struct slsi_chip_info_mib {
+ u16 chip_version;
+};
+
+/* Platform build information read via MIB. */
+struct slsi_plat_info_mib {
+ u16 plat_build;
+};
+
+/* P2P States in text format for debug purposes */
+/* Map an enum slsi_p2p_states value to its name for debug logging.
+ * Returns "UNKNOWN" for values outside the enum range.
+ */
+static inline char *slsi_p2p_state_text(u8 state)
+{
+ static char *const p2p_state_names[] = {
+ [P2P_IDLE_NO_VIF] = "P2P_IDLE_NO_VIF",
+ [P2P_IDLE_VIF_ACTIVE] = "P2P_IDLE_VIF_ACTIVE",
+ [P2P_SCANNING] = "P2P_SCANNING",
+ [P2P_LISTENING] = "P2P_LISTENING",
+ [P2P_ACTION_FRAME_TX_RX] = "P2P_ACTION_FRAME_TX_RX",
+ [P2P_GROUP_FORMED_CLI] = "P2P_GROUP_FORMED_CLI",
+ [P2P_GROUP_FORMED_GO] = "P2P_GROUP_FORMED_GO",
+ };
+
+ if (state < sizeof(p2p_state_names) / sizeof(p2p_state_names[0]) &&
+     p2p_state_names[state])
+ return p2p_state_names[state];
+ return "UNKNOWN";
+}
+
+#define SLSI_WLAN_MAX_HCF_PLATFORM_LEN (128)
+
+/* One WLAN HCF (MIB configuration) file: its name, hash and cached contents. */
+struct slsi_dev_mib_info {
+ char *mib_file_name;
+ unsigned int mib_hash;
+
+ /* Cached File MIB Configuration values from User Space */
+ u8 *mib_data;
+ u32 mib_len;
+ char platform[SLSI_WLAN_MAX_HCF_PLATFORM_LEN];
+};
+
+#define SLSI_WLAN_MAX_MIB_FILE 2 /* Number of WLAN HCFs to load */
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+/* One MIB file entry registered with the log collection mechanism. */
+struct slsi_dev_mib_collect_file {
+ char file_name[32];
+ u16 len;
+ u8 *data;
+} __packed;
+
+/* MIB data exposed to the log collection mechanism. */
+struct slsi_dev_mib_collect {
+ bool enabled;
+ /* Serialize writers/readers */
+ spinlock_t in_collection;
+ char num_files;
+ /* +1 represents local_mib */
+ struct slsi_dev_mib_collect_file file[SLSI_WLAN_MAX_MIB_FILE + 1];
+};
+
+#endif
+
+/* Top-level driver/device context. Owns the netdev table, firmware service
+ * handles, global locks and device-wide configuration/state.
+ */
+struct slsi_dev {
+ /* Devices */
+ struct device *dev;
+ struct wiphy *wiphy;
+
+ struct slsi_hip hip; /* HIP bookkeeping block */
+ struct slsi_hip4 hip4_inst; /* The handler to parse to HIP */
+
+ struct scsc_wifi_cm_if cm_if; /* cm_if bookkeeping block */
+ struct scsc_mx *maxwell_core;
+ struct scsc_service_client mx_wlan_client;
+ struct scsc_service *service;
+ struct slsi_chip_info_mib chip_info_mib;
+ struct slsi_plat_info_mib plat_info_mib;
+ u16 reg_dom_version;
+
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex netdev_add_remove_mutex;
+#else
+ /* a std mutex */
+ struct mutex netdev_add_remove_mutex;
+#endif
+ /* mutex to protect dynamic netdev removal */
+ struct mutex netdev_remove_mutex;
+ int netdev_up_count;
+ struct net_device __rcu *netdev[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1]; /* 0 is reserved */
+ struct net_device __rcu *netdev_ap;
+ u8 netdev_addresses[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1][ETH_ALEN]; /* 0 is reserved */
+
+ int device_state; /* One of SLSI_DEVICE_STATE_* */
+
+ /* BoT */
+ atomic_t in_pause_state;
+
+ /* Locking used to control Starting and stopping the chip */
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex start_stop_mutex;
+#else
+ /* a std mutex */
+ struct mutex start_stop_mutex;
+#endif
+ /* UDI Logging */
+ struct slsi_log_clients log_clients;
+ void *uf_cdev;
+
+ /* ProcFS */
+ int procfs_instance;
+ struct proc_dir_entry *procfs_dir;
+
+ /* Configuration */
+ u8 hw_addr[ETH_ALEN];
+ struct slsi_dev_mib_info mib[SLSI_WLAN_MAX_MIB_FILE];
+ struct slsi_dev_mib_info local_mib;
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ struct slsi_dev_mib_collect collect_mib;
+#endif
+ char *maddr_file_name;
+ bool *term_udi_users; /* Try to terminate UDI users during unload */
+ int *sig_wait_cfm_timeout;
+
+ struct slsi_wake_lock wlan_wl;
+ struct slsi_wake_lock wlan_wl_mlme;
+ struct slsi_wake_lock wlan_wl_ma;
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+ struct wake_lock wlan_wl_roam;
+#endif
+ struct slsi_sig_send sig_wait;
+ struct slsi_skb_work rx_dbg_sap;
+ atomic_t tx_host_tag[SLSI_LLS_AC_MAX];
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex device_config_mutex;
+#else
+ /* a std mutex */
+ struct mutex device_config_mutex;
+#endif
+ struct slsi_dev_config device_config;
+
+ struct notifier_block inetaddr_notifier;
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ struct notifier_block inet6addr_notifier;
+#endif
+
+ struct workqueue_struct *device_wq; /* Driver Workqueue */
+ enum slsi_p2p_states p2p_state; /* Store current P2P operation */
+
+ enum slsi_wlan_state wlan_unsync_vif_state; /* Store current state of unsync wlan vif */
+
+ int current_tspec_id;
+ int tspec_error_code;
+ u8 p2p_group_exp_frame; /* Next expected Public action frame subtype from peer */
+ bool initial_scan;
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ struct slsi_gscan *gscan;
+ struct slsi_gscan_result *gscan_hash_table[SLSI_GSCAN_HASH_TABLE_SIZE];
+ int num_gscan_results;
+ int buffer_threshold;
+ int buffer_consumed;
+ struct slsi_bucket bucket[SLSI_GSCAN_MAX_BUCKETS];
+ struct list_head hotlist_results;
+ bool epno_active;
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ u8 scan_mac_addr[6];
+ bool scan_addr_set;
+#endif
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ int minor_prof;
+#endif
+ struct slsi_ba_session_rx rx_ba_buffer_pool[SLSI_MAX_RX_BA_SESSIONS];
+ struct slsi_spinlock rx_ba_buffer_pool_lock;
+ bool fail_reported;
+ bool p2p_certif; /* Set to true to identify p2p_certification testing is going on*/
+ bool mlme_blocked; /* When true do not send mlme signals to FW */
+ atomic_t debug_inds;
+ int recovery_next_state;
+ struct completion recovery_remove_completion;
+ struct completion recovery_stop_completion;
+ struct completion recovery_completed;
+ int recovery_status;
+ struct slsi_ssid_map ssid_map[SLSI_SCAN_SSID_MAP_MAX];
+ bool band_5g_supported;
+ int supported_2g_channels[14];
+ int supported_5g_channels[25];
+ int enabled_channel_count;
+ bool fw_ht_enabled;
+ u8 fw_ht_cap[4]; /* HT capabilities is 21 bytes but host is never interested in last 17 bytes*/
+ bool fw_vht_enabled;
+ u8 fw_vht_cap[4];
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ u8 wifi_sharing_5ghz_channel[8];
+ int valid_5g_freq[25];
+ int wifi_sharing_5g_restricted_channels[25];
+ int num_5g_restricted_channels;
+#endif
+ bool fw_2g_40mhz_enabled;
+ bool nan_enabled;
+ u16 assoc_result_code; /* Status of latest association in STA mode */
+ bool allow_switch_40_mhz; /* Used in AP cert to disable HT40 when not configured */
+ bool allow_switch_80_mhz; /* Used in AP cert to disable VHT when not configured */
+#ifdef CONFIG_SCSC_WLAN_AP_INFO_FILE
+ /* Parameters in '/data/vendor/conn/.softap.info' */
+ bool dualband_concurrency;
+ u32 softap_max_client;
+#endif
+ u32 fw_dwell_time;
+ int lls_num_radio;
+
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ struct slsi_mutex logger_mutex;
+#else
+ /* a std mutex */
+ struct mutex logger_mutex;
+#endif
+ struct slsi_traffic_mon_clients traffic_mon_clients;
+ /*Store vif index corresponding to rtt id for FTM*/
+ u16 rtt_vif[8];
+ bool acs_channel_switched;
+ int recovery_timeout; /* ms autorecovery completion timeout */
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ bool fw_mac_randomization_enabled;
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ bool enhanced_pkt_filter_enabled;
+#endif
+};
+
+/* Compact representation of channels a ESS has been seen on
+ * This is sized correctly for the Channels we currently support,
+ * 2.4Ghz Channels 1 - 14
+ * 5 Ghz Channels Uni1, Uni2 and Uni3
+ */
+struct slsi_roaming_network_map_entry {
+ struct list_head list;
+ unsigned long last_seen_jiffies; /* Timestamp of the last time we saw this ESS */
+ struct cfg80211_ssid ssid; /* SSID of the ESS */
+ u8 initial_bssid[ETH_ALEN]; /* Bssid of the first ap seen in this ESS */
+ bool only_one_ap_seen; /* True while only a single AP has been seen for this ESS */
+ u16 channels_24_ghz; /* 2.4 Ghz Channels Bit Map */
+ /* 5 Ghz Channels Bit Map
+ * channels_5_ghz & 0x000000FF = 4 Uni1 Channels
+ * channels_5_ghz & 0x00FFFF00 = 15 Uni2 Channels
+ * channels_5_ghz & 0xFF000000 = 5 Uni3 Channels
+ */
+ u32 channels_5_ghz;
+};
+
+#define LLC_SNAP_HDR_LEN 8
+/* 802.2 LLC/SNAP header (LLC_SNAP_HDR_LEN bytes on the wire).
+ * NOTE(review): snap_type is big-endian on the wire; __be16 would document
+ * this better - confirm byte-order handling at the call sites.
+ */
+struct llc_snap_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ u16 snap_type;
+} __packed;
+
+void slsi_rx_data_deliver_skb(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, bool from_ba_timer);
+void slsi_rx_dbg_sap_work(struct work_struct *work);
+void slsi_rx_netdev_data_work(struct work_struct *work);
+void slsi_rx_netdev_mlme_work(struct work_struct *work);
+int slsi_rx_enqueue_netdev_mlme(struct slsi_dev *sdev, struct sk_buff *skb, u16 vif);
+void slsi_rx_scan_pass_to_cfg80211(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_buffered_frames(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer);
+int slsi_rx_blocking_signals(struct slsi_dev *sdev, struct sk_buff *skb);
+void slsi_scan_complete(struct slsi_dev *sdev, struct net_device *dev, u16 scan_id, bool aborted);
+
+void slsi_tx_pause_queues(struct slsi_dev *sdev);
+void slsi_tx_unpause_queues(struct slsi_dev *sdev);
+int slsi_tx_control(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+int slsi_tx_data(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+int slsi_tx_data_lower(struct slsi_dev *sdev, struct sk_buff *skb);
+bool slsi_is_test_mode_enabled(void);
+bool slsi_is_rf_test_mode_enabled(void);
+int slsi_check_rf_test_mode(void);
+void slsi_regd_deinit(struct slsi_dev *sdev);
+void slsi_init_netdev_mac_addr(struct slsi_dev *sdev);
+bool slsi_dev_lls_supported(void);
+bool slsi_dev_gscan_supported(void);
+bool slsi_dev_epno_supported(void);
+bool slsi_dev_vo_vi_block_ack(void);
+int slsi_dev_get_scan_result_count(void);
+bool slsi_dev_llslogs_supported(void);
+int slsi_dev_nan_supported(struct slsi_dev *sdev);
+void slsi_regd_init(struct slsi_dev *sdev);
+bool slsi_dev_rtt_supported(void);
+
+/* Allocate the next host tag for traffic queue @tq. */
+static inline u16 slsi_tx_host_tag(struct slsi_dev *sdev, enum slsi_traffic_q tq)
+{
+ /* A host tag encodes the traffic queue in bits 0-1 and a running
+ * sequence number in bits 2-15. Adding 4 steps the sequence while
+ * leaving the low queue-identifier bits of the counter unchanged.
+ */
+ int raw_tag = atomic_add_return(4, &sdev->tx_host_tag[tq]);
+
+ return (u16)raw_tag;
+}
+
+/* Allocate a host tag for a management frame; the traffic queue used for
+ * the tag is irrelevant, so draw from the first queue's counter.
+ */
+static inline u16 slsi_tx_mgmt_host_tag(struct slsi_dev *sdev)
+{
+ u16 tag = slsi_tx_host_tag(sdev, 0);
+
+ return tag;
+}
+
+/* Look up the net_device for @ifnum under RCU protection.
+ * Caller must hold rcu_read_lock(); returns NULL for out-of-range indices.
+ */
+static inline struct net_device *slsi_get_netdev_rcu(struct slsi_dev *sdev, u16 ifnum)
+{
+ WARN_ON(!rcu_read_lock_held());
+
+ /* netdev[] has CONFIG_SCSC_WLAN_MAX_INTERFACES + 1 slots (index 0 reserved);
+ * silently reject out-of-range indices (no WARN here, keeps Coverity quiet).
+ */
+ if (ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES)
+ return NULL;
+
+ return rcu_dereference(sdev->netdev[ifnum]);
+}
+
+/* Look up the net_device for @ifnum; caller must already hold
+ * netdev_add_remove_mutex. Returns NULL (with a WARN) if out of range.
+ */
+static inline struct net_device *slsi_get_netdev_locked(struct slsi_dev *sdev, u16 ifnum)
+{
+ struct net_device *ndev = NULL;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
+
+ if (ifnum <= CONFIG_SCSC_WLAN_MAX_INTERFACES)
+ ndev = sdev->netdev[ifnum];
+ else
+ WARN(1, "ifnum:%d", ifnum); /* WARN() is used like this to avoid Coverity Error */
+
+ return ndev;
+}
+
+/* Look up the net_device for @ifnum, serialising against interface
+ * add/remove while the index is resolved.
+ */
+static inline struct net_device *slsi_get_netdev(struct slsi_dev *sdev, u16 ifnum)
+{
+ struct net_device *ndev;
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ ndev = slsi_get_netdev_locked(sdev, ifnum);
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+ return ndev;
+}
+
+/* Classify a peer as 11b or 11g from its Supported Rates IE.
+ * @peer_ie points at the IE header: peer_ie[1] is the length, rate octets
+ * follow. Each rate octet is in 500 kb/s units with the MSB used as the
+ * basic-rate flag, so (octet & 0x7F) / 2 yields Mb/s. Any rate above
+ * 11 Mb/s implies an ERP (11g) capable peer.
+ */
+static inline int slsi_get_supported_mode(const u8 *peer_ie)
+{
+ u8 num_rates = peer_ie[1];
+ const u8 *rates = &peer_ie[2];
+ u8 idx;
+
+ for (idx = 0; idx < num_rates; idx++) {
+ int rate_mbps = (rates[idx] & 0x7F) / 2;
+
+ if (rate_mbps > 11)
+ return SLSI_80211_MODE_11G;
+ }
+ return SLSI_80211_MODE_11B;
+}
+
+/* Names of full mode HCF files */
+extern char *slsi_mib_file;
+extern char *slsi_mib_file2;
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Note: this is an auto-generated file. */
+
+#ifndef _FAPI_H__
+#define _FAPI_H__
+
+#include <linux/kernel.h>
+#include <linux/if_ether.h>
+#include "utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CONFIG_SCSC_SMAPPER
+/* Driver-private per-skb state kept in skb->cb when SMAPPER is not compiled
+ * in. sig_length/data_length describe the FAPI signal and bulk-data portions
+ * of the frame; colour appears to be opaque firmware flow information -
+ * TODO confirm exact semantics.
+ */
+struct slsi_skb_cb {
+ u32 sig_length;
+ u32 data_length;
+ u32 frame_format;
+ u32 colour;
+};
+
+/* Return the driver control block stored in skb->cb. */
+static inline struct slsi_skb_cb *slsi_skb_cb_get(struct sk_buff *skb)
+{
+ return (struct slsi_skb_cb *)&skb->cb[0];
+}
+
+/* Zero-initialise the control block area of @skb and return it. */
+static inline struct slsi_skb_cb *slsi_skb_cb_init(struct sk_buff *skb)
+{
+ struct slsi_skb_cb *cb = slsi_skb_cb_get(skb);
+
+ /* Catch at build time any growth of slsi_skb_cb beyond skb->cb. */
+ BUILD_BUG_ON(sizeof(struct slsi_skb_cb) > sizeof(skb->cb));
+
+ memset(cb, 0, sizeof(*cb));
+ return cb;
+}
+#endif
+
+#define FAPI_SIG_TYPE_MASK 0x0F00
+#define FAPI_SIG_TYPE_REQ 0x0000
+#define FAPI_SIG_TYPE_CFM 0x0100
+#define FAPI_SIG_TYPE_RES 0x0200
+#define FAPI_SIG_TYPE_IND 0x0300
+
+#define FAPI_SAP_TYPE_MASK 0xF000
+#define FAPI_SAP_TYPE_MA 0x1000
+#define FAPI_SAP_TYPE_MLME 0x2000
+#define FAPI_SAP_TYPE_DEBUG 0x8000
+#define FAPI_SAP_TYPE_TEST 0x9000
+
+#define FAPI_DEBUG_SAP_ENG_VERSION 0x0001
+#define FAPI_DATA_SAP_ENG_VERSION 0x0001
+#define FAPI_CONTROL_SAP_ENG_VERSION 0x0006
+#define FAPI_TEST_SAP_ENG_VERSION 0x000e
+#define FAPI_DEBUG_SAP_VERSION 0x0d03
+#define FAPI_CONTROL_SAP_VERSION 0x0e00
+#define FAPI_TEST_SAP_VERSION 0x0e00
+#define FAPI_DATA_SAP_VERSION 0x0e00
+
+#define FAPI_ACLPOLICY_BLACKLIST 0x0000
+#define FAPI_ACLPOLICY_WHITELIST 0x0001
+
+#define FAPI_ACTION_STOP 0x0000
+#define FAPI_ACTION_START 0x0001
+
+#define FAPI_APFFILTERMODE_SUSPEND 0x0001
+#define FAPI_APFFILTERMODE_ACTIVE 0x0002
+
+#define FAPI_AUTHENTICATIONTYPE_OPEN_SYSTEM 0x0000
+#define FAPI_AUTHENTICATIONTYPE_SHARED_KEY 0x0001
+#define FAPI_AUTHENTICATIONTYPE_SAE 0x0003
+#define FAPI_AUTHENTICATIONTYPE_LEAP 0x0080
+
+#define FAPI_BANDWIDTH_20_MHZ 0x0
+#define FAPI_BANDWIDTH_40_MHZ 0x1
+#define FAPI_BANDWIDTH_80_MHZ 0x2
+#define FAPI_BANDWIDTH_160_MHZ 0x3
+
+#define FAPI_BLOCKACKPOLICY_DELAYED_BLOCKACK 0x0
+#define FAPI_BLOCKACKPOLICY_IMMEDIATE_BLOCKACK 0x1
+
+#define FAPI_BULKDATADESCRIPTOR_INLINE 0x0000
+#define FAPI_BULKDATADESCRIPTOR_SMAPPER 0x0001
+
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ 0x00
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ 0x01
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ 0x02
+
+#define FAPI_CONNECTIONTYPE_WLAN_INFRASTRUCTURE 0x0000
+#define FAPI_CONNECTIONTYPE_P2P_OPERATION 0x0001
+#define FAPI_CONNECTIONTYPE_NAN_FURTHER_SERVICE_SLOT 0x0004
+#define FAPI_CONNECTIONTYPE_WLAN_RANGING 0x0005
+
+#define FAPI_CWSTARTFLAGS_NONE 0x0000
+#define FAPI_CWSTARTFLAGS_SCAN_CHANNEL 0x0001
+
+#define FAPI_CWTYPE_SINE 0x0000
+#define FAPI_CWTYPE_RAMP 0x0001
+#define FAPI_CWTYPE_TWO_TONE 0x0002
+#define FAPI_CWTYPE_DC 0x0003
+#define FAPI_CWTYPE_PRN 0x0004
+
+#define FAPI_DATARATE_11B20_1MBPS 0x0001
+#define FAPI_DATARATE_11B20_2MBPS 0x0002
+#define FAPI_DATARATE_11B20_5M5BPS 0x0005
+#define FAPI_DATARATE_11B20_11MBPS 0x000b
+#define FAPI_DATARATE_11A20_6MBPS 0x2006
+#define FAPI_DATARATE_11A20_9MBPS 0x2009
+#define FAPI_DATARATE_11A20_12MBPS 0x200c
+#define FAPI_DATARATE_11A20_18MBPS 0x2012
+#define FAPI_DATARATE_11A20_24MBPS 0x2018
+#define FAPI_DATARATE_11A20_36MBPS 0x2024
+#define FAPI_DATARATE_11A20_48MBPS 0x2030
+#define FAPI_DATARATE_11A20_54MBPS 0x2036
+#define FAPI_DATARATE_11N20_6M5BPS 0x4000
+#define FAPI_DATARATE_11N20_13MBPS 0x4001
+#define FAPI_DATARATE_11N20_19M5BPS 0x4002
+#define FAPI_DATARATE_11N20_26MBPS 0x4003
+#define FAPI_DATARATE_11N20_39MBPS 0x4004
+#define FAPI_DATARATE_11N20_52MBPS 0x4005
+#define FAPI_DATARATE_11N20_58M5BPS 0x4006
+#define FAPI_DATARATE_11N20_65MBPS 0x4007
+#define FAPI_DATARATE_11N20_7M2BPS_SGI 0x4040
+#define FAPI_DATARATE_11N20_14M4BPS_SGI 0x4041
+#define FAPI_DATARATE_11N20_21M7BPS_SGI 0x4042
+#define FAPI_DATARATE_11N20_28M9BPS_SGI 0x4043
+#define FAPI_DATARATE_11N20_43M3BPS_SGI 0x4044
+#define FAPI_DATARATE_11N20_57M8BPS_SGI 0x4045
+#define FAPI_DATARATE_11N20_65MBPS_SGI 0x4046
+#define FAPI_DATARATE_11N20_72M2BPS_SGI 0x4047
+#define FAPI_DATARATE_11N20_13MBPS_NSS2 0x4088
+#define FAPI_DATARATE_11N20_26MBPS_NSS2 0x4089
+#define FAPI_DATARATE_11N20_39MBPS_NSS2 0x408a
+#define FAPI_DATARATE_11N20_52MBPS_NSS2 0x408b
+#define FAPI_DATARATE_11N20_78MBPS_NSS2 0x408c
+#define FAPI_DATARATE_11N20_104MBPS_NSS2 0x408d
+#define FAPI_DATARATE_11N20_117MBPS_NSS2 0x408e
+#define FAPI_DATARATE_11N20_130MBPS_NSS2 0x408f
+#define FAPI_DATARATE_11N20_14M4BPS_SGI_NSS2 0x40c8
+#define FAPI_DATARATE_11N20_28M9BPS_SGI_NSS2 0x40c9
+#define FAPI_DATARATE_11N20_43M3BPS_SGI_NSS2 0x40ca
+#define FAPI_DATARATE_11N20_57M8BPS_SGI_NSS2 0x40cb
+#define FAPI_DATARATE_11N20_86M7BPS_SGI_NSS2 0x40cc
+#define FAPI_DATARATE_11N20_115M6BPS_SGI_NSS2 0x40cd
+#define FAPI_DATARATE_11N20_130MBPS_SGI_NSS2 0x40ce
+#define FAPI_DATARATE_11N20_144M4BPS_SGI_NSS2 0x40cf
+#define FAPI_DATARATE_11N40_13M5BPS 0x4400
+#define FAPI_DATARATE_11N40_27MBPS 0x4401
+#define FAPI_DATARATE_11N40_40M5BPS 0x4402
+#define FAPI_DATARATE_11N40_54MBPS 0x4403
+#define FAPI_DATARATE_11N40_81MBPS 0x4404
+#define FAPI_DATARATE_11N40_108MBPS 0x4405
+#define FAPI_DATARATE_11N40_121M5BPS 0x4406
+#define FAPI_DATARATE_11N40_135MBPS 0x4407
+#define FAPI_DATARATE_11N40_6MBPS 0x4420
+#define FAPI_DATARATE_11N40_15MBPS_SGI 0x4440
+#define FAPI_DATARATE_11N40_30MBPS_SGI 0x4441
+#define FAPI_DATARATE_11N40_45MBPS_SGI 0x4442
+#define FAPI_DATARATE_11N40_60MBPS_SGI 0x4443
+#define FAPI_DATARATE_11N40_90MBPS_SGI 0x4444
+#define FAPI_DATARATE_11N40_120MBPS_SGI 0x4445
+#define FAPI_DATARATE_11N40_135MBPS_SGI 0x4446
+#define FAPI_DATARATE_11N40_150MBPS_SGI 0x4447
+#define FAPI_DATARATE_11N40_6M7BPS_SGI 0x4460
+#define FAPI_DATARATE_11N40_27MBPS_NSS2 0x4488
+#define FAPI_DATARATE_11N40_54MBPS_NSS2 0x4489
+#define FAPI_DATARATE_11N40_81MBPS_NSS2 0x448a
+#define FAPI_DATARATE_11N40_108MBPS_NSS2 0x448b
+#define FAPI_DATARATE_11N40_162MBPS_NSS2 0x448c
+#define FAPI_DATARATE_11N40_216MBPS_NSS2 0x448d
+#define FAPI_DATARATE_11N40_243MBPS_NSS2 0x448e
+#define FAPI_DATARATE_11N40_270MBPS_NSS2 0x448f
+#define FAPI_DATARATE_11N40_30MBPS_SGI_NSS2 0x44c8
+#define FAPI_DATARATE_11N40_60MBPS_SGI_NSS2 0x44c9
+#define FAPI_DATARATE_11N40_90MBPS_SGI_NSS2 0x44ca
+#define FAPI_DATARATE_11N40_120MBPS_SGI_NSS2 0x44cb
+#define FAPI_DATARATE_11N40_180MBPS_SGI_NSS2 0x44cc
+#define FAPI_DATARATE_11N40_240MBPS_SGI_NSS2 0x44cd
+#define FAPI_DATARATE_11N40_270MBPS_SGI_NSS2 0x44ce
+#define FAPI_DATARATE_11N40_300MBPS_SGI_NSS2 0x44cf
+#define FAPI_DATARATE_11AC20_6M5BPS 0x6000
+#define FAPI_DATARATE_11AC20_13MBPS 0x6001
+#define FAPI_DATARATE_11AC20_19M5BPS 0x6002
+#define FAPI_DATARATE_11AC20_26MBPS 0x6003
+#define FAPI_DATARATE_11AC20_39MBPS 0x6004
+#define FAPI_DATARATE_11AC20_52MBPS 0x6005
+#define FAPI_DATARATE_11AC20_58M5BPS 0x6006
+#define FAPI_DATARATE_11AC20_65MBPS 0x6007
+#define FAPI_DATARATE_11AC20_78MBPS 0x6008
+#define FAPI_DATARATE_11AC20_7M2BPS_SGI 0x6040
+#define FAPI_DATARATE_11AC20_14M4BPS_SGI 0x6041
+#define FAPI_DATARATE_11AC20_21M7BPS_SGI 0x6042
+#define FAPI_DATARATE_11AC20_28M9BPS_SGI 0x6043
+#define FAPI_DATARATE_11AC20_43M3BPS_SGI 0x6044
+#define FAPI_DATARATE_11AC20_57M8BPS_SGI 0x6045
+#define FAPI_DATARATE_11AC20_65MBPS_SGI 0x6046
+#define FAPI_DATARATE_11AC20_72M2BPS_SGI 0x6047
+#define FAPI_DATARATE_11AC20_86M7BPS_SGI 0x6048
+#define FAPI_DATARATE_11AC20_13MBPS_NSS2 0x6080
+#define FAPI_DATARATE_11AC20_26MBPS_NSS2 0x6081
+#define FAPI_DATARATE_11AC20_39MBPS_NSS2 0x6082
+#define FAPI_DATARATE_11AC20_52MBPS_NSS2 0x6083
+#define FAPI_DATARATE_11AC20_78MBPS_NSS2 0x6084
+#define FAPI_DATARATE_11AC20_104MBPS_NSS2 0x6085
+#define FAPI_DATARATE_11AC20_117MBPS_NSS2 0x6086
+#define FAPI_DATARATE_11AC20_130MBPS_NSS2 0x6087
+#define FAPI_DATARATE_11AC20_156MBPS_NSS2 0x6088
+#define FAPI_DATARATE_11AC20_14M4BPS_SGI_NSS2 0x60c0
+#define FAPI_DATARATE_11AC20_28M9BPS_SGI_NSS2 0x60c1
+#define FAPI_DATARATE_11AC20_43M3BPS_SGI_NSS2 0x60c2
+#define FAPI_DATARATE_11AC20_57M8BPS_SGI_NSS2 0x60c3
+#define FAPI_DATARATE_11AC20_86M7BPS_SGI_NSS2 0x60c4
+#define FAPI_DATARATE_11AC20_115M6BPS_SGI_NSS2 0x60c5
+#define FAPI_DATARATE_11AC20_130MBPS_SGI_NSS2 0x60c6
+#define FAPI_DATARATE_11AC20_144M4BPS_SGI_NSS2 0x60c7
+#define FAPI_DATARATE_11AC20_173M3BPS_SGI_NSS2 0x60c8
+#define FAPI_DATARATE_11AC40_13M5BPS 0x6400
+#define FAPI_DATARATE_11AC40_27MBPS 0x6401
+#define FAPI_DATARATE_11AC40_40M5BPS 0x6402
+#define FAPI_DATARATE_11AC40_54MBPS 0x6403
+#define FAPI_DATARATE_11AC40_81MBPS 0x6404
+#define FAPI_DATARATE_11AC40_108MBPS 0x6405
+#define FAPI_DATARATE_11AC40_121M5BPS 0x6406
+#define FAPI_DATARATE_11AC40_135MBPS 0x6407
+#define FAPI_DATARATE_11AC40_162MBPS 0x6408
+#define FAPI_DATARATE_11AC40_180MBPS 0x6409
+#define FAPI_DATARATE_11AC40_15MBPS_SGI 0x6440
+#define FAPI_DATARATE_11AC40_30MBPS_SGI 0x6441
+#define FAPI_DATARATE_11AC40_45MBPS_SGI 0x6442
+#define FAPI_DATARATE_11AC40_60MBPS_SGI 0x6443
+#define FAPI_DATARATE_11AC40_90MBPS_SGI 0x6444
+#define FAPI_DATARATE_11AC40_120MBPS_SGI 0x6445
+#define FAPI_DATARATE_11AC40_135MBPS_SGI 0x6446
+#define FAPI_DATARATE_11AC40_150MBPS_SGI 0x6447
+#define FAPI_DATARATE_11AC40_180MBPS_SGI 0x6448
+#define FAPI_DATARATE_11AC40_200MBPS_SGI 0x6449
+#define FAPI_DATARATE_11AC40_27MBPS_NSS2 0x6480
+#define FAPI_DATARATE_11AC40_54MBPS_NSS2 0x6481
+#define FAPI_DATARATE_11AC40_81MBPS_NSS2 0x6482
+#define FAPI_DATARATE_11AC40_108MBPS_NSS2 0x6483
+#define FAPI_DATARATE_11AC40_162MBPS_NSS2 0x6484
+#define FAPI_DATARATE_11AC40_216MBPS_NSS2 0x6485
+#define FAPI_DATARATE_11AC40_243MBPS_NSS2 0x6486
+#define FAPI_DATARATE_11AC40_270MBPS_NSS2 0x6487
+#define FAPI_DATARATE_11AC40_324MBPS_NSS2 0x6488
+#define FAPI_DATARATE_11AC40_360MBPS_NSS2 0x6489
+#define FAPI_DATARATE_11AC40_30MBPS_SGI_NSS2 0x64c0
+#define FAPI_DATARATE_11AC40_60MBPS_SGI_NSS2 0x64c1
+#define FAPI_DATARATE_11AC40_90MBPS_SGI_NSS2 0x64c2
+#define FAPI_DATARATE_11AC40_120MBPS_SGI_NSS2 0x64c3
+#define FAPI_DATARATE_11AC40_180MBPS_SGI_NSS2 0x64c4
+#define FAPI_DATARATE_11AC40_240MBPS_SGI_NSS2 0x64c5
+#define FAPI_DATARATE_11AC40_270MBPS_SGI_NSS2 0x64c6
+#define FAPI_DATARATE_11AC40_300MBPS_SGI_NSS2 0x64c7
+#define FAPI_DATARATE_11AC40_360MBPS_SGI_NSS2 0x64c8
+#define FAPI_DATARATE_11AC40_400MBPS_SGI_NSS2 0x64c9
+#define FAPI_DATARATE_11AC80_29M3BPS 0x6800
+#define FAPI_DATARATE_11AC80_58M5BPS 0x6801
+#define FAPI_DATARATE_11AC80_87M8BPS 0x6802
+#define FAPI_DATARATE_11AC80_117MBPS 0x6803
+#define FAPI_DATARATE_11AC80_175M5BPS 0x6804
+#define FAPI_DATARATE_11AC80_234MBPS 0x6805
+#define FAPI_DATARATE_11AC80_263M3BPS 0x6806
+#define FAPI_DATARATE_11AC80_292M5BPS 0x6807
+#define FAPI_DATARATE_11AC80_351MBPS 0x6808
+#define FAPI_DATARATE_11AC80_390MBPS 0x6809
+#define FAPI_DATARATE_11AC80_32M5BPS_SGI 0x6840
+#define FAPI_DATARATE_11AC80_65MBPS_SGI 0x6841
+#define FAPI_DATARATE_11AC80_97M5BPS_SGI 0x6842
+#define FAPI_DATARATE_11AC80_130MBPS_SGI 0x6843
+#define FAPI_DATARATE_11AC80_195MBPS_SGI 0x6844
+#define FAPI_DATARATE_11AC80_260MBPS_SGI 0x6845
+#define FAPI_DATARATE_11AC80_292M5BPS_SGI 0x6846
+#define FAPI_DATARATE_11AC80_325MBPS_SGI 0x6847
+#define FAPI_DATARATE_11AC80_390MBPS_SGI 0x6848
+#define FAPI_DATARATE_11AC80_433M3BPS_SGI 0x6849
+#define FAPI_DATARATE_11AC80_58M5BPS_NSS2 0x6880
+#define FAPI_DATARATE_11AC80_117MBPS_NSS2 0x6881
+#define FAPI_DATARATE_11AC80_175M5BPS_NSS2 0x6882
+#define FAPI_DATARATE_11AC80_234MBPS_NSS2 0x6883
+#define FAPI_DATARATE_11AC80_351MBPS_NSS2 0x6884
+#define FAPI_DATARATE_11AC80_468MBPS_NSS2 0x6885
+#define FAPI_DATARATE_11AC80_526M5BPS_NSS2 0x6886
+#define FAPI_DATARATE_11AC80_585MBPS_NSS2 0x6887
+#define FAPI_DATARATE_11AC80_702MBPS_NSS2 0x6888
+#define FAPI_DATARATE_11AC80_780MBPS_NSS2 0x6889
+#define FAPI_DATARATE_11AC80_65MBPS_SGI_NSS2 0x68c0
+#define FAPI_DATARATE_11AC80_130MBPS_SGI_NSS2 0x68c1
+#define FAPI_DATARATE_11AC80_195MBPS_SGI_NSS2 0x68c2
+#define FAPI_DATARATE_11AC80_260MBPS_SGI_NSS2 0x68c3
+#define FAPI_DATARATE_11AC80_390MBPS_SGI_NSS2 0x68c4
+#define FAPI_DATARATE_11AC80_520MBPS_SGI_NSS2 0x68c5
+#define FAPI_DATARATE_11AC80_585MBPS_SGI_NSS2 0x68c6
+#define FAPI_DATARATE_11AC80_650MBPS_SGI_NSS2 0x68c7
+#define FAPI_DATARATE_11AC80_780MBPS_SGI_NSS2 0x68c8
+#define FAPI_DATARATE_11AC80_866M7BPS_SGI_NSS2 0x68c9
+#define FAPI_DATARATE_CTR_TOTAL 0xe000
+#define FAPI_DATARATE_CTR_NO_ERROR 0xe001
+#define FAPI_DATARATE_CTR_CRC_ERROR 0xe002
+#define FAPI_DATARATE_CTR_BAD_SIGNAL 0xe003
+#define FAPI_DATARATE_CTR_STBC 0xe004
+#define FAPI_DATARATE_CTR_DUPLICATE 0xe005
+#define FAPI_DATARATE_CTR_ERROR 0xe006
+#define FAPI_DATARATE_CTR_LDPC 0xe007
+#define FAPI_DATARATE_CTR_BEAMFORMED 0xe008
+
+#define FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME 0x0000
+#define FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME 0x0001
+#define FAPI_DATAUNITDESCRIPTOR_AMSDU_SUBFRAME 0x0002
+#define FAPI_DATAUNITDESCRIPTOR_AMSDU 0x0003
+#define FAPI_DATAUNITDESCRIPTOR_TCP_ACK 0x0004
+
+#define FAPI_DEVICEROLE_INFRASTRUCTURE_STATION 0x0001
+#define FAPI_DEVICEROLE_P2P_GO 0x0002
+#define FAPI_DEVICEROLE_P2P_DEVICE 0x0003
+#define FAPI_DEVICEROLE_P2P_CLIENT 0x0004
+
+#define FAPI_DEVICESTATE_IDLE 0
+#define FAPI_DEVICESTATE_RX_RUNNING 1
+#define FAPI_DEVICESTATE_TX_RUNNING 2
+#define FAPI_DEVICESTATE_CW_RUNNING 3
+#define FAPI_DEVICESTATE_BIST_RUNNING 4
+
+#define FAPI_DFSREGULATORY_UNKNOWN 0x0000
+#define FAPI_DFSREGULATORY_FCC 0x0001
+#define FAPI_DFSREGULATORY_ETSI 0x0002
+#define FAPI_DFSREGULATORY_JAPAN 0x0003
+#define FAPI_DFSREGULATORY_GLOBAL 0x0004
+#define FAPI_DFSREGULATORY_CHINA 0x0006
+
+#define FAPI_DIRECTION_TRANSMIT 0x0000
+#define FAPI_DIRECTION_RECEIVE 0x0001
+
+#define FAPI_ENDPOINT_HOSTIO 0x0001
+#define FAPI_ENDPOINT_DPLP 0x0002
+
+#define FAPI_EPNOPOLICY_HIDDEN 0x0001
+#define FAPI_EPNOPOLICY_A_BAND 0x0002
+#define FAPI_EPNOPOLICY_G_BAND 0x0004
+#define FAPI_EPNOPOLICY_STRICT_MATCH 0x0008
+#define FAPI_EPNOPOLICY_SAME_NETWORK 0x0010
+#define FAPI_EPNOPOLICY_AUTH_OPEN 0x0100
+#define FAPI_EPNOPOLICY_AUTH_PSK 0x0200
+#define FAPI_EPNOPOLICY_AUTH_EAPOL 0x0400
+
+#define FAPI_EVENT_WIFI_EVENT_ASSOCIATION_REQUESTED 0x0000
+#define FAPI_EVENT_WIFI_EVENT_AUTH_COMPLETE 0x0001
+#define FAPI_EVENT_WIFI_EVENT_ASSOC_COMPLETE 0x0002
+#define FAPI_EVENT_WIFI_EVENT_FW_AUTH_STARTED 0x0003
+#define FAPI_EVENT_WIFI_EVENT_FW_ASSOC_STARTED 0x0004
+#define FAPI_EVENT_WIFI_EVENT_FW_RE_ASSOC_STARTED 0x0005
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_SCAN_REQUESTED 0x0006
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND 0x0007
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_SCAN_COMPLETE 0x0008
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_STARTED 0x0009
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_COMPLETE 0x000a
+#define FAPI_EVENT_WIFI_EVENT_DISASSOCIATION_REQUESTED 0x000b
+#define FAPI_EVENT_WIFI_EVENT_RE_ASSOCIATION_REQUESTED 0x000c
+#define FAPI_EVENT_WIFI_EVENT_ROAM_REQUESTED 0x000d
+#define FAPI_EVENT_WIFI_EVENT_BEACON_RECEIVED 0x000e
+#define FAPI_EVENT_WIFI_EVENT_ROAM_SCAN_STARTED 0x000f
+#define FAPI_EVENT_WIFI_EVENT_ROAM_SCAN_COMPLETE 0x0010
+#define FAPI_EVENT_WIFI_EVENT_ROAM_SEARCH_STARTED 0x0011
+#define FAPI_EVENT_WIFI_EVENT_ROAM_SEARCH_STOPPED 0x0012
+#define FAPI_EVENT_WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT 0x0014
+#define FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START 0x0015
+#define FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP 0x0016
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED 0x0017
+#define FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED 0x0018
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED 0x001a
+#define FAPI_EVENT_WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE 0x001b
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_SCO_START 0x001c
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_SCO_STOP 0x001d
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_SCAN_START 0x001e
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_SCAN_STOP 0x001f
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_HID_START 0x0020
+#define FAPI_EVENT_WIFI_EVENT_BT_COEX_BT_HID_STOP 0x0021
+#define FAPI_EVENT_WIFI_EVENT_ROAM_AUTH_STARTED 0x0022
+#define FAPI_EVENT_WIFI_EVENT_ROAM_AUTH_COMPLETE 0x0023
+#define FAPI_EVENT_WIFI_EVENT_ROAM_ASSOC_STARTED 0x0024
+#define FAPI_EVENT_WIFI_EVENT_ROAM_ASSOC_COMPLETE 0x0025
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_STOP 0x0026
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_CYCLE_STARTED 0x0027
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_CYCLE_COMPLETED 0x0028
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_BUCKET_STARTED 0x0029
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_BUCKET_COMPLETED 0x002a
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE 0x002b
+#define FAPI_EVENT_WIFI_EVENT_G_SCAN_CAPABILITIES 0x002c
+#define FAPI_EVENT_WIFI_EVENT_ROAM_CANDIDATE_FOUND 0x002d
+#define FAPI_EVENT_WIFI_EVENT_ROAM_SCAN_CONFIG 0x002e
+#define FAPI_EVENT_WIFI_EVENT_AUTH_TIMEOUT 0x002f
+#define FAPI_EVENT_WIFI_EVENT_ASSOC_TIMEOUT 0x0030
+#define FAPI_EVENT_WIFI_EVENT_MEM_ALLOC_FAILURE 0x0031
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_ADD 0x0032
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_REMOVE 0x0033
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND 0x0034
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED 0x0035
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND 0x0036
+#define FAPI_EVENT_WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE 0x0037
+#define FAPI_EVENT_WIFI_EVENT_BLACKOUT_START 0x0064
+#define FAPI_EVENT_WIFI_EVENT_BLACKOUT_STOP 0x0065
+#define FAPI_EVENT_WIFI_EVENT_NAN_PUBLISH_TERMINATED 0x0100
+#define FAPI_EVENT_WIFI_EVENT_NAN_SUBSCRIBE_TERMINATED 0x0101
+#define FAPI_EVENT_WIFI_EVENT_NAN_MATCH_EXPIRED 0x0102
+#define FAPI_EVENT_WIFI_EVENT_NAN_ADDRESS_CHANGED 0x0103
+#define FAPI_EVENT_WIFI_EVENT_NAN_CLUSTER_STARTED 0x0104
+#define FAPI_EVENT_WIFI_EVENT_NAN_CLUSTER_JOINED 0x0105
+#define FAPI_EVENT_WIFI_EVENT_NAN_TRANSMIT_FOLLOWUP 0x0106
+
+#define FAPI_HIDDENSSID_NOT_HIDDEN 0x0000
+#define FAPI_HIDDENSSID_HIDDEN_ZERO_LENGTH 0x0001
+#define FAPI_HIDDENSSID_HIDDEN_ZERO_DATA 0x0002
+
+#define FAPI_HOSTSTATE_LCD_ACTIVE 0x0001
+#define FAPI_HOSTSTATE_CELLULAR_ACTIVE 0x0002
+#define FAPI_HOSTSTATE_SAR_ACTIVE 0x0004
+#define FAPI_HOSTSTATE_GRIP_ACTIVE 0x0040
+#define FAPI_HOSTSTATE_LOW_LATENCY_ACTIVE 0x0080
+
+#define FAPI_HT_NON_HT_RATE 0x1
+#define FAPI_HT_HT_RATE 0x2
+#define FAPI_HT_VHT_RATE 0x3
+
+#define FAPI_KEYTYPE_GROUP 0x0000
+#define FAPI_KEYTYPE_PAIRWISE 0x0001
+#define FAPI_KEYTYPE_WEP 0x0002
+#define FAPI_KEYTYPE_IGTK 0x0003
+#define FAPI_KEYTYPE_PMK 0x0004
+#define FAPI_KEYTYPE_FIRST_ILLEGAL 0x0005
+
+#define FAPI_MESSAGETYPE_EAP_MESSAGE 0x0001
+#define FAPI_MESSAGETYPE_EAPOL_KEY_M123 0x0002
+#define FAPI_MESSAGETYPE_EAPOL_KEY_M4 0x0003
+#define FAPI_MESSAGETYPE_ARP 0x0004
+#define FAPI_MESSAGETYPE_DHCP 0x0005
+#define FAPI_MESSAGETYPE_NEIGHBOR_DISCOVERY 0x0006
+#define FAPI_MESSAGETYPE_WAI_MESSAGE 0x0007
+#define FAPI_MESSAGETYPE_ANY_OTHER 0x0008
+#define FAPI_MESSAGETYPE_IEEE80211_ACTION 0x0010
+#define FAPI_MESSAGETYPE_IEEE80211_MGMT 0x0011
+
+#define FAPI_MODE_SOURCE 0x0001
+#define FAPI_MODE_SINK 0x0002
+#define FAPI_MODE_LOOPBACK 0x0003
+
+#define FAPI_NANAVAILABILITYDURATION_16MS 0x00
+#define FAPI_NANAVAILABILITYDURATION_32MS 0x01
+#define FAPI_NANAVAILABILITYDURATION_64MS 0x02
+
+#define FAPI_NANOPERATIONCONTROL_MAC_ADDRESS_EVENT 0x0002
+#define FAPI_NANOPERATIONCONTROL_START_CLUSTER_EVENT 0x0004
+#define FAPI_NANOPERATIONCONTROL_JOINED_CLUSTER_EVENT 0x0008
+
+#define FAPI_NANSDFCONTROL_PUBLISH_END_EVENT 0x0001
+#define FAPI_NANSDFCONTROL_SUBSCRIBE_END_EVENT 0x0002
+#define FAPI_NANSDFCONTROL_MATCH_EXPIRED_EVENT 0x0004
+#define FAPI_NANSDFCONTROL_RECEIVED_FOLLOWUP_EVENT 0x0008
+#define FAPI_NANSDFCONTROL_DISABLE_FOLLOWUP_TRANSMIT_STATUS 0x0010
+
+#define FAPI_NSSTYPE_NSS_ONE_STREAM 0x0000
+#define FAPI_NSSTYPE_NSS_TWO_STREAMS 0x0001
+#define FAPI_NSSTYPE_NSS_THREE_STREAMS 0x0002
+#define FAPI_NSSTYPE_NSS_FOUR_STREAMS 0x0003
+#define FAPI_NSSTYPE_NSS_FIVE_STREAMS 0x0004
+#define FAPI_NSSTYPE_NSS_SIX_STREAMS 0x0005
+#define FAPI_NSSTYPE_NSS_SEVEN_STREAMS 0x0006
+#define FAPI_NSSTYPE_NSS_EIGHT_STREAMS 0x0007
+
+#define FAPI_PACKETFILTERMODE_OPT_OUT 0x01
+#define FAPI_PACKETFILTERMODE_OPT_IN 0x02
+#define FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP 0x04
+#define FAPI_PACKETFILTERMODE_OPT_IN_SLEEP 0x08
+
+#define FAPI_PMALLOCAREA_PMALLOC_STATS 0x0000
+#define FAPI_PMALLOCAREA_PMALLOC_FSM_STATS 0x0001
+#define FAPI_PMALLOCAREA_HOSTIO_SIG_SIZES 0x0002
+
+#define FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE 0x0000
+#define FAPI_POWERMANAGEMENTMODE_POWER_SAVE 0x0001
+
+#define FAPI_PRIMARYCHANNELPOSITION_P0 0x00
+#define FAPI_PRIMARYCHANNELPOSITION_P1 0x01
+#define FAPI_PRIMARYCHANNELPOSITION_P2 0x02
+#define FAPI_PRIMARYCHANNELPOSITION_P3 0x03
+#define FAPI_PRIMARYCHANNELPOSITION_P4 0x04
+#define FAPI_PRIMARYCHANNELPOSITION_P5 0x05
+#define FAPI_PRIMARYCHANNELPOSITION_P6 0x06
+#define FAPI_PRIMARYCHANNELPOSITION_P7 0x07
+#define FAPI_PRIMARYCHANNELPOSITION_NOT_APPLICABLE 0x08
+
+#define FAPI_PRIORITY_QOS_UP0 0x0000
+#define FAPI_PRIORITY_QOS_UP1 0x0001
+#define FAPI_PRIORITY_QOS_UP2 0x0002
+#define FAPI_PRIORITY_QOS_UP3 0x0003
+#define FAPI_PRIORITY_QOS_UP4 0x0004
+#define FAPI_PRIORITY_QOS_UP5 0x0005
+#define FAPI_PRIORITY_QOS_UP6 0x0006
+#define FAPI_PRIORITY_QOS_UP7 0x0007
+#define FAPI_PRIORITY_CONTENTION 0x8000
+
+#define FAPI_PROCEDURETYPE_UNKNOWN 0x0000
+#define FAPI_PROCEDURETYPE_CONNECTION_STARTED 0x0001
+#define FAPI_PROCEDURETYPE_DEVICE_DISCOVERED 0x0002
+#define FAPI_PROCEDURETYPE_ROAMING_STARTED 0x0003
+
+#define FAPI_PROCESSOR_DEFAULT 0x0
+#define FAPI_PROCESSOR_CHIPCPU 0x1
+#define FAPI_PROCESSOR_HOSTCPU 0x3
+
+#define FAPI_PROTOCOL_UDP 0x0001
+#define FAPI_PROTOCOL_TCP 0x0002
+
+#define FAPI_PURPOSE_BEACON 0x0001
+#define FAPI_PURPOSE_PROBE_RESPONSE 0x0002
+#define FAPI_PURPOSE_ASSOCIATION_RESPONSE 0x0004
+#define FAPI_PURPOSE_LOCAL 0x0008
+#define FAPI_PURPOSE_ASSOCIATION_REQUEST 0x0010
+#define FAPI_PURPOSE_PROBE_REQUEST 0x0020
+
+#define FAPI_RADIOBITMAP_RADIO_0 0x0001
+#define FAPI_RADIOBITMAP_RADIO_1 0x0002
+
+#define FAPI_REASONCODE_RESERVED 0x0000
+#define FAPI_REASONCODE_UNSPECIFIED_REASON 0x0001
+#define FAPI_REASONCODE_DEAUTHENTICATED_INVALID_AUTHENTICATION 0x0002
+#define FAPI_REASONCODE_DEAUTHENTICATED_LEAVING 0x0003
+#define FAPI_REASONCODE_DEAUTHENTICATED_NO_MORE_STATIONS 0x0005
+#define FAPI_REASONCODE_DEAUTHENTICATED_INVALID_CLASS_2_FRAME 0x0006
+#define FAPI_REASONCODE_DEAUTHENTICATED_INVALID_CLASS_3_FRAME 0x0007
+#define FAPI_REASONCODE_DEAUTHENTICATED_REASON_INVALID_IE 0x000d
+#define FAPI_REASONCODE_DEAUTHENTICATED_4_WAY_HANDSHAKE_TIMEOUT 0x000f
+#define FAPI_REASONCODE_DEAUTHENTICATED_GROUP_HANDSHAKE_TIMEOUT 0x0010
+#define FAPI_REASONCODE_DEAUTHENTICATED_HANDSHAKE_ELEMENT_MISMATCH 0x0011
+#define FAPI_REASONCODE_DEAUTHENTICATED_REASON_INVALID_RSNE 0x0014
+#define FAPI_REASONCODE_DEAUTHENTICATED_802_1_X_AUTH_FAILED 0x0017
+#define FAPI_REASONCODE_TDLS_PEER_UNREACHABLE 0x0019
+#define FAPI_REASONCODE_TDLS_TEARDOWN_UNSPECIFIED_REASON 0x001a
+#define FAPI_REASONCODE_QOS_UNSPECIFIED_REASON 0x0020
+#define FAPI_REASONCODE_QOS_EXCESSIVE_NOT_ACK 0x0022
+#define FAPI_REASONCODE_QOS_TXOP_LIMIT_EXCEEDED 0x0023
+#define FAPI_REASONCODE_QSTA_LEAVING 0x0024
+#define FAPI_REASONCODE_END 0x0025
+#define FAPI_REASONCODE_UNKNOWN 0x0026
+#define FAPI_REASONCODE_TIMEOUT 0x0027
+#define FAPI_REASONCODE_KEEP_ALIVE_FAILURE 0x0028
+#define FAPI_REASONCODE_START 0x0029
+#define FAPI_REASONCODE_DEAUTHENTICATED_REASON_INVALID_PMKID 0x0031
+#define FAPI_REASONCODE_INVALID_PMKID 0x0049
+#define FAPI_REASONCODE_SYNCHRONISATION_LOSS 0x8003
+#define FAPI_REASONCODE_SECURITY_REQUIRED 0x8004
+#define FAPI_REASONCODE_ROAMING_FAILURE_LINK_LOSS_NO_CANDIDATE 0x8005
+#define FAPI_REASONCODE_HOTSPOT_MAX_CLIENT_REACHED 0x8006
+#define FAPI_REASONCODE_CHANNEL_SWITCH_FAILURE 0x8007
+#define FAPI_REASONCODE_REPORTING_ABORTED_SCANNING 0x8008
+#define FAPI_REASONCODE_REPORTING_ABORTED_ROAMING 0x8009
+#define FAPI_REASONCODE_NAN_SERVICE_TERMINATED_TIMEOUT 0x9001
+#define FAPI_REASONCODE_NAN_SERVICE_TERMINATED_USER_REQUEST 0x9002
+#define FAPI_REASONCODE_NAN_SERVICE_TERMINATED_COUNT_REACHED 0x9003
+#define FAPI_REASONCODE_NAN_SERVICE_TERMINATED_DISCOVERY_SHUTDOWN 0x9004
+#define FAPI_REASONCODE_NAN_TRANSMIT_FOLLOWUP_SUCCESS 0x9006
+#define FAPI_REASONCODE_NAN_TRANSMIT_FOLLOWUP_FAILURE 0x9007
+#define FAPI_REASONCODE_NDP_ACCEPTED 0x9008
+#define FAPI_REASONCODE_NDP_REJECTED 0x9009
+
+#define FAPI_REPORTMODE_RESERVED 0x0001
+#define FAPI_REPORTMODE_END_OF_SCAN_CYCLE 0x0002
+#define FAPI_REPORTMODE_REAL_TIME 0x0004
+#define FAPI_REPORTMODE_NO_BATCH 0x0008
+
+#define FAPI_RESULTCODE_SUCCESS 0x0000
+#define FAPI_RESULTCODE_UNSPECIFIED_FAILURE 0x0001
+#define FAPI_RESULTCODE_INVALID_PARAMETERS 0x0026
+#define FAPI_RESULTCODE_REJECTED_INVALID_IE 0x0028
+#define FAPI_RESULTCODE_NOT_ALLOWED 0x0030
+#define FAPI_RESULTCODE_NOT_PRESENT 0x0031
+#define FAPI_RESULTCODE_TRANSMISSION_FAILURE 0x004f
+#define FAPI_RESULTCODE_TOO_MANY_SIMULTANEOUS_REQUESTS 0x8001
+#define FAPI_RESULTCODE_BSS_ALREADY_STARTED_OR_JOINED 0x8002
+#define FAPI_RESULTCODE_NOT_SUPPORTED 0x8003
+#define FAPI_RESULTCODE_INVALID_STATE 0x8004
+#define FAPI_RESULTCODE_INSUFFICIENT_RESOURCE 0x8006
+#define FAPI_RESULTCODE_INVALID_VIRTUAL_INTERFACE_INDEX 0x800a
+#define FAPI_RESULTCODE_HOST_REQUEST_SUCCESS 0x800b
+#define FAPI_RESULTCODE_HOST_REQUEST_FAILED 0x800c
+#define FAPI_RESULTCODE_INVALID_FREQUENCY 0x800e
+#define FAPI_RESULTCODE_PROBE_TIMEOUT 0x800f
+#define FAPI_RESULTCODE_AUTH_TIMEOUT 0x8010
+#define FAPI_RESULTCODE_ASSOC_TIMEOUT 0x8011
+#define FAPI_RESULTCODE_ASSOC_ABORT 0x8012
+#define FAPI_RESULTCODE_AUTH_NO_ACK 0x8013
+#define FAPI_RESULTCODE_ASSOC_NO_ACK 0x8014
+#define FAPI_RESULTCODE_AUTH_FAILED_CODE 0x8100
+#define FAPI_RESULTCODE_ASSOC_FAILED_CODE 0x8200
+#define FAPI_RESULTCODE_INVALID_TLV_VALUE 0x9000
+#define FAPI_RESULTCODE_NAN_PROTOCOL_FAILURE 0x9001
+#define FAPI_RESULTCODE_NAN_INVALID_PUBLISH_SUBSCRIBE_ID 0x9002
+#define FAPI_RESULTCODE_NAN_INVALID_REQUESTOR_INSTANCE_ID 0x9003
+#define FAPI_RESULTCODE_UNSUPPORTED_CONCURRENCY 0x9004
+#define FAPI_RESULTCODE_NAN_INVALID_NDP_ID 0x9005
+#define FAPI_RESULTCODE_NAN_INVALID_PEER_ID 0x9006
+#define FAPI_RESULTCODE_NAN_NO_OTA_ACK 0x9007
+#define FAPI_RESULTCODE_NAN_INVALID_AVAILABILITY 0x9008
+#define FAPI_RESULTCODE_NAN_IMMUTABLE_UNACCEPTABLE 0x9009
+#define FAPI_RESULTCODE_NAN_REJECTED_SECURITY_POLICY 0x900a
+#define FAPI_RESULTCODE_NDP_REJECTED 0x900b
+#define FAPI_RESULTCODE_NDL_UNACCEPTABLE 0x900c
+#define FAPI_RESULTCODE_NDL_FAILED_SCHEDULE 0x900d
+
+#define FAPI_RTTBANDWIDTH_20MHZ 0x0004
+#define FAPI_RTTBANDWIDTH_40MHZ 0x0008
+#define FAPI_RTTBANDWIDTH_80MHZ 0x0010
+#define FAPI_RTTBANDWIDTH_160MHZ 0x0020
+
+#define FAPI_RTTPREAMBLE_LEGACY 0x0001
+#define FAPI_RTTPREAMBLE_HT 0x0002
+#define FAPI_RTTPREAMBLE_VHT 0x0004
+
+#define FAPI_RTTSTATUS_SUCCESS 0x0000
+#define FAPI_RTTSTATUS_UNSPECIFIED_FAILURE 0x0001
+#define FAPI_RTTSTATUS_FAIL_NO_RESPONSE 0x0002
+#define FAPI_RTTSTATUS_FAIL_REJECTED 0x0003
+#define FAPI_RTTSTATUS_FAIL_NOT_SCHEDULED 0x0004
+#define FAPI_RTTSTATUS_FAIL_TIMEOUT 0x0005
+#define FAPI_RTTSTATUS_FAIL_INCORRECT_CHANNEL 0x0006
+#define FAPI_RTTSTATUS_FAIL_FTM_NOT_SUPPORTED 0x0007
+#define FAPI_RTTSTATUS_FAIL_MEASUREMENT_ABORTED 0x0008
+#define FAPI_RTTSTATUS_FAIL_INVALID_TIME_STAMP 0x0009
+#define FAPI_RTTSTATUS_FAIL_NO_FTM_RECEIVED 0x000a
+#define FAPI_RTTSTATUS_FAIL_BURST_NOT_SCHEDULED 0x000b
+#define FAPI_RTTSTATUS_FAIL_BUSY_TRY_LATER 0x000c
+#define FAPI_RTTSTATUS_FAIL_INVALID_REQUEST 0x000d
+#define FAPI_RTTSTATUS_FAIL_FTM_PARAMETER_OVERRIDE 0x000f
+
+#define FAPI_RTTTYPE_ONE_SIDED 0x0001
+#define FAPI_RTTTYPE_TWO_SIDED 0x0002
+
+#define FAPI_RULEFLAG_NO_IR 0x0001
+#define FAPI_RULEFLAG_DFS 0x0002
+#define FAPI_RULEFLAG_NO_OFDM 0x0004
+#define FAPI_RULEFLAG_NO_INDOOR 0x0008
+#define FAPI_RULEFLAG_NO_OUTDOOR 0x0010
+
+#define FAPI_RXSTARTFLAGS_NONE 0x0000
+#define FAPI_RXSTARTFLAGS_SCAN_CHANNEL 0x0001
+#define FAPI_RXSTARTFLAGS_FILTERING 0x0002
+#define FAPI_RXSTARTFLAGS_BEAMFORMING 0x0004
+#define FAPI_RXSTARTFLAGS_ACK 0x0008
+#define FAPI_RXSTARTFLAGS_LP_MODE 0x0010
+#define FAPI_RXSTARTFLAGS_CHAN_RSSI 0x0020
+#define FAPI_RXSTARTFLAGS_DISABLE_EXTERNAL_LNA 0x0040
+
+#define FAPI_SCANPOLICY_PASSIVE 0x01
+#define FAPI_SCANPOLICY_TEST_MODE 0x02
+#define FAPI_SCANPOLICY_ANY_RA 0x04
+#define FAPI_SCANPOLICY_2_4GHZ 0x08
+#define FAPI_SCANPOLICY_5GHZ 0x10
+#define FAPI_SCANPOLICY_NON_DFS 0x20
+#define FAPI_SCANPOLICY_DFS 0x40
+#define FAPI_SCANPOLICY_ON_CHANNEL 0x80
+
+#define FAPI_SCANTYPE_INITIAL_SCAN 0x0001
+#define FAPI_SCANTYPE_FULL_SCAN 0x0002
+#define FAPI_SCANTYPE_SCHEDULED_SCAN 0x0003
+#define FAPI_SCANTYPE_P2P_SCAN_FULL 0x0004
+#define FAPI_SCANTYPE_P2P_SCAN_SOCIAL 0x0005
+#define FAPI_SCANTYPE_OBSS_SCAN 0x0006
+#define FAPI_SCANTYPE_AP_AUTO_CHANNEL_SELECTION 0x0007
+#define FAPI_SCANTYPE_GSCAN 0x0009
+#define FAPI_SCANTYPE_MEASUREMENT_SCAN 0x000a
+#define FAPI_SCANTYPE_SOFT_NEIGHBOUR_ROAMING_SCAN 0x000b
+#define FAPI_SCANTYPE_SOFT_CACHED_ROAMING_SCAN 0x000c
+#define FAPI_SCANTYPE_SOFT_ALL_ROAMING_SCAN 0x000d
+#define FAPI_SCANTYPE_HARD_NEIGHBOUR_ROAMING_SCAN 0x000e
+#define FAPI_SCANTYPE_HARD_CACHED_ROAMING_SCAN 0x000f
+#define FAPI_SCANTYPE_HARD_ALL_ROAMING_SCAN 0x0010
+#define FAPI_SCANTYPE_OBSS_SCAN_INTERNAL 0x0011
+#define FAPI_SCANTYPE_NAN_SCAN 0x0012
+#define FAPI_SCANTYPE_FTM_NEIGHBOUR_SCAN 0x0013
+#define FAPI_SCANTYPE_FIRST_ILLEGAL 0x0014
+
+#define FAPI_STATSSTOPBITMAP_STATS_RADIO 0x0001
+#define FAPI_STATSSTOPBITMAP_STATS_RADIO_CCA 0x0002
+#define FAPI_STATSSTOPBITMAP_STATS_RADIO_CHANNELS 0x0004
+#define FAPI_STATSSTOPBITMAP_STATS_RADIO_SCAN 0x0008
+#define FAPI_STATSSTOPBITMAP_STATS_IFACE 0x0010
+#define FAPI_STATSSTOPBITMAP_STATS_IFACE_TXRATE 0x0020
+#define FAPI_STATSSTOPBITMAP_STATS_IFACE_AC 0x0040
+#define FAPI_STATSSTOPBITMAP_STATS_IFACE_CONTENSION 0x0080
+
+#define FAPI_TDLSACTION_DISCOVERY 0x0000
+#define FAPI_TDLSACTION_SETUP 0x0001
+#define FAPI_TDLSACTION_TEARDOWN 0x0002
+/* TODO(review): bare marker — appears to flag the TDLS channel-switch action below as incomplete/unverified; confirm status */
+#define FAPI_TDLSACTION_CHANNEL_SWITCH 0x0003
+
+#define FAPI_TDLSEVENT_CONNECTED 0x0001
+#define FAPI_TDLSEVENT_DISCONNECTED 0x0002
+#define FAPI_TDLSEVENT_DISCOVERED 0x0003
+
+#define FAPI_TRANSMISSIONSTATUS_SUCCESSFUL 0x0000
+#define FAPI_TRANSMISSIONSTATUS_RETRY_LIMIT 0x0001
+#define FAPI_TRANSMISSIONSTATUS_TX_LIFETIME 0x0002
+#define FAPI_TRANSMISSIONSTATUS_NO_BSS 0x0003
+#define FAPI_TRANSMISSIONSTATUS_EXCESSIVE_DATA_LENGTH 0x0004
+#define FAPI_TRANSMISSIONSTATUS_UNAVAILABLE_KEY_MAPPING 0x0005
+#define FAPI_TRANSMISSIONSTATUS_UNSPECIFIED_FAILURE 0x0006
+
+#define FAPI_TXDATATYPE_DATA_WORD 0x0000
+#define FAPI_TXDATATYPE_DATA_RANDOM 0x0001
+
+#define FAPI_TXREADFLAGS_NONE 0x0000
+#define FAPI_TXREADFLAGS_FRAME_COUNTING 0x0001
+#define FAPI_TXREADFLAGS_THERMAL_CUTOUT 0x0002
+
+#define FAPI_TXSETPARAMSFLAGS_NONE 0x0000
+#define FAPI_TXSETPARAMSFLAGS_ACK 0x0001
+#define FAPI_TXSETPARAMSFLAGS_DUPLICATE_80 0x0002
+#define FAPI_TXSETPARAMSFLAGS_DUPLICATE_40 0x0004
+#define FAPI_TXSETPARAMSFLAGS_DEAFEN_RX 0x0008
+#define FAPI_TXSETPARAMSFLAGS_CS 0x0010
+#define FAPI_TXSETPARAMSFLAGS_SCAN_CHANNEL 0x0020
+#define FAPI_TXSETPARAMSFLAGS_SHORT_PREAMBLE 0x0040
+#define FAPI_TXSETPARAMSFLAGS_DISABLE_SCRAMBLER 0x0080
+#define FAPI_TXSETPARAMSFLAGS_LDPC 0x0100
+#define FAPI_TXSETPARAMSFLAGS_STBC 0x0200
+#define FAPI_TXSETPARAMSFLAGS_DISABLE_SPREADER 0x0400
+#define FAPI_TXSETPARAMSFLAGS_GREENFIELD_PREAMBLE 0x0800
+#define FAPI_TXSETPARAMSFLAGS_RX_LOW_POWER 0x1000
+#define FAPI_TXSETPARAMSFLAGS_IBSS_FRAMES 0x2000
+#define FAPI_TXSETPARAMSFLAGS_BEAMFORMING 0x4000
+#define FAPI_TXSETPARAMSFLAGS_DISABLE_EXTERNAL_LNA 0x8000
+
+#define FAPI_TYPEOFAIRPOWER_EIRP 0x00
+#define FAPI_TYPEOFAIRPOWER_TPO 0x01
+#define FAPI_TYPEOFAIRPOWER_RAW 0x02
+
+#define FAPI_USAGE_NO_USE 0x0
+#define FAPI_USAGE_USE 0x1
+
+#define FAPI_VIFRANGE_VIF_INDEX_MIN 0x0001
+#define FAPI_VIFRANGE_VIF_INDEX_MAX 0x0008
+
+#define FAPI_VIFTYPE_UNSYNCHRONISED 0x0000
+#define FAPI_VIFTYPE_STATION 0x0002
+#define FAPI_VIFTYPE_AP 0x0003
+#define FAPI_VIFTYPE_WLANLITE 0x0004
+#define FAPI_VIFTYPE_NAN 0x0005
+#define FAPI_VIFTYPE_DISCOVERY 0x0006
+#define FAPI_VIFTYPE_PRECONNECT 0x0007
+#define FAPI_VIFTYPE_NANDATAPATH 0x0008
+#define FAPI_VIFTYPE_MONITOR 0x0010
+#define FAPI_VIFTYPE_SCAN 0x0020
+#define FAPI_VIFTYPE_OFFCHANNEL 0x0021
+#define FAPI_VIFTYPE_RANGE 0x0022
+
+#define FAPI_WIFILOGGINGPARAMS_BEACON_PERIOD 0xf000
+#define FAPI_WIFILOGGINGPARAMS_BLACKOUT_ID 0xf001
+#define FAPI_WIFILOGGINGPARAMS_BLACKOUT_SOURCE 0xf002
+#define FAPI_WIFILOGGINGPARAMS_BLACKOUT_TYPE 0xf003
+#define FAPI_WIFILOGGINGPARAMS_BLOCKACK_PARAMETER_SET 0xf004
+#define FAPI_WIFILOGGINGPARAMS_DIRECTION 0xf005
+#define FAPI_WIFILOGGINGPARAMS_DUTY_CYCLE 0xf006
+#define FAPI_WIFILOGGINGPARAMS_EAPOL_KEY_TYPE 0xf007
+#define FAPI_WIFILOGGINGPARAMS_FRAME 0xf008
+#define FAPI_WIFILOGGINGPARAMS_LOCAL_DURATION 0xf009
+#define FAPI_WIFILOGGINGPARAMS_PERIOD 0xf00a
+#define FAPI_WIFILOGGINGPARAMS_REASON 0xf00b
+#define FAPI_WIFILOGGINGPARAMS_REPORT_MODE 0xf00c
+#define FAPI_WIFILOGGINGPARAMS_RETRY_COUNT 0xf00d
+#define FAPI_WIFILOGGINGPARAMS_SCAN_TYPE 0xf00e
+#define FAPI_WIFILOGGINGPARAMS_SECONDARY_CHANNEL_OFFSET 0xf00f
+#define FAPI_WIFILOGGINGPARAMS_SEQUENCE_NUMBER 0xf010
+#define FAPI_WIFILOGGINGPARAMS_SNIFF_ACL 0xf011
+#define FAPI_WIFILOGGINGPARAMS_TEMPORAL_KEYS_REQUIRED 0xf012
+#define FAPI_WIFILOGGINGPARAMS_VIF_ID 0xf013
+
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P0 0x0000
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P1 0x0001
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P2 0x0002
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P3 0x0003
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P4 0x0004
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P5 0x0005
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P6 0x0006
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_P7 0x0007
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_20MHZ_PRIMARYCHANNELPOSITION_NOT_APPLICABLE 0x0008
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P0 0x0100
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P1 0x0101
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P2 0x0102
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P3 0x0103
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P4 0x0104
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P5 0x0105
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P6 0x0106
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_P7 0x0107
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_40MHZ_PRIMARYCHANNELPOSITION_NOT_APPLICABLE 0x0108
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P0 0x0200
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P1 0x0201
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P2 0x0202
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P3 0x0203
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P4 0x0204
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P5 0x0205
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P6 0x0206
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_P7 0x0207
+#define FAPI_CHANNELBANDWIDTH_BANDWIDTH_80MHZ_PRIMARYCHANNELPOSITION_NOT_APPLICABLE 0x0208
+
+#define MA_UNITDATA_REQ 0x1000
+#define MA_SPARE_1_REQ 0x1002
+#define MA_SPARE_2_REQ 0x1003
+#define MA_SPARE_3_REQ 0x1004
+#define MA_SPARE_SIGNAL_1_REQ 0x1005
+#define MA_SPARE_SIGNAL_2_REQ 0x1006
+#define MA_SPARE_SIGNAL_3_REQ 0x1007
+#define MA_UNITDATA_CFM 0x1100
+#define MA_SPARE_1_CFM 0x1102
+#define MA_SPARE_2_CFM 0x1103
+#define MA_SPARE_3_CFM 0x1104
+#define MA_SPARE_SIGNAL_1_CFM 0x1105
+#define MA_SPARE_SIGNAL_2_CFM 0x1106
+#define MA_SPARE_SIGNAL_3_CFM 0x1107
+#define MA_SPARE_1_RES 0x1200
+#define MA_SPARE_2_RES 0x1201
+#define MA_SPARE_3_RES 0x1202
+#define MA_SPARE_SIGNAL_1_RES 0x1203
+#define MA_SPARE_SIGNAL_2_RES 0x1204
+#define MA_SPARE_SIGNAL_3_RES 0x1205
+#define MA_UNITDATA_IND 0x1300
+#define MA_BLOCKACK_IND 0x1301
+#define MA_SPARE_1_IND 0x1302
+#define MA_SPARE_2_IND 0x1303
+#define MA_SPARE_3_IND 0x1304
+#define MA_SPARE_SIGNAL_1_IND 0x1305
+#define MA_SPARE_SIGNAL_2_IND 0x1306
+#define MA_SPARE_SIGNAL_3_IND 0x1307
+/* MLME (management-plane) signal ids.
+ * 0x20xx = REQ (host -> fw), 0x21xx = CFM (fw reply), 0x22xx = RES
+ * (host response to an IND), 0x23xx = IND (fw -> host). Gaps in the
+ * numbering (e.g. no 0x2017) are retired/reserved ids and must not be
+ * reused. */
+#define MLME_GET_REQ 0x2001
+#define MLME_SET_REQ 0x2002
+#define MLME_POWERMGT_REQ 0x2003
+#define MLME_ADD_INFO_ELEMENTS_REQ 0x2004
+#define MLME_ADD_SCAN_REQ 0x2005
+#define MLME_DEL_SCAN_REQ 0x2006
+#define MLME_ADD_VIF_REQ 0x2007
+#define MLME_DEL_VIF_REQ 0x2008
+#define MLME_START_REQ 0x2009
+#define MLME_SET_CHANNEL_REQ 0x200a
+#define MLME_CONNECT_REQ 0x200b
+#define MLME_REASSOCIATE_REQ 0x200c
+#define MLME_ROAM_REQ 0x200d
+#define MLME_DISCONNECT_REQ 0x200e
+#define MLME_REGISTER_ACTION_FRAME_REQ 0x200f
+#define MLME_SEND_FRAME_REQ 0x2010
+#define MLME_RESET_DWELL_TIME_REQ 0x2011
+#define MLME_SET_TRAFFIC_PARAMETERS_REQ 0x2012
+#define MLME_DEL_TRAFFIC_PARAMETERS_REQ 0x2013
+#define MLME_SET_PACKET_FILTER_REQ 0x2014
+#define MLME_SET_IP_ADDRESS_REQ 0x2015
+#define MLME_SET_ACL_REQ 0x2016
+#define MLME_SETKEYS_REQ 0x2018
+#define MLME_GET_KEY_SEQUENCE_REQ 0x201a
+#define MLME_SET_PMK_REQ 0x201c
+#define MLME_SET_CACHED_CHANNELS_REQ 0x201f
+#define MLME_SET_WHITELIST_SSID_REQ 0x2020
+#define MLME_TDLS_ACTION_REQ 0x2021
+#define MLME_CHANNEL_SWITCH_REQ 0x2022
+#define MLME_MONITOR_RSSI_REQ 0x2023
+#define MLME_START_LINK_STATISTICS_REQ 0x2024
+#define MLME_STOP_LINK_STATISTICS_REQ 0x2025
+#define MLME_SET_PNO_LIST_REQ 0x2027
+#define MLME_HOST_STATE_REQ 0x2028
+#define MLME_ADD_RANGE_REQ 0x2029
+#define MLME_DEL_RANGE_REQ 0x202a
+#define MLME_SET_NOA_REQ 0x202b
+#define MLME_SET_CTWINDOW_REQ 0x202c
+#define MLME_NAN_START_REQ 0x202d
+#define MLME_NAN_CONFIG_REQ 0x202e
+#define MLME_NAN_PUBLISH_REQ 0x202f
+#define MLME_NAN_SUBSCRIBE_REQ 0x2030
+#define MLME_NAN_FOLLOWUP_REQ 0x2031
+#define MLME_UNSET_CHANNEL_REQ 0x2032
+#define MLME_SET_COUNTRY_REQ 0x2033
+#define MLME_FORWARD_BEACON_REQ 0x2034
+#define MLME_NDP_SETUP_REQ 0x2035
+#define MLME_NDP_SETUP_RESPONSE_REQ 0x2036
+#define MLME_NDP_TERMINATE_REQ 0x2037
+#define MLME_NAN_ADD_RANGE_REQ 0x2038
+#define MLME_NAN_DEL_RANGE_REQ 0x2039
+#define MLME_SPARE_4_REQ 0x203a
+#define MLME_SPARE_5_REQ 0x203b
+#define MLME_SPARE_6_REQ 0x203c
+#define MLME_INSTALL_APF_REQ 0x203d
+#define MLME_READ_APF_REQ 0x203e
+#define MLME_SET_NSS_REQ 0x203f
+#define MLME_ARP_DETECT_REQ 0x2040
+#define MLME_SPARE_SIGNAL_1_REQ 0x2041
+#define MLME_SPARE_SIGNAL_2_REQ 0x2042
+#define MLME_SPARE_SIGNAL_3_REQ 0x2043
+/* MLME confirms: 0x21xx mirrors the 0x20xx request numbering */
+#define MLME_GET_CFM 0x2101
+#define MLME_SET_CFM 0x2102
+#define MLME_POWERMGT_CFM 0x2103
+#define MLME_ADD_INFO_ELEMENTS_CFM 0x2104
+#define MLME_ADD_SCAN_CFM 0x2105
+#define MLME_DEL_SCAN_CFM 0x2106
+#define MLME_ADD_VIF_CFM 0x2107
+#define MLME_DEL_VIF_CFM 0x2108
+#define MLME_START_CFM 0x2109
+#define MLME_SET_CHANNEL_CFM 0x210a
+#define MLME_CONNECT_CFM 0x210b
+#define MLME_REASSOCIATE_CFM 0x210c
+#define MLME_ROAM_CFM 0x210d
+#define MLME_DISCONNECT_CFM 0x210e
+#define MLME_REGISTER_ACTION_FRAME_CFM 0x210f
+#define MLME_SEND_FRAME_CFM 0x2110
+#define MLME_RESET_DWELL_TIME_CFM 0x2111
+#define MLME_SET_TRAFFIC_PARAMETERS_CFM 0x2112
+#define MLME_DEL_TRAFFIC_PARAMETERS_CFM 0x2113
+#define MLME_SET_PACKET_FILTER_CFM 0x2114
+#define MLME_SET_IP_ADDRESS_CFM 0x2115
+#define MLME_SET_ACL_CFM 0x2116
+#define MLME_SETKEYS_CFM 0x2118
+#define MLME_GET_KEY_SEQUENCE_CFM 0x211a
+#define MLME_SET_PMK_CFM 0x211c
+#define MLME_SET_CACHED_CHANNELS_CFM 0x211f
+#define MLME_SET_WHITELIST_SSID_CFM 0x2120
+#define MLME_TDLS_ACTION_CFM 0x2121
+#define MLME_CHANNEL_SWITCH_CFM 0x2122
+#define MLME_MONITOR_RSSI_CFM 0x2123
+#define MLME_START_LINK_STATISTICS_CFM 0x2124
+#define MLME_STOP_LINK_STATISTICS_CFM 0x2125
+#define MLME_SET_PNO_LIST_CFM 0x2127
+#define MLME_HOST_STATE_CFM 0x2128
+#define MLME_ADD_RANGE_CFM 0x2129
+#define MLME_DEL_RANGE_CFM 0x212a
+#define MLME_SET_NOA_CFM 0x212b
+#define MLME_SET_CTWINDOW_CFM 0x212c
+#define MLME_NAN_START_CFM 0x212d
+#define MLME_NAN_CONFIG_CFM 0x212e
+#define MLME_NAN_PUBLISH_CFM 0x212f
+#define MLME_NAN_SUBSCRIBE_CFM 0x2130
+#define MLME_NAN_FOLLOWUP_CFM 0x2131
+#define MLME_UNSET_CHANNEL_CFM 0x2132
+#define MLME_SET_COUNTRY_CFM 0x2133
+#define MLME_FORWARD_BEACON_CFM 0x2134
+#define MLME_NDP_SETUP_CFM 0x2135
+#define MLME_NDP_SETUP_RESPONSE_CFM 0x2136
+#define MLME_NDP_TERMINATE_CFM 0x2137
+#define MLME_NAN_ADD_RANGE_CFM 0x2138
+#define MLME_NAN_DEL_RANGE_CFM 0x2139
+#define MLME_SPARE_4_CFM 0x213a
+#define MLME_SPARE_5_CFM 0x213b
+#define MLME_SPARE_6_CFM 0x213c
+#define MLME_INSTALL_APF_CFM 0x213d
+#define MLME_READ_APF_CFM 0x213e
+#define MLME_SET_NSS_CFM 0x213f
+#define MLME_ARP_DETECT_CFM 0x2140
+#define MLME_SPARE_SIGNAL_1_CFM 0x2141
+#define MLME_SPARE_SIGNAL_2_CFM 0x2142
+#define MLME_SPARE_SIGNAL_3_CFM 0x2143
+/* MLME responses (host reply to an IND): 0x22xx */
+#define MLME_CONNECT_RES 0x2200
+#define MLME_CONNECTED_RES 0x2201
+#define MLME_REASSOCIATE_RES 0x2202
+#define MLME_ROAMED_RES 0x2203
+#define MLME_TDLS_PEER_RES 0x2204
+#define MLME_SYNCHRONISED_RES 0x2205
+#define MLME_SPARE_2_RES 0x2206
+#define MLME_SPARE_3_RES 0x2207
+#define MLME_SPARE_4_RES 0x2208
+#define MLME_SPARE_SIGNAL_1_RES 0x2209
+#define MLME_SPARE_SIGNAL_2_RES 0x220a
+#define MLME_SPARE_SIGNAL_3_RES 0x220b
+/* MLME indications (firmware -> host): 0x23xx */
+#define MLME_SCAN_IND 0x2300
+#define MLME_SCAN_DONE_IND 0x2301
+#define MLME_LISTEN_END_IND 0x2302
+#define MLME_CONNECT_IND 0x2303
+#define MLME_CONNECTED_IND 0x2304
+#define MLME_REASSOCIATE_IND 0x2305
+#define MLME_ROAM_IND 0x2306
+#define MLME_ROAMED_IND 0x2307
+#define MLME_DISCONNECT_IND 0x2308
+#define MLME_DISCONNECTED_IND 0x2309
+#define MLME_PROCEDURE_STARTED_IND 0x230a
+#define MLME_MIC_FAILURE_IND 0x230b
+#define MLME_FRAME_TRANSMISSION_IND 0x230c
+#define MLME_RECEIVED_FRAME_IND 0x230d
+#define MLME_TDLS_PEER_IND 0x230f
+#define MLME_RSSI_REPORT_IND 0x2312
+#define MLME_AC_PRIORITY_UPDATE_IND 0x2313
+#define MLME_RANGE_IND 0x2314
+#define MLME_RANGE_DONE_IND 0x2315
+#define MLME_EVENT_LOG_IND 0x2316
+#define MLME_NAN_EVENT_IND 0x2317
+#define MLME_NAN_SERVICE_IND 0x2318
+#define MLME_NAN_FOLLOWUP_IND 0x2319
+#define MLME_CHANNEL_SWITCHED_IND 0x231a
+#define MLME_SYNCHRONISED_IND 0x231b
+#define MLME_BEACON_REPORTING_EVENT_IND 0x231c
+#define MLME_SPARE_3_IND 0x231d
+#define MLME_SPARE_4_IND 0x231e
+#define MLME_NDP_SETUP_IND 0x231f
+#define MLME_NDP_REQUESTED_IND 0x2320
+#define MLME_NDP_SETUP_RESPONSE_IND 0x2321
+#define MLME_NDP_TERMINATED_IND 0x2322
+#define MLME_NAN_ADD_RANGE_IND 0x2323
+#define MLME_SPARE_5_IND 0x2324
+#define MLME_SPARE_SIGNAL_1_IND 0x2325
+#define MLME_SPARE_SIGNAL_2_IND 0x2326
+#define MLME_SPARE_SIGNAL_3_IND 0x2327
+/* DEBUG signal ids: 0x80xx REQ, 0x81xx CFM, 0x82xx RES, 0x83xx IND */
+#define DEBUG_SPARE_1_REQ 0x8000
+#define DEBUG_SPARE_2_REQ 0x8001
+#define DEBUG_SPARE_3_REQ 0x8002
+#define DEBUG_SPARE_SIGNAL_1_REQ 0x8003
+#define DEBUG_SPARE_SIGNAL_2_REQ 0x8004
+#define DEBUG_SPARE_SIGNAL_3_REQ 0x8005
+#define DEBUG_SPARE_1_CFM 0x8100
+#define DEBUG_SPARE_2_CFM 0x8101
+#define DEBUG_SPARE_3_CFM 0x8102
+#define DEBUG_SPARE_SIGNAL_1_CFM 0x8103
+#define DEBUG_SPARE_SIGNAL_2_CFM 0x8104
+#define DEBUG_SPARE_SIGNAL_3_CFM 0x8105
+#define DEBUG_SPARE_1_RES 0x8200
+#define DEBUG_SPARE_2_RES 0x8201
+#define DEBUG_SPARE_3_RES 0x8202
+#define DEBUG_SPARE_SIGNAL_1_RES 0x8203
+#define DEBUG_SPARE_SIGNAL_2_RES 0x8204
+#define DEBUG_SPARE_SIGNAL_3_RES 0x8205
+/* NOTE(review): DEBUG_WORD12IND lacks the "_IND" underscore used by every
+ * other indication name -- presumably historical; confirm before renaming. */
+#define DEBUG_WORD12IND 0x8301
+#define DEBUG_FAULT_IND 0x8302
+#define DEBUG_WORDS_IND 0x8303
+#define DEBUG_SPARE_2_IND 0x8304
+#define DEBUG_SPARE_3_IND 0x8305
+#define DEBUG_SPARE_4_IND 0x8306
+#define DEBUG_SPARE_SIGNAL_1_IND 0x8307
+#define DEBUG_SPARE_SIGNAL_2_IND 0x8308
+#define DEBUG_SPARE_SIGNAL_3_IND 0x8309
+/* TEST / WLANLITE / RADIO (production and factory test) signal ids:
+ * 0x90xx REQ, 0x91xx CFM, 0x92xx RES, 0x93xx IND */
+#define TEST_BLOCK_REQUESTS_REQ 0x9000
+#define TEST_PANIC_REQ 0x9001
+#define TEST_SUSPEND_REQ 0x9002
+#define TEST_RESUME_REQ 0x9003
+#define RADIO_LOGGING_REQ 0x9004
+#define WLANLITE_CW_START_REQ 0x9005
+#define WLANLITE_CW_STOP_REQ 0x9006
+#define WLANLITE_TX_SET_PARAMS_REQ 0x9007
+#define WLANLITE_TX_START_REQ 0x9008
+#define WLANLITE_TX_READ_REQ 0x9009
+#define WLANLITE_TX_STOP_REQ 0x900a
+#define WLANLITE_RX_START_REQ 0x900b
+#define WLANLITE_RX_READ_REQ 0x900c
+#define WLANLITE_RX_STOP_REQ 0x900d
+#define WLANLITE_STATUS_REQ 0x900e
+#define TEST_PMALLOC_REQ 0x900f
+#define TEST_CONFIGURE_MONITOR_MODE_REQ 0x9010
+#define TEST_CHECK_FW_ALIVE_REQ 0x9012
+#define DEBUG_GENERIC_REQ 0x9013
+#define DEBUG_PKT_SINK_START_REQ 0x9014
+#define DEBUG_PKT_SINK_STOP_REQ 0x9015
+#define DEBUG_PKT_SINK_REPORT_REQ 0x9016
+#define DEBUG_PKT_GEN_START_REQ 0x9017
+#define DEBUG_PKT_GEN_STOP_REQ 0x9018
+#define DEBUG_PKT_GEN_REPORT_REQ 0x9019
+#define WLANLITE_RADIO_SELECT_REQ 0x901a
+#define TEST_HIP_TESTER_START_REQ 0x901b
+#define TEST_HIP_TESTER_STOP_REQ 0x901c
+#define TEST_HIP_TESTER_SET_PARAMS_REQ 0x901d
+#define TEST_HIP_TESTER_REPORT_REQ 0x901e
+#define TEST_BIST_GET_TX_GAIN_REQ 0x901f
+#define TEST_SPARE_1_REQ 0x9020
+#define TEST_SPARE_2_REQ 0x9021
+#define TEST_SPARE_3_REQ 0x9022
+#define TEST_SPARE_SIGNAL_1_REQ 0x9023
+#define TEST_SPARE_SIGNAL_2_REQ 0x9024
+#define TEST_SPARE_SIGNAL_3_REQ 0x9025
+/* Test confirms: note the CFM numbering does not mirror the REQ low byte */
+#define RADIO_LOGGING_CFM 0x9100
+#define WLANLITE_CW_START_CFM 0x9101
+#define WLANLITE_TX_SET_PARAMS_CFM 0x9102
+#define WLANLITE_CW_STOP_CFM 0x9103
+#define WLANLITE_TX_START_CFM 0x9104
+#define WLANLITE_TX_READ_CFM 0x9105
+#define WLANLITE_TX_STOP_CFM 0x9106
+#define WLANLITE_RX_START_CFM 0x9107
+#define WLANLITE_RX_READ_CFM 0x9108
+#define WLANLITE_RX_STOP_CFM 0x9109
+#define WLANLITE_STATUS_CFM 0x910a
+#define TEST_PMALLOC_CFM 0x910b
+#define TEST_CONFIGURE_MONITOR_MODE_CFM 0x910c
+#define TEST_CHECK_FW_ALIVE_CFM 0x910e
+#define TEST_SUSPEND_CFM 0x910f
+#define TEST_RESUME_CFM 0x9110
+#define DEBUG_GENERIC_CFM 0x9111
+#define WLANLITE_RADIO_SELECT_CFM 0x9112
+#define TEST_HIP_TESTER_START_CFM 0x9113
+#define TEST_HIP_TESTER_STOP_CFM 0x9114
+#define TEST_HIP_TESTER_SET_PARAMS_CFM 0x9115
+#define TEST_BIST_GET_TX_GAIN_CFM 0x9116
+#define TEST_SPARE_1_CFM 0x9117
+#define TEST_SPARE_2_CFM 0x9118
+#define TEST_SPARE_3_CFM 0x9119
+#define TEST_SPARE_SIGNAL_1_CFM 0x911a
+#define TEST_SPARE_SIGNAL_2_CFM 0x911b
+#define TEST_SPARE_SIGNAL_3_CFM 0x911c
+#define TEST_SPARE_1_RES 0x9200
+#define TEST_SPARE_2_RES 0x9201
+#define TEST_SPARE_3_RES 0x9202
+#define TEST_SPARE_SIGNAL_1_RES 0x9203
+#define TEST_SPARE_SIGNAL_2_RES 0x9204
+#define TEST_SPARE_SIGNAL_3_RES 0x9205
+#define RADIO_LOGGING_IND 0x9300
+#define DEBUG_GENERIC_IND 0x9301
+#define DEBUG_PKT_SINK_REPORT_IND 0x9302
+#define DEBUG_PKT_GEN_REPORT_IND 0x9303
+#define TEST_HIP_TESTER_REPORT_IND 0x9304
+#define TEST_SPARE_1_IND 0x9305
+#define TEST_SPARE_2_IND 0x9306
+#define TEST_SPARE_3_IND 0x9307
+#define TEST_SPARE_SIGNAL_1_IND 0x9308
+#define TEST_SPARE_SIGNAL_2_IND 0x9309
+#define TEST_SPARE_SIGNAL_3_IND 0x930a
+
+/*
+ * Common on-the-wire header carried by every FAPI signal.
+ * All fields are little-endian and the struct is __packed, so the layout
+ * matches the firmware byte stream exactly (no compiler padding).
+ */
+struct fapi_signal_header {
+ __le16 id; /* signal identifier: one of the *_REQ/_CFM/_RES/_IND ids above */
+ __le16 receiver_pid; /* destination process id -- presumably fw-defined; confirm */
+ __le16 sender_pid; /* originating process id */
+ __le32 fw_reference;
+} __packed;
+
+/*
+ * Header variant for signals whose payload starts with a VIF index:
+ * identical to struct fapi_signal_header plus the leading vif field.
+ * Because vif sits directly after fw_reference here, every per-VIF
+ * member of the fapi_signal union below must put vif first.
+ */
+struct fapi_vif_signal_header {
+ __le16 id; /* signal identifier */
+ __le16 receiver_pid;
+ __le16 sender_pid;
+ __le32 fw_reference;
+ __le16 vif; /* virtual interface index */
+} __packed;
+
+struct fapi_signal {
+ __le16 id;
+ __le16 receiver_pid;
+ __le16 sender_pid;
+ __le32 fw_reference;
+
+ union {
+ struct {
+ __le16 vif;
+ __le16 host_tag;
+ __le16 priority;
+ __le16 peer_index;
+ __le16 data_unit_descriptor;
+ __le16 bulk_data_descriptor;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_unitdata_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_3_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_3_req;
+ struct {
+ __le16 vif;
+ __le16 transmission_status;
+ __le16 host_tag;
+ __le16 sequence_number;
+ __le16 peer_index;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_unitdata_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_3_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_3_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_3_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_3_res;
+ struct {
+ __le16 vif;
+ __le16 data_unit_descriptor;
+ __le16 sequence_number;
+ __le16 priority;
+ __le16 peer_index;
+ __le16 proprieraty_information_length;
+ __le16 bulk_data_descriptor;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_unitdata_ind;
+ struct {
+ __le16 vif;
+ u8 peer_qsta_address[ETH_ALEN];
+ __le16 sequence_number;
+ __le16 reason_code;
+ __le16 blockack_parameter_set;
+ __le16 direction;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_blockack_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed ma_spare_signal_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_get_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_req;
+ struct {
+ __le16 vif;
+ __le16 power_management_mode;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_powermgt_req;
+ struct {
+ __le16 vif;
+ __le16 purpose;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_info_elements_req;
+ struct {
+ __le16 vif;
+ __le16 scan_id;
+ __le16 scan_type;
+ u8 device_address[ETH_ALEN];
+ __le16 report_mode_bitmap;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_scan_req;
+ struct {
+ __le16 vif;
+ __le16 scan_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_scan_req;
+ struct {
+ __le16 vif;
+ u8 interface_address[ETH_ALEN];
+ __le16 virtual_interface_type;
+ u8 device_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_vif_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_vif_req;
+ struct {
+ __le16 vif;
+ u8 bssid[ETH_ALEN];
+ __le16 beacon_period;
+ __le16 dtim_period;
+ __le16 capability_information;
+ __le16 authentication_type;
+ __le16 hidden_ssid;
+ __le16 channel_frequency;
+ __le16 channel_information;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_start_req;
+ struct {
+ __le16 vif;
+ __le16 availability_duration;
+ __le16 availability_interval;
+ __le16 count;
+ __le16 channel_frequency;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_channel_req;
+ struct {
+ __le16 vif;
+ u8 bssid[ETH_ALEN];
+ __le16 authentication_type;
+ __le16 channel_frequency;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connect_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reassociate_req;
+ struct {
+ __le16 vif;
+ u8 bssid[ETH_ALEN];
+ __le16 channel_frequency;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_roam_req;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 reason_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_disconnect_req;
+ struct {
+ __le16 vif;
+ __le32 action_frame_category_bitmap_active;
+ __le32 action_frame_category_bitmap_suspended;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_register_action_frame_req;
+ struct {
+ __le16 vif;
+ __le16 host_tag;
+ __le16 data_unit_descriptor;
+ __le16 message_type;
+ __le16 channel_frequency;
+ __le32 dwell_time;
+ __le32 period;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_send_frame_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reset_dwell_time_req;
+ struct {
+ __le16 vif;
+ __le16 user_priority;
+ __le16 medium_time;
+ __le16 minimum_data_rate;
+ u8 peer_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_traffic_parameters_req;
+ struct {
+ __le16 vif;
+ __le16 user_priority;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_traffic_parameters_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_packet_filter_req;
+ struct {
+ __le16 vif;
+ __le16 ip_version;
+ u8 multicast_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_ip_address_req;
+ struct {
+ __le16 vif;
+ __le16 entries;
+ __le16 acl_policy;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_acl_req;
+ struct {
+ __le16 vif;
+ __le16 length;
+ __le16 key_id;
+ __le16 key_type;
+ u8 address[ETH_ALEN];
+ __le16 sequence_number[8];
+ __le32 cipher_suite_selector;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_setkeys_req;
+ struct {
+ __le16 vif;
+ __le16 key_id;
+ __le16 key_type;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_get_key_sequence_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_pmk_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_cached_channels_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_whitelist_ssid_req;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 tdls_action;
+ __le16 channel_frequency;
+ __le16 channel_information;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_tdls_action_req;
+ struct {
+ __le16 vif;
+ __le16 channel_frequency;
+ __le16 channel_information;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_channel_switch_req;
+ struct {
+ __le16 vif;
+ __le16 low_rssi_threshold;
+ __le16 high_rssi_threshold;
+ __le16 rssi_monitoring_enabled;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_monitor_rssi_req;
+ struct {
+ __le16 vif;
+ __le16 mpdu_size_threshold;
+ __le16 aggressive_statistics_gathering_enabled;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_start_link_statistics_req;
+ struct {
+ __le16 vif;
+ __le16 statistics_stop_bitmap;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_stop_link_statistics_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_pno_list_req;
+ struct {
+ __le16 vif;
+ __le16 host_state;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_host_state_req;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ u8 device_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_range_req;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le16 entries;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_range_req;
+ struct {
+ __le16 vif;
+ __le16 request_id;
+ __le16 noa_count;
+ __le32 interval;
+ __le32 duration;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_noa_req;
+ struct {
+ __le16 vif;
+ __le16 ctwindow;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_ctwindow_req;
+ struct {
+ __le16 vif;
+ __le16 operatein5gband;
+ __le16 hopcountmax;
+ __le16 nan_operation_control_flags;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_start_req;
+ struct {
+ __le16 vif;
+ __le16 nan_operation_control_flags;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_config_req;
+ struct {
+ __le16 vif;
+ __le16 publish_id;
+ __le16 nan_sdf_flags;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_publish_req;
+ struct {
+ __le16 vif;
+ __le16 subscribe_id;
+ __le16 nan_sdf_flags;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_subscribe_req;
+ struct {
+ __le16 vif;
+ __le16 publish_subscribe_id;
+ __le16 peer_id;
+ __le16 nan_sdf_flags;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_followup_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_unset_channel_req;
+ struct {
+ __le16 vif;
+ __le16 country_code;
+ __le16 dfs_regulatory_domain;
+ /* spare_1 restored: every sibling REQ member carries spare_1..spare_3;
+ * this one jumped from dfs_regulatory_domain straight to spare_2,
+ * shrinking the wire layout by 4 bytes relative to the FAPI pattern. */
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_country_req;
+ struct {
+ __le16 vif;
+ __le16 wips_action;
+ /* NOTE(review): spare_1 is __le16 here while every other spare field in
+ * this union is __le32 -- verify against the FAPI spec before relying
+ * on this layout. */
+ __le16 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_forward_beacon_req;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ __le16 peer_id;
+ u8 local_ndp_interface_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_req;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ __le16 reason_code;
+ u8 local_ndp_interface_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_response_req;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_terminate_req;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_add_range_req;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_del_range_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_4_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_5_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_6_req;
+ struct {
+ __le16 vif;
+ __le16 filter_mode;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_install_apf_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_read_apf_req;
+ struct {
+ __le16 vif;
+ __le16 rx_nss;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_nss_req;
+ struct {
+ __le16 vif;
+ __le16 arp_detect_action;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_arp_detect_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_3_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_get_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_powermgt_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_info_elements_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 scan_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_scan_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 scan_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_scan_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_vif_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_vif_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_start_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_channel_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connect_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reassociate_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_roam_cfm;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_disconnect_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_register_action_frame_cfm;
+ struct {
+ __le16 vif;
+ __le16 host_tag;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_send_frame_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reset_dwell_time_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_traffic_parameters_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_traffic_parameters_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_packet_filter_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_ip_address_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_acl_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_setkeys_cfm;
+ struct {
+ __le16 vif;
+ __le16 sequence_number[8];
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_get_key_sequence_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_pmk_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_cached_channels_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_whitelist_ssid_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_tdls_action_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_channel_switch_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_monitor_rssi_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_start_link_statistics_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_stop_link_statistics_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_pno_list_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_host_state_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_add_range_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_del_range_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_noa_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_ctwindow_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_start_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_config_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_publish_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_subscribe_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_followup_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_unset_channel_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_country_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_forward_beacon_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_response_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_terminate_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_add_range_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_del_range_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_4_cfm;
+ struct {
+ /* Field order fixed: vif must come first. struct fapi_vif_signal_header
+ * places vif immediately after fw_reference, and every other CFM member
+ * of this union (e.g. mlme_spare_4_cfm directly above) is laid out as
+ * vif then result_code; this member had the two fields swapped, which
+ * would mis-decode both on the wire. */
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_5_cfm;
+ struct {
+ /* Field order fixed: vif first, matching fapi_vif_signal_header and
+ * every other CFM member of this union (was result_code before vif). */
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_6_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_install_apf_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_read_apf_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_set_nss_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_arp_detect_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_3_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connect_res;
+ struct {
+ __le16 vif;
+ __le16 peer_index;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connected_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reassociate_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_roamed_res;
+ struct {
+ __le16 vif;
+ __le16 peer_index;
+ __le16 tdls_event;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_tdls_peer_res;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ u8 bssid[ETH_ALEN];
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_synchronised_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_3_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_4_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_3_res;
+ struct {
+ __le16 vif;
+ __le16 channel_frequency;
+ __le16 rssi;
+ __le16 scan_id;
+ __le16 hotlisted_ap;
+ __le16 preferrednetwork_ap;
+ __le16 anqp_elements_length;
+ __le16 network_block_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_scan_ind;
+ struct {
+ __le16 vif;
+ __le16 scan_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_scan_done_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_listen_end_ind;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connect_ind;
+ struct {
+ __le16 vif;
+ __le16 peer_index;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_connected_ind;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_reassociate_ind;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_roam_ind;
+ struct {
+ __le16 vif;
+ __le16 temporal_keys_required;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_roamed_ind;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_disconnect_ind;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 reason_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_disconnected_ind;
+ struct {
+ __le16 vif;
+ __le16 procedure_type;
+ __le16 peer_index;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_procedure_started_ind;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 key_type;
+ __le16 key_id;
+ __le16 key_sequence_number[8];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_mic_failure_ind;
+ struct {
+ __le16 vif;
+ __le16 host_tag;
+ __le16 transmission_status;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_frame_transmission_ind;
+ struct {
+ __le16 vif;
+ __le16 data_unit_descriptor;
+ __le16 channel_frequency;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_received_frame_ind;
+ struct {
+ __le16 vif;
+ u8 peer_sta_address[ETH_ALEN];
+ __le16 peer_index;
+ __le16 tdls_event;
+ __le16 reason_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_tdls_peer_ind;
+ struct {
+ __le16 vif;
+ u8 bssid[ETH_ALEN];
+ __le16 rssi;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_rssi_report_ind;
+ struct {
+ __le16 vif;
+ __le16 ac_priority;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ac_priority_update_ind;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le16 entries;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_range_ind;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_range_done_ind;
+ struct {
+ __le16 vif;
+ __le16 event;
+ u8 timestamp[8];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_event_log_ind;
+ struct {
+ __le16 vif;
+ __le16 event;
+ __le16 identifier;
+ u8 address_or_identifier[ETH_ALEN];
+ __le16 reason_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_event_ind;
+ struct {
+ __le16 vif;
+ __le16 publish_subscribe_id;
+ __le16 peer_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_service_ind;
+ struct {
+ __le16 vif;
+ __le16 publish_subscribe_id;
+ __le16 peer_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_followup_ind;
+ struct {
+ __le16 vif;
+ __le16 channel_frequency;
+ __le16 channel_information;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_channel_switched_ind;
+ struct {
+ __le16 vif;
+ __le16 rssi;
+ __le16 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_synchronised_ind;
+ struct {
+ __le16 vif;
+ __le16 abort_reason;
+ __le16 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_beacon_reporting_event_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_4_ind;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ u8 peer_ndp_interface_address[ETH_ALEN];
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_ind;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ __le16 publish_subscribe_id;
+ u8 peer_ndp_interface_address[ETH_ALEN];
+ __le16 security_required;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_requested_ind;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ u8 peer_ndp_interface_address[ETH_ALEN];
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_setup_response_ind;
+ struct {
+ __le16 vif;
+ __le16 instance_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_ndp_terminated_ind;
+ struct {
+ __le16 vif;
+ __le16 rtt_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_nan_add_range_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_5_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed mlme_spare_signal_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_3_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_3_req;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_3_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_3_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_3_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_3_res;
+ struct {
+ __le16 vif;
+ __le16 module_id;
+ __le16 module_sub_id;
+ __le32 timestamp;
+ __le16 debug_words[12];
+ __le16 sequence_number;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_word12_ind;
+ struct {
+ __le16 vif;
+ __le16 faultid;
+ __le16 count;
+ __le32 timestamp;
+ __le32 arg;
+ __le16 cpu;
+ __le16 sequence_number;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_fault_ind;
+ struct {
+ __le16 vif;
+ __le16 module_id;
+ __le16 module_sub_id;
+ __le32 timestamp;
+ __le16 sequence_number;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_words_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_4_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_spare_signal_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_block_requests_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_panic_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_suspend_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_resume_req;
+ struct {
+ __le32 logging_source;
+ __le32 logging_frequency;
+ __le32 capture_stream;
+ __le32 trigger_mode;
+ __le32 delay;
+ __le32 buffer_size;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed radio_logging_req;
+ struct {
+ __le16 freq;
+ __le16 power;
+ __le16 flags;
+ __le16 type;
+ __le16 amplitude;
+ __le32 freq1;
+ __le32 freq2;
+ __le16 phase;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_cw_start_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_cw_stop_req;
+ struct {
+ __le16 freq;
+ __le16 rate;
+ __le16 channel_information;
+ __le16 power;
+ __le16 length;
+ __le32 interval;
+ __le16 flags;
+ __le16 aid;
+ __le16 distance_to_band_edge_half_mhz;
+ __le16 regulatory_domain;
+ __le16 spare_0;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_set_params_req;
+ struct {
+ __le32 num_frames_to_send;
+ __le16 data_type;
+ __le16 data_param;
+ u8 dest_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 num_mpdus_per_ampdu;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_start_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_read_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_stop_req;
+ struct {
+ __le16 freq;
+ __le16 channel_information;
+ __le16 flags;
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 aid;
+ __le16 num_mpdus_per_ampdu;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_start_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_read_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_stop_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_status_req;
+ struct {
+ __le16 alloc_area;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_pmalloc_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_configure_monitor_mode_req;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_check_fw_alive_req;
+ struct {
+ __le16 vif;
+ __le16 debug_words[8];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_generic_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 interval;
+ __le16 packets_per_interval;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_sink_start_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_sink_stop_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 report_interval;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_sink_report_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 interval;
+ __le16 size;
+ __le16 packets_per_interval;
+ __le32 ipv4destination_address;
+ __le16 packets_per_interrupt;
+ __le16 use_streaming;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_gen_start_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_gen_stop_req;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 report_interval;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_gen_report_req;
+ struct {
+ __le16 radio_bitmap;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_radio_select_req;
+ struct {
+ __le16 vif;
+ __le16 mode;
+ __le16 end_point;
+ __le16 protocol;
+ __le32 interval;
+ __le16 packets_per_interval;
+ __le16 packets_size;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_start_req;
+ struct {
+ __le16 vif;
+ __le16 stream_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_stop_req;
+ struct {
+ __le16 vif;
+ __le16 stream_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_set_params_req;
+ struct {
+ __le16 vif;
+ __le16 stream_id;
+ __le32 report_interval;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_report_req;
+ struct {
+ __le16 freq;
+ __le16 tx_gain;
+ __le16 rx_gain;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_bist_get_tx_gain_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_3_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_1_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_2_req;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_3_req;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed radio_logging_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_cw_start_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_set_params_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_cw_stop_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_start_cfm;
+ struct {
+ __le16 result_code;
+ __le16 flags;
+ __le32 ctr_frames_left_to_send;
+ __le32 transmission_back_off;
+ __le16 wanted_power_target;
+ __le16 final_power_target;
+ __le16 oob_constraint;
+ __le16 last_trim_pa_temperature;
+ __le16 current_pa_temperature;
+ __le16 last_trim_ambient_temperature;
+ __le16 current_ambient_temperature;
+ __le16 temp_power_adjust;
+ __le32 ctr_frames_success;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_read_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_tx_stop_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_start_cfm;
+ struct {
+ __le16 result_code;
+ __le32 freq_offset_cur;
+ __le32 freq_offset_avg;
+ __le16 rssi_cur;
+ __le16 rssi_avg;
+ __le16 rssi_min;
+ __le16 rssi_max;
+ __le16 snr_cur;
+ __le16 snr_avg;
+ __le16 snr_min;
+ __le16 snr_max;
+ __le32 interval;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_read_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_rx_stop_cfm;
+ struct {
+ __le16 result_code;
+ __le16 device_state;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_status_cfm;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_pmalloc_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_configure_monitor_mode_cfm;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_check_fw_alive_cfm;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_suspend_cfm;
+ struct {
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_resume_cfm;
+ struct {
+ __le16 vif;
+ __le16 debug_words[8];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_generic_cfm;
+ struct {
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed wlanlite_radio_select_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 stream_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_start_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 stream_id;
+ __le32 duration;
+ __le32 count;
+ __le32 failed_count;
+ __le32 octets;
+ __le32 mbps;
+ __le16 idle_ratio;
+ __le16 int_latency;
+ __le32 tester_reserved1;
+ __le32 tester_reserved2;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_stop_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le16 stream_id;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_set_params_cfm;
+ struct {
+ __le16 result_code;
+ __le32 gain;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_bist_get_tx_gain_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_3_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_1_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_2_cfm;
+ struct {
+ __le16 vif;
+ __le16 result_code;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_3_cfm;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_3_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_1_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_2_res;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_3_res;
+ struct {
+ __le32 sequence_number;
+ __le32 more_data;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed radio_logging_ind;
+ struct {
+ __le16 vif;
+ __le16 debug_words[8];
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_generic_ind;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 duration;
+ __le32 received_packets;
+ __le32 received_octets;
+ __le32 kbps;
+ __le16 idle_ratio;
+ __le16 int_latency;
+ __le16 free_kbytes;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_sink_report_ind;
+ struct {
+ __le16 vif;
+ __le16 end_point;
+ __le16 direction;
+ __le32 duration;
+ __le32 received_packets;
+ __le32 failed_count;
+ __le32 received_octets;
+ __le32 kbps;
+ __le16 idle_ratio;
+ __le16 int_latency;
+ __le16 free_kbytes;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed debug_pkt_gen_report_ind;
+ struct {
+ __le16 vif;
+ __le16 stream_id;
+ __le32 duration;
+ __le32 count;
+ __le32 failed_count;
+ __le32 octets;
+ __le32 mbps;
+ __le16 idle_ratio;
+ __le16 int_latency;
+ __le32 tester_reserved1;
+ __le32 tester_reserved2;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_hip_tester_report_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_3_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_1_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_2_ind;
+ struct {
+ __le16 vif;
+ __le32 spare_1;
+ __le32 spare_2;
+ __le32 spare_3;
+ u8 dr[0];
+ } __packed test_spare_signal_3_ind;
+ } u;
+} __packed;
+
+/* Allocate an skb large enough for a FAPI signal plus trailing bulk data and
+ * initialise the common signal header.
+ *
+ * sig_size:  size of the fixed signal structure (must be at least
+ *            sizeof(struct fapi_signal_header); warned otherwise)
+ * data_size: extra tailroom reserved for data appended later
+ * id:        FAPI signal id, host order (stored little-endian)
+ * vif:       virtual interface index, host order (stored little-endian)
+ * file/line: caller location, forwarded to the skb allocation tracker
+ *
+ * Returns the skb with cb sig/data lengths set, or NULL on allocation failure.
+ */
+static inline struct sk_buff *fapi_alloc_f(size_t sig_size, size_t data_size, u16 id, u16 vif, const char *file, int line)
+{
+ struct sk_buff *skb = slsi_alloc_skb_f(sig_size + data_size, GFP_ATOMIC, file, line);
+ struct fapi_vif_signal_header *header;
+
+ WARN_ON(sig_size < sizeof(struct fapi_signal_header));
+ if (WARN_ON(!skb))
+  return NULL;
+
+ slsi_skb_cb_init(skb)->sig_length = sig_size;
+ slsi_skb_cb_get(skb)->data_length = sig_size;
+
+ header = (struct fapi_vif_signal_header *)skb_put(skb, sig_size);
+ header->id = cpu_to_le16(id);
+ header->receiver_pid = 0;
+ header->sender_pid = 0;
+ header->fw_reference = 0;
+ /* Fix: 'vif' is little-endian on the wire; convert like 'id' above so the
+  * header is correct on big-endian hosts (no-op on little-endian).
+  */
+ header->vif = cpu_to_le16(vif);
+ return skb;
+}
+
+/* Size of the named signal up to (but excluding) its variable-length 'dr' data. */
+#define fapi_sig_size(mp_name) ((u16)offsetof(struct fapi_signal, u.mp_name.dr))
+/* Allocate an skb for the named signal; records __FILE__/__LINE__ for the allocator. */
+#define fapi_alloc(mp_name, mp_id, mp_vif, mp_datalen) fapi_alloc_f(fapi_sig_size(mp_name), mp_datalen, mp_id, mp_vif, __FILE__, __LINE__)
+/* Field accessors: signals are little-endian on the wire; the get/set helpers
+ * convert to/from CPU byte order. fapi_get_buff returns the raw field.
+ */
+#define fapi_get_buff(mp_skb, mp_name) (((struct fapi_signal *)(mp_skb)->data)->mp_name)
+#define fapi_get_u16(mp_skb, mp_name) le16_to_cpu(((struct fapi_signal *)(mp_skb)->data)->mp_name)
+#define fapi_get_u32(mp_skb, mp_name) le32_to_cpu(((struct fapi_signal *)(mp_skb)->data)->mp_name)
+/* NOTE(review): no __le64 fields appear in the signal definitions above — confirm a
+ * 64-bit field exists before using this accessor.
+ */
+#define fapi_get_u64(mp_skb, mp_name) le64_to_cpu(((struct fapi_signal *)(mp_skb)->data)->mp_name)
+#define fapi_set_u16(mp_skb, mp_name, mp_value) (((struct fapi_signal *)(mp_skb)->data)->mp_name = cpu_to_le16(mp_value))
+#define fapi_set_u32(mp_skb, mp_name, mp_value) (((struct fapi_signal *)(mp_skb)->data)->mp_name = cpu_to_le32(mp_value))
+/* Signed variants: same layout, value reinterpreted as two's complement. */
+#define fapi_get_s16(mp_skb, mp_name) ((s16)le16_to_cpu(((struct fapi_signal *)(mp_skb)->data)->mp_name))
+#define fapi_get_s32(mp_skb, mp_name) ((s32)le32_to_cpu(((struct fapi_signal *)(mp_skb)->data)->mp_name))
+#define fapi_set_s16(mp_skb, mp_name, mp_value) (((struct fapi_signal *)(mp_skb)->data)->mp_name = cpu_to_le16((u16)mp_value))
+#define fapi_set_s32(mp_skb, mp_name, mp_value) (((struct fapi_signal *)(mp_skb)->data)->mp_name = cpu_to_le32((u32)mp_value))
+/* Copy into / fill a fixed-size array field (size taken from the field itself). */
+#define fapi_set_memcpy(mp_skb, mp_name, mp_value) memcpy(((struct fapi_signal *)(mp_skb)->data)->mp_name, mp_value, sizeof(((struct fapi_signal *)(mp_skb)->data)->mp_name))
+#define fapi_set_memset(mp_skb, mp_name, mp_value) memset(((struct fapi_signal *)(mp_skb)->data)->mp_name, mp_value, sizeof(((struct fapi_signal *)(mp_skb)->data)->mp_name))
+
+/* Helper to get and set high/low 16 bits from u32 signals.
+ * NOTE: the setters read-modify-write, evaluating mp_skb/mp_name twice —
+ * avoid side-effecting arguments.
+ */
+#define fapi_get_high16_u32(mp_skb, mp_name) ((fapi_get_u32((mp_skb), mp_name) & 0xffff0000) >> 16)
+#define fapi_set_high16_u32(mp_skb, mp_name, mp_value) fapi_set_u32((mp_skb), mp_name, (fapi_get_u32((mp_skb), mp_name) & 0xffff) | ((mp_value) << 16))
+#define fapi_get_low16_u32(mp_skb, mp_name) (fapi_get_u32((mp_skb), mp_name) & 0xffff)
+#define fapi_set_low16_u32(mp_skb, mp_name, mp_value) fapi_set_u32((mp_skb), mp_name, (fapi_get_u32((mp_skb), mp_name) & 0xffff0000) | (mp_value))
+
+/* Helper to get signal and data. Lengths come from the skb control block set
+ * at allocation / append time; 'data' starts right after the fixed signal.
+ */
+#define fapi_get_sigid(mp_skb) le16_to_cpu(((struct fapi_signal *)(mp_skb)->data)->id)
+#define fapi_get_siglen(mp_skb) (slsi_skb_cb_get(mp_skb)->sig_length)
+#define fapi_get_datalen(mp_skb) (slsi_skb_cb_get(mp_skb)->data_length - slsi_skb_cb_get(mp_skb)->sig_length)
+#define fapi_get_data(mp_skb) (mp_skb->data + fapi_get_siglen(mp_skb))
+#define fapi_get_vif(mp_skb) le16_to_cpu(((struct fapi_vif_signal_header *)(mp_skb)->data)->vif)
+
+/* Helper to get the struct ieee80211_mgmt from the data */
+#define fapi_get_mgmt(mp_skb) ((struct ieee80211_mgmt *)fapi_get_data(mp_skb))
+#define fapi_get_mgmtlen(mp_skb) fapi_get_datalen(mp_skb)
+
+/* Append data_len bytes after the signal. Copies from 'data' when it is
+ * non-NULL, otherwise only reserves the space. Returns the start of the
+ * appended region, or NULL (with a warning) when the skb lacks tailroom.
+ */
+static inline u8 *fapi_append_data(struct sk_buff *skb, const u8 *data, size_t data_len)
+{
+ u8 *dest;
+
+ if (WARN_ON(skb_tailroom(skb) < data_len))
+  return NULL;
+
+ dest = skb_put(skb, data_len);
+ if (data)
+  memcpy(dest, data, data_len);
+ slsi_skb_cb_get(skb)->data_length += data_len;
+ return dest;
+}
+
+/* True when the signal id's SAP bits select the MLME SAP. */
+static inline bool fapi_is_mlme(struct sk_buff *skb)
+{
+ u16 sap = fapi_get_sigid(skb) & FAPI_SAP_TYPE_MASK;
+
+ return sap == FAPI_SAP_TYPE_MLME;
+}
+
+/* True when the signal id's SAP bits select the MA (data) SAP. */
+static inline bool fapi_is_ma(struct sk_buff *skb)
+{
+ u16 sap = fapi_get_sigid(skb) & FAPI_SAP_TYPE_MASK;
+
+ return sap == FAPI_SAP_TYPE_MA;
+}
+
+/* True when the signal id's SAP bits select the debug SAP. */
+static inline bool fapi_is_debug(struct sk_buff *skb)
+{
+ u16 sap = fapi_get_sigid(skb) & FAPI_SAP_TYPE_MASK;
+
+ return sap == FAPI_SAP_TYPE_DEBUG;
+}
+
+/* True when the signal id's SAP bits select the test SAP. */
+static inline bool fapi_is_test(struct sk_buff *skb)
+{
+ u16 sap = fapi_get_sigid(skb) & FAPI_SAP_TYPE_MASK;
+
+ return sap == FAPI_SAP_TYPE_TEST;
+}
+
+/* True when the signal id's type bits mark a request (.req). */
+static inline bool fapi_is_req(struct sk_buff *skb)
+{
+ u16 sig_type = fapi_get_sigid(skb) & FAPI_SIG_TYPE_MASK;
+
+ return sig_type == FAPI_SIG_TYPE_REQ;
+}
+
+/* True when the signal id's type bits mark a confirm (.cfm). */
+static inline bool fapi_is_cfm(struct sk_buff *skb)
+{
+ u16 sig_type = fapi_get_sigid(skb) & FAPI_SIG_TYPE_MASK;
+
+ return sig_type == FAPI_SIG_TYPE_CFM;
+}
+
+/* True when the signal id's type bits mark a response (.res). */
+static inline bool fapi_is_res(struct sk_buff *skb)
+{
+ u16 sig_type = fapi_get_sigid(skb) & FAPI_SIG_TYPE_MASK;
+
+ return sig_type == FAPI_SIG_TYPE_RES;
+}
+
+/* True when the signal id's type bits mark an indication (.ind). */
+static inline bool fapi_is_ind(struct sk_buff *skb)
+{
+ u16 sig_type = fapi_get_sigid(skb) & FAPI_SIG_TYPE_MASK;
+
+ return sig_type == FAPI_SIG_TYPE_IND;
+}
+
+/* Look up the expected (minimum) size for the skb's signal id.
+ *
+ * fapi_size_table: sizes indexed by (id - start_id)
+ * table_size:      number of entries in the table
+ * start_id:        signal id corresponding to entry 0
+ *
+ * Returns 0 when the id falls outside the table (unknown signal).
+ */
+static inline u16 fapi_get_expected_size_table(struct sk_buff *skb, const u16 fapi_size_table[], size_t table_size, u16 start_id)
+{
+ u16 id = fapi_get_sigid(skb);
+
+ if (id < start_id)
+  return 0;
+
+ /* Fix: compare in size_t; the previous (u16) cast truncated table_size
+  * before the bounds check.
+  */
+ if ((size_t)(id - start_id) >= table_size)
+  return 0;
+
+ return fapi_size_table[id - start_id];
+}
+
+static inline u16 fapi_get_expected_size(struct sk_buff *skb)
+{
+ static const u16 fapi_ma_req_size_table[] = {
+ fapi_sig_size(ma_unitdata_req),
+ 0,
+ fapi_sig_size(ma_spare_1_req),
+ fapi_sig_size(ma_spare_2_req),
+ fapi_sig_size(ma_spare_3_req),
+ fapi_sig_size(ma_spare_signal_1_req),
+ fapi_sig_size(ma_spare_signal_2_req),
+ fapi_sig_size(ma_spare_signal_3_req),
+ };
+
+ static const u16 fapi_mlme_req_size_table[] = {
+ fapi_sig_size(mlme_get_req),
+ fapi_sig_size(mlme_set_req),
+ fapi_sig_size(mlme_powermgt_req),
+ fapi_sig_size(mlme_add_info_elements_req),
+ fapi_sig_size(mlme_add_scan_req),
+ fapi_sig_size(mlme_del_scan_req),
+ fapi_sig_size(mlme_add_vif_req),
+ fapi_sig_size(mlme_del_vif_req),
+ fapi_sig_size(mlme_start_req),
+ fapi_sig_size(mlme_set_channel_req),
+ fapi_sig_size(mlme_connect_req),
+ fapi_sig_size(mlme_reassociate_req),
+ fapi_sig_size(mlme_roam_req),
+ fapi_sig_size(mlme_disconnect_req),
+ fapi_sig_size(mlme_register_action_frame_req),
+ fapi_sig_size(mlme_send_frame_req),
+ fapi_sig_size(mlme_reset_dwell_time_req),
+ fapi_sig_size(mlme_set_traffic_parameters_req),
+ fapi_sig_size(mlme_del_traffic_parameters_req),
+ fapi_sig_size(mlme_set_packet_filter_req),
+ fapi_sig_size(mlme_set_ip_address_req),
+ fapi_sig_size(mlme_set_acl_req),
+ 0,
+ fapi_sig_size(mlme_setkeys_req),
+ 0,
+ fapi_sig_size(mlme_get_key_sequence_req),
+ 0,
+ fapi_sig_size(mlme_set_pmk_req),
+ 0,
+ 0,
+ fapi_sig_size(mlme_set_cached_channels_req),
+ fapi_sig_size(mlme_set_whitelist_ssid_req),
+ fapi_sig_size(mlme_tdls_action_req),
+ fapi_sig_size(mlme_channel_switch_req),
+ fapi_sig_size(mlme_monitor_rssi_req),
+ fapi_sig_size(mlme_start_link_statistics_req),
+ fapi_sig_size(mlme_stop_link_statistics_req),
+ 0,
+ fapi_sig_size(mlme_set_pno_list_req),
+ fapi_sig_size(mlme_host_state_req),
+ fapi_sig_size(mlme_add_range_req),
+ fapi_sig_size(mlme_del_range_req),
+ fapi_sig_size(mlme_set_noa_req),
+ fapi_sig_size(mlme_set_ctwindow_req),
+ fapi_sig_size(mlme_nan_start_req),
+ fapi_sig_size(mlme_nan_config_req),
+ fapi_sig_size(mlme_nan_publish_req),
+ fapi_sig_size(mlme_nan_subscribe_req),
+ fapi_sig_size(mlme_nan_followup_req),
+ fapi_sig_size(mlme_unset_channel_req),
+ fapi_sig_size(mlme_set_country_req),
+ fapi_sig_size(mlme_forward_beacon_req),
+ fapi_sig_size(mlme_ndp_setup_req),
+ fapi_sig_size(mlme_ndp_setup_response_req),
+ fapi_sig_size(mlme_ndp_terminate_req),
+ fapi_sig_size(mlme_nan_add_range_req),
+ fapi_sig_size(mlme_nan_del_range_req),
+ fapi_sig_size(mlme_spare_4_req),
+ fapi_sig_size(mlme_spare_5_req),
+ fapi_sig_size(mlme_spare_6_req),
+ fapi_sig_size(mlme_install_apf_req),
+ fapi_sig_size(mlme_read_apf_req),
+ fapi_sig_size(mlme_set_nss_req),
+ fapi_sig_size(mlme_arp_detect_req),
+ fapi_sig_size(mlme_spare_signal_1_req),
+ fapi_sig_size(mlme_spare_signal_2_req),
+ fapi_sig_size(mlme_spare_signal_3_req),
+ };
+
+ static const u16 fapi_debug_req_size_table[] = {
+ fapi_sig_size(debug_spare_1_req),
+ fapi_sig_size(debug_spare_2_req),
+ fapi_sig_size(debug_spare_3_req),
+ fapi_sig_size(debug_spare_signal_1_req),
+ fapi_sig_size(debug_spare_signal_2_req),
+ fapi_sig_size(debug_spare_signal_3_req),
+ };
+
+ static const u16 fapi_test_req_size_table[] = {
+ fapi_sig_size(test_block_requests_req),
+ fapi_sig_size(test_panic_req),
+ fapi_sig_size(test_suspend_req),
+ fapi_sig_size(test_resume_req),
+ fapi_sig_size(radio_logging_req),
+ fapi_sig_size(wlanlite_cw_start_req),
+ fapi_sig_size(wlanlite_cw_stop_req),
+ fapi_sig_size(wlanlite_tx_set_params_req),
+ fapi_sig_size(wlanlite_tx_start_req),
+ fapi_sig_size(wlanlite_tx_read_req),
+ fapi_sig_size(wlanlite_tx_stop_req),
+ fapi_sig_size(wlanlite_rx_start_req),
+ fapi_sig_size(wlanlite_rx_read_req),
+ fapi_sig_size(wlanlite_rx_stop_req),
+ fapi_sig_size(wlanlite_status_req),
+ fapi_sig_size(test_pmalloc_req),
+ fapi_sig_size(test_configure_monitor_mode_req),
+ 0,
+ fapi_sig_size(test_check_fw_alive_req),
+ fapi_sig_size(debug_generic_req),
+ fapi_sig_size(debug_pkt_sink_start_req),
+ fapi_sig_size(debug_pkt_sink_stop_req),
+ fapi_sig_size(debug_pkt_sink_report_req),
+ fapi_sig_size(debug_pkt_gen_start_req),
+ fapi_sig_size(debug_pkt_gen_stop_req),
+ fapi_sig_size(debug_pkt_gen_report_req),
+ fapi_sig_size(wlanlite_radio_select_req),
+ fapi_sig_size(test_hip_tester_start_req),
+ fapi_sig_size(test_hip_tester_stop_req),
+ fapi_sig_size(test_hip_tester_set_params_req),
+ fapi_sig_size(test_hip_tester_report_req),
+ fapi_sig_size(test_bist_get_tx_gain_req),
+ fapi_sig_size(test_spare_1_req),
+ fapi_sig_size(test_spare_2_req),
+ fapi_sig_size(test_spare_3_req),
+ fapi_sig_size(test_spare_signal_1_req),
+ fapi_sig_size(test_spare_signal_2_req),
+ fapi_sig_size(test_spare_signal_3_req),
+ };
+
+ static const u16 fapi_ma_cfm_size_table[] = {
+ fapi_sig_size(ma_unitdata_cfm),
+ 0,
+ fapi_sig_size(ma_spare_1_cfm),
+ fapi_sig_size(ma_spare_2_cfm),
+ fapi_sig_size(ma_spare_3_cfm),
+ fapi_sig_size(ma_spare_signal_1_cfm),
+ fapi_sig_size(ma_spare_signal_2_cfm),
+ fapi_sig_size(ma_spare_signal_3_cfm),
+ };
+
+ static const u16 fapi_mlme_cfm_size_table[] = {
+ fapi_sig_size(mlme_get_cfm),
+ fapi_sig_size(mlme_set_cfm),
+ fapi_sig_size(mlme_powermgt_cfm),
+ fapi_sig_size(mlme_add_info_elements_cfm),
+ fapi_sig_size(mlme_add_scan_cfm),
+ fapi_sig_size(mlme_del_scan_cfm),
+ fapi_sig_size(mlme_add_vif_cfm),
+ fapi_sig_size(mlme_del_vif_cfm),
+ fapi_sig_size(mlme_start_cfm),
+ fapi_sig_size(mlme_set_channel_cfm),
+ fapi_sig_size(mlme_connect_cfm),
+ fapi_sig_size(mlme_reassociate_cfm),
+ fapi_sig_size(mlme_roam_cfm),
+ fapi_sig_size(mlme_disconnect_cfm),
+ fapi_sig_size(mlme_register_action_frame_cfm),
+ fapi_sig_size(mlme_send_frame_cfm),
+ fapi_sig_size(mlme_reset_dwell_time_cfm),
+ fapi_sig_size(mlme_set_traffic_parameters_cfm),
+ fapi_sig_size(mlme_del_traffic_parameters_cfm),
+ fapi_sig_size(mlme_set_packet_filter_cfm),
+ fapi_sig_size(mlme_set_ip_address_cfm),
+ fapi_sig_size(mlme_set_acl_cfm),
+ 0,
+ fapi_sig_size(mlme_setkeys_cfm),
+ 0,
+ fapi_sig_size(mlme_get_key_sequence_cfm),
+ 0,
+ fapi_sig_size(mlme_set_pmk_cfm),
+ 0,
+ 0,
+ fapi_sig_size(mlme_set_cached_channels_cfm),
+ fapi_sig_size(mlme_set_whitelist_ssid_cfm),
+ fapi_sig_size(mlme_tdls_action_cfm),
+ fapi_sig_size(mlme_channel_switch_cfm),
+ fapi_sig_size(mlme_monitor_rssi_cfm),
+ fapi_sig_size(mlme_start_link_statistics_cfm),
+ fapi_sig_size(mlme_stop_link_statistics_cfm),
+ 0,
+ fapi_sig_size(mlme_set_pno_list_cfm),
+ fapi_sig_size(mlme_host_state_cfm),
+ fapi_sig_size(mlme_add_range_cfm),
+ fapi_sig_size(mlme_del_range_cfm),
+ fapi_sig_size(mlme_set_noa_cfm),
+ fapi_sig_size(mlme_set_ctwindow_cfm),
+ fapi_sig_size(mlme_nan_start_cfm),
+ fapi_sig_size(mlme_nan_config_cfm),
+ fapi_sig_size(mlme_nan_publish_cfm),
+ fapi_sig_size(mlme_nan_subscribe_cfm),
+ fapi_sig_size(mlme_nan_followup_cfm),
+ fapi_sig_size(mlme_unset_channel_cfm),
+ fapi_sig_size(mlme_set_country_cfm),
+ fapi_sig_size(mlme_forward_beacon_cfm),
+ fapi_sig_size(mlme_ndp_setup_cfm),
+ fapi_sig_size(mlme_ndp_setup_response_cfm),
+ fapi_sig_size(mlme_ndp_terminate_cfm),
+ fapi_sig_size(mlme_nan_add_range_cfm),
+ fapi_sig_size(mlme_nan_del_range_cfm),
+ fapi_sig_size(mlme_spare_4_cfm),
+ fapi_sig_size(mlme_spare_5_cfm),
+ fapi_sig_size(mlme_spare_6_cfm),
+ fapi_sig_size(mlme_install_apf_cfm),
+ fapi_sig_size(mlme_read_apf_cfm),
+ fapi_sig_size(mlme_set_nss_cfm),
+ fapi_sig_size(mlme_arp_detect_cfm),
+ fapi_sig_size(mlme_spare_signal_1_cfm),
+ fapi_sig_size(mlme_spare_signal_2_cfm),
+ fapi_sig_size(mlme_spare_signal_3_cfm),
+ };
+
+ static const u16 fapi_debug_cfm_size_table[] = {
+ fapi_sig_size(debug_spare_1_cfm),
+ fapi_sig_size(debug_spare_2_cfm),
+ fapi_sig_size(debug_spare_3_cfm),
+ fapi_sig_size(debug_spare_signal_1_cfm),
+ fapi_sig_size(debug_spare_signal_2_cfm),
+ fapi_sig_size(debug_spare_signal_3_cfm),
+ };
+
+ static const u16 fapi_test_cfm_size_table[] = {
+ fapi_sig_size(radio_logging_cfm),
+ fapi_sig_size(wlanlite_cw_start_cfm),
+ fapi_sig_size(wlanlite_tx_set_params_cfm),
+ fapi_sig_size(wlanlite_cw_stop_cfm),
+ fapi_sig_size(wlanlite_tx_start_cfm),
+ fapi_sig_size(wlanlite_tx_read_cfm),
+ fapi_sig_size(wlanlite_tx_stop_cfm),
+ fapi_sig_size(wlanlite_rx_start_cfm),
+ fapi_sig_size(wlanlite_rx_read_cfm),
+ fapi_sig_size(wlanlite_rx_stop_cfm),
+ fapi_sig_size(wlanlite_status_cfm),
+ fapi_sig_size(test_pmalloc_cfm),
+ fapi_sig_size(test_configure_monitor_mode_cfm),
+ 0,
+ fapi_sig_size(test_check_fw_alive_cfm),
+ fapi_sig_size(test_suspend_cfm),
+ fapi_sig_size(test_resume_cfm),
+ fapi_sig_size(debug_generic_cfm),
+ fapi_sig_size(wlanlite_radio_select_cfm),
+ fapi_sig_size(test_hip_tester_start_cfm),
+ fapi_sig_size(test_hip_tester_stop_cfm),
+ fapi_sig_size(test_hip_tester_set_params_cfm),
+ fapi_sig_size(test_bist_get_tx_gain_cfm),
+ fapi_sig_size(test_spare_1_cfm),
+ fapi_sig_size(test_spare_2_cfm),
+ fapi_sig_size(test_spare_3_cfm),
+ fapi_sig_size(test_spare_signal_1_cfm),
+ fapi_sig_size(test_spare_signal_2_cfm),
+ fapi_sig_size(test_spare_signal_3_cfm),
+ };
+
+ static const u16 fapi_ma_ind_size_table[] = {
+ fapi_sig_size(ma_unitdata_ind),
+ fapi_sig_size(ma_blockack_ind),
+ fapi_sig_size(ma_spare_1_ind),
+ fapi_sig_size(ma_spare_2_ind),
+ fapi_sig_size(ma_spare_3_ind),
+ fapi_sig_size(ma_spare_signal_1_ind),
+ fapi_sig_size(ma_spare_signal_2_ind),
+ fapi_sig_size(ma_spare_signal_3_ind),
+ };
+
+ static const u16 fapi_mlme_ind_size_table[] = {
+ fapi_sig_size(mlme_scan_ind),
+ fapi_sig_size(mlme_scan_done_ind),
+ fapi_sig_size(mlme_listen_end_ind),
+ fapi_sig_size(mlme_connect_ind),
+ fapi_sig_size(mlme_connected_ind),
+ fapi_sig_size(mlme_reassociate_ind),
+ fapi_sig_size(mlme_roam_ind),
+ fapi_sig_size(mlme_roamed_ind),
+ fapi_sig_size(mlme_disconnect_ind),
+ fapi_sig_size(mlme_disconnected_ind),
+ fapi_sig_size(mlme_procedure_started_ind),
+ fapi_sig_size(mlme_mic_failure_ind),
+ fapi_sig_size(mlme_frame_transmission_ind),
+ fapi_sig_size(mlme_received_frame_ind),
+ 0,
+ fapi_sig_size(mlme_tdls_peer_ind),
+ 0,
+ 0,
+ fapi_sig_size(mlme_rssi_report_ind),
+ fapi_sig_size(mlme_ac_priority_update_ind),
+ fapi_sig_size(mlme_range_ind),
+ fapi_sig_size(mlme_range_done_ind),
+ fapi_sig_size(mlme_event_log_ind),
+ fapi_sig_size(mlme_nan_event_ind),
+ fapi_sig_size(mlme_nan_service_ind),
+ fapi_sig_size(mlme_nan_followup_ind),
+ fapi_sig_size(mlme_channel_switched_ind),
+ fapi_sig_size(mlme_synchronised_ind),
+ fapi_sig_size(mlme_beacon_reporting_event_ind),
+ fapi_sig_size(mlme_spare_3_ind),
+ fapi_sig_size(mlme_spare_4_ind),
+ fapi_sig_size(mlme_ndp_setup_ind),
+ fapi_sig_size(mlme_ndp_requested_ind),
+ fapi_sig_size(mlme_ndp_setup_response_ind),
+ fapi_sig_size(mlme_ndp_terminated_ind),
+ fapi_sig_size(mlme_nan_add_range_ind),
+ fapi_sig_size(mlme_spare_5_ind),
+ fapi_sig_size(mlme_spare_signal_1_ind),
+ fapi_sig_size(mlme_spare_signal_2_ind),
+ fapi_sig_size(mlme_spare_signal_3_ind),
+ };
+
+ static const u16 fapi_debug_ind_size_table[] = {
+ fapi_sig_size(debug_word12_ind),
+ fapi_sig_size(debug_fault_ind),
+ fapi_sig_size(debug_words_ind),
+ fapi_sig_size(debug_spare_2_ind),
+ fapi_sig_size(debug_spare_3_ind),
+ fapi_sig_size(debug_spare_4_ind),
+ fapi_sig_size(debug_spare_signal_1_ind),
+ fapi_sig_size(debug_spare_signal_2_ind),
+ fapi_sig_size(debug_spare_signal_3_ind),
+ };
+
+ static const u16 fapi_test_ind_size_table[] = {
+ fapi_sig_size(radio_logging_ind),
+ fapi_sig_size(debug_generic_ind),
+ fapi_sig_size(debug_pkt_sink_report_ind),
+ fapi_sig_size(debug_pkt_gen_report_ind),
+ fapi_sig_size(test_hip_tester_report_ind),
+ fapi_sig_size(test_spare_1_ind),
+ fapi_sig_size(test_spare_2_ind),
+ fapi_sig_size(test_spare_3_ind),
+ fapi_sig_size(test_spare_signal_1_ind),
+ fapi_sig_size(test_spare_signal_2_ind),
+ fapi_sig_size(test_spare_signal_3_ind),
+ };
+
+ static const u16 fapi_mlme_res_size_table[] = {
+ fapi_sig_size(mlme_connect_res),
+ fapi_sig_size(mlme_connected_res),
+ fapi_sig_size(mlme_reassociate_res),
+ fapi_sig_size(mlme_roamed_res),
+ fapi_sig_size(mlme_tdls_peer_res),
+ fapi_sig_size(mlme_synchronised_res),
+ fapi_sig_size(mlme_spare_2_res),
+ fapi_sig_size(mlme_spare_3_res),
+ fapi_sig_size(mlme_spare_4_res),
+ fapi_sig_size(mlme_spare_signal_1_res),
+ fapi_sig_size(mlme_spare_signal_2_res),
+ fapi_sig_size(mlme_spare_signal_3_res),
+ };
+
+ static const u16 fapi_ma_res_size_table[] = {
+ fapi_sig_size(ma_spare_1_res),
+ fapi_sig_size(ma_spare_2_res),
+ fapi_sig_size(ma_spare_3_res),
+ fapi_sig_size(ma_spare_signal_1_res),
+ fapi_sig_size(ma_spare_signal_2_res),
+ fapi_sig_size(ma_spare_signal_3_res),
+ };
+
+ static const u16 fapi_debug_res_size_table[] = {
+ fapi_sig_size(debug_spare_1_res),
+ fapi_sig_size(debug_spare_2_res),
+ fapi_sig_size(debug_spare_3_res),
+ fapi_sig_size(debug_spare_signal_1_res),
+ fapi_sig_size(debug_spare_signal_2_res),
+ fapi_sig_size(debug_spare_signal_3_res),
+ };
+
+ static const u16 fapi_test_res_size_table[] = {
+ fapi_sig_size(test_spare_1_res),
+ fapi_sig_size(test_spare_2_res),
+ fapi_sig_size(test_spare_3_res),
+ fapi_sig_size(test_spare_signal_1_res),
+ fapi_sig_size(test_spare_signal_2_res),
+ fapi_sig_size(test_spare_signal_3_res),
+ };
+
+ if (fapi_is_mlme(skb) && fapi_is_req(skb))
+ return fapi_get_expected_size_table(skb, fapi_mlme_req_size_table, ARRAY_SIZE(fapi_mlme_req_size_table), MLME_GET_REQ);
+ else if (fapi_is_mlme(skb) && fapi_is_cfm(skb))
+ return fapi_get_expected_size_table(skb, fapi_mlme_cfm_size_table, ARRAY_SIZE(fapi_mlme_cfm_size_table), MLME_GET_CFM);
+ else if (fapi_is_mlme(skb) && fapi_is_res(skb))
+ return fapi_get_expected_size_table(skb, fapi_mlme_res_size_table, ARRAY_SIZE(fapi_mlme_res_size_table), MLME_CONNECT_RES);
+ else if (fapi_is_mlme(skb) && fapi_is_ind(skb))
+ return fapi_get_expected_size_table(skb, fapi_mlme_ind_size_table, ARRAY_SIZE(fapi_mlme_ind_size_table), MLME_SCAN_IND);
+ else if (fapi_is_ma(skb) && fapi_is_req(skb))
+ return fapi_get_expected_size_table(skb, fapi_ma_req_size_table, ARRAY_SIZE(fapi_ma_req_size_table), MA_UNITDATA_REQ);
+ else if (fapi_is_ma(skb) && fapi_is_cfm(skb))
+ return fapi_get_expected_size_table(skb, fapi_ma_cfm_size_table, ARRAY_SIZE(fapi_ma_cfm_size_table), MA_UNITDATA_CFM);
+ else if (fapi_is_ma(skb) && fapi_is_ind(skb))
+ return fapi_get_expected_size_table(skb, fapi_ma_ind_size_table, ARRAY_SIZE(fapi_ma_ind_size_table), MA_UNITDATA_IND);
+ else if (fapi_is_debug(skb) && fapi_is_req(skb))
+ return fapi_get_expected_size_table(skb, fapi_debug_req_size_table, ARRAY_SIZE(fapi_debug_req_size_table), DEBUG_GENERIC_REQ);
+ else if (fapi_is_debug(skb) && fapi_is_cfm(skb))
+ return fapi_get_expected_size_table(skb, fapi_debug_cfm_size_table, ARRAY_SIZE(fapi_debug_cfm_size_table), DEBUG_GENERIC_CFM);
+ else if (fapi_is_debug(skb) && fapi_is_ind(skb))
+ return fapi_get_expected_size_table(skb, fapi_debug_ind_size_table, ARRAY_SIZE(fapi_debug_ind_size_table), DEBUG_WORD12IND);
+ else if (fapi_is_test(skb) && fapi_is_req(skb))
+ return fapi_get_expected_size_table(skb, fapi_test_req_size_table, ARRAY_SIZE(fapi_test_req_size_table), TEST_BLOCK_REQUESTS_REQ);
+ else if (fapi_is_test(skb) && fapi_is_cfm(skb))
+ return fapi_get_expected_size_table(skb, fapi_test_cfm_size_table, ARRAY_SIZE(fapi_test_cfm_size_table), RADIO_LOGGING_CFM);
+ else if (fapi_is_test(skb) && fapi_is_ind(skb))
+ return fapi_get_expected_size_table(skb, fapi_test_ind_size_table, ARRAY_SIZE(fapi_test_ind_size_table), RADIO_LOGGING_IND);
+ else if (fapi_is_mlme(skb) && fapi_is_res(skb))
+ return fapi_get_expected_size_table(skb, fapi_ma_res_size_table, ARRAY_SIZE(fapi_ma_res_size_table), MA_SPARE_SIGNAL_1_RES);
+ else if (fapi_is_mlme(skb) && fapi_is_res(skb))
+ return fapi_get_expected_size_table(skb, fapi_debug_res_size_table, ARRAY_SIZE(fapi_debug_res_size_table), DEBUG_SPARE_SIGNAL_1_RES);
+ else if (fapi_is_mlme(skb) && fapi_is_res(skb))
+ return fapi_get_expected_size_table(skb, fapi_test_res_size_table, ARRAY_SIZE(fapi_test_res_size_table), TEST_SPARE_SIGNAL_1_RES);
+
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _FAPI_H__ */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+#include "dev.h"
+#include "fapi.h"
+#include "fw_test.h"
+#include "debug.h"
+#include "mgt.h"
+#include "mlme.h"
+#include "netif.h"
+#include "ba.h"
+#include "sap_mlme.h"
+
+static void slsi_fw_test_save_frame(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *saved_skbs[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1], struct sk_buff *skb, bool udi_header)
+{
+ u16 vif;
+
+ skb = slsi_skb_copy(skb, GFP_KERNEL);
+
+ if (udi_header)
+ skb_pull(skb, sizeof(struct udi_msg_t));
+
+ vif = fapi_get_vif(skb);
+
+ SLSI_DBG3(sdev, SLSI_FW_TEST, "sig:0x%.4X, vif:%d\n", fapi_get_sigid(skb), vif);
+ slsi_debug_frame(sdev, NULL, skb, "SAVE");
+
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ slsi_kfree_skb(saved_skbs[vif]);
+ saved_skbs[vif] = skb;
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+}
+
+static void slsi_fw_test_process_frame(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *skb, bool udi_header)
+{
+ u16 vif;
+
+ skb = slsi_skb_copy(skb, GFP_KERNEL);
+
+ if (udi_header)
+ skb_pull(skb, sizeof(struct udi_msg_t));
+
+ vif = fapi_get_vif(skb);
+
+ SLSI_DBG3(sdev, SLSI_FW_TEST, "sig:0x%.4X, vif:%d\n", fapi_get_sigid(skb), vif);
+ slsi_debug_frame(sdev, NULL, skb, "PROCESS");
+
+ slsi_skb_work_enqueue(&fwtest->fw_test_work, skb);
+}
+
+int slsi_fw_test_signal(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ u16 vif = fapi_get_vif(skb);
+
+ /* Atleast one write to via the UDI interface */
+ fwtest->fw_test_enabled = true;
+ SLSI_DBG3(sdev, SLSI_FW_TEST, "0x%p: sig:0x%.4X, vif:%d\n", skb, fapi_get_sigid(skb), vif);
+
+ if (WARN(vif > CONFIG_SCSC_WLAN_MAX_INTERFACES, "vif(%d) > CONFIG_SCSC_WLAN_MAX_INTERFACES", vif))
+ return -EINVAL;
+
+ switch (fapi_get_sigid(skb)) {
+ case MLME_ADD_VIF_REQ:
+ SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Save MLME_ADD_VIF_REQ(0x%.4X, vif:%d)\n", skb, fapi_get_sigid(skb), vif);
+ slsi_fw_test_save_frame(sdev, fwtest, fwtest->mlme_add_vif_req, skb, false);
+ slsi_fw_test_process_frame(sdev, fwtest, skb, false);
+ break;
+ case MLME_CONNECT_REQ:
+ SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Save MLME_CONNECT_REQ(0x%.4X, vif:%d)\n", skb, fapi_get_sigid(skb), vif);
+ slsi_fw_test_save_frame(sdev, fwtest, fwtest->mlme_connect_req, skb, false);
+ break;
+ case MLME_DEL_VIF_REQ:
+ SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Save MLME_DEL_VIF_REQ(0x%.4X, vif:%d)\n", skb, fapi_get_sigid(skb), vif);
+ slsi_fw_test_process_frame(sdev, fwtest, skb, false);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
/* Inspect a UDI-logged signal (still carrying its udi_msg_t header) and,
 * for firmware->host signals that affect connection state, save or queue a
 * copy for deferred processing. No-op until fw test mode has been enabled
 * by a UDI write (see slsi_fw_test_signal()). Always returns 0.
 */
int slsi_fw_test_signal_with_udi_header(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
{
	/* The FAPI signal header follows the UDI header in the skb data. */
	struct udi_msg_t *udi_msg = (struct udi_msg_t *)skb->data;
	struct fapi_vif_signal_header *fapi_header = (struct fapi_vif_signal_header *)(skb->data + sizeof(struct udi_msg_t));

	if (!fwtest->fw_test_enabled)
		return 0;

	SLSI_DBG3(sdev, SLSI_FW_TEST, "0x%p: sig:0x%.4X, vif:%d\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));

	/* Only firmware->host traffic is of interest here. */
	if (udi_msg->direction == SLSI_LOG_DIRECTION_TO_HOST) {
		switch (le16_to_cpu(fapi_header->id)) {
		case MLME_DISCONNECT_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_DISCONNECT_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_DISCONNECTED_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_DISCONNECTED_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_CONNECT_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_CONNECT_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_CONNECTED_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_CONNECTED_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_ROAMED_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_ROAMED_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_TDLS_PEER_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_TDLS_PEER_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_CONNECT_CFM:
			/* Saved for later use by slsi_fw_test_connect_station(). */
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Save MLME_CONNECT_CFM(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_save_frame(sdev, fwtest, fwtest->mlme_connect_cfm, skb, true);
			break;
		case MLME_PROCEDURE_STARTED_IND:
			/* Both saved (assoc request data) and queued for processing. */
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Save MLME_PROCEDURE_STARTED_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_save_frame(sdev, fwtest, fwtest->mlme_procedure_started_ind, skb, true);
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_PROCEDURE_STARTED_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		case MLME_START_CFM:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MLME_START_CFM(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			sdev->device_config.ap_disconnect_ind_timeout = SLSI_DEFAULT_AP_DISCONNECT_IND_TIMEOUT;
			break;
		case MA_BLOCKACK_IND:
			SLSI_DBG2(sdev, SLSI_FW_TEST, "0x%p: Process MA_BLOCKACK_IND(0x%.4X, vif:%d)\n", skb, le16_to_cpu(fapi_header->id), le16_to_cpu(fapi_header->vif));
			slsi_fw_test_process_frame(sdev, fwtest, skb, true);
			break;
		default:
			break;
		}
	}

	return 0;
}
+
/* Handle a roam on an already-connected station vif: rebind the existing
 * peer to the new BSSID and refresh its assoc req/rsp records from the saved
 * mlme_procedure_started_ind and @skb. Caller must hold ndev_vif->vif_mutex.
 */
static void slsi_fw_test_connect_station_roam(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	struct sk_buff *mlme_procedure_started_ind;

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Station Connect(vif:%d) Roam\n", ndev_vif->ifnum);

	if (WARN(!ndev_vif->is_fw_test, "!is_fw_test"))
		return;

	if (WARN(!ndev_vif->activated, "Not Activated"))
		return;

	if (WARN(ndev_vif->vif_type != FAPI_VIFTYPE_STATION, "Not Station Vif"))
		return;

	if (WARN(!peer, "peer not found"))
		return;

	/* Detach the saved ind under the lock; ownership passes to us and then
	 * to slsi_peer_update_assoc_req() below.
	 */
	slsi_spinlock_lock(&fwtest->fw_test_lock);
	mlme_procedure_started_ind = fwtest->mlme_procedure_started_ind[ndev_vif->ifnum];
	fwtest->mlme_procedure_started_ind[ndev_vif->ifnum] = NULL;
	slsi_spinlock_unlock(&fwtest->fw_test_lock);

	if (WARN(!mlme_procedure_started_ind, "mlme_procedure_started_ind not found"))
		return;

	/* Drop all block-ack state from the old association. */
	slsi_rx_ba_stop_all(dev, peer);

	SLSI_ETHER_COPY(peer->address, mgmt->bssid);
	slsi_peer_update_assoc_req(sdev, dev, peer, mlme_procedure_started_ind);
	slsi_peer_update_assoc_rsp(sdev, dev, peer, slsi_skb_copy(skb, GFP_KERNEL));
}
+
+static void slsi_fw_test_connect_start_station(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ struct sk_buff *ind;
+ struct slsi_peer *peer;
+ u8 bssid[ETH_ALEN];
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Station Connect Start(vif:%d)\n", ndev_vif->ifnum);
+
+ if (WARN(!ndev_vif->is_fw_test, "!is_fw_test"))
+ return;
+ if (WARN(ndev_vif->activated, "Already Activated"))
+ return;
+
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ req = fwtest->mlme_connect_req[ndev_vif->ifnum];
+ cfm = fwtest->mlme_connect_cfm[ndev_vif->ifnum];
+ ind = fwtest->mlme_procedure_started_ind[ndev_vif->ifnum];
+ if (req)
+ SLSI_ETHER_COPY(bssid, fapi_get_buff(req, u.mlme_connect_req.bssid));
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(!req, "mlme_connect_req Not found"))
+ return;
+ if (WARN(!cfm, "mlme_connect_cfm Not found"))
+ return;
+
+ ndev_vif->iftype = NL80211_IFTYPE_STATION;
+ dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ ndev_vif->vif_type = FAPI_VIFTYPE_STATION;
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d slsi_vif_activated\n", ndev_vif->ifnum);
+ if (WARN(slsi_vif_activated(sdev, dev) != 0, "slsi_vif_activated() Failed"))
+ return;
+
+ peer = slsi_peer_add(sdev, dev, bssid, SLSI_STA_PEER_QUEUESET + 1);
+ if (WARN(!peer, "slsi_peer_add(%pM) Failed", bssid)) {
+ slsi_vif_deactivated(sdev, dev);
+ return;
+ }
+
+ slsi_peer_update_assoc_req(sdev, dev, peer, slsi_skb_copy(skb, GFP_KERNEL));
+}
+
+static void slsi_fw_test_connect_station(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ struct sk_buff *ind;
+ struct slsi_peer *peer;
+ u16 result;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Station Connect(vif:%d)\n", ndev_vif->ifnum);
+
+ if (WARN(!ndev_vif->is_fw_test, "!is_fw_test"))
+ return;
+
+ result = fapi_get_u16(skb, u.mlme_connect_ind.result_code);
+
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ req = fwtest->mlme_connect_req[ndev_vif->ifnum];
+ cfm = fwtest->mlme_connect_cfm[ndev_vif->ifnum];
+ ind = fwtest->mlme_procedure_started_ind[ndev_vif->ifnum];
+ fwtest->mlme_connect_req[ndev_vif->ifnum] = NULL;
+ fwtest->mlme_connect_cfm[ndev_vif->ifnum] = NULL;
+ fwtest->mlme_procedure_started_ind[ndev_vif->ifnum] = NULL;
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(!req, "mlme_connect_req Not found"))
+ goto exit;
+ if (WARN(!cfm, "mlme_connect_cfm Not found"))
+ goto exit;
+ if (FAPI_RESULTCODE_SUCCESS == result &&
+ WARN(!ind, "mlme_procedure_started_ind Not found"))
+ goto exit;
+ if (FAPI_RESULTCODE_SUCCESS != result)
+ goto exit;
+
+ if (WARN(!ndev_vif->activated, "Not Activated"))
+ return;
+
+ peer = slsi_get_peer_from_mac(sdev, dev, fapi_get_buff(req, u.mlme_connect_req.bssid));
+ if (WARN(!peer, "slsi_get_peer_from_mac(%pM) Failed", fapi_get_buff(req, u.mlme_connect_req.bssid)))
+ goto exit;
+
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+ netif_carrier_on(dev);
+
+exit:
+ slsi_kfree_skb(req);
+ slsi_kfree_skb(cfm);
+ slsi_kfree_skb(ind);
+}
+
+static void slsi_fw_test_started_network(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 result = fapi_get_u16(skb, u.mlme_start_cfm.result_code);
+
+ SLSI_UNUSED_PARAMETER(fwtest);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Start Network(vif:%d)\n", ndev_vif->ifnum);
+
+ if (WARN(!ndev_vif->is_fw_test, "!is_fw_test"))
+ return;
+ if (WARN(ndev_vif->activated, "Already Activated"))
+ return;
+
+ ndev_vif->iftype = NL80211_IFTYPE_AP;
+ dev->ieee80211_ptr->iftype = NL80211_IFTYPE_AP;
+ ndev_vif->vif_type = FAPI_VIFTYPE_AP;
+
+ if (WARN(slsi_vif_activated(sdev, dev) != 0, "slsi_vif_activated() Failed"))
+ return;
+
+ if (FAPI_RESULTCODE_SUCCESS == result)
+ netif_carrier_on(dev);
+}
+
/* Tear down a mirrored network: take the carrier down and deactivate the
 * vif. No-op unless the vif is in fw test mode. Caller must hold
 * ndev_vif->vif_mutex.
 */
static void slsi_fw_test_stop_network(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	SLSI_UNUSED_PARAMETER(fwtest);
	SLSI_UNUSED_PARAMETER(skb);

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	if (!ndev_vif->is_fw_test)
		return;

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Stopping Network(vif:%d)\n", ndev_vif->ifnum);

	if (WARN(!ndev_vif->activated, "Not Activated"))
		return;

	netif_carrier_off(dev);
	slsi_vif_deactivated(sdev, dev);
}
+
+static void slsi_fw_test_connect_start_ap(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = NULL;
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+ u16 peer_index;
+
+ SLSI_UNUSED_PARAMETER(fwtest);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Network Peer Connect Start(vif:%d)\n", ndev_vif->ifnum);
+ WARN(!ndev_vif->is_fw_test, "!is_fw_test");
+
+ if (WARN(!ndev_vif->activated, "Not Activated"))
+ return;
+
+ if (WARN_ON(!ieee80211_is_assoc_req(mgmt->frame_control) &&
+ !ieee80211_is_reassoc_req(mgmt->frame_control)))
+ return;
+ peer_index = fapi_get_u16(skb, u.mlme_procedure_started_ind.peer_index);
+
+ peer = slsi_peer_add(sdev, dev, mgmt->sa, peer_index);
+ if (WARN_ON(!peer))
+ return;
+
+ slsi_peer_update_assoc_req(sdev, dev, peer, slsi_skb_copy(skb, GFP_KERNEL));
+ peer->connected_state = SLSI_STA_CONN_STATE_CONNECTING;
+}
+
+static void slsi_fw_test_connected_network(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = NULL;
+ u16 peer_index = fapi_get_u16(skb, u.mlme_connected_ind.peer_index);
+
+ SLSI_UNUSED_PARAMETER(fwtest);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Network Peer Connect(vif:%d, peer_index:%d)\n", ndev_vif->ifnum, peer_index);
+ WARN(!ndev_vif->is_fw_test, "!is_fw_test");
+
+ if (WARN(!ndev_vif->activated, "Not Activated"))
+ return;
+
+ if (WARN_ON(peer_index > SLSI_PEER_INDEX_MAX))
+ return;
+
+ peer = slsi_get_peer_from_qs(sdev, dev, peer_index - 1);
+ if (WARN(!peer, "Peer(peer_index:%d) Not Found", peer_index))
+ return;
+
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+ peer->connected_state = SLSI_STA_CONN_STATE_CONNECTED;
+
+ slsi_rx_buffered_frames(sdev, dev, peer);
+}
+
/* Setup the NetDev / Peers based on the saved frames */
/* Worker-side handler for a queued mlme_procedure_started_ind: dispatch to
 * the station or AP connect-start path based on the vif type recorded in the
 * saved mlme_add_vif_req. Consumes (frees) @skb on every path. Takes
 * ndev_vif->vif_mutex itself.
 */
static void slsi_fw_test_procedure_started_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *add_vif_req;
	u16 viftype = FAPI_VIFTYPE_STATION;

	if (!ndev_vif->is_fw_test) {
		slsi_kfree_skb(skb);
		return;
	}

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "ProceedureStarted(vif:%d)\n", ndev_vif->ifnum);

	/* Only connection-start procedures are mirrored here. */
	if (fapi_get_u16(skb, u.mlme_procedure_started_ind.procedure_type) != FAPI_PROCEDURETYPE_CONNECTION_STARTED) {
		slsi_kfree_skb(skb);
		return;
	}

	/* Set up the VIF and Data plane ready to go BUT do not open the control port */
	/* Peek the saved add_vif_req under the lock; it stays owned by fwtest. */
	slsi_spinlock_lock(&fwtest->fw_test_lock);
	add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
	if (add_vif_req)
		viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
	slsi_spinlock_unlock(&fwtest->fw_test_lock);

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Start UDI test NetDevice(vif:%d)\n", ndev_vif->ifnum);
	if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
		goto out;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	switch (viftype) {
	case FAPI_VIFTYPE_STATION:
		slsi_fw_test_connect_start_station(sdev, dev, fwtest, skb);
		break;
	case FAPI_VIFTYPE_AP:
		slsi_fw_test_connect_start_ap(sdev, dev, fwtest, skb);
		break;
	default:
		SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
		break;
	}

	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

out:
	slsi_kfree_skb(skb);
}
+
/* Setup the NetDev / Peers based on the saved frames */
/* Worker-side handler for a queued mlme_connect_ind: only station vifs are
 * supported; dispatches to slsi_fw_test_connect_station(). Consumes (frees)
 * @skb on every path. Takes ndev_vif->vif_mutex itself.
 */
static void slsi_fw_test_connect_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *add_vif_req;
	u16 viftype = FAPI_VIFTYPE_STATION;

	if (!ndev_vif->is_fw_test) {
		slsi_kfree_skb(skb);
		return;
	}

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Network Peer Connect(vif:%d)\n", ndev_vif->ifnum);

	/* Peek the saved add_vif_req under the lock; it stays owned by fwtest. */
	slsi_spinlock_lock(&fwtest->fw_test_lock);
	add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
	if (add_vif_req)
		viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
	slsi_spinlock_unlock(&fwtest->fw_test_lock);

	SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Start UDI test NetDevice(vif:%d)\n", ndev_vif->ifnum);
	if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
		goto out;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	switch (viftype) {
	case FAPI_VIFTYPE_STATION:
		slsi_fw_test_connect_station(sdev, dev, fwtest, skb);
		break;
	default:
		SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
		break;
	}

	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

out:
	slsi_kfree_skb(skb);
}
+
+/* MLME-CONNECTED.ind handler: on an AP vif, add the newly connected
+ * peer based on the saved mlme-add-vif request.
+ * Consumes skb on every path.
+ */
+static void slsi_fw_test_connected_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 viftype = FAPI_VIFTYPE_STATION;
+
+ if (!ndev_vif->is_fw_test) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Connected(vif:%d)\n", ndev_vif->ifnum);
+
+ /* Read the vif type recorded when the add-vif request was captured */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
+ goto out;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ switch (viftype) {
+ case FAPI_VIFTYPE_AP:
+ slsi_fw_test_connected_network(sdev, dev, fwtest, skb);
+ break;
+ default:
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
+ break;
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+out:
+ slsi_kfree_skb(skb);
+}
+
+/* MLME-ROAMED.ind handler: on a STA vif, re-run the connect/roam setup
+ * against the saved mlme-add-vif request.
+ * Consumes skb on every path.
+ */
+static void slsi_fw_test_roamed_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 viftype = FAPI_VIFTYPE_STATION;
+
+ if (!ndev_vif->is_fw_test) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Roamed(vif:%d)\n", ndev_vif->ifnum);
+
+ /* Read the vif type recorded when the add-vif request was captured */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
+ goto out;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ switch (viftype) {
+ case FAPI_VIFTYPE_STATION:
+ slsi_fw_test_connect_station_roam(sdev, dev, fwtest, skb);
+ break;
+ default:
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
+ break;
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+out:
+ slsi_kfree_skb(skb);
+}
+
+/* Tear down a STA connection: stop the carrier, remove the single STA
+ * peer (if present) and deactivate the vif.
+ * Caller must hold ndev_vif->vif_mutex; skb is NOT consumed here
+ * (the dispatching _ind handler frees it).
+ */
+static void slsi_fw_test_disconnect_station(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+
+ SLSI_UNUSED_PARAMETER(fwtest);
+ SLSI_UNUSED_PARAMETER(skb);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (!ndev_vif->is_fw_test)
+ return;
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Station Disconnect(vif:%d)\n", ndev_vif->ifnum);
+
+ if (WARN(!ndev_vif->activated, "Not Activated"))
+ return;
+
+ netif_carrier_off(dev);
+ if (peer)
+ slsi_peer_remove(sdev, dev, peer);
+ slsi_vif_deactivated(sdev, dev);
+}
+
+/* Remove a single peer from an AP vif on disconnect.
+ * Caller must hold ndev_vif->vif_mutex; skb is NOT consumed here
+ * (the dispatching _ind handler frees it).
+ */
+static void slsi_fw_test_disconnect_network(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ /* Find the peer based on MAC address, mlme-disconnect-ind and mlme-disconnected-ind
+ * both have the MAC address in the same position.
+ */
+ struct slsi_peer *peer = slsi_get_peer_from_mac(sdev, dev, fapi_get_buff(skb, u.mlme_disconnect_ind.peer_sta_address));
+
+ SLSI_UNUSED_PARAMETER(fwtest);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (!ndev_vif->is_fw_test)
+ return;
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Network Peer Disconnect(vif:%d)\n", ndev_vif->ifnum);
+
+ if (peer)
+ slsi_peer_remove(sdev, dev, peer);
+}
+
+/* Handler for both MLME-DISCONNECT.ind and MLME-DISCONNECTED.ind (see
+ * slsi_fw_test_work dispatch): routes to the STA or AP teardown helper
+ * based on the saved mlme-add-vif request.
+ * Consumes skb on every path.
+ */
+static void slsi_fw_test_disconnected_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 viftype = FAPI_VIFTYPE_STATION;
+
+ if (!ndev_vif->is_fw_test) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ /* Read the vif type recorded when the add-vif request was captured */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
+ goto out;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ switch (viftype) {
+ case FAPI_VIFTYPE_STATION:
+ slsi_fw_test_disconnect_station(sdev, dev, fwtest, skb);
+ break;
+ case FAPI_VIFTYPE_AP:
+ slsi_fw_test_disconnect_network(sdev, dev, fwtest, skb);
+ break;
+ default:
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
+ break;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+out:
+ slsi_kfree_skb(skb);
+}
+
+/* TDLS peer connected: validate the connection limit and peer index,
+ * add the peer, enable QoS (mandatory for TDLS) and migrate queued
+ * packets from the STA queues to the new peer's queues.
+ * Takes vif_mutex internally; skb is NOT consumed here (caller frees).
+ */
+static void slsi_fw_test_tdls_event_connected(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct slsi_peer *peer = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 peer_index = fapi_get_u16(skb, u.mlme_tdls_peer_ind.peer_index);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ ndev_vif->sta.tdls_enabled = true;
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "TDLS connect (vif:%d, peer_index:%d, mac:%pM)\n", fapi_get_vif(skb), peer_index, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));
+
+ /* Equivalent to tdls_peer_sta_records >= SLSI_TDLS_PEER_CONNECTIONS_MAX */
+ if ((ndev_vif->sta.tdls_peer_sta_records) + 1 > SLSI_TDLS_PEER_CONNECTIONS_MAX) {
+ SLSI_NET_ERR(dev, "max TDLS limit reached (peer_index:%d)\n", peer_index);
+ goto out;
+ }
+
+ if (peer_index < SLSI_TDLS_PEER_INDEX_MIN || peer_index > SLSI_TDLS_PEER_INDEX_MAX) {
+ SLSI_NET_ERR(dev, "incorrect index (peer_index:%d)\n", peer_index);
+ goto out;
+ }
+
+ peer = slsi_peer_add(sdev, dev, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address), peer_index);
+ if (!peer) {
+ SLSI_NET_ERR(dev, "peer add failed\n");
+ goto out;
+ }
+
+ /* QoS is mandatory for TDLS - enable QoS for TDLS peer by default */
+ peer->qos_enabled = true;
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+
+ /* move TDLS packets from STA Q to TDLS Q */
+ slsi_tdls_move_packets(sdev, dev, ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET], peer, true);
+
+out:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/* TDLS peer disconnected: look the peer up by MAC, migrate its queued
+ * packets back to the STA queues and remove it.
+ * Takes vif_mutex internally; skb is NOT consumed here (caller frees).
+ */
+static void slsi_fw_test_tdls_event_disconnected(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct slsi_peer *peer = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ SLSI_NET_DBG1(dev, SLSI_MLME, "TDLS dis-connect (vif:%d, mac:%pM)\n", ndev_vif->ifnum, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));
+
+ peer = slsi_get_peer_from_mac(sdev, dev, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));
+ /* aid == 0 is treated as an invalid/uninitialised peer record */
+ if (!peer || (peer->aid == 0)) {
+ WARN_ON(!peer || (peer->aid == 0));
+ SLSI_NET_DBG1(dev, SLSI_MLME, "can't find peer by MAC address\n");
+ goto out;
+ }
+
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+
+ /* move TDLS packets from TDLS Q to STA Q */
+ slsi_tdls_move_packets(sdev, dev, ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET], peer, false);
+ slsi_peer_remove(sdev, dev, peer);
+
+out:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/* MLME-TDLS-PEER.ind handler: only valid on an activated STA vif;
+ * dispatches on tdls_event to the connected/disconnected helpers
+ * (DISCOVERED is a no-op). Consumes skb on every path.
+ */
+static void slsi_fw_test_tdls_peer_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 vif_type = 0;
+ u16 tdls_event;
+
+ if (!ndev_vif->is_fw_test) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+ if (WARN(!ndev_vif->activated, "Not Activated")) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+ /* Read the vif type recorded when the add-vif request was captured */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ vif_type = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ if (WARN(vif_type != FAPI_VIFTYPE_STATION, "Not STA VIF")) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ tdls_event = fapi_get_u16(skb, u.mlme_tdls_peer_ind.tdls_event);
+ SLSI_NET_DBG1(dev, SLSI_MLME, "TDLS peer(vif:%d tdls_event:%d)\n", ndev_vif->ifnum, tdls_event);
+ switch (tdls_event) {
+ case FAPI_TDLSEVENT_CONNECTED:
+ slsi_fw_test_tdls_event_connected(sdev, dev, skb);
+ break;
+ case FAPI_TDLSEVENT_DISCONNECTED:
+ slsi_fw_test_tdls_event_disconnected(sdev, dev, skb);
+ break;
+ case FAPI_TDLSEVENT_DISCOVERED:
+ /* nothing to do */
+ break;
+ default:
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d tdls_event:%d not supported\n", ndev_vif->ifnum, tdls_event);
+ break;
+ }
+ slsi_kfree_skb(skb);
+}
+
+/* Setup the NetDev */
+/* MLME-START.cfm handler: on an AP vif, bring up the network based on
+ * the saved mlme-add-vif request. Consumes skb on every path.
+ */
+static void slsi_fw_test_start_cfm(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 viftype = FAPI_VIFTYPE_UNSYNCHRONISED;
+
+ if (!ndev_vif->is_fw_test) {
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Network Start(vif:%d)\n", ndev_vif->ifnum);
+
+ /* Read the vif type recorded when the add-vif request was captured */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "Start UDI test NetDevice(vif:%d)\n", ndev_vif->ifnum);
+ if (WARN(!add_vif_req, "fwtest->mlme_add_vif_req[ndev_vif->ifnum] == NULL"))
+ goto out;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ switch (viftype) {
+ case FAPI_VIFTYPE_AP:
+ slsi_fw_test_started_network(sdev, dev, fwtest, skb);
+ break;
+ default:
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "vif:%d virtual_interface_type:%d NOT SUPPORTED\n", ndev_vif->ifnum, viftype);
+ break;
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+out:
+ slsi_kfree_skb(skb);
+}
+
+/* Capture of an outgoing MLME-ADD-VIF.req: mark this netdev as being
+ * under UDI/firmware test so later indications are replayed here.
+ * Consumes skb.
+ */
+static void slsi_fw_test_add_vif_req(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ /* NOTE(review): sdev is referenced by SLSI_DBG1 below (at least when
+ * debug is compiled in); the UNUSED marker presumably silences
+ * warnings in non-debug builds - confirm before removing it.
+ */
+ SLSI_UNUSED_PARAMETER(sdev);
+ SLSI_UNUSED_PARAMETER(fwtest);
+
+ SLSI_DBG1(sdev, SLSI_FW_TEST, "Mark UDI test NetDevice(vif:%d)\n", fapi_get_vif(skb));
+ ndev_vif->is_fw_test = true;
+ slsi_kfree_skb(skb);
+}
+
+/* Capture of an outgoing MLME-DEL-VIF.req: release all saved signals
+ * for this vif under fw_test_lock, then tear the vif down (AP vifs get
+ * an explicit network stop; others are deactivated if still active)
+ * and clear the fw-test flag. Consumes skb.
+ */
+static void slsi_fw_test_del_vif_req(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *add_vif_req;
+ u16 viftype = FAPI_VIFTYPE_UNSYNCHRONISED;
+
+ SLSI_DBG1(sdev, SLSI_FW_TEST, "Unmark UDI test NetDevice(vif:%d)\n", fapi_get_vif(skb));
+
+ /* Snapshot the vif type, then free and clear every saved signal for
+ * this vif while still holding fw_test_lock.
+ */
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ add_vif_req = fwtest->mlme_add_vif_req[ndev_vif->ifnum];
+ if (add_vif_req)
+ viftype = fapi_get_u16(add_vif_req, u.mlme_add_vif_req.virtual_interface_type);
+ slsi_kfree_skb(fwtest->mlme_add_vif_req[ndev_vif->ifnum]);
+ slsi_kfree_skb(fwtest->mlme_connect_req[ndev_vif->ifnum]);
+ slsi_kfree_skb(fwtest->mlme_connect_cfm[ndev_vif->ifnum]);
+ slsi_kfree_skb(fwtest->mlme_procedure_started_ind[ndev_vif->ifnum]);
+
+ fwtest->mlme_add_vif_req[ndev_vif->ifnum] = NULL;
+ fwtest->mlme_connect_req[ndev_vif->ifnum] = NULL;
+ fwtest->mlme_connect_cfm[ndev_vif->ifnum] = NULL;
+ fwtest->mlme_procedure_started_ind[ndev_vif->ifnum] = NULL;
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ switch (viftype) {
+ /* As there is no specific MLME primitive for shutting down the network
+ * perform an actions on the MLME-DEL-VIF.
+ */
+ case FAPI_VIFTYPE_AP:
+ slsi_fw_test_stop_network(sdev, dev, fwtest, skb);
+ break;
+ default:
+ if (ndev_vif->is_fw_test && ndev_vif->activated) {
+ netif_carrier_off(dev);
+ slsi_vif_deactivated(sdev, dev);
+ }
+ break;
+ }
+ ndev_vif->is_fw_test = false;
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ slsi_kfree_skb(skb);
+}
+
+/* MA-BLOCKACK.ind handler: if this netdev is under fw test, forward the
+ * signal to the normal rx block-ack path (which takes ownership of
+ * skb); otherwise just free it.
+ */
+static void slsi_fw_test_ma_blockack_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_fw_test *fwtest, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ if (ndev_vif->is_fw_test) {
+ SLSI_NET_DBG1(dev, SLSI_FW_TEST, "MA Block Ack Indication(vif:%d)\n", ndev_vif->ifnum);
+ slsi_rx_blockack_ind(sdev, dev, skb);
+ } else {
+ slsi_kfree_skb(skb);
+ }
+}
+
+/* Work-queue handler: drain the captured-signal queue and replay each
+ * MLME/MA signal into the handler matching its signal id, under
+ * netdev_add_remove_mutex so the target netdev cannot disappear.
+ * Signals with vif 0 or with no matching netdev are dropped; every
+ * handler consumes its skb.
+ */
+void slsi_fw_test_work(struct work_struct *work)
+{
+ struct slsi_fw_test *fw_test = container_of(work, struct slsi_fw_test, fw_test_work.work);
+ struct slsi_dev *sdev = fw_test->sdev;
+ struct sk_buff *skb = slsi_skb_work_dequeue(&fw_test->fw_test_work);
+ struct net_device *dev;
+
+ while (skb) {
+ u16 vif = fapi_get_vif(skb);
+
+ SLSI_DBG3(sdev, SLSI_FW_TEST, "0x%p: Signal:0x%.4X, vif:%d\n", skb, fapi_get_sigid(skb), vif);
+
+ if (WARN(!vif, "!vif")) {
+ slsi_kfree_skb(skb);
+ skb = slsi_skb_work_dequeue(&fw_test->fw_test_work);
+ continue;
+ }
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ dev = slsi_get_netdev_locked(sdev, vif);
+ if (!dev) {
+ /* Just ignore the signal. This is valid in some error testing scenarios*/
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ slsi_kfree_skb(skb);
+ skb = slsi_skb_work_dequeue(&fw_test->fw_test_work);
+ continue;
+ }
+
+ /* Each handler below takes ownership of skb */
+ switch (fapi_get_sigid(skb)) {
+ case MLME_PROCEDURE_STARTED_IND:
+ slsi_fw_test_procedure_started_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_CONNECT_IND:
+ slsi_fw_test_connect_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_ROAMED_IND:
+ slsi_fw_test_roamed_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_CONNECTED_IND:
+ slsi_fw_test_connected_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_DISCONNECT_IND:
+ case MLME_DISCONNECTED_IND:
+ slsi_fw_test_disconnected_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_TDLS_PEER_IND:
+ slsi_fw_test_tdls_peer_ind(sdev, dev, fw_test, skb);
+ break;
+ case MLME_START_CFM:
+ slsi_fw_test_start_cfm(sdev, dev, fw_test, skb);
+ break;
+ case MLME_ADD_VIF_REQ:
+ slsi_fw_test_add_vif_req(sdev, dev, fw_test, skb);
+ break;
+ case MLME_DEL_VIF_REQ:
+ slsi_fw_test_del_vif_req(sdev, dev, fw_test, skb);
+ break;
+ case MA_BLOCKACK_IND:
+ slsi_fw_test_ma_blockack_ind(sdev, dev, fw_test, skb);
+ break;
+ default:
+ WARN(1, "Unhandled Signal");
+ slsi_kfree_skb(skb);
+ break;
+ }
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+ skb = slsi_skb_work_dequeue(&fw_test->fw_test_work);
+ }
+}
+
+/* Zero the fw-test context, bind it to its owning device and set up
+ * the spinlock and work queue used to process captured signals.
+ */
+void slsi_fw_test_init(struct slsi_dev *sdev, struct slsi_fw_test *fwtest)
+{
+ SLSI_DBG1(sdev, SLSI_FW_TEST, "\n");
+ memset(fwtest, 0x00, sizeof(*fwtest));
+ fwtest->sdev = sdev;
+ slsi_spinlock_create(&fwtest->fw_test_lock);
+ slsi_skb_work_init(sdev, NULL, &fwtest->fw_test_work, "slsi_wlan_fw_test", slsi_fw_test_work);
+}
+
+/* Disable fw-test processing, stop the work queue, then free every
+ * saved signal under fw_test_lock and wipe the context.
+ * Vif indices run 1..CONFIG_SCSC_WLAN_MAX_INTERFACES (arrays are sized
+ * +1; index 0 is unused).
+ */
+void slsi_fw_test_deinit(struct slsi_dev *sdev, struct slsi_fw_test *fwtest)
+{
+ int i;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ SLSI_DBG1(sdev, SLSI_FW_TEST, "\n");
+ fwtest->fw_test_enabled = false;
+ slsi_skb_work_deinit(&fwtest->fw_test_work);
+ slsi_spinlock_lock(&fwtest->fw_test_lock);
+ for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
+ slsi_kfree_skb(fwtest->mlme_add_vif_req[i]);
+ slsi_kfree_skb(fwtest->mlme_connect_req[i]);
+ slsi_kfree_skb(fwtest->mlme_connect_cfm[i]);
+ slsi_kfree_skb(fwtest->mlme_procedure_started_ind[i]);
+
+ fwtest->mlme_add_vif_req[i] = NULL;
+ fwtest->mlme_connect_req[i] = NULL;
+ fwtest->mlme_connect_cfm[i] = NULL;
+ fwtest->mlme_procedure_started_ind[i] = NULL;
+ }
+ slsi_spinlock_unlock(&fwtest->fw_test_lock);
+ memset(fwtest, 0x00, sizeof(struct slsi_fw_test));
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_FW_TEST_H__
+#define __SLSI_FW_TEST_H__
+
+#include "dev.h"
+
+/* Context for UDI/firmware-test signal capture and replay. The saved
+ * signal arrays are indexed by vif number, 1..CONFIG_SCSC_WLAN_MAX_INTERFACES
+ * (hence the +1 sizing; index 0 is unused), and are accessed under
+ * fw_test_lock.
+ */
+struct slsi_fw_test {
+ struct slsi_dev *sdev;
+ bool fw_test_enabled;
+ struct slsi_skb_work fw_test_work;
+ struct slsi_spinlock fw_test_lock;
+ struct sk_buff *mlme_add_vif_req[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1];
+ struct sk_buff *mlme_connect_req[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1];
+ struct sk_buff *mlme_connect_cfm[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1];
+ struct sk_buff *mlme_procedure_started_ind[CONFIG_SCSC_WLAN_MAX_INTERFACES + 1]; /* TODO_HARDMAC : Per AID as well as per vif */
+};
+
+void slsi_fw_test_init(struct slsi_dev *sdev, struct slsi_fw_test *fwtest);
+void slsi_fw_test_deinit(struct slsi_dev *sdev, struct slsi_fw_test *fwtest);
+int slsi_fw_test_signal(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *skb);
+int slsi_fw_test_signal_with_udi_header(struct slsi_dev *sdev, struct slsi_fw_test *fwtest, struct sk_buff *skb);
+
+#endif /*__SLSI_FW_TEST_H__*/
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <scsc/scsc_logring.h>
+
+#include "hip.h"
+#include "debug.h"
+#include "procfs.h"
+#include "sap.h"
+#ifdef CONFIG_SCSC_SMAPPER
+#include "hip4_smapper.h"
+#endif
+
+/* SAP implementations container. Local and static to hip */
+static struct hip_sap {
+ struct sap_api *sap[SAP_TOTAL];
+} hip_sap_cont;
+
+/* Register SAP with HIP layer */
+/* Install a SAP implementation into its class slot; rejects an
+ * out-of-range class with -ENODEV.
+ */
+int slsi_hip_sap_register(struct sap_api *sap_api)
+{
+ if (sap_api->sap_class >= SAP_TOTAL)
+ return -ENODEV;
+
+ hip_sap_cont.sap[sap_api->sap_class] = sap_api;
+ return 0;
+}
+
+/* UNregister SAP with HIP layer */
+/* Clear a SAP implementation from its class slot; rejects an
+ * out-of-range class with -ENODEV.
+ */
+int slsi_hip_sap_unregister(struct sap_api *sap_api)
+{
+ if (sap_api->sap_class >= SAP_TOTAL)
+ return -ENODEV;
+
+ hip_sap_cont.sap[sap_api->sap_class] = NULL;
+ return 0;
+}
+
+/* Once the HIP config is readable, pass each mandatory SAP the
+ * firmware-advertised version from the v4 or v5 config area.
+ * Returns -ENODEV if any mandatory SAP is missing, lacks a
+ * sap_version_supported callback, or rejects the version (callback
+ * returns non-zero); 0 on success.
+ */
+int slsi_hip_sap_setup(struct slsi_dev *sdev)
+{
+ /* Execute callbacks to inform Supported version */
+ u16 version = 0;
+ u32 conf_hip4_ver = 0;
+
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
+
+ /* We enforce that all the SAPs are registered at this point */
+ if ((!hip_sap_cont.sap[SAP_MLME]) || (!hip_sap_cont.sap[SAP_MA]) ||
+ (!hip_sap_cont.sap[SAP_DBG]) || (!hip_sap_cont.sap[SAP_TST]))
+ return -ENODEV;
+
+ if (hip_sap_cont.sap[SAP_MLME]->sap_version_supported) {
+ if (conf_hip4_ver == 4)
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_mlme_ver);
+ if (conf_hip4_ver == 5)
+ version = scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_mlme_ver);
+ if (hip_sap_cont.sap[SAP_MLME]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_MA]->sap_version_supported) {
+ if (conf_hip4_ver == 4)
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_ma_ver);
+ if (conf_hip4_ver == 5)
+ version = scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_ma_ver);
+ if (hip_sap_cont.sap[SAP_MA]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_DBG]->sap_version_supported) {
+ if (conf_hip4_ver == 4)
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_debug_ver);
+ if (conf_hip4_ver == 5)
+ version = scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_debug_ver);
+ if (hip_sap_cont.sap[SAP_DBG]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_TST]->sap_version_supported) {
+ if (conf_hip4_ver == 4)
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_test_ver);
+ if (conf_hip4_ver == 5)
+ version = scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_test_ver);
+ if (hip_sap_cont.sap[SAP_TST]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ /* Success */
+ return 0;
+}
+
+/* Service notifier: fan the event out to every registered SAP that has
+ * a sap_notifier callback, then freeze/setup/suspend/resume the HIP4
+ * instance according to the event. Returns NOTIFY_BAD if sdev is
+ * missing, a mandatory SAP is unregistered, or a SAP vetoes the event;
+ * NOTIFY_OK otherwise.
+ * Fix: the loop previously dereferenced hip_sap_cont.sap[i] for every
+ * i < SAP_TOTAL although only the four mandatory slots are verified
+ * non-NULL above; skip empty slots to avoid a NULL dereference.
+ */
+static int slsi_hip_service_notifier(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)data;
+ int i;
+
+ if (!sdev)
+ return NOTIFY_BAD;
+
+ /* We enforce that all the SAPs are registered at this point */
+ if ((!hip_sap_cont.sap[SAP_MLME]) || (!hip_sap_cont.sap[SAP_MA]) ||
+ (!hip_sap_cont.sap[SAP_DBG]) || (!hip_sap_cont.sap[SAP_TST]))
+ return NOTIFY_BAD;
+
+ /* Check whether any sap is interested in the notifications */
+ for (i = 0; i < SAP_TOTAL; i++) {
+ if (!hip_sap_cont.sap[i])
+ continue;
+ if (hip_sap_cont.sap[i]->sap_notifier) {
+ if (hip_sap_cont.sap[i]->sap_notifier(sdev, event))
+ return NOTIFY_BAD;
+ }
+ }
+
+ switch (event) {
+ case SCSC_WIFI_STOP:
+ SLSI_INFO(sdev, "Freeze HIP4\n");
+ mutex_lock(&sdev->hip.hip_mutex);
+ hip4_freeze(&sdev->hip4_inst);
+ mutex_unlock(&sdev->hip.hip_mutex);
+ break;
+
+ case SCSC_WIFI_FAILURE_RESET:
+ SLSI_INFO(sdev, "Set HIP4 up again\n");
+ mutex_lock(&sdev->hip.hip_mutex);
+ hip4_setup(&sdev->hip4_inst);
+ mutex_unlock(&sdev->hip.hip_mutex);
+ break;
+
+ case SCSC_WIFI_SUSPEND:
+ SLSI_INFO(sdev, "Suspend HIP4\n");
+ mutex_lock(&sdev->hip.hip_mutex);
+ hip4_suspend(&sdev->hip4_inst);
+ mutex_unlock(&sdev->hip.hip_mutex);
+ break;
+
+ case SCSC_WIFI_RESUME:
+ SLSI_INFO(sdev, "Resume HIP4\n");
+ mutex_lock(&sdev->hip.hip_mutex);
+ hip4_resume(&sdev->hip4_inst);
+ mutex_unlock(&sdev->hip.hip_mutex);
+ break;
+
+ default:
+ SLSI_INFO(sdev, "Unknown event code %lu\n", event);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/* Notifier block registered with the WLAN service in slsi_hip_init() */
+static struct notifier_block cm_nb = {
+ .notifier_call = slsi_hip_service_notifier,
+};
+
+/* Initialise HIP bookkeeping: clear the hip4 instance, set the state to
+ * STOPPED, create the HIP mutex and hook into the WLAN service
+ * notifier chain. Always returns 0.
+ */
+int slsi_hip_init(struct slsi_dev *sdev, struct device *dev)
+{
+ SLSI_UNUSED_PARAMETER(dev);
+
+ memset(&sdev->hip4_inst, 0, sizeof(sdev->hip4_inst));
+
+ sdev->hip.sdev = sdev;
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STOPPED);
+ mutex_init(&sdev->hip.hip_mutex);
+
+ /* Register with the service notifier to receive asynchronous
+ * messages such as SCSC_WIFI_STOP (freeze) and SCSC_WIFI_FAILURE_RESET.
+ */
+ slsi_wlan_service_notifier_register(&cm_nb);
+
+ return 0;
+}
+
+/* Undo slsi_hip_init(): leave the notifier chain and destroy the mutex */
+void slsi_hip_deinit(struct slsi_dev *sdev)
+{
+ slsi_wlan_service_notifier_unregister(&cm_nb);
+ mutex_destroy(&sdev->hip.hip_mutex);
+}
+
+/* Bring the HIP up: STOPPED -> STARTING -> STARTED. Fails with -EINVAL
+ * (state restored to STOPPED) if the Maxwell core is absent or
+ * hip4_init() fails.
+ */
+int slsi_hip_start(struct slsi_dev *sdev)
+{
+ if (!sdev->maxwell_core) {
+ SLSI_ERR(sdev, "Maxwell core does not exist\n");
+ return -EINVAL;
+ }
+
+ SLSI_DBG4(sdev, SLSI_HIP_INIT_DEINIT, "[1/3]. Update HIP state (SLSI_HIP_STATE_STARTING)\n");
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STARTING);
+
+ SLSI_DBG4(sdev, SLSI_HIP_INIT_DEINIT, "[2/3]. Initialise HIP\n");
+ if (hip4_init(&sdev->hip4_inst)) {
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STOPPED);
+ SLSI_ERR(sdev, "hip4_init failed\n");
+ return -EINVAL;
+ }
+
+ SLSI_DBG4(sdev, SLSI_HIP_INIT_DEINIT, "[3/3]. Update HIP state (SLSI_HIP_STATE_STARTED)\n");
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STARTED);
+
+ return 0;
+}
+
+/* SAP rx proxy */
+/* SAP rx proxy: mirror the raw frame into the logring and UDI log
+ * clients, swallow signals addressed to a UDI test PID, then hand the
+ * skb to the SAP matching its FAPI class (MA/MLME/DBG/TST).
+ * Returns -ENODEV if a mandatory SAP is missing, -EIO for an
+ * unrecognised signal class, otherwise the SAP handler's result.
+ */
+int slsi_hip_rx(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ u16 pid;
+
+ /* We enforce that all the SAPs are registered at this point */
+ if ((!hip_sap_cont.sap[SAP_MLME]) || (!hip_sap_cont.sap[SAP_MA]) ||
+ (!hip_sap_cont.sap[SAP_DBG]) || (!hip_sap_cont.sap[SAP_TST]))
+ return -ENODEV;
+
+ /* Here we push a copy of the bare RECEIVED skb data also to the
+ * logring as a binary record.
+ * Note that bypassing UDI subsystem as a whole means we are losing:
+ * UDI filtering / UDI Header INFO / UDI QueuesFrames Throttling /
+ * UDI Skb Asynchronous processing
+ * We keep split DATA/CTRL path.
+ */
+ if (fapi_is_ma(skb))
+ SCSC_BIN_TAG_DEBUG(BIN_WIFI_DATA_RX, skb->data, skb->len);
+ else
+ SCSC_BIN_TAG_DEBUG(BIN_WIFI_CTRL_RX, skb->data, skb->len);
+ /* Udi test : If pid in UDI range then pass to UDI and ignore */
+ slsi_log_clients_log_signal_fast(sdev, &sdev->log_clients, skb, SLSI_LOG_DIRECTION_TO_HOST);
+ pid = fapi_get_u16(skb, receiver_pid);
+ if (pid >= SLSI_TX_PROCESS_ID_UDI_MIN && pid <= SLSI_TX_PROCESS_ID_UDI_MAX) {
+ slsi_kfree_skb(skb);
+ return 0;
+ }
+
+ /* Route by FAPI signal class; the chosen SAP takes ownership of skb */
+ if (fapi_is_ma(skb))
+ return hip_sap_cont.sap[SAP_MA]->sap_handler(sdev, skb);
+
+ if (fapi_is_mlme(skb))
+ return hip_sap_cont.sap[SAP_MLME]->sap_handler(sdev, skb);
+
+ if (fapi_is_debug(skb))
+ return hip_sap_cont.sap[SAP_DBG]->sap_handler(sdev, skb);
+
+ if (fapi_is_test(skb))
+ return hip_sap_cont.sap[SAP_TST]->sap_handler(sdev, skb);
+
+ return -EIO;
+}
+
+/* Only DATA plane will look at the returning FB to account BoT */
+/* Forward a returning FB colour to the data-plane SAP for BoT accounting.
+ * NOTE(review): assumes SAP_MA is registered (no NULL check here) -
+ * callers appear to run only while the HIP is up; confirm.
+ */
+int slsi_hip_tx_done(struct slsi_dev *sdev, u16 colour)
+{
+ return hip_sap_cont.sap[SAP_MA]->sap_txdone(sdev, colour);
+}
+
+/* Thin wrapper: run hip4_setup() on this device's hip4 instance after
+ * initialisation; returns its result.
+ */
+int slsi_hip_setup(struct slsi_dev *sdev)
+{
+ /* Setup hip4 after initialization */
+ return hip4_setup(&sdev->hip4_inst);
+}
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* Thin wrappers hiding the hip4 smapper API behind the generic HIP
+ * interface; each just forwards to the hip4_smapper_* equivalent for
+ * this device's hip4 instance.
+ */
+int slsi_hip_consume_smapper_entry(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ return hip4_smapper_consume_entry(sdev, &sdev->hip4_inst, skb);
+}
+
+struct sk_buff *slsi_hip_get_skb_from_smapper(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ return hip4_smapper_get_skb(sdev, &sdev->hip4_inst, skb);
+}
+
+void *slsi_hip_get_skb_data_from_smapper(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ return hip4_smapper_get_skb_data(sdev, &sdev->hip4_inst, skb);
+}
+#endif
+
+/* Take the HIP down under hip_mutex: STOPPING -> hip4_deinit() ->
+ * STOPPED. Always returns 0.
+ */
+int slsi_hip_stop(struct slsi_dev *sdev)
+{
+ mutex_lock(&sdev->hip.hip_mutex);
+ SLSI_DBG4(sdev, SLSI_HIP_INIT_DEINIT, "Update HIP state (SLSI_HIP_STATE_STOPPING)\n");
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STOPPING);
+
+ hip4_deinit(&sdev->hip4_inst);
+
+ SLSI_DBG4(sdev, SLSI_HIP_INIT_DEINIT, "Update HIP state (SLSI_HIP_STATE_STOPPED)\n");
+ atomic_set(&sdev->hip.hip_state, SLSI_HIP_STATE_STOPPED);
+
+ mutex_unlock(&sdev->hip.hip_mutex);
+ return 0;
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_HIP_H__
+#define __SLSI_HIP_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+
+struct slsi_dev;
+
+/* This structure describes the chip and HIP core lib
+ * information that exposed to the OS layer.
+ */
+struct slsi_card_info {
+ u16 chip_id; /* chip identifier */
+ u32 fw_build; /* firmware build id - TODO confirm exact encoding */
+ u16 fw_hip_version; /* HIP version advertised by firmware */
+ u32 sdio_block_size; /* SDIO transfer block size in bytes - confirm units */
+};
+
+/* HIP States:
+ * STOPPED : (default) state, avoid running the HIP
+ * STARTING : HIP is being initialised, avoid running the HIP
+ * STARTED : HIP cycles can run
+ * STOPPING : HIP is being de-initialised, avoid running the HIP
+ * BLOCKED : HIP TX CMD53 failure or WLAN subsystem crashed indication from Hydra,
+ * avoid running the HIP
+ */
+enum slsi_hip_state {
+ SLSI_HIP_STATE_STOPPED, /* default; HIP must not run */
+ SLSI_HIP_STATE_STARTING, /* initialising; HIP must not run */
+ SLSI_HIP_STATE_STARTED, /* HIP cycles can run */
+ SLSI_HIP_STATE_STOPPING, /* de-initialising; HIP must not run */
+ SLSI_HIP_STATE_BLOCKED /* TX failure or WLAN crash; HIP must not run */
+};
+
+/* Per-device HIP bookkeeping: owning device, card info, and the
+ * mutex/state pair guarding HIP start/stop transitions.
+ */
+struct slsi_hip {
+ struct slsi_dev *sdev;
+ struct slsi_card_info card_info;
+ /* a std mutex */
+ struct mutex hip_mutex;
+
+ /* refer to enum slsi_hip_state */
+ atomic_t hip_state;
+};
+
+#define SLSI_HIP_PARAM_SLOT_COUNT 2
+
+int slsi_hip_init(struct slsi_dev *sdev, struct device *dev);
+void slsi_hip_deinit(struct slsi_dev *sdev);
+
+int slsi_hip_start(struct slsi_dev *sdev);
+int slsi_hip_setup(struct slsi_dev *sdev);
+int slsi_hip_consume_smapper_entry(struct slsi_dev *sdev, struct sk_buff *skb);
+void *slsi_hip_get_skb_data_from_smapper(struct slsi_dev *sdev, struct sk_buff *skb);
+struct sk_buff *slsi_hip_get_skb_from_smapper(struct slsi_dev *sdev, struct sk_buff *skb);
+int slsi_hip_stop(struct slsi_dev *sdev);
+
+/* Forward declaration */
+struct sap_api;
+struct sk_buff;
+
+/* Register SAP with HIP layer */
+int slsi_hip_sap_register(struct sap_api *sap_api);
+/* Unregister SAP with HIP layer */
+int slsi_hip_sap_unregister(struct sap_api *sap_api);
+/* SAP rx proxy */
+int slsi_hip_rx(struct slsi_dev *sdev, struct sk_buff *skb);
+/* SAP setup once we receive SAP versions */
+int slsi_hip_sap_setup(struct slsi_dev *sdev);
+/* Allow the SAP to act on a buffer in the free list. */
+int slsi_hip_tx_done(struct slsi_dev *sdev, u16 colour);
+
+#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_mifram.h>
+#include <linux/ktime.h>
+#include <linux/kthread.h>
+#include <scsc/scsc_logring.h>
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#endif
+
+#include "hip4.h"
+#include "mbulk.h"
+#include "dev.h"
+#include "hip4_sampler.h"
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+
+#include "debug.h"
+
+/* Module parameters: runtime tunables, writable via sysfs (S_IWUSR);
+ * see each MODULE_PARM_DESC for semantics.
+ */
+static bool hip4_system_wq;
+module_param(hip4_system_wq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_system_wq, "Use system wq instead of named workqueue. (default: N)");
+
+#ifdef CONFIG_SCSC_LOGRING
+static bool hip4_dynamic_logging = true;
+module_param(hip4_dynamic_logging, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_dynamic_logging, "Dynamic logging, logring is disabled if tput > hip4_qos_med_tput_in_mbps. (default: Y)");
+
+static int hip4_dynamic_logging_tput_in_mbps = 150;
+module_param(hip4_dynamic_logging_tput_in_mbps, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_dynamic_logging_tput_in_mbps, "throughput (in Mbps) to apply dynamic logring logging");
+#endif
+
+#ifdef CONFIG_SCSC_QOS
+static bool hip4_qos_enable = true;
+module_param(hip4_qos_enable, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_qos_enable, "enable HIP4 PM QoS. (default: Y)");
+
+static int hip4_qos_max_tput_in_mbps = 300;
+module_param(hip4_qos_max_tput_in_mbps, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_qos_max_tput_in_mbps, "throughput (in Mbps) to apply Max PM QoS");
+
+static int hip4_qos_med_tput_in_mbps = 150;
+module_param(hip4_qos_med_tput_in_mbps, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_qos_med_tput_in_mbps, "throughput (in Mbps) to apply Median PM QoS");
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* Fix: the parameter description claimed "(default: Y)" but the
+ * variable has no initialiser, so a static bool defaults to false.
+ */
+static bool hip4_smapper_enable;
+module_param(hip4_smapper_enable, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_smapper_enable, "enable HIP4 SMAPPER. (default: N)");
+static bool hip4_smapper_is_enabled;
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+/* run NAPI poll on a specific CPU (preferably a big CPU if online) */
+static int napi_select_cpu; /* CPU number */
+module_param(napi_select_cpu, int, 0644);
+MODULE_PARM_DESC(napi_select_cpu, "select a specific CPU to execute NAPI poll");
+#endif
+
+/* Upper bound on frames buffered in the driver; see MODULE_PARM_DESC */
+static int max_buffered_frames = 10000;
+module_param(max_buffered_frames, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_buffered_frames, "Maximum number of frames to buffer in the driver");
+
+/* Instrumentation timestamps: interrupt arrival and bottom-half
+ * begin/end times - kept per traffic class (FB/ctrl/data) when RX NAPI
+ * is enabled - plus combined values and watchdog/send/close marks.
+ * NOTE(review): the producers/consumers live in the hip4 IRQ/BH
+ * handlers outside this chunk; confirm semantics there.
+ */
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+static ktime_t intr_received_fb;
+static ktime_t bh_init_fb;
+static ktime_t bh_end_fb;
+static ktime_t intr_received_ctrl;
+static ktime_t bh_init_ctrl;
+static ktime_t bh_end_ctrl;
+static ktime_t intr_received_data;
+static ktime_t bh_init_data;
+static ktime_t bh_end_data;
+#endif
+static ktime_t intr_received;
+static ktime_t bh_init;
+static ktime_t bh_end;
+
+static ktime_t wdt;
+static ktime_t send;
+static ktime_t closing;
+
+/* Selector for the scoreboard index accessors: write index vs read index */
+enum rw {
+ widx,
+ ridx,
+};
+
+static u8 hip4_read_index(struct slsi_hip4 *hip, u32 q, enum rw r_w);
+
+/* Q mapping V3 - V4 */
+/*offset of F/W owned indices */
+#define FW_OWN_OFS (64)
+/**
+ * HIP queue indices layout in the scoreboard (SC-505612-DD). v3
+ *
+ * 3 2 1 0
+ * +-----------------------------------+
+ * +0 | Q3R | Q2R | Q1W | Q0W | Owned by the host
+ * +-----------------------------------+
+ * +4 | | | Q5W | Q4R | Owned by the host
+ * +-----------------------------------+
+ *
+ * +-----------------------------------+
+ * +64 | Q3W | Q2W | Q1R | Q0R | Owned by the F/W
+ * +-----------------------------------+
+ * +68 | | | Q5R | Q4W | Owned by the F/W
+ * +-----------------------------------+
+ *
+ * The queue indices which owned by the host are only writable by the host.
+ * F/W can only read them. And vice versa.
+ */
+/* Per-queue scoreboard byte offsets, indexed as [queue][widx/ridx] */
+static int q_idx_layout[6][2] = {
+ { 0, FW_OWN_OFS + 0}, /* mif_q_fh_ctl : 0 */
+ { 1, FW_OWN_OFS + 1}, /* mif_q_fh_dat : 1 */
+ { FW_OWN_OFS + 2, 2}, /* mif_q_fh_rfb : 2 */
+ { FW_OWN_OFS + 3, 3}, /* mif_q_th_ctl : 3 */
+ { FW_OWN_OFS + 4, 4}, /* mif_q_th_dat : 4 */
+ { 5, FW_OWN_OFS + 5} /* mif_q_th_rfb : 5 */
+};
+
+/*offset of F/W owned VIF Status */
+#define FW_OWN_VIF (96)
+/**
+ * HIP Pause state VIF. v4. 2 bits per PEER
+ *
+ * +-----------------------------------+
+ * +96 | VIF[0] Peers [15-1] | Owned by the F/W
+ * +-----------------------------------+
+ * +100 | VIF[0] Peers [31-16] | Owned by the F/W
+ * +-----------------------------------+
+ * +104 | VIF[1] Peers [15-1] | Owned by the F/W
+ * +-----------------------------------+
+ * +108 | VIF[1] Peers [31-16] | Owned by the F/W
+ * +-----------------------------------+
+ * +112 | VIF[2] Peers [15-1] | Owned by the F/W
+ * +-----------------------------------+
+ * +116 | VIF[2] Peers [31-16] | Owned by the F/W
+ * +-----------------------------------+
+ * +120 | VIF[3] Peers [15-1] | Owned by the F/W
+ * +-----------------------------------+
+ * +124 | VIF[3] Peers [31-16] | Owned by the F/W
+ * +-----------------------------------+
+ *
+ */
+
+/* MAX_STORM. Max Interrupts allowed when platform is in suspend */
+#define MAX_STORM 5
+
+/* Timeout for Wakelocks in HIP */
+#define SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS (1000)
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+
+/* Jitter histogram bucket counters (bucket bounds given by HISTO_1..HISTO_5, in ns) */
+static u64 histogram_1;
+static u64 histogram_2;
+static u64 histogram_3;
+static u64 histogram_4;
+static u64 histogram_5;
+static u64 histogram_6;
+static u64 max_jitter;
+
+#define HISTO_1 1000 /* 1 us */
+#define HISTO_2 10000 /* 10 us */
+#define HISTO_3 100000 /* 100 us */
+#define HISTO_4 1000000 /* 1ms */
+#define HISTO_5 10000000 /* 10ms */
+
+/* TH DATA queue depth histogram counters (bucket bounds: HISTO_*_DATA) */
+static u64 histogram_1_data;
+static u64 histogram_2_data;
+static u64 histogram_3_data;
+static u64 histogram_4_data;
+static u64 histogram_5_data;
+static u64 histogram_6_data;
+static u64 max_data;
+
+#define HISTO_1_DATA 50 /* 50 th data packets */
+#define HISTO_2_DATA 100/* 100 th data packets */
+#define HISTO_3_DATA 150/* 150 th data packets */
+#define HISTO_4_DATA 200/* 200 th data packets */
+#define HISTO_5_DATA 250/* 250 th data packets */
+
+/* MAX_HISTORY_RECORDS should be power of two */
+/* (the ring position is advanced with "& (MAX_HISTORY_RECORDS - 1)") */
+#define MAX_HISTORY_RECORDS 32
+
+#define FH 0
+#define TH 1
+
+/* Circular buffer of the most recent signals exchanged with the F/W */
+struct hip4_history {
+ bool dir; /* FH (from host) or TH (to host) */
+ u32 signal; /* signal id */
+ u32 cnt; /* consecutive occurrences of this signal/direction */
+ ktime_t last_time; /* timestamp of the most recent occurrence */
+} hip4_signal_history[MAX_HISTORY_RECORDS];
+
+/* Current write position in hip4_signal_history */
+static u32 history_record;
+
+/* Record a signal in the circular history buffer.
+ * Must be called from atomic context (no locking is done here).
+ */
+static void hip4_history_record_add(bool dir, u32 signal_id)
+{
+ struct hip4_history *rec = &hip4_signal_history[history_record];
+
+ /* Same signal in the same direction as last time: bump the counter
+ * and refresh the timestamp instead of consuming a new slot.
+ */
+ if (rec->signal == signal_id && rec->dir == dir) {
+ rec->last_time = ktime_get();
+ rec->cnt += 1;
+ return;
+ }
+
+ /* Advance to the next slot (MAX_HISTORY_RECORDS is a power of two) */
+ history_record = (history_record + 1) & (MAX_HISTORY_RECORDS - 1);
+
+ rec = &hip4_signal_history[history_record];
+ rec->dir = dir;
+ rec->signal = signal_id;
+ rec->cnt = 1;
+ rec->last_time = ktime_get();
+}
+
+/* Emit either to the seq_file (procfs read path) or to the error log */
+#define HIP4_HISTORY(in_seq_file, m, fmt, arg ...) \
+ do { \
+ if (in_seq_file) \
+ seq_printf(m, fmt, ## arg); \
+ else \
+ SLSI_ERR_NODEV(fmt, ## arg); \
+ } while (0)
+
+/* Dump the signal history, oldest record first.
+ * Output goes to the seq_file when in_seq_file is true, otherwise to the
+ * error log (m may be NULL in that case).
+ */
+static void hip4_history_record_print(bool in_seq_file, struct seq_file *m)
+{
+ struct hip4_history rec;
+ ktime_t prev_time = ktime_set(0, 0);
+ u32 n, slot;
+
+ /* The oldest entry lives just after the current write position */
+ slot = (history_record + 1) & (MAX_HISTORY_RECORDS - 1);
+
+ HIP4_HISTORY(in_seq_file, m, "dir\t signal\t cnt\t last_time(ns) \t\t gap(ns)\n");
+ HIP4_HISTORY(in_seq_file, m, "-----------------------------------------------------------------------------\n");
+ for (n = 0; n < MAX_HISTORY_RECORDS; n++) {
+ rec = hip4_signal_history[slot];
+ /* Skip never-used slots (cnt == 0) */
+ if (rec.cnt) {
+ HIP4_HISTORY(in_seq_file, m, "%s\t 0x%04x\t %d\t %lld \t%lld\n", rec.dir ? "<--TH" : "FH-->",
+ rec.signal, rec.cnt, ktime_to_ns(rec.last_time), ktime_to_ns(ktime_sub(rec.last_time, prev_time)));
+ }
+ prev_time = rec.last_time;
+ slot = (slot + 1) & (MAX_HISTORY_RECORDS - 1);
+ }
+}
+
+/* seq_file show callback for the signal-history procfs entry */
+static int hip4_proc_show_history(struct seq_file *m, void *v)
+{
+ hip4_history_record_print(true, m);
+ return 0;
+}
+
+/* procfs open callback: bind the history show routine via single_open() */
+static int hip4_proc_history_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hip4_proc_show_history, PDE_DATA(inode));
+}
+
+
+/* Read-only procfs entry exposing the signal history */
+static const struct file_operations hip4_procfs_history_fops = {
+ .owner = THIS_MODULE,
+ .open = hip4_proc_history_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* procfs show handler: dump the HIP4 shared-memory config (v4 or v5
+ * layout), the scoreboard indices and the HIP statistics.
+ */
+static int hip4_proc_show(struct seq_file *m, void *v)
+{
+ struct slsi_hip4 *hip = m->private;
+ struct hip4_hip_control *hip_control;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ u8 i;
+
+ u32 conf_hip4_ver = 0;
+ void *hip_ptr;
+
+ if (!hip->hip_priv) {
+ seq_puts(m, "HIP4 not active\n");
+ return 0;
+ }
+
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+ /* Check if the version is supported. And get the index */
+ /* This is hardcoded and may change in future versions */
+ /* Fix: also accept version 5 - the config_v5 dump branch below was
+ * unreachable when the gate only allowed versions 3 and 4 (the index
+ * accessors already support versions 4 and 5).
+ */
+ if (conf_hip4_ver != 3 && conf_hip4_ver != 4 && conf_hip4_ver != 5) {
+ SLSI_ERR_NODEV("FW Version %d not supported or Hip has not been set up\n", conf_hip4_ver);
+ return 0;
+ }
+
+ /* hip_ref contains the reference of the start of shared memory allocated for WLAN */
+ /* hip_ptr is the kernel address of hip_ref*/
+ hip_ptr = scsc_mx_service_mif_addr_to_ptr(sdev->service, hip->hip_ref);
+ /* Get hip_control pointer on shared memory */
+ hip_control = (struct hip4_hip_control *)(hip_ptr +
+ HIP4_WLAN_CONFIG_OFFSET);
+
+ seq_puts(m, "-----------------------------------------\n");
+ seq_puts(m, "HIP4 CONFIG:\n");
+ seq_puts(m, "-----------------------------------------\n");
+ seq_printf(m, "config kernel addr = %p\n", hip_control);
+ if (conf_hip4_ver == 4) {
+ seq_printf(m, "hip4_version_4 addr = 0x%p\n", &hip_control->config_v4);
+ seq_printf(m, "magic_number = 0x%x\n", hip_control->config_v4.magic_number);
+ seq_printf(m, "hip_config_ver = 0x%x\n", hip_control->config_v4.hip_config_ver);
+ seq_printf(m, "config_len = 0x%x\n", hip_control->config_v4.config_len);
+ seq_printf(m, "compat_flag = 0x%x\n", hip_control->config_v4.compat_flag);
+ seq_printf(m, "sap_mlme_ver = 0x%x\n", hip_control->config_v4.sap_mlme_ver);
+ seq_printf(m, "sap_ma_ver = 0x%x\n", hip_control->config_v4.sap_ma_ver);
+ seq_printf(m, "sap_debug_ver = 0x%x\n", hip_control->config_v4.sap_debug_ver);
+ seq_printf(m, "sap_test_ver = 0x%x\n", hip_control->config_v4.sap_test_ver);
+ seq_printf(m, "fw_build_id = 0x%x\n", hip_control->config_v4.fw_build_id);
+ seq_printf(m, "fw_patch_id = 0x%x\n", hip_control->config_v4.fw_patch_id);
+ seq_printf(m, "unidat_req_headroom = 0x%x\n", hip_control->config_v4.unidat_req_headroom);
+ seq_printf(m, "unidat_req_tailroom = 0x%x\n", hip_control->config_v4.unidat_req_tailroom);
+ seq_printf(m, "bulk_buffer_align = 0x%x\n", hip_control->config_v4.bulk_buffer_align);
+ seq_printf(m, "host_cache_line = 0x%x\n", hip_control->config_v4.host_cache_line);
+ seq_printf(m, "host_buf_loc = 0x%x\n", hip_control->config_v4.host_buf_loc);
+ seq_printf(m, "host_buf_sz = 0x%x\n", hip_control->config_v4.host_buf_sz);
+ seq_printf(m, "fw_buf_loc = 0x%x\n", hip_control->config_v4.fw_buf_loc);
+ seq_printf(m, "fw_buf_sz = 0x%x\n", hip_control->config_v4.fw_buf_sz);
+ seq_printf(m, "mib_buf_loc = 0x%x\n", hip_control->config_v4.mib_loc);
+ seq_printf(m, "mib_buf_sz = 0x%x\n", hip_control->config_v4.mib_sz);
+ seq_printf(m, "log_config_loc = 0x%x\n", hip_control->config_v4.log_config_loc);
+ seq_printf(m, "log_config_sz = 0x%x\n", hip_control->config_v4.log_config_sz);
+ seq_printf(m, "mif_fh_int_n = 0x%x\n", hip_control->config_v4.mif_fh_int_n);
+ seq_printf(m, "mif_th_int_n[FH_CTRL] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_FH_CTRL]);
+ seq_printf(m, "mif_th_int_n[FH_DAT] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_FH_DAT]);
+ seq_printf(m, "mif_th_int_n[FH_RFB] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_FH_RFB]);
+ seq_printf(m, "mif_th_int_n[TH_CTRL] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_TH_CTRL]);
+ seq_printf(m, "mif_th_int_n[TH_DAT] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_TH_DAT]);
+ seq_printf(m, "mif_th_int_n[TH_RFB] = 0x%x\n", hip_control->config_v4.mif_th_int_n[HIP4_MIF_Q_TH_RFB]);
+ seq_printf(m, "scbrd_loc = 0x%x\n", hip_control->config_v4.scbrd_loc);
+ seq_printf(m, "q_num = 0x%x\n", hip_control->config_v4.q_num);
+ seq_printf(m, "q_len = 0x%x\n", hip_control->config_v4.q_len);
+ seq_printf(m, "q_idx_sz = 0x%x\n", hip_control->config_v4.q_idx_sz);
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ seq_printf(m, "q_loc[%d] = 0x%x\n", i, hip_control->config_v4.q_loc[i]);
+ } else if (conf_hip4_ver == 5) {
+ seq_printf(m, "hip4_version_5 addr = 0x%p\n", &hip_control->config_v5);
+ seq_printf(m, "magic_number = 0x%x\n", hip_control->config_v5.magic_number);
+ seq_printf(m, "hip_config_ver = 0x%x\n", hip_control->config_v5.hip_config_ver);
+ seq_printf(m, "config_len = 0x%x\n", hip_control->config_v5.config_len);
+ seq_printf(m, "compat_flag = 0x%x\n", hip_control->config_v5.compat_flag);
+ seq_printf(m, "sap_mlme_ver = 0x%x\n", hip_control->config_v5.sap_mlme_ver);
+ seq_printf(m, "sap_ma_ver = 0x%x\n", hip_control->config_v5.sap_ma_ver);
+ seq_printf(m, "sap_debug_ver = 0x%x\n", hip_control->config_v5.sap_debug_ver);
+ seq_printf(m, "sap_test_ver = 0x%x\n", hip_control->config_v5.sap_test_ver);
+ seq_printf(m, "fw_build_id = 0x%x\n", hip_control->config_v5.fw_build_id);
+ seq_printf(m, "fw_patch_id = 0x%x\n", hip_control->config_v5.fw_patch_id);
+ seq_printf(m, "unidat_req_headroom = 0x%x\n", hip_control->config_v5.unidat_req_headroom);
+ seq_printf(m, "unidat_req_tailroom = 0x%x\n", hip_control->config_v5.unidat_req_tailroom);
+ seq_printf(m, "bulk_buffer_align = 0x%x\n", hip_control->config_v5.bulk_buffer_align);
+ seq_printf(m, "host_cache_line = 0x%x\n", hip_control->config_v5.host_cache_line);
+ seq_printf(m, "host_buf_loc = 0x%x\n", hip_control->config_v5.host_buf_loc);
+ seq_printf(m, "host_buf_sz = 0x%x\n", hip_control->config_v5.host_buf_sz);
+ seq_printf(m, "fw_buf_loc = 0x%x\n", hip_control->config_v5.fw_buf_loc);
+ seq_printf(m, "fw_buf_sz = 0x%x\n", hip_control->config_v5.fw_buf_sz);
+ seq_printf(m, "mib_buf_loc = 0x%x\n", hip_control->config_v5.mib_loc);
+ seq_printf(m, "mib_buf_sz = 0x%x\n", hip_control->config_v5.mib_sz);
+ seq_printf(m, "log_config_loc = 0x%x\n", hip_control->config_v5.log_config_loc);
+ seq_printf(m, "log_config_sz = 0x%x\n", hip_control->config_v5.log_config_sz);
+ seq_printf(m, "mif_fh_int_n = 0x%x\n", hip_control->config_v5.mif_fh_int_n);
+ seq_printf(m, "mif_th_int_n = 0x%x\n", hip_control->config_v5.mif_th_int_n);
+ seq_printf(m, "scbrd_loc = 0x%x\n", hip_control->config_v5.scbrd_loc);
+ seq_printf(m, "q_num = 0x%x\n", hip_control->config_v5.q_num);
+ seq_printf(m, "q_len = 0x%x\n", hip_control->config_v5.q_len);
+ seq_printf(m, "q_idx_sz = 0x%x\n", hip_control->config_v5.q_idx_sz);
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ seq_printf(m, "q_loc[%d] = 0x%x\n", i, hip_control->config_v5.q_loc[i]);
+ }
+ seq_puts(m, "\n-----------------------------------------\n");
+ seq_puts(m, "HIP4 SCOREBOARD INDEXES:\n");
+ seq_puts(m, "-----------------------------------------\n");
+ seq_printf(m, "ktime start %lld (ns)\n", ktime_to_ns(hip->hip_priv->stats.start));
+ seq_printf(m, "ktime now %lld (ns)\n\n", ktime_to_ns(ktime_get()));
+
+ seq_printf(m, "rx_intr_tohost 0x%x\n", hip->hip_priv->intr_tohost);
+ seq_printf(m, "rx_intr_fromhost 0x%x\n\n", hip->hip_priv->intr_fromhost);
+
+ /* HIP statistics */
+ seq_printf(m, "HIP IRQs: %u\n", atomic_read(&hip->hip_priv->stats.irqs));
+ seq_printf(m, "HIP IRQs spurious: %u\n", atomic_read(&hip->hip_priv->stats.spurious_irqs));
+ seq_printf(m, "FW debug-inds: %u\n\n", atomic_read(&sdev->debug_inds));
+
+ seq_puts(m, "Queue\tIndex\tFrames\n");
+ seq_puts(m, "-----\t-----\t------\n");
+ /* Print scoreboard */
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++) {
+ seq_printf(m, "Q%dW\t0x%x\t\n", i, hip4_read_index(hip, i, widx));
+ seq_printf(m, "Q%dR\t0x%x\t%d\n", i, hip4_read_index(hip, i, ridx), hip->hip_priv->stats.q_num_frames[i]);
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+/* procfs open callback: bind the stats show routine via single_open() */
+static int hip4_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hip4_proc_show, PDE_DATA(inode));
+}
+
+/* Read-only procfs entry exposing HIP4 config and statistics */
+static const struct file_operations hip4_procfs_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = hip4_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* seq_file show callback: print the interrupt-to-BH jitter histogram and
+ * the TH DATA queue depth histogram accumulated since the last clear.
+ */
+static int hip4_proc_jitter_show(struct seq_file *m, void *v)
+{
+ seq_puts(m, "Values in ns\n");
+ seq_printf(m, "<%d(ns)\t\t\t\t %lld\n", HISTO_1, histogram_1);
+ seq_printf(m, "%d(ns)-%d(ns)\t\t\t %lld\n", HISTO_1, HISTO_2, histogram_2);
+ seq_printf(m, "%d(ns)-%d(ns)\t\t\t %lld\n", HISTO_2, HISTO_3, histogram_3);
+ seq_printf(m, "%d(ns)-%d(ns)\t\t\t %lld\n", HISTO_3, HISTO_4, histogram_4);
+ seq_printf(m, "%d(ns)-%d(ns)\t\t %lld\n", HISTO_4, HISTO_5, histogram_5);
+ seq_printf(m, ">%d(ns)\t\t\t\t %lld\n", HISTO_5, histogram_6);
+ seq_printf(m, "max jitter(ns)\t\t\t\t %lld\n", max_jitter);
+ seq_puts(m, "--------------------------\n");
+ seq_puts(m, "Packets in TH DATA Q\n");
+ seq_printf(m, "<%d\t\t%lld\n", HISTO_1_DATA, histogram_1_data);
+ seq_printf(m, "%d-%d\t\t%lld\n", HISTO_1_DATA, HISTO_2_DATA, histogram_2_data);
+ seq_printf(m, "%d-%d\t\t%lld\n", HISTO_2_DATA, HISTO_3_DATA, histogram_3_data);
+ seq_printf(m, "%d-%d\t\t%lld\n", HISTO_3_DATA, HISTO_4_DATA, histogram_4_data);
+ seq_printf(m, "%d-%d\t\t%lld\n", HISTO_4_DATA, HISTO_5_DATA, histogram_5_data);
+ seq_printf(m, ">%d\t\t%lld\n", HISTO_5_DATA, histogram_6_data);
+ seq_printf(m, "max data\t%lld\n", max_data);
+ return 0;
+}
+
+/* procfs open callback: bind the jitter show routine via single_open() */
+static int hip4_proc_jitter_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hip4_proc_jitter_show, PDE_DATA(inode));
+}
+
+/* procfs write handler: any write clears every jitter and data histogram
+ * counter. The written payload itself is ignored.
+ */
+static ssize_t hip4_proc_jitter_clear(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ SLSI_INFO_NODEV("Clear Histogram\n");
+
+ /* Reset the jitter histogram */
+ histogram_1 = histogram_2 = histogram_3 = 0;
+ histogram_4 = histogram_5 = histogram_6 = 0;
+ max_jitter = 0;
+
+ /* Reset the TH DATA queue depth histogram */
+ histogram_1_data = histogram_2_data = histogram_3_data = 0;
+ histogram_4_data = histogram_5_data = histogram_6_data = 0;
+ max_data = 0;
+
+ return count;
+}
+
+/* procfs entry: read dumps the histograms, write clears them */
+static const struct file_operations hip4_procfs_jitter_fops = {
+ .owner = THIS_MODULE,
+ .open = hip4_proc_jitter_open,
+ .write = hip4_proc_jitter_clear,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
+/* Compat shim: ktime_add_ms() is only provided by kernels >= 3.11 */
+static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
+}
+#endif
+
+/* Retry budget and delays used when the from-host RFB pool has no space */
+#define FB_NO_SPC_NUM_RET 100
+#define FB_NO_SPC_SLEEP_MS 10
+#define FB_NO_SPC_DELAY_US 1000
+
+/* Publish a queue index to the scoreboard in shared memory.
+ * May be called from BH context; serialised via the rw_scoreboard lock.
+ * On an unsupported HIP version nothing is written and no interrupt
+ * sampling is done.
+ */
+static void hip4_update_index(struct slsi_hip4 *hip, u32 q, enum rw r_w, u8 value)
+{
+ struct hip4_priv *hip_priv = hip->hip_priv;
+
+ write_lock_bh(&hip_priv->rw_scoreboard);
+ if (hip_priv->version == 4 || hip_priv->version == 5) {
+ *((u8 *)(hip_priv->scbrd_base + q_idx_layout[q][r_w])) = value;
+ /* Memory barrier when updating shared mailbox/memory */
+ smp_wmb();
+ SCSC_HIP4_SAMPLER_Q(hip_priv->minor, q, r_w, value, 0);
+ } else {
+ SLSI_ERR_NODEV("Incorrect version\n");
+ }
+ write_unlock_bh(&hip_priv->rw_scoreboard);
+}
+
+/* Read a queue index from the scoreboard in shared memory.
+ * May be called from BH context; serialised via the rw_scoreboard lock.
+ * Returns 0 when the HIP version is not supported.
+ */
+static u8 hip4_read_index(struct slsi_hip4 *hip, u32 q, enum rw r_w)
+{
+ struct hip4_priv *hip_priv = hip->hip_priv;
+ u32 value = 0;
+
+ read_lock_bh(&hip_priv->rw_scoreboard);
+ if (hip_priv->version == 4 || hip_priv->version == 5) {
+ value = *((u8 *)(hip_priv->scbrd_base + q_idx_layout[q][r_w]));
+ /* Memory barrier when reading shared mailbox/memory */
+ smp_rmb();
+ } else {
+ SLSI_ERR_NODEV("Incorrect version\n");
+ }
+ read_unlock_bh(&hip_priv->rw_scoreboard);
+ return value;
+}
+
+/* Emergency debug dump: interrupt bits, scoreboard, MIF registers, the
+ * offending mbulk/skb (if any), BH timestamps and the signal history.
+ * m, skb and service may each be NULL; only the available items are dumped.
+ */
+static void hip4_dump_dbg(struct slsi_hip4 *hip, struct mbulk *m, struct sk_buff *skb, struct scsc_service *service)
+{
+ unsigned int i = 0;
+ scsc_mifram_ref ref;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ SLSI_ERR_NODEV("intr_tohost_fb 0x%x\n", hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_RFB]);
+ SLSI_ERR_NODEV("intr_tohost_ctrl 0x%x\n", hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_CTRL]);
+ SLSI_ERR_NODEV("intr_tohost_dat 0x%x\n", hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+ } else {
+ SLSI_ERR_NODEV("intr_tohost 0x%x\n", hip->hip_priv->intr_tohost);
+ }
+#else
+ SLSI_ERR_NODEV("intr_tohost 0x%x\n", hip->hip_priv->intr_tohost);
+#endif
+ SLSI_ERR_NODEV("intr_fromhost 0x%x\n", hip->hip_priv->intr_fromhost);
+
+ /* Print scoreboard */
+ for (i = 0; i < 6; i++) {
+ SLSI_ERR_NODEV("Q%dW 0x%x\n", i, hip4_read_index(hip, i, widx));
+ SLSI_ERR_NODEV("Q%dR 0x%x\n", i, hip4_read_index(hip, i, ridx));
+ }
+
+ if (service)
+ scsc_mx_service_mif_dump_registers(service);
+
+ /* Dump the raw mbulk header and, if present, its embedded signal */
+ if (m && service) {
+ if (scsc_mx_service_mif_ptr_to_addr(service, m, &ref))
+ return;
+ SLSI_ERR_NODEV("m: %p 0x%x\n", m, ref);
+ print_hex_dump(KERN_ERR, SCSC_PREFIX "mbulk ", DUMP_PREFIX_NONE, 16, 1, m, sizeof(struct mbulk), 0);
+ }
+ if (m && mbulk_has_signal(m))
+ print_hex_dump(KERN_ERR, SCSC_PREFIX "sig ", DUMP_PREFIX_NONE, 16, 1, mbulk_get_signal(m),
+ MBULK_SEG_SIG_BUFSIZE(m), 0);
+ /* Dump at most the first 0xff bytes of the skb payload */
+ if (skb)
+ print_hex_dump(KERN_ERR, SCSC_PREFIX "skb ", DUMP_PREFIX_NONE, 16, 1, skb->data, skb->len > 0xff ? 0xff : skb->len, 0);
+
+ SLSI_ERR_NODEV("time: wdt %lld\n", ktime_to_ns(wdt));
+ SLSI_ERR_NODEV("time: send %lld\n", ktime_to_ns(send));
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ if (conf_hip4_ver == 4) {
+ SLSI_ERR_NODEV("time: intr_fb %lld\n", ktime_to_ns(intr_received_fb));
+ SLSI_ERR_NODEV("time: bh_init_fb %lld\n", ktime_to_ns(bh_init_fb));
+ SLSI_ERR_NODEV("time: bh_end_fb %lld\n", ktime_to_ns(bh_end_fb));
+ SLSI_ERR_NODEV("time: intr_ctrl %lld\n", ktime_to_ns(intr_received_ctrl));
+ SLSI_ERR_NODEV("time: bh_init_ctrl %lld\n", ktime_to_ns(bh_init_ctrl));
+ SLSI_ERR_NODEV("time: bh_end_ctrl %lld\n", ktime_to_ns(bh_end_ctrl));
+ SLSI_ERR_NODEV("time: intr_data %lld\n", ktime_to_ns(intr_received_data));
+ SLSI_ERR_NODEV("time: bh_init_data %lld\n", ktime_to_ns(bh_init_data));
+ SLSI_ERR_NODEV("time: bh_end_data %lld\n", ktime_to_ns(bh_end_data));
+ } else {
+ SLSI_ERR_NODEV("time: intr %lld\n", ktime_to_ns(intr_received));
+ SLSI_ERR_NODEV("time: bh_init %lld\n", ktime_to_ns(bh_init));
+ SLSI_ERR_NODEV("time: bh_end %lld\n", ktime_to_ns(bh_end));
+ }
+#else
+ SLSI_ERR_NODEV("time: intr %lld\n", ktime_to_ns(intr_received));
+ SLSI_ERR_NODEV("time: bh_init %lld\n", ktime_to_ns(bh_init));
+ SLSI_ERR_NODEV("time: bh_end %lld\n", ktime_to_ns(bh_end));
+#endif
+ SLSI_ERR_NODEV("time: closing %lld\n", ktime_to_ns(closing));
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ /* Discard noise if it is a mbulk/skb issue */
+ if (!skb && !m)
+ hip4_history_record_print(false, NULL);
+#endif
+}
+
+/* Transform skb to mbulk (fapi_signal + payload).
+ * Copies the FAPI signal (shifted 4 bytes for the FW fapi header) and any
+ * payload (including SG fragments and, if CHECKSUM_PARTIAL, a software
+ * TCP/UDP checksum) into a freshly-allocated mbulk from the CTRL or DATA
+ * pool. Returns NULL on any allocation failure; the skb is never consumed.
+ */
+static struct mbulk *hip4_skb_to_mbulk(struct hip4_priv *hip, struct sk_buff *skb, bool ctrl_packet)
+{
+ struct mbulk *m = NULL;
+ void *sig = NULL, *b_data = NULL;
+ size_t payload = 0;
+ u8 pool_id = ctrl_packet ? MBULK_POOL_ID_CTRL : MBULK_POOL_ID_DATA;
+ u8 headroom = 0, tailroom = 0;
+ enum mbulk_class clas = ctrl_packet ? MBULK_CLASS_FROM_HOST_CTL : MBULK_CLASS_FROM_HOST_DAT;
+ struct slsi_skb_cb *cb = slsi_skb_cb_get(skb);
+#ifdef CONFIG_SCSC_WLAN_SG
+ u32 linear_data;
+ u32 offset;
+ u8 i;
+#endif
+
+ payload = skb->len - cb->sig_length;
+
+ /* Get headroom/tailroom */
+ headroom = hip->unidat_req_headroom;
+ tailroom = hip->unidat_req_tailroom;
+
+ /* Allocate mbulk */
+ if (payload > 0) {
+ /* If signal include payload, add headroom and tailroom */
+ m = mbulk_with_signal_alloc_by_pool(pool_id, cb->colour, clas, cb->sig_length + 4,
+ payload + headroom + tailroom);
+ if (!m)
+ return NULL;
+ if (!mbulk_reserve_head(m, headroom)) {
+ /* Fix: free the freshly allocated mbulk on failure
+ * (previously leaked back to the pool).
+ */
+ mbulk_free_virt_host(m);
+ return NULL;
+ }
+ } else {
+ /* If it is only a signal do not add headroom */
+ m = mbulk_with_signal_alloc_by_pool(pool_id, cb->colour, clas, cb->sig_length + 4, 0);
+ if (!m)
+ return NULL;
+ }
+
+ /* Get signal handler */
+ sig = mbulk_get_signal(m);
+ if (!sig) {
+ mbulk_free_virt_host(m);
+ return NULL;
+ }
+
+ /* Copy signal */
+ /* 4Bytes offset is required for FW fapi header */
+ memcpy(sig + 4, skb->data, cb->sig_length);
+
+ /* Copy payload */
+ /* If the signal has payload memcpy the data */
+ if (payload > 0) {
+ /* Get head pointer */
+ b_data = mbulk_dat_rw(m);
+ if (!b_data) {
+ mbulk_free_virt_host(m);
+ return NULL;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_SG
+ /* The amount of non-paged data at skb->data can be calculated as skb->len - skb->data_len.
+ * Helper routine: skb_headlen() .
+ */
+ linear_data = skb_headlen(skb) - cb->sig_length;
+
+ offset = 0;
+ /* Copy the linear data */
+ if (linear_data > 0) {
+ /* Copy the linear payload skipping the signal data */
+ memcpy(b_data, skb->data + cb->sig_length, linear_data);
+ offset = linear_data;
+ }
+
+ /* Traverse fragments and copy in to linear DRAM memory */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = NULL;
+ void *frag_va_data;
+ unsigned int frag_size;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ WARN_ON(!frag);
+ if (!frag)
+ continue;
+ frag_va_data = skb_frag_address_safe(frag);
+ WARN_ON(!frag_va_data);
+ if (!frag_va_data)
+ continue;
+ frag_size = skb_frag_size(frag);
+ /* Copy the fragmented data */
+ memcpy(b_data + offset, frag_va_data, frag_size);
+ offset += frag_size;
+ }
+
+ /* Check whether the driver should perform the checksum */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ SLSI_DBG3_NODEV(SLSI_HIP, "CHECKSUM_PARTIAL. Driver performing checksum\n");
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct ethhdr *mach = (struct ethhdr *)b_data;
+ struct iphdr *iph = (struct iphdr *)((char *)b_data + sizeof(*mach));
+ unsigned int len = payload - sizeof(*mach) - (iph->ihl << 2);
+
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *th = (struct tcphdr *)((char *)b_data + sizeof(*mach) +
+ (iph->ihl << 2));
+ th->check = 0;
+ th->check = csum_tcpudp_magic(iph->saddr, iph->daddr, len,
+ IPPROTO_TCP,
+ csum_partial((char *)th, len, 0));
+ SLSI_DBG3_NODEV(SLSI_HIP, "th->check 0x%x\n", ntohs(th->check));
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *uh = (struct udphdr *)((char *)b_data + sizeof(*mach) +
+ (iph->ihl << 2));
+ uh->check = 0;
+ uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr, len,
+ IPPROTO_UDP,
+ csum_partial((char *)uh, len, 0));
+ SLSI_DBG3_NODEV(SLSI_HIP, "uh->check 0x%x\n", ntohs(uh->check));
+ }
+ }
+ }
+#else
+ /* Copy payload skipping the signal data */
+ memcpy(b_data, skb->data + cb->sig_length, payload);
+#endif
+ mbulk_append_tail(m, payload);
+ }
+ m->flag |= MBULK_F_OBOUND;
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Clear smapper field */
+ cb->skb_addr = NULL;
+#endif
+ return m;
+}
+
+/* Transform mbulk to skb (fapi_signal + payload).
+ * Walks any mbulk chain, records every chained mifram reference in
+ * to_free[] (caller owns freeing), then allocates an skb and copies the
+ * signal (minus the 4-byte FW fapi header) and all payload segments into
+ * it. When !atomic the rx_lock is dropped around the GFP_KERNEL
+ * allocation. Returns NULL on conversion/allocation failure.
+ */
+static struct sk_buff *hip4_mbulk_to_skb(struct scsc_service *service, struct hip4_priv *hip_priv, struct mbulk *m, scsc_mifram_ref *to_free, bool atomic)
+{
+ struct slsi_skb_cb *cb;
+ struct mbulk *next_mbulk[MBULK_MAX_CHAIN];
+ struct sk_buff *skb = NULL;
+ scsc_mifram_ref ref;
+ scsc_mifram_ref m_chain_next;
+ u8 free = 0;
+ u8 i = 0, j = 0;
+ u8 *p;
+ size_t bytes_to_alloc = 0;
+
+ /* Get the mif ref pointer, check for incorrect mbulk */
+ if (scsc_mx_service_mif_ptr_to_addr(service, m, &ref)) {
+ SLSI_ERR_NODEV("mbulk address conversion failed\n");
+ return NULL;
+ }
+
+ /* Track mbulk that should be freed */
+ to_free[free++] = ref;
+
+ bytes_to_alloc += m->sig_bufsz - 4;
+ bytes_to_alloc += m->len;
+
+ /* Detect Chained mbulk to start building the chain */
+ if ((MBULK_SEG_IS_CHAIN_HEAD(m)) && (MBULK_SEG_IS_CHAINED(m))) {
+ m_chain_next = mbulk_chain_next(m);
+ if (!m_chain_next) {
+ SLSI_ERR_NODEV("Mbulk is set MBULK_F_CHAIN_HEAD and MBULK_F_CHAIN but m_chain_next is NULL\n");
+ goto cont;
+ }
+ while (1) {
+ /* increase number mbulks in chain */
+ i++;
+ /* Get next_mbulk kernel address space pointer */
+ next_mbulk[i - 1] = scsc_mx_service_mif_addr_to_ptr(service, m_chain_next);
+ if (!next_mbulk[i - 1]) {
+ SLSI_ERR_NODEV("First Mbulk is set as MBULK_F_CHAIN but next_mbulk is NULL\n");
+ return NULL;
+ }
+ /* Track mbulk to be freed */
+ to_free[free++] = m_chain_next;
+ bytes_to_alloc += next_mbulk[i - 1]->len;
+ if (MBULK_SEG_IS_CHAINED(next_mbulk[i - 1])) {
+ /* continue traversing the chain */
+ m_chain_next = mbulk_chain_next(next_mbulk[i - 1]);
+ if (!m_chain_next)
+ break;
+
+ if (i >= MBULK_MAX_CHAIN) {
+ SLSI_ERR_NODEV("Max number of chained MBULK reached\n");
+ return NULL;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+
+cont:
+ if (atomic)
+ skb = slsi_alloc_skb(bytes_to_alloc, GFP_ATOMIC);
+ else {
+ spin_unlock_bh(&hip_priv->rx_lock);
+ skb = slsi_alloc_skb(bytes_to_alloc, GFP_KERNEL);
+ spin_lock_bh(&hip_priv->rx_lock);
+ }
+ if (!skb) {
+ /* Fix: bytes_to_alloc is size_t; use %zu (was %d - UB per
+ * printk format rules).
+ */
+ SLSI_ERR_NODEV("Error allocating skb %zu bytes\n", bytes_to_alloc);
+ return NULL;
+ }
+
+ cb = slsi_skb_cb_init(skb);
+ cb->sig_length = m->sig_bufsz - 4;
+ /* fapi_data_append adds to the data_length */
+ cb->data_length = cb->sig_length;
+
+ p = mbulk_get_signal(m);
+ if (!p) {
+ SLSI_ERR_NODEV("No signal in Mbulk\n");
+ print_hex_dump(KERN_ERR, SCSC_PREFIX "mbulk ", DUMP_PREFIX_NONE, 16, 1, m, sizeof(struct mbulk), 0);
+ slsi_kfree_skb(skb);
+ return NULL;
+ }
+ /* Remove 4Bytes offset coming from FW */
+ p += 4;
+
+ /* Don't need to copy the 4Bytes header coming from the FW */
+ memcpy(skb_put(skb, cb->sig_length), p, cb->sig_length);
+
+ if (m->len)
+ fapi_append_data(skb, mbulk_dat_r(m), m->len);
+ for (j = 0; j < i; j++)
+ fapi_append_data(skb, mbulk_dat_r(next_mbulk[j]), next_mbulk[j]->len);
+
+ return skb;
+}
+
+/* Add signal reference (offset in shared memory) in the selected queue */
+/* This function should be called in atomic context. Callers should supply proper locking mechanism */
+/* Returns 0 on success, -ENOSPC when the ring is full. On success the
+ * from-host interrupt is raised towards the R4.
+ */
+static int hip4_q_add_signal(struct slsi_hip4 *hip, enum hip4_hip_q_conf conf, scsc_mifram_ref phy_m, struct scsc_service *service)
+{
+ struct hip4_hip_control *ctrl = hip->hip_control;
+ struct hip4_priv *hip_priv = hip->hip_priv;
+ u8 idx_w;
+ u8 idx_r;
+
+ /* Read the current q write pointer */
+ idx_w = hip4_read_index(hip, conf, widx);
+ /* Read the current q read pointer */
+ idx_r = hip4_read_index(hip, conf, ridx);
+ SCSC_HIP4_SAMPLER_Q(hip_priv->minor, conf, widx, idx_w, 1);
+ SCSC_HIP4_SAMPLER_Q(hip_priv->minor, conf, ridx, idx_r, 1);
+
+ /* Queue is full (one slot is kept free to distinguish full from empty) */
+ if (idx_r == ((idx_w + 1) & (MAX_NUM - 1)))
+ return -ENOSPC;
+
+ /* Update array */
+ ctrl->q[conf].array[idx_w] = phy_m;
+ /* Memory barrier before updating shared mailbox */
+ smp_wmb();
+ SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, phy_m, conf);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ hip->hip_priv->stats.q_num_frames[conf] = hip->hip_priv->stats.q_num_frames[conf] + 1;
+#endif
+
+ /* Increase index */
+ idx_w++;
+ idx_w &= (MAX_NUM - 1);
+
+ /* Update the scoreboard */
+ hip4_update_index(hip, conf, widx, idx_w);
+
+ send = ktime_get();
+ scsc_service_mifintrbit_bit_set(service, hip_priv->intr_fromhost, SCSC_MIFINTR_TARGET_R4);
+
+ return 0;
+}
+
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+static void hip4_watchdog(struct timer_list *t)
+#else
+static void hip4_watchdog(unsigned long data)
+#endif
+{
+/* Watchdog timer: detects a stalled HIP (no interrupt seen within ~1 HZ of
+ * the timer firing) and, if the to-host interrupt is found masked, unmasks
+ * it to restart processing. Re-arms itself while activity is recent.
+ */
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+ struct hip4_priv *priv = from_timer(priv, t, watchdog);
+ struct slsi_hip4 *hip = priv->hip;
+#else
+ struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+#endif
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ struct scsc_service *service;
+ ktime_t intr_ov;
+ unsigned long flags;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ bool retrigger_watchdog = true;
+ u32 conf_hip4_ver = 0;
+#endif
+
+ if (!hip || !sdev || !sdev->service || !hip->hip_priv)
+ return;
+
+ spin_lock_irqsave(&hip->hip_priv->watchdog_lock, flags);
+ if (!atomic_read(&hip->hip_priv->watchdog_timer_active))
+ goto exit;
+
+ wdt = ktime_get();
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ /* v4: each queue has its own irq bit; a queue only counts as
+ * stalled when its interrupt timestamp is older than wdt - 1s.
+ */
+ /* if intr_received > wdt skip as intr has been unblocked */
+ if (test_and_clear_bit(HIP4_MIF_Q_FH_RFB, hip->hip_priv->irq_bitmap)) {
+ intr_ov = ktime_add_ms(intr_received_fb, jiffies_to_msecs(HZ));
+ if ((ktime_compare(intr_ov, wdt) < 0))
+ retrigger_watchdog = false;
+ }
+ if (test_and_clear_bit(HIP4_MIF_Q_TH_CTRL, hip->hip_priv->irq_bitmap)) {
+ intr_ov = ktime_add_ms(intr_received_ctrl, jiffies_to_msecs(HZ));
+ if ((ktime_compare(intr_ov, wdt) < 0))
+ retrigger_watchdog = false;
+ }
+ if (test_and_clear_bit(HIP4_MIF_Q_TH_DAT, hip->hip_priv->irq_bitmap)) {
+ intr_ov = ktime_add_ms(intr_received_data, jiffies_to_msecs(HZ));
+ if ((ktime_compare(intr_ov, wdt) < 0))
+ retrigger_watchdog = false;
+ }
+ if (retrigger_watchdog) {
+ wdt = ktime_set(0, 0);
+ /* Retrigger WDT to check flags again in the future */
+ mod_timer(&hip->hip_priv->watchdog, jiffies + HZ / 2);
+ goto exit;
+ }
+ } else {
+ /* if intr_received > wdt skip as intr has been unblocked */
+ if (ktime_compare(intr_received, wdt) > 0) {
+ wdt = ktime_set(0, 0);
+ goto exit;
+ }
+
+ intr_ov = ktime_add_ms(intr_received, jiffies_to_msecs(HZ));
+
+ /* Check that wdt is > 1 HZ intr */
+ if (!(ktime_compare(intr_ov, wdt) < 0)) {
+ wdt = ktime_set(0, 0);
+ /* Retrigger WDT to check flags again in the future */
+ mod_timer(&hip->hip_priv->watchdog, jiffies + HZ / 2);
+ goto exit;
+ }
+ }
+#else
+ /* if intr_received > wdt skip as intr has been unblocked */
+ if (ktime_compare(intr_received, wdt) > 0) {
+ wdt = ktime_set(0, 0);
+ goto exit;
+ }
+
+ intr_ov = ktime_add_ms(intr_received, jiffies_to_msecs(HZ));
+
+ /* Check that wdt is > 1 HZ intr */
+ if (!(ktime_compare(intr_ov, wdt) < 0)) {
+ wdt = ktime_set(0, 0);
+ /* Retrigger WDT to check flags again in the future */
+ mod_timer(&hip->hip_priv->watchdog, jiffies + HZ / 2);
+ goto exit;
+ }
+#endif
+
+ /* Unlock irq to avoid __local_bh_enable_ip warning */
+ spin_unlock_irqrestore(&hip->hip_priv->watchdog_lock, flags);
+ hip4_dump_dbg(hip, NULL, NULL, sdev->service);
+ spin_lock_irqsave(&hip->hip_priv->watchdog_lock, flags);
+
+ service = sdev->service;
+
+ SLSI_INFO_NODEV("Hip4 watchdog triggered\n");
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ if (conf_hip4_ver == 4) {
+ /* NOTE(review): C99 for-loop declaration; fine on kernels that
+ * build this driver, flagged by older kernel style checkers.
+ */
+ for (u8 i = 0; i < MIF_HIP_CFG_Q_NUM; i++) {
+ if (hip->hip_priv->intr_tohost_mul[i] == MIF_NO_IRQ)
+ continue;
+ if (scsc_service_mifintrbit_bit_mask_status_get(service) & (1 << hip->hip_priv->intr_tohost_mul[i])) {
+ /* Interrupt might be pending! */
+ SLSI_INFO_NODEV("%d: Interrupt Masked. Unmask to restart Interrupt processing\n", i);
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[i]);
+ }
+ }
+ } else {
+ if (scsc_service_mifintrbit_bit_mask_status_get(service) & (1 << hip->hip_priv->intr_tohost)) {
+ /* Interrupt might be pending! */
+ SLSI_INFO_NODEV("Interrupt Masked. Unmask to restart Interrupt processing\n");
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+ }
+ }
+#else
+ if (scsc_service_mifintrbit_bit_mask_status_get(service) & (1 << hip->hip_priv->intr_tohost)) {
+ /* Interrupt might be pending! */
+ SLSI_INFO_NODEV("Interrupt Masked. Unmask to restart Interrupt processing\n");
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+ }
+#endif
+exit:
+ spin_unlock_irqrestore(&hip->hip_priv->watchdog_lock, flags);
+}
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+/* Bottom half for the from-host return-frame-buffer (FH_RFB) queue.
+ *
+ * Runs in workqueue (process) context under hip_priv->rx_lock (BH-disabled
+ * spinlock). Drains FH_RFB between the read and write indexes: for each
+ * returned mbulk it reconstructs the TX "colour" tag, completes data-TX
+ * accounting via slsi_hip_tx_done(), and frees the host-side mbulk.
+ * Finally re-enables the FH_RFB interrupt (unless the HIP is closing) and
+ * releases the TX wakelock taken by hip4_irq_handler_fb().
+ */
+static void hip4_wq_fb(struct work_struct *data)
+{
+    struct hip4_priv *hip_priv = container_of(data, struct hip4_priv, intr_wq_fb);
+    struct slsi_hip4 *hip;
+    struct hip4_hip_control *ctrl;
+    struct scsc_service *service;
+    struct slsi_dev *sdev;
+    bool no_change = true;   /* stays true if the queue was empty -> spurious IRQ */
+    u8 idx_r;
+    u8 idx_w;
+    scsc_mifram_ref ref;
+    void *mem;
+
+    if (!hip_priv || !hip_priv->hip) {
+        SLSI_ERR_NODEV("hip_priv or hip_priv->hip is Null\n");
+        return;
+    }
+
+    hip = hip_priv->hip;
+    ctrl = hip->hip_control;
+
+    if (!ctrl) {
+        SLSI_ERR_NODEV("hip->hip_control is Null\n");
+        return;
+    }
+    sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    if (!sdev || !sdev->service) {
+        SLSI_ERR_NODEV("sdev or sdev->service is Null\n");
+        return;
+    }
+
+    spin_lock_bh(&hip_priv->rx_lock);
+    service = sdev->service;
+    SCSC_HIP4_SAMPLER_INT_BH(hip->hip_priv->minor, 2);
+    bh_init_fb = ktime_get();
+    /* BH is now running: clear the pending bit the watchdog inspects */
+    clear_bit(HIP4_MIF_Q_FH_RFB, hip->hip_priv->irq_bitmap);
+
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_FH_RFB, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_FH_RFB, widx);
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+    if (idx_r != idx_w) {
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_FH_RFB, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_FH_RFB, widx, idx_w, 1);
+    }
+#endif
+    /* Catch up with the firmware's write index */
+    while (idx_r != idx_w) {
+        struct mbulk *m;
+        u16 colour;
+
+        no_change = false;
+        ref = ctrl->q[HIP4_MIF_Q_FH_RFB].array[idx_r];
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_FH_RFB);
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)mem;
+
+        if (!m) {
+            SLSI_ERR_NODEV("FB: Mbulk is NULL\n");
+            goto consume_fb_mbulk;
+        }
+        /* colour is defined as: */
+        /* u16 register bits:
+         * 0 - do not use
+         * [2:1] - vif
+         * [7:3] - peer_index
+         * [10:8] - ac queue
+         */
+        colour = ((m->clas & 0xc0) << 2) | (m->pid & 0xfe);
+        /* Account ONLY for data RFB */
+        if ((m->pid & 0x1) == MBULK_POOL_ID_DATA) {
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+            SCSC_HIP4_SAMPLER_VIF_PEER(hip->hip_priv->minor, 0, (colour & 0x6) >> 1, (colour & 0xf8) >> 3);
+            /* to profile round-trip */
+            {
+                u16 host_tag;
+                u8 *get_host_tag;
+                /* This is a nasty way of getting the host_tag without involving mbulk processing
+                 * This hostag value should also be include in the cb descriptor which goes to
+                 * mbulk descriptor (no room left at the moment)
+                 */
+                get_host_tag = (u8 *)m;
+                host_tag = get_host_tag[37] << 8 | get_host_tag[36];
+                SCSC_HIP4_SAMPLER_PKT_TX_FB(hip->hip_priv->minor, host_tag);
+            }
+#endif
+            /* Ignore return value */
+            slsi_hip_tx_done(sdev, colour);
+        }
+        mbulk_free_virt_host(m);
+consume_fb_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+        hip4_update_index(hip, HIP4_MIF_Q_FH_RFB, ridx, idx_r);
+    }
+
+    if (no_change)
+        atomic_inc(&hip->hip_priv->stats.spurious_irqs);
+
+    if (!atomic_read(&hip->hip_priv->closing)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+        scsc_service_mifintrbit_bit_unmask(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_RFB]);
+    }
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip->hip_priv->minor, 2);
+
+#ifdef CONFIG_ANDROID
+    if (wake_lock_active(&hip->hip_priv->hip4_wake_lock_tx)) {
+        wake_unlock(&hip->hip_priv->hip4_wake_lock_tx);
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock_tx", WL_REASON_RX);
+    }
+#endif
+    bh_end_fb = ktime_get();
+    spin_unlock_bh(&hip_priv->rx_lock);
+}
+
+/* IRQ handler for the FH_RFB queue (interrupt context).
+ *
+ * Takes a timed TX wakelock, arms the HIP watchdog if it is not already
+ * running, records the pending bit for the watchdog, masks the interrupt
+ * and defers the actual queue drain to hip4_wq_fb() on either the system
+ * workqueue or the private hip4 workqueue (selected by hip4_system_wq).
+ */
+static void hip4_irq_handler_fb(int irq, void *data)
+{
+    struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+    struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 2);
+    intr_received_fb = ktime_get();
+
+#ifdef CONFIG_ANDROID
+    if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock_tx)) {
+        wake_lock_timeout(&hip->hip_priv->hip4_wake_lock_tx, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock_tx", WL_REASON_RX);
+    }
+#endif
+
+    /* Arm the watchdog only if the BH has not left it active already */
+    if (!atomic_read(&hip->hip_priv->watchdog_timer_active)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 1);
+        mod_timer(&hip->hip_priv->watchdog, jiffies + HZ);
+    }
+    set_bit(HIP4_MIF_Q_FH_RFB, hip->hip_priv->irq_bitmap);
+
+    /* Mask to avoid an interrupt storm while the BH drains the queue */
+    scsc_service_mifintrbit_bit_mask(sdev->service, irq);
+    if (hip4_system_wq)
+        schedule_work(&hip->hip_priv->intr_wq_fb);
+    else
+        queue_work(hip->hip_priv->hip4_workq, &hip->hip_priv->intr_wq_fb);
+    /* Clear interrupt */
+    scsc_service_mifintrbit_bit_clear(sdev->service, irq);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 2);
+}
+
+/* Bottom half for the to-host control (TH_CTRL) queue.
+ *
+ * Runs in workqueue (process) context under hip_priv->rx_lock (BH-disabled
+ * spinlock). Converts each queued mbulk into an skb via hip4_mbulk_to_skb(),
+ * hands it to slsi_hip_rx(), then returns the consumed mbulk references to
+ * the firmware through TH_RFB (retrying with msleep if that queue is full;
+ * the lock is dropped around the sleep). Re-enables the TH_CTRL interrupt
+ * unless the HIP is closing and releases the ctrl wakelock.
+ */
+static void hip4_wq_ctrl(struct work_struct *data)
+{
+    struct hip4_priv *hip_priv = container_of(data, struct hip4_priv, intr_wq_ctrl);
+    struct slsi_hip4 *hip;
+    struct hip4_hip_control *ctrl;
+    struct scsc_service *service;
+    struct slsi_dev *sdev;
+    u8 retry;
+    bool no_change = true;   /* stays true if the queue was empty -> spurious IRQ */
+    u8 idx_r;
+    u8 idx_w;
+    scsc_mifram_ref ref;
+    void *mem;
+    struct mbulk *m;
+#if defined(CONFIG_SCSC_WLAN_DEBUG) || defined(CONFIG_SCSC_WLAN_HIP4_PROFILING)
+    int id;
+#endif
+
+    if (!hip_priv || !hip_priv->hip) {
+        SLSI_ERR_NODEV("hip_priv or hip_priv->hip is Null\n");
+        return;
+    }
+
+    hip = hip_priv->hip;
+    ctrl = hip->hip_control;
+
+    if (!ctrl) {
+        SLSI_ERR_NODEV("hip->hip_control is Null\n");
+        return;
+    }
+    sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    if (!sdev || !sdev->service) {
+        SLSI_ERR_NODEV("sdev or sdev->service is Null\n");
+        return;
+    }
+
+    spin_lock_bh(&hip_priv->rx_lock);
+    service = sdev->service;
+    SCSC_HIP4_SAMPLER_INT_BH(hip->hip_priv->minor, 1);
+    bh_init_ctrl = ktime_get();
+    /* BH is now running: clear the pending bit the watchdog inspects */
+    clear_bit(HIP4_MIF_Q_TH_CTRL, hip->hip_priv->irq_bitmap);
+
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_TH_CTRL, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_TH_CTRL, widx);
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+    if (idx_r != idx_w) {
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_CTRL, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_CTRL, widx, idx_w, 1);
+    }
+#endif
+    while (idx_r != idx_w) {
+        struct sk_buff *skb;
+        /* TODO: currently the max number to be freed is 2. In future
+         * implementations (i.e. AMPDU) this number may be bigger
+         * list of mbulks to be freed
+         */
+        scsc_mifram_ref to_free[MBULK_MAX_CHAIN + 1] = { 0 };
+        u8 i = 0;
+
+        no_change = false;
+        /* Catch-up with idx_w */
+        ref = ctrl->q[HIP4_MIF_Q_TH_CTRL].array[idx_r];
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_TH_CTRL);
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)(mem);
+        /* Guard against a bad firmware reference: scsc_mx_service_mif_addr_to_ptr()
+         * may return NULL (checked in hip4_wq_fb()/hip4_wq() as well); m is
+         * dereferenced below for MBULK_F_WAKEUP.
+         */
+        if (!m) {
+            SLSI_ERR_NODEV("Ctrl: Mbulk is NULL 0x%x\n", ref);
+            goto consume_ctl_mbulk;
+        }
+        /* Process Control Signal */
+        skb = hip4_mbulk_to_skb(service, hip_priv, m, to_free, false);
+        if (!skb) {
+            SLSI_ERR_NODEV("Ctrl: Error parsing or allocating skb\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            goto consume_ctl_mbulk;
+        }
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by MLME frame 0x%x:\n", fapi_get_sigid(skb));
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
+        }
+#else
+        if (m->flag & MBULK_F_WAKEUP)
+            SLSI_INFO(sdev, "WIFI wakeup by MLME frame 0x%x\n", fapi_get_sigid(skb));
+#endif
+
+#if defined(CONFIG_SCSC_WLAN_DEBUG) || defined(CONFIG_SCSC_WLAN_HIP4_PROFILING)
+        id = fapi_get_sigid(skb);
+#endif
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+        /* log control signal, not unidata not debug */
+        if (fapi_is_mlme(skb))
+            SCSC_HIP4_SAMPLER_SIGNAL_CTRLRX(hip_priv->minor, (id & 0xff00) >> 8, id & 0xff);
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        hip4_history_record_add(TH, id);
+#endif
+        if (slsi_hip_rx(sdev, skb) < 0) {
+            SLSI_ERR_NODEV("Ctrl: Error detected slsi_hip_rx\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            slsi_kfree_skb(skb);
+        }
+consume_ctl_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+
+        /* Go through the list of references to free */
+        while ((ref = to_free[i++])) {
+            /* Set the number of retries */
+            retry = FB_NO_SPC_NUM_RET;
+            /* return to the firmware */
+            while (hip4_q_add_signal(hip, HIP4_MIF_Q_TH_RFB, ref, service) && retry > 0) {
+                SLSI_WARN_NODEV("Ctrl: Not enough space in FB, retry: %d/%d\n", retry, FB_NO_SPC_NUM_RET);
+                /* Drop the lock around msleep: cannot sleep under a BH spinlock */
+                spin_unlock_bh(&hip_priv->rx_lock);
+                msleep(FB_NO_SPC_SLEEP_MS);
+                spin_lock_bh(&hip_priv->rx_lock);
+                retry--;
+                if (retry == 0)
+                    SLSI_ERR_NODEV("Ctrl: FB has not been freed for %d ms\n", FB_NO_SPC_NUM_RET * FB_NO_SPC_SLEEP_MS);
+                SCSC_HIP4_SAMPLER_QFULL(hip_priv->minor, HIP4_MIF_Q_TH_RFB);
+            }
+        }
+        hip4_update_index(hip, HIP4_MIF_Q_TH_CTRL, ridx, idx_r);
+    }
+
+    if (no_change)
+        atomic_inc(&hip->hip_priv->stats.spurious_irqs);
+
+    if (!atomic_read(&hip->hip_priv->closing)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+        scsc_service_mifintrbit_bit_unmask(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_CTRL]);
+    }
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip->hip_priv->minor, 1);
+#ifdef CONFIG_ANDROID
+    if (wake_lock_active(&hip->hip_priv->hip4_wake_lock_ctrl)) {
+        wake_unlock(&hip->hip_priv->hip4_wake_lock_ctrl);
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock_ctrl", WL_REASON_RX);
+    }
+#endif
+    bh_end_ctrl = ktime_get();
+    spin_unlock_bh(&hip_priv->rx_lock);
+}
+
+/* IRQ handler for the TH_CTRL queue (interrupt context).
+ *
+ * Takes a timed ctrl wakelock, arms the HIP watchdog if not already active,
+ * records the pending bit for the watchdog, masks the interrupt and defers
+ * the queue drain to hip4_wq_ctrl() on the system or private hip4 workqueue
+ * (selected by hip4_system_wq).
+ */
+static void hip4_irq_handler_ctrl(int irq, void *data)
+{
+    struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+    struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 1);
+    intr_received_ctrl = ktime_get();
+
+#ifdef CONFIG_ANDROID
+    if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock_ctrl)) {
+        wake_lock_timeout(&hip->hip_priv->hip4_wake_lock_ctrl, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock_ctrl", WL_REASON_RX);
+    }
+#endif
+
+    /* Arm the watchdog only if the BH has not left it active already */
+    if (!atomic_read(&hip->hip_priv->watchdog_timer_active)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 1);
+        mod_timer(&hip->hip_priv->watchdog, jiffies + HZ);
+    }
+    set_bit(HIP4_MIF_Q_TH_CTRL, hip->hip_priv->irq_bitmap);
+
+    /* Mask to avoid an interrupt storm while the BH drains the queue */
+    scsc_service_mifintrbit_bit_mask(sdev->service, irq);
+
+    if (hip4_system_wq)
+        schedule_work(&hip->hip_priv->intr_wq_ctrl);
+    else
+        queue_work(hip->hip_priv->hip4_workq, &hip->hip_priv->intr_wq_ctrl);
+    /* Clear interrupt */
+    scsc_service_mifintrbit_bit_clear(sdev->service, irq);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 1);
+}
+
+/* NAPI poll callback for the to-host data (TH_DAT) queue.
+ *
+ * Runs in softirq context under hip_priv->rx_lock. Converts up to @budget
+ * queued mbulks into skbs via hip4_mbulk_to_skb(), hands them to
+ * slsi_hip_rx() and returns the consumed references to the firmware through
+ * TH_RFB (busy-wait retry with udelay; cannot sleep here). When fewer than
+ * @budget packets were processed it completes NAPI, unmasks the TH_DAT
+ * interrupt (unless closing) and releases the data wakelock.
+ *
+ * Returns the number of packets processed (<= budget).
+ */
+static int hip4_napi_poll(struct napi_struct *napi, int budget)
+{
+    struct hip4_priv *hip_priv = container_of(napi, struct hip4_priv, napi);
+    struct slsi_hip4 *hip;
+    struct hip4_hip_control *ctrl;
+    struct scsc_service *service;
+    struct slsi_dev *sdev;
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+    int id;
+#endif
+    u8 idx_r;
+    u8 idx_w;
+    scsc_mifram_ref ref;
+    void *mem;
+    struct mbulk *m;
+    u8 retry;
+    int work_done = 0;
+
+    if (!hip_priv || !hip_priv->hip) {
+        SLSI_ERR_NODEV("hip_priv or hip_priv->hip is Null\n");
+        return 0;
+    }
+
+    hip = hip_priv->hip;
+    ctrl = hip->hip_control;
+
+    if (!ctrl) {
+        SLSI_ERR_NODEV("hip->hip_control is Null\n");
+        return 0;
+    }
+    sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    if (!sdev || !sdev->service) {
+        SLSI_ERR_NODEV("sdev or sdev->service is Null\n");
+        return 0;
+    }
+
+    spin_lock_bh(&hip_priv->rx_lock);
+    SCSC_HIP4_SAMPLER_INT_BH(hip->hip_priv->minor, 0);
+    /* Only restamp bh_init_data on the first poll of a burst
+     * (i.e. when the previous burst has completed)
+     */
+    if (ktime_compare(bh_init_data, bh_end_data) <= 0) {
+        bh_init_data = ktime_get();
+        if (!atomic_read(&hip->hip_priv->closing)) {
+            atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+        }
+    }
+    clear_bit(HIP4_MIF_Q_TH_DAT, hip->hip_priv->irq_bitmap);
+
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_TH_DAT, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_TH_DAT, widx);
+
+    service = sdev->service;
+
+    SLSI_DBG4(sdev, SLSI_RX, "todo:%d\n", (idx_w - idx_r) & 0xff);
+    if (idx_r == idx_w) {
+        SLSI_DBG4(sdev, SLSI_RX, "nothing to do, NAPI Complete\n");
+        bh_end_data = ktime_get();
+        napi_complete(napi);
+        if (!atomic_read(&hip->hip_priv->closing)) {
+            /* Nothing more to drain, unmask interrupt */
+            scsc_service_mifintrbit_bit_unmask(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+        }
+#ifdef CONFIG_ANDROID
+        if (wake_lock_active(&hip->hip_priv->hip4_wake_lock_data)) {
+            wake_unlock(&hip->hip_priv->hip4_wake_lock_data);
+            SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock_data", WL_REASON_RX);
+        }
+#endif
+        goto end;
+    }
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+    if (idx_r != idx_w) {
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, widx, idx_w, 1);
+    }
+#endif
+
+    while (idx_r != idx_w) {
+        struct sk_buff *skb;
+        /* TODO: currently the max number to be freed is 2. In future
+         * implementations (i.e. AMPDU) this number may be bigger
+         */
+        /* list of mbulks to be freed */
+        scsc_mifram_ref to_free[MBULK_MAX_CHAIN + 1] = { 0 };
+        u8 i = 0;
+
+        /* Catch-up with idx_w */
+        ref = ctrl->q[HIP4_MIF_Q_TH_DAT].array[idx_r];
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_TH_DAT);
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)(mem);
+        /* Guard against a bad firmware reference: scsc_mx_service_mif_addr_to_ptr()
+         * may return NULL (checked in hip4_wq()/hip4_wq_fb() as well); m is
+         * dereferenced below for MBULK_F_WAKEUP.
+         */
+        if (!m) {
+            SLSI_ERR_NODEV("Dat: Mbulk is NULL 0x%x\n", ref);
+            goto consume_dat_mbulk;
+        }
+        skb = hip4_mbulk_to_skb(service, hip_priv, m, to_free, true);
+        if (!skb) {
+            SLSI_ERR_NODEV("Dat: Error parsing or allocating skb\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            goto consume_dat_mbulk;
+        }
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by DATA frame:\n");
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
+        }
+#else
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by DATA frame:\n");
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, fapi_get_siglen(skb) + ETH_HLEN);
+        }
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        id = fapi_get_sigid(skb);
+        hip4_history_record_add(TH, id);
+#endif
+        if (slsi_hip_rx(sdev, skb) < 0) {
+            SLSI_ERR_NODEV("Dat: Error detected slsi_hip_rx\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            slsi_kfree_skb(skb);
+        }
+consume_dat_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+
+        while ((ref = to_free[i++])) {
+            /* Set the number of retries */
+            retry = FB_NO_SPC_NUM_RET;
+            while (hip4_q_add_signal(hip, HIP4_MIF_Q_TH_RFB, ref, service) && retry > 0) {
+                SLSI_WARN_NODEV("Dat: Not enough space in FB, retry: %d/%d\n", retry, FB_NO_SPC_NUM_RET);
+                udelay(FB_NO_SPC_DELAY_US);
+                retry--;
+
+                if (retry == 0)
+                    SLSI_ERR_NODEV("Dat: FB has not been freed for %d us\n", FB_NO_SPC_NUM_RET * FB_NO_SPC_DELAY_US);
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+                SCSC_HIP4_SAMPLER_QFULL(hip_priv->minor, HIP4_MIF_Q_TH_RFB);
+#endif
+            }
+        }
+
+        work_done++;
+        if (budget == work_done) {
+            /* We have consumed all the budget */
+            break;
+        }
+    }
+
+    hip4_update_index(hip, HIP4_MIF_Q_TH_DAT, ridx, idx_r);
+
+    if (work_done < budget) {
+        SLSI_DBG4(sdev, SLSI_RX, "NAPI Complete\n");
+        bh_end_data = ktime_get();
+        napi_complete(napi);
+        if (!atomic_read(&hip->hip_priv->closing)) {
+            /* Nothing more to drain, unmask interrupt */
+            scsc_service_mifintrbit_bit_unmask(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+        }
+#ifdef CONFIG_ANDROID
+        if (wake_lock_active(&hip->hip_priv->hip4_wake_lock_data)) {
+            wake_unlock(&hip->hip_priv->hip4_wake_lock_data);
+            SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock_data", WL_REASON_RX);
+        }
+#endif
+    }
+end:
+    SLSI_DBG4(sdev, SLSI_RX, "work done:%d\n", work_done);
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip->hip_priv->minor, 0);
+    spin_unlock_bh(&hip_priv->rx_lock);
+    return work_done;
+}
+
+/* Schedule NAPI processing of the data queue.
+ * void* signature so it can be invoked remotely via smp_call_function_single().
+ */
+static void hip4_napi_schedule(void *data)
+{
+    struct slsi_hip4 *hip = data;
+
+    napi_schedule(&hip->hip_priv->napi);
+}
+
+/* Tasklet that steers data-queue NAPI onto the selected CPU.
+ *
+ * Tries to run hip4_napi_schedule() on napi_select_cpu via an IPI; if that
+ * CPU is offline or the cross-call fails, falls back to scheduling NAPI on
+ * the current CPU.
+ */
+static void hip4_irq_data_tasklet(unsigned long data)
+{
+    /* TODO: NAPI - formalize the CPU selection code */
+    struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+    int err = -EINVAL;
+
+    if (cpu_online(napi_select_cpu))
+        err = smp_call_function_single(napi_select_cpu, hip4_napi_schedule, hip, 0);
+
+    /* Local fallback when the remote schedule was not possible */
+    if (err)
+        napi_schedule(&hip->hip_priv->napi);
+}
+
+/* IRQ handler for the TH_DAT queue (interrupt context).
+ *
+ * Takes a timed data wakelock, arms the HIP watchdog if not already active,
+ * records the pending bit and hands processing to NAPI - either via the
+ * CPU-steering tasklet (when napi_select_cpu is set) or directly on this
+ * CPU. Masks and clears the TH_DAT interrupt so the BH can run.
+ */
+static void hip4_irq_handler_dat(int irq, void *data)
+{
+    struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+    struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    if (!hip || !sdev || !sdev->service || !hip->hip_priv)
+        return;
+
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 0);
+    intr_received_data = ktime_get();
+
+#ifdef CONFIG_ANDROID
+    if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock_data)) {
+        wake_lock_timeout(&hip->hip_priv->hip4_wake_lock_data, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock_data", WL_REASON_RX);
+    }
+#endif
+    /* Arm the watchdog only if the BH has not left it active already */
+    if (!atomic_read(&hip->hip_priv->watchdog_timer_active)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 1);
+        mod_timer(&hip->hip_priv->watchdog, jiffies + HZ);
+    }
+    set_bit(HIP4_MIF_Q_TH_DAT, hip->hip_priv->irq_bitmap);
+
+    if (napi_select_cpu)
+        tasklet_schedule(&hip->hip_priv->intr_tasklet);
+    else
+        napi_schedule(&hip->hip_priv->napi);
+
+    /* Mask interrupt to avoid interrupt storm and let BH run */
+    scsc_service_mifintrbit_bit_mask(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+    /* Clear interrupt */
+    scsc_service_mifintrbit_bit_clear(sdev->service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 0);
+}
+
+#endif /* #ifdef CONFIG_SCSC_WLAN_RX_NAPI */
+
+/* Decide whether RX flow control should be applied.
+ *
+ * Sums the rx_data queue lengths of the WLAN netdev and (best-effort) the
+ * P2P netdev, and returns true when the total exceeds max_buffered_frames.
+ * If netdev_remove_mutex cannot be taken without blocking, the P2P queue is
+ * deliberately skipped and only the WLAN count is evaluated.
+ */
+static bool slsi_check_rx_flowcontrol(struct slsi_dev *sdev)
+{
+    struct netdev_vif *ndev_vif;
+    int qlen = 0;
+
+    ndev_vif = netdev_priv(sdev->netdev[SLSI_NET_INDEX_WLAN]);
+    if (ndev_vif)
+        qlen = skb_queue_len(&ndev_vif->rx_data.queue);
+
+    /* Best effort: do not block in the RX path waiting for netdev removal */
+    if (!mutex_trylock(&sdev->netdev_remove_mutex))
+        goto evaluate;
+
+#if defined(SLSI_NET_INDEX_P2PX_SWLAN)
+    if (sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]) {
+        ndev_vif = netdev_priv(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
+        if (ndev_vif)
+            qlen += skb_queue_len(&ndev_vif->rx_data.queue);
+    }
+#elif defined(SLSI_NET_INDEX_P2PX)
+    if (sdev->netdev[SLSI_NET_INDEX_P2PX]) {
+        ndev_vif = netdev_priv(sdev->netdev[SLSI_NET_INDEX_P2PX]);
+        if (ndev_vif)
+            qlen += skb_queue_len(&ndev_vif->rx_data.queue);
+    }
+#endif
+    mutex_unlock(&sdev->netdev_remove_mutex);
+
+evaluate:
+    if (qlen > max_buffered_frames) {
+        SLSI_DBG1_NODEV(SLSI_HIP, "max qlen reached: %d\n", qlen);
+        return true;
+    }
+    SLSI_DBG3_NODEV(SLSI_HIP, "qlen %d\n", qlen);
+
+    return false;
+}
+
+/* Worqueue: Lower priority, run in process context. Can run simultaneously on
+ * different CPUs
+ */
+/* Worqueue: Lower priority, run in process context. Can run simultaneously on
+ * different CPUs
+ */
+/* Combined bottom half used when the multi-queue/NAPI path is not built in:
+ * drains FH_RFB (TX completions), TH_CTRL (control signals) and TH_DAT
+ * (data frames) in that order under hip_priv->rx_lock. The data queue is
+ * skipped when slsi_check_rx_flowcontrol() requests RX flow control.
+ * in_rx tracks progress (1..5) for watchdog diagnostics.
+ */
+static void hip4_wq(struct work_struct *data)
+{
+    struct hip4_priv *hip_priv = container_of(data, struct hip4_priv, intr_wq);
+    struct slsi_hip4 *hip = hip_priv->hip;
+    struct hip4_hip_control *ctrl = hip->hip_control;
+    scsc_mifram_ref ref;
+    void *mem;
+    struct mbulk *m;
+    u8 idx_r;
+    u8 idx_w;
+    struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+    struct scsc_service *service;
+    bool no_change = true;   /* stays true if all queues were empty -> spurious IRQ */
+    u8 retry;
+    u32 packets_total;
+    bool rx_flowcontrol = false;
+
+#if defined(CONFIG_SCSC_WLAN_HIP4_PROFILING) || defined(CONFIG_SCSC_WLAN_DEBUG)
+    int id;
+#endif
+
+    if (!sdev || !sdev->service) {
+        WARN_ON(1);
+        return;
+    }
+
+    service = sdev->service;
+
+    atomic_set(&hip->hip_priv->in_rx, 1);
+    if (slsi_check_rx_flowcontrol(sdev))
+        rx_flowcontrol = true;
+
+    atomic_set(&hip->hip_priv->in_rx, 2);
+    spin_lock_bh(&hip_priv->rx_lock);
+    atomic_set(&hip->hip_priv->in_rx, 3);
+
+    bh_init = ktime_get();
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+    /* Compute jitter */
+    {
+        u64 jitter;
+
+        /* IRQ-to-BH latency, binned into a fixed histogram */
+        jitter = ktime_to_ns(ktime_sub(bh_init, intr_received));
+
+        if (jitter <= HISTO_1)
+            histogram_1++;
+        else if (jitter > HISTO_1 && jitter <= HISTO_2)
+            histogram_2++;
+        else if (jitter > HISTO_2 && jitter <= HISTO_3)
+            histogram_3++;
+        else if (jitter > HISTO_3 && jitter <= HISTO_4)
+            histogram_4++;
+        else if (jitter > HISTO_4 && jitter <= HISTO_5)
+            histogram_5++;
+        else
+            histogram_6++;
+
+        if (jitter > max_jitter)
+            max_jitter = jitter;
+    }
+#endif
+    /* ---- Stage 1: FH_RFB - TX completions ---- */
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_FH_RFB, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_FH_RFB, widx);
+
+    if (idx_r != idx_w) {
+        no_change = false;
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_FH_RFB, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_FH_RFB, widx, idx_w, 1);
+    }
+
+    SCSC_HIP4_SAMPLER_INT_BH(hip_priv->minor, 2);
+    while (idx_r != idx_w) {
+        struct mbulk *m;
+        u16 colour;
+
+        ref = ctrl->q[HIP4_MIF_Q_FH_RFB].array[idx_r];
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_FH_RFB);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_FH_RFB] = hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_FH_RFB] + 1;
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)mem;
+        if (!m) {
+            SLSI_ERR_NODEV("FB: Mbulk is NULL 0x%x\n", ref);
+            goto consume_fb_mbulk;
+        }
+        /* colour is defined as: */
+        /* u16 register bits:
+         * 0 - do not use
+         * [2:1] - vif
+         * [7:3] - peer_index
+         * [10:8] - ac queue
+         */
+        colour = ((m->clas & 0xc0) << 2) | (m->pid & 0xfe);
+        /* Account ONLY for data RFB */
+        if ((m->pid & 0x1) == MBULK_POOL_ID_DATA) {
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+            SCSC_HIP4_SAMPLER_VIF_PEER(hip->hip_priv->minor, 0, (colour & 0x6) >> 1, (colour & 0xf8) >> 3);
+            /* to profile round-trip */
+            {
+                u16 host_tag;
+                u8 *get_host_tag;
+                /* This is a nasty way of getting the host_tag without involving mbulk processing
+                 * This hostag value should also be include in the cb descriptor which goes to
+                 * mbulk descriptor (no room left at the moment)
+                 */
+                get_host_tag = (u8 *)m;
+                host_tag = get_host_tag[37] << 8 | get_host_tag[36];
+                SCSC_HIP4_SAMPLER_PKT_TX_FB(hip->hip_priv->minor, host_tag);
+            }
+#endif
+            /* Ignore return value */
+            slsi_hip_tx_done(sdev, colour);
+        }
+        mbulk_free_virt_host(m);
+consume_fb_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+        hip4_update_index(hip, HIP4_MIF_Q_FH_RFB, ridx, idx_r);
+    }
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip_priv->minor, 2);
+
+    atomic_set(&hip->hip_priv->in_rx, 4);
+
+    /* ---- Stage 2: TH_CTRL - control signals ---- */
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_TH_CTRL, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_TH_CTRL, widx);
+
+    if (idx_r != idx_w) {
+        no_change = false;
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_CTRL, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_CTRL, widx, idx_w, 1);
+    }
+
+    SCSC_HIP4_SAMPLER_INT_BH(hip_priv->minor, 1);
+    while (idx_r != idx_w) {
+        struct sk_buff *skb;
+        /* TODO: currently the max number to be freed is 2. In future
+         * implementations (i.e. AMPDU) this number may be bigger
+         * list of mbulks to be freed
+         */
+        scsc_mifram_ref to_free[MBULK_MAX_CHAIN + 1] = { 0 };
+        u8 i = 0;
+
+        /* Catch-up with idx_w */
+        ref = ctrl->q[HIP4_MIF_Q_TH_CTRL].array[idx_r];
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_TH_CTRL);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_TH_CTRL] = hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_TH_CTRL] + 1;
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)(mem);
+        if (!m) {
+            SLSI_ERR_NODEV("Ctrl: Mbulk is NULL 0x%x\n", ref);
+            goto consume_ctl_mbulk;
+        }
+        /* Process Control Signal */
+        skb = hip4_mbulk_to_skb(service, hip_priv, m, to_free, false);
+        if (!skb) {
+            SLSI_ERR_NODEV("Ctrl: Error parsing or allocating skb\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            goto consume_ctl_mbulk;
+        }
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by MLME frame 0x%x:\n", fapi_get_sigid(skb));
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
+        }
+#else
+        if (m->flag & MBULK_F_WAKEUP)
+            SLSI_INFO(sdev, "WIFI wakeup by MLME frame 0x%x\n", fapi_get_sigid(skb));
+#endif
+
+#if defined(CONFIG_SCSC_WLAN_HIP4_PROFILING) || defined(CONFIG_SCSC_WLAN_DEBUG)
+        id = fapi_get_sigid(skb);
+#endif
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+        /* log control signal, not unidata not debug */
+        if (fapi_is_mlme(skb))
+            SCSC_HIP4_SAMPLER_SIGNAL_CTRLRX(hip_priv->minor, (id & 0xff00) >> 8, id & 0xff);
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        hip4_history_record_add(TH, id);
+#endif
+        if (slsi_hip_rx(sdev, skb) < 0) {
+            SLSI_ERR_NODEV("Ctrl: Error detected slsi_hip_rx\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            slsi_kfree_skb(skb);
+        }
+consume_ctl_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+
+        /* Go through the list of references to free */
+        while ((ref = to_free[i++])) {
+            /* Set the number of retries */
+            retry = FB_NO_SPC_NUM_RET;
+            /* return to the firmware */
+            while (hip4_q_add_signal(hip, HIP4_MIF_Q_TH_RFB, ref, service) && retry > 0) {
+                SLSI_WARN_NODEV("Ctrl: Not enough space in FB, retry: %d/%d\n", retry, FB_NO_SPC_NUM_RET);
+                /* Drop the lock around msleep: cannot sleep under a BH spinlock */
+                spin_unlock_bh(&hip_priv->rx_lock);
+                msleep(FB_NO_SPC_SLEEP_MS);
+                spin_lock_bh(&hip_priv->rx_lock);
+                retry--;
+                if (retry == 0)
+                    SLSI_ERR_NODEV("Ctrl: FB has not been freed for %d ms\n", FB_NO_SPC_NUM_RET * FB_NO_SPC_SLEEP_MS);
+                SCSC_HIP4_SAMPLER_QFULL(hip_priv->minor, HIP4_MIF_Q_TH_RFB);
+            }
+        }
+        hip4_update_index(hip, HIP4_MIF_Q_TH_CTRL, ridx, idx_r);
+    }
+
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip_priv->minor, 1);
+
+    if (rx_flowcontrol)
+        goto skip_data_q;
+
+    atomic_set(&hip->hip_priv->in_rx, 5);
+
+    /* ---- Stage 3: TH_DAT - data frames ---- */
+    idx_r = hip4_read_index(hip, HIP4_MIF_Q_TH_DAT, ridx);
+    idx_w = hip4_read_index(hip, HIP4_MIF_Q_TH_DAT, widx);
+
+    if (idx_r != idx_w) {
+        /* packets_total is only consumed below when this branch ran,
+         * i.e. when the loop executes at least once
+         */
+        packets_total = 0;
+        no_change = false;
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, ridx, idx_r, 1);
+        SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, widx, idx_w, 1);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        /* Compute DAT histogram */
+        {
+            u8 num_packets = (idx_w - idx_r) % 256;
+
+            if (num_packets <= HISTO_1_DATA)
+                histogram_1_data++;
+            else if (num_packets > HISTO_1_DATA && num_packets <= HISTO_2_DATA)
+                histogram_2_data++;
+            else if (num_packets > HISTO_2_DATA && num_packets <= HISTO_3_DATA)
+                histogram_3_data++;
+            else if (num_packets > HISTO_3_DATA && num_packets <= HISTO_4_DATA)
+                histogram_4_data++;
+            else if (num_packets > HISTO_4_DATA && num_packets <= HISTO_5_DATA)
+                histogram_5_data++;
+            else
+                histogram_6_data++;
+            if (num_packets > max_data)
+                max_data = num_packets;
+        }
+#endif
+    }
+
+    SCSC_HIP4_SAMPLER_INT_BH(hip_priv->minor, 0);
+    while (idx_r != idx_w) {
+        struct sk_buff *skb;
+        /* TODO: currently the max number to be freed is 2. In future
+         * implementations (i.e. AMPDU) this number may be bigger
+         */
+        /* list of mbulks to be freed */
+        scsc_mifram_ref to_free[MBULK_MAX_CHAIN + 1] = { 0 };
+        u8 i = 0;
+
+        packets_total++;
+        /* Catch-up with idx_w */
+        ref = ctrl->q[HIP4_MIF_Q_TH_DAT].array[idx_r];
+        SCSC_HIP4_SAMPLER_QREF(hip_priv->minor, ref, HIP4_MIF_Q_TH_DAT);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_TH_DAT] = hip->hip_priv->stats.q_num_frames[HIP4_MIF_Q_TH_DAT] + 1;
+#endif
+        mem = scsc_mx_service_mif_addr_to_ptr(service, ref);
+        m = (struct mbulk *)(mem);
+        if (!m) {
+            SLSI_ERR_NODEV("Dat: Mbulk is NULL 0x%x\n", ref);
+            goto consume_dat_mbulk;
+        }
+
+        skb = hip4_mbulk_to_skb(service, hip_priv, m, to_free, false);
+        if (!skb) {
+            SLSI_ERR_NODEV("Dat: Error parsing or allocating skb\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            goto consume_dat_mbulk;
+        }
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by DATA frame:\n");
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
+        }
+#else
+        if (m->flag & MBULK_F_WAKEUP) {
+            SLSI_INFO(sdev, "WIFI wakeup by DATA frame:\n");
+            SCSC_BIN_TAG_INFO(BINARY, skb->data, fapi_get_siglen(skb) + ETH_HLEN);
+        }
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+        id = fapi_get_sigid(skb);
+        hip4_history_record_add(TH, id);
+#endif
+        if (slsi_hip_rx(sdev, skb) < 0) {
+            SLSI_ERR_NODEV("Dat: Error detected slsi_hip_rx\n");
+            hip4_dump_dbg(hip, m, skb, service);
+            slsi_kfree_skb(skb);
+        }
+consume_dat_mbulk:
+        /* Increase index */
+        idx_r++;
+        idx_r &= (MAX_NUM - 1);
+
+        /* Go through the list of references to free */
+        while ((ref = to_free[i++])) {
+            /* Set the number of retries */
+            retry = FB_NO_SPC_NUM_RET;
+            /* return to the firmware */
+            while (hip4_q_add_signal(hip, HIP4_MIF_Q_TH_RFB, ref, service) && retry > 0) {
+                SLSI_WARN_NODEV("Dat: Not enough space in FB, retry: %d/%d\n", retry, FB_NO_SPC_NUM_RET);
+                /* Drop the lock around msleep: cannot sleep under a BH spinlock */
+                spin_unlock_bh(&hip_priv->rx_lock);
+                msleep(FB_NO_SPC_SLEEP_MS);
+                spin_lock_bh(&hip_priv->rx_lock);
+                retry--;
+                if (retry == 0)
+                    SLSI_ERR_NODEV("Dat: FB has not been freed for %d ms\n", FB_NO_SPC_NUM_RET * FB_NO_SPC_SLEEP_MS);
+                SCSC_HIP4_SAMPLER_QFULL(hip_priv->minor, HIP4_MIF_Q_TH_RFB);
+            }
+        }
+
+        hip4_update_index(hip, HIP4_MIF_Q_TH_DAT, ridx, idx_r);
+
+        /* read again the write index */
+        if ((idx_r == idx_w) && (packets_total < HIP4_POLLING_MAX_PACKETS)) {
+            u8 old_idx = idx_w;
+
+            idx_w = hip4_read_index(hip, HIP4_MIF_Q_TH_DAT, widx);
+            if (idx_w != old_idx) {
+                SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, ridx, idx_r, 1);
+                SCSC_HIP4_SAMPLER_Q(hip_priv->minor, HIP4_MIF_Q_TH_DAT, widx, idx_w, 1);
+            }
+        }
+    }
+    SCSC_HIP4_SAMPLER_INT_OUT_BH(hip_priv->minor, 0);
+
+    if (no_change)
+        atomic_inc(&hip->hip_priv->stats.spurious_irqs);
+
+skip_data_q:
+    if (!atomic_read(&hip->hip_priv->closing)) {
+        /* Reset status variable. DO THIS BEFORE UNMASKING!!!*/
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+        scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+    }
+
+#ifdef CONFIG_ANDROID
+    if (wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+        wake_unlock(&hip->hip_priv->hip4_wake_lock);
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock", WL_REASON_RX);
+    }
+#endif
+    bh_end = ktime_get();
+    atomic_set(&hip->hip_priv->in_rx, 0);
+    spin_unlock_bh(&hip_priv->rx_lock);
+}
+
+/* IRQ handler for hip4. The function runs in Interrupt context, so all the
+ * asssumptions related to interrupt should be applied (sleep, fast,...)
+ */
+/* IRQ handler for hip4. The function runs in Interrupt context, so all the
+ * asssumptions related to interrupt should be applied (sleep, fast,...)
+ *
+ * Single-interrupt variant (non multi-queue build): takes a timed wakelock,
+ * arms the watchdog (logging diagnostics when the watchdog is already
+ * pending), masks the interrupt when not suspended - or after MAX_STORM
+ * interrupts while suspended - and defers all queue draining to hip4_wq().
+ */
+static void hip4_irq_handler(int irq, void *data)
+{
+    struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+    struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+
+    (void)irq; /* unused */
+
+    if (!hip || !sdev || !sdev->service || !hip->hip_priv)
+        return;
+
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 0);
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 1);
+    SCSC_HIP4_SAMPLER_INT(hip->hip_priv->minor, 2);
+    /* RX path not ready yet: just clear the interrupt and leave */
+    if (!atomic_read(&hip->hip_priv->rx_ready))
+        goto end;
+
+    intr_received = ktime_get();
+
+#ifdef CONFIG_ANDROID
+    if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+        wake_lock_timeout(&hip->hip_priv->hip4_wake_lock, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+        SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock", WL_REASON_RX);
+    }
+#endif
+    /* if wd timer is active system might be in trouble as it should be
+     * cleared in the BH. Ignore updating the timer
+     */
+    if (!atomic_read(&hip->hip_priv->watchdog_timer_active)) {
+        atomic_set(&hip->hip_priv->watchdog_timer_active, 1);
+        mod_timer(&hip->hip_priv->watchdog, jiffies + HZ);
+    } else {
+        SLSI_ERR_NODEV("INT triggered while WDT is active\n");
+        SLSI_ERR_NODEV("bh_init %lld\n", ktime_to_ns(bh_init));
+        SLSI_ERR_NODEV("bh_end %lld\n", ktime_to_ns(bh_end));
+        SLSI_ERR_NODEV("hip4_wq work_busy %d\n", work_busy(&hip->hip_priv->intr_wq));
+        SLSI_ERR_NODEV("hip4_priv->in_rx %d\n", atomic_read(&hip->hip_priv->in_rx));
+    }
+    /* If system is not in suspend, mask interrupt to avoid interrupt storm and let BH run */
+    if (!atomic_read(&hip->hip_priv->in_suspend)) {
+        scsc_service_mifintrbit_bit_mask(sdev->service, hip->hip_priv->intr_tohost);
+        hip->hip_priv->storm_count = 0;
+    } else if (++hip->hip_priv->storm_count >= MAX_STORM) {
+        /* A MAX_STORM number of interrupts has been received
+         * when platform was in suspend. This indicates FW interrupt activity
+         * that should resume the hip4, so it is safe to mask to avoid
+         * interrupt storm.
+         */
+        hip->hip_priv->storm_count = 0;
+        scsc_service_mifintrbit_bit_mask(sdev->service, hip->hip_priv->intr_tohost);
+    }
+
+    atomic_inc(&hip->hip_priv->stats.irqs);
+
+    if (hip4_system_wq)
+        schedule_work(&hip->hip_priv->intr_wq);
+    else
+        queue_work(hip->hip_priv->hip4_workq, &hip->hip_priv->intr_wq);
+end:
+    /* Clear interrupt */
+    scsc_service_mifintrbit_bit_clear(sdev->service, hip->hip_priv->intr_tohost);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 0);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 1);
+    SCSC_HIP4_SAMPLER_INT_OUT(hip->hip_priv->minor, 2);
+}
+
+#ifdef CONFIG_SCSC_QOS
+/* Process-context worker: forward the PM QoS state requested by the traffic
+ * monitor callback to the SCSC service.
+ */
+static void hip4_pm_qos_work(struct work_struct *data)
+{
+ struct hip4_priv *hip_priv = container_of(data, struct hip4_priv, pm_qos_work);
+ struct slsi_hip4 *hip = hip_priv->hip;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ u8 state;
+
+ if (!sdev || !sdev->service) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* Snapshot pm_qos_state under its lock; hip4_traffic_monitor_cb()
+  * updates it from BH context. The debug print uses the snapshot so the
+  * shared field is never read outside the lock.
+  */
+ spin_lock_bh(&hip_priv->pm_qos_lock);
+ state = hip_priv->pm_qos_state;
+ spin_unlock_bh(&hip_priv->pm_qos_lock);
+ SLSI_DBG1(sdev, SLSI_HIP, "update to state %d\n", state);
+ scsc_service_pm_qos_update_request(sdev->service, state);
+}
+
+/* Traffic monitor callback: map the reported throughput state to a PM QoS
+ * level and defer the actual QoS request to process context via pm_qos_work.
+ */
+static void hip4_traffic_monitor_cb(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx)
+{
+ struct slsi_hip4 *hip = (struct slsi_hip4 *)client_ctx;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ u8 new_state;
+
+ if (!sdev)
+ return;
+
+ spin_lock_bh(&hip->hip_priv->pm_qos_lock);
+ SLSI_DBG1(sdev, SLSI_HIP, "event (state:%u, tput_tx:%u bps, tput_rx:%u bps)\n", state, tput_tx, tput_rx);
+ switch (state) {
+ case TRAFFIC_MON_CLIENT_STATE_HIGH:
+ new_state = SCSC_QOS_MAX;
+ break;
+ case TRAFFIC_MON_CLIENT_STATE_MID:
+ new_state = SCSC_QOS_MED;
+ break;
+ default:
+ new_state = SCSC_QOS_DISABLED;
+ break;
+ }
+ hip->hip_priv->pm_qos_state = new_state;
+ spin_unlock_bh(&hip->hip_priv->pm_qos_lock);
+
+ schedule_work(&hip->hip_priv->pm_qos_work);
+}
+#endif
+
+#ifdef CONFIG_SCSC_LOGRING
+/* Traffic monitor callback: suppress logring output while traffic is high
+ * (when dynamic logging is enabled) and re-enable it when traffic drops.
+ */
+static void hip4_traffic_monitor_logring_cb(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx)
+{
+ struct hip4_priv *hip_priv = (struct hip4_priv *)client_ctx;
+ struct slsi_hip4 *hip = hip_priv->hip;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ bool busy;
+
+ if (!sdev)
+ return;
+
+ SLSI_DBG1(sdev, SLSI_HIP, "event (state:%u, tput_tx:%u bps, tput_rx:%u bps)\n", state, tput_tx, tput_rx);
+ busy = (state == TRAFFIC_MON_CLIENT_STATE_HIGH) || (state == TRAFFIC_MON_CLIENT_STATE_MID);
+ if (!busy)
+ scsc_logring_enable(true);
+ else if (hip4_dynamic_logging)
+ scsc_logring_enable(false);
+}
+#endif
+
+/* Allocate and initialise a HIP4 instance: private state, mbulk TX pools,
+ * to-host/from-host interrupt bits, the shared-memory config/init tables
+ * (HIP config versions 4 and 5), the MIB download area, work structures and
+ * the watchdog timer. Returns 0 on success or a negative errno.
+ */
+int hip4_init(struct slsi_hip4 *hip)
+{
+ void *hip_ptr;
+ struct hip4_hip_control *hip_control;
+ struct scsc_service *service;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ scsc_mifram_ref ref, ref_scoreboard;
+ int i;
+ int ret;
+ u32 total_mib_len;
+ u32 mib_file_offset;
+
+ if (!sdev || !sdev->service)
+ return -EINVAL;
+
+ /* NOTE(review): GFP_ATOMIC implies this may run in atomic context -
+  * confirm; if it only runs in process context, GFP_KERNEL is preferable.
+  */
+ hip->hip_priv = kzalloc(sizeof(*hip->hip_priv), GFP_ATOMIC);
+ if (!hip->hip_priv)
+ return -ENOMEM;
+
+ /* NOTE(review): every error return below this point leaks hip->hip_priv
+  * (and, further down, registered interrupt bits and the workqueue).
+  * A goto-based cleanup path would fix this.
+  */
+ SLSI_INFO_NODEV("HIP4_WLAN_CONFIG_SIZE (%d)\n", HIP4_WLAN_CONFIG_SIZE);
+ SLSI_INFO_NODEV("HIP4_WLAN_MIB_SIZE (%d)\n", HIP4_WLAN_MIB_SIZE);
+ SLSI_INFO_NODEV("HIP4_WLAN_TX_DAT_SIZE (%d)\n", HIP4_WLAN_TX_DAT_SIZE);
+ SLSI_INFO_NODEV("HIP4_WLAN_TX_CTL_SIZE (%d)\n", HIP4_WLAN_TX_CTL_SIZE);
+ SLSI_INFO_NODEV("HIP4_WLAN_RX_SIZE (%d)\n", HIP4_WLAN_RX_SIZE);
+ SLSI_INFO_NODEV("HIP4_WLAN_TOTAL_MEM (%d)\n", HIP4_WLAN_TOTAL_MEM);
+ SLSI_INFO_NODEV("HIP4_DAT_SLOTS (%d)\n", HIP4_DAT_SLOTS);
+ SLSI_INFO_NODEV("HIP4_CTL_SLOTS (%d)\n", HIP4_CTL_SLOTS);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ /* Stats and procfs entries are debug-build only */
+ memset(&hip->hip_priv->stats, 0, sizeof(hip->hip_priv->stats));
+ hip->hip_priv->stats.start = ktime_get();
+ hip->hip_priv->stats.procfs_dir = proc_mkdir("driver/hip4", NULL);
+ if (hip->hip_priv->stats.procfs_dir) {
+ proc_create_data("info", S_IRUSR | S_IRGRP,
+ hip->hip_priv->stats.procfs_dir, &hip4_procfs_stats_fops, hip);
+ proc_create_data("history", S_IRUSR | S_IRGRP,
+ hip->hip_priv->stats.procfs_dir, &hip4_procfs_history_fops, hip);
+ proc_create_data("jitter", S_IRUSR | S_IRGRP,
+ hip->hip_priv->stats.procfs_dir, &hip4_procfs_jitter_fops, hip);
+ }
+
+ hip->hip_priv->minor = hip4_sampler_register_hip(sdev->maxwell_core);
+ if (hip->hip_priv->minor < SCSC_HIP4_INTERFACES) {
+ SLSI_DBG1_NODEV(SLSI_HIP, "registered with minor %d\n", hip->hip_priv->minor);
+ sdev->minor_prof = hip->hip_priv->minor;
+ } else {
+ SLSI_DBG1_NODEV(SLSI_HIP, "hip4_sampler is not enabled\n");
+ }
+#endif
+
+ /* Used in the workqueue */
+ hip->hip_priv->hip = hip;
+
+ service = sdev->service;
+
+ hip->hip_priv->host_pool_id_dat = MBULK_POOL_ID_DATA;
+ hip->hip_priv->host_pool_id_ctl = MBULK_POOL_ID_CTRL;
+
+ /* hip_ref contains the reference of the start of shared memory allocated for WLAN */
+ /* hip_ptr is the kernel address of hip_ref*/
+ hip_ptr = scsc_mx_service_mif_addr_to_ptr(service, hip->hip_ref);
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ /* Configure mbulk allocator - Data QUEUES */
+ ret = mbulk_pool_add(MBULK_POOL_ID_DATA, hip_ptr + HIP4_WLAN_TX_DAT_OFFSET,
+ hip_ptr + HIP4_WLAN_TX_DAT_OFFSET + HIP4_WLAN_TX_DAT_SIZE,
+ (HIP4_WLAN_TX_DAT_SIZE / HIP4_DAT_SLOTS) - sizeof(struct mbulk), 5,
+ hip->hip_priv->minor);
+ if (ret)
+ return ret;
+
+ /* Configure mbulk allocator - Control QUEUES */
+ ret = mbulk_pool_add(MBULK_POOL_ID_CTRL, hip_ptr + HIP4_WLAN_TX_CTL_OFFSET,
+ hip_ptr + HIP4_WLAN_TX_CTL_OFFSET + HIP4_WLAN_TX_CTL_SIZE,
+ (HIP4_WLAN_TX_CTL_SIZE / HIP4_CTL_SLOTS) - sizeof(struct mbulk), 0,
+ hip->hip_priv->minor);
+ if (ret)
+ return ret;
+#else
+ /* Configure mbulk allocator - Data QUEUES */
+ ret = mbulk_pool_add(MBULK_POOL_ID_DATA, hip_ptr + HIP4_WLAN_TX_DAT_OFFSET,
+ hip_ptr + HIP4_WLAN_TX_DAT_OFFSET + HIP4_WLAN_TX_DAT_SIZE,
+ (HIP4_WLAN_TX_DAT_SIZE / HIP4_DAT_SLOTS) - sizeof(struct mbulk), 5);
+ if (ret)
+ return ret;
+
+ /* Configure mbulk allocator - Control QUEUES */
+ ret = mbulk_pool_add(MBULK_POOL_ID_CTRL, hip_ptr + HIP4_WLAN_TX_CTL_OFFSET,
+ hip_ptr + HIP4_WLAN_TX_CTL_OFFSET + HIP4_WLAN_TX_CTL_SIZE,
+ (HIP4_WLAN_TX_CTL_SIZE / HIP4_CTL_SLOTS) - sizeof(struct mbulk), 0);
+ if (ret)
+ return ret;
+#endif
+
+ /* Reset hip_control table */
+ memset(hip_ptr, 0, sizeof(struct hip4_hip_control));
+
+ /* Reset Sample q values sending 0xff */
+ SCSC_HIP4_SAMPLER_RESET(hip->hip_priv->minor);
+
+ /* Set driver is not ready to receive interrupts */
+ atomic_set(&hip->hip_priv->rx_ready, 0);
+
+ /***** VERSION 4 *******/
+ /* TOHOST Handler allocator */
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ /* Per-queue interrupt bits: FH queues and TH RFB have no dedicated IRQ */
+ /* Q0 FH CTRL */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_CTRL] = MIF_NO_IRQ;
+ /* Q1 FH DATA */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_DAT] = MIF_NO_IRQ;
+ /* Q5 TH RFB */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_RFB] = MIF_NO_IRQ;
+ /* Q2 FH FB */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_RFB] =
+ scsc_service_mifintrbit_register_tohost(service, hip4_irq_handler_fb, hip);
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_RFB]);
+ /* Q3 TH CTRL */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_CTRL] =
+ scsc_service_mifintrbit_register_tohost(service, hip4_irq_handler_ctrl, hip);
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_CTRL]);
+ /* Q4 TH DAT */
+ hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT] =
+ scsc_service_mifintrbit_register_tohost(service, hip4_irq_handler_dat, hip);
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+#endif
+ /* TOHOST Handler allocator */
+ hip->hip_priv->intr_tohost =
+ scsc_service_mifintrbit_register_tohost(service, hip4_irq_handler, hip);
+
+ /* Mask the interrupt to prevent intr being kicked during start */
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost);
+
+ /* FROMHOST Handler allocator */
+ hip->hip_priv->intr_fromhost =
+ scsc_service_mifintrbit_alloc_fromhost(service, SCSC_MIFINTR_TARGET_R4);
+
+ /* Get hip_control pointer on shared memory */
+ hip_control = (struct hip4_hip_control *)(hip_ptr +
+ HIP4_WLAN_CONFIG_OFFSET);
+
+ /* Initialize scoreboard */
+ if (scsc_mx_service_mif_ptr_to_addr(service, &hip_control->scoreboard, &ref_scoreboard))
+ return -EFAULT;
+
+ /* Calculate total space used by wlan*.hcf files */
+ for (i = 0, total_mib_len = 0; i < SLSI_WLAN_MAX_MIB_FILE; i++)
+ total_mib_len += sdev->mib[i].mib_len;
+
+ /* Copy MIB content in shared memory if any */
+ /* Clear the area to avoid picking up old values */
+ memset(hip_ptr + HIP4_WLAN_MIB_OFFSET, 0, HIP4_WLAN_MIB_SIZE);
+
+ if (total_mib_len > HIP4_WLAN_MIB_SIZE) {
+ SLSI_ERR_NODEV("MIB size (%d), is bigger than the MIB AREA (%d). Aborting memcpy\n", total_mib_len, HIP4_WLAN_MIB_SIZE);
+ hip_control->config_v4.mib_loc = 0;
+ hip_control->config_v4.mib_sz = 0;
+ hip_control->config_v5.mib_loc = 0;
+ hip_control->config_v5.mib_sz = 0;
+ total_mib_len = 0;
+ } else if (total_mib_len) {
+ SLSI_INFO_NODEV("Loading MIB into shared memory, size (%d)\n", total_mib_len);
+ /* Load each MIB file into shared DRAM region, back to back */
+ for (i = 0, mib_file_offset = 0;
+ i < SLSI_WLAN_MAX_MIB_FILE;
+ i++) {
+ SLSI_INFO_NODEV("Loading MIB %d into shared memory, offset (%d), size (%d), total (%d)\n", i, mib_file_offset, sdev->mib[i].mib_len, total_mib_len);
+ if (sdev->mib[i].mib_len) {
+ memcpy((u8 *)hip_ptr + HIP4_WLAN_MIB_OFFSET + mib_file_offset, sdev->mib[i].mib_data, sdev->mib[i].mib_len);
+ mib_file_offset += sdev->mib[i].mib_len;
+ }
+ }
+ hip_control->config_v4.mib_loc = hip->hip_ref + HIP4_WLAN_MIB_OFFSET;
+ hip_control->config_v4.mib_sz = total_mib_len;
+ hip_control->config_v5.mib_loc = hip->hip_ref + HIP4_WLAN_MIB_OFFSET;
+ hip_control->config_v5.mib_sz = total_mib_len;
+ } else {
+ hip_control->config_v4.mib_loc = 0;
+ hip_control->config_v4.mib_sz = 0;
+ hip_control->config_v5.mib_loc = 0;
+ hip_control->config_v5.mib_sz = 0;
+ }
+
+ /* Initialize hip_control table for version 4 */
+ /***** VERSION 4 *******/
+ hip_control->config_v4.magic_number = 0xcaba0401;
+ hip_control->config_v4.hip_config_ver = 4;
+ hip_control->config_v4.config_len = sizeof(struct hip4_hip_config_version_4);
+ hip_control->config_v4.host_cache_line = 64;
+ hip_control->config_v4.host_buf_loc = hip->hip_ref + HIP4_WLAN_TX_OFFSET;
+ hip_control->config_v4.host_buf_sz = HIP4_WLAN_TX_SIZE;
+ hip_control->config_v4.fw_buf_loc = hip->hip_ref + HIP4_WLAN_RX_OFFSET;
+ hip_control->config_v4.fw_buf_sz = HIP4_WLAN_RX_SIZE;
+ hip_control->config_v4.log_config_loc = 0;
+
+ hip_control->config_v4.mif_fh_int_n = hip->hip_priv->intr_fromhost;
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++) {
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ hip_control->config_v4.mif_th_int_n[i] = hip->hip_priv->intr_tohost_mul[i];
+#else
+ hip_control->config_v4.mif_th_int_n[i] = hip->hip_priv->intr_tohost;
+#endif
+ }
+
+ hip_control->config_v4.scbrd_loc = (u32)ref_scoreboard;
+ hip_control->config_v4.q_num = 6;
+ hip_control->config_v4.q_len = 256;
+ hip_control->config_v4.q_idx_sz = 1;
+ /* Initialize q relative positions */
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++) {
+ if (scsc_mx_service_mif_ptr_to_addr(service, &hip_control->q[i].array, &ref))
+ return -EFAULT;
+ hip_control->config_v4.q_loc[i] = (u32)ref;
+ }
+ /***** END VERSION 4 *******/
+
+ /* Initialize hip_control table for version 5 */
+ /***** VERSION 5 *******/
+ hip_control->config_v5.magic_number = 0xcaba0401;
+ hip_control->config_v5.hip_config_ver = 5;
+ hip_control->config_v5.config_len = sizeof(struct hip4_hip_config_version_5);
+ hip_control->config_v5.host_cache_line = 64;
+ hip_control->config_v5.host_buf_loc = hip->hip_ref + HIP4_WLAN_TX_OFFSET;
+ hip_control->config_v5.host_buf_sz = HIP4_WLAN_TX_SIZE;
+ hip_control->config_v5.fw_buf_loc = hip->hip_ref + HIP4_WLAN_RX_OFFSET;
+ hip_control->config_v5.fw_buf_sz = HIP4_WLAN_RX_SIZE;
+ hip_control->config_v5.log_config_loc = 0;
+ hip_control->config_v5.mif_fh_int_n = hip->hip_priv->intr_fromhost;
+ hip_control->config_v5.mif_th_int_n = hip->hip_priv->intr_tohost;
+ hip_control->config_v5.q_num = 6;
+ hip_control->config_v5.q_len = 256;
+ hip_control->config_v5.q_idx_sz = 1;
+ hip_control->config_v5.scbrd_loc = (u32)ref_scoreboard; /* scoreboard location */
+
+ /* Initialize q relative positions */
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++) {
+ if (scsc_mx_service_mif_ptr_to_addr(service, &hip_control->q[i].array, &ref))
+ return -EFAULT;
+ hip_control->config_v5.q_loc[i] = (u32)ref;
+ }
+ /***** END VERSION 5 *******/
+
+ /* Initialize hip_init configuration */
+ hip_control->init.magic_number = 0xcaaa0400;
+ if (scsc_mx_service_mif_ptr_to_addr(service, &hip_control->config_v4, &ref))
+ return -EFAULT;
+ hip_control->init.version_a_ref = ref;
+
+ if (scsc_mx_service_mif_ptr_to_addr(service, &hip_control->config_v5, &ref))
+ return -EFAULT;
+ hip_control->init.version_b_ref = ref;
+ /* End hip_init configuration */
+
+ hip->hip_control = hip_control;
+ hip->hip_priv->scbrd_base = &hip_control->scoreboard;
+
+ spin_lock_init(&hip->hip_priv->rx_lock);
+ atomic_set(&hip->hip_priv->in_rx, 0);
+ spin_lock_init(&hip->hip_priv->tx_lock);
+ atomic_set(&hip->hip_priv->in_tx, 0);
+
+ /* Init work structs */
+ hip->hip_priv->hip4_workq = create_singlethread_workqueue("hip4_work");
+ if (!hip->hip_priv->hip4_workq) {
+ SLSI_ERR_NODEV("Error creating singlethread_workqueue\n");
+ return -ENOMEM;
+ }
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ tasklet_init(&hip->hip_priv->intr_tasklet, hip4_irq_data_tasklet, (unsigned long)hip);
+ INIT_WORK(&hip->hip_priv->intr_wq_ctrl, hip4_wq_ctrl);
+ INIT_WORK(&hip->hip_priv->intr_wq_fb, hip4_wq_fb);
+#endif
+ INIT_WORK(&hip->hip_priv->intr_wq, hip4_wq);
+
+ rwlock_init(&hip->hip_priv->rw_scoreboard);
+
+ /* Setup watchdog timer */
+ atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+ spin_lock_init(&hip->hip_priv->watchdog_lock);
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+ timer_setup(&hip->hip_priv->watchdog, hip4_watchdog, 0);
+#else
+ setup_timer(&hip->hip_priv->watchdog, hip4_watchdog, (unsigned long)hip);
+#endif
+
+ atomic_set(&hip->hip_priv->gmod, HIP4_DAT_SLOTS);
+ atomic_set(&hip->hip_priv->gactive, 1);
+ spin_lock_init(&hip->hip_priv->gbot_lock);
+ hip->hip_priv->saturated = 0;
+
+#ifdef CONFIG_ANDROID
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ wake_lock_init(&hip->hip_priv->hip4_wake_lock_tx, WAKE_LOCK_SUSPEND, "hip4_wake_lock_tx");
+ wake_lock_init(&hip->hip_priv->hip4_wake_lock_ctrl, WAKE_LOCK_SUSPEND, "hip4_wake_lock_ctrl");
+ wake_lock_init(&hip->hip_priv->hip4_wake_lock_data, WAKE_LOCK_SUSPEND, "hip4_wake_lock_data");
+#endif
+ wake_lock_init(&hip->hip_priv->hip4_wake_lock, WAKE_LOCK_SUSPEND, "hip4_wake_lock");
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Init SMAPPER */
+ if (hip4_smapper_enable) {
+ if (hip4_smapper_init(sdev, hip)) {
+ SLSI_ERR_NODEV("Error on hip4_smapper init\n");
+ hip4_smapper_is_enabled = false;
+ } else {
+ hip4_smapper_is_enabled = true;
+ }
+ }
+#endif
+#ifdef CONFIG_SCSC_QOS
+ /* setup for PM QoS */
+ spin_lock_init(&hip->hip_priv->pm_qos_lock);
+
+ if (hip4_qos_enable) {
+ if (!scsc_service_pm_qos_add_request(service, SCSC_QOS_DISABLED)) {
+ /* register to traffic monitor for throughput events */
+ if (slsi_traffic_mon_client_register(sdev, hip, TRAFFIC_MON_CLIENT_MODE_EVENTS, (hip4_qos_med_tput_in_mbps * 1000 * 1000), (hip4_qos_max_tput_in_mbps * 1000 * 1000), hip4_traffic_monitor_cb))
+ SLSI_WARN(sdev, "failed to add PM QoS client to traffic monitor\n");
+ else
+ INIT_WORK(&hip->hip_priv->pm_qos_work, hip4_pm_qos_work);
+ } else {
+ SLSI_WARN(sdev, "failed to add PM QoS request\n");
+ }
+ }
+#endif
+#ifdef CONFIG_SCSC_LOGRING
+ /* register to traffic monitor for dynamic logring logging */
+ if (slsi_traffic_mon_client_register(sdev, hip->hip_priv, TRAFFIC_MON_CLIENT_MODE_EVENTS, 0, (hip4_dynamic_logging_tput_in_mbps * 1000 * 1000), hip4_traffic_monitor_logring_cb))
+ SLSI_WARN(sdev, "failed to add Logring client to traffic monitor\n");
+#endif
+ return 0;
+}
+
+/**
+ * Report how many free mbulk slots remain in the control pool, i.e. how
+ * many control packets can currently be queued for transmission.
+ */
+int hip4_free_ctrl_slots_count(struct slsi_hip4 *hip)
+{
+ (void)hip; /* the control pool is global, not per-HIP instance */
+
+ return mbulk_pool_get_free_count(MBULK_POOL_ID_CTRL);
+}
+
+/**
+ * Transmit a frame through the HIP.
+ * It does NOT take ownership of the SKB unless it successfully transmits it;
+ * as a consequence skb is NOT freed on error.
+ * We return -ENOSPC on queue related troubles in order to trigger upper
+ * layers of the kernel to requeue/retry.
+ * We free ONLY locally-allocated stuff.
+ */
+int scsc_wifi_transmit_frame(struct slsi_hip4 *hip, bool ctrl_packet, struct sk_buff *skb)
+{
+ struct scsc_service *service;
+ scsc_mifram_ref offset;
+ struct mbulk *m;
+ struct slsi_dev *sdev;
+ struct fapi_signal_header *fapi_header;
+ int ret = 0;
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ struct slsi_skb_cb *cb;
+#endif
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+#endif
+
+ /* Validate the arguments BEFORE dereferencing them: the original code
+  * evaluated slsi_skb_cb_get(skb) and container_of(hip, ...) in the
+  * declarations, i.e. before the NULL checks below could reject them.
+  */
+ if (!hip || !hip->hip_priv || !skb)
+ return -EINVAL;
+
+ sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ if (!sdev->service)
+ return -EINVAL;
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ cb = slsi_skb_cb_get(skb);
+#endif
+
+ spin_lock_bh(&hip->hip_priv->tx_lock);
+ atomic_set(&hip->hip_priv->in_tx, 1);
+
+#ifdef CONFIG_ANDROID
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
+ if (conf_hip4_ver == 4) {
+ if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock_tx)) {
+ wake_lock_timeout(&hip->hip_priv->hip4_wake_lock_tx, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock_tx", WL_REASON_TX);
+ }
+ } else {
+ if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+ wake_lock_timeout(&hip->hip_priv->hip4_wake_lock, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock", WL_REASON_TX);
+ }
+ }
+#else
+ if (!wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+ wake_lock_timeout(&hip->hip_priv->hip4_wake_lock, msecs_to_jiffies(SLSI_HIP_WAKELOCK_TIME_OUT_IN_MS));
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_TAKEN, "hip4_wake_lock", WL_REASON_TX);
+ }
+#endif
+#endif
+ service = sdev->service;
+
+ fapi_header = (struct fapi_signal_header *)skb->data;
+
+ /* Copy the skb payload into a shared-memory mbulk */
+ m = hip4_skb_to_mbulk(hip->hip_priv, skb, ctrl_packet);
+ if (!m) {
+ SCSC_HIP4_SAMPLER_MFULL(hip->hip_priv->minor);
+ ret = -ENOSPC;
+ SLSI_ERR_NODEV("mbulk is NULL\n");
+ goto error;
+ }
+
+ if (scsc_mx_service_mif_ptr_to_addr(service, m, &offset) < 0) {
+ mbulk_free_virt_host(m);
+ ret = -EFAULT;
+ SLSI_ERR_NODEV("Incorrect reference memory\n");
+ goto error;
+ }
+
+ /* Queue the mbulk reference on the appropriate from-host queue */
+ if (hip4_q_add_signal(hip, ctrl_packet ? HIP4_MIF_Q_FH_CTRL : HIP4_MIF_Q_FH_DAT, offset, service)) {
+ SCSC_HIP4_SAMPLER_QFULL(hip->hip_priv->minor, ctrl_packet ? HIP4_MIF_Q_FH_CTRL : HIP4_MIF_Q_FH_DAT);
+ mbulk_free_virt_host(m);
+ ret = -ENOSPC;
+ SLSI_ERR_NODEV("No space\n");
+ goto error;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ /* colour is defined as: */
+ /* u8 register bits:
+  * 0 - do not use
+  * [2:1] - vif
+  * [7:3] - peer_index
+  */
+ if (ctrl_packet) {
+ /* Record control signal */
+ SCSC_HIP4_SAMPLER_SIGNAL_CTRLTX(hip->hip_priv->minor, (fapi_header->id & 0xff00) >> 8, fapi_header->id & 0xff);
+ } else {
+ SCSC_HIP4_SAMPLER_PKT_TX_HIP4(hip->hip_priv->minor, fapi_get_u16(skb, u.ma_unitdata_req.host_tag));
+ SCSC_HIP4_SAMPLER_VIF_PEER(hip->hip_priv->minor, 1, (cb->colour & 0x6) >> 1, (cb->colour & 0xf8) >> 3);
+ }
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ hip4_history_record_add(FH, fapi_header->id);
+#endif
+
+ /* Here we push a copy of the bare skb TRANSMITTED data also to the logring
+  * as a binary record. Note that bypassing UDI subsystem as a whole
+  * means we are losing:
+  * UDI filtering / UDI Header INFO / UDI QueuesFrames Throttling /
+  * UDI Skb Asynchronous processing
+  * We keep separated DATA/CTRL paths.
+  */
+ if (ctrl_packet)
+ SCSC_BIN_TAG_DEBUG(BIN_WIFI_CTRL_TX, skb->data, skb_headlen(skb));
+ else
+ SCSC_BIN_TAG_DEBUG(BIN_WIFI_DATA_TX, skb->data, skb_headlen(skb));
+ /* slsi_log_clients_log_signal_fast: skb is copied to all the log clients */
+ slsi_log_clients_log_signal_fast(sdev, &sdev->log_clients, skb, SLSI_LOG_DIRECTION_FROM_HOST);
+ /* Success: ownership of the frame passed to the HIP; free the skb */
+ slsi_kfree_skb(skb);
+ atomic_set(&hip->hip_priv->in_tx, 0);
+ spin_unlock_bh(&hip->hip_priv->tx_lock);
+ return 0;
+
+error:
+#ifdef CONFIG_ANDROID
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ if (conf_hip4_ver == 4) {
+ if (wake_lock_active(&hip->hip_priv->hip4_wake_lock_tx)) {
+ wake_unlock(&hip->hip_priv->hip4_wake_lock_tx);
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock_tx", WL_REASON_TX);
+ }
+ } else {
+ if (wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+ wake_unlock(&hip->hip_priv->hip4_wake_lock);
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock", WL_REASON_TX);
+ }
+ }
+#else
+ if (wake_lock_active(&hip->hip_priv->hip4_wake_lock)) {
+ wake_unlock(&hip->hip_priv->hip4_wake_lock);
+ SCSC_WLOG_WAKELOCK(WLOG_LAZY, WL_RELEASED, "hip4_wake_lock", WL_REASON_TX);
+ }
+#endif
+#endif
+ atomic_set(&hip->hip_priv->in_tx, 0);
+ spin_unlock_bh(&hip->hip_priv->tx_lock);
+ return ret;
+}
+
+/* HIP4 has been initialized; set it up with the values provided by the FW. */
+int hip4_setup(struct slsi_hip4 *hip)
+{
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ struct scsc_service *service;
+ u32 conf_hip4_ver = 0;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ struct net_device *dev;
+#endif
+
+ if (!sdev || !sdev->service)
+ return -EIO;
+
+ if (atomic_read(&sdev->hip.hip_state) != SLSI_HIP_STATE_STARTED)
+ return -EIO;
+
+ service = sdev->service;
+
+ /* Get the Version reported by the FW */
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+ /* Check if the version is supported. And get the index */
+ /* This is hardcoded and may change in future versions.
+  * Only versions 4 and 5 exist: hip4_init() publishes config_v4 and
+  * config_v5, and the branches below handle exactly those two (the
+  * original guard accepted 3 instead of 5, which contradicted the
+  * "version 5" else-path).
+  */
+ if (conf_hip4_ver != 4 && conf_hip4_ver != 5) {
+ SLSI_ERR_NODEV("FW Version %d not supported\n", conf_hip4_ver);
+ return -EIO;
+ }
+
+ if (conf_hip4_ver == 4) {
+ hip->hip_priv->unidat_req_headroom =
+ scsc_wifi_get_hip_config_u8(&hip->hip_control, unidat_req_headroom, 4);
+ hip->hip_priv->unidat_req_tailroom =
+ scsc_wifi_get_hip_config_u8(&hip->hip_control, unidat_req_tailroom, 4);
+ hip->hip_priv->version = 4;
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ rcu_read_lock();
+ /* one NAPI instance is ok for multiple netdev devices */
+ dev = slsi_get_netdev_rcu(sdev, SLSI_NET_INDEX_WLAN);
+ if (!dev) {
+ SLSI_ERR(sdev, "netdev No longer exists\n");
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ netif_napi_add(dev, &hip->hip_priv->napi, hip4_napi_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&hip->hip_priv->napi);
+ rcu_read_unlock();
+#endif
+ } else {
+ /* version 5 */
+ hip->hip_priv->unidat_req_headroom =
+ scsc_wifi_get_hip_config_u8(&hip->hip_control, unidat_req_headroom, 5);
+ hip->hip_priv->unidat_req_tailroom =
+ scsc_wifi_get_hip_config_u8(&hip->hip_control, unidat_req_tailroom, 5);
+ hip->hip_priv->version = 5;
+ }
+ /* Unmask interrupts - now host should handle them */
+ atomic_set(&hip->hip_priv->stats.irqs, 0);
+ atomic_set(&hip->hip_priv->stats.spurious_irqs, 0);
+ atomic_set(&sdev->debug_inds, 0);
+
+ atomic_set(&hip->hip_priv->closing, 0);
+
+ /* Driver is ready to process IRQ */
+ atomic_set(&hip->hip_priv->rx_ready, 1);
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ if (conf_hip4_ver == 4) {
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_FH_RFB]);
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_CTRL]);
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[HIP4_MIF_Q_TH_DAT]);
+ } else {
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+ }
+#else
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+#endif
+ return 0;
+}
+
+/* On suspend hip4 needs to ensure that TH interrupts *are* unmasked, so FW
+ * interrupt activity can wake the host.
+ */
+void hip4_suspend(struct slsi_hip4 *hip)
+{
+ struct slsi_dev *sdev;
+ struct scsc_service *service;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+ /* Declared here, not in the for-init: kernels built with gnu89 (the
+  * pre-4.15 targets this file supports) reject C99 loop declarations.
+  */
+ u8 i;
+#endif
+
+ if (!hip || !hip->hip_priv)
+ return;
+
+ sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ if (!sdev || !sdev->service)
+ return;
+
+ if (atomic_read(&sdev->hip.hip_state) != SLSI_HIP_STATE_STARTED)
+ return;
+
+ service = sdev->service;
+
+ slsi_log_client_msg(sdev, UDI_DRV_SUSPEND_IND, 0, NULL);
+ SCSC_HIP4_SAMPLER_SUSPEND(hip->hip_priv->minor);
+
+ atomic_set(&hip->hip_priv->in_suspend, 1);
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ if (hip->hip_priv->intr_tohost_mul[i] != MIF_NO_IRQ)
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[i]);
+ } else {
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+ }
+#else
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+#endif
+}
+
+/* On resume: unmask the TH interrupt(s) and clear the in_suspend flag so the
+ * IRQ handler goes back to its normal mask-and-defer behaviour.
+ */
+void hip4_resume(struct slsi_hip4 *hip)
+{
+ struct slsi_dev *sdev;
+ struct scsc_service *service;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+ /* Hoisted out of the for-init for gnu89 builds (see hip4_suspend) */
+ u8 i;
+#endif
+
+ if (!hip || !hip->hip_priv)
+ return;
+
+ sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ if (!sdev || !sdev->service)
+ return;
+
+ if (atomic_read(&sdev->hip.hip_state) != SLSI_HIP_STATE_STARTED)
+ return;
+
+ service = sdev->service;
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ if (hip->hip_priv->intr_tohost_mul[i] != MIF_NO_IRQ)
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost_mul[i]);
+ } else {
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+ }
+#else
+ scsc_service_mifintrbit_bit_unmask(service, hip->hip_priv->intr_tohost);
+#endif
+
+ slsi_log_client_msg(sdev, UDI_DRV_RESUME_IND, 0, NULL);
+ SCSC_HIP4_SAMPLER_RESUME(hip->hip_priv->minor);
+ atomic_set(&hip->hip_priv->in_suspend, 0);
+}
+
+/* Quiesce the HIP (e.g. on firmware panic): mark it closing, mask the TH
+ * interrupt(s), flush BH work and stop the watchdog. Interrupt bits stay
+ * registered; full teardown is hip4_deinit().
+ */
+void hip4_freeze(struct slsi_hip4 *hip)
+{
+ struct slsi_dev *sdev;
+ struct scsc_service *service;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+ /* Hoisted out of the for-init for gnu89 builds (see hip4_suspend) */
+ u8 i;
+#endif
+
+ if (!hip || !hip->hip_priv)
+ return;
+
+ sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ if (!sdev || !sdev->service)
+ return;
+
+ if (atomic_read(&sdev->hip.hip_state) != SLSI_HIP_STATE_STARTED)
+ return;
+
+ service = sdev->service;
+
+ closing = ktime_get();
+ atomic_set(&hip->hip_priv->closing, 1);
+
+ hip4_dump_dbg(hip, NULL, NULL, service);
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ if (hip->hip_priv->intr_tohost_mul[i] != MIF_NO_IRQ)
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost_mul[i]);
+
+ tasklet_kill(&hip->hip_priv->intr_tasklet);
+ cancel_work_sync(&hip->hip_priv->intr_wq_ctrl);
+ cancel_work_sync(&hip->hip_priv->intr_wq_fb);
+ } else {
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost);
+ cancel_work_sync(&hip->hip_priv->intr_wq);
+ }
+#else
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost);
+ cancel_work_sync(&hip->hip_priv->intr_wq);
+#endif
+ flush_workqueue(hip->hip_priv->hip4_workq);
+ destroy_workqueue(hip->hip_priv->hip4_workq);
+ atomic_set(&hip->hip_priv->rx_ready, 0);
+ atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+
+ /* Deactivate the wd timer prior to its expiration */
+ del_timer_sync(&hip->hip_priv->watchdog);
+}
+
+/* Full HIP teardown: unregister traffic-monitor clients, wake locks and
+ * SMAPPER, mask and unregister all interrupt bits, destroy work structures,
+ * stop the watchdog and free the private state.
+ */
+void hip4_deinit(struct slsi_hip4 *hip)
+{
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ struct scsc_service *service;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+ /* Declared once for both loops below: the original declared i inside
+  * the first for-init (C99 scope), leaving the second loop's i
+  * undeclared - a compile error under CONFIG_SCSC_WLAN_RX_NAPI.
+  */
+ u8 i;
+#endif
+ if (!sdev || !sdev->service)
+ return;
+
+ service = sdev->service;
+
+#ifdef CONFIG_SCSC_LOGRING
+ slsi_traffic_mon_client_unregister(sdev, hip->hip_priv);
+ /* Reenable logring in case was disabled */
+ scsc_logring_enable(true);
+#endif
+#ifdef CONFIG_SCSC_QOS
+ /* de-register with traffic monitor */
+ slsi_traffic_mon_client_unregister(sdev, hip);
+ scsc_service_pm_qos_remove_request(service);
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Deinit SMAPPER */
+ if (hip4_smapper_is_enabled) {
+ hip4_smapper_is_enabled = false;
+ hip4_smapper_deinit(sdev, hip);
+ }
+#endif
+#ifdef CONFIG_ANDROID
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ wake_lock_destroy(&hip->hip_priv->hip4_wake_lock_tx);
+ wake_lock_destroy(&hip->hip_priv->hip4_wake_lock_ctrl);
+ wake_lock_destroy(&hip->hip_priv->hip4_wake_lock_data);
+#endif
+ wake_lock_destroy(&hip->hip_priv->hip4_wake_lock);
+#endif
+ closing = ktime_get();
+ atomic_set(&hip->hip_priv->closing, 1);
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ if (hip->hip_priv->intr_tohost_mul[i] != MIF_NO_IRQ)
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost_mul[i]);
+
+ tasklet_kill(&hip->hip_priv->intr_tasklet);
+ cancel_work_sync(&hip->hip_priv->intr_wq_ctrl);
+ cancel_work_sync(&hip->hip_priv->intr_wq_fb);
+
+ for (i = 0; i < MIF_HIP_CFG_Q_NUM; i++)
+ if (hip->hip_priv->intr_tohost_mul[i] != MIF_NO_IRQ)
+ scsc_service_mifintrbit_unregister_tohost(service, hip->hip_priv->intr_tohost_mul[i]);
+
+ /* Get the Version reported by the FW */
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&hip->hip_control->init);
+
+ if (conf_hip4_ver == 4) {
+ netif_napi_del(&hip->hip_priv->napi);
+ }
+#endif
+ scsc_service_mifintrbit_bit_mask(service, hip->hip_priv->intr_tohost);
+ cancel_work_sync(&hip->hip_priv->intr_wq);
+ scsc_service_mifintrbit_unregister_tohost(service, hip->hip_priv->intr_tohost);
+
+ flush_workqueue(hip->hip_priv->hip4_workq);
+ destroy_workqueue(hip->hip_priv->hip4_workq);
+
+ scsc_service_mifintrbit_free_fromhost(service, hip->hip_priv->intr_fromhost, SCSC_MIFINTR_TARGET_R4);
+
+ /* If we get to this point with rx_lock/tx_lock claimed, warn loudly */
+ WARN_ON(atomic_read(&hip->hip_priv->in_tx));
+ WARN_ON(atomic_read(&hip->hip_priv->in_rx));
+
+ atomic_set(&hip->hip_priv->rx_ready, 0);
+ atomic_set(&hip->hip_priv->watchdog_timer_active, 0);
+ /* Deactivate the wd timer prior to its expiration */
+ del_timer_sync(&hip->hip_priv->watchdog);
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ if (hip->hip_priv->stats.procfs_dir) {
+ remove_proc_entry("driver/hip4/jitter", NULL);
+ remove_proc_entry("driver/hip4/info", NULL);
+ remove_proc_entry("driver/hip4/history", NULL);
+ remove_proc_entry("driver/hip4", NULL);
+ }
+#endif
+ kfree(hip->hip_priv);
+
+ hip->hip_priv = NULL;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __HIP4_H__
+#define __HIP4_H__
+
+/**
+ * This header file is the public HIP4 interface, which will be accessible by
+ * Wi-Fi service driver components.
+ *
+ * All struct and internal HIP functions shall be moved to a private header
+ * file.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/scsc_mx.h>
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+#include <linux/netdevice.h>
+#endif
+#ifndef SLSI_TEST_DEV
+#ifdef CONFIG_ANDROID
+#include <linux/wakelock.h>
+#endif
+#endif
+#include "mbulk.h"
+#ifdef CONFIG_SCSC_SMAPPER
+#include "hip4_smapper.h"
+#endif
+
+/* Shared memory Layout
+ *
+ * |-------------------------| CONFIG
+ * | CONFIG + Queues |
+ * | --------- |
+ * | MIB |
+ * |-------------------------| TX Pool
+ * | TX DAT |
+ * | --------- |
+ * | TX CTL |
+ * |-------------------------| RX Pool
+ * | RX |
+ * |-------------------------|
+ */
+
+/**** OFFSET SHOULD BE 4096 BYTES ALIGNED ***/
+/*** CONFIG POOL ***/
+#define HIP4_WLAN_CONFIG_OFFSET 0x00000
+#define HIP4_WLAN_CONFIG_SIZE 0x02000 /* 8 kB*/
+/*** MIB POOL ***/
+#define HIP4_WLAN_MIB_OFFSET (HIP4_WLAN_CONFIG_OFFSET + HIP4_WLAN_CONFIG_SIZE)
+#define HIP4_WLAN_MIB_SIZE 0x08000 /* 32 kB*/
+/*** TX POOL ***/
+#define HIP4_WLAN_TX_OFFSET (HIP4_WLAN_MIB_OFFSET + HIP4_WLAN_MIB_SIZE)
+/*** TX POOL - DAT POOL ***/
+#define HIP4_WLAN_TX_DAT_OFFSET HIP4_WLAN_TX_OFFSET
+#define HIP4_WLAN_TX_DAT_SIZE 0xe6000 /* 920 kB*/
+/*** TX POOL - CTL POOL ***/
+#define HIP4_WLAN_TX_CTL_OFFSET (HIP4_WLAN_TX_DAT_OFFSET + HIP4_WLAN_TX_DAT_SIZE)
+#define HIP4_WLAN_TX_CTL_SIZE 0x10000 /* 64 kB*/
+#define HIP4_WLAN_TX_SIZE (HIP4_WLAN_TX_DAT_SIZE + HIP4_WLAN_TX_CTL_SIZE)
+/*** RX POOL ***/
+#define HIP4_WLAN_RX_OFFSET (HIP4_WLAN_TX_CTL_OFFSET + HIP4_WLAN_TX_CTL_SIZE)
+#ifdef CONFIG_SCSC_PCIE
+#define HIP4_WLAN_RX_SIZE 0x80000 /* 512 kB */
+#else
+#define HIP4_WLAN_RX_SIZE 0x100000 /* 1MB */
+#endif
+/*** TOTAL : CONFIG POOL + TX POOL + RX POOL ***/
+#define HIP4_WLAN_TOTAL_MEM (HIP4_WLAN_CONFIG_SIZE + HIP4_WLAN_MIB_SIZE + \
+ HIP4_WLAN_TX_SIZE + HIP4_WLAN_RX_SIZE) /* 2 MB */
+
+#define HIP4_POLLING_MAX_PACKETS 512
+
+#define HIP4_DAT_MBULK_SIZE (2 * 1024)
+#define HIP4_DAT_SLOTS (HIP4_WLAN_TX_DAT_SIZE / HIP4_DAT_MBULK_SIZE)
+#define HIP4_CTL_MBULK_SIZE (2 * 1024)
+#define HIP4_CTL_SLOTS (HIP4_WLAN_TX_CTL_SIZE / HIP4_CTL_MBULK_SIZE)
+
+#define MIF_HIP_CFG_Q_NUM 6
+
+#define MIF_NO_IRQ 0xff
+
+/* Current versions supported by this HIP */
+#define HIP4_SUPPORTED_V1 3
+#define HIP4_SUPPORTED_V2 4
+
+/* HIP4 queue indices, in shared-memory layout order.
+ * FH_* queues are from-host (driver -> firmware), TH_* are to-host
+ * (firmware -> driver). CTRL carries control signalling, DAT carries data
+ * frames; RFB is presumably "return frame buffer" (mbulk feedback) — TODO
+ * confirm against the mbulk/feedback handling in hip4.c. */
+enum hip4_hip_q_conf {
+ HIP4_MIF_Q_FH_CTRL,
+ HIP4_MIF_Q_FH_DAT,
+ HIP4_MIF_Q_FH_RFB,
+ HIP4_MIF_Q_TH_CTRL,
+ HIP4_MIF_Q_TH_DAT,
+ HIP4_MIF_Q_TH_RFB
+};
+
+/* HIP config structure, version 4 ("multi-interrupt" variant: one to-host
+ * interrupt bit per queue, see mif_th_int_n[]). Shared between host and
+ * firmware; __packed so the layout is byte-exact on both sides. Fields are
+ * little-endian in memory — use the scsc_wifi_get_hip_config_* accessors. */
+struct hip4_hip_config_version_4 {
+ /* Host owned */
+ u32 magic_number; /* 0xcaba0401 */
+ u16 hip_config_ver; /* Version of this configuration structure = 2*/
+ u16 config_len; /* Size of this configuration structure */
+
+ /* FW owned */
+ u32 compat_flag; /* flag of the expected driver's behaviours */
+
+ u16 sap_mlme_ver; /* Fapi SAP_MLME version*/
+ u16 sap_ma_ver; /* Fapi SAP_MA version */
+ u16 sap_debug_ver; /* Fapi SAP_DEBUG version */
+ u16 sap_test_ver; /* Fapi SAP_TEST version */
+
+ u32 fw_build_id; /* Firmware Build Id */
+ u32 fw_patch_id; /* Firmware Patch Id */
+
+ u8 unidat_req_headroom; /* Headroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u8 unidat_req_tailroom; /* Tailroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u8 bulk_buffer_align; /* 4 */
+
+ /* Host owned */
+ u8 host_cache_line; /* 64 */
+
+ u32 host_buf_loc; /* location of the host buffer in MIF_ADDR */
+ u32 host_buf_sz; /* in byte, size of the host buffer */
+ u32 fw_buf_loc; /* location of the firmware buffer in MIF_ADDR */
+ u32 fw_buf_sz; /* in byte, size of the firmware buffer */
+ u32 mib_loc; /* MIB location in MIF_ADDR */
+ u32 mib_sz; /* MIB size */
+ u32 log_config_loc; /* Logging Configuration Location in MIF_ADDR */
+ u32 log_config_sz; /* Logging Configuration Size in MIF_ADDR */
+
+ u8 mif_fh_int_n; /* MIF from-host interrupt bit position for all HIP queue */
+ u8 reserved1[3];
+
+ u8 mif_th_int_n[6]; /* MIF to-host interrupt bit positions for each HIP queue */
+ u8 reserved2[2];
+
+ u32 scbrd_loc; /* Scoreboard location in MIF_ADDR */
+
+ u16 q_num; /* 6 */
+ u16 q_len; /* 256 */
+ u16 q_idx_sz; /* 1 */
+ u8 reserved3[2];
+
+ u32 q_loc[MIF_HIP_CFG_Q_NUM];
+
+#ifdef CONFIG_SCSC_SMAPPER
+ u8 smapper_th_req; /* TH smapper request interrupt bit position */
+ u8 smapper_fh_ind; /* FH smapper ind interrupt bit position */
+ u8 smapper_mbox_scb; /* SMAPPER MBOX scoreboard location */
+ u8 smapper_entries_banks[16]; /* num entries banks */
+ u8 smapper_pow_sz[16]; /* Power of size of entry i.e. 12 = 4096B */
+ u32 smapper_bank_addr[16]; /* Bank start addr */
+#else
+ u8 reserved_nosmapper[99]; /* keeps the overall size identical to the SMAPPER build */
+#endif
+ u8 reserved4[16];
+} __packed;
+
+/* HIP config structure, version 5 ("single-interrupt" variant: one
+ * from-host and one to-host interrupt bit shared by all queues). Shared
+ * with firmware; __packed, little-endian — use the accessor macros. */
+struct hip4_hip_config_version_5 {
+ /* Host owned */
+ u32 magic_number; /* 0xcaba0401 */
+ u16 hip_config_ver; /* Version of this configuration structure = 2*/
+ u16 config_len; /* Size of this configuration structure */
+
+ /* FW owned */
+ u32 compat_flag; /* flag of the expected driver's behaviours */
+
+ u16 sap_mlme_ver; /* Fapi SAP_MLME version*/
+ u16 sap_ma_ver; /* Fapi SAP_MA version */
+ u16 sap_debug_ver; /* Fapi SAP_DEBUG version */
+ u16 sap_test_ver; /* Fapi SAP_TEST version */
+
+ u32 fw_build_id; /* Firmware Build Id */
+ u32 fw_patch_id; /* Firmware Patch Id */
+
+ u8 unidat_req_headroom; /* Headroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u8 unidat_req_tailroom; /* Tailroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u8 bulk_buffer_align; /* 4 */
+
+ /* Host owned */
+ u8 host_cache_line; /* 64 */
+
+ u32 host_buf_loc; /* location of the host buffer in MIF_ADDR */
+ u32 host_buf_sz; /* in byte, size of the host buffer */
+ u32 fw_buf_loc; /* location of the firmware buffer in MIF_ADDR */
+ u32 fw_buf_sz; /* in byte, size of the firmware buffer */
+ u32 mib_loc; /* MIB location in MIF_ADDR */
+ u32 mib_sz; /* MIB size */
+ u32 log_config_loc; /* Logging Configuration Location in MIF_ADDR */
+ u32 log_config_sz; /* Logging Configuration Size in MIF_ADDR */
+
+ u8 mif_fh_int_n; /* MIF from-host interrupt bit position */
+ u8 mif_th_int_n; /* MIF to-host interrupt bit position */
+ u8 reserved[2];
+
+ u32 scbrd_loc; /* Scoreboard location in MIF_ADDR */
+
+ u16 q_num; /* 6 */
+ u16 q_len; /* 256 */
+ u16 q_idx_sz; /* 1 */
+ u8 reserved2[2];
+
+ u32 q_loc[MIF_HIP_CFG_Q_NUM];
+
+ u8 reserved3[16];
+} __packed;
+
+/* Bootstrap block at the start of the shared control area. The host
+ * publishes the magic number and the locations of the two candidate
+ * config structures; the firmware writes back the HIP version it picked
+ * (read via scsc_wifi_get_hip_config_version()). */
+struct hip4_hip_init {
+ /* Host owned */
+ u32 magic_number; /* 0xcaaa0400 */
+ /* FW owned */
+ u32 conf_hip4_ver; /* HIP version selected by FW (cf. HIP4_SUPPORTED_V1/V2) */
+ /* Host owned */
+ u32 version_a_ref; /* Location of Config structure A (old) */
+ u32 version_b_ref; /* Location of Config structure B (new) */
+} __packed;
+
+#define MAX_NUM 256
+/* One HIP circular queue: MAX_NUM entries plus read/write cursors.
+ * 64-byte (cache line) aligned since the queues live in memory shared
+ * with the firmware. */
+struct hip4_hip_q {
+ u32 array[MAX_NUM];
+ u8 idx_read; /* To keep track */
+ u8 idx_write; /* To keep track */
+ u8 total;
+} __aligned(64);
+
+/* Complete host/FW shared control area: bootstrap block, both config
+ * structure versions, the scoreboard and the six HIP queues. 4096-byte
+ * aligned — the whole block is placed in MIF shared memory. */
+struct hip4_hip_control {
+ struct hip4_hip_init init;
+ struct hip4_hip_config_version_5 config_v5 __aligned(32);
+ struct hip4_hip_config_version_4 config_v4 __aligned(32);
+ u32 scoreboard[256] __aligned(64);
+ struct hip4_hip_q q[MIF_HIP_CFG_Q_NUM] __aligned(64);
+} __aligned(4096);
+
+struct slsi_hip4;
+
+/* This struct is private to the HIP implementation: per-instance driver
+ * state (interrupt bookkeeping, BH work items, locks, mbulk pools,
+ * watchdog, stats). Code outside the HIP layer must not touch it; it is
+ * kfree'd and the pointer NULLed at deinit. */
+struct hip4_priv {
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ struct tasklet_struct intr_tasklet;
+ struct work_struct intr_wq_ctrl;
+ struct work_struct intr_wq_fb;
+ struct napi_struct napi;
+#endif
+ struct work_struct intr_wq;
+
+ /* Interrupts cache < v4 */
+ /* TOHOST */
+ u32 intr_tohost;
+
+ /* Interrupts cache v4: one to-host interrupt bit per HIP queue;
+ * MIF_NO_IRQ marks an unused slot */
+ u32 intr_tohost_mul[MIF_HIP_CFG_Q_NUM];
+ /* FROMHOST */
+ u32 intr_fromhost;
+
+ /* For workqueue */
+ struct slsi_hip4 *hip;
+
+ /* Pool for data frames*/
+ u8 host_pool_id_dat;
+ /* Pool for ctl frames*/
+ u8 host_pool_id_ctl;
+
+ /* rx cycle lock */
+ spinlock_t rx_lock;
+ /* tx cycle lock */
+ spinlock_t tx_lock;
+
+ /* Scoreboard update spinlock */
+ rwlock_t rw_scoreboard;
+
+ /* Watchdog timer */
+ struct timer_list watchdog;
+ /* wd spinlock */
+ spinlock_t watchdog_lock;
+ /* wd timer control; checked/cleared at deinit before del_timer_sync() */
+ atomic_t watchdog_timer_active;
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ DECLARE_BITMAP(irq_bitmap, MIF_HIP_CFG_Q_NUM);
+#endif
+
+#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ struct wake_lock hip4_wake_lock_tx;
+ struct wake_lock hip4_wake_lock_ctrl;
+ struct wake_lock hip4_wake_lock_data;
+#endif
+ /* Wakelock for modem_ctl */
+ struct wake_lock hip4_wake_lock;
+#endif
+
+ /* Control the hip4 init */
+ atomic_t rx_ready;
+
+ /* Control the hip4 deinit */
+ atomic_t closing;
+ atomic_t in_tx;
+ atomic_t in_rx;
+ atomic_t in_suspend;
+ u32 storm_count;
+
+ /* Diagnostics; procfs_dir gates the /proc/driver/hip4 entries removed at deinit */
+ struct {
+ atomic_t irqs;
+ atomic_t spurious_irqs;
+ u32 q_num_frames[MIF_HIP_CFG_Q_NUM];
+ ktime_t start;
+ struct proc_dir_entry *procfs_dir;
+ } stats;
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ /*minor*/
+ u32 minor;
+#endif
+ u8 unidat_req_headroom; /* Headroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u8 unidat_req_tailroom; /* Tailroom the host shall reserve in mbulk for MA-UNITDATA.REQ signal */
+ u32 version; /* Version of the running FW */
+ void *scbrd_base; /* Scbrd_base pointer */
+
+ /* Global domain Q control*/
+ atomic_t gactive;
+ atomic_t gmod;
+ atomic_t gcod;
+ int saturated;
+ int guard;
+ /* Global domain Q spinlock */
+ spinlock_t gbot_lock;
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* SMAPPER */
+ /* Leman has 4 Banks of 160 entries each and 4 Banks of 64 entries each. Each Tx stream is
+ * expected to use 2 Bank . In RSDB, 5GHz streams require higher throughput
+ * so the bigger banks are allocated for 5GHz streams and the
+ * smaller banks are for 2.4GHz streams
+ */
+ struct hip4_smapper_bank smapper_banks[HIP4_SMAPPER_TOTAL_BANKS];
+ struct hip4_smapper_control smapper_control;
+#endif
+#ifdef CONFIG_SCSC_QOS
+ /* PM QoS control */
+ struct work_struct pm_qos_work;
+ /* PM QoS control spinlock */
+ spinlock_t pm_qos_lock;
+ u8 pm_qos_state;
+#endif
+ /* Collection artifacts */
+ void *mib_collect;
+ u16 mib_sz;
+ /* Mutex to protect hcf file collection if a tear down is triggered */
+ struct mutex in_collection;
+
+ struct workqueue_struct *hip4_workq;
+};
+
+struct scsc_service;
+
+/* Public HIP4 handle held by the Wi-Fi service driver. */
+struct slsi_hip4 {
+ struct hip4_priv *hip_priv; /* private HIP state; set NULL after deinit */
+ struct hip4_hip_control *hip_control; /* shared host/FW control area */
+ scsc_mifram_ref hip_ref; /* MIF RAM reference of the control area */
+};
+
+/* Public functions */
+int hip4_init(struct slsi_hip4 *hip);
+int hip4_setup(struct slsi_hip4 *hip);
+void hip4_suspend(struct slsi_hip4 *hip);
+void hip4_resume(struct slsi_hip4 *hip);
+void hip4_freeze(struct slsi_hip4 *hip);
+void hip4_deinit(struct slsi_hip4 *hip);
+int hip4_free_ctrl_slots_count(struct slsi_hip4 *hip);
+
+int scsc_wifi_transmit_frame(struct slsi_hip4 *hip, bool ctrl_packet, struct sk_buff *skb);
+
+/* Macros for accessing information stored in the hip_config struct.
+ * Config memory is little-endian: u16/u32 fields go through
+ * le16_to_cpu()/le32_to_cpu(). u8 fields need no conversion — the old
+ * u8 getters wrongly applied le16_to_cpu(), which would byte-swap the
+ * (promoted) value on big-endian hosts. */
+#define scsc_wifi_get_hip_config_version_4_u8(buff_ptr, member) ((((struct hip4_hip_config_version_4 *)(buff_ptr))->member))
+#define scsc_wifi_get_hip_config_version_4_u16(buff_ptr, member) le16_to_cpu((((struct hip4_hip_config_version_4 *)(buff_ptr))->member))
+#define scsc_wifi_get_hip_config_version_4_u32(buff_ptr, member) le32_to_cpu((((struct hip4_hip_config_version_4 *)(buff_ptr))->member))
+#define scsc_wifi_get_hip_config_version_5_u8(buff_ptr, member) ((((struct hip4_hip_config_version_5 *)(buff_ptr))->member))
+#define scsc_wifi_get_hip_config_version_5_u16(buff_ptr, member) le16_to_cpu((((struct hip4_hip_config_version_5 *)(buff_ptr))->member))
+#define scsc_wifi_get_hip_config_version_5_u32(buff_ptr, member) le32_to_cpu((((struct hip4_hip_config_version_5 *)(buff_ptr))->member))
+/* buff_ptr here is a struct hip4_hip_control *; config_v##ver is a struct
+ * member, not a pointer, so take its address (the previous code cast the
+ * struct value itself to a pointer type, which cannot compile if these
+ * macros are ever expanded). */
+#define scsc_wifi_get_hip_config_u8(buff_ptr, member, ver) ((&(buff_ptr)->config_v##ver)->member)
+#define scsc_wifi_get_hip_config_u16(buff_ptr, member, ver) le16_to_cpu((&(buff_ptr)->config_v##ver)->member)
+#define scsc_wifi_get_hip_config_u32(buff_ptr, member, ver) le32_to_cpu((&(buff_ptr)->config_v##ver)->member)
+#define scsc_wifi_get_hip_config_version(buff_ptr) le32_to_cpu((((struct hip4_hip_init *)(buff_ptr))->conf_hip4_ver))
+
+#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <linux/hardirq.h>
+#include <linux/cpufreq.h>
+
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <asm/page.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <scsc/scsc_mx.h>
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include "hip4_sampler.h"
+
+#include "debug.h"
+
+/* One sampler record, streamed to user space or held in the offline
+ * buffer. __packed so the exported layout has no padding. See
+ * __hip4_sampler_update_record() for the field encodings. */
+struct hip4_record {
+ u32 record_num; /* 16-bit counter | cpu (bits 31..28) | freq snapshot (27..16) */
+ ktime_t ts; /* sample timestamp (ktime_get()) */
+ u32 record; /* param1..param4 packed one byte each */
+ u32 record2; /* param5 verbatim */
+} __packed;
+
+static atomic_t in_read;
+
+/* Create a global spinlock for all the instances */
+/* It is less efficient, but it keeps the implementation simple */
+static spinlock_t g_spinlock;
+
+static bool hip4_sampler_enable = true;
+module_param(hip4_sampler_enable, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_enable, "Enable hip4_sampler_enable. Run-time option - (default: Y)");
+
+static bool hip4_sampler_dynamic = true;
+module_param(hip4_sampler_dynamic, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_dynamic, "Enable hip4_sampler dynamic adaptation based on TPUT. Run-time option - (default: Y)");
+
+static int hip4_sampler_kfifo_len = 128 * 1024;
+module_param(hip4_sampler_kfifo_len, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_kfifo_len, "Streaming fifo buffer length in num of records- default: 262144 Max: 262144. Loaded at /dev open");
+
+static int hip4_sampler_static_kfifo_len = 128 * 1024;
+module_param(hip4_sampler_static_kfifo_len, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_static_kfifo_len, "Offline fifo buffer length in num of records- default: 262144 Max: 262144. Loaded at /dev open");
+
+bool hip4_sampler_sample_q = true;
+module_param(hip4_sampler_sample_q, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_q, "Sample Queues. Default: Y. Run time option");
+
+bool hip4_sampler_sample_qref = true;
+module_param(hip4_sampler_sample_qref, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_qref, "Sample Queue References. Default: Y. Run time option");
+
+bool hip4_sampler_sample_int = true;
+module_param(hip4_sampler_sample_int, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_int, "Sample WQ/Tasklet Intr BH in/out. Default: Y. Run time option");
+
+bool hip4_sampler_sample_fapi = true;
+module_param(hip4_sampler_sample_fapi, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_fapi, "Sample FAPI ctrl signals. Default: Y. Run time option");
+
+bool hip4_sampler_sample_through = true;
+module_param(hip4_sampler_sample_through, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_through, "Sample throughput. Default: Y. Run time option");
+
+bool hip4_sampler_sample_tcp = true;
+module_param(hip4_sampler_sample_tcp, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_tcp, "Sample TCP streams. Default: Y. Run time option");
+
+bool hip4_sampler_sample_start_stop_q = true;
+module_param(hip4_sampler_sample_start_stop_q, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_start_stop_q, "Sample Stop/Start queues. Default: Y. Run time option");
+
+bool hip4_sampler_sample_mbulk = true;
+module_param(hip4_sampler_sample_mbulk, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_mbulk, "Sample Mbulk counter. Default: Y. Run time option");
+
+bool hip4_sampler_sample_qfull;
+module_param(hip4_sampler_sample_qfull, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_qfull, "Sample Q full event. Default: N. Run time option");
+
+bool hip4_sampler_sample_mfull = true;
+module_param(hip4_sampler_sample_mfull, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_sample_mfull, "Sample Mbulk full event. Default: Y. Run time option");
+
+bool hip4_sampler_vif = true;
+module_param(hip4_sampler_vif, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_vif, "Sample VIF. Default: Y. Run time option");
+
+bool hip4_sampler_bot = true;
+module_param(hip4_sampler_bot, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_bot, "Sample BOT. Default: Y. Run time option");
+
+bool hip4_sampler_pkt_tx = true;
+module_param(hip4_sampler_pkt_tx, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_pkt_tx, "Track TX Data packet TX->HIP4->FB. Default: Y. Run time option");
+
+bool hip4_sampler_suspend_resume = true;
+module_param(hip4_sampler_suspend_resume, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hip4_sampler_suspend_resume, "Sample Suspend/Resume events. Default: Y. Run time option");
+
+#define HIP4_TPUT_HLIMIT 400000000 /* 400Mbps */
+#define HIP4_TPUT_LLIMIT 350000000 /* 350Mbps */
+#define HIP4_TPUT_HSECONDS 1
+#define HIP4_TPUT_LSECONDS 5
+
+static bool hip4_sampler_sample_q_hput;
+static bool hip4_sampler_sample_qref_hput;
+static bool hip4_sampler_sample_int_hput;
+static bool hip4_sampler_sample_fapi_hput;
+/* static bool hip4_sampler_sample_through_hput; */
+/* static bool hip4_sampler_sample_start_stop_q_hput; */
+/* static bool hip4_sampler_sample_mbulk_hput; */
+/* static bool hip4_sampler_sample_qfull_hput; */
+/* static bool hip4_sampler_sample_mfull_hput; */
+static bool hip4_sampler_vif_hput;
+static bool hip4_sampler_bot_hput;
+static bool hip4_sampler_pkt_tx_hput;
+static bool hip4_sampler_suspend_resume_hput;
+
+static bool hip4_sampler_in_htput;
+static u16 hip4_sampler_in_htput_seconds;
+
+#define DRV_NAME "hip4_sampler"
+#define DEVICE_NAME "hip4_sampler"
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+#define VER_MAJOR 0
+#define VER_MINOR 0
+
+DECLARE_BITMAP(bitmap_hip4_sampler_minor, SCSC_HIP4_DEBUG_INTERFACES);
+
+/* Sticky per-device error state; a non-NO_ERROR value makes
+ * hip4_sampler_read() fail with -EIO until cleared on the next open. */
+enum hip4_dg_errors {
+ NO_ERROR = 0,
+ BUFFER_OVERFLOW,
+ KFIFO_ERROR,
+ KFIFO_FULL, /* streaming fifo was full; the sample was dropped */
+};
+
+/* Sampler device flavour: STREAMING feeds an open char-device reader;
+ * OFFLINE accumulates into a resident fifo drained by the log collector. */
+enum hip4_type {
+ STREAMING = 0,
+ OFFLINE,
+};
+
+/* Per-minor sampler device state (one STREAMING and, presumably at
+ * minor + 1, one OFFLINE instance per interface — TODO confirm against
+ * the registration code, not visible in this chunk). */
+struct hip4_sampler_dev {
+ /* file pointer; non-NULL while the streaming device is open */
+ struct file *filp;
+ /* char device */
+ struct cdev cdev;
+ /* device pointer */
+ struct device *dev;
+ /* owning mx instance (matched in hip4_collect()) */
+ struct scsc_mx *mx;
+ /* Associated kfifo of hip4_record entries */
+ DECLARE_KFIFO_PTR(fifo, struct hip4_record);
+ /* Associated read_wait queue.*/
+ wait_queue_head_t read_wait;
+ /* Device in error */
+ enum hip4_dg_errors error;
+ /* Device node mutex for fops */
+ struct mutex mutex;
+ /* Record number */
+ u32 record_num;
+ /* To profile kfifo num elements (high-water mark) */
+ u32 kfifo_max;
+ /* Sampler type streaming/offline */
+ enum hip4_type type;
+ /* reference to minor number */
+ u32 minor;
+};
+
+/**
+ * SCSC User Space debug sampler interface (singleton).
+ * init is presumably set by the module init/probe path (not visible in
+ * this chunk); until it is true, hip4_sampler_update_record() drops all
+ * samples.
+ */
+static struct {
+ bool init;
+ dev_t device;
+ struct class *class_hip4_sampler;
+ struct hip4_sampler_dev devs[SCSC_HIP4_DEBUG_INTERFACES];
+} hip4_sampler;
+
+/* Core sample writer. Must be called with g_spinlock held (see
+ * hip4_sampler_update_record(), which takes it with IRQs disabled).
+ *
+ * Encoding: record_num packs the cpu id (bits 31..28), a CPU frequency
+ * snapshot (bits 27..16, refreshed every 64 records; cpufreq_quick_get()
+ * reports kHz, so /1000 gives MHz) and a rolling 16-bit counter.
+ * record packs param1..param4 one byte each; record2 carries param5.
+ */
+void __hip4_sampler_update_record(struct hip4_sampler_dev *hip4_dev, u32 minor, u8 param1, u8 param2, u8 param3, u8 param4, u32 param5)
+{
+ struct hip4_record ev;
+ u32 ret;
+ u32 cpu;
+ u32 freq = 0;
+
+ /* If the char device is open, use the streaming buffer */
+ if (hip4_dev->filp) {
+ /* put string into the Streaming fifo */
+ if (kfifo_avail(&hip4_dev->fifo)) {
+ /* Push values in Fifo*/
+ cpu = smp_processor_id();
+ if (hip4_dev->record_num % 64 == 0)
+ freq = (cpufreq_quick_get(cpu) / 1000) & 0xfff;
+ ev.record_num = (0x0000ffff & hip4_dev->record_num++) | (cpu << 28) | (freq << 16);
+ ev.ts = ktime_get();
+ ev.record = ((param1 & 0xff) << 24) | ((param2 & 0xff) << 16) | ((param3 & 0xff) << 8) | (param4 & 0xff);
+ ev.record2 = param5;
+/* kfifo_put() took a pointer argument before kernel 3.12 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ kfifo_put(&hip4_dev->fifo, ev);
+#else
+ kfifo_put(&hip4_dev->fifo, &ev);
+#endif
+ /* Track the fifo high-water mark for profiling */
+ ret = kfifo_len(&hip4_dev->fifo);
+ if (ret > hip4_dev->kfifo_max)
+ hip4_dev->kfifo_max = ret;
+ } else {
+ /* Fifo full: drop the sample, latch sticky error */
+ hip4_dev->error = KFIFO_FULL;
+ return;
+ }
+ wake_up_interruptible(&hip4_dev->read_wait);
+ /* If streaming buffer is not in use, put samples in offline buffer */
+ } else {
+ /* Get associated Offline buffer — assumes the OFFLINE device is
+ * registered at minor + 1; TODO confirm against the registration
+ * code (not visible in this chunk) */
+ hip4_dev = &hip4_sampler.devs[minor + 1];
+ /* Record in offline fifo */
+ /* if fifo is full, remove last item to make room (ring behaviour) */
+ if (kfifo_is_full(&hip4_dev->fifo))
+ ret = kfifo_get(&hip4_dev->fifo, &ev);
+ cpu = smp_processor_id();
+ if (hip4_dev->record_num % 64 == 0)
+ freq = (cpufreq_quick_get(cpu) / 1000) & 0xfff;
+ /* Push values in Static Fifo*/
+ ev.record_num = (0x0000ffff & hip4_dev->record_num++) | (cpu << 28) | (freq << 16);
+ ev.ts = ktime_get();
+ ev.record = ((param1 & 0xff) << 24) | ((param2 & 0xff) << 16) | ((param3 & 0xff) << 8) | (param4 & 0xff);
+ ev.record2 = param5;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ kfifo_put(&hip4_dev->fifo, ev);
+#else
+ kfifo_put(&hip4_dev->fifo, &ev);
+#endif
+ }
+}
+
+/* Public entry point for recording one sample on interface @minor.
+ * Serialises all writers through the global sampler spinlock and hands
+ * off to __hip4_sampler_update_record(). Samples are silently dropped
+ * while the sampler is disabled, uninitialised, mid log-collection
+ * (in_read set) or when @minor is out of range. Safe from any context
+ * (IRQs disabled while the lock is held). */
+void hip4_sampler_update_record(u32 minor, u8 param1, u8 param2, u8 param3, u8 param4, u32 param5)
+{
+	unsigned long irq_flags;
+
+	if (!hip4_sampler_enable || !hip4_sampler.init ||
+	    atomic_read(&in_read) || minor >= SCSC_HIP4_INTERFACES)
+		return;
+
+	spin_lock_irqsave(&g_spinlock, irq_flags);
+	__hip4_sampler_update_record(&hip4_sampler.devs[minor], minor,
+				     param1, param2, param3, param4, param5);
+	spin_unlock_irqrestore(&g_spinlock, irq_flags);
+}
+
+/* Entering the high-throughput regime: snapshot each run-time sampling
+ * switch into its *_hput shadow, then clear it to reduce contention.
+ * Undone by hip4_sampler_restore_param(). */
+static void hip4_sampler_store_param(void)
+{
+	/* Save, then reset, each switch in the dynamic set */
+	hip4_sampler_sample_q_hput = hip4_sampler_sample_q;
+	hip4_sampler_sample_q = false;
+
+	hip4_sampler_sample_qref_hput = hip4_sampler_sample_qref;
+	hip4_sampler_sample_qref = false;
+
+	hip4_sampler_sample_int_hput = hip4_sampler_sample_int;
+	hip4_sampler_sample_int = false;
+
+	hip4_sampler_sample_fapi_hput = hip4_sampler_sample_fapi;
+	hip4_sampler_sample_fapi = false;
+
+	hip4_sampler_vif_hput = hip4_sampler_vif;
+	hip4_sampler_vif = false;
+
+	hip4_sampler_bot_hput = hip4_sampler_bot;
+	hip4_sampler_bot = false;
+
+	hip4_sampler_pkt_tx_hput = hip4_sampler_pkt_tx;
+	hip4_sampler_pkt_tx = false;
+
+	hip4_sampler_suspend_resume_hput = hip4_sampler_suspend_resume;
+	hip4_sampler_suspend_resume = false;
+
+	/* The throughput/start-stop/mbulk/qfull/mfull switches are
+	 * deliberately not part of the dynamic set. */
+}
+
+/* Leaving the high-throughput regime: copy every *_hput shadow back into
+ * its run-time switch, undoing hip4_sampler_store_param(). */
+static void hip4_sampler_restore_param(void)
+{
+	hip4_sampler_suspend_resume = hip4_sampler_suspend_resume_hput;
+	hip4_sampler_pkt_tx = hip4_sampler_pkt_tx_hput;
+	hip4_sampler_bot = hip4_sampler_bot_hput;
+	hip4_sampler_vif = hip4_sampler_vif_hput;
+	hip4_sampler_sample_fapi = hip4_sampler_sample_fapi_hput;
+	hip4_sampler_sample_int = hip4_sampler_sample_int_hput;
+	hip4_sampler_sample_qref = hip4_sampler_sample_qref_hput;
+	hip4_sampler_sample_q = hip4_sampler_sample_q_hput;
+	/* throughput/start-stop/mbulk/qfull/mfull switches are not part of
+	 * the dynamic set, so nothing to restore for them */
+}
+
+/* Hysteresis state machine that adapts sampler verbosity to throughput.
+ * While in the high-throughput regime we switch back to full sampling
+ * after HIP4_TPUT_LSECONDS consecutive calls below HIP4_TPUT_LLIMIT;
+ * otherwise we reduce sampling after HIP4_TPUT_HSECONDS consecutive
+ * calls above HIP4_TPUT_HLIMIT. Any call on the "wrong" side of the
+ * threshold resets the consecutive counter. */
+static void hip4_sampler_dynamic_switcher(u32 bps)
+{
+	bool crossed;
+	u16 needed;
+
+	if (hip4_sampler_in_htput) {
+		crossed = bps < HIP4_TPUT_LLIMIT;
+		needed = HIP4_TPUT_LSECONDS;
+	} else {
+		crossed = bps > HIP4_TPUT_HLIMIT;
+		needed = HIP4_TPUT_HSECONDS;
+	}
+
+	if (!crossed) {
+		/* Back inside the band: restart the debounce count */
+		hip4_sampler_in_htput_seconds = 0;
+		return;
+	}
+
+	if (++hip4_sampler_in_htput_seconds < needed)
+		return;
+
+	/* Threshold held long enough: flip regime */
+	hip4_sampler_in_htput_seconds = 0;
+	if (hip4_sampler_in_htput) {
+		hip4_sampler_in_htput = false;
+		hip4_sampler_restore_param();
+	} else {
+		hip4_sampler_in_htput = true;
+		hip4_sampler_store_param();
+	}
+}
+
+static u32 g_tput_rx;
+static u32 g_tput_tx;
+
+/* Throughput monitor callback.
+ *
+ * @client_ctx: opaque context; cast to the owning hip4_sampler_dev.
+ * @state:      monitor state (unused here).
+ * @tput_tx:    current TX throughput in bps.
+ * @tput_rx:    current RX throughput in bps.
+ *
+ * Suppresses duplicate reports, feeds the dynamic verbosity switcher,
+ * then emits one TX and one RX throughput sample scaled to bps/Kbps/Mbps
+ * (16-bit value split across two u8 sample parameters).
+ */
+void hip4_sampler_tput_monitor(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx)
+{
+	struct hip4_sampler_dev *hip4_sampler_dev = (struct hip4_sampler_dev *)client_ctx;
+
+	if (!hip4_sampler_enable)
+		return;
+
+	/* Nothing changed since the last report: skip sample generation */
+	if ((g_tput_tx == tput_tx) && (g_tput_rx == tput_rx))
+		return;
+
+	/* Bug fix: these were "==" comparisons (both against g_tput_rx), so
+	 * the cached values never updated and the duplicate-suppression
+	 * check above could never fire. */
+	g_tput_tx = tput_tx;
+	g_tput_rx = tput_rx;
+
+	if (hip4_sampler_dynamic) {
+		/* Call the dynamic switcher with the higher of the two
+		 * computed bps values. The algorithm will decide to not
+		 * change, decrease or increase the sampler verbosity.
+		 */
+		if (tput_rx > tput_tx)
+			hip4_sampler_dynamic_switcher(tput_rx);
+		else
+			hip4_sampler_dynamic_switcher(tput_tx);
+	}
+
+	/* Generate the TX sample, in bps, Kbps, or Mbps */
+	if (tput_tx < 1000) {
+		SCSC_HIP4_SAMPLER_THROUG(hip4_sampler_dev->minor, 1, (tput_tx & 0xff00) >> 8, tput_tx & 0xff);
+	} else if ((tput_tx >= 1000) && (tput_tx < (1000 * 1000))) {
+		tput_tx = tput_tx / 1000;
+		SCSC_HIP4_SAMPLER_THROUG_K(hip4_sampler_dev->minor, 1, (tput_tx & 0xff00) >> 8, tput_tx & 0xff);
+	} else {
+		tput_tx = tput_tx / (1000 * 1000);
+		SCSC_HIP4_SAMPLER_THROUG_M(hip4_sampler_dev->minor, 1, (tput_tx & 0xff00) >> 8, tput_tx & 0xff);
+	}
+
+	/* Generate the RX sample, in bps, Kbps, or Mbps */
+	if (tput_rx < 1000) {
+		SCSC_HIP4_SAMPLER_THROUG(hip4_sampler_dev->minor, 0, (tput_rx & 0xff00) >> 8, tput_rx & 0xff);
+	} else if ((tput_rx >= 1000) && (tput_rx < (1000 * 1000))) {
+		tput_rx = tput_rx / 1000;
+		SCSC_HIP4_SAMPLER_THROUG_K(hip4_sampler_dev->minor, 0, (tput_rx & 0xff00) >> 8, tput_rx & 0xff);
+	} else {
+		tput_rx = tput_rx / (1000 * 1000);
+		SCSC_HIP4_SAMPLER_THROUG_M(hip4_sampler_dev->minor, 0, (tput_rx & 0xff00) >> 8, tput_rx & 0xff);
+	}
+}
+
+/* Decode an Ethernet frame and, if it is IPv4/TCP belonging to a tracked
+ * ack-suppression stream, emit DATA/ACK/RWND samples for it. @from_ba
+ * selects the sample direction macros (presumably "from block-ack"/TX
+ * path vs RX path — TODO confirm with the callers, not in this chunk). */
+void hip4_sampler_tcp_decode(struct slsi_dev *sdev, struct net_device *dev, u8 *frame, bool from_ba)
+{
+ struct tcphdr *tcp_hdr;
+ struct ethhdr *ehdr = (struct ethhdr *)(frame);
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u8 *ip_frame;
+ u16 ip_data_offset;
+ u8 hlen; /* IPv4 IHL, in 32-bit words */
+ u16 len; /* IPv4 total length, bytes */
+ u8 proto;
+ u8 idx;
+
+ if (be16_to_cpu(ehdr->h_proto) != ETH_P_IP)
+ return;
+
+ ip_frame = frame + ETH_HLEN;
+ proto = ip_frame[9]; /* IPv4 protocol field */
+
+ if (proto != IPPROTO_TCP)
+ return;
+
+ ip_data_offset = 20;
+ hlen = ip_frame[0] & 0x0F;
+ len = ip_frame[2] << 8 | ip_frame[3];
+
+ /* Skip IPv4 options, if any */
+ if (hlen > 5)
+ ip_data_offset += (hlen - 5) * 4;
+
+ tcp_hdr = (struct tcphdr *)(ip_frame + ip_data_offset);
+
+ /* Search for an existing record on this connection. */
+ for (idx = 0; idx < TCP_ACK_SUPPRESSION_RECORDS_MAX; idx++) {
+ struct slsi_tcp_ack_s *tcp_ack;
+ u32 rwnd = 0;
+
+ tcp_ack = &ndev_vif->ack_suppression[idx];
+ slsi_spinlock_lock(&tcp_ack->lock);
+ if ((tcp_ack->dport == tcp_hdr->source) && (tcp_ack->sport == tcp_hdr->dest)) {
+ /* SYN-ACK: walk the TCP options to capture the peer's
+ * advertised window scale */
+ if (from_ba && tcp_hdr->syn && tcp_hdr->ack) {
+ unsigned char *options;
+ /* NOTE(review): this 'len' shadows the outer IPv4
+ * total-length 'len' used in the payload check below */
+ u32 optlen = 0, len = 0;
+
+ if (tcp_hdr->doff > 5)
+ optlen = (tcp_hdr->doff - 5) * 4;
+
+ options = (u8 *)tcp_hdr + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET;
+
+ while (optlen > 0) {
+ switch (options[0]) {
+ case TCP_ACK_SUPPRESSION_OPTION_EOL:
+ len = 1;
+ break;
+ case TCP_ACK_SUPPRESSION_OPTION_NOP:
+ len = 1;
+ break;
+ case TCP_ACK_SUPPRESSION_OPTION_WINDOW:
+ tcp_ack->rx_window_scale = options[2];
+ len = options[1];
+ break;
+ default:
+ len = options[1];
+ break;
+ }
+ /* if length field in TCP options is 0, or greater than
+ * total options length, then options are incorrect
+ */
+ if ((len == 0) || (len >= optlen))
+ break;
+
+ if (optlen >= len)
+ optlen -= len;
+ else
+ optlen = 0;
+ options += len;
+ }
+ }
+ /* Payload present (total length > IP hdr + TCP hdr)? -> DATA sample */
+ if (len > ((hlen * 4) + (tcp_hdr->doff * 4))) {
+ if (from_ba)
+ SCSC_HIP4_SAMPLER_TCP_DATA(sdev->minor_prof, tcp_ack->stream_id, tcp_hdr->seq);
+ else
+ SCSC_HIP4_SAMPLER_TCP_DATA_IN(sdev->minor_prof, tcp_ack->stream_id, tcp_hdr->seq);
+ } else {
+ /* Pure ACK: compute the scaled receive window.
+ * NOTE(review): RFC 7323 scaling is window << scale,
+ * i.e. window * (1 << scale); "2 <<" doubles the
+ * computed window — confirm whether intentional. */
+ if (tcp_ack->rx_window_scale)
+ rwnd = be16_to_cpu(tcp_hdr->window) * (2 << tcp_ack->rx_window_scale);
+ else
+ rwnd = be16_to_cpu(tcp_hdr->window);
+ if (from_ba) {
+ SCSC_HIP4_SAMPLER_TCP_ACK(sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr->ack_seq));
+ SCSC_HIP4_SAMPLER_TCP_RWND(sdev->minor_prof, tcp_ack->stream_id, rwnd);
+ } else {
+ SCSC_HIP4_SAMPLER_TCP_ACK_IN(sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr->ack_seq));
+ }
+ }
+ slsi_spinlock_unlock(&tcp_ack->lock);
+ break;
+ }
+ slsi_spinlock_unlock(&tcp_ack->lock);
+ }
+}
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+/* Log-collector hook: suspend sampling (via the in_read flag checked in
+ * hip4_sampler_update_record()) while a collection runs, so the offline
+ * fifos are not written to while being drained. Re-enabled by
+ * hip4_collect_end(). */
+int hip4_collect_init(struct scsc_log_collector_client *collect_client)
+{
+ /* Stop Sampling */
+ atomic_set(&in_read, 1);
+ return 0;
+}
+
+/* Log-collector hook: for every OFFLINE sampler device owned by this mx
+ * instance, drain its fifo into a temporary buffer and hand the records
+ * to the log collector.
+ *
+ * NOTE(review): when kfifo_out() returns 0 records the code jumps to the
+ * error label while ret == 0, so the function reports success; confirm
+ * this "nothing drained" semantics is intended.
+ */
+int hip4_collect(struct scsc_log_collector_client *collect_client, size_t size)
+{
+ int i = SCSC_HIP4_DEBUG_INTERFACES;
+ int ret = 0;
+ unsigned long flags;
+ u32 num_samples;
+ struct hip4_sampler_dev *hip4_dev;
+ void *buf;
+
+ SLSI_INFO_NODEV("Triggered log collection in hip4_sampler\n");
+
+ if (!hip4_sampler_enable)
+ return 0;
+
+ /* Scan all interfaces, matching on owning mx and OFFLINE type */
+ while (i--)
+ if (hip4_sampler.devs[i].mx == collect_client->prv && hip4_sampler.devs[i].type == OFFLINE) {
+ hip4_dev = &hip4_sampler.devs[i];
+ num_samples = kfifo_len(&hip4_dev->fifo);
+ if (!num_samples)
+ continue;
+ buf = vmalloc(num_samples * sizeof(struct hip4_record));
+ if (!buf)
+ continue;
+ /* Drain under the sampler spinlock so no writer races us */
+ spin_lock_irqsave(&g_spinlock, flags);
+ ret = kfifo_out(&hip4_dev->fifo, buf, num_samples);
+ spin_unlock_irqrestore(&g_spinlock, flags);
+ if (!ret)
+ goto error;
+ SLSI_DBG1_NODEV(SLSI_HIP, "num_samples %d ret %d size of hip4_record %zu\n", num_samples, ret, sizeof(struct hip4_record));
+ ret = scsc_log_collector_write(buf, ret * sizeof(struct hip4_record), 1);
+ if (ret)
+ goto error;
+ vfree(buf);
+ }
+ return 0;
+error:
+ vfree(buf);
+ return ret;
+}
+
+/* Log-collector hook: collection finished, clear in_read so
+ * hip4_sampler_update_record() starts accepting samples again. */
+int hip4_collect_end(struct scsc_log_collector_client *collect_client)
+{
+ /* Restart sampling */
+ atomic_set(&in_read, 0);
+ return 0;
+}
+
+/* Collect client registration: callbacks invoked by the SCSC log
+ * collector around a collection (init -> collect -> end). prv is NULL
+ * here and is matched against devs[].mx in hip4_collect(), so it is
+ * presumably filled in with the owning scsc_mx at registration time —
+ * TODO confirm (registration code not visible in this chunk). */
+struct scsc_log_collector_client hip4_collect_client = {
+ .name = "HIP4 Sampler",
+ .type = SCSC_LOG_CHUNK_HIP4_SAMPLER,
+ .collect_init = hip4_collect_init,
+ .collect = hip4_collect,
+ .collect_end = hip4_collect_end,
+ .prv = NULL,
+};
+#endif
+
+/* fops open handler.
+ *
+ * OFFLINE devices need no per-open setup (their fifo is presumably
+ * allocated at registration time — not visible in this chunk): just
+ * stash the device in private_data. STREAMING devices allocate the
+ * streaming kfifo on first open; opening an already-started device is a
+ * no-op returning success.
+ *
+ * Returns 0 on success, -ERESTARTSYS if interrupted waiting for the
+ * device mutex, -ENOMEM if the fifo allocation fails.
+ */
+static int hip4_sampler_open(struct inode *inode, struct file *filp)
+{
+	struct hip4_sampler_dev *hip4_dev;
+	int ret = 0;
+
+	hip4_dev = container_of(inode->i_cdev, struct hip4_sampler_dev, cdev);
+
+	if (hip4_dev->type == OFFLINE) {
+		/* Offline buffer: skip streaming setup */
+		filp->private_data = hip4_dev;
+		return 0;
+	}
+
+	if (mutex_lock_interruptible(&hip4_dev->mutex))
+		return -ERESTARTSYS;
+
+	if (filp->private_data) {
+		SLSI_INFO_NODEV("Service already started\n");
+		ret = 0;
+		goto end;
+	}
+
+	/* Clamp to 256 * 1024 == 262144 records.
+	 * (Bug fix: the log message previously said "2262144".) */
+	if (hip4_sampler_kfifo_len > 256 * 1024) {
+		SLSI_DBG1_NODEV(SLSI_HIP, "hip4_sampler_kfifo_len %d > 262144. Set to MAX", hip4_sampler_kfifo_len);
+		hip4_sampler_kfifo_len = 256 * 1024;
+	}
+
+	ret = kfifo_alloc(&hip4_dev->fifo, hip4_sampler_kfifo_len, GFP_KERNEL);
+	if (ret) {
+		SLSI_ERR_NODEV("kfifo_alloc failed");
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	filp->private_data = hip4_dev;
+
+	/* Clear any remaining error and reset per-session counters */
+	hip4_dev->error = NO_ERROR;
+	hip4_dev->record_num = 0;
+	hip4_dev->kfifo_max = 0;
+	/* Non-NULL filp switches __hip4_sampler_update_record() to the
+	 * streaming fifo */
+	hip4_dev->filp = filp;
+
+	SLSI_INFO_NODEV("%s: Sampling....\n", DRV_NAME);
+end:
+	mutex_unlock(&hip4_dev->mutex);
+	return ret;
+}
+
+static ssize_t hip4_sampler_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
+{
+ unsigned int copied;
+ int ret = 0;
+ struct hip4_sampler_dev *hip4_dev;
+
+ hip4_dev = filp->private_data;
+
+ if (hip4_dev->type == OFFLINE) {
+ /* Offline buffer skip open */
+ if (mutex_lock_interruptible(&hip4_dev->mutex))
+ return -EINTR;
+ atomic_set(&in_read, 1);
+ ret = kfifo_to_user(&hip4_dev->fifo, buf, len, &copied);
+ mutex_unlock(&hip4_dev->mutex);
+ return ret ? ret : copied;
+ }
+
+ if (mutex_lock_interruptible(&hip4_dev->mutex))
+ return -EINTR;
+
+ /* Check whether the device is in error */
+ if (hip4_dev->error != NO_ERROR) {
+ SLSI_ERR_NODEV("Device in error\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ while (len) {
+ if (kfifo_len(&hip4_dev->fifo)) {
+ ret = kfifo_to_user(&hip4_dev->fifo, buf, len, &copied);
+ if (!ret)
+ ret = copied;
+ break;
+ }
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ ret = wait_event_interruptible(hip4_dev->read_wait,
+ !kfifo_is_empty(&hip4_dev->fifo));
+ if (ret < 0)
+ break;
+ }
+end:
+ mutex_unlock(&hip4_dev->mutex);
+ return ret;
+}
+
+static unsigned hip4_sampler_poll(struct file *filp, poll_table *wait)
+{
+ struct hip4_sampler_dev *hip4_dev;
+ int ret;
+
+ hip4_dev = filp->private_data;
+
+ if (hip4_dev->type == OFFLINE)
+ /* Offline buffer skip poll */
+ return 0;
+
+ if (mutex_lock_interruptible(&hip4_dev->mutex))
+ return -EINTR;
+
+ if (hip4_dev->error != NO_ERROR) {
+ ret = POLLERR;
+ goto end;
+ }
+
+ poll_wait(filp, &hip4_dev->read_wait, wait);
+
+ if (!kfifo_is_empty(&hip4_dev->fifo)) {
+ ret = POLLIN | POLLRDNORM; /* readeable */
+ goto end;
+ }
+
+ ret = POLLOUT | POLLWRNORM; /* writable */
+
+end:
+ mutex_unlock(&hip4_dev->mutex);
+ return ret;
+}
+
+static int hip4_sampler_release(struct inode *inode, struct file *filp)
+{
+ struct hip4_sampler_dev *hip4_dev;
+
+ hip4_dev = container_of(inode->i_cdev, struct hip4_sampler_dev, cdev);
+
+ if (hip4_dev->type == OFFLINE) {
+ atomic_set(&in_read, 0);
+ /* Offline buffer skip release */
+ return 0;
+ }
+
+ if (mutex_lock_interruptible(&hip4_dev->mutex))
+ return -EINTR;
+
+ if (!hip4_dev->filp) {
+ SLSI_ERR_NODEV("Device already closed\n");
+ mutex_unlock(&hip4_dev->mutex);
+ return -EIO;
+ }
+
+ if (hip4_dev != filp->private_data) {
+ SLSI_ERR_NODEV("Data mismatch\n");
+ mutex_unlock(&hip4_dev->mutex);
+ return -EIO;
+ }
+
+ filp->private_data = NULL;
+ hip4_dev->filp = NULL;
+ kfifo_free(&hip4_dev->fifo);
+
+ mutex_unlock(&hip4_dev->mutex);
+ SLSI_INFO_NODEV("%s: Sampling... end. Kfifo_max = %d\n", DRV_NAME, hip4_dev->kfifo_max);
+ return 0;
+}
+
/* File operations shared by the STREAMING and OFFLINE sampler char
 * devices; the handlers branch internally on hip4_sampler_dev->type.
 */
static const struct file_operations hip4_sampler_fops = {
	.owner = THIS_MODULE,
	.open = hip4_sampler_open,
	.read = hip4_sampler_read,
	.release = hip4_sampler_release,
	.poll = hip4_sampler_poll,
};
+
+/* Return minor (if exists) associated with this maxwell instance */
+int hip4_sampler_register_hip(struct scsc_mx *mx)
+{
+ int i = SCSC_HIP4_DEBUG_INTERFACES;
+
+ while (i--)
+ if (hip4_sampler.devs[i].mx == mx &&
+ hip4_sampler.devs[i].type == STREAMING)
+ return i;
+ return -ENODEV;
+}
+
/* Create the hip4_sampler character devices for a maxwell instance: one
 * STREAMING device (live samples) plus one OFFLINE device (static
 * circular buffer) per stream channel, then register the log-collector
 * client. On failure the function logs and returns, leaving any
 * partially created devices in place.
 */
void hip4_sampler_create(struct slsi_dev *sdev, struct scsc_mx *mx)
{
	dev_t devn;
	int ret;
	char dev_name[20];
	int minor;
	int i;

	SLSI_INFO_NODEV("hip4_sampler version: %d.%d\n", VER_MAJOR, VER_MINOR);

	/* NOTE(review): this memset clears hip4_sampler.init, so the
	 * "!hip4_sampler.init" check below is always true and any state
	 * from a previous call is wiped - confirm create is only ever
	 * called once per chrdev region.
	 */
	memset(&hip4_sampler, 0, sizeof(hip4_sampler));
	/* Check whether exists */
	if (!hip4_sampler.init) {
		ret = alloc_chrdev_region(&hip4_sampler.device, 0, SCSC_HIP4_DEBUG_INTERFACES, "hip4_sampler_char");
		if (ret)
			goto error;

		hip4_sampler.class_hip4_sampler = class_create(THIS_MODULE, DEVICE_NAME);
		if (IS_ERR(hip4_sampler.class_hip4_sampler)) {
			SLSI_ERR_NODEV("hip4_sampler class creation failed\n");
			ret = PTR_ERR(hip4_sampler.class_hip4_sampler);
			goto error_class;
		}
	}

	/* Search for free minors */
	minor = find_first_zero_bit(bitmap_hip4_sampler_minor, SCSC_HIP4_DEBUG_INTERFACES);
	if (minor == SCSC_HIP4_DEBUG_INTERFACES) {
		SLSI_INFO_NODEV("minor %d > SCSC_TTY_MINORS\n", minor);
		return;
	}

	/* Create Stream channels */
	/* Each Stream channel will have an associated Offline channel */
	for (i = 0; i < SCSC_HIP4_STREAM_CH; i++) {
		/* NOTE(review): "minor += i" combined with the minor++
		 * below only yields consecutive minors while
		 * SCSC_HIP4_STREAM_CH <= 2; verify before raising the
		 * channel count.
		 */
		minor += i;
		devn = MKDEV(MAJOR(hip4_sampler.device), MINOR(minor));

		snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "hip4", i, "sam_str");

		cdev_init(&hip4_sampler.devs[minor].cdev, &hip4_sampler_fops);
		hip4_sampler.devs[minor].cdev.owner = THIS_MODULE;
		hip4_sampler.devs[minor].cdev.ops = &hip4_sampler_fops;

		ret = cdev_add(&hip4_sampler.devs[minor].cdev, devn, 1);
		if (ret) {
			hip4_sampler.devs[minor].cdev.dev = 0;
			return;
		}

		hip4_sampler.devs[minor].dev =
			device_create(hip4_sampler.class_hip4_sampler, NULL, hip4_sampler.devs[minor].cdev.dev, NULL, dev_name);

		/* NOTE(review): device_create() returns ERR_PTR() on
		 * failure, never NULL - IS_ERR() would be the usual check
		 * here and for the offline device below.
		 */
		if (!hip4_sampler.devs[minor].dev) {
			SLSI_ERR_NODEV("dev is NULL\n");
			hip4_sampler.devs[minor].cdev.dev = 0;
			cdev_del(&hip4_sampler.devs[minor].cdev);
			return;
		}

		hip4_sampler.devs[minor].mx = mx;

		mutex_init(&hip4_sampler.devs[minor].mutex);
		hip4_sampler.devs[minor].kfifo_max = 0;
		hip4_sampler.devs[minor].type = STREAMING;
		hip4_sampler.devs[minor].minor = minor;

		init_waitqueue_head(&hip4_sampler.devs[minor].read_wait);

		slsi_traffic_mon_client_register(
			sdev,
			&hip4_sampler.devs[minor],
			TRAFFIC_MON_CLIENT_MODE_PERIODIC,
			0,
			0,
			hip4_sampler_tput_monitor);

		/* Update bit mask */
		set_bit(minor, bitmap_hip4_sampler_minor);

		minor++;

		/* Create associated offline channel */
		devn = MKDEV(MAJOR(hip4_sampler.device), MINOR(minor));

		snprintf(dev_name, sizeof(dev_name), "%s_%d_%s", "hip4", i, "sam_off");

		cdev_init(&hip4_sampler.devs[minor].cdev, &hip4_sampler_fops);
		hip4_sampler.devs[minor].cdev.owner = THIS_MODULE;
		hip4_sampler.devs[minor].cdev.ops = &hip4_sampler_fops;

		ret = cdev_add(&hip4_sampler.devs[minor].cdev, devn, 1);
		if (ret) {
			hip4_sampler.devs[minor].cdev.dev = 0;
			return;
		}

		hip4_sampler.devs[minor].dev =
			device_create(hip4_sampler.class_hip4_sampler, NULL, hip4_sampler.devs[minor].cdev.dev, NULL, dev_name);

		if (!hip4_sampler.devs[minor].dev) {
			hip4_sampler.devs[minor].cdev.dev = 0;
			cdev_del(&hip4_sampler.devs[minor].cdev);
			return;
		}

		/* Clamp the static (offline) fifo length to 256 KiB.
		 * NOTE(review): the log text says "2262144" but the cap
		 * is 262144.
		 */
		if (hip4_sampler_static_kfifo_len > 256 * 1024) {
			SLSI_DBG1_NODEV(SLSI_HIP, "hip4_sampler_static_kfifo_len %d > 2262144. Set to MAX", hip4_sampler_static_kfifo_len);
			hip4_sampler_static_kfifo_len = 256 * 1024;
		}
		ret = kfifo_alloc(&hip4_sampler.devs[minor].fifo, hip4_sampler_static_kfifo_len, GFP_KERNEL);
		if (ret) {
			SLSI_ERR_NODEV("kfifo_alloc failed");
			hip4_sampler.devs[minor].dev = NULL;
			hip4_sampler.devs[minor].cdev.dev = 0;
			cdev_del(&hip4_sampler.devs[minor].cdev);
			return;
		}

		hip4_sampler.devs[minor].mx = mx;

		mutex_init(&hip4_sampler.devs[minor].mutex);
		hip4_sampler.devs[minor].kfifo_max = 0;
		hip4_sampler.devs[minor].type = OFFLINE;

		/* Update bit mask */
		set_bit(minor, bitmap_hip4_sampler_minor);
	}

#ifdef CONFIG_SCSC_LOG_COLLECTION
	hip4_collect_client.prv = mx;
	scsc_log_collector_register_client(&hip4_collect_client);
#endif
	spin_lock_init(&g_spinlock);
	hip4_sampler.init = true;

	SLSI_INFO_NODEV("%s: Ready to start sampling....\n", DRV_NAME);

	return;

error_class:
	unregister_chrdev_region(hip4_sampler.device, SCSC_HIP4_DEBUG_INTERFACES);
	hip4_sampler.init = false;
error:
	return;
}
+
/* Tear down every active sampler device: free fifos, unregister the
 * traffic-monitor client, destroy the char devices, then drop the class
 * and the chrdev region.
 * NOTE(review): the loop matches any device with a non-NULL mx, not
 * specifically @mx - confirm single maxwell instance usage.
 */
void hip4_sampler_destroy(struct slsi_dev *sdev, struct scsc_mx *mx)
{
	int i = SCSC_HIP4_DEBUG_INTERFACES;
	struct hip4_sampler_dev *hip4_dev;

	while (i--)
		if (hip4_sampler.devs[i].cdev.dev && hip4_sampler.devs[i].mx) {
			hip4_dev = &hip4_sampler.devs[i];
			/* This should be never be true - as knod should prevent unloading while
			 * the service (device node) is open
			 */
			if (hip4_sampler.devs[i].filp) {
				hip4_sampler.devs[i].filp = NULL;
				kfifo_free(&hip4_sampler.devs[i].fifo);
			}
			/* OFFLINE fifos are allocated at create time, so always free them */
			if (hip4_sampler.devs[i].type == OFFLINE)
				kfifo_free(&hip4_sampler.devs[i].fifo);

			slsi_traffic_mon_client_unregister(sdev, hip4_dev);
			device_destroy(hip4_sampler.class_hip4_sampler, hip4_sampler.devs[i].cdev.dev);
			cdev_del(&hip4_sampler.devs[i].cdev);
			memset(&hip4_sampler.devs[i].cdev, 0, sizeof(struct cdev));
			hip4_sampler.devs[i].mx = NULL;
			clear_bit(i, bitmap_hip4_sampler_minor);
		}
#ifdef CONFIG_SCSC_LOG_COLLECTION
	scsc_log_collector_unregister_client(&hip4_collect_client);
#endif
	class_destroy(hip4_sampler.class_hip4_sampler);
	unregister_chrdev_region(hip4_sampler.device, SCSC_HIP4_DEBUG_INTERFACES);
	hip4_sampler.init = false;
}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+#include <linux/seq_file.h>
+#include <net/tcp.h>
+
+#include "dev.h"
+
+/* TCP send buffer sizes */
+extern int sysctl_tcp_wmem[3];
+
+#ifndef __HIP4_SAMPLER_H__
+#define __HIP4_SAMPLER_H__
+
+#define HIP4_SAMPLER_SIGNAL_CTRLTX 0x20
+#define HIP4_SAMPLER_SIGNAL_CTRLRX 0x21
+#define HIP4_SAMPLER_THROUG 0x22
+#define HIP4_SAMPLER_THROUG_K 0x23
+#define HIP4_SAMPLER_THROUG_M 0x24
+#define HIP4_SAMPLER_STOP_Q 0x25
+#define HIP4_SAMPLER_START_Q 0x26
+#define HIP4_SAMPLER_QREF 0x27
+#define HIP4_SAMPLER_PEER 0x29
+#define HIP4_SAMPLER_BOT_RX 0x2a
+#define HIP4_SAMPLER_BOT_TX 0x2b
+#define HIP4_SAMPLER_BOT_ADD 0x2c
+#define HIP4_SAMPLER_BOT_REMOVE 0x2d
+#define HIP4_SAMPLER_BOT_STOP_Q 0x2e
+#define HIP4_SAMPLER_BOT_START_Q 0x2f
+#define HIP4_SAMPLER_BOT_QMOD_RX 0x30
+#define HIP4_SAMPLER_BOT_QMOD_TX 0x31
+#define HIP4_SAMPLER_BOT_QMOD_STOP 0x32
+#define HIP4_SAMPLER_BOT_QMOD_START 0x33
+#define HIP4_SAMPLER_PKT_TX 0x40
+#define HIP4_SAMPLER_PKT_TX_HIP4 0x41
+#define HIP4_SAMPLER_PKT_TX_FB 0x42
+#define HIP4_SAMPLER_SUSPEND 0x50
+#define HIP4_SAMPLER_RESUME 0x51
+
+#define HIP4_SAMPLER_TCP_SYN 0x60
+#define HIP4_SAMPLER_TCP_FIN 0x61
+#define HIP4_SAMPLER_TCP_DATA 0x62
+#define HIP4_SAMPLER_TCP_ACK 0x63
+#define HIP4_SAMPLER_TCP_RWND 0x64
+#define HIP4_SAMPLER_TCP_CWND 0x65
+#define HIP4_SAMPLER_TCP_SEND_BUF 0x66
+#define HIP4_SAMPLER_TCP_DATA_IN 0x67
+#define HIP4_SAMPLER_TCP_ACK_IN 0x68
+
+#define HIP4_SAMPLER_MBULK 0xaa
+#define HIP4_SAMPLER_QFULL 0xbb
+#define HIP4_SAMPLER_MFULL 0xcc
+#define HIP4_SAMPLER_INT 0xdd
+#define HIP4_SAMPLER_INT_OUT 0xee
+#define HIP4_SAMPLER_INT_BH 0xde
+#define HIP4_SAMPLER_INT_OUT_BH 0xef
+#define HIP4_SAMPLER_RESET 0xff
+
+#define SCSC_HIP4_INTERFACES 1
+
+#define SCSC_HIP4_STREAM_CH 1
+#define SCSC_HIP4_OFFLINE_CH SCSC_HIP4_STREAM_CH
+
+#if (SCSC_HIP4_OFFLINE_CH != SCSC_HIP4_STREAM_CH)
+#error "SCSC_HIP4_STREAM_CH has to be equal to SCSC_HIP4_OFFLINE_CH"
+#endif
+
+#define SCSC_HIP4_DEBUG_INTERFACES ((SCSC_HIP4_INTERFACES) * (SCSC_HIP4_STREAM_CH + SCSC_HIP4_OFFLINE_CH))
+
+struct scsc_mx;
+
+void hip4_sampler_create(struct slsi_dev *sdev, struct scsc_mx *mx);
+void hip4_sampler_destroy(struct slsi_dev *sdev, struct scsc_mx *mx);
+
+/* Register hip4 instance with the logger */
+/* return char device minor associated with the maxwell instance*/
+int hip4_sampler_register_hip(struct scsc_mx *mx);
+
+void hip4_sampler_update_record(u32 minor, u8 param1, u8 param2, u8 param3, u8 param4, u32 param5);
+void hip4_sampler_tcp_decode(struct slsi_dev *sdev, struct net_device *dev, u8 *frame, bool from_ba);
+
+extern bool hip4_sampler_sample_q;
+extern bool hip4_sampler_sample_qref;
+extern bool hip4_sampler_sample_int;
+extern bool hip4_sampler_sample_fapi;
+extern bool hip4_sampler_sample_through;
+extern bool hip4_sampler_sample_tcp;
+extern bool hip4_sampler_sample_start_stop_q;
+extern bool hip4_sampler_sample_mbulk;
+extern bool hip4_sampler_sample_qfull;
+extern bool hip4_sampler_sample_mfull;
+extern bool hip4_sampler_vif;
+extern bool hip4_sampler_bot;
+extern bool hip4_sampler_pkt_tx;
+extern bool hip4_sampler_suspend_resume;
+
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+#define SCSC_HIP4_SAMPLER_Q(minor, q, idx_rw, value, rw) \
+ do { \
+ if (hip4_sampler_sample_q) { \
+ hip4_sampler_update_record(minor, q, idx_rw, value, rw, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_QREF(minor, ref, q) \
+ do { \
+ if (hip4_sampler_sample_qref) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_QREF, (ref & 0xff0000) >> 16, (ref & 0xff00) >> 8, (ref & 0xf0) | q, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_SIGNAL_CTRLTX(minor, bytes16_h, bytes16_l) \
+ do { \
+ if (hip4_sampler_sample_fapi) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_SIGNAL_CTRLTX, 0, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_SIGNAL_CTRLRX(minor, bytes16_h, bytes16_l) \
+ do { \
+ if (hip4_sampler_sample_fapi) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_SIGNAL_CTRLRX, 0, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, frame, from_ba) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_tcp_decode(sdev, dev, frame, from_ba); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_THROUG(minor, rx_tx, bytes16_h, bytes16_l) \
+ do { \
+ if (hip4_sampler_sample_through) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_THROUG, rx_tx, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_THROUG_K(minor, rx_tx, bytes16_h, bytes16_l) \
+ do { \
+ if (hip4_sampler_sample_through) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_THROUG_K, rx_tx, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_THROUG_M(minor, rx_tx, bytes16_h, bytes16_l) \
+ do { \
+ if (hip4_sampler_sample_through) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_THROUG_M, rx_tx, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_STOP_Q(minor, vif_id) \
+ do { \
+ if (hip4_sampler_sample_start_stop_q) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_STOP_Q, 0, 0, vif_id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_START_Q(minor, vif_id) \
+ do { \
+ if (hip4_sampler_sample_start_stop_q) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_START_Q, 0, 0, vif_id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_MBULK(minor, bytes16_h, bytes16_l, clas) \
+ do { \
+ if (hip4_sampler_sample_mbulk) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_MBULK, clas, bytes16_h, bytes16_l, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_QFULL(minor, q) \
+ do { \
+ if (hip4_sampler_sample_qfull) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_QFULL, 0, 0, q, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_MFULL(minor) \
+ do { \
+ if (hip4_sampler_sample_mfull) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_MFULL, 0, 0, 0, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_INT(minor, id) \
+ do { \
+ if (hip4_sampler_sample_int) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_INT, 0, 0, id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_INT_OUT(minor, id) \
+ do { \
+ if (hip4_sampler_sample_int) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_INT_OUT, 0, 0, id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_INT_BH(minor, id) \
+ do { \
+ if (hip4_sampler_sample_int) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_INT_BH, 0, 0, id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_INT_OUT_BH(minor, id) \
+ do { \
+ if (hip4_sampler_sample_int) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_INT_OUT_BH, 0, 0, id, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_RESET(minor) \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_RESET, 0, 0, 0, 0)
+
+#define SCSC_HIP4_SAMPLER_VIF_PEER(minor, tx, vif, peer_index) \
+ do { \
+ if (hip4_sampler_vif) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_PEER, tx, vif, peer_index, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_RX(minor, scod, smod, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_RX, scod, smod, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_TX(minor, scod, smod, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_TX, scod, smod, vif_peer, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_BOT_ADD(minor, addr_1, addr_2, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_ADD, addr_1, addr_2, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_REMOVE(minor, addr_1, addr_2, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_REMOVE, addr_1, addr_2, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_START_Q(minor, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_START_Q, 0, 0, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_STOP_Q(minor, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_STOP_Q, 0, 0, vif_peer, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_RX(minor, qcod, qmod, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_QMOD_RX, qcod, qmod, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_TX(minor, qcod, qmod, vif_peer) \
+ do { \
+ if (hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_QMOD_TX, qcod, qmod, vif_peer, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_START(minor, vif_peer) \
+ do { \
+ if (hip4_sampler_sample_start_stop_q || hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_QMOD_START, 0, 0, vif_peer, 0); \
+ } \
+ } while (0)
+
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_STOP(minor, vif_peer) \
+ do { \
+ if (hip4_sampler_sample_start_stop_q || hip4_sampler_bot) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_BOT_QMOD_STOP, 0, 0, vif_peer, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_PKT_TX(minor, host_tag) \
+ do { \
+ if (hip4_sampler_pkt_tx) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_PKT_TX, 0, (host_tag >> 8) & 0xff, host_tag & 0xff, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_PKT_TX_HIP4(minor, host_tag) \
+ do { \
+ if (hip4_sampler_pkt_tx) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_PKT_TX_HIP4, 0, (host_tag >> 8) & 0xff, host_tag & 0xff, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_PKT_TX_FB(minor, host_tag) \
+ do { \
+ if (hip4_sampler_pkt_tx) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_PKT_TX_FB, 0, (host_tag >> 8) & 0xff, host_tag & 0xff, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_SUSPEND(minor) \
+ do { \
+ if (hip4_sampler_suspend_resume) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_SUSPEND, 0, 0, 0, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_RESUME(minor) \
+ do { \
+ if (hip4_sampler_suspend_resume) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_RESUME, 0, 0, 0, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_SYN(minor, id, mss) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_SYN, id, 0, 0, mss); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_FIN(minor, id) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_FIN, id, 0, 0, 0); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_DATA(minor, id, seq_num) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_DATA, id, 0, 0, seq_num); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_ACK(minor, id, ack_num) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_ACK, id, 0, 0, ack_num); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_DATA_IN(minor, id, seq_num) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_DATA_IN, id, 0, 0, seq_num); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_ACK_IN(minor, id, ack_num) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_ACK_IN, id, 0, 0, ack_num); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_RWND(minor, id, rwnd) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_RWND, id, 0, 0, rwnd); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_CWND(minor, id, cwnd) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_CWND, id, 0, 0, cwnd); \
+ } \
+ } while (0)
+#define SCSC_HIP4_SAMPLER_TCP_SEND_BUF(minor, id, send_buff_size) \
+ do { \
+ if (hip4_sampler_sample_tcp) { \
+ hip4_sampler_update_record(minor, HIP4_SAMPLER_TCP_SEND_BUF, id, 0, 0, send_buff_size); \
+ } \
+ } while (0)
+#else
+#define SCSC_HIP4_SAMPLER_Q(minor, q, idx_rw, value, rw)
+#define SCSC_HIP4_SAMPLER_QREF(minor, ref, q)
+#define SCSC_HIP4_SAMPLER_SIGNAL_CTRLTX(minor, bytes16_h, bytes16_l)
+#define SCSC_HIP4_SAMPLER_SIGNAL_CTRLRX(minor, bytes16_h, bytes16_l)
+#define SCSC_HIP4_SAMPLER_TPUT(minor, rx_tx, payload)
+#define SCSC_HIP4_SAMPLER_THROUG(minor, bytes16_h, bytes16_l)
+#define SCSC_HIP4_SAMPLER_THROUG_K(minor, bytes16_h, bytes16_l)
+#define SCSC_HIP4_SAMPLER_THROUG_M(minor, bytes16_h, bytes16_l)
+#define SCSC_HIP4_SAMPLER_MBULK(minor, bytes16_h, bytes16_l, clas)
+#define SCSC_HIP4_SAMPLER_QFULL(minor, q)
+#define SCSC_HIP4_SAMPLER_MFULL(minor)
+#define SCSC_HIP4_SAMPLER_INT(minor, id)
+#define SCSC_HIP4_SAMPLER_INT_BH(minor, id)
+#define SCSC_HIP4_SAMPLER_INT_OUT(minor, id)
+#define SCSC_HIP4_SAMPLER_INT_OUT_BH(minor, id)
+#define SCSC_HIP4_SAMPLER_RESET(minor)
+#define SCSC_HIP4_SAMPLER_VIF_PEER(minor, tx, vif, peer_index)
+#define SCSC_HIP4_SAMPLER_BOT_RX(minor, scod, smod, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_TX(minor, scod, smod, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT(minor, init, addr_1, addr_2, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_ADD(minor, addr_1, addr_2, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_REMOVE(minor, addr_1, addr_2, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_START_Q(minor, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_STOP_Q(minor, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_RX(minor, qcod, qmod, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_TX(minor, qcod, qmod, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_START(minor, vif_peer)
+#define SCSC_HIP4_SAMPLER_BOT_QMOD_STOP(minor, vif_peer)
+#define SCSC_HIP4_SAMPLER_PKT_TX(minor, host_tag)
+#define SCSC_HIP4_SAMPLER_PKT_TX_HIP4(minor, host_tag)
+#define SCSC_HIP4_SAMPLER_PKT_TX_FB(minor, host_tag)
+#define SCSC_HIP4_SAMPLER_SUSPEND(minor)
+#define SCSC_HIP4_SAMPLER_RESUME(minor)
+#define SCSC_HIP4_SAMPLER_TCP_SYN(minor, id, mss)
+#define SCSC_HIP4_SAMPLER_TCP_FIN(minor, id)
+#define SCSC_HIP4_SAMPLER_TCP_DATA(minor, id, seq_num)
+#define SCSC_HIP4_SAMPLER_TCP_ACK(minor, id, ack_num)
+#define SCSC_HIP4_SAMPLER_TCP_DATA_IN(minor, id, seq_num)
+#define SCSC_HIP4_SAMPLER_TCP_ACK_IN(minor, id, ack_num)
+#define SCSC_HIP4_SAMPLER_TCP_RWND(minor, id, rwnd)
+#define SCSC_HIP4_SAMPLER_TCP_CWND(minor, id, cwnd)
+#define SCSC_HIP4_SAMPLER_TCP_SEND_BUF(minor, id, send_buff_size)
+#define SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, frame, from_ba)
+#endif /* CONFIG_SCSC_WLAN_HIP4_PROFILING */
+
+#endif /* __HIP4_SAMPLER_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_mifram.h>
+#include <scsc/scsc_logring.h>
+#include <linux/ratelimit.h>
+
+#include "debug.h"
+#include "dev.h"
+#include "hip4.h"
+#include "hip4_smapper.h"
+
+#define SMAPPER_GRANULARITY (4 * 1024)
+
+static void hip4_smapper_refill_isr(int irq, void *data);
+
+static int hip4_smapper_alloc_bank(struct slsi_dev *sdev, struct hip4_priv *priv, enum smapper_banks bank_name, u32 entry_size, bool is_large)
+{
+ u16 i;
+ struct hip4_smapper_bank *bank = &(priv)->smapper_banks[bank_name];
+ struct hip4_smapper_control *control = &(priv)->smapper_control;
+ int err;
+
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Init bank %d entry_size %d is_large %d\n", bank_name, entry_size, is_large);
+ bank->entry_size = entry_size;
+
+ /* function returns negative number if an error occurs, otherwise returns the bank number */
+ err = scsc_service_mifsmapper_alloc_bank(sdev->service, is_large, bank->entry_size, &bank->entries);
+ if (err < 0) {
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Error allocating bank %d\n", err);
+ return -ENOMEM;
+ }
+
+ bank->bank = (u32)err;
+ if (bank->bank >= HIP4_SMAPPER_TOTAL_BANKS) {
+ scsc_service_mifsmapper_free_bank(sdev->service, bank->bank);
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Incorrect bank_num %d\n", bank->bank);
+ return -ENOMEM;
+ }
+
+ bank->skbuff = kmalloc_array(bank->entries, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ bank->skbuff_dma = kmalloc_array(bank->entries, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!bank->skbuff || !bank->skbuff_dma) {
+ kfree(bank->skbuff_dma);
+ kfree(bank->skbuff);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < bank->entries; i++)
+ bank->skbuff[i] = NULL;
+
+ bank->align = scsc_service_get_alignment(sdev->service);
+ bank->in_use = true;
+
+ /* update the mapping with BANK# in WLAN with PHY BANK#*/
+ control->lookuptable[bank->bank] = bank_name;
+
+ return 0;
+}
+
+static int hip4_smapper_allocate_skb_buffer_entry(struct slsi_dev *sdev, struct hip4_smapper_bank *bank, int idx)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = slsi_alloc_skb(bank->entry_size, GFP_ATOMIC);
+ if (!skb) {
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Not enough memory\n");
+ return -ENOMEM;
+ }
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "SKB allocated: 0x%p at bank %d entry %d\n", skb, bank->bank, idx);
+ bank->skbuff_dma[idx] = dma_map_single(sdev->dev, skb->data,
+ bank->entry_size, DMA_FROM_DEVICE);
+ err = dma_mapping_error(sdev->dev, bank->skbuff_dma[idx]);
+ if (err) {
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Error mapping SKB: 0x%p at bank %d entry %d\n", skb, bank->bank, idx);
+ slsi_kfree_skb(skb);
+ return err;
+ }
+
+ /* Check alignment */
+ if (!IS_ALIGNED(bank->skbuff_dma[idx], bank->align)) {
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Phys address: 0x%x not %d aligned. Unmap memory and return error\n",
+ bank->skbuff_dma[idx], bank->align);
+ dma_unmap_single(sdev->dev, bank->skbuff_dma[idx], bank->entry_size, DMA_FROM_DEVICE);
+ slsi_kfree_skb(skb);
+ bank->skbuff_dma[idx] = 0;
+ return -ENOMEM;
+ }
+ bank->skbuff[idx] = skb;
+ return 0;
+}
+
+/* Pre-Allocate the skbs for the RX entries */
+static int hip4_smapper_allocate_skb_buffers(struct slsi_dev *sdev, struct hip4_smapper_bank *bank)
+{
+ unsigned int i;
+ unsigned int n;
+ int res;
+
+ if (!bank)
+ return -EINVAL;
+
+ n = bank->entries;
+ for (i = 0; i < n; i++) {
+ if (!bank->skbuff[i]) {
+ res = hip4_smapper_allocate_skb_buffer_entry(sdev, bank, i);
+ if (res != 0)
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static int hip4_smapper_free_skb_buffers(struct slsi_dev *sdev, struct hip4_smapper_bank *bank)
+{
+ unsigned int i;
+ unsigned int n;
+
+ if (!bank)
+ return -EINVAL;
+
+ n = bank->entries;
+ for (i = 0; i < n; i++) {
+ if (bank->skbuff[i]) {
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "SKB free: 0x%p at bank %d entry %d\n", bank->skbuff[i], bank->bank, i);
+ dma_unmap_single(sdev->dev, bank->skbuff_dma[i], bank->entry_size, DMA_FROM_DEVICE);
+ bank->skbuff_dma[i] = 0;
+ slsi_kfree_skb(bank->skbuff[i]);
+ bank->skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int hip4_smapper_program(struct slsi_dev *sdev, struct hip4_smapper_bank *bank)
+{
+ unsigned int n;
+
+ if (!bank)
+ return -EINVAL;
+
+ n = bank->entries;
+
+ SLSI_DBG4_NODEV(SLSI_SMAPPER, "Programming Bank %d\n", bank->bank);
+
+ return scsc_service_mifsmapper_write_sram(sdev->service, bank->bank, n, 0, bank->skbuff_dma);
+}
+
+/* refill ISR. FW signals the Host whenever it wants to refill the smapper buffers */
+/* Only the Host Owned Buffers should be refilled */
+static void hip4_smapper_refill_isr(int irq, void *data)
+{
+ struct slsi_hip4 *hip = (struct slsi_hip4 *)data;
+ struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);
+ struct hip4_smapper_control *control;
+ struct hip4_smapper_bank *bank;
+ enum smapper_banks i;
+ unsigned long flags;
+ /* Temporary removed
+ * static DEFINE_RATELIMIT_STATE(ratelimit, 1 * HZ, 1);
+ */
+
+ control = &(hip->hip_priv->smapper_control);
+#ifdef CONFIG_SCSC_QOS
+ /* Ignore request if TPUT is low or platform is in suspend */
+ if (hip->hip_priv->pm_qos_state == SCSC_QOS_DISABLED ||
+ atomic_read(&hip->hip_priv->in_suspend) ||
+ *control->mbox_ptr == 0x0) {
+#else
+ /* Ignore if platform is in suspend */
+ if (atomic_read(&hip->hip_priv->in_suspend) ||
+ *control->mbox_ptr == 0x0) {
+#endif
+ /*
+ * Temporary removed
+ * if (__ratelimit(&ratelimit))
+ * SLSI_DBG1_NODEV(SLSI_SMAPPER, "Ignore SMAPPER request. Invalid state.\n");
+ */
+ /* Clear interrupt */
+ scsc_service_mifintrbit_bit_clear(sdev->service, control->th_req);
+ return;
+ }
+
+ spin_lock_irqsave(&control->smapper_lock, flags);
+ /* Check if FW has requested a BANK configuration */
+ if (HIP4_SMAPPER_BANKS_CHECK_CONFIGURE(*control->mbox_ptr)) {
+ /* Temporary removed
+ * SLSI_DBG4_NODEV(SLSI_SMAPPER, "Trigger SMAPPER configuration\n");
+ */
+ scsc_service_mifsmapper_configure(sdev->service, SMAPPER_GRANULARITY);
+ HIP4_SMAPPER_BANKS_CONFIGURE_DONE(*control->mbox_ptr);
+ }
+ /* Read the first RX bank and check whether needs to be reprogrammed */
+ for (i = RX_0; i < END_RX_BANKS; i++) {
+ bank = &hip->hip_priv->smapper_banks[i];
+
+ if (!bank->in_use)
+ continue;
+
+ if (HIP4_SMAPPER_GET_BANK_OWNER(bank->bank, *control->mbox_ptr) == HIP_SMAPPER_OWNER_HOST) {
+ /* Temporary removed
+ * SLSI_DBG4_NODEV(SLSI_SMAPPER, "SKB allocation at bank %d\n", i);
+ */
+ if (hip4_smapper_allocate_skb_buffers(sdev, bank)) {
+ /* Temporary removed
+ * SLSI_DBG4_NODEV(SLSI_SMAPPER, "Error Allocating skb buffers at bank %d. Setting owner to FW\n", i);
+ */
+ HIP4_SMAPPER_SET_BANK_OWNER(bank->bank, *control->mbox_ptr, HIP_SMAPPER_OWNER_FW);
+ continue;
+ }
+ if (hip4_smapper_program(sdev, bank)) {
+ /* Temporary removed
+ * SLSI_DBG4_NODEV(SLSI_SMAPPER, "Error Programming bank %d. Setting owner to FW\n", i);
+ */
+ HIP4_SMAPPER_SET_BANK_OWNER(bank->bank, *control->mbox_ptr, HIP_SMAPPER_OWNER_FW);
+ continue;
+ }
+ HIP4_SMAPPER_SET_BANK_STATE(bank->bank, *control->mbox_ptr, HIP_SMAPPER_STATUS_MAPPED);
+ HIP4_SMAPPER_SET_BANK_OWNER(bank->bank, *control->mbox_ptr, HIP_SMAPPER_OWNER_FW);
+ }
+ }
+ /* Inform FW that entries have been programmed */
+ scsc_service_mifintrbit_bit_set(sdev->service, control->fh_ind, SCSC_MIFINTR_TARGET_R4);
+
+ /* Clear interrupt */
+ scsc_service_mifintrbit_bit_clear(sdev->service, control->th_req);
+
+ spin_unlock_irqrestore(&control->smapper_lock, flags);
+}
+
+/* Detach the smapper-mapped RX skb referenced by the firmware descriptor at
+ * the head of @skb_fapi->data and stash it in the FAPI skb control block so
+ * hip4_smapper_get_skb()/get_skb_data() can retrieve it later.
+ *
+ * The freed smapper slot is refilled immediately with a fresh skb.
+ *
+ * Returns 0 on success, -EIO when the descriptor is inconsistent (bad
+ * bank/entry/length, or no skb programmed at that slot) - RX is then broken.
+ */
+int hip4_smapper_consume_entry(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi)
+{
+	struct sk_buff *skb;
+	struct hip4_smapper_bank *bank;
+	u8 bank_num;
+	u8 entry;
+	u16 len;
+	u16 headroom;
+	struct hip4_smapper_descriptor *desc;
+	struct hip4_smapper_control *control;
+	struct slsi_skb_cb *cb = slsi_skb_cb_get(skb_fapi);
+
+	control = &(hip->hip_priv->smapper_control);
+
+	desc = (struct hip4_smapper_descriptor *)skb_fapi->data;
+
+	bank_num = desc->bank_num;
+	entry = desc->entry_num;
+	len = desc->entry_size;
+	headroom = desc->headroom;
+
+	if (bank_num >= HIP4_SMAPPER_TOTAL_BANKS) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "Incorrect bank_num %d\n", bank_num);
+		goto error;
+	}
+
+	/* Transform PHY BANK# with BANK# in Wlan service*/
+	bank_num = control->lookuptable[bank_num];
+
+	bank = &hip->hip_priv->smapper_banks[bank_num];
+
+	/* Valid slots are 0..entries-1; the previous '>' check let
+	 * entry == bank->entries index one past the end of bank->skbuff[].
+	 */
+	if (entry >= bank->entries) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "Incorrect entry number %d\n", entry);
+		goto error;
+	}
+
+	if (len > bank->entry_size) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "Incorrect entry len %d\n", len);
+		goto error;
+	}
+
+	skb = bank->skbuff[entry];
+	if (!skb) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "SKB IS NULL at bank %d entry %d\n", bank_num, entry);
+		goto error;
+	}
+
+	/* Take ownership of the mapped skb and release the DMA mapping */
+	bank->skbuff[entry] = NULL;
+	dma_unmap_single(sdev->dev, bank->skbuff_dma[entry], bank->entry_size, DMA_FROM_DEVICE);
+	bank->skbuff_dma[entry] = 0;
+
+	/* Best-effort refill of the slot; a failed refill leaves the slot
+	 * empty until the next refill ISR pass.
+	 */
+	hip4_smapper_allocate_skb_buffer_entry(sdev, bank, entry);
+
+	skb_reserve(skb, headroom);
+	skb_put(skb, len);
+	cb->skb_addr = skb;
+	SLSI_DBG4_NODEV(SLSI_SMAPPER, "Consumed Bank %d Entry %d Len %d SKB smapper: 0x%p, SKB fapi %p\n", bank_num, entry, len, skb, skb_fapi);
+
+	return 0;
+error:
+	/* RX is broken.....*/
+	return -EIO;
+}
+
+/* Return the payload pointer of the smapper skb previously attached to
+ * @skb_fapi by hip4_smapper_consume_entry(), or NULL if none is attached.
+ * Does not transfer ownership of the skb.
+ */
+void *hip4_smapper_get_skb_data(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi)
+{
+	struct sk_buff *skb;
+	struct slsi_skb_cb *cb = slsi_skb_cb_get(skb_fapi);
+
+	/* The smapper control block was previously fetched here but never
+	 * used; the dead local has been removed.
+	 */
+	skb = (struct sk_buff *)cb->skb_addr;
+
+	if (!skb) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "NULL SKB smapper\n");
+		return NULL;
+	}
+
+	SLSI_DBG4_NODEV(SLSI_SMAPPER, "Get SKB smapper: 0x%p, SKB fapi 0x%p\n", skb, skb_fapi);
+	return skb->data;
+}
+
+/* Detach and return the smapper skb attached to @skb_fapi, consuming the
+ * FAPI skb itself (it is freed here; free_ma_unitdat is set so the embedded
+ * unitdata bulk is released with it). Caller owns the returned skb.
+ */
+struct sk_buff *hip4_smapper_get_skb(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi)
+{
+	struct sk_buff *skb;
+	struct slsi_skb_cb *cb = slsi_skb_cb_get(skb_fapi);
+
+	/* The smapper control block was previously fetched here but never
+	 * used; the dead local has been removed.
+	 */
+	skb = (struct sk_buff *)cb->skb_addr;
+
+	SLSI_DBG4_NODEV(SLSI_SMAPPER, "Get SKB smapper: 0x%p, SKB fapi 0x%p\n", skb, skb_fapi);
+	cb->free_ma_unitdat = true;
+	slsi_kfree_skb(skb_fapi);
+
+	return skb;
+}
+
+/* One-time SMAPPER bring-up: allocate the scoreboard mailbox, claim and
+ * pre-fill the four RX banks, register the to-host refill interrupt and
+ * from-host indication bit, and publish the configuration to firmware via
+ * the hip4 config_v4 table.
+ *
+ * Returns 0 on success, -EIO if 64-bit DMA cannot be set, -ENODEV if the
+ * mailbox allocation fails.
+ */
+int hip4_smapper_init(struct slsi_dev *sdev, struct slsi_hip4 *hip)
+{
+	u8 i;
+	struct hip4_smapper_control *control;
+
+	SLSI_DBG4_NODEV(SLSI_SMAPPER, "SMAPPER init\n");
+
+	control = &(hip->hip_priv->smapper_control);
+
+	spin_lock_init(&control->smapper_lock);
+
+	/* SMAPPER requires full 64-bit DMA addressing */
+	if (dma_set_mask_and_coherent(sdev->dev, DMA_BIT_MASK(64)) != 0)
+		return -EIO;
+
+	if (!scsc_mx_service_alloc_mboxes(sdev->service, 1, &control->mbox_scb)) {
+		SLSI_DBG4_NODEV(SLSI_SMAPPER, "Unable to allocate mbox\n");
+		return -ENODEV;
+	}
+
+	/* Claim the RX buffers */
+	/* NOTE(review): the return values of hip4_smapper_alloc_bank() and
+	 * hip4_smapper_allocate_skb_buffers() are not checked - confirm that
+	 * failure is tolerated downstream via entries==0 / in_use==false.
+	 */
+	hip4_smapper_alloc_bank(sdev, hip->hip_priv, RX_0, SMAPPER_GRANULARITY, HIP4_SMAPPER_BANK_LARGE);
+	hip4_smapper_alloc_bank(sdev, hip->hip_priv, RX_1, SMAPPER_GRANULARITY, HIP4_SMAPPER_BANK_LARGE);
+	hip4_smapper_alloc_bank(sdev, hip->hip_priv, RX_2, SMAPPER_GRANULARITY, HIP4_SMAPPER_BANK_LARGE);
+	hip4_smapper_alloc_bank(sdev, hip->hip_priv, RX_3, SMAPPER_GRANULARITY, HIP4_SMAPPER_BANK_LARGE);
+	/*Pre-allocate buffers */
+	hip4_smapper_allocate_skb_buffers(sdev, &hip->hip_priv->smapper_banks[RX_0]);
+	hip4_smapper_allocate_skb_buffers(sdev, &hip->hip_priv->smapper_banks[RX_1]);
+	hip4_smapper_allocate_skb_buffers(sdev, &hip->hip_priv->smapper_banks[RX_2]);
+	hip4_smapper_allocate_skb_buffers(sdev, &hip->hip_priv->smapper_banks[RX_3]);
+
+	/* Allocate Maxwell resources */
+	control->th_req =
+		scsc_service_mifintrbit_register_tohost(sdev->service, hip4_smapper_refill_isr, hip);
+	control->fh_ind =
+		scsc_service_mifintrbit_alloc_fromhost(sdev->service, SCSC_MIFINTR_TARGET_R4);
+
+	control->mbox_ptr =
+		scsc_mx_service_get_mbox_ptr(sdev->service, control->mbox_scb);
+
+	/* All banks to REMAP and FW owner*/
+	*control->mbox_ptr = 0x0;
+
+	/* Update hip4 config table */
+	hip->hip_control->config_v4.smapper_th_req =
+		control->th_req;
+	hip->hip_control->config_v4.smapper_fh_ind =
+		control->fh_ind;
+	hip->hip_control->config_v4.smapper_mbox_scb =
+		(u8)control->mbox_scb;
+
+	/* Advertise only banks that actually received entries */
+	for (i = RX_0; i < END_RX_BANKS; i++) {
+		u8 has_entries;
+		u8 bank;
+
+		has_entries = hip->hip_priv->smapper_banks[i].entries;
+		if (has_entries) {
+			/* Get the bank index */
+			bank = hip->hip_priv->smapper_banks[i].bank;
+			hip->hip_control->config_v4.smapper_bank_addr[bank] = scsc_service_mifsmapper_get_bank_base_address(sdev->service, bank);
+			hip->hip_control->config_v4.smapper_entries_banks[bank] = has_entries;
+			hip->hip_control->config_v4.smapper_pow_sz[bank] = 12; /* 4kB */
+		}
+	}
+	return 0;
+}
+
+/* Tear down everything hip4_smapper_init() created: free per-bank skbs and
+ * bookkeeping arrays, release the HW banks, then the interrupt bits and the
+ * scoreboard mailbox.
+ */
+void hip4_smapper_deinit(struct slsi_dev *sdev, struct slsi_hip4 *hip)
+{
+	struct hip4_smapper_bank *bank;
+	struct hip4_smapper_control *control;
+	unsigned long flags;
+	u8 i;
+
+	SLSI_DBG4_NODEV(SLSI_SMAPPER, "SMAPPER deinit\n");
+	control = &(hip->hip_priv->smapper_control);
+
+	/* Hold the smapper lock so the refill ISR cannot race with the free.
+	 * kfree() is safe in atomic context.
+	 * NOTE(review): scsc_service_mifsmapper_free_bank() is called with a
+	 * spinlock held and interrupts off - confirm it never sleeps.
+	 */
+	spin_lock_irqsave(&control->smapper_lock, flags);
+	for (i = RX_0; i < END_RX_BANKS; i++) {
+		bank = &hip->hip_priv->smapper_banks[i];
+		bank->in_use = false;
+		hip4_smapper_free_skb_buffers(sdev, bank);
+		kfree(bank->skbuff_dma);
+		kfree(bank->skbuff);
+		scsc_service_mifsmapper_free_bank(sdev->service, bank->bank);
+	}
+	spin_unlock_irqrestore(&control->smapper_lock, flags);
+
+	/* Release Maxwell interrupt bits and the scoreboard mailbox */
+	scsc_service_mifintrbit_unregister_tohost(sdev->service, control->th_req);
+	scsc_service_mifintrbit_free_fromhost(sdev->service, control->fh_ind, SCSC_MIFINTR_TARGET_R4);
+	scsc_service_free_mboxes(sdev->service, 1, control->mbox_scb);
+
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __HIP4_SMAPPER_H__
+#define __HIP4_SMAPPER_H__
+
+struct slsi_dev;
+struct slsi_hip4;
+
+/* Traffic class a smapper bank serves. Only the RX banks are instantiated
+ * in this file (see enum smapper_banks); TX_5G/TX_2G appear unused here -
+ * confirm against other users before removing.
+ */
+enum smapper_type {
+	TX_5G,
+	TX_2G,
+	RX
+};
+
+/* Number of physical SMAPPER banks addressable in the shared mailbox word */
+#define HIP4_SMAPPER_TOTAL_BANKS 10
+
+/* Bank size selector (large/small) passed at bank allocation time */
+#define HIP4_SMAPPER_BANK_SMALL false
+#define HIP4_SMAPPER_BANK_LARGE true
+
+/* Per-bank owner flag encoded in the mailbox word (see SET/GET_BANK_OWNER) */
+#define HIP_SMAPPER_OWNER_FW 0
+#define HIP_SMAPPER_OWNER_HOST 1
+
+/* Per-bank mapping state encoded in the mailbox word (see SET_BANK_STATE) */
+#define HIP_SMAPPER_STATUS_REFILL 0
+#define HIP_SMAPPER_STATUS_MAPPED 1
+
+/* NOTE(review): the *_CPU and *_STATE_* constants below are not referenced
+ * anywhere in this file - confirm external users before removing.
+ */
+#define HIP4_SMAPPER_OTHER_CPU 0
+#define HIP4_SMAPPER_OWN_CPU 1
+
+#define HIP4_SMAPPER_STATE_OUT 0
+#define HIP4_SMAPPER_STATE_WANT 1
+#define HIP4_SMAPPER_STATE_CLAIM 2
+
+/* Mailbox word layout: each bank owns 2 bits at position (bank * 2) -
+ * bit 0 = mapping state, bit 1 = owner; the top two bits (30-31) form the
+ * one-shot "banks configured" handshake.
+ */
+#define HIP4_SMAPPER_BANKS_CHECK_CONFIGURE(reg) (((reg) >> 30) == 0 ? 1 : 0)
+#define HIP4_SMAPPER_BANKS_CONFIGURE_DONE(reg)  ((reg) = (reg) | 0xc0000000)
+
+#define HIP4_SMAPPER_GET_BANK_STATE(b, reg)     (((0x1 << ((b) * 2)) & (reg)) > 0 ? 1 : 0)
+#define HIP4_SMAPPER_GET_BANK_OWNER(b, reg)     (((0x2 << ((b) * 2)) & (reg)) > 0 ? 1 : 0)
+
+#define HIP4_SMAPPER_SET_BANK_STATE(b, reg, val) ((reg) = ((reg) & ~(0x1 << ((b) * 2))) | \
+						  ((val) << ((b) * 2)))
+/* Fix: 'reg' was unparenthesised in the clear-mask term below, which would
+ * mis-expand for any non-trivial expression argument (macro hygiene).
+ */
+#define HIP4_SMAPPER_SET_BANK_OWNER(b, reg, val) ((reg) = ((reg) & ~(0x2 << ((b) * 2))) | \
+						  (((val) << 1) << ((b) * 2)))
+
+
+/* Descriptor placed by firmware at the head of a FAPI skb's data, locating
+ * the actual RX payload inside a smapper bank slot (consumed by
+ * hip4_smapper_consume_entry()). Field widths are part of the FW interface.
+ */
+struct hip4_smapper_descriptor {
+	u8 bank_num;	/* PHY bank number; host translates via lookuptable */
+	u8 entry_num;	/* slot index within the bank */
+	u16 entry_size;	/* payload length in bytes */
+	u16 headroom;	/* bytes to reserve before the payload */
+};
+
+/* There should be an agreement between host and FW about bank mapping */
+/* TODO : think about this agreement */
+/* Logical (WLAN-service) RX bank indexes; END_RX_BANKS doubles as count */
+enum smapper_banks {
+	RX_0,
+	RX_1,
+	RX_2,
+	RX_3,
+	END_RX_BANKS
+};
+
+/* Runtime SMAPPER state shared between init/deinit, the refill ISR and the
+ * RX consume path.
+ */
+struct hip4_smapper_control {
+	u32 emul_loc;	/* Smapper emulator location in MIF_ADDR */
+	u32 emul_sz;	/* Smapper emulator size */
+	u8 th_req;	/* TH smapper request interrupt bit position */
+	u8 fh_ind;	/* FH smapper ind interrupt bit position */
+	u32 mbox_scb;	/* SMAPPER MBOX scoreboard location */
+	u32 *mbox_ptr;	/* Mbox pointer */
+	spinlock_t smapper_lock;
+	/* Lookup table to map the virtual bank mapping in wlan with the phy mapping in HW */
+	/* Currently is safe to use this indexing as only WIFI is using smapper */
+	u8 lookuptable[HIP4_SMAPPER_TOTAL_BANKS];
+};
+
+/* Per-bank bookkeeping: the skb and DMA address programmed into each slot.
+ * skbuff/skbuff_dma are arrays of 'entries' elements allocated at bank
+ * creation and freed in hip4_smapper_deinit().
+ */
+struct hip4_smapper_bank {
+	enum smapper_type type;
+	u16 entries;		/* number of slots in this bank */
+	bool in_use;		/* false once teardown starts */
+	u8 bank;		/* PHY bank index in the service */
+	u8 cur;			/* presumably a cursor into the slots - TODO confirm */
+	u32 entry_size;		/* bytes per slot */
+	struct sk_buff **skbuff;
+	dma_addr_t *skbuff_dma;
+	struct hip4_smapper_control_entry *entry;
+	u16 align;		/* NOTE(review): alignment requirement? confirm */
+};
+
+int hip4_smapper_init(struct slsi_dev *sdev, struct slsi_hip4 *hip);
+void hip4_smapper_deinit(struct slsi_dev *sdev, struct slsi_hip4 *hip);
+
+struct mbulk *hip4_smapper_send(struct slsi_hip4 *hip, struct sk_buff *skb, int *val);
+int hip4_smapper_consume_entry(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi);
+void *hip4_smapper_get_skb_data(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi);
+struct sk_buff *hip4_smapper_get_skb(struct slsi_dev *sdev, struct slsi_hip4 *hip, struct sk_buff *skb_fapi);
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_HIP_BH_H__
+#define __SLSI_HIP_BH_H__
+
+#include "wl_result.h"
+
+CsrResult slsi_sdio_func_drv_register(void);
+void slsi_sdio_func_drv_unregister(void);
+
+#endif /* __SLSI_HIP_BH_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "ioctl.h"
+#include "debug.h"
+#include "mlme.h"
+#include "mgt.h"
+#include "cac.h"
+#include "hip.h"
+#include "netif.h"
+#include <net/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include "mib.h"
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_log_collector.h>
+#include "dev.h"
+#include "fapi.h"
+
+#define CMD_RXFILTERADD "RXFILTER-ADD"
+#define CMD_RXFILTERREMOVE "RXFILTER-REMOVE"
+#define CMD_RXFILTERSTART "RXFILTER-START"
+#define CMD_RXFILTERSTOP "RXFILTER-STOP"
+#define CMD_SETCOUNTRYREV "SETCOUNTRYREV"
+#define CMD_GETCOUNTRYREV "GETCOUNTRYREV"
+#define CMD_SETROAMTRIGGER "SETROAMTRIGGER"
+#define CMD_GETROAMTRIGGER "GETROAMTRIGGER"
+#define CMD_SETSUSPENDMODE "SETSUSPENDMODE"
+#define CMD_SETROAMDELTA "SETROAMDELTA"
+#define CMD_GETROAMDELTA "GETROAMDELTA"
+#define CMD_SETROAMSCANPERIOD "SETROAMSCANPERIOD"
+#define CMD_GETROAMSCANPERIOD "GETROAMSCANPERIOD"
+#define CMD_SETFULLROAMSCANPERIOD "SETFULLROAMSCANPERIOD"
+#define CMD_GETFULLROAMSCANPERIOD "GETFULLROAMSCANPERIOD"
+#define CMD_SETSCANCHANNELTIME "SETSCANCHANNELTIME"
+#define CMD_GETSCANCHANNELTIME "GETSCANCHANNELTIME"
+#define CMD_SETSCANNPROBES "SETSCANNPROBES"
+#define CMD_GETSCANNPROBES "GETSCANNPROBES"
+#define CMD_SETROAMMODE "SETROAMMODE"
+#define CMD_GETROAMMODE "GETROAMMODE"
+#define CMD_SETROAMINTRABAND "SETROAMINTRABAND"
+#define CMD_GETROAMINTRABAND "GETROAMINTRABAND"
+#define CMD_SETROAMBAND "SETROAMBAND"
+#define CMD_GETROAMBAND "GETROAMBAND"
+#define CMD_SETROAMSCANCONTROL "SETROAMSCANCONTROL"
+#define CMD_GETROAMSCANCONTROL "GETROAMSCANCONTROL"
+#define CMD_SETSCANHOMETIME "SETSCANHOMETIME"
+#define CMD_GETSCANHOMETIME "GETSCANHOMETIME"
+#define CMD_SETSCANHOMEAWAYTIME "SETSCANHOMEAWAYTIME"
+#define CMD_GETSCANHOMEAWAYTIME "GETSCANHOMEAWAYTIME"
+#define CMD_SETOKCMODE "SETOKCMODE"
+#define CMD_GETOKCMODE "GETOKCMODE"
+#define CMD_SETWESMODE "SETWESMODE"
+#define CMD_GETWESMODE "GETWESMODE"
+#define CMD_SET_PMK "SET_PMK"
+#define CMD_HAPD_GET_CHANNEL "HAPD_GET_CHANNEL"
+#define CMD_SET_SAP_CHANNEL_LIST "SET_SAP_CHANNEL_LIST"
+#define CMD_REASSOC "REASSOC"
+#define CMD_SETROAMSCANCHANNELS "SETROAMSCANCHANNELS"
+#define CMD_GETROAMSCANCHANNELS "GETROAMSCANCHANNELS"
+#define CMD_SENDACTIONFRAME "SENDACTIONFRAME"
+#define CMD_HAPD_MAX_NUM_STA "HAPD_MAX_NUM_STA"
+#define CMD_COUNTRY "COUNTRY"
+#define CMD_SEND_GK "SEND_GK"
+#define CMD_SETAPP2PWPSIE "SET_AP_P2P_WPS_IE"
+#define CMD_P2PSETPS "P2P_SET_PS"
+#define CMD_P2PSETNOA "P2P_SET_NOA"
+#define CMD_P2PECSA "P2P_ECSA"
+#define CMD_P2PLOSTART "P2P_LO_START"
+#define CMD_P2PLOSTOP "P2P_LO_STOP"
+#define CMD_TDLSCHANNELSWITCH "TDLS_CHANNEL_SWITCH"
+#define CMD_SETROAMOFFLOAD "SETROAMOFFLOAD"
+#define CMD_SETROAMOFFLAPLIST "SETROAMOFFLAPLIST"
+#ifdef CONFIG_SCSC_WLAN_LOW_LATENCY_MODE
+#define CMD_SET_LATENCY_MODE "SET_LATENCY_MODE"
+#define CMD_SET_POWER_MGMT "SET_POWER_MGMT"
+#endif
+
+#define CMD_SETBAND "SETBAND"
+#define CMD_GETBAND "GETBAND"
+#define CMD_SET_FCC_CHANNEL "SET_FCC_CHANNEL"
+
+#define CMD_FAKEMAC "FAKEMAC"
+
+#define CMD_GETBSSRSSI "GET_BSS_RSSI"
+#define CMD_GETBSSINFO "GETBSSINFO"
+#define CMD_GETSTAINFO "GETSTAINFO"
+#define CMD_GETASSOCREJECTINFO "GETASSOCREJECTINFO"
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+#define CMD_BEACON_RECV "BEACON_RECV"
+#endif
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+#define CMD_SET_ENHANCED_ARP_TARGET "SET_ENHANCED_ARP_TARGET"
+#define CMD_GET_ENHANCED_ARP_COUNTS "GET_ENHANCED_ARP_COUNTS"
+#endif
+
+/* Known commands from framework for which no handlers */
+#define CMD_AMPDU_MPDU "AMPDU_MPDU"
+#define CMD_BTCOEXMODE "BTCOEXMODE"
+#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
+#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
+#define CMD_CHANGE_RL "CHANGE_RL"
+#define CMD_INTERFACE_CREATE "INTERFACE_CREATE"
+#define CMD_INTERFACE_DELETE "INTERFACE_DELETE"
+#define CMD_SET_INDOOR_CHANNELS "SET_INDOOR_CHANNELS"
+#define CMD_GET_INDOOR_CHANNELS "GET_INDOOR_CHANNELS"
+#define CMD_LTECOEX "LTECOEX"
+#define CMD_MIRACAST "MIRACAST"
+#define CMD_RESTORE_RL "RESTORE_RL"
+#define CMD_RPSMODE "RPSMODE"
+#define CMD_SETCCXMODE "SETCCXMODE"
+#define CMD_SETDFSSCANMODE "SETDFSSCANMODE"
+#define CMD_SETJOINPREFER "SETJOINPREFER"
+#define CMD_SETSINGLEANT "SETSINGLEANT"
+#define CMD_SET_TX_POWER_CALLING "SET_TX_POWER_CALLING"
+
+#define CMD_DRIVERDEBUGDUMP "DEBUG_DUMP"
+#define CMD_DRIVERDEBUGCOMMAND "DEBUG_COMMAND"
+#define CMD_TESTFORCEHANG "SLSI_TEST_FORCE_HANG"
+#define CMD_GETREGULATORY "GETREGULATORY"
+
+#define CMD_SET_TX_POWER_SAR "SET_TX_POWER_SAR"
+#define CMD_GET_TX_POWER_SAR "GET_TX_POWER_SAR"
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+#define CMD_ENHANCED_PKT_FILTER "ENHANCED_PKT_FILTER"
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_SET_NUM_ANTENNAS
+#define CMD_SET_NUM_ANTENNAS "SET_NUM_ANTENNAS"
+#endif
+
+#define ROAMOFFLAPLIST_MIN 1
+#define ROAMOFFLAPLIST_MAX 100
+
+/* Map one ASCII hex digit to its numeric value (0-15). Any character that
+ * is not a hex digit deliberately maps to 0.
+ */
+static int slsi_parse_hex(unsigned char c)
+{
+	int value = 0;
+
+	if (c >= '0' && c <= '9')
+		value = c - '0';
+	else if (c >= 'a' && c <= 'f')
+		value = 10 + (c - 'a');
+	else if (c >= 'A' && c <= 'F')
+		value = 10 + (c - 'A');
+
+	return value;
+}
+
+/* Convert a "xx:xx:xx:xx:xx:xx" MAC string into its 6 raw bytes.
+ * Each octet starts every third character, so the separators are skipped
+ * simply by the stride.
+ */
+static void slsi_machexstring_to_macarray(char *mac_str, u8 *mac_arr)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		mac_arr[i] = (slsi_parse_hex(mac_str[3 * i]) << 4) |
+			     slsi_parse_hex(mac_str[3 * i + 1]);
+}
+
+/* SETSUSPENDMODE handler: parse the single-digit suspend mode that follows
+ * the command keyword, (re)program packet filters on every connected STA
+ * vif when the mode changes, then mirror the mode into the FAPI host state
+ * (LCD_ACTIVE bit) and push it to firmware.
+ */
+static ssize_t slsi_set_suspend_mode(struct net_device *dev, char *command)
+{
+	int vif;
+	struct netdev_vif *netdev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = netdev_vif->sdev;
+	int user_suspend_mode;
+	int previous_suspend_mode;
+	u8 host_state;
+	int ret = 0;
+
+	/* Mode is the ASCII digit right after "SETSUSPENDMODE " */
+	user_suspend_mode = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	previous_suspend_mode = sdev->device_config.user_suspend_mode;
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	if (user_suspend_mode != previous_suspend_mode) {
+		SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+		/* NOTE(review): vif indexing is 1-based and inclusive of
+		 * CONFIG_SCSC_WLAN_MAX_INTERFACES - confirm against
+		 * slsi_get_netdev_locked()'s expected range.
+		 */
+		for (vif = 1; vif <= CONFIG_SCSC_WLAN_MAX_INTERFACES; vif++) {
+			/* This 'dev' intentionally shadows the parameter for
+			 * the per-vif work inside the loop.
+			 */
+			struct net_device *dev = slsi_get_netdev_locked(sdev, vif);
+			struct netdev_vif *ndev_vif;
+
+			if (!dev)
+				continue;
+
+			ndev_vif = netdev_priv(dev);
+			SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+			if ((ndev_vif->activated) &&
+			    (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+			    (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+				if (user_suspend_mode)
+					ret = slsi_update_packet_filters(sdev, dev);
+				else
+					ret = slsi_clear_packet_filters(sdev, dev);
+				if (ret != 0)
+					SLSI_NET_ERR(dev, "Error in updating /clearing the packet filters,ret=%d", ret);
+			}
+
+			SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+		}
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	}
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	sdev->device_config.user_suspend_mode = user_suspend_mode;
+	host_state = sdev->device_config.host_state;
+
+	/* LCD is considered active exactly when not suspended */
+	if (!sdev->device_config.user_suspend_mode)
+		host_state = host_state | FAPI_HOSTSTATE_LCD_ACTIVE;
+	else
+		host_state = host_state & ~FAPI_HOSTSTATE_LCD_ACTIVE;
+	sdev->device_config.host_state = host_state;
+
+	ret = slsi_mlme_set_host_state(sdev, dev, host_state);
+	if (ret != 0)
+		SLSI_NET_ERR(dev, "Error in setting the Host State, ret=%d", ret);
+
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+	return ret;
+}
+
+static ssize_t slsi_set_p2p_oppps(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif;
+ struct slsi_dev *sdev;
+ u8 *p2p_oppps_param = NULL;
+ int offset = 0;
+ unsigned int ct_param;
+ unsigned int legacy_ps;
+ unsigned int opp_ps;
+ int readbyte = 0;
+ int result = 0;
+
+ p2p_oppps_param = command + strlen(CMD_P2PSETPS) + 1;
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* The NOA param shall be added only after P2P-VIF is active */
+ if ((!ndev_vif->activated) || (ndev_vif->iftype != NL80211_IFTYPE_P2P_GO)) {
+ SLSI_ERR_NODEV("P2P GO vif not activated\n");
+ result = -EINVAL;
+ goto exit;
+ }
+
+ sdev = ndev_vif->sdev;
+ readbyte = slsi_str_to_int(&p2p_oppps_param[offset], &legacy_ps);
+ if (!readbyte) {
+ SLSI_ERR(sdev, "ct_param: failed to read legacy_ps\n");
+ result = -EINVAL;
+ goto exit;
+ }
+ offset = offset + readbyte + 1;
+
+ readbyte = slsi_str_to_int(&p2p_oppps_param[offset], &opp_ps);
+ if (!readbyte) {
+ SLSI_ERR(sdev, "ct_param: failed to read ct_param\n");
+ result = -EINVAL;
+ goto exit;
+ }
+ offset = offset + readbyte + 1;
+
+ readbyte = slsi_str_to_int(&p2p_oppps_param[offset], &ct_param);
+ if (!readbyte) {
+ SLSI_ERR(sdev, "ct_param: failed to read ct_param\n");
+ result = -EINVAL;
+ goto exit;
+ }
+
+ if (opp_ps == 0)
+ result = slsi_mlme_set_ctwindow(sdev, dev, opp_ps);
+ else if (ct_param < (unsigned int)ndev_vif->ap.beacon_interval)
+ result = slsi_mlme_set_ctwindow(sdev, dev, ct_param);
+ else
+ SLSI_DBG1(sdev, SLSI_CFG80211, "p2p ct window = %d is out of range for beacon interval(%d)\n", ct_param, ndev_vif->ap.beacon_interval);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return result;
+}
+
+/* P2P_SET_NOA handler: parse "<noa_count> <interval> <duration>" following
+ * the command keyword and program a Notice-of-Absence schedule on an active
+ * P2P GO vif. The optional start-time token is deliberately ignored.
+ *
+ * Returns 0 on success or a negative errno on parse/state errors.
+ */
+static ssize_t slsi_p2p_set_noa_params(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif;
+	struct slsi_dev *sdev;
+	int result = 0;
+	u8 *noa_params = NULL;
+	int offset = 0;
+	int readbyte = 0;
+	unsigned int noa_count;
+	unsigned int duration;
+	unsigned int interval;
+
+	noa_params = command + strlen(CMD_P2PSETNOA) + 1;
+	ndev_vif = netdev_priv(dev);
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	/* The NOA param shall be added only after P2P-VIF is active */
+	if ((!ndev_vif->activated) || (ndev_vif->iftype != NL80211_IFTYPE_P2P_GO)) {
+		SLSI_ERR_NODEV("P2P GO vif not activated\n");
+		result = -EINVAL;
+		goto exit;
+	}
+
+	sdev = ndev_vif->sdev;
+	readbyte = slsi_str_to_int(&noa_params[offset], &noa_count);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "noa_count: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+
+	readbyte = slsi_str_to_int(&noa_params[offset], &interval);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "interval: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+
+	readbyte = slsi_str_to_int(&noa_params[offset], &duration);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "duration: failed to read a numeric value, at offset(%d)\n", offset);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	/* Skip start time */
+	result = slsi_mlme_set_p2p_noa(sdev, dev, noa_count, interval, duration);
+
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return result;
+}
+
+/* P2P_ECSA handler: parse "<channel> <bandwidth>", build the corresponding
+ * chandef (with HT40/HT20 workarounds) and request an extended channel
+ * switch on the P2P group interface.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static ssize_t slsi_p2p_ecsa(struct net_device *dev, char *command)
+{
+	struct netdev_vif *ndev_vif;
+	struct netdev_vif *group_dev_vif;
+	struct slsi_dev *sdev;
+	struct net_device *group_dev = NULL;
+	int result = 0;
+	u8 *ecsa_params = NULL;
+	int offset = 0;
+	int readbyte = 0;
+	unsigned int channel;
+	unsigned int bandwidth;
+	u16 center_freq = 0;
+	u16 chan_info = 0;
+	struct cfg80211_chan_def chandef;
+	enum nl80211_band band;
+	enum nl80211_channel_type chan_type = NL80211_CHAN_NO_HT;
+
+	ecsa_params = command + strlen(CMD_P2PECSA) + 1;
+	ndev_vif = netdev_priv(dev);
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	sdev = ndev_vif->sdev;
+	group_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
+	if (!group_dev) {
+		SLSI_INFO(sdev, "No Group net_dev found\n");
+		/* Fix: the previous 'return -EINVAL' here leaked
+		 * ndev_vif->vif_mutex; route through the unlock path.
+		 */
+		result = -EINVAL;
+		goto exit;
+	}
+	readbyte = slsi_str_to_int(&ecsa_params[offset], &channel);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "channel: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	readbyte = slsi_str_to_int(&ecsa_params[offset], &bandwidth);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "bandwidth: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	band = (channel <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	center_freq = ieee80211_channel_to_frequency(channel, band);
+	SLSI_DBG1(sdev, SLSI_CFG80211, "p2p ecsa_params (center_freq)= (%d)\n", center_freq);
+	chandef.chan = ieee80211_get_channel(sdev->wiphy, center_freq);
+	chandef.width = (band == NL80211_BAND_2GHZ) ? NL80211_CHAN_WIDTH_20_NOHT : NL80211_CHAN_WIDTH_80;
+
+#ifndef SSB_4963_FIXED
+	/* Default HT40 configuration */
+	if (sdev->band_5g_supported) {
+		if (bandwidth == 80) {
+			chandef.width = NL80211_CHAN_WIDTH_40;
+			bandwidth = 40;
+			if (channel == 36 || channel == 44 || channel == 149 || channel == 157)
+				chan_type = NL80211_CHAN_HT40PLUS;
+			else
+				chan_type = NL80211_CHAN_HT40MINUS;
+		}
+	}
+#endif
+	/* Channel 165 only supports 20 MHz */
+	if (channel == 165 && bandwidth != 20) {
+		bandwidth = 20;
+		chan_type = NL80211_CHAN_HT20;
+	}
+	cfg80211_chandef_create(&chandef, chandef.chan, chan_type);
+	chan_info = slsi_get_chann_info(sdev, &chandef);
+	if (bandwidth != 20)
+		center_freq = slsi_get_center_freq1(sdev, chan_info, center_freq);
+	group_dev_vif = netdev_priv(group_dev);
+	SLSI_MUTEX_LOCK(group_dev_vif->vif_mutex);
+	result = slsi_mlme_channel_switch(sdev, group_dev, center_freq, chan_info);
+	SLSI_MUTEX_UNLOCK(group_dev_vif->vif_mutex);
+
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return result;
+}
+
+/* Cache vendor IEs for an AP/P2P-GO vif and push them to firmware for the
+ * given frame @purpose (beacon / probe response / association response).
+ *
+ * Returns 0 on success (also when the vif is not yet activated - the IEs are
+ * deliberately ignored then, see comment below), negative errno otherwise.
+ */
+static ssize_t slsi_ap_vendor_ies_write(struct slsi_dev *sdev, struct net_device *dev, u8 *ie,
+					size_t ie_len, u16 purpose)
+{
+	u8 *vendor_ie = NULL;
+	int result = 0;
+	struct netdev_vif *ndev_vif;
+
+	ndev_vif = netdev_priv(dev);
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	/* During AP start before mlme_start_req, supplicant calls set_ap_wps_ie() to send the vendor IEs for each
+	 * beacon, probe response and association response. As we get all of them in mlme_start_req, ignoring the
+	 * same which comes before adding GO VIF
+	 */
+	if (!ndev_vif->activated) {
+		SLSI_DBG1(sdev, SLSI_CFG80211, "vif not activated\n");
+		result = 0;
+		goto exit;
+	}
+	if (!(ndev_vif->iftype == NL80211_IFTYPE_P2P_GO || ndev_vif->iftype == NL80211_IFTYPE_AP)) {
+		SLSI_ERR(sdev, "Not AP or P2P interface. interfaceType:%d\n", ndev_vif->iftype);
+		result = -EINVAL;
+		goto exit;
+	}
+
+	/* kmemdup() replaces the open-coded kmalloc()+memcpy() pair */
+	vendor_ie = kmemdup(ie, ie_len, GFP_KERNEL);
+	if (!vendor_ie) {
+		SLSI_ERR(sdev, "kmemdup failed\n");
+		result = -ENOMEM;
+		goto exit;
+	}
+
+	slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+	result = slsi_ap_prepare_add_info_ies(ndev_vif, vendor_ie, ie_len);
+
+	if (result == 0)
+		result = slsi_mlme_add_info_elements(sdev, dev, purpose, ndev_vif->ap.add_info_ies, ndev_vif->ap.add_info_ies_len);
+
+	slsi_clear_cached_ies(&ndev_vif->ap.add_info_ies, &ndev_vif->ap.add_info_ies_len);
+	kfree(vendor_ie);
+
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return result;
+}
+
+/* SET_AP_P2P_WPS_IE handler: parse "<frametype> <iftype>" then dispatch the
+ * trailing raw IE bytes either to the P2P device (probe response only) or to
+ * the AP/P2P-GO vif for the corresponding frame purpose.
+ */
+static ssize_t slsi_set_ap_p2p_wps_ie(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	int readbyte = 0;
+	int offset = 0;
+	int result = 0;
+	enum if_type {
+		IF_TYPE_NONE,
+		IF_TYPE_P2P_DEVICE,
+		IF_TYPE_AP_P2P
+	} iftype = IF_TYPE_NONE;
+	enum frame_type {
+		FRAME_TYPE_NONE,
+		FRAME_TYPE_BEACON,
+		FRAME_TYPE_PROBE_RESPONSE,
+		FRAME_TYPE_ASSOC_RESPONSE
+	} frametype = FRAME_TYPE_NONE;
+	u8 *params = command + strlen(CMD_SETAPP2PWPSIE) + 1;
+	int params_len = buf_len - strlen(CMD_SETAPP2PWPSIE) - 1;
+
+	/* Enums are filled via int casts from the numeric command tokens */
+	readbyte = slsi_str_to_int(&params[offset], (int *)&frametype);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "frametype: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	readbyte = slsi_str_to_int(&params[offset], (int *)&iftype);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "iftype: failed to read a numeric value\n");
+		result = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	params_len = params_len - offset;
+
+	SLSI_NET_DBG2(dev, SLSI_NETDEV,
+		      "command=%s, frametype=%d, iftype=%d, total buf_len=%d, params_len=%d\n",
+		      command, frametype, iftype, buf_len, params_len);
+
+	/* check the net device interface type */
+	if (iftype == IF_TYPE_P2P_DEVICE) {
+		u8 *probe_resp_ie = NULL; /* params+offset; */
+
+		/* NOTE(review): this rejection path returns 0 (result is
+		 * still 0) - confirm that success is the intended status.
+		 */
+		if (frametype != FRAME_TYPE_PROBE_RESPONSE) {
+			SLSI_NET_ERR(dev, "Wrong frame type received\n");
+			goto exit;
+		}
+		probe_resp_ie = kmalloc(params_len, GFP_KERNEL);
+		if (probe_resp_ie == NULL) {
+			SLSI_ERR(sdev, "Malloc for IEs failed\n");
+			return -ENOMEM;
+		}
+
+		memcpy(probe_resp_ie, params+offset, params_len);
+
+		/* NOTE(review): assumes slsi_p2p_dev_probe_rsp_ie() takes
+		 * ownership of probe_resp_ie; otherwise this allocation
+		 * leaks - confirm against its implementation.
+		 */
+		return slsi_p2p_dev_probe_rsp_ie(sdev, dev, probe_resp_ie, params_len);
+	} else if (iftype == IF_TYPE_AP_P2P) {
+		if (frametype == FRAME_TYPE_BEACON)
+			return slsi_ap_vendor_ies_write(sdev, dev, params + offset, params_len, FAPI_PURPOSE_BEACON);
+		else if (frametype == FRAME_TYPE_PROBE_RESPONSE)
+			return slsi_ap_vendor_ies_write(sdev, dev, params + offset, params_len,
+							FAPI_PURPOSE_PROBE_RESPONSE);
+		else if (frametype == FRAME_TYPE_ASSOC_RESPONSE)
+			return slsi_ap_vendor_ies_write(sdev, dev, params + offset, params_len,
+							FAPI_PURPOSE_ASSOCIATION_RESPONSE);
+	}
+exit:
+	return result;
+}
+
+/**
+ * P2P_LO_START handling.
+ * Add unsync vif, register for action frames and set the listen channel.
+ * The probe response IEs would be configured later.
+ */
+/* P2P_LO_START handler: parse "<channel> <duration> <interval> <count>",
+ * bring up the P2P unsync vif if required, register for public action
+ * frames, program the listen channel and arm the delayed vif teardown.
+ */
+static int slsi_p2p_lo_start(struct net_device *dev, char *command)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	struct ieee80211_channel *chan = NULL;
+	char *lo_params = NULL;
+	unsigned int channel, duration, interval, count;
+	int ret = 0;
+	int freq;
+	int readbyte = 0;
+	enum nl80211_band band;
+	int offset = 0;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	/* Reject LO if other operations are in progress. Back to back LO can be received.
+	 * In such a case, if state is Listening then the listen offload flag should be true else
+	 * reject the request as the Listening state would then be due to ROC.
+	 */
+	if ((sdev->p2p_state == P2P_SCANNING) || (sdev->p2p_state > P2P_LISTENING) ||
+	    ((sdev->p2p_state == P2P_LISTENING) && (!ndev_vif->unsync.listen_offload))) {
+		SLSI_NET_ERR(dev, "Reject LO due to ongoing P2P operation (state: %s)\n", slsi_p2p_state_text(sdev->p2p_state));
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Four space-separated numeric parameters follow the keyword */
+	lo_params = command + strlen(CMD_P2PLOSTART) + 1;
+	readbyte = slsi_str_to_int(&lo_params[offset], &channel);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "channel: failed to read a numeric value\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	readbyte = slsi_str_to_int(&lo_params[offset], &duration);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "duration: failed to read a numeric value\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	readbyte = slsi_str_to_int(&lo_params[offset], &interval);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "interval: failed to read a numeric value\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	offset = offset + readbyte + 1;
+	readbyte = slsi_str_to_int(&lo_params[offset], &count);
+	if (!readbyte) {
+		SLSI_ERR(sdev, "count: failed to read a numeric value\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Bring up the unsync vif on first use; a later failure tears it
+	 * back down via exit_with_vif_deactivate.
+	 */
+	if (!ndev_vif->activated) {
+		ret = slsi_mlme_add_vif(sdev, dev, dev->dev_addr, dev->dev_addr);
+		if (ret != 0) {
+			SLSI_NET_ERR(dev, "Unsync vif addition failed\n");
+			goto exit;
+		}
+
+		ndev_vif->activated = true;
+		ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+		SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
+
+		ret = slsi_mlme_register_action_frame(sdev, dev, SLSI_ACTION_FRAME_PUBLIC, SLSI_ACTION_FRAME_PUBLIC);
+		if (ret != 0) {
+			SLSI_NET_ERR(dev, "Action frame registration for unsync vif failed\n");
+			goto exit_with_vif_deactivate;
+		}
+	}
+
+	/* Send set_channel irrespective of the values of LO parameters as they are not cached
+	 * in driver to check whether they have changed.
+	 */
+	band = (channel <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	freq = ieee80211_channel_to_frequency(channel, band);
+	chan = ieee80211_get_channel(sdev->wiphy, freq);
+	if (!chan) {
+		SLSI_NET_ERR(dev, "Incorrect channel: %u - Listen Offload failed\n", channel);
+		ret = -EINVAL;
+		goto exit_with_vif_deactivate;
+	}
+
+	ret = slsi_mlme_set_channel(sdev, dev, chan, duration, interval, count);
+	if (ret != 0) {
+		SLSI_NET_ERR(dev, "Set channel for unsync vif failed\n");
+		goto exit_with_vif_deactivate;
+	} else {
+		ndev_vif->chan = chan;
+		ndev_vif->driver_channel = chan->hw_value;
+	}
+	/* If framework sends the values for listen offload as 1,500,5000 and 6,
+	 * where 5000ms (5 seconds) is the listen interval which needs to be repeated
+	 * 6 times(i.e. count). Hence listen_end_ind comes after 30 seconds
+	 * (6 * 5000 = 30000ms) Hence host should wait 31 seconds to delete the
+	 * unsync VIF for one such P2P listen offload request.
+	 */
+	slsi_p2p_queue_unsync_vif_del_work(ndev_vif, interval * count + 1000);
+	ndev_vif->unsync.listen_offload = true;
+	SLSI_P2P_STATE_CHANGE(ndev_vif->sdev, P2P_LISTENING);
+	goto exit;
+
+exit_with_vif_deactivate:
+	slsi_p2p_vif_deactivate(sdev, dev, true);
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return ret;
+}
+
+/**
+ * P2P_LO_STOP handling.
+ * Clear listen offload flag.
+ * Delete the P2P unsynchronized vif.
+ */
+/* P2P_LO_STOP handler: clear the listen-offload flag and, while firmware is
+ * still in the listening state, tear down the P2P unsync vif.
+ */
+static int slsi_p2p_lo_stop(struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	sdev = ndev_vif->sdev;
+
+	/* LO_STOP is only expected while a listen offload is running */
+	WARN_ON((!ndev_vif->unsync.listen_offload) || (sdev->p2p_state != P2P_LISTENING));
+
+	ndev_vif->unsync.listen_offload = false;
+
+	/* Deactivating the p2p unsynchronized vif */
+	if (sdev->p2p_state == P2P_LISTENING)
+		slsi_p2p_vif_deactivate(sdev, ndev_vif->wdev.netdev, true);
+
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+	return 0;
+}
+
+/* Record (add) or clear (remove) the configured RX filter number in the
+ * device config. Always succeeds.
+ */
+static ssize_t slsi_rx_filter_num_write(struct net_device *dev, int add_remove, int filter_num)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+
+	sdev->device_config.rx_filter_num = add_remove ? filter_num : 0;
+
+	return 0;
+}
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if !defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION < 90000)
+static ssize_t slsi_create_interface(struct net_device *dev, char *intf_name)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct net_device *ap_dev;
+
+ ap_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
+ if (ap_dev && (strcmp(ap_dev->name, intf_name) == 0)) {
+ SLSI_NET_ERR(dev, "%s already created\n", intf_name);
+ return -EINVAL;
+ }
+
+ ap_dev = slsi_dynamic_interface_create(sdev->wiphy, intf_name, NL80211_IFTYPE_AP, NULL);
+ if (ap_dev) {
+ sdev->netdev_ap = ap_dev;
+ return 0;
+ }
+
+ SLSI_NET_ERR(dev, "Failed to create AP interface %s\n", intf_name);
+ return -EINVAL;
+}
+
+static ssize_t slsi_delete_interface(struct net_device *dev, char *intf_name)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+
+ if (strcmp(intf_name, CONFIG_SCSC_AP_INTERFACE_NAME) == 0)
+ dev = sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN];
+
+ if (!dev) {
+ SLSI_WARN(sdev, "AP dev is NULL");
+ return -EINVAL;
+ }
+ ndev_vif = netdev_priv(dev);
+
+ if (ndev_vif->activated)
+ slsi_stop_net_dev(sdev, dev);
+ slsi_netif_remove_rtlnl_locked(sdev, dev);
+
+ sdev->netdev_ap = NULL;
+ SLSI_DBG1_NODEV(SLSI_MLME, "Successfully deleted AP interface %s ", intf_name);
+
+ return 0;
+}
+#endif
+
+static ssize_t slsi_set_indoor_channels(struct net_device *dev, char *arg)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int readbyte = 0;
+ int offset = 0;
+ int res;
+ int ret;
+
+ readbyte = slsi_str_to_int(&arg[offset], &res);
+
+ ret = slsi_set_mib_wifi_sharing_5ghz_channel(sdev, SLSI_PSID_UNIFI_WI_FI_SHARING5_GHZ_CHANNEL,
+ res, offset, readbyte, arg);
+
+ return ret;
+}
+
+static ssize_t slsi_get_indoor_channels(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ char op[150] = "";
+ char int_string[30] = "";
+ int i;
+ int len = 0;
+
+ SLSI_DBG1_NODEV(SLSI_MLME, "GET_INDOOR_CHANNELS : %d ", sdev->num_5g_restricted_channels);
+
+ for (i = 0; i < sdev->num_5g_restricted_channels; i++) {
+ sprintf(int_string, "%d", sdev->wifi_sharing_5g_restricted_channels[i]);
+ strcat(op, int_string);
+ strcat(op, " ");
+ }
+
+ len = snprintf(command, buf_len, "%d %s", sdev->num_5g_restricted_channels, op);
+
+ return len;
+}
+#endif
+static ssize_t slsi_set_country_rev(struct net_device *dev, char *country_code)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ char alpha2_rev[4];
+ int status = 0;
+
+ if (!country_code)
+ return -EINVAL;
+
+ memcpy(alpha2_rev, country_code, 4);
+
+ status = slsi_set_country_update_regd(sdev, alpha2_rev, 4);
+
+ return status;
+}
+
+static ssize_t slsi_get_country_rev(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ u8 buf[5];
+ int len = 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ len = snprintf(command, buf_len, "%s %c%c %d", CMD_GETCOUNTRYREV,
+ sdev->device_config.domain_info.regdomain->alpha2[0],
+ sdev->device_config.domain_info.regdomain->alpha2[1],
+ sdev->device_config.domain_info.regdomain->dfs_region);
+
+ return len;
+}
+
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+static ssize_t slsi_roam_scan_trigger_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_RSSI_ROAM_SCAN_TRIGGER, mib_value);
+}
+
+static ssize_t slsi_roam_scan_trigger_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_RSSI_ROAM_SCAN_TRIGGER, &mib_value);
+ if (res)
+ return res;
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMTRIGGER, mib_value);
+ return res;
+}
+
+static ssize_t slsi_roam_delta_trigger_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_DELTA_TRIGGER, mib_value);
+}
+
+static ssize_t slsi_roam_delta_trigger_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_DELTA_TRIGGER, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMDELTA, mib_value);
+ return res;
+}
+
+static ssize_t slsi_cached_channel_scan_period_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_CACHED_CHANNEL_SCAN_PERIOD, mib_value * 1000000);
+}
+
+static ssize_t slsi_cached_channel_scan_period_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_CACHED_CHANNEL_SCAN_PERIOD, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMSCANPERIOD, mib_value / 1000000);
+
+ return res;
+}
+
+static ssize_t slsi_full_roam_scan_period_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_FULL_ROAM_SCAN_PERIOD, mib_value * 1000000);
+}
+
+static ssize_t slsi_full_roam_scan_period_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_FULL_ROAM_SCAN_PERIOD, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETFULLROAMSCANPERIOD, mib_value / 1000000);
+
+ return res;
+}
+
+static ssize_t slsi_roam_scan_max_active_channel_time_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_MAX_ACTIVE_CHANNEL_TIME, mib_value);
+}
+
+static ssize_t slsi_roam_scan_max_active_channel_time_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_MAX_ACTIVE_CHANNEL_TIME, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETSCANCHANNELTIME, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_roam_scan_probe_interval_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_NPROBE, mib_value);
+}
+
+static ssize_t slsi_roam_scan_probe_interval_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_NPROBE, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETSCANNPROBES, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_roam_mode_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ if (slsi_is_rf_test_mode_enabled()) {
+ SLSI_DBG1_NODEV(SLSI_MLME, "SLSI_PSID_UNIFI_ROAM_MODE is not supported because of rf test mode.\n");
+ return -ENOTSUPP;
+ }
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, mib_value);
+}
+
+static ssize_t slsi_roam_mode_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_MODE, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMMODE, mib_value);
+
+ return res;
+}
+
+static int slsi_roam_offload_ap_list(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct cfg80211_acl_data *mac_acl;
+ int ap_count = 0;
+ int buf_pos = 0;
+ int i, r;
+ int malloc_len;
+
+ /* command format:
+ * x,aa:bb:cc:dd:ee:ff,xx:yy:zz:qq:ww:ee...
+ * x = 1 to 100
+ * each mac address id 17 bytes and every mac address is separated by ','
+ */
+ buf_pos = slsi_str_to_int(command, &ap_count);
+ if (ap_count < ROAMOFFLAPLIST_MIN || ap_count > ROAMOFFLAPLIST_MAX) {
+ SLSI_ERR(sdev, "ap_count: %d\n", ap_count);
+ return -EINVAL;
+ }
+ buf_pos++;
+ /* each mac address takes 18 bytes(17 for mac address and 1 for ',') except the last one.
+ * the last mac address is just 17 bytes(without a coma)
+ */
+ if ((buf_len - buf_pos) < (ap_count*18 - 1)) {
+ SLSI_ERR(sdev, "Invalid buff len:%d for %d APs\n", (buf_len - buf_pos), ap_count);
+ return -EINVAL;
+ }
+ malloc_len = sizeof(struct cfg80211_acl_data) + sizeof(struct mac_address) * ap_count;
+ mac_acl = kmalloc(malloc_len, GFP_KERNEL);
+ if (!mac_acl) {
+ SLSI_ERR(sdev, "MEM fail for size:%ld\n", sizeof(struct cfg80211_acl_data) + sizeof(struct mac_address) * ap_count);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ap_count; i++) {
+ slsi_machexstring_to_macarray(&command[buf_pos], mac_acl->mac_addrs[i].addr);
+ buf_pos += 18;
+ SLSI_DBG3_NODEV(SLSI_MLME, "[%pM]", mac_acl->mac_addrs[i].addr);
+ }
+ mac_acl->acl_policy = NL80211_ACL_POLICY_DENY_UNLESS_LISTED;
+ mac_acl->n_acl_entries = ap_count;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ r = slsi_mlme_set_acl(sdev, dev, ndev_vif->ifnum, mac_acl);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ kfree(mac_acl);
+ return r;
+}
+
+static ssize_t slsi_roam_scan_band_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_BAND, mib_value);
+}
+
+static ssize_t slsi_roam_scan_band_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_BAND, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMINTRABAND, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_freq_band_write(struct net_device *dev, uint band)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+
+ slsi_band_update(sdev, band);
+ /* Convert to correct Mib value (intra_band:1, all_band:2) */
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_BAND, (band == SLSI_FREQ_BAND_AUTO) ? 2 : 1);
+}
+
+static ssize_t slsi_freq_band_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ char buf[128];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ memset(buf, '\0', 128);
+ pos += scnprintf(buf + pos, bufsz - pos, "Band %d", sdev->device_config.supported_band);
+
+ buf[pos] = '\0';
+ memcpy(command, buf, pos + 1);
+
+ return pos;
+}
+
+static ssize_t slsi_roam_scan_control_write(struct net_device *dev, int mode)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+ if (mode == 0 || mode == 1) {
+ sdev->device_config.roam_scan_mode = mode;
+ } else {
+ SLSI_ERR(sdev, "Invalid roam Mode: Must be 0 or, 1 Not '%c'\n", mode);
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_CONTROL, sdev->device_config.roam_scan_mode);
+}
+
+static ssize_t slsi_roam_scan_control_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_CONTROL, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETROAMSCANCONTROL, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_roam_scan_home_time_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_HOME_TIME, mib_value);
+}
+
+static ssize_t slsi_roam_scan_home_time_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_HOME_TIME, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETSCANHOMETIME, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_roam_scan_home_away_time_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+
+ slsi_str_to_int(command, &mib_value);
+ return slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_SCAN_HOME_AWAY_TIME, mib_value);
+}
+
+static ssize_t slsi_roam_scan_home_away_time_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mib_value = 0;
+ int res;
+
+ res = slsi_get_mib_roam(sdev, SLSI_PSID_UNIFI_ROAM_SCAN_HOME_AWAY_TIME, &mib_value);
+ if (res)
+ return res;
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETSCANHOMEAWAYTIME, mib_value);
+
+ return res;
+}
+
+static ssize_t slsi_roam_scan_channels_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int result = 0;
+ int i, channel_count;
+ int offset = 0;
+ int readbyte = 0;
+ int channels[MAX_CHANNEL_LIST];
+
+ readbyte = slsi_str_to_int(command, &channel_count);
+
+ if (!readbyte) {
+ SLSI_ERR(sdev, "channel count: failed to read a numeric value");
+ return -EINVAL;
+ }
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+ if (channel_count > MAX_CHANNEL_LIST)
+ channel_count = MAX_CHANNEL_LIST;
+ sdev->device_config.wes_roam_scan_list.n = channel_count;
+
+ for (i = 0; i < channel_count; i++) {
+ offset = offset + readbyte + 1;
+ readbyte = slsi_str_to_int(&command[offset], &channels[i]);
+ if (!readbyte) {
+ SLSI_ERR(sdev, "failed to read a numeric value\n");
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return -EINVAL;
+ }
+
+ sdev->device_config.wes_roam_scan_list.channels[i] = channels[i];
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ result = slsi_mlme_set_cached_channels(sdev, dev, channel_count, sdev->device_config.wes_roam_scan_list.channels);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return result;
+}
+
+static ssize_t slsi_roam_scan_channels_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ char channel_buf[128] = { 0 };
+ int pos = 0;
+ int i;
+ int channel_count = 0;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ channel_count = sdev->device_config.wes_roam_scan_list.n;
+ pos = scnprintf(channel_buf, sizeof(channel_buf), "%s %d", CMD_GETROAMSCANCHANNELS, channel_count);
+ for (i = 0; i < channel_count; i++)
+ pos += scnprintf(channel_buf + pos, sizeof(channel_buf) - pos, " %d", sdev->device_config.wes_roam_scan_list.channels[i]);
+ channel_buf[pos] = '\0';
+
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ memcpy(command, channel_buf, pos + 1);
+
+ return pos;
+}
+
+static ssize_t slsi_okc_mode_write(struct net_device *dev, int mode)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+ if (mode == 0 || mode == 1) {
+ sdev->device_config.okc_mode = mode;
+ } else {
+ SLSI_ERR(sdev, "Invalid OKC Mode: Must be 0 or, 1 Not '%c'\n", mode);
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return 0;
+}
+
+static ssize_t slsi_okc_mode_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int okc_mode;
+ int res;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ okc_mode = sdev->device_config.okc_mode;
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETOKCMODE, okc_mode);
+
+ return res;
+}
+
+static ssize_t slsi_wes_mode_write(struct net_device *dev, int mode)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int result = 0;
+ u32 action_frame_bmap = SLSI_STA_ACTION_FRAME_BITMAP;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+ if (mode == 0 || mode == 1) {
+ sdev->device_config.wes_mode = mode;
+ } else {
+ SLSI_ERR(sdev, "Invalid WES Mode: Must be 0 or 1 Not '%c'\n", mode);
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return -EINVAL;
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((ndev_vif->activated) && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ if (sdev->device_config.wes_mode)
+ action_frame_bmap |= SLSI_ACTION_FRAME_VENDOR_SPEC;
+
+ result = slsi_mlme_register_action_frame(sdev, dev, action_frame_bmap, action_frame_bmap);
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return result;
+}
+
+static ssize_t slsi_wes_mode_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int wes_mode;
+ int res;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ wes_mode = sdev->device_config.wes_mode;
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ res = snprintf(command, buf_len, "%s %d", CMD_GETWESMODE, wes_mode);
+
+ return res;
+}
+#endif
+
+static ssize_t slsi_set_pmk(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ u8 pmk[33];
+ int result = 0;
+
+ memcpy((u8 *)pmk, command + strlen("SET_PMK "), 32);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ result = slsi_mlme_set_pmk(sdev, dev, pmk, 32);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return result;
+}
+
+static ssize_t slsi_auto_chan_read(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int ap_auto_chan;
+ int result = 0;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ ap_auto_chan = sdev->device_config.ap_auto_chan;
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ result = snprintf(command, buf_len, "%d\n", ap_auto_chan);
+ return result;
+}
+
/* Auto channel selection (ACS) write handler.
 * Parses the candidate channel count from @command, builds the list of valid
 * 2.4GHz channels, then either mirrors the concurrent STA channel (Wi-Fi
 * sharing builds) or runs an auto-channel-selection scan.
 */
static ssize_t slsi_auto_chan_write(struct net_device *dev, char *command, int buf_len)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;
	int n_channels;
	struct ieee80211_channel *channels[SLSI_NO_OF_SCAN_CHANLS_FOR_AUTO_CHAN_MAX] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
	int count_channels;
	int offset;
	int chan;
#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
	struct net_device *sta_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
	struct netdev_vif *ndev_sta_vif = netdev_priv(sta_dev);
	int sta_frequency;
#endif

	/* NOTE(review): assumes the numeric argument starts at command[21],
	 * i.e. a fixed-length command prefix - confirm against the dispatcher. */
	offset = slsi_str_to_int(&command[21], &n_channels);
	if (!offset) {
		SLSI_ERR(sdev, "channel count: failed to read a numeric value");
		return -EINVAL;
	}

	if (n_channels > SLSI_NO_OF_SCAN_CHANLS_FOR_AUTO_CHAN_MAX) {
		SLSI_ERR(sdev, "channel count:%d > SLSI_NO_OF_SCAN_CHANLS_FOR_AUTO_CHAN_MAX:%d\n", n_channels, SLSI_NO_OF_SCAN_CHANLS_FOR_AUTO_CHAN_MAX);
		return -EINVAL;
	}

	/* If "1 6 11" are passed, scan all "1 - 14" channels. If "1 6" are passed, scan "1 - 9" channels */
	if (n_channels == 3)
		n_channels = 14;
	else if (n_channels == 2)
		n_channels = 9;
	count_channels = 0;
	/* Collect the wiphy's ieee80211_channel pointers for channels
	 * 1..n_channels, skipping any the wiphy does not support. */
	for (chan = 1; chan <= n_channels; chan++) {
		int center_freq;

		center_freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
		channels[count_channels] = ieee80211_get_channel(sdev->wiphy, center_freq);
		if (!channels[count_channels])
			SLSI_WARN(sdev, "channel number:%d invalid\n", chan);
		else
			count_channels++;

	}

	SLSI_DBG3(sdev, SLSI_INIT_DEINIT, "Number of channels for autochannel selection= %d", count_channels);

	/* Reset any previous selection before choosing a new channel. */
	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
	sdev->device_config.ap_auto_chan = 0;
	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);

#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
/* If a STA connection exists (or is being established), reuse its channel for
 * the AP instead of scanning: the same channel when the STA is on 2.4GHz,
 * otherwise channel 1. */
if ((ndev_sta_vif->activated) && (ndev_sta_vif->vif_type == FAPI_VIFTYPE_STATION) &&
	(ndev_sta_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTING ||
	ndev_sta_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
	sta_frequency = ndev_sta_vif->chan->center_freq;
	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
	if ((sta_frequency / 1000) == 2)
		sdev->device_config.ap_auto_chan = ieee80211_frequency_to_channel(sta_frequency);
	else
		sdev->device_config.ap_auto_chan = 1;
	SLSI_INFO(sdev, "Channel selected = %d", sdev->device_config.ap_auto_chan);
	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
	return 0;
}
#endif /*wifi sharing*/
	return slsi_auto_chan_select_scan(sdev, count_channels, channels);
}
+
+static ssize_t slsi_reassoc_write(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ u8 bssid[6] = { 0 };
+ int channel;
+ int freq;
+ enum nl80211_band band = NL80211_BAND_2GHZ;
+ int r = 0;
+
+ if (command[17] != ' ') {
+ SLSI_ERR(sdev, "Invalid Format '%s' '%c'\n", command, command[17]);
+ return -EINVAL;
+ }
+
+ command[17] = '\0';
+
+ slsi_machexstring_to_macarray(command, bssid);
+
+ if (!slsi_str_to_int(&command[18], &channel)) {
+ SLSI_ERR(sdev, "Invalid channel string: '%s'\n", &command[18]);
+ return -EINVAL;
+ }
+
+ if (channel > 14)
+ band = NL80211_BAND_5GHZ;
+ freq = (u16)ieee80211_channel_to_frequency(channel, band);
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ r = slsi_mlme_roam(sdev, dev, bssid, freq);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
/* SENDACTIONFRAME handler.
 * Command layout: "<bssid> <channel> <dwell_time> <hex-payload>".
 * Builds an 802.11 action frame (header + hex-decoded payload) addressed to
 * the given BSSID and sends it via mlme. Parsing mutates @command in place.
 * Only permitted on a connected STA vif.
 */
static ssize_t slsi_send_action_frame(struct net_device *dev, char *command, int buf_len)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;
	char *temp;
	u8 bssid[6] = { 0 };
	int channel = 0;
	int freq = 0;
	enum nl80211_band band = NL80211_BAND_2GHZ;
	int r = 0;
	u16 host_tag = slsi_tx_mgmt_host_tag(sdev);
	u32 dwell_time;
	struct ieee80211_hdr *hdr;
	u8 *buf = NULL;		/* hex-decoded payload */
	u8 *final_buf = NULL;	/* 802.11 header + payload */
	u8 temp_byte;
	int len = 0;
	int final_length = 0;
	int i = 0, j = 0;
	char *pos;

	/* Frame can only be sent from a connected STA vif. */
	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
	    (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
		SLSI_ERR(sdev, "Not a STA vif or status is not CONNECTED\n");
		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
		return -EINVAL;
	}
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

	/* First 17 chars are the BSSID; terminate temporarily to parse it. */
	command[17] = '\0';
	slsi_machexstring_to_macarray(command, bssid);

	command[17] = ' ';
	pos = strchr(command, ' ');
	if (pos == NULL)
		return -EINVAL;
	*pos++ = '\0';

	if (!slsi_str_to_int(pos, &channel)) {
		SLSI_ERR(sdev, "Invalid channel string: '%s'\n", pos);
		return -EINVAL;
	}
	pos++;

	/* Channels above 14 are 5GHz. */
	if (channel > 14)
		band = NL80211_BAND_5GHZ;
	freq = (u16)ieee80211_channel_to_frequency(channel, band);

	pos = strchr(pos, ' ');
	if (pos == NULL)
		return -EINVAL;
	*pos++ = '\0';

	if (!slsi_str_to_int(pos, &dwell_time)) {
		SLSI_ERR(sdev, "Invalid dwell time string: '%s'\n", pos);
		return -EINVAL;
	}

	pos = strchr(pos, ' ');
	if (pos == NULL)
		return -EINVAL;
	pos++;

	/*Length of data*/
	temp = pos;
	while (*temp != '\0')
		temp++;
	len = temp - pos;

	if (len <= 0)
		return -EINVAL;
	/* Two hex chars per byte; round up for an odd trailing nibble. */
	buf = kmalloc((len + 1) / 2, GFP_KERNEL);

	if (buf == NULL) {
		SLSI_ERR(sdev, "Malloc failed\n");
		return -ENOMEM;
	}
	/*We receive a char buffer, convert to hex*/
	temp = pos;
	for (i = 0, j = 0; j < len; j += 2) {
		if (j + 1 == len)
			temp_byte = slsi_parse_hex(temp[j]);
		else
			temp_byte = slsi_parse_hex(temp[j]) << 4 | slsi_parse_hex(temp[j + 1]);
		buf[i++] = temp_byte;
	}
	len = i;

	final_length = len + IEEE80211_HEADER_SIZE;
	final_buf = kmalloc(final_length, GFP_KERNEL);
	if (final_buf == NULL) {
		SLSI_ERR(sdev, "Malloc failed\n");
		kfree(buf);
		return -ENOMEM;
	}

	/* Management header: DA = BSSID, SA = our MAC, addr3 = BSSID. */
	hdr = (struct ieee80211_hdr *)final_buf;
	hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_ACTION);
	SLSI_ETHER_COPY(hdr->addr1, bssid);
	SLSI_ETHER_COPY(hdr->addr2, sdev->hw_addr);
	SLSI_ETHER_COPY(hdr->addr3, bssid);
	memcpy(final_buf + IEEE80211_HEADER_SIZE, buf, len);

	kfree(buf);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	/* dwell_time * 1000: presumably ms from userspace to us for fw - confirm. */
	r = slsi_mlme_send_frame_mgmt(sdev, dev, final_buf, final_length, FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME, FAPI_MESSAGETYPE_IEEE80211_ACTION, host_tag, SLSI_FREQ_HOST_TO_FW(freq), dwell_time * 1000, 0);

	kfree(final_buf);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return r;
}
+
+static ssize_t slsi_setting_max_sta_write(struct net_device *dev, int sta_number)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct slsi_mib_data mib_data = { 0, NULL };
+ int result = 0;
+
+ if (sta_number > 10 || sta_number < 1)
+ return -EINVAL;
+ result = slsi_mib_encode_uint(&mib_data, SLSI_PSID_UNIFI_MAX_CLIENT, sta_number, 0);
+ if ((result != SLSI_MIB_STATUS_SUCCESS) || (mib_data.dataLength == 0))
+ return -ENOMEM;
+ result = slsi_mlme_set(sdev, dev, mib_data.data, mib_data.dataLength);
+ if (result != 0)
+ SLSI_ERR(sdev, "max_sta: mlme_set_req failed: Result code: %d\n", result);
+ kfree(mib_data.data);
+
+ return result;
+}
+
+static ssize_t slsi_country_write(struct net_device *dev, char *country_code)
+{
+ struct netdev_vif *netdev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = netdev_vif->sdev;
+ char alpha2_code[SLSI_COUNTRY_CODE_LEN];
+ int status;
+
+ if (strlen(country_code) < 2)
+ return -EINVAL;
+
+ memcpy(alpha2_code, country_code, 2);
+ alpha2_code[2] = ' '; /* set 3rd byte of countrycode to ASCII space */
+
+ status = slsi_set_country_update_regd(sdev, alpha2_code, SLSI_COUNTRY_CODE_LEN);
+
+ return status;
+}
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
/* BEACON_RECV handler: start or stop forwarding received beacons (WIPS).
 * @action is "start" or "stop" (case-insensitive). Requires a connected STA
 * vif; start is refused while a HW scan or roam is in progress.
 */
static ssize_t slsi_forward_beacon(struct net_device *dev, char *action)
{
	struct netdev_vif *netdev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = netdev_vif->sdev;
	int intended_action = 0;
	int ret = 0;

	if (strncasecmp(action, "stop", 4) == 0) {
		intended_action = FAPI_ACTION_STOP;
	} else if (strncasecmp(action, "start", 5) == 0) {
		intended_action = FAPI_ACTION_START;
	} else {
		SLSI_NET_ERR(dev, "BEACON_RECV should be used with start or stop\n");
		return -EINVAL;
	}

	SLSI_NET_DBG2(dev, SLSI_MLME, "BEACON_RECV %s!!\n", intended_action ? "START" : "STOP");
	SLSI_MUTEX_LOCK(netdev_vif->vif_mutex);

	/* Beacon forwarding only makes sense on a connected STA vif. */
	if ((!netdev_vif->activated) || (netdev_vif->vif_type != FAPI_VIFTYPE_STATION) ||
	    (netdev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
		SLSI_ERR(sdev, "Not a STA vif or status is not CONNECTED\n");
		ret = -EINVAL;
		goto exit_vif_mutex;
	}

	/* Already in the requested state: report success without a new request. */
	if (((intended_action == FAPI_ACTION_START) && netdev_vif->is_wips_running) ||
	    ((intended_action == FAPI_ACTION_STOP) && !netdev_vif->is_wips_running)) {
		SLSI_NET_INFO(dev, "Forwarding beacon is already %s!!\n",
			      netdev_vif->is_wips_running ? "running" : "stopped");
		ret = 0;
		goto exit_vif_mutex;
	}

	/* Hold scan_mutex so a scan/roam cannot start mid-check. */
	SLSI_MUTEX_LOCK(netdev_vif->scan_mutex);
	if (intended_action == FAPI_ACTION_START &&
	    (netdev_vif->scan[SLSI_SCAN_HW_ID].scan_req || netdev_vif->sta.roam_in_progress)) {
		SLSI_NET_ERR(dev, "Rejecting BEACON_RECV start as scan/roam is running\n");
		ret = -EBUSY;
		goto exit_scan_mutex;
	}

	ret = slsi_mlme_set_forward_beacon(sdev, dev, intended_action);
exit_scan_mutex:
	SLSI_MUTEX_UNLOCK(netdev_vif->scan_mutex);
exit_vif_mutex:
	SLSI_MUTEX_UNLOCK(netdev_vif->vif_mutex);
	return ret;
}
+#endif
+
/* SETROAMOFFLOAD RSSI-boost parser.
 * Walks @rssi_boost_string as a hex TLV stream looking for tag "04" with
 * length "02"; its two payload bytes are the boost value (one hex byte) and
 * the band selector ('0' = clear both, '1' = 5GHz, other = 2.4GHz). Other
 * TLVs are skipped using their encoded length. Returns -EINVAL if no valid
 * "0402" element is found.
 */
static ssize_t slsi_update_rssi_boost(struct net_device *dev, char *rssi_boost_string)
{
	struct netdev_vif *netdev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = netdev_vif->sdev;
	int digit1, digit2, band, lendigit1, lendigit2;
	int boost = 0, length = 0, i = 0;

	/* Minimum input: tag(2) + len(2) + value(2) + band(2). */
	if (strlen(rssi_boost_string) < 8)
		return -EINVAL;
	for (i = 0; i < (strlen(rssi_boost_string) - 4);) {
		if (rssi_boost_string[i] == '0' &&
		    rssi_boost_string[i + 1] == '4') {
			/* Matched tag "04"; require length "02" and room for payload. */
			if (rssi_boost_string[i + 2] == '0' &&
			    rssi_boost_string[i + 3] == '2' &&
			    ((i + 7) < strlen(rssi_boost_string)))
				i = i + 4;
			else
				return -EINVAL;
			/* Payload byte 1: boost value (two hex digits). */
			digit1 = slsi_parse_hex(rssi_boost_string[i]);
			digit2 = slsi_parse_hex(rssi_boost_string[i + 1]);
			boost = (digit1 * 16) + digit2;
			/* Payload byte 2, low digit: band selector. */
			band = rssi_boost_string[i + 3] - '0';
			SLSI_MUTEX_LOCK(sdev->device_config_mutex);
			if (band == 0) {
				sdev->device_config.rssi_boost_2g = 0;
				sdev->device_config.rssi_boost_5g = 0;
			} else if (band == 1) {
				sdev->device_config.rssi_boost_2g = 0;
				sdev->device_config.rssi_boost_5g = boost;
			} else {
				sdev->device_config.rssi_boost_2g = boost;
				sdev->device_config.rssi_boost_5g = 0;
			}
			SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
			/* Push to firmware only when a STA vif is active. */
			if ((netdev_vif->activated) &&
			    (netdev_vif->vif_type == FAPI_VIFTYPE_STATION)) {
				return slsi_set_boost(sdev, dev);
			} else {
				return 0;
			}
		} else {
			/* Not tag "04": skip this TLV using its hex length field. */
			i = i + 2;
			lendigit1 = slsi_parse_hex(rssi_boost_string[i]);
			lendigit2 = slsi_parse_hex(rssi_boost_string[i + 1]);
			length = (lendigit1 * 16) + lendigit2;
			i = i + (length * 2) + 2;
		}
	}
	return -EINVAL;
}
+
+/* Handle the TDLS channel-switch private command.
+ *
+ * Command format: "<1|0> <peer mac> [<center_freq MHz> <chan_info>]"
+ *   channel switch:        "1 00:01:02:03:04:05 2412 20"
+ *   cancel channel switch: "0 00:01:02:03:04:05"
+ *
+ * The low byte of chan_info is the channel width and must be 20 or 40;
+ * center_freq must be a 2.4 GHz frequency valid for that width. On
+ * cancel, the current BSS channel is restored at 20 MHz.
+ * Returns 0 on success or a negative errno.
+ */
+static int slsi_tdls_channel_switch(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	int r, len_processed;
+	u8 peer_mac[6];
+	u32 center_freq = 0;
+	u32 chan_info = 0;
+	int is_ch_switch;
+	struct slsi_peer *peer;
+
+/* switch/cancel(1 byte) space(1byte) macaddress 17 char */
+#define SLSI_TDLS_IOCTL_CMD_DATA_MIN_LEN 19
+
+	if (buf_len < SLSI_TDLS_IOCTL_CMD_DATA_MIN_LEN) {
+		SLSI_NET_ERR(dev, "buf len should be atleast %d. but is:%d\n", SLSI_TDLS_IOCTL_CMD_DATA_MIN_LEN, buf_len);
+		return -EINVAL;
+	}
+
+	if (ndev_vif->sta.sta_bss == NULL) {
+		SLSI_NET_ERR(dev, "sta_bss is not available\n");
+		return -EINVAL;
+	}
+
+	is_ch_switch = command[0] - '0';
+	/* Skip the switch/cancel digit and the following space. */
+	buf_len -= 2;
+	command += 2;
+
+	slsi_machexstring_to_macarray(command, peer_mac);
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	if (is_ch_switch) {
+		/* mac address(17 char) + space(1 char) = 18 */
+		command += 18;
+		buf_len -= 18;
+
+		/* BUG FIX: argument was corrupted to "¢er_freq" (mis-encoded
+		 * "&center_freq"), which does not compile. Pass &center_freq.
+		 */
+		len_processed = slsi_str_to_int(command, &center_freq);
+		/* +1 for space */
+		buf_len -= len_processed + 1;
+		command += len_processed + 1;
+		if (buf_len <= 0) {
+			SLSI_NET_ERR(dev, "No buf for chan_info\n");
+			r = -EINVAL;
+			goto exit;
+		}
+		buf_len -= slsi_str_to_int(command, &chan_info);
+
+		if (((chan_info & 0xFF) != 20) && ((chan_info & 0xFF) != 40)) {
+			SLSI_NET_ERR(dev, "Invalid chan_info(%d)\n", chan_info);
+			r = -EINVAL;
+			goto exit;
+		}
+		/* In 2.4 Ghz channel 1(2412MHz) to channel 14(2484MHz) */
+		/* for 40MHz channels are from 1 to 13, its 2422MHz to 2462MHz. */
+		if ((((chan_info & 0xFF) == 20) && (center_freq < 2412 || center_freq > 2484)) ||
+		    (((chan_info & 0xFF) == 40) && (center_freq < 2422 || center_freq > 2462))) {
+			SLSI_NET_ERR(dev, "Invalid center_freq(%d) for chan_info(%d)\n", center_freq, chan_info);
+			r = -EINVAL;
+			goto exit;
+		}
+	} else {
+		/* Incase of cancel channel switch fallback to bss channel */
+		center_freq = ndev_vif->sta.sta_bss->channel->center_freq;
+		chan_info = 20; /* Hardcoded to 20MHz as cert tests use BSS with 20MHz */
+	}
+
+	peer = slsi_get_peer_from_mac(sdev, dev, peer_mac);
+
+	if (!peer || !slsi_is_tdls_peer(dev, peer)) {
+		SLSI_NET_ERR(dev, "%s peer aid:%d\n", peer ? "Invalid" : "No", peer ? peer->aid : 0);
+		r = -EINVAL;
+		goto exit;
+	}
+
+	r = slsi_mlme_tdls_action(sdev, dev, peer_mac, FAPI_TDLSACTION_CHANNEL_SWITCH, center_freq, chan_info);
+
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return r;
+}
+
+/* SET_TX_POWER_CALLING handler: mode 0 asserts the SAR-active host state
+ * (device held near body), any other mode clears it. The cached host
+ * state is only committed once firmware accepts the new value.
+ * Returns 0 on success or the MLME error code.
+ */
+int slsi_set_tx_power_calling(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	int mode = 0;
+	int ret;
+	u8 new_state;
+
+	(void)slsi_str_to_int(command, &mode);
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	new_state = sdev->device_config.host_state;
+	if (mode)
+		new_state &= ~FAPI_HOSTSTATE_SAR_ACTIVE;
+	else
+		new_state |= FAPI_HOSTSTATE_SAR_ACTIVE;
+
+	ret = slsi_mlme_set_host_state(sdev, dev, new_state);
+	if (ret == 0)
+		sdev->device_config.host_state = new_state;
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	return ret;
+}
+
+/* SET_TX_POWER_SAR handler: mode 0 disables SAR back-off; modes 1..4 set
+ * the SAR-active flag and encode (mode - 1) into host-state bits 3-4.
+ * The cached host state is committed only if firmware accepts it.
+ * Returns 0 on success or the MLME error code.
+ */
+int slsi_set_tx_power_sar(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	int mode = 0;
+	int ret;
+	u8 new_state;
+
+	(void)slsi_str_to_int(command, &mode);
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	/* Clear the SAR flag and the two SAR index bits before re-encoding. */
+	new_state = sdev->device_config.host_state &
+		    ~(FAPI_HOSTSTATE_SAR_ACTIVE | BIT(3) | BIT(4));
+	if (mode)
+		new_state |= ((mode - 1) << 3) | FAPI_HOSTSTATE_SAR_ACTIVE;
+
+	ret = slsi_mlme_set_host_state(sdev, dev, new_state);
+	if (ret == 0)
+		sdev->device_config.host_state = new_state;
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	return ret;
+}
+
+/* GET_TX_POWER_SAR handler: writes the current SAR index into 'command'.
+ * 0 means SAR back-off inactive; otherwise the index is decoded from
+ * host-state bits 3-4 plus one (mirror of slsi_set_tx_power_sar).
+ * Returns the number of characters written.
+ */
+int slsi_get_tx_power_sar(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	u8 state;
+	u8 sar_index = 0;
+	int written;
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	state = sdev->device_config.host_state;
+	if (state & FAPI_HOSTSTATE_SAR_ACTIVE)
+		sar_index = ((state >> 3) & 3) + 1;
+	written = snprintf(command, buf_len, "%u", sar_index);
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	return written;
+}
+
+/* Render the regulatory domain and supported channel list as text into buf.
+ *
+ * Output: a "country XX:DFS-<region>" header, one line per regulatory rule
+ * (frequency range, bandwidth, EIRP, flags), then a "Channels:" line that
+ * lists every supported channel whose 20 MHz footprint fits inside some
+ * regulatory rule. Returns the number of characters written.
+ *
+ * NOTE(review): cur_pos is accumulated from snprintf() return values, which
+ * report would-be lengths on truncation; if buf overflows, buf_len - cur_pos
+ * can go negative and be passed as a huge size_t. Callers appear to size buf
+ * generously (~4 KB) -- confirm before reusing with small buffers.
+ */
+static int slsi_print_regulatory(struct slsi_802_11d_reg_domain *domain_info, char *buf, int buf_len, struct slsi_supported_channels *supported_channels, int supp_chan_length)
+{
+ int cur_pos = 0;
+ int i, j, k;
+ char *dfs_region_str[] = {"unknown", "ETSI", "FCC", "JAPAN", "GLOBAL", "CHINA"};
+ u8 dfs_region_index;
+ struct ieee80211_reg_rule *reg_rule;
+ int channel_start_freq = 0;
+ int channel_end_freq = 0;
+ int channel_start_num = 0;
+ int channel_end_num = 0;
+ int channel_count = 0;
+ int channel_increment = 0;
+ int channel_band = 0;
+ bool display_pattern = false;
+
+ cur_pos = snprintf(buf, buf_len, "country %c%c:", domain_info->regdomain->alpha2[0],
+ domain_info->regdomain->alpha2[1]);
+ /* Out-of-range DFS region codes fall back to index 0 ("unknown"). */
+ dfs_region_index = domain_info->regdomain->dfs_region <= 5 ? domain_info->regdomain->dfs_region : 0;
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, "DFS-%s\n", dfs_region_str[dfs_region_index]);
+ /* One line per regulatory rule: "(start-end @ bw), (N/A, eirp)" + flags. */
+ for (i = 0; i < domain_info->regdomain->n_reg_rules; i++) {
+ reg_rule = &domain_info->regdomain->reg_rules[i];
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, "\t(%d-%d @ %d), (N/A, %d)",
+ reg_rule->freq_range.start_freq_khz/1000,
+ reg_rule->freq_range.end_freq_khz/1000,
+ reg_rule->freq_range.max_bandwidth_khz/1000,
+ MBM_TO_DBM(reg_rule->power_rule.max_eirp));
+ if (reg_rule->flags) {
+ if (reg_rule->flags & NL80211_RRF_DFS)
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", DFS");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ if (reg_rule->flags & NL80211_RRF_NO_OFDM)
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", NO_OFDM");
+#endif
+/* NO_IR replaced PASSIVE_SCAN|NO_IBSS in kernel 3.14; handle both spellings. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ if (reg_rule->flags & (NL80211_RRF_PASSIVE_SCAN|NL80211_RRF_NO_IBSS))
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", NO_IR");
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ if (reg_rule->flags & (NL80211_RRF_NO_IR))
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", NO_IR");
+#endif
+ if (reg_rule->flags & NL80211_RRF_NO_INDOOR)
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", NO_INDOOR");
+ if (reg_rule->flags & NL80211_RRF_NO_OUTDOOR)
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", NO_OUTDOOR");
+ }
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, "\n");
+ }
+
+ /* Display of Supported Channels for 2.4GHz and 5GHz */
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, "Channels:");
+
+ for (i = 0; i < supp_chan_length; i++) {
+ channel_start_num = supported_channels[i].start_chan_num;
+ channel_count = supported_channels[i].channel_count;
+ channel_increment = supported_channels[i].increment;
+ channel_band = supported_channels[i].band;
+ channel_end_num = channel_start_num + ((channel_count - 1) * channel_increment);
+ for (j = channel_start_num; j <= channel_end_num; j += channel_increment) {
+ /* +-10 MHz around the channel centre: the 20 MHz footprint in kHz. */
+ channel_start_freq = (ieee80211_channel_to_frequency(j, channel_band)*1000) - 10000;
+ channel_end_freq = (ieee80211_channel_to_frequency(j, channel_band)*1000) + 10000;
+ for (k = 0; k < domain_info->regdomain->n_reg_rules; k++) {
+ reg_rule = &domain_info->regdomain->reg_rules[k];
+ /* Print the channel only if some rule covers its whole footprint. */
+ if ((reg_rule->freq_range.start_freq_khz <= channel_start_freq) &&
+ (reg_rule->freq_range.end_freq_khz >= channel_end_freq)) {
+ if (display_pattern)
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, ", %d", j);
+ else
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, " %d", j);
+ display_pattern = true;
+ break;
+ }
+ }
+ }
+ }
+ cur_pos += snprintf(buf + cur_pos, buf_len - cur_pos, "\n");
+ return cur_pos;
+}
+
+/* Read the unifiSupportedChannels MIB and expand it into supported_channels.
+ *
+ * The MIB octet string is a list of (start_channel, channel_count) byte
+ * pairs. start_channel == 1 is taken as the 2.4 GHz band (increment 1);
+ * anything else is treated as 5 GHz (increment 4). Channels 12/13 are
+ * trimmed from 2.4 GHz when cellular coexistence disables them.
+ *
+ * supported_channels must have room for one entry per pair (callers pass
+ * an array of 5). Returns the number of entries filled, 0 on failure.
+ */
+static int slsi_get_supported_channels(struct slsi_dev *sdev, struct net_device *dev, struct slsi_supported_channels *supported_channels)
+{
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_data supported_chan_mib = { 0, NULL };
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_get_entry get_values[] = {{SLSI_PSID_UNIFI_SUPPORTED_CHANNELS, { 0, 0 } } };
+ int i, chan_count, chan_start;
+ int supp_chan_length = 0;
+
+ /* Expect each mib length in response is <= 16. So assume 16 bytes for each MIB */
+ mibrsp.dataLength = 16;
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (mibrsp.data == NULL) {
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+ return 0;
+ }
+ values = slsi_read_mibs(sdev, dev, get_values, 1, &mibrsp);
+ if (!values)
+ goto exit_with_mibrsp;
+
+ if (values[0].type != SLSI_MIB_TYPE_OCTET) {
+ SLSI_ERR(sdev, "Supported_Chan invalid type.");
+ goto exit_with_values;
+ }
+
+ /* Each 2-byte pair is (first channel number, consecutive channel count). */
+ supported_chan_mib = values[0].u.octetValue;
+ for (i = 0; i < supported_chan_mib.dataLength / 2; i++) {
+ chan_start = supported_chan_mib.data[i*2];
+ chan_count = supported_chan_mib.data[i*2 + 1];
+ if (chan_start == 1) { /* for 2.4GHz */
+ supported_channels[supp_chan_length].start_chan_num = 1;
+ /* Drop ch12/13 when cellular is inactive and they are configured off. */
+ if (!(sdev->device_config.host_state & FAPI_HOSTSTATE_CELLULAR_ACTIVE) &&
+ chan_count > 11 && sdev->device_config.disable_ch12_ch13) {
+ chan_count = 11;
+ SLSI_DBG1(sdev, SLSI_CFG80211, "Channels 12 and 13 have been disabled");
+ }
+ supported_channels[supp_chan_length].channel_count = chan_count;
+ supported_channels[supp_chan_length].increment = 1;
+ supported_channels[supp_chan_length].band = NL80211_BAND_2GHZ;
+ supp_chan_length = supp_chan_length + 1;
+ } else { /* for 5GHz */
+ supported_channels[supp_chan_length].start_chan_num = chan_start;
+ supported_channels[supp_chan_length].channel_count = chan_count;
+ supported_channels[supp_chan_length].increment = 4;
+ supported_channels[supp_chan_length].band = NL80211_BAND_5GHZ;
+ supp_chan_length = supp_chan_length + 1;
+ }
+ }
+exit_with_values:
+ kfree(values);
+exit_with_mibrsp:
+ kfree(mibrsp.data);
+ return supp_chan_length;
+}
+
+/* GETREGULATORY handler: print regulatory rules and channels into buf.
+ *
+ * mode (the digit after the command name) selects the source:
+ *   1 - the vif-specific country of the connected STA (reads the country
+ *       code and regulatory rules from firmware); requires an active,
+ *       connected STA vif.
+ *   0 - the cached device-wide regulatory domain.
+ * Returns the number of characters written, or a negative errno
+ * (-ENOMEM if buf was filled to the brim and output may be truncated).
+ */
+static int slsi_get_regulatory(struct net_device *dev, char *buf, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int mode;
+ int cur_pos = 0;
+ int status;
+ u8 alpha2[3];
+ struct slsi_supported_channels supported_channels[5];
+ int supp_chan_length;
+
+ mode = buf[strlen(CMD_GETREGULATORY) + 1] - '0';
+ if (mode == 1) {
+ struct slsi_802_11d_reg_domain domain_info;
+
+ memset(&domain_info, 0, sizeof(struct slsi_802_11d_reg_domain));
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated || ndev_vif->vif_type != FAPI_VIFTYPE_STATION || !ndev_vif->sta.sta_bss) {
+ cur_pos += snprintf(buf, buf_len - cur_pos, "Station not connected");
+ SLSI_ERR(sdev, "station not connected. vif.activated:%d, vif.type:%d, vif.bss:%s\n",
+ ndev_vif->activated, ndev_vif->vif_type, ndev_vif->sta.sta_bss ? "yes" : "no");
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EINVAL;
+ }
+ /* read vif specific country code, index = vifid+1 */
+ status = slsi_read_default_country(sdev, alpha2, ndev_vif->ifnum + 1);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ if (status)
+ return status;
+
+ /* max 20 rules */
+ domain_info.regdomain = kmalloc(sizeof(*domain_info.regdomain) + sizeof(struct ieee80211_reg_rule) * 20, GFP_KERNEL);
+ if (!domain_info.regdomain) {
+ SLSI_ERR(sdev, "no memory size:%lu\n",
+ sizeof(struct ieee80211_regdomain) + sizeof(struct ieee80211_reg_rule) * 20);
+ return -ENOMEM;
+ }
+
+ /* get regulatory rules based on country code */
+ domain_info.countrylist = sdev->device_config.domain_info.countrylist;
+ domain_info.country_len = sdev->device_config.domain_info.country_len;
+ status = slsi_read_regulatory_rules(sdev, &domain_info, alpha2);
+ if (status) {
+ kfree(domain_info.regdomain);
+ return status;
+ }
+ /* get supported channels based on country code */
+ supp_chan_length = slsi_get_supported_channels(sdev, dev, &supported_channels[0]);
+ cur_pos += slsi_print_regulatory(&domain_info, buf + cur_pos, buf_len - cur_pos, &supported_channels[0], supp_chan_length);
+ kfree(domain_info.regdomain);
+ } else if (mode == 0) {
+ /* Cached domain: guarded by the device config mutex while printing. */
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ supp_chan_length = slsi_get_supported_channels(sdev, dev, &supported_channels[0]);
+ cur_pos += slsi_print_regulatory(&sdev->device_config.domain_info, buf + cur_pos, buf_len - cur_pos, &supported_channels[0], supp_chan_length);
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ } else {
+ cur_pos += snprintf(buf, buf_len - cur_pos, "invalid option %d", mode);
+ SLSI_ERR(sdev, "invalid option:%d\n", mode);
+ return -EINVAL;
+ }
+ /* Buf is somewhere close to 4Kbytes. so expect some spare space. If there is no spare
+ * space we might have missed printing some text in buf.
+ */
+ if (buf_len - cur_pos)
+ return cur_pos;
+ else
+ return -ENOMEM;
+}
+
+/* Mark 2.4 GHz channels 12 and 13 as disabled in the wiphy channel table.
+ * bands[0] is the 2.4 GHz band; channels 12/13 live at indices 11/12.
+ * A debug line is emitted regardless of whether the band exists.
+ */
+void slsi_disable_ch12_13(struct slsi_dev *sdev)
+{
+	struct wiphy *wiphy = sdev->wiphy;
+	int idx;
+
+	if (wiphy->bands[0]) {
+		for (idx = 11; idx <= 12; idx++)
+			wiphy->bands[0]->channels[idx].flags |= IEEE80211_CHAN_DISABLED;
+	}
+
+	SLSI_DBG1(sdev, SLSI_CFG80211, "Channels 12 and 13 have been disabled");
+}
+
+/* SET_FCC_CHANNEL handler: update the cellular-active host-state bit and
+ * re-apply the regulatory domain.
+ *
+ * cmd[0] == '0' means the device entered flight mode (cellular inactive);
+ * any other value marks cellular active. After telling firmware, the
+ * country's regulatory rules are re-read and re-applied so channel flags
+ * (notably ch12/13 availability) match the new state.
+ * Returns the MLME status; regulatory refresh failures are only warned.
+ */
+int slsi_set_fcc_channel(struct net_device *dev, char *cmd, int cmd_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int status;
+ bool flight_mode_ena;
+ u8 host_state;
+ int err;
+ char alpha2[3];
+
+ /* SET_FCC_CHANNEL 0 when device is in flightmode */
+ flight_mode_ena = (cmd[0] == '0');
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ host_state = sdev->device_config.host_state;
+
+ if (flight_mode_ena)
+ host_state = host_state & ~FAPI_HOSTSTATE_CELLULAR_ACTIVE;
+ else
+ host_state = host_state | FAPI_HOSTSTATE_CELLULAR_ACTIVE;
+ /* NOTE(review): the cached state is updated before firmware confirms;
+ * on MLME failure cache and firmware can disagree - confirm intended.
+ */
+ sdev->device_config.host_state = host_state;
+
+ status = slsi_mlme_set_host_state(sdev, dev, host_state);
+ if (status) {
+ SLSI_ERR(sdev, "Err setting MMaxPowerEna. error = %d\n", status);
+ } else {
+ err = slsi_read_default_country(sdev, alpha2, 1);
+ if (err) {
+ SLSI_WARN(sdev, "Err updating reg_rules = %d\n", err);
+ } else {
+ memcpy(sdev->device_config.domain_info.regdomain->alpha2, alpha2, 2);
+ /* Read the regulatory params for the country.*/
+ if (slsi_read_regulatory_rules(sdev, &sdev->device_config.domain_info, alpha2) == 0) {
+ slsi_reset_channel_flags(sdev);
+ wiphy_apply_custom_regulatory(sdev->wiphy, sdev->device_config.domain_info.regdomain);
+ slsi_update_supported_channels_regd_flags(sdev);
+ /* In flight mode ch12/13 may need to stay disabled. */
+ if (flight_mode_ena && sdev->device_config.disable_ch12_ch13)
+ slsi_disable_ch12_13(sdev);
+ }
+ }
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ return status;
+}
+
+/* Enable or disable MAC address randomisation via the
+ * unifiMacAddressRandomisation MIB. cmd starting with "ON" enables it;
+ * anything else disables. Returns 0 on success, -ENOMEM if the MIB
+ * buffer could not be encoded, or the MLME set error.
+ */
+int slsi_fake_mac_write(struct net_device *dev, char *cmd)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	struct slsi_mib_data mib_data = { 0, NULL };
+	bool enable = (strncmp(cmd, "ON", strlen("ON")) == 0);
+	int ret;
+
+	ret = slsi_mib_encode_bool(&mib_data, SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION, enable, 0);
+	if (ret != SLSI_MIB_STATUS_SUCCESS) {
+		SLSI_ERR(sdev, "FAKE MAC FAIL: no mem for MIB\n");
+		return -ENOMEM;
+	}
+
+	ret = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
+	kfree(mib_data.data);
+
+	if (ret)
+		SLSI_ERR(sdev, "Err setting unifiMacAddrRandomistaion MIB. error = %d\n", ret);
+
+	return ret;
+}
+
+/* Map a FAPI association result code to its human-readable label.
+ * Unknown codes map to "unspecified_failure". Returns a static string;
+ * the caller must not free it.
+ */
+static char *slsi_get_assoc_status(u16 fw_result_code)
+{
+	switch (fw_result_code) {
+	case FAPI_RESULTCODE_SUCCESS:
+		return "success";
+	case FAPI_RESULTCODE_TRANSMISSION_FAILURE:
+		return "transmission_failure";
+	case FAPI_RESULTCODE_HOST_REQUEST_SUCCESS:
+		return "host_request_success";
+	case FAPI_RESULTCODE_HOST_REQUEST_FAILED:
+		return "host_request_failed";
+	case FAPI_RESULTCODE_PROBE_TIMEOUT:
+		return "probe_timeout";
+	case FAPI_RESULTCODE_AUTH_TIMEOUT:
+		return "auth_timeout";
+	case FAPI_RESULTCODE_ASSOC_TIMEOUT:
+		return "assoc_timeout";
+	case FAPI_RESULTCODE_ASSOC_ABORT:
+		return "assoc_abort";
+	default:
+		return "unspecified_failure";
+	}
+}
+
+/* GETSTAINFO handler: report details of the last disconnected AP client.
+ *
+ * With Wi-Fi sharing enabled, the query is redirected to the MHS AP vif
+ * when one exists. Only valid while the vif is an active AP; otherwise
+ * -EINVAL. The output layout differs between Android P+ (labelled
+ * "GETSTAINFO" form) and older builds ("wl_get_sta_info" form).
+ * Returns the number of characters written.
+ */
+int slsi_get_sta_info(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int len;
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ struct net_device *ap_dev;
+ struct netdev_vif *ndev_ap_vif;
+
+ ap_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
+
+ /* If a mobile-hotspot AP vif exists, answer for that vif instead. */
+ if (ap_dev) {
+ ndev_ap_vif = netdev_priv(ap_dev);
+ SLSI_MUTEX_LOCK(ndev_ap_vif->vif_mutex);
+ if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_ap_vif))
+ ndev_vif = ndev_ap_vif;
+ SLSI_MUTEX_UNLOCK(ndev_ap_vif->vif_mutex);
+ }
+#endif
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((!ndev_vif->activated) || (ndev_vif->vif_type != FAPI_VIFTYPE_AP)) {
+ SLSI_ERR(sdev, "slsi_get_sta_info: AP is not up.Command not allowed vif.activated:%d, vif.type:%d\n",
+ ndev_vif->activated, ndev_vif->vif_type);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EINVAL;
+ }
+
+#if defined(ANDROID_VERSION) && (ANDROID_VERSION >= 90000)
+ /* Android P and later expect the extended, labelled reply format. */
+ len = snprintf(command, buf_len, "GETSTAINFO %pM Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d CAP=%04x %02x:%02x:%02x ",
+ ndev_vif->ap.last_disconnected_sta.address,
+ ndev_vif->ap.last_disconnected_sta.rx_retry_packets,
+ ndev_vif->ap.last_disconnected_sta.rx_bc_mc_packets,
+ ndev_vif->ap.last_disconnected_sta.capabilities,
+ ndev_vif->ap.last_disconnected_sta.address[0],
+ ndev_vif->ap.last_disconnected_sta.address[1],
+ ndev_vif->ap.last_disconnected_sta.address[2]);
+
+ len += snprintf(&command[len], (buf_len - len), "%d %d %d %d %d %d %d %u %d",
+ ieee80211_frequency_to_channel(ndev_vif->ap.channel_freq),
+ ndev_vif->ap.last_disconnected_sta.bandwidth, ndev_vif->ap.last_disconnected_sta.rssi,
+ ndev_vif->ap.last_disconnected_sta.tx_data_rate, ndev_vif->ap.last_disconnected_sta.mode,
+ ndev_vif->ap.last_disconnected_sta.antenna_mode,
+ ndev_vif->ap.last_disconnected_sta.mimo_used, ndev_vif->ap.last_disconnected_sta.reason,
+ ndev_vif->ap.last_disconnected_sta.support_mode);
+#else
+ /* Legacy reply format (only OUI of the MAC, raw frequency). */
+ len = snprintf(command, buf_len, "wl_get_sta_info : %02x%02x%02x %u %d %d %d %d %d %d %u ",
+ ndev_vif->ap.last_disconnected_sta.address[0], ndev_vif->ap.last_disconnected_sta.address[1],
+ ndev_vif->ap.last_disconnected_sta.address[2], ndev_vif->ap.channel_freq,
+ ndev_vif->ap.last_disconnected_sta.bandwidth, ndev_vif->ap.last_disconnected_sta.rssi,
+ ndev_vif->ap.last_disconnected_sta.tx_data_rate, ndev_vif->ap.last_disconnected_sta.mode,
+ ndev_vif->ap.last_disconnected_sta.antenna_mode,
+ ndev_vif->ap.last_disconnected_sta.mimo_used, ndev_vif->ap.last_disconnected_sta.reason);
+#endif
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return len;
+}
+
+/* Write the RSSI recorded for the last connected BSS into 'command'.
+ * Returns the number of characters written.
+ */
+static int slsi_get_bss_rssi(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int written;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	written = snprintf(command, buf_len, "%d", ndev_vif->sta.last_connected_bss.rssi);
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+	return written;
+}
+
+/* Write a space-separated summary of the last connected BSS into 'command':
+ * OUI of the BSSID, frequency, bandwidth, rssi, tx rate, mode, antenna
+ * mode, MIMO use, passpoint version, snr, noise, roaming AKM/count and
+ * the kv/kvie capability fields. Returns the number of characters written.
+ */
+static int slsi_get_bss_info(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int len = 0;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* Only the first three BSSID octets (the OUI) are reported. */
+ len = snprintf(command, buf_len, "%02x:%02x:%02x %u %u %d %u %u %u %u %u %d %d %u %u %u %u",
+ ndev_vif->sta.last_connected_bss.address[0], ndev_vif->sta.last_connected_bss.address[1],
+ ndev_vif->sta.last_connected_bss.address[2],
+ ndev_vif->sta.last_connected_bss.channel_freq, ndev_vif->sta.last_connected_bss.bandwidth,
+ ndev_vif->sta.last_connected_bss.rssi, ndev_vif->sta.last_connected_bss.tx_data_rate,
+ ndev_vif->sta.last_connected_bss.mode, ndev_vif->sta.last_connected_bss.antenna_mode,
+ ndev_vif->sta.last_connected_bss.mimo_used,
+ ndev_vif->sta.last_connected_bss.passpoint_version, ndev_vif->sta.last_connected_bss.snr,
+ ndev_vif->sta.last_connected_bss.noise_level, ndev_vif->sta.last_connected_bss.roaming_akm,
+ ndev_vif->sta.last_connected_bss.roaming_count, ndev_vif->sta.last_connected_bss.kv,
+ ndev_vif->sta.last_connected_bss.kvie);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return len;
+}
+
+/* Write the last association result code and its textual label into
+ * 'command'. Returns the number of characters written.
+ */
+static int slsi_get_assoc_reject_info(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+
+	return snprintf(command, buf_len, "assoc_reject.status : %d %s",
+			sdev->assoc_result_code,
+			slsi_get_assoc_status(sdev->assoc_result_code));
+}
+
+#ifdef CONFIG_SCSC_WLAN_LOW_LATENCY_MODE
+/* SET_LATENCY_MODE handler. latency_mode = 0 (Normal) keeps soft roaming
+ * enabled; latency_mode = 1 (Low) disables it to avoid roam-scan jitter.
+ * Returns the result of the MIB write.
+ */
+int slsi_set_latency_mode(struct net_device *dev, char *cmd, int cmd_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	bool roaming_on = (cmd[0] == '0');
+
+	SLSI_DBG1(sdev, SLSI_CFG80211, "Setting latency mode %d\n", cmd[0] - '0');
+
+	return slsi_set_mib_soft_roaming_enabled(sdev, dev, roaming_on);
+}
+
+/* SET_POWER_MODE handler: '0' selects fully active power management,
+ * anything else selects power save. Only permitted on an activated,
+ * connected STA vif; otherwise -EINVAL. Returns the MLME status.
+ */
+static int slsi_set_power_mode(struct net_device *dev, char *command, int buf_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	u16 pm_mode;
+	int ret;
+
+	pm_mode = (command[0] == '0') ? FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE :
+					FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	if (!ndev_vif->activated || ndev_vif->vif_type != FAPI_VIFTYPE_STATION ||
+	    ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) {
+		SLSI_ERR(sdev, "Command not allowed vif.activated:%d, vif.type:%d, ndev_vif->sta.vif_status:%d\n",
+			 ndev_vif->activated, ndev_vif->vif_type, ndev_vif->sta.vif_status);
+		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+		return -EINVAL;
+	}
+	ret = slsi_mlme_powermgt(sdev, dev, pm_mode);
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+static int slsi_enhanced_arp_start_stop(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int result = 0;
+ int readbyte = 0;
+ int readvalue = 0;
+ int i = 0;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ if (!sdev->device_config.fw_enhanced_arp_detect_supported) {
+ SLSI_ERR(sdev, "Enhanced ARP Detect Feature is not supported.\n");
+ return -ENOTSUPP;
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ SLSI_ERR(sdev, "Not in STA mode\n");
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex)
+ return -EPERM;
+ }
+
+ SLSI_DBG1(sdev, SLSI_CFG80211, "Enhanced ARP Start/Stop\n");
+
+ for (i = 0; i < 4 ; i++) {
+ readbyte = slsi_str_to_int(command, &readvalue);
+ ndev_vif->target_ip_addr[i] = readvalue;
+ command = command + readbyte + 1;
+ }
+
+ if (ndev_vif->target_ip_addr[0] != 0) { /* start enhanced arp detect */
+ /* clear all the counters */
+ memset(&ndev_vif->enhanced_arp_stats, 0, sizeof(ndev_vif->enhanced_arp_stats));
+ ndev_vif->enhanced_arp_detect_enabled = true;
+ result = slsi_mlme_arp_detect_request(sdev, dev, FAPI_ACTION_START, ndev_vif->target_ip_addr);
+ } else { /* stop enhanced arp detect */
+ ndev_vif->enhanced_arp_detect_enabled = false;
+ result = slsi_mlme_arp_detect_request(sdev, dev, FAPI_ACTION_STOP, ndev_vif->target_ip_addr);
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return result;
+}
+
+/* Report the enhanced-ARP-detect counters as ten space-separated ints,
+ * refreshing the firmware-side response counter first. The counters are
+ * reset after reading. Returns the number of characters written, or 0 if
+ * the firmware counter read failed (nothing is written in that case).
+ */
+static int slsi_enhanced_arp_get_stats(struct net_device *dev, char *command, int buf_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ int r = 0;
+ int len = 0;
+
+ /* Pull the lower-MAC response count from firmware into the stats. */
+ r = slsi_read_enhanced_arp_rx_count_by_lower_mac(sdev, dev, SLSI_PSID_UNIFI_ARP_DETECT_RESPONSE_COUNTER);
+
+ if (r == 0) {
+ len = snprintf(command, buf_len, "%d %d %d %d %d %d %d %d %d %d",
+ ndev_vif->enhanced_arp_stats.arp_req_count_from_netdev,
+ ndev_vif->enhanced_arp_stats.arp_req_count_to_lower_mac,
+ ndev_vif->enhanced_arp_stats.arp_req_rx_count_by_lower_mac,
+ ndev_vif->enhanced_arp_stats.arp_req_count_tx_success,
+ ndev_vif->enhanced_arp_stats.arp_rsp_rx_count_by_lower_mac,
+ ndev_vif->enhanced_arp_stats.arp_rsp_rx_count_by_upper_mac,
+ ndev_vif->enhanced_arp_stats.arp_rsp_count_to_netdev,
+ ndev_vif->enhanced_arp_stats.arp_rsp_count_out_of_order_drop,
+ 0, /*ap_link_active not supported, set as 0*/
+ ndev_vif->enhanced_arp_stats.is_duplicate_addr_detected);
+ }
+
+ /*clear all the counters*/
+ memset(&ndev_vif->enhanced_arp_stats, 0, sizeof(ndev_vif->enhanced_arp_stats));
+
+ return len;
+}
+#endif
+
+int slsi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#define MAX_LEN_PRIV_COMMAND 4096 /*This value is the max reply size set in supplicant*/
+ struct android_wifi_priv_cmd priv_cmd;
+ int ret = 0;
+ u8 *command = NULL;
+
+ if (!dev) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ if (!rq->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (copy_from_user((void *)&priv_cmd, (void *)rq->ifr_data, sizeof(struct android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ SLSI_NET_ERR(dev, "ifr data failed\n");
+ goto exit;
+ }
+
+ if ((priv_cmd.total_len > MAX_LEN_PRIV_COMMAND) || (priv_cmd.total_len < 0)) {
+ ret = -EINVAL;
+ SLSI_NET_ERR(dev, "Length mismatch total_len = %d\n", priv_cmd.total_len);
+ goto exit;
+ }
+ command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+ if (!command) {
+ ret = -ENOMEM;
+ SLSI_NET_ERR(dev, "No memory\n");
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ SLSI_NET_ERR(dev, "Buffer copy fail\n");
+ goto exit;
+ }
+ command[priv_cmd.total_len] = '\0';
+
+ SLSI_INFO_NODEV("command: %.*s\n", priv_cmd.total_len, command);
+
+ if (strncasecmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+ ret = slsi_set_suspend_mode(dev, command);
+ } else if (strncasecmp(command, CMD_SETJOINPREFER, strlen(CMD_SETJOINPREFER)) == 0) {
+ char *rssi_boost_string = command + strlen(CMD_SETJOINPREFER) + 1;
+
+ ret = slsi_update_rssi_boost(dev, rssi_boost_string);
+ } else if (strncasecmp(command, CMD_RXFILTERADD, strlen(CMD_RXFILTERADD)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTERADD) + 1) - '0';
+
+ ret = slsi_rx_filter_num_write(dev, 1, filter_num);
+ } else if (strncasecmp(command, CMD_RXFILTERREMOVE, strlen(CMD_RXFILTERREMOVE)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTERREMOVE) + 1) - '0';
+
+ ret = slsi_rx_filter_num_write(dev, 0, filter_num);
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if !defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION < 90000)
+ } else if (strncasecmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) {
+ char *intf_name = command + strlen(CMD_INTERFACE_CREATE) + 1;
+
+ ret = slsi_create_interface(dev, intf_name);
+ } else if (strncasecmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
+ char *intf_name = command + strlen(CMD_INTERFACE_DELETE) + 1;
+
+ ret = slsi_delete_interface(dev, intf_name);
+#endif
+ } else if (strncasecmp(command, CMD_SET_INDOOR_CHANNELS, strlen(CMD_SET_INDOOR_CHANNELS)) == 0) {
+ char *arg = command + strlen(CMD_SET_INDOOR_CHANNELS) + 1;
+
+ ret = slsi_set_indoor_channels(dev, arg);
+ } else if (strncasecmp(command, CMD_GET_INDOOR_CHANNELS, strlen(CMD_GET_INDOOR_CHANNELS)) == 0) {
+ ret = slsi_get_indoor_channels(dev, command, priv_cmd.total_len);
+#endif
+ } else if (strncasecmp(command, CMD_SETCOUNTRYREV, strlen(CMD_SETCOUNTRYREV)) == 0) {
+ char *country_code = command + strlen(CMD_SETCOUNTRYREV) + 1;
+
+ ret = slsi_set_country_rev(dev, country_code);
+ } else if (strncasecmp(command, CMD_GETCOUNTRYREV, strlen(CMD_GETCOUNTRYREV)) == 0) {
+ ret = slsi_get_country_rev(dev, command, priv_cmd.total_len);
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+ } else if (strncasecmp(command, CMD_SETROAMTRIGGER, strlen(CMD_SETROAMTRIGGER)) == 0) {
+ int skip = strlen(CMD_SETROAMTRIGGER) + 1;
+
+ ret = slsi_roam_scan_trigger_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMTRIGGER, strlen(CMD_GETROAMTRIGGER)) == 0) {
+ ret = slsi_roam_scan_trigger_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMDELTA, strlen(CMD_SETROAMDELTA)) == 0) {
+ int skip = strlen(CMD_SETROAMDELTA) + 1;
+
+ ret = slsi_roam_delta_trigger_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMDELTA, strlen(CMD_GETROAMDELTA)) == 0) {
+ ret = slsi_roam_delta_trigger_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMSCANPERIOD, strlen(CMD_SETROAMSCANPERIOD)) == 0) {
+ int skip = strlen(CMD_SETROAMSCANPERIOD) + 1;
+
+ ret = slsi_cached_channel_scan_period_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMSCANPERIOD, strlen(CMD_GETROAMSCANPERIOD)) == 0) {
+ ret = slsi_cached_channel_scan_period_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETFULLROAMSCANPERIOD, strlen(CMD_SETFULLROAMSCANPERIOD)) == 0) {
+ int skip = strlen(CMD_SETFULLROAMSCANPERIOD) + 1;
+
+ ret = slsi_full_roam_scan_period_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETFULLROAMSCANPERIOD, strlen(CMD_GETFULLROAMSCANPERIOD)) == 0) {
+ ret = slsi_full_roam_scan_period_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETSCANCHANNELTIME, strlen(CMD_SETSCANCHANNELTIME)) == 0) {
+ int skip = strlen(CMD_SETSCANCHANNELTIME) + 1;
+
+ ret = slsi_roam_scan_max_active_channel_time_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETSCANCHANNELTIME, strlen(CMD_GETSCANCHANNELTIME)) == 0) {
+ ret = slsi_roam_scan_max_active_channel_time_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETSCANNPROBES, strlen(CMD_SETSCANNPROBES)) == 0) {
+ int skip = strlen(CMD_SETSCANNPROBES) + 1;
+
+ ret = slsi_roam_scan_probe_interval_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETSCANNPROBES, strlen(CMD_GETSCANNPROBES)) == 0) {
+ ret = slsi_roam_scan_probe_interval_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0) {
+ int skip = strlen(CMD_SETROAMMODE) + 1;
+
+ ret = slsi_roam_mode_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMMODE, strlen(CMD_GETROAMMODE)) == 0) {
+ ret = slsi_roam_mode_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMINTRABAND, strlen(CMD_SETROAMINTRABAND)) == 0) {
+ int skip = strlen(CMD_SETROAMINTRABAND) + 1;
+
+ ret = slsi_roam_scan_band_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMINTRABAND, strlen(CMD_GETROAMINTRABAND)) == 0) {
+ ret = slsi_roam_scan_band_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMBAND, strlen(CMD_SETROAMBAND)) == 0) {
+ uint band = *(command + strlen(CMD_SETROAMBAND) + 1) - '0';
+
+ ret = slsi_freq_band_write(dev, band);
+ } else if (strncasecmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+
+ ret = slsi_freq_band_write(dev, band);
+ } else if ((strncasecmp(command, CMD_GETROAMBAND, strlen(CMD_GETROAMBAND)) == 0) || (strncasecmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0)) {
+ ret = slsi_freq_band_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMSCANCONTROL, strlen(CMD_SETROAMSCANCONTROL)) == 0) {
+ int mode = *(command + strlen(CMD_SETROAMSCANCONTROL) + 1) - '0';
+
+ ret = slsi_roam_scan_control_write(dev, mode);
+ } else if (strncasecmp(command, CMD_GETROAMSCANCONTROL, strlen(CMD_GETROAMSCANCONTROL)) == 0) {
+ ret = slsi_roam_scan_control_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETSCANHOMETIME, strlen(CMD_SETSCANHOMETIME)) == 0) {
+ int skip = strlen(CMD_SETSCANHOMETIME) + 1;
+
+ ret = slsi_roam_scan_home_time_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETSCANHOMETIME, strlen(CMD_GETSCANHOMETIME)) == 0) {
+ ret = slsi_roam_scan_home_time_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETSCANHOMEAWAYTIME, strlen(CMD_SETSCANHOMEAWAYTIME)) == 0) {
+ int skip = strlen(CMD_SETSCANHOMEAWAYTIME) + 1;
+
+ ret = slsi_roam_scan_home_away_time_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETSCANHOMEAWAYTIME, strlen(CMD_GETSCANHOMEAWAYTIME)) == 0) {
+ ret = slsi_roam_scan_home_away_time_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETOKCMODE, strlen(CMD_SETOKCMODE)) == 0) {
+ int mode = *(command + strlen(CMD_SETOKCMODE) + 1) - '0';
+
+ ret = slsi_okc_mode_write(dev, mode);
+ } else if (strncasecmp(command, CMD_GETOKCMODE, strlen(CMD_GETOKCMODE)) == 0) {
+ ret = slsi_okc_mode_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETWESMODE, strlen(CMD_SETWESMODE)) == 0) {
+ int mode = *(command + strlen(CMD_SETWESMODE) + 1) - '0';
+
+ ret = slsi_wes_mode_write(dev, mode);
+ } else if (strncasecmp(command, CMD_GETWESMODE, strlen(CMD_GETWESMODE)) == 0) {
+ ret = slsi_wes_mode_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SETROAMSCANCHANNELS, strlen(CMD_SETROAMSCANCHANNELS)) == 0) {
+ int skip = strlen(CMD_SETROAMSCANCHANNELS) + 1;
+
+ ret = slsi_roam_scan_channels_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GETROAMSCANCHANNELS, strlen(CMD_GETROAMSCANCHANNELS)) == 0) {
+ ret = slsi_roam_scan_channels_read(dev, command, priv_cmd.total_len);
+#endif
+ } else if (strncasecmp(command, CMD_SET_PMK, strlen(CMD_SET_PMK)) == 0) {
+ ret = slsi_set_pmk(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_HAPD_GET_CHANNEL, strlen(CMD_HAPD_GET_CHANNEL)) == 0) {
+ ret = slsi_auto_chan_read(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_SET_SAP_CHANNEL_LIST, strlen(CMD_SET_SAP_CHANNEL_LIST)) == 0) {
+ ret = slsi_auto_chan_write(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_REASSOC, strlen(CMD_REASSOC)) == 0) {
+ int skip = strlen(CMD_REASSOC) + 1;
+
+ ret = slsi_reassoc_write(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_SENDACTIONFRAME, strlen(CMD_SENDACTIONFRAME)) == 0) {
+ int skip = strlen(CMD_SENDACTIONFRAME) + 1;
+
+ ret = slsi_send_action_frame(dev, command + skip,
+ priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_HAPD_MAX_NUM_STA, strlen(CMD_HAPD_MAX_NUM_STA)) == 0) {
+ int sta_num;
+ u8 *max_sta = command + strlen(CMD_HAPD_MAX_NUM_STA) + 1;
+
+ slsi_str_to_int(max_sta, &sta_num);
+ ret = slsi_setting_max_sta_write(dev, sta_num);
+ } else if (strncasecmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ char *country_code = command + strlen(CMD_COUNTRY) + 1;
+
+ ret = slsi_country_write(dev, country_code);
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ } else if (strncasecmp(command, CMD_BEACON_RECV, strlen(CMD_BEACON_RECV)) == 0) {
+ char *action = command + strlen(CMD_BEACON_RECV) + 1;
+
+ ret = slsi_forward_beacon(dev, action);
+#endif
+ } else if (strncasecmp(command, CMD_SETAPP2PWPSIE, strlen(CMD_SETAPP2PWPSIE)) == 0) {
+ ret = slsi_set_ap_p2p_wps_ie(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_P2PSETPS, strlen(CMD_P2PSETPS)) == 0) {
+ ret = slsi_set_p2p_oppps(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_P2PSETNOA, strlen(CMD_P2PSETNOA)) == 0) {
+ ret = slsi_p2p_set_noa_params(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_P2PECSA, strlen(CMD_P2PECSA)) == 0) {
+ ret = slsi_p2p_ecsa(dev, command);
+ } else if (strncasecmp(command, CMD_P2PLOSTART, strlen(CMD_P2PLOSTART)) == 0) {
+ ret = slsi_p2p_lo_start(dev, command);
+ } else if (strncasecmp(command, CMD_P2PLOSTOP, strlen(CMD_P2PLOSTOP)) == 0) {
+ ret = slsi_p2p_lo_stop(dev);
+ } else if (strncasecmp(command, CMD_TDLSCHANNELSWITCH, strlen(CMD_TDLSCHANNELSWITCH)) == 0) {
+ ret = slsi_tdls_channel_switch(dev, command + strlen(CMD_TDLSCHANNELSWITCH) + 1,
+ priv_cmd.total_len - (strlen(CMD_TDLSCHANNELSWITCH) + 1));
+ } else if (strncasecmp(command, CMD_SETROAMOFFLOAD, strlen(CMD_SETROAMOFFLOAD)) == 0) {
+ ret = slsi_roam_mode_write(dev, command + strlen(CMD_SETROAMOFFLOAD) + 1,
+ priv_cmd.total_len - (strlen(CMD_SETROAMOFFLOAD) + 1));
+ } else if (strncasecmp(command, CMD_SETROAMOFFLAPLIST, strlen(CMD_SETROAMOFFLAPLIST)) == 0) {
+ ret = slsi_roam_offload_ap_list(dev, command + strlen(CMD_SETROAMOFFLAPLIST) + 1,
+ priv_cmd.total_len - (strlen(CMD_SETROAMOFFLAPLIST) + 1));
+ } else if (strncasecmp(command, CMD_SET_TX_POWER_CALLING, strlen(CMD_SET_TX_POWER_CALLING)) == 0) {
+ ret = slsi_set_tx_power_calling(dev, command + strlen(CMD_SET_TX_POWER_CALLING) + 1,
+ priv_cmd.total_len - (strlen(CMD_SET_TX_POWER_CALLING) + 1));
+ } else if (strncasecmp(command, CMD_SET_TX_POWER_SAR, strlen(CMD_SET_TX_POWER_SAR)) == 0) {
+ ret = slsi_set_tx_power_sar(dev, command + strlen(CMD_SET_TX_POWER_SAR) + 1,
+ priv_cmd.total_len - (strlen(CMD_SET_TX_POWER_SAR) + 1));
+ } else if (strncasecmp(command, CMD_GET_TX_POWER_SAR, strlen(CMD_GET_TX_POWER_SAR)) == 0) {
+ ret = slsi_get_tx_power_sar(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_GETREGULATORY, strlen(CMD_GETREGULATORY)) == 0) {
+ ret = slsi_get_regulatory(dev, command, priv_cmd.total_len);
+#ifdef CONFIG_SCSC_WLAN_HANG_TEST
+ } else if (strncasecmp(command, CMD_TESTFORCEHANG, strlen(CMD_TESTFORCEHANG)) == 0) {
+ ret = slsi_test_send_hanged_vendor_event(dev);
+#endif
+ } else if (strncasecmp(command, CMD_SET_FCC_CHANNEL, strlen(CMD_SET_FCC_CHANNEL)) == 0) {
+ ret = slsi_set_fcc_channel(dev, command + strlen(CMD_SET_FCC_CHANNEL) + 1,
+ priv_cmd.total_len - (strlen(CMD_SET_FCC_CHANNEL) + 1));
+ } else if (strncasecmp(command, CMD_FAKEMAC, strlen(CMD_FAKEMAC)) == 0) {
+ ret = slsi_fake_mac_write(dev, command + strlen(CMD_FAKEMAC) + 1);
+ } else if (strncasecmp(command, CMD_GETBSSRSSI, strlen(CMD_GETBSSRSSI)) == 0) {
+ ret = slsi_get_bss_rssi(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_GETBSSINFO, strlen(CMD_GETBSSINFO)) == 0) {
+ ret = slsi_get_bss_info(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_GETSTAINFO, strlen(CMD_GETSTAINFO)) == 0) {
+ ret = slsi_get_sta_info(dev, command, priv_cmd.total_len);
+ } else if (strncasecmp(command, CMD_GETASSOCREJECTINFO, strlen(CMD_GETASSOCREJECTINFO)) == 0) {
+ ret = slsi_get_assoc_reject_info(dev, command, priv_cmd.total_len);
+#ifdef CONFIG_SCSC_WLAN_LOW_LATENCY_MODE
+ } else if (strncasecmp(command, CMD_SET_LATENCY_MODE, strlen(CMD_SET_LATENCY_MODE)) == 0) {
+ ret = slsi_set_latency_mode(dev, command + strlen(CMD_SET_LATENCY_MODE) + 1,
+ priv_cmd.total_len - (strlen(CMD_SET_LATENCY_MODE) + 1));
+ } else if (strncasecmp(command, CMD_SET_POWER_MGMT, strlen(CMD_SET_POWER_MGMT)) == 0) {
+ ret = slsi_set_power_mode(dev, command + strlen(CMD_SET_POWER_MGMT) + 1,
+ priv_cmd.total_len - (strlen(CMD_SET_POWER_MGMT) + 1));
+#endif
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ } else if (strncasecmp(command, CMD_SET_ENHANCED_ARP_TARGET, strlen(CMD_SET_ENHANCED_ARP_TARGET)) == 0) {
+ int skip = strlen(CMD_SET_ENHANCED_ARP_TARGET) + 1;
+
+ ret = slsi_enhanced_arp_start_stop(dev, command + skip, priv_cmd.total_len - skip);
+ } else if (strncasecmp(command, CMD_GET_ENHANCED_ARP_COUNTS, strlen(CMD_SET_ENHANCED_ARP_TARGET)) == 0) {
+ ret = slsi_enhanced_arp_get_stats(dev, command, priv_cmd.total_len);
+#endif
+ } else if ((strncasecmp(command, CMD_RXFILTERSTART, strlen(CMD_RXFILTERSTART)) == 0) ||
+ (strncasecmp(command, CMD_RXFILTERSTOP, strlen(CMD_RXFILTERSTOP)) == 0) ||
+ (strncasecmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) ||
+ (strncasecmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) ||
+ (strncasecmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) ||
+ (strncasecmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)) {
+ ret = 0;
+ } else if ((strncasecmp(command, CMD_AMPDU_MPDU, strlen(CMD_AMPDU_MPDU)) == 0) ||
+ (strncasecmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0) ||
+ (strncasecmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) ||
+ (strncasecmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) ||
+ (strncasecmp(command, CMD_LTECOEX, strlen(CMD_LTECOEX)) == 0) ||
+ (strncasecmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0) ||
+ (strncasecmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) ||
+ (strncasecmp(command, CMD_SETCCXMODE, strlen(CMD_SETCCXMODE)) == 0) ||
+ (strncasecmp(command, CMD_SETDFSSCANMODE, strlen(CMD_SETDFSSCANMODE)) == 0) ||
+ (strncasecmp(command, CMD_SETSINGLEANT, strlen(CMD_SETSINGLEANT)) == 0)) {
+ ret = -ENOTSUPP;
+#ifndef SLSI_TEST_DEV
+ } else if ((strncasecmp(command, CMD_DRIVERDEBUGDUMP, strlen(CMD_DRIVERDEBUGDUMP)) == 0) ||
+ (strncasecmp(command, CMD_DRIVERDEBUGCOMMAND, strlen(CMD_DRIVERDEBUGCOMMAND)) == 0)) {
+ slsi_dump_stats(dev);
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_schedule_collection(SCSC_LOG_DUMPSTATE, SCSC_LOG_DUMPSTATE_REASON_DRIVERDEBUGDUMP);
+#else
+ ret = mx140_log_dump();
+#endif
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ } else if ((strncasecmp(command, CMD_ENHANCED_PKT_FILTER, strlen(CMD_ENHANCED_PKT_FILTER)) == 0)) {
+ const u8 enable = *(command + strlen(CMD_ENHANCED_PKT_FILTER) + 1) - '0';
+
+ ret = slsi_set_enhanced_pkt_filter(dev, enable);
+#endif
+#ifdef CONFIG_SCSC_WLAN_SET_NUM_ANTENNAS
+ } else if (strncasecmp(command, CMD_SET_NUM_ANTENNAS, strlen(CMD_SET_NUM_ANTENNAS)) == 0) {
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ const u16 num_of_antennas = *(command + strlen(CMD_SET_NUM_ANTENNAS) + 1) - '0';
+
+ /* We cannot lock in slsi_set_num_antennas as
+ * this is also called in slsi_start_ap with netdev_vif lock.
+ */
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ ret = slsi_set_num_antennas(dev, num_of_antennas);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+#endif
+ } else {
+ ret = -ENOTSUPP;
+ }
+ if (strncasecmp(command, CMD_SETROAMBAND, strlen(CMD_SETROAMBAND)) != 0 && strncasecmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) != 0 && copy_to_user(priv_cmd.buf, command, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ SLSI_NET_ERR(dev, "Buffer copy fail\n");
+ }
+
+exit:
+ kfree(command);
+ return ret;
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef __MX_IOCTL_H__
+#define __MX_IOCTL_H__
+
+#include <linux/if.h>
+#include <linux/netdevice.h>
+
/* Userspace exchange buffer for Android wireless private ioctls.
 * "buf" points into userspace memory (copied to/from with
 * copy_from_user()/copy_to_user()); presumably matches the layout used by
 * the Android Wi-Fi HAL — confirm against the HAL headers.
 */
struct android_wifi_priv_cmd {
	char *buf;	/* userspace command/result buffer */
	int used_len;	/* bytes currently used in buf */
	int total_len;	/* total capacity of buf, in bytes */
};
+
+int slsi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int slsi_get_sta_info(struct net_device *dev, char *command, int buf_len);
+
/* One contiguous run of supported channels: channel_count channels
 * starting at start_chan_num, spaced by increment, on the given band.
 */
struct slsi_supported_channels {
	int start_chan_num;	/* first channel number of the run */
	int channel_count;	/* number of channels in the run */
	int increment;		/* spacing between consecutive channel numbers */
	int band;		/* band identifier this run belongs to */
};
+#endif /* __MX_IOCTL_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <scsc/kic/slsi_kic_wifi.h>
+#include "dev.h"
+#include "debug.h"
+#include "mxman.h"
+
+static int wifi_kic_trigger_recovery(void *priv, enum slsi_kic_test_recovery_type type)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)priv;
+ char reason[80];
+
+ if (!sdev)
+ return -EINVAL;
+
+ if (sdev->device_state != SLSI_DEVICE_STATE_STARTED)
+ return -EAGAIN;
+
+ switch (type) {
+ case slsi_kic_test_recovery_type_subsystem_panic:
+ SLSI_INFO(sdev, "Trigger Wi-Fi firmware subsystem panic\n");
+ if (scsc_service_force_panic(sdev->service))
+ return -EINVAL;
+ return 0;
+ case slsi_kic_test_recovery_type_emulate_firmware_no_response:
+ SLSI_INFO(sdev, "Trigger Wi-Fi host panic\n");
+ snprintf(reason, sizeof(reason), "slsi_kic_test_recovery_type_emulate_firmware_no_response");
+ slsi_sm_service_failed(sdev, reason);
+ return 0;
+ case slsi_kic_test_recovery_type_watch_dog:
+ case slsi_kic_test_recovery_type_chip_crash:
+ default:
+ return -EINVAL;
+ }
+}
+
/* Callbacks exported to the common KIC layer; only test-recovery
 * triggering is implemented by this driver.
 */
static struct slsi_kic_wifi_ops kic_ops = {
	.trigger_recovery = wifi_kic_trigger_recovery,
};
+
+int wifi_kic_register(struct slsi_dev *sdev)
+{
+ return slsi_kic_wifi_ops_register((void *)sdev, &kic_ops);
+}
+
+void wifi_kic_unregister(void)
+{
+ return slsi_kic_wifi_ops_unregister(&kic_ops);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __KIC_H
+#define __KIC_H
+
+int wifi_kic_register(struct slsi_dev *sdev);
+void wifi_kic_unregister(void);
+
+#endif /* #ifndef __KIC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h"
+#include "debug.h"
+#include "udi.h"
+#include "log_clients.h"
+#include "unifiio.h"
+
+/* These functions should NOT be called from interrupt context */
+/* It is supposed to be called from process context, or
+ * NET_TX_SOFTIRQ - with BHs disabled and interrupts disabled
+ */
+/* Do not sleep */
+
+void slsi_log_clients_log_signal_safe(struct slsi_dev *sdev, struct sk_buff *skb, u32 direction)
+{
+ struct list_head *pos, *n;
+ struct slsi_log_client *log_client;
+ int dir = (direction == SLSI_LOG_DIRECTION_FROM_HOST) ? UDI_FROM_HOST : UDI_TO_HOST;
+
+ spin_lock_bh(&sdev->log_clients.log_client_spinlock);
+ list_for_each_safe(pos, n, &sdev->log_clients.log_client_list) {
+ log_client = list_entry(pos, struct slsi_log_client, q);
+ log_client->log_client_cb(log_client, skb, dir);
+ }
+ spin_unlock_bh(&sdev->log_clients.log_client_spinlock);
+}
+
+void slsi_log_clients_init(struct slsi_dev *sdev)
+{
+ INIT_LIST_HEAD(&sdev->log_clients.log_client_list);
+ spin_lock_init(&sdev->log_clients.log_client_spinlock);
+}
+
+/* The arg called "filter" will eventually be passed to kfree().
+ * - so pass a NULL if you are not doing any filtering
+ */
+int slsi_log_client_register(struct slsi_dev *sdev, void *log_client_ctx,
+ int (*log_client_cb)(struct slsi_log_client *, struct sk_buff *, int),
+ char *filter, int min_signal_id, int max_signal_id)
+{
+ struct slsi_log_client *log_client;
+ int first_in_list = 0;
+
+ first_in_list = list_empty(&sdev->log_clients.log_client_list);
+ log_client = kmalloc(sizeof(*log_client), GFP_KERNEL);
+ if (log_client == NULL)
+ return -ENOMEM;
+
+ log_client->min_signal_id = min_signal_id;
+ log_client->max_signal_id = max_signal_id;
+ log_client->signal_filter = filter;
+ log_client->log_client_ctx = log_client_ctx;
+ log_client->log_client_cb = log_client_cb;
+
+ /* Add to tail of log queue */
+ spin_lock_bh(&sdev->log_clients.log_client_spinlock);
+ list_add_tail(&log_client->q, &sdev->log_clients.log_client_list);
+ spin_unlock_bh(&sdev->log_clients.log_client_spinlock);
+
+ return 0;
+}
+
+void slsi_log_clients_terminate(struct slsi_dev *sdev)
+{
+ /* If the driver is configured to try and terminate UDI user space
+ * applications, the following will try to do so.
+ */
+ if (*sdev->term_udi_users) {
+ int num_polls_left = 50;
+ unsigned int timeout_ms = 4;
+
+ slsi_log_client_msg(sdev, UDI_DRV_UNLOAD_IND, 0, NULL);
+
+ /* Poll until all refs have gone away or timeout */
+ while (slsi_check_cdev_refs() && num_polls_left) {
+ msleep(timeout_ms);
+ num_polls_left--;
+ }
+ }
+}
+
/* Broadcast a UDI event to every registered log client.
 *
 * The spinlock is dropped around slsi_kernel_to_user_space_event()
 * because delivery towards user space may block or allocate.
 *
 * NOTE(review): dropping the lock mid list_for_each_safe() means a
 * concurrent slsi_log_client_unregister() could free the cached next
 * entry ("n") while we are unlocked — confirm callers serialise against
 * unregister, or that clients never unregister during a broadcast.
 */
void slsi_log_client_msg(struct slsi_dev *sdev, u16 event, u32 event_data_length, const u8 *event_data)
{
	struct list_head *pos, *n;
	struct slsi_log_client *log_client;

	spin_lock_bh(&sdev->log_clients.log_client_spinlock);
	list_for_each_safe(pos, n, &sdev->log_clients.log_client_list) {
		log_client = list_entry(pos, struct slsi_log_client, q);
		/* temporarily released: the event delivery must not run under the spinlock */
		spin_unlock_bh(&sdev->log_clients.log_client_spinlock);
		if (slsi_kernel_to_user_space_event(log_client, event, event_data_length, event_data))
			SLSI_WARN(sdev, "Failed to send event(0x%.4X) to UDI client 0x%p\n", event, log_client);
		spin_lock_bh(&sdev->log_clients.log_client_spinlock);
	}
	spin_unlock_bh(&sdev->log_clients.log_client_spinlock);
}
+
+void slsi_log_client_unregister(struct slsi_dev *sdev, void *log_client_ctx)
+{
+ struct list_head *pos, *n;
+ struct slsi_log_client *log_client;
+
+ spin_lock_bh(&sdev->log_clients.log_client_spinlock);
+ list_for_each_safe(pos, n, &sdev->log_clients.log_client_list) {
+ log_client = list_entry(pos, struct slsi_log_client, q);
+ if (log_client->log_client_ctx == log_client_ctx) {
+ kfree(log_client->signal_filter);
+ list_del(pos);
+ kfree(log_client);
+ }
+ }
+ spin_unlock_bh(&sdev->log_clients.log_client_spinlock);
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_LOG_CLIENTS_H__
+#define __SLSI_LOG_CLIENTS_H__
+
+#include <linux/types.h>
+
+struct slsi_dev;
+
/* One registered UDI log client on a device's client list. */
struct slsi_log_client {
	struct list_head q;		/* linkage on slsi_log_clients.log_client_list */
	void *log_client_ctx;		/* opaque context; also the key used by unregister */
	int (*log_client_cb)(struct slsi_log_client *, struct sk_buff *, int);	/* per-signal delivery callback */
	int min_signal_id;		/* lower signal-id bound (presumably a filter range — confirm at users) */
	int max_signal_id;		/* upper signal-id bound */
	char *signal_filter;		/* optional filter, kfree()d on unregister; NULL when unused */
};
+
/* Per-device collection of log clients plus the spinlock guarding it. */
struct slsi_log_clients {
	/* a std spinlock */
	spinlock_t log_client_spinlock;		/* protects log_client_list */
	struct list_head log_client_list;	/* list of struct slsi_log_client.q */
};
+
+#define SLSI_LOG_DIRECTION_FROM_HOST 0
+#define SLSI_LOG_DIRECTION_TO_HOST 1
+
+void slsi_log_clients_init(struct slsi_dev *sdev);
+int slsi_log_client_register(struct slsi_dev *sdev, void *log_client_ctx,
+ int (*log_client_cb)(struct slsi_log_client *, struct sk_buff *, int),
+ char *filter, int min_signal_id, int max_signal_id);
+
+void slsi_log_client_unregister(struct slsi_dev *sdev, void *log_client_ctx);
+void slsi_log_client_msg(struct slsi_dev *sdev, u16 event, u32 event_data_length, const u8 *event_data);
+void slsi_log_clients_log_signal_safe(struct slsi_dev *sdev, struct sk_buff *skb, u32 direction);
+static inline void slsi_log_clients_log_signal_fast(struct slsi_dev *sdev, struct slsi_log_clients *log_clients, struct sk_buff *skb, u32 direction)
+{
+ /* list_empty() is unsafe but fast
+ * slsi_log_clients_log_signal_safe() will make sure all is good
+ */
+ if (!list_empty(&log_clients->log_client_list))
+ slsi_log_clients_log_signal_safe(sdev, skb, direction);
+}
+
+void slsi_log_clients_terminate(struct slsi_dev *sdev);
+
+#endif /*__SLSI_LOG_CLIENTS_H__*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include "mbulk.h"
+#include "hip4_sampler.h"
+
+#include "debug.h"
+
+#define MBULK_DEBUG
+
+/* mbulk descriptor is aligned to 64 bytes considering the host processor's
+ * cache line size
+ */
+#define MBULK_ALIGN (64)
+#define MBULK_IS_ALIGNED(s) (((uintptr_t)(s) & (MBULK_ALIGN - 1)) == 0)
+#define MBULK_SZ_ROUNDUP(s) round_up(s, MBULK_ALIGN)
+
+/* a magic number to allocate the remaining buffer to the bulk buffer
+ * in a segment. Used in chained mbulk allocation.
+ */
+#define MBULK_DAT_BUFSZ_REQ_BEST_MAGIC ((u32)(-2))
+
+DEFINE_SPINLOCK(mbulk_pool_lock);
+
/* Dump every field of a mbulk descriptor (address and value) at debug
 * level. Compiled away to nothing when SLSI_DBG1_NODEV is a no-op.
 */
static inline void mbulk_debug(struct mbulk *m)
{
	(void)m; /* may be unused */
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->next_offset %p: %d\n", &m->next_offset, m->next_offset);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->flag %p: %d\n", &m->flag, m->flag);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->clas %p: %d\n", &m->clas, m->clas);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->pid %p: %d\n", &m->pid, m->pid);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->refcnt %p: %d\n", &m->refcnt, m->refcnt);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->dat_bufsz %p: %d\n", &m->dat_bufsz, m->dat_bufsz);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->sig_bufsz %p: %d\n", &m->sig_bufsz, m->sig_bufsz);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->len %p: %d\n", &m->len, m->len);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->head %p: %d\n", &m->head, m->head);
	SLSI_DBG1_NODEV(SLSI_MBULK, "m->chain_next_offset %p: %d\n", &m->chain_next_offset, m->chain_next_offset);
}
+
+/* mbulk pool */
/* A pool of equally-sized mbulk segments carved from one memory zone.
 * The free list is singly linked through mbulk.next_offset, which holds
 * a byte offset relative to the current segment (0 terminates the list).
 * All fields are protected by the global mbulk_pool_lock.
 */
struct mbulk_pool {
	bool valid; /** is valid */
	u8 pid; /** pool id */
	struct mbulk *free_list; /** head of free segment list */
	int free_cnt; /** current number of free segments */
	int usage[MBULK_CLASS_MAX]; /** statistics of usage per mbulk clas*/
	char *base_addr; /** base address of the pool */
	char *end_addr; /** exclusive end address of the pool */
	mbulk_len_t seg_size; /** segment size in bytes "excluding" struct mbulk */
	u8 guard; /** pool guard **/
#ifdef MBULK_DEBUG
	int tot_seg_num; /** total number of segments in this pool */
#endif
#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
	/* hip4-sampler minor number. NOTE(review): mbulk_pool_add() assigns
	 * this under CONFIG_SCSC_WLAN_DEBUG, not HIP4_PROFILING — the two
	 * guards look mismatched; confirm the intended config dependency.
	 */
	int minor;
#endif
};
+
+/* get a segment from a pool */
/* Pop one free segment from a pool for the given class.
 *
 * Returns NULL when the free list is empty or when handing out a segment
 * would dip into the pool's guard reserve. On success the segment header
 * is zeroed and stamped with the pool id and class; the caller fills in
 * the rest. Runs under the global mbulk_pool_lock.
 */
static inline struct mbulk *mbulk_pool_get(struct mbulk_pool *pool, enum mbulk_class clas)
{
	struct mbulk *m;
	u8 guard = pool->guard;

	spin_lock_bh(&mbulk_pool_lock);
	m = pool->free_list;

	if (m == NULL || pool->free_cnt <= guard) { /* guard */
		spin_unlock_bh(&mbulk_pool_lock);
		return NULL;
	}

	pool->free_cnt--;
	pool->usage[clas]++;

	SCSC_HIP4_SAMPLER_MBULK(pool->minor, (pool->free_cnt & 0x100) >> 8, (pool->free_cnt & 0xff), clas);

	/* next_offset is relative to the current head; 0 ends the list.
	 * Advance the head BEFORE memset() below wipes m's header.
	 */
	if (m->next_offset == 0)
		pool->free_list = NULL;
	else
		pool->free_list = (struct mbulk *)((uintptr_t)pool->free_list + m->next_offset);

	memset(m, 0, sizeof(*m));
	m->pid = pool->pid;
	m->clas = clas;

	spin_unlock_bh(&mbulk_pool_lock);
	return m;
}
+
+/* put a segment to a pool */
/* Push a segment back onto its pool's free list.
 *
 * A segment already marked MBULK_F_FREE is ignored (double-free guard).
 * NOTE(review): that flag check happens before the lock is taken, so two
 * concurrent frees of the same segment could both pass it — confirm
 * callers cannot race here.
 */
static inline void mbulk_pool_put(struct mbulk_pool *pool, struct mbulk *m)
{
	if (m->flag == MBULK_F_FREE)
		return;

	spin_lock_bh(&mbulk_pool_lock);
	pool->usage[m->clas]--;
	pool->free_cnt++;

	SCSC_HIP4_SAMPLER_MBULK(pool->minor, (pool->free_cnt & 0x100) >> 8, (pool->free_cnt & 0xff), m->clas);
	m->flag = MBULK_F_FREE;
	/* link to the old head; next_offset is relative to this segment */
	if (pool->free_list != NULL)
		m->next_offset = (uintptr_t)pool->free_list - (uintptr_t)m;
	else
		m->next_offset = 0;
	pool->free_list = m;
	spin_unlock_bh(&mbulk_pool_lock);
}
+
+/** mbulk pool configuration */
+struct mbulk_pool_config {
+ mbulk_len_t seg_sz; /** segment size "excluding" struct mbulk */
+ int seg_num; /** number of segments. If -1, all remaining space is used */
+};
+
+/** mbulk pools */
+static struct mbulk_pool mbulk_pools[MBULK_POOL_ID_MAX];
+
+/**
+ * allocate a mbulk segment from the pool
+ *
+ * Note that the refcnt would be zero if \dat_bufsz is zero, as there is no
+ * allocated bulk data.
+ * If \dat_bufsz is \MBULK_DAT_BUFSZ_REQ_BEST_MAGIC, then this function
+ * allocates all remaining buffer space to the bulk buffer.
+ *
+ */
+static struct mbulk *mbulk_seg_generic_alloc(struct mbulk_pool *pool,
+ enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz)
+{
+ struct mbulk *m;
+
+ if (pool == NULL)
+ return NULL;
+
+ /* get a segment from the pool */
+ m = mbulk_pool_get(pool, clas);
+ if (m == NULL)
+ return NULL;
+
+ /* signal buffer */
+ m->sig_bufsz = (mbulk_len_t)sig_bufsz;
+ if (sig_bufsz)
+ m->flag = MBULK_F_SIG;
+
+ /* data buffer.
+ * Note that data buffer size can be larger than the requested.
+ */
+ m->head = m->sig_bufsz;
+ if (dat_bufsz == 0) {
+ m->dat_bufsz = 0;
+ m->refcnt = 0;
+ } else if (dat_bufsz == MBULK_DAT_BUFSZ_REQ_BEST_MAGIC) {
+ m->dat_bufsz = pool->seg_size - m->sig_bufsz;
+ m->refcnt = 1;
+ } else {
+ m->dat_bufsz = (mbulk_len_t)dat_bufsz;
+ m->refcnt = 1;
+ }
+
+ mbulk_debug(m);
+ return m;
+}
+
+int mbulk_pool_get_free_count(u8 pool_id)
+{
+ struct mbulk_pool *pool;
+ int num_free;
+
+ if (pool_id >= MBULK_POOL_ID_MAX) {
+ WARN_ON(pool_id >= MBULK_POOL_ID_MAX);
+ return -EIO;
+ }
+
+ spin_lock_bh(&mbulk_pool_lock);
+ pool = &mbulk_pools[pool_id];
+
+ if (!pool->valid) {
+ WARN_ON(!pool->valid);
+ spin_unlock_bh(&mbulk_pool_lock);
+ return -EIO;
+ }
+
+ num_free = pool->free_cnt;
+ spin_unlock_bh(&mbulk_pool_lock);
+
+ return num_free;
+}
+
+/**
+ * Allocate a bulk buffer with an in-lined signal buffer
+ *
+ * A mbulk segment is allocated from the given the pool, if its size
+ * meeting the requested size.
+ *
+ */
+struct mbulk *mbulk_with_signal_alloc_by_pool(u8 pool_id, u16 colour,
+ enum mbulk_class clas, size_t sig_bufsz_req, size_t dat_bufsz)
+{
+ struct mbulk_pool *pool;
+ size_t sig_bufsz;
+ size_t tot_bufsz;
+ struct mbulk *m_ret;
+
+ /* data buffer should be aligned */
+ sig_bufsz = MBULK_SIG_BUFSZ_ROUNDUP(sizeof(struct mbulk) + sig_bufsz_req) - sizeof(struct mbulk);
+
+ if (pool_id >= MBULK_POOL_ID_MAX) {
+ WARN_ON(pool_id >= MBULK_POOL_ID_MAX);
+ return NULL;
+ }
+
+ pool = &mbulk_pools[pool_id];
+
+ if (!pool->valid) {
+ WARN_ON(!pool->valid);
+ return NULL;
+ }
+
+ /* check if this pool meets the size */
+ tot_bufsz = sig_bufsz + dat_bufsz;
+ if (dat_bufsz != MBULK_DAT_BUFSZ_REQ_BEST_MAGIC &&
+ pool->seg_size < tot_bufsz)
+ return NULL;
+
+ m_ret = mbulk_seg_generic_alloc(pool, clas, sig_bufsz, dat_bufsz);
+ /* Colour the mbulk */
+ if (m_ret) {
+ /* Use pool id for coding vif and peer_id */
+ m_ret->pid = m_ret->pid | (colour & 0xfe);
+ /* Code AC queue at the [7:6] bits */
+ m_ret->clas = m_ret->clas | ((colour & 0x300) >> 2);
+ }
+ return m_ret;
+}
+
+#ifdef MBULK_SUPPORT_SG_CHAIN
+/**
+ * allocate a chained mbulk buffer from a specific mbulk pool
+ *
+ */
+struct mbulk *mbulk_chain_with_signal_alloc_by_pool(u8 pool_id,
+ enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz)
+{
+ size_t tot_len;
+ struct mbulk *m, *head, *pre;
+
+ head = mbulk_with_signal_alloc_by_pool(pool_id, clas, sig_bufsz,
+ MBULK_DAT_BUFSZ_REQ_BEST_MAGIC);
+ if (head == NULL || MBULK_SEG_TAILROOM(head) >= dat_bufsz)
+ return head;
+
+ head->flag |= (MBULK_F_CHAIN_HEAD | MBULK_F_CHAIN);
+ tot_len = MBULK_SEG_TAILROOM(head);
+ pre = head;
+
+ while (tot_len < dat_bufsz) {
+ m = mbulk_with_signal_alloc_by_pool(pool_id, clas, 0,
+ MBULK_DAT_BUFSZ_REQ_BEST_MAGIC);
+ if (m == NULL)
+ break;
+ /* all mbulk in this chain has an attribue, MBULK_F_CHAIN */
+ m->flag |= MBULK_F_CHAIN;
+ tot_len += MBULK_SEG_TAILROOM(m);
+ pre->chain_next = m;
+ pre = m;
+ }
+
+ if (tot_len < dat_bufsz) {
+ mbulk_chain_free(head);
+ return NULL;
+ }
+
+ return head;
+}
+
+/**
+ * free a chained mbulk
+ */
+void mbulk_chain_free(struct mbulk *sg)
+{
+ struct mbulk *chain_next, *m;
+
+ /* allow null pointer */
+ if (sg == NULL)
+ return;
+
+ m = sg;
+ while (m != NULL) {
+ chain_next = m->chain_next;
+
+ /* is not scatter-gather anymore */
+ m->flag &= ~(MBULK_F_CHAIN | MBULK_F_CHAIN_HEAD);
+ mbulk_seg_free(m);
+
+ m = chain_next;
+ }
+}
+
+/**
+ * get a tail mbulk in the chain
+ *
+ */
+struct mbulk *mbulk_chain_tail(struct mbulk *m)
+{
+ while (m->chain_next != NULL)
+ m = m->chain_next;
+ return m;
+}
+
+/**
+ * total buffer size in a chanied mbulk
+ *
+ */
+size_t mbulk_chain_bufsz(struct mbulk *m)
+{
+ size_t tbufsz = 0;
+
+ while (m != NULL) {
+ tbufsz += m->dat_bufsz;
+ m = m->chain_next;
+ }
+
+ return tbufsz;
+}
+
+/**
+ * total data length in a chanied mbulk
+ *
+ */
+size_t mbulk_chain_tlen(struct mbulk *m)
+{
+ size_t tlen = 0;
+
+ while (m != NULL) {
+ tlen += m->len;
+ m = m->chain_next;
+ }
+
+ return tlen;
+}
+#endif /*MBULK_SUPPORT_SG_CHAIN*/
+
+/**
+ * add a memory zone to a mbulk pool list
+ *
+ */
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t seg_size, u8 guard, int minor)
+#else
+int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t seg_size, u8 guard)
+#endif
+{
+ struct mbulk_pool *pool;
+ struct mbulk *next;
+ size_t byte_per_block;
+
+ if (pool_id >= MBULK_POOL_ID_MAX) {
+ WARN_ON(pool_id >= MBULK_POOL_ID_MAX);
+ return -EIO;
+ }
+
+ pool = &mbulk_pools[pool_id];
+
+ if (!MBULK_IS_ALIGNED(base)) {
+ WARN_ON(!MBULK_IS_ALIGNED(base));
+ return -EIO;
+ }
+
+ /* total required memory per block */
+ byte_per_block = MBULK_SZ_ROUNDUP(sizeof(struct mbulk) + seg_size);
+
+ /* init pool structure */
+ memset(pool, 0, sizeof(*pool));
+ pool->pid = pool_id;
+ pool->base_addr = base;
+ pool->end_addr = end;
+ pool->seg_size = (mbulk_len_t)(byte_per_block - sizeof(struct mbulk));
+ pool->guard = guard;
+
+ /* allocate segments */
+ next = (struct mbulk *)base;
+ while (((uintptr_t)next + byte_per_block) <= (uintptr_t)end) {
+ memset(next, 0, sizeof(struct mbulk));
+ next->pid = pool_id;
+
+ /* add to the free list */
+ if (pool->free_list == NULL)
+ next->next_offset = 0;
+ else
+ next->next_offset = (uintptr_t)pool->free_list - (uintptr_t)next;
+ next->flag = MBULK_F_FREE;
+ pool->free_list = next;
+#ifdef MBULK_DEBUG
+ pool->tot_seg_num++;
+#endif
+ pool->free_cnt++;
+/* TOM.. BUG. */
+ next = (struct mbulk *)((uintptr_t)next + byte_per_block);
+ }
+
+ pool->valid = (pool->free_cnt) ? true : false;
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ pool->minor = minor;
+#endif
+ return 0;
+}
+
+/**
+ * add mbulk pools in MIF address space
+ */
+void mbulk_pool_dump(u8 pool_id, int max_cnt)
+{
+ struct mbulk_pool *pool;
+ struct mbulk *m;
+ int cnt = max_cnt;
+
+ pool = &mbulk_pools[pool_id];
+ m = pool->free_list;
+ while (m != NULL && cnt--)
+ m = (m->next_offset == 0) ? NULL :
+ (struct mbulk *)(pool->base_addr + m->next_offset);
+}
+
+/**
+ * free a mbulk in the virtual host
+ */
+void mbulk_free_virt_host(struct mbulk *m)
+{
+ u8 pool_id;
+ struct mbulk_pool *pool;
+
+ if (m == NULL)
+ return;
+
+ /* Remove colour */
+ pool_id = m->pid & 0x1;
+
+ pool = &mbulk_pools[pool_id];
+
+ if (!pool->valid) {
+ WARN_ON(!pool->valid);
+ return;
+ }
+
+ /* put to the pool */
+ mbulk_pool_put(pool, m);
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef __MBULK_H__
+#define __MBULK_H__
+
+/**
+ * mbulk(bulk memory) API
+ *
+ * This header file describes APIs of the bulk memory management.
+ * The diagram below is an example of a mbulk buffer with one
+ * segment (i.e. not a chained mbulk).
+ *
+ * sig_bufsz
+ * |<-------->|
+ * | |<--------- dat_bufsz ---------->|
+ * +--------------------------------------------------+
+ * | mbulk| signal | bulk buffer |
+ * +-------------------------+---------------+--------+
+ * | | | valid data | |
+ * | | |<--------+---->| |
+ * | | | mbulk_tlen(m) | |
+ * | |<----->| | |<------>|
+ * | mbulk_headroom(m)| | mbulk_tailroom(m)
+ * | | |
+ * | |-- off -->|
+ * v v |
+ * mbulk_get_sig(m) mbulk_dat(m) |
+ * v
+ * mbulk_dat_at(m,off)
+ *
+ * In general, all clients are supposed to use only mbulk_xxx() APIs (but not
+ * mbulk_seg_xxx() APIs), as they can handle S/G chained mbulk as well.
+ * But as of now, specially in Condor, S/G chained mbulk is not supported,
+ * which means the most of mbulk_xxx() would be wrappers of mbulk_seg_xxx().
+ *
+ * An in-lined signal buffer can be allocated along with a mbulk buffer.
+ * There is no direct life-cycle relationship between the signal and the
+ * associated mbulk in this case, which means that the signal buffer should be
+ * de-allocated independently of the mbulk buffer.
+ *
+ */
+
+/**
+ * bulk buffer descriptor
+ */
+struct mbulk;
+
+/**
+ * mbulk host pool ID
+ */
+#define MBULK_POOL_ID_DATA (0)
+#define MBULK_POOL_ID_CTRL (1)
+#define MBULK_POOL_ID_MAX (2)
+
+/**
+ * mbulk buffer classification
+ *
+ * Note that PACKED attribute is added to enum definition so that
+ * compiler assigns the smallest integral type (u8).
+ */
+enum mbulk_class {
+ /* Values are explicit and the enum is packed to u8; presumably part
+  * of the host/firmware shared ABI -- do not renumber (TODO confirm).
+  */
+ MBULK_CLASS_CONTROL = 0,
+ MBULK_CLASS_HOSTIO = 1,
+ MBULK_CLASS_DEBUG = 2,
+
+ MBULK_CLASS_FROM_HOST_DAT = 3,
+ MBULK_CLASS_FROM_HOST_CTL = 4,
+ MBULK_CLASS_FROM_RADIO = 5,
+ MBULK_CLASS_DPLP = 6,
+ MBULK_CLASS_OTHERS = 7,
+ MBULK_CLASS_FROM_RADIO_FORWARDED = 8,
+ MBULK_CLASS_MAX
+} __packed;
+
+/**
+ * The private definition of mbulk structure is included here
+ * so that its members can be directly accessed, and the access
+ * codes can be in-lined by the compiler.
+ * But client codes are not supposed to directly refer to mbulk
+ * members, nor use mbulk_seg_xxx() functions. Only modules handling
+ * mbulk scatter/gather chain would directly use mulk_seg_xxx() APIs.
+ */
+#include "mbulk_def.h"
+
+/**
+ * Get the bulk data reference counter
+ *
+ * After a bulk buffer with non-zero data buffer size is created,
+ * the reference counter is set to one. Each time it is duplicated,
+ * its reference counter would be increased.
+ *
+ * Note that the reference counter is initialized to zero if a signal
+ * is created from mbulk pool but with zero data buffer size, as there
+ * is no data buffer.
+ */
+static inline int mbulk_refcnt(const struct mbulk *m)
+{
+ /* Current share count of the bulk data (see header comment above). */
+ return MBULK_SEG_REFCNT(m);
+}
+
+/**
+ * Get the bulk data buffer size
+ *
+ */
+static inline int mbulk_buffer_size(const struct mbulk *m)
+{
+ /* Size in bytes of the data buffer area (dat_bufsz). */
+ return MBULK_SEG_DAT_BUFSIZE(m);
+}
+
+/**
+ * Check if mbulk has an in-lined signal buffer
+ *
+ */
+static inline bool mbulk_has_signal(const struct mbulk *m)
+{
+ /* True when MBULK_F_SIG is set, i.e. a signal follows the descriptor. */
+ return MBULK_SEG_HAS_SIGNAL(m);
+}
+
+/**
+ * Set mbulk to be read-only
+ */
+static inline void mbulk_set_readonly(struct mbulk *m)
+{
+ /* Sets MBULK_F_READONLY; writers should then use the _r accessors. */
+ MBULK_SEG_SET_READONLY(m);
+}
+
+/**
+ * is mbulk read-only
+ */
+static inline bool mbulk_is_readonly(const struct mbulk *m)
+{
+ /* True when MBULK_F_READONLY has been set on this mbulk. */
+ return MBULK_SEG_IS_READONLY(m);
+}
+
+/**
+ * check if mbulk is a scatter/gather chained buffer
+ *
+ */
+static inline bool mbulk_is_sg(const struct mbulk *m)
+{
+ /* True when this mbulk is the head of a scatter/gather chain. */
+ return MBULK_SEG_IS_CHAIN_HEAD(m);
+}
+
+/**
+ * check if mbulk is a part of scatter/gather chained buffer
+ *
+ */
+static inline bool mbulk_is_chained(const struct mbulk *m)
+{
+ /* True when this segment is a member (non-head) of a chain. */
+ return MBULK_SEG_IS_CHAINED(m);
+}
+
+/**
+ * Allocate a bulk buffer with an in-lined signal buffer
+ *
+ * Only one mbulk segment is used for allocation starting from the
+ * mbulk pool with the smallest segment size. If no segment fitting
+ * the requested size, then return NULL without trying to create
+ * a chained buffer.
+ *
+ */
+struct mbulk *mbulk_with_signal_alloc(enum mbulk_class clas, size_t sig_bufsz,
+ size_t dat_bufsz);
+/**
+ * Allocate a bulk buffer with an in-lined signal buffer
+ *
+ * A mbulk segment is allocated from the given the pool, if its size
+ * meeting the requested size.
+ *
+ */
+struct mbulk *mbulk_with_signal_alloc_by_pool(u8 pool_id, u16 colour,
+ enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz);
+
+/**
+ * Get the number of free mbulk slots in a pool
+ *
+ * Returns the number of mbulk slots available in a given pool.
+ */
+int mbulk_pool_get_free_count(u8 pool_id);
+
+/**
+ * Get a signal buffer address
+ *
+ * Given a mbulk buffer, returns a signal buffer address.
+ *
+ * @param m mbulk
+ * @return in-lined signal buffer
+ */
+static inline void *mbulk_get_seg(const struct mbulk *m)
+{
+ /* First byte immediately after the mbulk descriptor. */
+ return (void *)MBULK_SEG_B(m);
+}
+
+/**
+ * Get a signal buffer address
+ *
+ * Given a mbulk buffer, returns a signal buffer address if any in-lined
+ * signal buffer.
+ *
+ */
+static inline void *mbulk_get_signal(const struct mbulk *m)
+{
+ /* The signal, when present, sits directly after the descriptor. */
+ if (!mbulk_has_signal(m))
+  return NULL;
+ return mbulk_get_seg(m);
+}
+
+/**
+ * Allocate a bulk buffer
+ *
+ * Only one mbulk segment is used for allocation starting from the
+ * mbulk pool with the smallest segment size. If no segment fitting
+ * the requested size, then return NULL without trying to create
+ * a chained buffer.
+ *
+ */
+static inline struct mbulk *mbulk_alloc(enum mbulk_class clas, size_t dat_bufsz)
+{
+ /* sig_bufsz = 0: allocate without an in-lined signal buffer. */
+ return mbulk_with_signal_alloc(clas, 0, dat_bufsz);
+}
+
+/**
+ * free mbulk buffer
+ *
+ * After checking the bulk reference counter, this function return the buffer
+ * to the mbulk pool if it is zero. Note that this doesn't free the in-lined
+ * signal buffer.
+ */
+static inline void mbulk_free(struct mbulk *m)
+{
+ /* Single-segment delegate; the in-lined signal is not affected. */
+ mbulk_seg_free(m);
+}
+
+/**
+ * get bulk buffer address for read or write access
+ *
+ * The address is the buffer address after the headroom in the mbulk segment.
+ * Note that this function can only be used to access the data in the same
+ * segment, including a segment in the mbulk chain (for example, to access
+ * the 802.11 header of A-MSDU).
+ *
+ */
+static inline void *mbulk_dat_rw(const struct mbulk *m)
+{
+ /* Writing through a read-only mbulk is a caller bug: warn but proceed. */
+ WARN_ON(MBULK_SEG_IS_READONLY(m));
+ return MBULK_SEG_DAT(m);
+}
+
+/**
+ * get bulk buffer address for read-only
+ *
+ * The address is the buffer address after the headroom in the mbulk segment.
+ * Note that this function can only be used to access the data in the same
+ * segment, including a segment in the mbulk chain (for example, to access
+ * the 802.11 header of A-MSDU).
+ *
+ */
+static inline const void *mbulk_dat_r(const struct mbulk *m)
+{
+ /* Read-only view of the data area (start of valid data). */
+ return (const void *)MBULK_SEG_DAT(m);
+}
+
+/**
+ * get bulk buffer address at the offset for read or write access
+ *
+ */
+static inline void *mbulk_dat_at_rw(const struct mbulk *m, size_t off)
+{
+ /* Writing through a read-only mbulk is a caller bug: warn but proceed. */
+ WARN_ON(MBULK_SEG_IS_READONLY(m));
+ return MBULK_SEG_DAT_AT(m, off);
+}
+
+/**
+ * get bulk buffer address at the offset for read access
+ *
+ */
+static inline /*const*/ void *mbulk_dat_at_r(const struct mbulk *m, size_t off)
+{
+ /* Data area at byte offset off from the start of valid data. */
+ return (/*const */ void *)MBULK_SEG_DAT_AT(m, off);
+}
+
+/**
+ * get valid data length
+ *
+ */
+static inline size_t mbulk_tlen(const struct mbulk *m)
+{
+ /* Length in bytes of the valid data range. */
+ return MBULK_SEG_LEN(m);
+}
+
+/**
+ * get headroom
+ *
+ */
+static inline size_t mbulk_headroom(const struct mbulk *m)
+{
+ /* Free space between the signal buffer and the valid data. */
+ return MBULK_SEG_HEADROOM(m);
+}
+
+static inline size_t mbulk_tailroom(const struct mbulk *m)
+{
+ /* Free space after the valid data, up to the end of the data buffer. */
+ return MBULK_SEG_TAILROOM(m);
+}
+
+/**
+ * reserve headroom
+ *
+ * Note this API should be called right after mbulk is created or the valid
+ * data length is zero.
+ *
+ */
+static inline bool mbulk_reserve_head(struct mbulk *m, size_t headroom)
+{
+ /* Single-segment delegate; see usage caveat in the comment above. */
+ return mbulk_seg_reserve_head(m, headroom);
+}
+
+/**
+ * adjust the valid data range
+ *
+ * headroom would be placed after the signal buffer (or mbuf descriptor if
+ * no in-lined signal), and the valid data length is set to \len.
+ *
+ */
+static inline bool mbulk_adjust_range(struct mbulk *m, size_t headroom, size_t len)
+{
+ /* Single-segment delegate: set headroom and valid length together. */
+ return mbulk_seg_adjust_range(m, headroom, len);
+}
+
+/**
+ * extend the data range at the head
+ *
+ * The headroom would be reduced, and the data range is extended.
+ * To prepend data in the head, the headroom should have been reserved before.
+ *
+ */
+static inline bool mbulk_prepend_head(struct mbulk *m, size_t more)
+{
+ /* Single-segment delegate: grow data range into the headroom. */
+ return mbulk_seg_prepend_head(m, more);
+}
+
+/**
+ * extend the data at the tail
+ *
+ * Data range is expanded towards the tail.
+ *
+ */
+static inline bool mbulk_append_tail(struct mbulk *m, size_t more)
+{
+ /* Single-segment delegate: grow data range into the tailroom. */
+ return mbulk_seg_append_tail(m, more);
+}
+
+/**
+ * trim data at the head
+ *
+ * The headroom would be increased, and the valid data range is reduced
+ * accordingly.
+ *
+ */
+static inline bool mbulk_trim_head(struct mbulk *m, size_t less)
+{
+ /* Single-segment delegate: drop bytes from the front of the data. */
+ return mbulk_seg_trim_head(m, less);
+}
+
+/**
+ * trim data at the tail
+ *
+ * The data length would be reduced.
+ *
+ */
+static inline bool mbulk_trim_tail(struct mbulk *m, size_t less)
+{
+ /* Single-segment delegate: drop bytes from the end of the data. */
+ return mbulk_seg_trim_tail(m, less);
+}
+
+/**
+ * duplicate a mbulk
+ *
+ * There is no data copy. but the referece counter of the orignal mbulk is
+ * increased by one.
+ *
+ */
+static inline struct mbulk *mbulk_duplicate(struct mbulk *m)
+{
+ /* Single-segment delegate: share the data, bump the refcount. */
+ return mbulk_seg_duplicate(m);
+}
+
+/**
+ * clone a mbulk
+ *
+ * New mbulk buffer is created, and contents are copied. The signal is copied
+ * only when \copy_sig is TRUE.
+ *
+ */
+static inline struct mbulk *mbulk_clone(const struct mbulk *m, enum mbulk_class clas,
+ bool copy_sig)
+{
+ /* Single-segment delegate: deep copy; signal copied only if copy_sig. */
+ return mbulk_seg_clone(m, clas, copy_sig);
+}
+
+/**
+ * allocate a signal buffer from mbulk pool
+ *
+ */
+void *msignal_alloc(size_t sig_sz);
+
+/**
+ * free a signal buffer created from mbulk pool
+ *
+ */
+void msignal_free(void *sig);
+
+/**
+ * get mbulk descriptor given a signal buffer address
+ *
+ */
+struct mbulk *msignal_to_mbulk(void *sig);
+
+/**
+ * get next chained mbulk in a scatter/gathered list
+ */
+static inline scsc_mifram_ref mbulk_chain_next(struct mbulk *m)
+{
+ /* MIF RAM reference (offset) of the next segment in the chain. */
+ return MBULK_SEG_CHAIN_NEXT(m);
+}
+
+#ifdef MBULK_SUPPORT_SG_CHAIN
+/**
+ * Scatter/Gather Chained Mbulk APIs
+ * =================================
+ */
+
+/**
+ * allocate a chained mbulk buffer from a specific mbulk pool
+ *
+ */
+struct mbulk *mbulk_chain_with_signal_alloc_by_pool(u8 pool_id,
+ enum mbulk_class clas, size_t sig_bufsz, size_t dat_bufsz);
+
+/**
+ * free a chained mbulk
+ */
+void mbulk_chain_free(struct mbulk *sg);
+
+/**
+ * get a tail mbulk in the chain
+ *
+ */
+struct mbulk *mbulk_chain_tail(struct mbulk *m);
+
+/**
+ * total buffer size in a chanied mbulk
+ *
+ */
+size_t mbulk_chain_bufsz(struct mbulk *m);
+
+/**
+ * total data length in a chanied mbulk
+ *
+ */
+size_t mbulk_chain_tlen(struct mbulk *m);
+
+/**
+ * get a number of mbulk segments in a chained mbulk
+ */
+static inline int mbulk_chain_num(const struct mbulk *m)
+{
+ int n;
+
+ /* A non-chained mbulk counts as exactly one segment. */
+ if (!mbulk_is_sg(m))
+  return 1;
+
+ /* Walk the chain and count its members. */
+ for (n = 0; m != NULL; m = m->chain_next)
+  n++;
+ return n;
+}
+
+/* NOT IMPLEMENTED YET. */
+void *mbulk_chain_access(struct mbulk *m, size_t off, char *local_buf, size_t local_bufsz);
+void *mbulk_chain_writeback(struct mbulk *m, size_t off, char *local_buf, size_t local_bufsz);
+void *mbulk_chain_copy_from(struct mbulk *m, size_t off, char *buf, int len);
+void *mbulk_chain_copy_to(struct mbulk *m, size_t off, char *buf, int len);
+#endif /*MBULK_SUPPORT_SG_CHAIN*/
+
+/**
+ * init mbulk library
+ */
+/*extern void init_mbulk(void);*/
+void init_mbulk(void *mem, size_t pool_size);
+
+/**
+ * add a memory zone to a mbulk pool list
+ *
+ */
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t seg_size, u8 guard, int minor);
+#else
+int mbulk_pool_add(u8 pool_id, char *base, char *end, size_t buf_size, u8 guard);
+#endif
+/**
+ * check sanity of a mbulk pool
+ */
+void mbulk_pool_check_sanity(u8 pool_id);
+
+/**
+ * configure the handler which returning the buffer to the host
+ */
+void mbulk_set_handler_return_host_mbulk(void (*free_host_buf)(struct mbulk *m));
+
+/**
+ * free a mbulk in the virtual host
+ */
+void mbulk_free_virt_host(struct mbulk *m);
+void mbulk_pool_dump(u8 pool_id, int max_cnt);
+
+#endif /*__MBULK_H__*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef __MBULK_DEF_H__
+#define __MBULK_DEF_H__
+
+#include <linux/bug.h>
+#include <scsc/scsc_mifram.h>
+
+/**
+ * mbulk
+ *
+ * The mbulk supports ....
+ */
+
+/* MIF_HIP_CFG_BLUK_BUFFER_ALIGNE = 4
+ * Hence, data buffer is always aligned to 4.
+ */
+#define MBULK_SIG_BUFSZ_ROUNDUP(sig_bufsz) round_up(sig_bufsz, 4)
+
+/**
+ * bulk memory descriptor, managing semgments allocated from mbulk pools
+ *
+ * mbulk is defined in public header, to avoid the function call dummy operations...
+ * Privae variables staring with __ should be not be referred by the client
+ * codes.
+ *
+ * mbulk structure is shared with the host. Hence the structure should be packed.
+ * data buffer size in byte
+ *
+ * This could be duplicated as the buffer size can be calculated from
+ * pool block size. There are two reasons why the data buffer size is
+ * set in the descritpor:
+ * - if mbulk comes from the host, the firmware is not aware of its pool
+ * configuration
+ * - if the data buffer is less than the block size in the pool, it's more
+ * cache efficient to flush only the data buffer area
+ * <Q> probably the firwmare has to invalidate the entire block ?
+ *
+ */
+typedef u16 mbulk_len_t; /* width of all length/offset fields below */
+struct mbulk {
+ scsc_mifram_ref next_offset; /** next mbulk offset */
+ u8 flag; /** mbulk flags (MBULK_F_*) */
+ enum mbulk_class clas; /** bulk buffer classification */
+ u8 pid; /** mbulk pool id */
+ u8 refcnt; /** reference counter of bulk buffer */
+ mbulk_len_t dat_bufsz; /** data buffer size in byte */
+ mbulk_len_t sig_bufsz; /** signal buffer size in byte */
+ mbulk_len_t len; /** valid data length */
+ mbulk_len_t head; /** start offset of data after mbulk structure */
+ scsc_mifram_ref chain_next_offset; /** chain next mbulk offset */
+} __packed;
+
+/* mbulk flags */
+#define MBULK_F_SIG (1 << 0) /** has a signal after mbulk descriptor*/
+#define MBULK_F_READONLY (1 << 1) /** is read-only */
+#define MBULK_F_OBOUND (1 << 2) /** other CPU(host) owns the buffer */
+#define MBULK_F_WAKEUP (1 << 3) /** frame waking up the host */
+#define MBULK_F_FREE (1 << 5) /** mbulk alreay freed */
+#define MBULK_F_CHAIN_HEAD (1 << 6) /** is an head in scatter/gather chain */
+#define MBULK_F_CHAIN (1 << 7) /** is scatter/gather chain */
+
+/* TODO: Define max number of chained mbulk */
+#define MBULK_MAX_CHAIN 16
+/* the buffer address just after mbulk descriptor */
+#define MBULK_SEG_B(m) ((uintptr_t)(m) + sizeof(struct mbulk))
+
+/* raw buffer size of mbulk segment including struct mbulk */
+#define MBULK_SEG_RAW_BUFSZ(m) (sizeof(struct mbulk) + (m)->sig_bufsz + (m)->dat_bufsz)
+
+/* operations against "a" mbulk segment. */
+#define MBULK_SEG_NEXT(m) ((m)->next_offset)
+#define MBULK_SEG_REFCNT(m) ((m)->refcnt)
+#define MBULK_SEG_HAS_SIGNAL(m) ((m)->flag & MBULK_F_SIG)
+#define MBULK_SEG_CHAIN_NEXT(m) ((m)->chain_next_offset)
+#define MBULK_SEG_IS_CHAIN_HEAD(m) ((m)->flag & MBULK_F_CHAIN_HEAD)
+#define MBULK_SEG_IS_CHAINED(m) ((m)->flag & MBULK_F_CHAIN)
+
+#define MBULK_SEG_IS_READONLY(m) ((m)->flag & MBULK_F_READONLY)
+#define MBULK_SEG_SET_READONLY(m) ((m)->flag |= MBULK_F_READONLY)
+
+#define MBULK_SEG_DAT(m) ((char *)MBULK_SEG_B(m) + (m)->head)
+#define MBULK_SEG_DAT_AT(m, off) ((char *)MBULK_SEG_B(m) + (m)->head + (mbulk_len_t)(off))
+#define MBULK_SEG_DAT_BUFSIZE(m) ((size_t)((m)->dat_bufsz))
+#define MBULK_SEG_SIG_BUFSIZE(m) ((size_t)((m)->sig_bufsz))
+#define MBULK_SEG_LEN(m) ((m)->len)
+#define MBULK_SEG_HEADROOM(m) ((size_t)((m)->head - (m)->sig_bufsz))
+#define MBULK_SEG_TAILROOM(m) ((size_t)((m)->dat_bufsz - (MBULK_SEG_HEADROOM(m) + (m)->len)))
+
+static inline bool mbulk_seg_reserve_head(struct mbulk *m, size_t headroom)
+{
+ /* NOTE(review): only headroom vs dat_bufsz is checked; this assumes
+  * len == 0 (call right after allocation) as documented in mbulk.h --
+  * confirm before calling on a populated mbulk.
+  */
+ if (WARN_ON(!(m->dat_bufsz >= headroom)))
+ return false;
+ m->head += (mbulk_len_t)headroom;
+ return true;
+}
+
+static inline bool mbulk_seg_adjust_range(struct mbulk *m, size_t headroom,
+ size_t len)
+{
+ /* Reject ranges that would not fit in the data buffer. */
+ if (WARN_ON(!(m->dat_bufsz >= (headroom + len))))
+ return false;
+ /* Data starts after the in-lined signal buffer plus the headroom. */
+ m->head = m->sig_bufsz + (mbulk_len_t)headroom;
+ m->len = (mbulk_len_t)len;
+ return true;
+}
+
+static inline bool mbulk_seg_prepend_head(struct mbulk *m, size_t more)
+{
+ /* Grow the valid data range forwards by consuming headroom. */
+ if (WARN_ON(!(MBULK_SEG_HEADROOM(m) >= more)))
+ return false;
+ m->head -= (mbulk_len_t)more;
+ m->len += (mbulk_len_t)more;
+ return true;
+}
+
+static inline bool mbulk_seg_append_tail(struct mbulk *m, size_t more)
+{
+ /* Grow the valid data range backwards by consuming tailroom. */
+ if (WARN_ON(!(MBULK_SEG_TAILROOM(m) >= more)))
+ return false;
+ m->len += (mbulk_len_t)more;
+ return true;
+}
+
+static inline bool mbulk_seg_trim_head(struct mbulk *m, size_t less)
+{
+ /* Advance the data start; len clamps to 0 if less exceeds it. */
+ m->head += (mbulk_len_t)less;
+ m->len = (m->len <= (mbulk_len_t)less) ? 0 : (m->len - (mbulk_len_t)less);
+ return true;
+}
+
+static inline bool mbulk_seg_trim_tail(struct mbulk *m, size_t less)
+{
+ /* Shrink the valid data range from the end; fail if less > len. */
+ if (WARN_ON(!(m->len >= (mbulk_len_t)less)))
+ return false;
+ m->len -= (mbulk_len_t)less;
+ return true;
+}
+
+/**
+ * free the bulk buffer of a segment
+ *
+ * Simply decrement the bulk reference counter and put to the pool if
+ * it is zero. Note that the signal buffer is not affected.
+ *
+ */
+void mbulk_seg_free(struct mbulk *m);
+
+/**
+ * duplicate the bulk buffer of a mbulk segment
+ *
+ * This is used to share the read-only bulk buffer. The reference counter is
+ * increased by one each time it is duplicated.
+ *
+ */
+struct mbulk *mbulk_seg_duplicate(struct mbulk *m);
+
+/**
+ * clone the bulk buffer of a mbulk segment
+ *
+ * A separate mbulk segment is created and the data is copied to it.
+ * If \copy_sig is true, then the signal is copied as well.
+ *
+ */
+struct mbulk *mbulk_seg_clone(const struct mbulk *m, enum mbulk_class clas, bool copy_sig);
+
+#endif /*__MBULK_DEF_H__*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <scsc/kic/slsi_kic_lib.h>
+
+#ifdef CONFIG_ARCH_EXYNOS
+#include <linux/soc/samsung/exynos-soc.h>
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#endif
+
+#include <scsc/scsc_mx.h>
+#include <scsc/scsc_release.h>
+#include "mgt.h"
+#include "debug.h"
+#include "mlme.h"
+#include "netif.h"
+#include "utils.h"
+#include "udi.h"
+#include "log_clients.h"
+#ifdef SLSI_TEST_DEV
+#include "unittest.h"
+#endif
+#include "hip.h"
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#include "procfs.h"
+#include "mib.h"
+#include "unifiio.h"
+#include "ba.h"
+#include "scsc_wifi_fcq.h"
+#include "cac.h"
+#include "cfg80211_ops.h"
+#include "nl80211_vendor.h"
+
+#ifdef CONFIG_SCSC_WLBTD
+#include "scsc_wlbtd.h"
+#endif
+#define CSR_WIFI_SME_MIB2_HOST_PSID_MASK 0x8000
+#define SLSI_DEFAULT_HW_MAC_ADDR "\x00\x00\x0F\x11\x22\x33"
+#define MX_WLAN_FILE_PATH_LEN_MAX (128)
+#define SLSI_MIB_REG_RULES_MAX (50)
+#define SLSI_MIB_MAX_CLIENT (10)
+#define SLSI_REG_PARAM_START_INDEX (1)
+
+static char *mib_file_t = "wlan_t.hcf";
+module_param(mib_file_t, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mib_file_t, "mib data filename");
+
+static char *mib_file2_t = "wlan_t_sw.hcf";
+module_param(mib_file2_t, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mib_file2_t, "mib data filename");
+
+/* MAC address override. If set to FF's, then
+ * the address is taken from config files or
+ * default derived from HW ID.
+ */
+static char mac_addr_override[] = "ff:ff:ff:ff:ff:ff";
+module_param_string(mac_addr, mac_addr_override, sizeof(mac_addr_override), S_IRUGO | S_IWUSR);
+/* MODULE_PARM_DESC must be keyed by the parameter name registered above
+ * ("mac_addr"), not the backing variable, or the description is attached
+ * to a parameter that does not exist and modinfo shows no help text.
+ */
+MODULE_PARM_DESC(mac_addr, "WLAN MAC address override");
+
+static int slsi_mib_open_file(struct slsi_dev *sdev, struct slsi_dev_mib_info *mib_info, const struct firmware **fw);
+static int slsi_mib_close_file(struct slsi_dev *sdev, const struct firmware *e);
+static int slsi_mib_download_file(struct slsi_dev *sdev, struct slsi_dev_mib_info *mib_info);
+static int slsi_country_to_index(struct slsi_802_11d_reg_domain *domain_info, const char *alpha2);
+static int slsi_mib_initial_get(struct slsi_dev *sdev);
+static int slsi_hanged_event_count;
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#define SLSI_MAX_CHAN_5G_BAND 25
+#define SLSI_2G_CHANNEL_ONE 2412
+static int slsi_5ghz_all_channels[] = {5180, 5200, 5220, 5240, 5260, 5280, 5300, 5320, 5500, 5520,
+ 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5720,
+ 5745, 5765, 5785, 5805, 5825 };
+#endif
+
+/* MAC address override stored in /sys/wifi/mac_addr */
+static ssize_t sysfs_show_macaddr(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+static ssize_t sysfs_store_macaddr(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+
+static struct kobject *wifi_kobj_ref;
+static char sysfs_mac_override[] = "ff:ff:ff:ff:ff:ff";
+static struct kobj_attribute mac_attr = __ATTR(mac_addr, 0660, sysfs_show_macaddr, sysfs_store_macaddr);
+
+/* Retrieve mac address in sysfs global */
+static ssize_t sysfs_show_macaddr(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ /* buf is a sysfs page; bound the copy by the override string's size
+  * (18 bytes), which is well within the page.
+  */
+ return snprintf(buf, sizeof(sysfs_mac_override), "%s", sysfs_mac_override);
+}
+
+/* Update mac address in sysfs global */
+static ssize_t sysfs_store_macaddr(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int r;
+
+ SLSI_INFO_NODEV("Override WLAN MAC address %s\n", buf);
+
+ /* sysfs_mac_override is 18 bytes ("xx:xx:xx:xx:xx:xx" + NUL).
+  * "%18s" could store 18 characters plus the terminator (19 bytes),
+  * overflowing the buffer by one byte; "%17s" fits exactly. Also pass
+  * the array itself (decays to char *), not its address, to match %s.
+  */
+ r = sscanf(buf, "%17s", sysfs_mac_override);
+
+ return (r > 0) ? count : 0;
+}
+
+/* Register sysfs mac address override */
+void slsi_create_sysfs_macaddr(void)
+{
+ int ret;
+
+ /* Obtain the shared /sys/wifi kobject from the core driver. */
+ wifi_kobj_ref = mxman_wifi_kobject_ref_get();
+ pr_info("wifi_kobj_ref: 0x%p\n", wifi_kobj_ref);
+
+ if (!wifi_kobj_ref) {
+  pr_err("failed to create /sys/wifi/mac_addr\n");
+  return;
+ }
+
+ /* Expose the override file /sys/wifi/mac_addr */
+ ret = sysfs_create_file(wifi_kobj_ref, &mac_attr.attr);
+ if (ret)
+  pr_err("Can't create /sys/wifi/mac_addr\n");
+}
+
+/* Unregister sysfs mac address override */
+void slsi_destroy_sysfs_macaddr(void)
+{
+ /* Nothing to remove if registration never succeeded */
+ if (!wifi_kobj_ref)
+ return;
+
+ /* Destroy /sys/wifi/mac_addr file */
+ sysfs_remove_file(wifi_kobj_ref, &mac_attr.attr);
+
+ /* Drop our reference to the /sys/wifi virtual dir */
+ mxman_wifi_kobject_ref_put();
+}
+
+void slsi_purge_scan_results_locked(struct netdev_vif *ndev_vif, u16 scan_id)
+{
+ struct slsi_scan_result *entry = ndev_vif->scan[scan_id].scan_results;
+
+ /* Walk the singly-linked result list, releasing both cached frames
+  * and then the node itself.
+  */
+ while (entry) {
+  struct slsi_scan_result *next = entry->next;
+
+  slsi_kfree_skb(entry->beacon);
+  slsi_kfree_skb(entry->probe_resp);
+  kfree(entry);
+  entry = next;
+ }
+ ndev_vif->scan[scan_id].scan_results = NULL;
+}
+
+void slsi_purge_scan_results(struct netdev_vif *ndev_vif, u16 scan_id)
+{
+ /* Serialise against other users of the cached scan result list */
+ SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
+ slsi_purge_scan_results_locked(ndev_vif, scan_id);
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
+}
+
+struct sk_buff *slsi_dequeue_cached_scan_result(struct slsi_scan *scan, int *count)
+{
+ struct slsi_scan_result *entry = scan->scan_results;
+ struct sk_buff *skb = NULL;
+
+ if (!entry)
+  return NULL;
+
+ /* Hand back the beacon first if present, otherwise the probe response. */
+ if (entry->beacon) {
+  skb = entry->beacon;
+  entry->beacon = NULL;
+ } else if (entry->probe_resp) {
+  skb = entry->probe_resp;
+  entry->probe_resp = NULL;
+ } else {
+  SLSI_ERR_NODEV("Scan entry with no beacon /probe resp!!\n");
+ }
+
+ /* Once both frames have been consumed, unlink and free the entry,
+  * bumping the optional dequeue counter.
+  */
+ if (!entry->beacon && !entry->probe_resp) {
+  scan->scan_results = entry->next;
+  kfree(entry);
+  if (count)
+   (*count)++;
+ }
+ return skb;
+}
+
+/* Determine the WLAN MAC address, trying sources in priority order:
+ * 1) mac_addr module parameter, 2) /sys/wifi/mac_addr override,
+ * 3) per-device mac file (sdev->maddr_file_name), 4) file named by
+ * CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME, 5) built-in default, mixed with
+ * the Exynos SoC unique id when available. The result is written to addr
+ * (ETH_ALEN bytes). The unittest driver uses a fixed per-device address.
+ * An override of all-FF bytes means "not set" and falls through.
+ */
+void slsi_get_hw_mac_address(struct slsi_dev *sdev, u8 *addr)
+{
+#ifndef SLSI_TEST_DEV
+ const struct firmware *e = NULL;
+ int i;
+ u32 u[ETH_ALEN];
+ char path_name[MX_WLAN_FILE_PATH_LEN_MAX];
+ int r;
+ bool valid = false;
+
+ /* Module parameter override */
+ r = sscanf(mac_addr_override, "%02X:%02X:%02X:%02X:%02X:%02X", &u[0], &u[1], &u[2], &u[3], &u[4], &u[5]);
+ if (r != ETH_ALEN) {
+ SLSI_ERR(sdev, "mac_addr modparam set, but format is incorrect (should be e.g. xx:xx:xx:xx:xx:xx)\n");
+ goto mac_sysfs;
+ }
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (u[i] != 0xff)
+ valid = true;
+ addr[i] = u[i] & 0xff;
+ }
+
+ /* If the override is valid, use it */
+ if (valid) {
+ SLSI_INFO(sdev, "MAC address from modparam: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ u[0], u[1], u[2], u[3], u[4], u[5]);
+ return;
+ }
+
+ /* Sysfs parameter override */
+mac_sysfs:
+ r = sscanf(sysfs_mac_override, "%02X:%02X:%02X:%02X:%02X:%02X", &u[0], &u[1], &u[2], &u[3], &u[4], &u[5]);
+ if (r != ETH_ALEN) {
+ SLSI_ERR(sdev, "mac_addr in sysfs set, but format is incorrect (should be e.g. xx:xx:xx:xx:xx:xx)\n");
+ goto mac_file;
+ }
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (u[i] != 0xff)
+ valid = true;
+ addr[i] = u[i] & 0xff;
+ }
+
+ /* If the override is valid, use it */
+ if (valid) {
+ SLSI_INFO(sdev, "MAC address from sysfs: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ u[0], u[1], u[2], u[3], u[4], u[5]);
+ return;
+ }
+
+ /* read mac.txt */
+mac_file:
+ if (sdev->maddr_file_name) {
+ scnprintf(path_name, MX_WLAN_FILE_PATH_LEN_MAX, "wlan/%s", sdev->maddr_file_name);
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "MAC address file : %s\n", path_name);
+
+ r = mx140_file_request_device_conf(sdev->maxwell_core, &e, path_name);
+ if (r != 0)
+ goto mac_efs;
+
+ if (!e) {
+ SLSI_ERR(sdev, "mx140_file_request_device_conf() returned succes, but firmware was null\n");
+ goto mac_efs;
+ }
+ r = sscanf(e->data, "%02X:%02X:%02X:%02X:%02X:%02X", &u[0], &u[1], &u[2], &u[3], &u[4], &u[5]);
+ mx140_file_release_conf(sdev->maxwell_core, e);
+ if (r != ETH_ALEN) {
+ SLSI_ERR(sdev, "%s exists, but format is incorrect (should be e.g. xx:xx:xx:xx:xx:xx)\n", path_name);
+ goto mac_efs;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ addr[i] = u[i] & 0xff;
+ SLSI_INFO(sdev, "MAC address loaded from %s: %02X:%02X:%02X:%02X:%02X:%02X\n", path_name, u[0], u[1], u[2], u[3], u[4], u[5]);
+ return;
+ }
+mac_efs:
+#ifdef CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME
+ r = mx140_request_file(sdev->maxwell_core, CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME, &e);
+ if (r != 0)
+ goto mac_default;
+ if (!e) {
+ SLSI_ERR(sdev, "mx140_request_file() returned succes, but firmware was null\n");
+ goto mac_default;
+ }
+ r = sscanf(e->data, "%02X:%02X:%02X:%02X:%02X:%02X", &u[0], &u[1], &u[2], &u[3], &u[4], &u[5]);
+ if (r != ETH_ALEN) {
+ SLSI_ERR(sdev, "%s exists, but format is incorrect (%d) [%20s] (should be e.g. xx:xx:xx:xx:xx:xx)\n",
+ CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME, r, e->data);
+ goto mac_default;
+ }
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (u[i] != 0xff)
+ valid = true;
+ addr[i] = u[i] & 0xff;
+ }
+#endif
+ /* NOTE(review): this block is outside the #ifdef above yet references
+  * CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME in the SLSI_INFO below;
+  * presumably it does not build when that config is unset -- confirm.
+  */
+ /* If MAC address seems valid, finished */
+ if (valid) {
+ SLSI_INFO(sdev, "MAC address loaded from %s: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ CONFIG_SCSC_WLAN_MAC_ADDRESS_FILENAME, u[0], u[1], u[2], u[3], u[4], u[5]);
+
+ /* MAC address read could hold invalid values, try to fix it to normal address */
+ if (addr[0] & 0x01) {
+ /* Clear the multicast (I/G) bit to make the address unicast */
+ addr[0] = addr[0] & 0xfe;
+ SLSI_INFO(sdev, "MAC address invalid, fixed address: %pM\n", addr);
+ }
+ mx140_release_file(sdev->maxwell_core, e);
+ return;
+ }
+mac_default:
+ /* This is safe to call, even if the struct firmware handle is NULL */
+ /* NOTE(review): 'e' obtained from mx140_request_file() is released here
+  * with mx140_file_release_conf() on the !valid path -- confirm the two
+  * release APIs are interchangeable.
+  */
+ mx140_file_release_conf(sdev->maxwell_core, e);
+
+ SLSI_ETHER_COPY(addr, SLSI_DEFAULT_HW_MAC_ADDR);
+#ifdef CONFIG_ARCH_EXYNOS
+ /* Randomise MAC address from the soc uid */
+ addr[3] = (exynos_soc_info.unique_id & 0xFF0000000000) >> 40;
+ addr[4] = (exynos_soc_info.unique_id & 0x00FF00000000) >> 32;
+ addr[5] = (exynos_soc_info.unique_id & 0x0000FF000000) >> 24;
+#endif
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT,
+ "MAC addr file NOT found, using default MAC ADDR: %pM\n", addr);
+#else
+ /* We use FIXED Mac addresses with the unittest driver */
+ struct slsi_test_dev *uftestdev = (struct slsi_test_dev *)sdev->maxwell_core;
+
+ SLSI_ETHER_COPY(addr, uftestdev->hw_addr);
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Test Device Address: %pM\n", addr);
+#endif
+}
+
+/* Write driver/firmware version information to .wifiver.info so the
+ * framework can report it. Uses the wlbtd userspace daemon when
+ * CONFIG_SCSC_WLBTD is enabled, otherwise writes the file directly from
+ * the kernel. Skipped entirely when ANDROID_VERSION is not defined.
+ */
+static void write_wifi_version_info_file(struct slsi_dev *sdev)
+{
+#if defined(ANDROID_VERSION) && (ANDROID_VERSION >= 90000)
+ char *filepath = "/data/vendor/conn/.wifiver.info";
+#else
+ char *filepath = "/data/misc/conn/.wifiver.info";
+#endif
+ char buf[256];
+ char build_id_fw[128];
+ char build_id_drv[64];
+
+#ifndef CONFIG_SCSC_WLBTD
+ struct file *fp = NULL;
+
+ fp = filp_open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+
+ if (IS_ERR(fp)) {
+ SLSI_WARN(sdev, "version file wasn't found\n");
+ return;
+ } else if (!fp) {
+ SLSI_WARN(sdev, "%s doesn't exist.\n", filepath);
+ return;
+ }
+#endif
+ mxman_get_fw_version(build_id_fw, 128);
+ mxman_get_driver_version(build_id_drv, 64);
+
+ /* WARNING:
+ * Please do not change the format of the following string
+ * as it can have fatal consequences.
+ * The framework parser for the version may depend on this
+ * exact formatting.
+ *
+ * Also beware that ANDROID_VERSION will not be defined in AOSP.
+ */
+#if defined(ANDROID_VERSION) && (ANDROID_VERSION >= 90000)
+ /* P-OS */
+ snprintf(buf, sizeof(buf),
+ "%s\n" /* drv_ver: already appended by mxman_get_driver_version() */
+ "f/w_ver: %s\n"
+ "hcf_ver_hw: %s\n"
+ "hcf_ver_sw: %s\n"
+ "regDom_ver: %d.%d\n",
+ build_id_drv,
+ build_id_fw,
+ sdev->mib[0].platform,
+ sdev->mib[1].platform,
+ ((sdev->reg_dom_version >> 8) & 0xFF), (sdev->reg_dom_version & 0xFF));
+#else
+ /* O-OS, or unknown */
+ snprintf(buf, sizeof(buf),
+ "%s (f/w_ver: %s)\nregDom_ver: %d.%d\n",
+ build_id_drv,
+ build_id_fw,
+ ((sdev->reg_dom_version >> 8) & 0xFF), (sdev->reg_dom_version & 0xFF));
+#endif
+
+/* If ANDROID_VERSION is not known, avoid writing the file, as it could go to the wrong
+ * location.
+ */
+#ifdef ANDROID_VERSION
+#ifdef CONFIG_SCSC_WLBTD
+ wlbtd_write_file(filepath, buf);
+#else
+ /* NOTE(review): old-style kernel_write(file, buf, count, pos-by-value)
+  * signature; newer kernels take a loff_t pointer -- confirm against the
+  * target kernel version.
+  */
+ kernel_write(fp, buf, strlen(buf), 0);
+ if (fp)
+ filp_close(fp, NULL);
+#endif
+
+ SLSI_INFO(sdev, "Succeed to write firmware/host information to .wifiver.info\n");
+#else
+ SLSI_UNUSED_PARAMETER(filepath);
+#endif
+}
+
+/* Write the chip release name to .cid.info via the wlbtd daemon.
+ * No-op unless CONFIG_SCSC_WLBTD is enabled.
+ */
+static void write_m_test_chip_version_file(struct slsi_dev *sdev)
+{
+#ifdef CONFIG_SCSC_WLBTD
+ char *filepath = "/data/vendor/conn/.cid.info";
+ char buf[256];
+
+ snprintf(buf, sizeof(buf), "%s\n", SCSC_RELEASE_SOLUTION);
+
+ wlbtd_write_file(filepath, buf);
+
+ SLSI_WARN(sdev, "Wrote chip information to .cid.info\n");
+#endif
+}
+
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+/* Put the VIF into 802.11 monitor mode: adds a MONITOR vif in the
+ * firmware (with a broadcast device address) and switches the netdev
+ * link type to radiotap. Returns 0 on success, -EINVAL if the firmware
+ * rejects the vif (vif_type is restored to unspecified in that case).
+ */
+int slsi_start_monitor_mode(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u8 device_address[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+ ndev_vif->vif_type = FAPI_VIFTYPE_MONITOR;
+
+ if (slsi_mlme_add_vif(sdev, dev, dev->dev_addr, device_address) != 0) {
+ SLSI_NET_ERR(dev, "add VIF for Monitor mode failed\n");
+ ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ /* set the link type for the device; it depends on the format of
+ * packet the firmware is going to Pass to Host.
+ *
+ * If the firmware passes MA data in 802.11 frame format, then
+ * dev->type = ARPHRD_IEEE80211;
+ *
+ * If the firmware adds Radio TAP header to MA data,
+ * dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ */
+ dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ ndev_vif->activated = true;
+ return 0;
+}
+
+/* Tear down a monitor-mode VIF: deletes the firmware vif and runs the
+ * common vif deactivation path. Caller must hold ndev_vif->vif_mutex.
+ */
+void slsi_stop_monitor_mode(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "de-activate monitor VIF\n");
+ slsi_mlme_del_vif(sdev, dev);
+ slsi_vif_deactivated(sdev, dev);
+}
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+/* Log-collector callback: stream the cached HCF (MIB) files to the log
+ * collector as [count][name(32)][len(u16)][data] records. Returns 0 on
+ * success or the first scsc_log_collector_write() error.
+ */
+static int slsi_hcf_collect(struct scsc_log_collector_client *collect_client, size_t size)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)collect_client->prv;
+ int ret = 0;
+ u8 index = sdev->collect_mib.num_files;
+ u8 i;
+ u8 *data;
+
+ SLSI_INFO_NODEV("Collecting WLAN HCF\n");
+
+ if (!sdev->collect_mib.enabled)
+  SLSI_INFO_NODEV("Collection not enabled\n");
+
+ spin_lock(&sdev->collect_mib.in_collection);
+ /* Header: number of HCF files that follow */
+ ret = scsc_log_collector_write(&index, sizeof(char), 1);
+ if (ret)
+  goto unlock;
+
+ for (i = 0; i < index; i++) {
+  SLSI_INFO_NODEV("Collecting WLAN HCF. File %s\n", sdev->collect_mib.file[i].file_name);
+  /* Write file name */
+  ret = scsc_log_collector_write((char *)&sdev->collect_mib.file[i].file_name, 32, 1);
+  if (ret)
+   goto unlock;
+  /* Write file len */
+  ret = scsc_log_collector_write((char *)&sdev->collect_mib.file[i].len, sizeof(u16), 1);
+  if (ret)
+   goto unlock;
+  /* Write data (skip entries whose payload was never cached) */
+  data = sdev->collect_mib.file[i].data;
+  if (!data)
+   continue;
+  ret = scsc_log_collector_write((char *)data, sdev->collect_mib.file[i].len, 1);
+  if (ret)
+   goto unlock;
+ }
+
+unlock:
+ /* Single exit: the lock is released exactly once on every path,
+  * replacing four duplicated unlock-and-return sequences.
+  */
+ spin_unlock(&sdev->collect_mib.in_collection);
+ return ret;
+}
+
+/* Log-collector client registration for the WLAN HCF chunk.
+ * .prv is filled in with the sdev pointer and the client registered in
+ * slsi_start(); unregistered again in slsi_stop_chip().
+ */
+struct scsc_log_collector_client slsi_hcf_client = {
+	.name = "wlan_hcf",
+	.type = SCSC_LOG_CHUNK_WLAN_HCF,
+	.collect_init = NULL,
+	.collect = slsi_hcf_collect,
+	.collect_end = NULL,
+	.prv = NULL,
+};
+#endif
+
+/* Bring the WLAN service up.
+ *
+ * Opens the WLAN service, downloads the MIB configuration (HCF files),
+ * reads the initial MIB values, applies regulatory configuration and
+ * writes the version/softap info files. Serialised by start_stop_mutex;
+ * returns 0 on success or a negative error, restoring STOPPED state on
+ * any failure.
+ *
+ * Fixes vs previous revision:
+ *  - softap.info snprintf() calls now bound each write by the space
+ *    remaining in buf (previously sizeof(buf) regardless of offset,
+ *    which could write past the end of the 512-byte buffer);
+ *  - filp_open() error logs report PTR_ERR() (the errno value) instead
+ *    of IS_ERR() (always 1), with a matching %ld specifier;
+ *  - step log numbering made consistent ([1/3]..[3/3]).
+ */
+int slsi_start(struct slsi_dev *sdev)
+{
+#ifndef CONFIG_SCSC_DOWNLOAD_FILE
+	const struct firmware *fw = NULL;
+#endif
+	int err = 0, r;
+	int i;
+	char alpha2[3];
+#ifdef CONFIG_SCSC_WLAN_AP_INFO_FILE
+	u32 offset = 0;
+	struct file *fp = NULL;
+#if defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000
+	char *filepath = "/data/vendor/conn/.softap.info";
+#else
+	char *filepath = "/data/misc/conn/.softap.info";
+#endif
+	char buf[512];
+#endif
+#ifdef CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA
+	struct file *file_ptr = NULL;
+	char *ant_file_path = "/data/vendor/conn/.ant.info";
+	char ant_mode = '0';
+	u16 antenna = 0;
+#endif
+
+	if (WARN_ON(!sdev))
+		return -EINVAL;
+
+	SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+
+	slsi_wakelock(&sdev->wlan_wl);
+
+	if (sdev->device_state != SLSI_DEVICE_STATE_STOPPED) {
+		SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Device already started: device_state:%d\n", sdev->device_state);
+		goto done;
+	}
+
+	/* If a recovery is in flight, wait for it to finish before starting */
+	if (sdev->recovery_status) {
+		r = wait_for_completion_timeout(&sdev->recovery_completed,
+						msecs_to_jiffies(sdev->recovery_timeout));
+		if (r == 0)
+			SLSI_INFO(sdev, "recovery_completed timeout\n");
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+		reinit_completion(&sdev->recovery_completed);
+#else
+		/* This is how the macro is used in the older version. */
+		INIT_COMPLETION(sdev->recovery_completed);
+#endif
+	}
+
+	sdev->device_state = SLSI_DEVICE_STATE_STARTING;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+	reinit_completion(&sdev->sig_wait.completion);
+#else
+	INIT_COMPLETION(sdev->sig_wait.completion);
+#endif
+
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Step [1/3]: Start WLAN service\n");
+	SLSI_EC_GOTO(slsi_sm_wlan_service_open(sdev), err, err_done);
+	/**
+	 * Download MIB data, if any.
+	 */
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Step [2/3]: Send MIB configuration\n");
+
+	sdev->local_mib.mib_hash = 0; /* Reset localmib hash value */
+#ifndef SLSI_TEST_DEV
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	spin_lock_init(&sdev->collect_mib.in_collection);
+	sdev->collect_mib.num_files = 0;
+	sdev->collect_mib.enabled = false;
+#endif
+#ifndef CONFIG_SCSC_DOWNLOAD_FILE
+	/* The "_t" HCF is used in RF Test mode and wlanlite/production test mode */
+	if (slsi_is_rf_test_mode_enabled() || slsi_is_test_mode_enabled()) {
+		sdev->mib[0].mib_file_name = mib_file_t;
+		sdev->mib[1].mib_file_name = mib_file2_t;
+	} else {
+		sdev->mib[0].mib_file_name = slsi_mib_file;
+		sdev->mib[1].mib_file_name = slsi_mib_file2;
+	}
+
+	/* Place MIB files in shared memory */
+	for (i = 0; i < SLSI_WLAN_MAX_MIB_FILE; i++) {
+		err = slsi_mib_open_file(sdev, &sdev->mib[i], &fw);
+
+		/* Only the first file is mandatory */
+		if (i == 0 && err) {
+			SLSI_ERR(sdev, "mib: Mandatory wlan hcf missing. WLAN will not start (err=%d)\n", err);
+			slsi_sm_wlan_service_close(sdev);
+			goto err_done;
+		}
+	}
+
+	err = slsi_sm_wlan_service_start(sdev);
+	if (err) {
+		SLSI_ERR(sdev, "slsi_sm_wlan_service_start failed: err=%d\n", err);
+		slsi_mib_close_file(sdev, fw);
+		slsi_sm_wlan_service_close(sdev);
+		goto err_done;
+	}
+	slsi_mib_close_file(sdev, fw);
+#else
+	/* Download main MIB file via mlme_set */
+	err = slsi_sm_wlan_service_start(sdev);
+	if (err) {
+		SLSI_ERR(sdev, "slsi_sm_wlan_service_start failed: err=%d\n", err);
+		slsi_sm_wlan_service_close(sdev);
+		goto err_done;
+	}
+	SLSI_EC_GOTO(slsi_mib_download_file(sdev, &sdev->mib), err, err_hip_started);
+#endif
+	/* Always try to download optional localmib file via mlme_set, ignore error */
+	(void)slsi_mib_download_file(sdev, &sdev->local_mib);
+#endif
+	/**
+	 * Get f/w capabilities and default configuration and
+	 * configure firmware.
+	 */
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	sdev->device_config.rssi_boost_2g = 0;
+	sdev->device_config.rssi_boost_5g = 0;
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Step [3/3]: Get MIB configuration\n");
+	SLSI_EC_GOTO(slsi_mib_initial_get(sdev), err, err_hip_started);
+	SLSI_INFO(sdev, "=== Version info from the [MIB] ===\n");
+	SLSI_INFO(sdev, "HW Version : 0x%.4X (%u)\n", sdev->chip_info_mib.chip_version, sdev->chip_info_mib.chip_version);
+	SLSI_INFO(sdev, "Platform : 0x%.4X (%u)\n", sdev->plat_info_mib.plat_build, sdev->plat_info_mib.plat_build);
+	slsi_cfg80211_update_wiphy(sdev);
+
+	/* Get UnifiCountryList */
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	sdev->device_config.host_state = FAPI_HOSTSTATE_CELLULAR_ACTIVE;
+	err = slsi_read_unifi_countrylist(sdev, SLSI_PSID_UNIFI_COUNTRY_LIST);
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+	if (err)
+		goto err_hip_started;
+
+	/* Get unifiDefaultCountry */
+	err = slsi_read_default_country(sdev, alpha2, 1);
+	alpha2[2] = '\0';
+	if (err < 0)
+		goto err_hip_started;
+
+	/* unifiDefaultCountry != world_domain */
+	if (!(alpha2[0] == '0' && alpha2[1] == '0'))
+		if (memcmp(sdev->device_config.domain_info.regdomain->alpha2, alpha2, 2) != 0) {
+			memcpy(sdev->device_config.domain_info.regdomain->alpha2, alpha2, 2);
+
+			/* Read the regulatory params for the country */
+			if (slsi_read_regulatory_rules(sdev, &sdev->device_config.domain_info, alpha2) == 0) {
+				slsi_reset_channel_flags(sdev);
+				wiphy_apply_custom_regulatory(sdev->wiphy, sdev->device_config.domain_info.regdomain);
+			}
+		}
+	/* Do nothing for unifiDefaultCountry == world_domain */
+
+	/* write .wifiver.info */
+	write_wifi_version_info_file(sdev);
+
+	/* write .cid.info */
+	write_m_test_chip_version_file(sdev);
+
+#ifdef CONFIG_SCSC_WLAN_AP_INFO_FILE
+	/* writing .softap.info in /data/vendor/conn */
+	fp = filp_open(filepath, O_WRONLY | O_CREAT, 0644);
+
+	if (!fp) {
+		WARN(1, "%s doesn't exist\n", filepath);
+	} else if (IS_ERR(fp)) {
+		/* Report the actual error code, not IS_ERR()'s boolean */
+		WARN(1, "%s open returned error %ld\n", filepath, PTR_ERR(fp));
+	} else {
+		/* Each write is bounded by the space remaining in buf */
+		offset = snprintf(buf, sizeof(buf), "#softap.info\n");
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "DualBandConcurrency=%s\n", sdev->dualband_concurrency ? "yes" : "no");
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "DualInterface=%s\n", "yes");
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "5G=%s\n", sdev->band_5g_supported ? "yes" : "no");
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "maxClient=%d\n", !sdev->softap_max_client ? SLSI_MIB_MAX_CLIENT : sdev->softap_max_client);
+
+		/* following are always supported */
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "HalFn_setCountryCodeHal=yes\n");
+		offset += snprintf(buf + offset, sizeof(buf) - offset, "HalFn_getValidChannels=yes\n");
+#ifdef CONFIG_SCSC_WLBTD
+		wlbtd_write_file(filepath, buf);
+#else
+
+		kernel_write(fp, buf, strlen(buf), 0);
+#endif
+		if (fp)
+			filp_close(fp, NULL);
+
+		SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Succeed to write softap information to .softap.info\n");
+	}
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA
+	/* reading antenna mode from /data/vendor/conn/.ant.info */
+	file_ptr = filp_open(ant_file_path, O_RDONLY, 0);
+
+	if (!file_ptr) {
+		SLSI_DBG1(sdev, SLSI_CFG80211, "%s doesn't exist\n", ant_file_path);
+	} else if (IS_ERR(file_ptr)) {
+		/* Report the actual error code, not IS_ERR()'s boolean */
+		SLSI_DBG1(sdev, SLSI_CFG80211, "%s open returned error %ld\n", ant_file_path, PTR_ERR(file_ptr));
+	} else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+		kernel_read(file_ptr, &ant_mode, 1, &file_ptr->f_pos);
+#else
+		kernel_read(file_ptr, file_ptr->f_pos, &ant_mode, 1);
+#endif
+		antenna = ant_mode - '0';
+		filp_close(file_ptr, NULL);
+
+		slsi_set_mib_preferred_antenna(sdev, antenna);
+	}
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	/* Register with log collector to collect wlan hcf file */
+	slsi_hcf_client.prv = sdev;
+	scsc_log_collector_register_client(&slsi_hcf_client);
+	sdev->collect_mib.enabled = true;
+#endif
+	slsi_update_supported_channels_regd_flags(sdev);
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "---Driver started successfully---\n");
+	sdev->device_state = SLSI_DEVICE_STATE_STARTED;
+	memset(sdev->rtt_vif, -1, sizeof(sdev->rtt_vif));
+	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+
+	slsi_kic_system_event(slsi_kic_system_event_category_initialisation,
+			      slsi_kic_system_events_wifi_service_driver_started, GFP_KERNEL);
+
+	slsi_wakeunlock(&sdev->wlan_wl);
+	return err;
+
+err_hip_started:
+#ifndef SLSI_TEST_DEV
+	slsi_sm_wlan_service_stop(sdev);
+	slsi_hip_stop(sdev);
+	slsi_sm_wlan_service_close(sdev);
+#endif
+
+err_done:
+	sdev->device_state = SLSI_DEVICE_STATE_STOPPED;
+
+done:
+	slsi_wakeunlock(&sdev->wlan_wl);
+
+	slsi_kic_system_event(slsi_kic_system_event_category_initialisation,
+			      slsi_kic_system_events_wifi_on, GFP_KERNEL);
+
+	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+	return err;
+}
+
+/* Create and register a dynamic virtual interface.
+ *
+ * Allocates a netdev slot for @name, registers it with rtnl held, and
+ * applies @type (and use_4addr from @params, if given) under vif_mutex.
+ * Returns the new net_device, or NULL on any failure.
+ *
+ * NOTE(review): on the register failure path the slot added by
+ * slsi_netif_dynamic_iface_add() is not rolled back here — presumably
+ * reclaimed elsewhere; verify against the netif teardown path.
+ */
+struct net_device *slsi_dynamic_interface_create(struct wiphy *wiphy,
+						 const char *name,
+						 enum nl80211_iftype type,
+						 struct vif_params *params)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *new_dev;
+	struct netdev_vif *new_vif;
+	int idx;
+
+	SLSI_DBG1(sdev, SLSI_CFG80211, "name:%s\n", name);
+
+	idx = slsi_netif_dynamic_iface_add(sdev, name);
+	if (idx < 0)
+		return NULL;
+
+	new_dev = slsi_get_netdev(sdev, idx);
+	if (!new_dev)
+		return NULL;
+
+	if (slsi_netif_register_rtlnl_locked(sdev, new_dev))
+		return NULL;
+
+	new_vif = netdev_priv(new_dev);
+	SLSI_MUTEX_LOCK(new_vif->vif_mutex);
+	new_vif->iftype = type;
+	new_dev->ieee80211_ptr->iftype = type;
+	if (params)
+		new_dev->ieee80211_ptr->use_4addr = params->use_4addr;
+	SLSI_MUTEX_UNLOCK(new_vif->vif_mutex);
+
+	return new_dev;
+}
+
+/* Shut the chip down once the last netdev has gone down.
+ *
+ * Caller must hold start_stop_mutex. No-op unless the device is STARTED
+ * and netdev_up_count has reached zero. Ordering matters: wake waiters,
+ * unregister the log-collector client and free cached HCF data, reset
+ * regulatory state, then stop the service and HIP before unblocking MLME.
+ */
+static void slsi_stop_chip(struct slsi_dev *sdev)
+{
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	u8 index = sdev->collect_mib.num_files;
+	u8 i;
+#endif
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->start_stop_mutex));
+
+	SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "netdev_up_count:%d device_state:%d\n", sdev->netdev_up_count, sdev->device_state);
+
+	if (sdev->device_state != SLSI_DEVICE_STATE_STARTED)
+		return;
+
+	/* Only shutdown on the last device going down. */
+	if (sdev->netdev_up_count)
+		return;
+
+	/* Release anyone blocked on a signal from the firmware */
+	complete_all(&sdev->sig_wait.completion);
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	/* Disable collection before freeing the cached HCF file data so
+	 * slsi_hcf_collect() cannot run against freed buffers.
+	 */
+	sdev->collect_mib.enabled = false;
+	scsc_log_collector_unregister_client(&slsi_hcf_client);
+	for (i = 0; i < index; i++)
+		kfree(sdev->collect_mib.file[i].data);
+#endif
+
+	slsi_reset_channel_flags(sdev);
+	slsi_regd_init(sdev);
+	sdev->device_state = SLSI_DEVICE_STATE_STOPPING;
+
+	slsi_sm_wlan_service_stop(sdev);
+	sdev->device_state = SLSI_DEVICE_STATE_STOPPED;
+
+	slsi_hip_stop(sdev);
+#ifndef SLSI_TEST_DEV
+	slsi_sm_wlan_service_close(sdev);
+#endif
+	slsi_kic_system_event(slsi_kic_system_event_category_deinitialisation,
+			      slsi_kic_system_events_wifi_service_driver_stopped, GFP_KERNEL);
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	sdev->mlme_blocked = false;
+
+	slsi_kic_system_event(slsi_kic_system_event_category_deinitialisation,
+			      slsi_kic_system_events_wifi_off, GFP_KERNEL);
+
+	/* Report and reset skb-tracking debug counters for this run */
+	slsi_dbg_track_skb_report();
+	slsi_dbg_track_skb_reset();
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+}
+
+/* Clean up an activated VIF according to its type.
+ *
+ * @hw_available: true when the firmware can still accept MLME requests;
+ * when false only local (host-side) state is torn down.
+ * Caller must hold ndev_vif->vif_mutex. No-op if the VIF is not activated.
+ */
+void slsi_vif_cleanup(struct slsi_dev *sdev, struct net_device *dev, bool hw_available)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int i;
+
+	SLSI_NET_DBG3(dev, SLSI_INIT_DEINIT, "clean VIF\n");
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (ndev_vif->activated) {
+		netif_carrier_off(dev);
+		/* Close the controlled port for every valid peer first */
+		for (i = 0; i < SLSI_ADHOC_PEER_CONNECTIONS_MAX; i++) {
+			struct slsi_peer *peer = ndev_vif->peer_sta_record[i];
+
+			if (peer && peer->valid)
+				slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+		}
+
+		if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+			bool already_disconnected = false;
+
+			SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Station active: hw_available=%d\n", hw_available);
+			if (hw_available) {
+				if (ndev_vif->sta.sta_bss) {
+					/* Connected: issue a firmware disconnect and run
+					 * the full disconnect handling path.
+					 */
+					slsi_mlme_disconnect(sdev, dev, ndev_vif->sta.sta_bss->bssid, FAPI_REASONCODE_UNSPECIFIED_REASON, true);
+					slsi_handle_disconnect(sdev, dev, ndev_vif->sta.sta_bss->bssid, 0);
+					already_disconnected = true;
+				} else {
+					slsi_mlme_del_vif(sdev, dev);
+				}
+			}
+			if (!already_disconnected) {
+				SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Calling slsi_vif_deactivated\n");
+				slsi_vif_deactivated(sdev, dev);
+				/* Tell cfg80211 the connection is gone; the API
+				 * signature changed in kernel 4.2.
+				 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+				cfg80211_disconnected(dev, FAPI_REASONCODE_UNSPECIFIED_REASON, NULL, 0, false, GFP_ATOMIC);
+#else
+				cfg80211_disconnected(dev, FAPI_REASONCODE_UNSPECIFIED_REASON, NULL, 0, GFP_ATOMIC);
+#endif
+			}
+		} else if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+			SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "AP active\n");
+			if (hw_available) {
+				struct slsi_peer *peer;
+				int j = 0;
+
+				/* Disconnect every associated station, then
+				 * delete the VIF in firmware.
+				 */
+				while (j < SLSI_PEER_INDEX_MAX) {
+					peer = ndev_vif->peer_sta_record[j];
+					if (peer && peer->valid)
+						slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+					++j;
+				}
+				slsi_mlme_del_vif(sdev, dev);
+			}
+			SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Calling slsi_vif_deactivated\n");
+			slsi_vif_deactivated(sdev, dev);
+
+			if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO)
+				SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_NO_VIF);
+		} else if (ndev_vif->vif_type == FAPI_VIFTYPE_UNSYNCHRONISED) {
+			/* Unsync VIF: WLAN index hosts the unsync WLAN VIF,
+			 * any other index is a P2P unsync VIF.
+			 */
+			if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+				slsi_wlan_unsync_vif_deactivate(sdev, dev, hw_available);
+			} else {
+				SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "P2P active - Deactivate\n");
+				slsi_p2p_vif_deactivate(sdev, dev, hw_available);
+			}
+		}
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+		else if (ndev_vif->vif_type == FAPI_VIFTYPE_MONITOR)
+			slsi_stop_monitor_mode(sdev, dev);
+#endif
+	}
+}
+
+/* Abort and purge every outstanding scan on this netdev.
+ *
+ * Cancels firmware scans (unless MLME is blocked), frees cached scan
+ * results, and notifies cfg80211 that regular/scheduled scan requests
+ * have completed. Takes ndev_vif->scan_mutex.
+ */
+void slsi_scan_cleanup(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int i;
+	struct cfg80211_scan_info info = {.aborted = false};
+
+	SLSI_NET_DBG3(dev, SLSI_INIT_DEINIT, "clean scan_data\n");
+
+	SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+	for (i = 0; i < SLSI_SCAN_MAX; i++) {
+		/* Scan id in firmware is (ifnum << 8 | scan index) */
+		if ((ndev_vif->scan[i].scan_req || ndev_vif->scan[i].acs_request) &&
+		    !sdev->mlme_blocked)
+			slsi_mlme_del_scan(sdev, dev, (ndev_vif->ifnum << 8 | i), false);
+		slsi_purge_scan_results(ndev_vif, i);
+		/* cfg80211_scan_done() took a struct from kernel 4.9 */
+		if (ndev_vif->scan[i].scan_req && i == SLSI_SCAN_HW_ID)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+			cfg80211_scan_done(ndev_vif->scan[i].scan_req, &info);
+#else
+			cfg80211_scan_done(ndev_vif->scan[i].scan_req, false);
+#endif
+
+		/* cfg80211_sched_scan_stopped() takes a reqid from kernel 4.12 */
+		if (ndev_vif->scan[i].sched_req && i == SLSI_SCAN_SCHED_ID)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+			cfg80211_sched_scan_stopped(sdev->wiphy, ndev_vif->scan[i].sched_req->reqid);
+#else
+			cfg80211_sched_scan_stopped(sdev->wiphy);
+#endif
+
+		ndev_vif->scan[i].scan_req = NULL;
+		kfree(ndev_vif->scan[i].acs_request);
+		ndev_vif->scan[i].acs_request = NULL;
+		ndev_vif->scan[i].sched_req = NULL;
+	}
+	SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+}
+
+/* Take one net device down; caller must hold start_stop_mutex.
+ *
+ * Cleans scans first (takes scan_mutex internally), then the VIF and
+ * the up-count under vif_mutex, and finally lets slsi_stop_chip()
+ * decide whether this was the last interface and the chip should stop.
+ */
+static void slsi_stop_net_dev_locked(struct slsi_dev *sdev, struct net_device *dev, bool hw_available)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+	SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "Stopping netdev_up_count=%d, hw_available = %d\n", sdev->netdev_up_count, hw_available);
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->start_stop_mutex));
+
+	if (!ndev_vif->is_available) {
+		SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "Not Available\n");
+		return;
+	}
+
+	if (WARN_ON(!sdev->netdev_up_count)) {
+		SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "sdev->netdev_up_count=%d\n", sdev->netdev_up_count);
+		return;
+	}
+
+	/* Release anyone blocked waiting on a signal for this VIF */
+	complete_all(&ndev_vif->sig_wait.completion);
+
+	slsi_scan_cleanup(sdev, dev);
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	slsi_vif_cleanup(sdev, dev, hw_available);
+	ndev_vif->is_available = false;
+	sdev->netdev_up_count--;
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+	/* Shuts the chip down only if this was the last interface up */
+	slsi_stop_chip(sdev);
+}
+
+/* Called when a net device wants to go DOWN.
+ * The hardware is treated as unavailable while a recovery is in flight.
+ */
+void slsi_stop_net_dev(struct slsi_dev *sdev, struct net_device *dev)
+{
+	bool hw_available = !sdev->recovery_status;
+
+	SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+	slsi_stop_net_dev_locked(sdev, dev, hw_available);
+	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+}
+
+/* Called when we get sdio_removed.
+ *
+ * Takes every interface down with hw_available=false (the hardware is
+ * gone, so only host-side state is cleaned). Fix: pass the net_device
+ * returned by slsi_get_netdev_locked() instead of reaching into
+ * sdev->netdev[i] directly, keeping the lookup and the use consistent.
+ */
+void slsi_stop(struct slsi_dev *sdev)
+{
+	struct net_device *dev;
+	int i;
+
+	SLSI_MUTEX_LOCK(sdev->start_stop_mutex);
+	SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "netdev_up_count:%d\n", sdev->netdev_up_count);
+
+	complete_all(&sdev->sig_wait.completion);
+
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
+		dev = slsi_get_netdev_locked(sdev, i);
+		if (dev)
+			slsi_stop_net_dev_locked(sdev, dev, false);
+	}
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+	SLSI_MUTEX_UNLOCK(sdev->start_stop_mutex);
+}
+
+/* MIB download handling.
+ *
+ * Carve the next firmware-bound slice out of a raw MIB blob.
+ *
+ * Walks PSID records (4-byte header: u16 psid, u16 payload length, both
+ * little-endian), skipping host-only PSIDs, and copies firmware PSIDs
+ * into a freshly kmalloc'd buffer until the slice would exceed
+ * CSR_WIFI_HOSTIO_MIB_SET_MAX. On return *p_parsed_len is how many input
+ * octets were consumed and *p_mib_slice_len how many were copied.
+ * Caller owns (and must kfree) the returned buffer; NULL on empty input
+ * or allocation failure.
+ */
+static u8 *slsi_mib_slice(struct slsi_dev *sdev, const u8 *data, u32 length, u32 *p_parsed_len,
+			  u32 *p_mib_slice_len)
+{
+	const u8 *p = data;
+	u8 *mib_slice;
+	u32 mib_slice_len = 0;
+
+	SLSI_UNUSED_PARAMETER(sdev);
+
+	if (!length)
+		return NULL;
+
+	/* +4 headroom: a record may be up to 4 octets longer than 'length'
+	 * suggests once its header is included.
+	 */
+	mib_slice = kmalloc(length + 4, GFP_KERNEL);
+	if (!mib_slice)
+		return NULL;
+
+	while (length >= 4) {
+		u16 psid = SLSI_BUFF_LE_TO_U16(p);
+		u16 pslen = (u16)(4 + SLSI_BUFF_LE_TO_U16(&p[2]));
+
+		/* Records are padded to an even number of octets */
+		if (pslen & 0x1)
+			pslen++;
+
+		if (psid & CSR_WIFI_SME_MIB2_HOST_PSID_MASK) {
+			/* Host-only PSID: skip, do not send to firmware */
+		} else {
+			/* SLSI_ERR (sdev, "PSID=0x%04X : FW\n", psid); */
+#define CSR_WIFI_HOSTIO_MIB_SET_MAX (1800)
+			if ((mib_slice_len + pslen) > CSR_WIFI_HOSTIO_MIB_SET_MAX)
+				break;
+			/* NOTE(review): the bound below allows pslen up to
+			 * length + 4, i.e. a record that overruns the input by
+			 * up to 4 octets — presumably matching the +4 headroom
+			 * above; confirm this is intended rather than
+			 * 'pslen > length'.
+			 */
+			if (pslen > length + 4) {
+				SLSI_ERR(sdev, "length %u read from MIB file > space %u - corrupt file?\n", pslen, length + 4);
+				mib_slice_len = 0;
+				break;
+			}
+			memcpy(&mib_slice[mib_slice_len], p, pslen);
+			mib_slice_len += pslen;
+		}
+		p += pslen;
+		length -= pslen;
+	}
+
+	*p_mib_slice_len = mib_slice_len;
+	*p_parsed_len = (p - data);
+
+	return mib_slice;
+}
+
+/* Extract the platform name string from the HCF file.
+ *
+ * On entry mib_info->mib_data points just past the HCF header. The
+ * platform name is shoehorned into reserved PSIDs for backward
+ * compatibility, guaranteed first in the payload by configcmd:
+ *   0xFFFE - 16 bit version ID, value 1
+ *   0xFFFF - if version ID == 1, holds the platform name string
+ * All multi-octet fields are little-endian byte pairs.
+ *
+ * On success fills mib_info->platform (NUL-terminated, truncated to
+ * fit) and advances mib_data/mib_len past the consumed keys so the
+ * firmware never sees them. Returns 0 on success, -EINVAL otherwise
+ * (platform[] is left as an empty string).
+ *
+ * Fix: the high octet of the FFFF key length was shifted by 16 instead
+ * of 8, corrupting the length for any name longer than 255 octets and
+ * mis-reassembling the little-endian u16.
+ */
+static int slsi_mib_get_platform(struct slsi_dev_mib_info *mib_info)
+{
+	size_t plat_name_len;
+	int pos = 0;
+
+	mib_info->platform[0] = '\0';
+
+	/* Sanity - payload long enough for info? */
+	if (mib_info->mib_len < 12) {
+		SLSI_INFO_NODEV("HCF file too short\n");
+		return -EINVAL; /* file too short */
+	}
+
+	if (mib_info->mib_data[pos++] != 0xFE ||	/* Version ID FFFE */
+	    mib_info->mib_data[pos++] != 0xFF) {
+		SLSI_INFO_NODEV("No HCF version ID\n");
+		return -EINVAL; /* No version ID */
+	}
+	if (mib_info->mib_data[pos++] != 0x01 ||	/* Len 1, LE */
+	    mib_info->mib_data[pos++] != 0x00) {
+		SLSI_INFO_NODEV("Bad length\n");
+		return -EINVAL; /* Unknown length */
+	}
+	if (mib_info->mib_data[pos++] != 0x01 ||	/* Header ID 1, LE */
+	    mib_info->mib_data[pos++] != 0x00) {
+		SLSI_INFO_NODEV("Bad version ID\n");
+		return -EINVAL; /* Unknown version ID */
+	}
+	if (mib_info->mib_data[pos++] != 0xFF ||	/* Platform Name FFFF */
+	    mib_info->mib_data[pos++] != 0xFF) {
+		SLSI_INFO_NODEV("No HCF platform name\n");
+		return -EINVAL; /* No platform name */
+	}
+
+	/* Length of platform name: u16, little-endian */
+	plat_name_len = mib_info->mib_data[pos++];
+	plat_name_len |= (mib_info->mib_data[pos++] << 8);
+
+	/* Sanity check */
+	if (plat_name_len + pos > mib_info->mib_len || plat_name_len < 2) {
+		SLSI_ERR_NODEV("Bad HCF FFFF key length %zu\n",
+			       plat_name_len);
+		return -EINVAL; /* Implausible length */
+	}
+
+	/* Skip vldata header SC-506179-SP. This conveys the
+	 * length of the platform string and is 2 or 3 octets long
+	 * depending on the length of the string.
+	 */
+	{
+#define SLSI_VLDATA_STRING 0xA0
+#define SLSI_VLDATA_LEN    0x17
+
+		u8 vlen_hdr = mib_info->mib_data[pos++];
+		u8 vlen_len = vlen_hdr & SLSI_VLDATA_LEN;	/* size of length field */
+
+		/* Skip vlen header octet */
+		plat_name_len--;
+
+		SLSI_DBG1_NODEV(SLSI_INIT_DEINIT, "vlhdr 0x%x, len %u\n", vlen_hdr, vlen_len);
+
+		/* Is it an octet string type? */
+		if (!(vlen_hdr & SLSI_VLDATA_STRING)) {
+			SLSI_ERR_NODEV("No string vlen header 0x%x\n", vlen_hdr);
+			return -EINVAL;
+		}
+
+		/* Handle 1 or 2 octet length field only */
+		if (vlen_len > 2) {
+			SLSI_ERR_NODEV("Too long octet string header %u\n", vlen_len);
+			return -EINVAL;
+		}
+
+		/* Skip over the string length field.
+		 * Note we just use datalength anyway.
+		 */
+		pos += vlen_len;
+		plat_name_len -= vlen_len;
+	}
+
+	/* Limit the platform name to space in driver and read */
+	{
+		size_t trunc_len = plat_name_len;
+
+		if (trunc_len >= sizeof(mib_info->platform))
+			trunc_len = sizeof(mib_info->platform) - 1;
+
+		/* Extract platform name */
+		memcpy(mib_info->platform, &mib_info->mib_data[pos], trunc_len);
+		mib_info->platform[trunc_len] = '\0';
+
+		/* Print non-truncated string in log now */
+		SLSI_INFO_NODEV("MIB platform: %.*s\n", (int)plat_name_len, &mib_info->mib_data[pos]);
+
+		SLSI_DBG1_NODEV(SLSI_INIT_DEINIT, "plat_name_len: %zu + %u\n",
+				plat_name_len, (plat_name_len & 1));
+	}
+
+	/* Pad string to 16-bit boundary */
+	plat_name_len += (plat_name_len & 1);
+	pos += plat_name_len;
+
+	/* Advance over the keys we read, FW doesn't need them */
+	mib_info->mib_data += pos;
+	mib_info->mib_len -= pos;
+
+	SLSI_DBG1_NODEV(SLSI_INIT_DEINIT, "Skip %d octets HCF payload\n", pos);
+
+	return 0;
+}
+
+#define MGT_HASH_SIZE_BYTES 2 /* Hash will be contained in a uint32 */
+#define MGT_HASH_OFFSET     4
+/* Request a MIB (HCF) configuration file and validate its header.
+ *
+ * On success *fw holds the firmware handle (release with
+ * slsi_mib_close_file()), mib_info->mib_data/mib_len point past the
+ * 8-octet header, and mib_info->mib_hash holds the 16-bit hash from the
+ * header. When log collection is enabled a copy of the whole file is
+ * cached for later HCF collection. Returns 0 on success, negative on
+ * error; a missing file is not fatal (caller treats only mib[0] as
+ * mandatory).
+ *
+ * Fix: the cached file name was copied with memcpy(..., 32), reading 32
+ * octets from a NUL-terminated name that may be shorter — an
+ * out-of-bounds read. strlcpy() after memset() copies at most the name
+ * and keeps the remaining cache bytes zeroed for the fixed 32-octet
+ * write in slsi_hcf_collect().
+ */
+static int slsi_mib_open_file(struct slsi_dev *sdev, struct slsi_dev_mib_info *mib_info, const struct firmware **fw)
+{
+	int r = -1;
+	const struct firmware *e = NULL;
+	const char *mib_file_ext;
+	char path_name[MX_WLAN_FILE_PATH_LEN_MAX];
+	char *mib_file_name = mib_info->mib_file_name;
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	u8 index = sdev->collect_mib.num_files;
+	u8 *data;
+#endif
+
+	if (!mib_file_name || !fw)
+		return -EINVAL;
+
+	mib_info->mib_data = NULL;
+	mib_info->mib_len = 0;
+	mib_info->mib_hash = 0; /* Reset mib hash value */
+
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "MIB file - Name : %s\n", mib_file_name);
+
+	/* Use MIB file compatibility mode? */
+	mib_file_ext = strrchr(mib_file_name, '.');
+	if (!mib_file_ext) {
+		SLSI_ERR(sdev, "configuration file name '%s' invalid\n", mib_file_name);
+		return -EINVAL;
+	}
+
+	/* Build MIB file path from override */
+	scnprintf(path_name, MX_WLAN_FILE_PATH_LEN_MAX, "wlan/%s", mib_file_name);
+	SLSI_INFO(sdev, "Path to the MIB file : %s\n", path_name);
+
+	r = mx140_file_request_conf(sdev->maxwell_core, &e, "wlan", mib_file_name);
+	if (r || (!e)) {
+		SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "Skip MIB download as file %s is NOT found\n", mib_file_name);
+		*fw = e;
+		return r;
+	}
+
+	mib_info->mib_data = (u8 *)e->data;
+	mib_info->mib_len = e->size;
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+	/* Cache a copy of the file for later HCF log collection */
+	spin_lock(&sdev->collect_mib.in_collection);
+	memset(&sdev->collect_mib.file[index].file_name, 0, 32);
+	strlcpy(sdev->collect_mib.file[index].file_name, mib_file_name, 32);
+	sdev->collect_mib.file[index].len = mib_info->mib_len;
+	data = kmalloc(mib_info->mib_len, GFP_ATOMIC);
+	if (!data) {
+		spin_unlock(&sdev->collect_mib.in_collection);
+		goto cont;
+	}
+	memcpy(data, mib_info->mib_data, mib_info->mib_len);
+	sdev->collect_mib.file[index].data = data;
+	sdev->collect_mib.num_files += 1;
+	spin_unlock(&sdev->collect_mib.in_collection);
+cont:
+#endif
+	/* Check MIB file header */
+	if (mib_info->mib_len >= 8 &&		/* Room for header */
+	    /*(sdev->mib_data[6] & 0xF0) == 0x20 && */ /* WLAN subsystem */
+	    mib_info->mib_data[7] == 1) {	/* First file format */
+		int i;
+
+		mib_info->mib_hash = 0;
+
+		for (i = 0; i < MGT_HASH_SIZE_BYTES; i++)
+			mib_info->mib_hash = (mib_info->mib_hash << 8) | mib_info->mib_data[i + MGT_HASH_OFFSET];
+
+		SLSI_INFO(sdev, "MIB hash: 0x%.04x\n", mib_info->mib_hash);
+		/* All good - skip header and continue */
+		mib_info->mib_data += 8;
+		mib_info->mib_len -= 8;
+
+		/* Extract platform name if available */
+		slsi_mib_get_platform(mib_info);
+	} else {
+		/* Bad header */
+		SLSI_ERR(sdev, "configuration file '%s' has bad header\n", mib_info->mib_file_name);
+		mx140_file_release_conf(sdev->maxwell_core, e);
+		return -EINVAL;
+	}
+
+	*fw = e;
+	return 0;
+}
+
+/* Release a firmware handle obtained from slsi_mib_open_file().
+ * Returns 0, or -EIO when either argument is missing.
+ */
+static int slsi_mib_close_file(struct slsi_dev *sdev, const struct firmware *e)
+{
+	SLSI_DBG2(sdev, SLSI_INIT_DEINIT, "MIB close %p\n", e);
+
+	if (!sdev || !e)
+		return -EIO;
+
+	mx140_file_release_conf(sdev->maxwell_core, e);
+	return 0;
+}
+
+/* Download one MIB file to the firmware via mlme_set.
+ *
+ * Opens the file, then repeatedly slices it (each slice bounded by
+ * CSR_WIFI_HOSTIO_MIB_SET_MAX) and sends each slice with
+ * slsi_mlme_set(). Individual set failures are logged but do not stop
+ * the download. Returns the open error, a slicing sanity error, or the
+ * status of the last mlme_set.
+ */
+static int slsi_mib_download_file(struct slsi_dev *sdev, struct slsi_dev_mib_info *mib_info)
+{
+	int r = -1;
+	const struct firmware *e = NULL;
+	u8 *mib_slice;
+	u32 mib_slice_len, parsed_len;
+
+	r = slsi_mib_open_file(sdev, mib_info, &e);
+	if (r)
+		return r;
+	/**
+	 * MIB data should not be larger than CSR_WIFI_HOSTIO_MIB_SET_MAX.
+	 * Slice it into smaller ones and download one by one
+	 */
+	while (mib_info->mib_len > 0) {
+		mib_slice = slsi_mib_slice(sdev, mib_info->mib_data, mib_info->mib_len, &parsed_len, &mib_slice_len);
+		if (!mib_slice)
+			break;
+		if (mib_slice_len == 0 || mib_slice_len > mib_info->mib_len) {
+			/* Sanity check MIB parsing */
+			SLSI_ERR(sdev, "slsi_mib_slice returned implausible %d\n", mib_slice_len);
+			r = -EINVAL;
+			kfree(mib_slice);
+			break;
+		}
+		r = slsi_mlme_set(sdev, NULL, mib_slice, mib_slice_len);
+		kfree(mib_slice);
+		if (r != 0) /* some mib can fail to be set, but continue */
+			SLSI_ERR(sdev, "mlme set failed r=0x%x during downloading:'%s'\n",
+				 r, mib_info->mib_file_name);
+
+		/* Advance past the input consumed by this slice */
+		mib_info->mib_data += parsed_len;
+		mib_info->mib_len -= parsed_len;
+	}
+
+	slsi_mib_close_file(sdev, e);
+
+	return r;
+}
+
+static int slsi_mib_initial_get(struct slsi_dev *sdev)
+{
+ struct slsi_mib_data mibreq = { 0, NULL };
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ int *band = sdev->supported_5g_channels;
+ int rx_len = 0;
+ int r;
+ int i = 0;
+ int j = 0;
+ int chan_start = 0;
+ int chan_count = 0;
+ int index = 0;
+ int mib_index = 0;
+ static const struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_CHIP_VERSION, { 0, 0 } },
+ { SLSI_PSID_UNIFI_SUPPORTED_CHANNELS, { 0, 0 } },
+ { SLSI_PSID_UNIFI_HT_ACTIVATED, {0, 0} },
+ { SLSI_PSID_UNIFI_VHT_ACTIVATED, {0, 0} },
+ { SLSI_PSID_UNIFI_HT_CAPABILITIES, {0, 0} },
+ { SLSI_PSID_UNIFI_VHT_CAPABILITIES, {0, 0} },
+ { SLSI_PSID_UNIFI_HARDWARE_PLATFORM, {0, 0} },
+ { SLSI_PSID_UNIFI_REG_DOM_VERSION, {0, 0} },
+ { SLSI_PSID_UNIFI_NAN_ENABLED, {0, 0} },
+ { SLSI_PSID_UNIFI_DEFAULT_DWELL_TIME, {0, 0} },
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ { SLSI_PSID_UNIFI_WI_FI_SHARING5_GHZ_CHANNEL, {0, 0} },
+#endif
+#ifdef CONFIG_SCSC_WLAN_AP_INFO_FILE
+ { SLSI_PSID_UNIFI_DUAL_BAND_CONCURRENCY, {0, 0} },
+ { SLSI_PSID_UNIFI_MAX_CLIENT, {0, 0} },
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ { SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION, {0, 0} },
+#endif
+ { SLSI_PSID_UNIFI_DEFAULT_COUNTRY_WITHOUT_CH12_CH13, {0, 0} },
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ { SLSI_PSID_UNIFI_ARP_DETECT_ACTIVATED, {0, 0} },
+#endif
+ { SLSI_PSID_UNIFI_APF_ENABLED, {0, 0} }
+ };/*Check the mibrsp.dataLength when a new mib is added*/
+
+ /* 40 MHz bandwidth is not supported in 2.4 GHz in AP/GO Mode Currently.
+ * Reading the mib to check the support is removed and is initialized to 0.
+ */
+ sdev->fw_2g_40mhz_enabled = 0;
+ r = slsi_mib_encode_get_list(&mibreq, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+ if (r != SLSI_MIB_STATUS_SUCCESS)
+ return -ENOMEM;
+
+ mibrsp.dataLength = 184;
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (!mibrsp.data) {
+ kfree(mibreq.data);
+ return -ENOMEM;
+ }
+
+ r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data, mibrsp.dataLength, &rx_len);
+ kfree(mibreq.data);
+ if (r == 0) {
+ struct slsi_mib_value *values;
+
+ mibrsp.dataLength = (u32)rx_len;
+
+ values = slsi_mib_decode_get_list(&mibrsp, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+
+ if (!values) {
+ kfree(mibrsp.data);
+ return -EINVAL;
+ }
+
+ if (values[mib_index].type != SLSI_MIB_TYPE_NONE) { /* CHIP_VERSION */
+ SLSI_CHECK_TYPE(sdev, values[mib_index].type, SLSI_MIB_TYPE_UINT);
+ sdev->chip_info_mib.chip_version = values[mib_index].u.uintValue;
+ }
+
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) { /* SUPPORTED_CHANNELS */
+ int k = 0;
+ int increment = 4; /* increment channel by 4 for 5G and by 1 for 2G */
+ int buf_len = 150; /* 150 bytes for 14+25=39 channels and spaces between them */
+ char *supported_channels_buffer = kmalloc(buf_len, GFP_KERNEL);
+ int buf_pos = 0;
+ SLSI_CHECK_TYPE(sdev, values[mib_index].type, SLSI_MIB_TYPE_OCTET);
+ if (values[mib_index].type == SLSI_MIB_TYPE_OCTET) {
+ sdev->band_5g_supported = 0;
+ memset(sdev->supported_2g_channels, 0, sizeof(sdev->supported_2g_channels));
+ memset(sdev->supported_5g_channels, 0, sizeof(sdev->supported_5g_channels));
+ for (i = 0; i < values[mib_index].u.octetValue.dataLength / 2; i++) {
+ /* If any 5GHz channel is supported, update band_5g_supported */
+ if ((values[mib_index].u.octetValue.data[i * 2] > 14) &&
+ (values[mib_index].u.octetValue.data[i * 2 + 1] > 0)) {
+ sdev->band_5g_supported = 1;
+ break;
+ }
+ }
+ for (i = 0; i < values[mib_index].u.octetValue.dataLength; i += 2) {
+ increment = 4;
+ k = 0;
+ chan_start = values[mib_index].u.octetValue.data[i];
+ chan_count = values[mib_index].u.octetValue.data[i + 1];
+ band = sdev->supported_5g_channels;
+ if (chan_start < 15) {
+ increment = 1;
+ index = chan_start - 1;
+ band = sdev->supported_2g_channels;
+ } else if (chan_start >= 36 && chan_start <= 48) {
+ index = (chan_start - 36) / 4;
+ } else if (chan_start >= 52 && chan_start <= 64) {
+ index = ((chan_start - 52) / 4) + 4;
+ } else if (chan_start >= 100 && chan_start <= 140) {
+ index = ((chan_start - 100) / 4) + 8;
+ } else if (chan_start >= 149 && chan_start <= 165) {
+ index = ((chan_start - 149) / 4) + 20;
+ } else {
+ continue;
+ }
+
+ for (j = 0; j < chan_count; j++) {
+ band[index + j] = 1;
+ buf_pos += snprintf(supported_channels_buffer + buf_pos,
+ buf_len - buf_pos, "%d ", (chan_start + k));
+ k = k + increment;
+ }
+ sdev->enabled_channel_count += chan_count;
+ }
+ }
+ SLSI_DBG1(sdev, SLSI_CFG80211, "Value for Supported Channels mib: %s\n",
+ supported_channels_buffer);
+ }
+
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* HT enabled? */
+ sdev->fw_ht_enabled = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading HT enabled mib\n");
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* VHT enabled? */
+ sdev->fw_vht_enabled = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading VHT enabled mib\n");
+ if (values[++mib_index].type == SLSI_MIB_TYPE_OCTET) { /* HT capabilities */
+ if (values[mib_index].u.octetValue.dataLength >= 4)
+ memcpy(&sdev->fw_ht_cap, values[mib_index].u.octetValue.data, 4);
+ else
+ SLSI_WARN(sdev, "Error reading HT capabilities\n");
+ } else {
+ SLSI_WARN(sdev, "Error reading HT capabilities\n");
+ }
+ if (values[++mib_index].type == SLSI_MIB_TYPE_OCTET) { /* VHT capabilities */
+ if (values[mib_index].u.octetValue.dataLength >= 4)
+ memcpy(&sdev->fw_vht_cap, values[mib_index].u.octetValue.data, 4);
+ else
+ SLSI_WARN(sdev, "Error reading VHT capabilities\n");
+ } else {
+ SLSI_WARN(sdev, "Error reading VHT capabilities\n");
+ }
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) { /* HARDWARE_PLATFORM */
+ SLSI_CHECK_TYPE(sdev, values[mib_index].type, SLSI_MIB_TYPE_UINT);
+ sdev->plat_info_mib.plat_build = values[mib_index].u.uintValue;
+ } else {
+ SLSI_WARN(sdev, "Error reading Hardware platform\n");
+ }
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) { /* REG_DOM_VERSION */
+ SLSI_CHECK_TYPE(sdev, values[mib_index].type, SLSI_MIB_TYPE_UINT);
+ sdev->reg_dom_version = values[mib_index].u.uintValue;
+ } else {
+ SLSI_WARN(sdev, "Error reading Reg domain version\n");
+ }
+
+ /* NAN enabled? */
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) {
+ sdev->nan_enabled = values[mib_index].u.boolValue;
+ } else {
+ sdev->nan_enabled = false;
+ SLSI_WARN(sdev, "Error reading NAN enabled mib\n");
+ }
+ SLSI_DBG1(sdev, SLSI_CFG80211, "Value for NAN enabled mib : %d\n", sdev->nan_enabled);
+
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) { /* UnifiForcedScheduleDuration */
+ SLSI_CHECK_TYPE(sdev, values[mib_index].type, SLSI_MIB_TYPE_UINT);
+ sdev->fw_dwell_time = values[mib_index].u.uintValue;
+ } else {
+ SLSI_WARN(sdev, "Error reading UnifiForcedScheduleDuration\n");
+ }
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+ if (values[++mib_index].type == SLSI_MIB_TYPE_OCTET) { /* 5Ghz Allowed Channels */
+ if (values[mib_index].u.octetValue.dataLength >= 8) {
+ memcpy(&sdev->wifi_sharing_5ghz_channel, values[mib_index].u.octetValue.data, 8);
+ slsi_extract_valid_wifi_sharing_channels(sdev);
+ } else {
+ SLSI_WARN(sdev, "Error reading 5Ghz Allowed Channels\n");
+ }
+ } else {
+ SLSI_WARN(sdev, "Error reading 5Ghz Allowed Channels\n");
+ }
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_AP_INFO_FILE
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* Dual band concurrency */
+ sdev->dualband_concurrency = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading dual band concurrency\n");
+ if (values[++mib_index].type == SLSI_MIB_TYPE_UINT) /* max client for soft AP */
+ sdev->softap_max_client = values[mib_index].u.uintValue;
+ else
+ SLSI_WARN(sdev, "Error reading SoftAP max client\n");
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* Mac Randomization enable? */
+ sdev->fw_mac_randomization_enabled = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading Mac Randomization Support\n");
+#endif
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) { /* Disable ch12/ch13 */
+ sdev->device_config.disable_ch12_ch13 = values[mib_index].u.boolValue;
+ SLSI_DBG1(sdev, SLSI_CFG80211, "Value for default country without ch12/13 mib: %d\n",
+ sdev->device_config.disable_ch12_ch13);
+ } else {
+ SLSI_WARN(sdev, "Error reading default country without ch12/13 mib\n");
+ }
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* Enhanced Arp Detect Support */
+ sdev->device_config.fw_enhanced_arp_detect_supported = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading Enhanced Arp Detect Support mib\n");
+#endif
+ if (values[++mib_index].type != SLSI_MIB_TYPE_NONE) /* APF Support */
+ sdev->device_config.fw_apf_supported = values[mib_index].u.boolValue;
+ else
+ SLSI_WARN(sdev, "Error reading APF Support mib\n");
+
+ kfree(values);
+ }
+ kfree(mibrsp.data);
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ slsi_check_num_radios(sdev);
+#endif
+ return r;
+}
+
+/* Encode @value for @psid as a signed MIB integer and write it to the
+ * firmware via MLME-SET on @ndev (NULL for the global interface).
+ * Returns 0 on success, otherwise a MIB-status/negative error code.
+ */
+int slsi_set_mib_roam(struct slsi_dev *dev, struct net_device *ndev, u16 psid, int value)
+{
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int error = SLSI_MIB_STATUS_FAILURE;
+
+	/* Only issue the MLME-SET when encoding produced a payload. */
+	if (slsi_mib_encode_int(&mib_data, psid, value, 0) == SLSI_MIB_STATUS_SUCCESS &&
+	    mib_data.dataLength) {
+		error = slsi_mlme_set(dev, ndev, mib_data.data, mib_data.dataLength);
+		if (error)
+			SLSI_ERR(dev, "Err Setting MIB failed. error = %d\n", error);
+		kfree(mib_data.data);
+	}
+
+	return error;
+}
+
+#ifdef CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA
+/* Program the preferred-antenna bitmap MIB with @value.
+ * Returns 0 on success, otherwise a MIB-status/negative error code.
+ */
+int slsi_set_mib_preferred_antenna(struct slsi_dev *dev, u16 value)
+{
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int error = SLSI_MIB_STATUS_FAILURE;
+
+	/* Only issue the MLME-SET when encoding produced a payload. */
+	if (slsi_mib_encode_uint(&mib_data, SLSI_PSID_UNIFI_PREFERRED_ANTENNA_BITMAP,
+				 value, 0) == SLSI_MIB_STATUS_SUCCESS &&
+	    mib_data.dataLength) {
+		error = slsi_mlme_set(dev, NULL, mib_data.data, mib_data.dataLength);
+		if (error)
+			SLSI_ERR(dev, "Err Setting MIB failed. error = %d\n", error);
+		kfree(mib_data.data);
+	}
+
+	return error;
+}
+#endif
+
+/* Zero the firmware throughput-debug counters for @dev by writing 0 to
+ * SLSI_PSID_UNIFI_THROUGHPUT_DEBUG. Best-effort: failures are logged only.
+ */
+void slsi_reset_throughput_stats(struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int error = SLSI_MIB_STATUS_FAILURE;
+
+	/* Only issue the MLME-SET when encoding produced a payload. */
+	if (slsi_mib_encode_int(&mib_data, SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, 0, 0) == SLSI_MIB_STATUS_SUCCESS &&
+	    mib_data.dataLength) {
+		error = slsi_mlme_set(sdev, dev, mib_data.data, mib_data.dataLength);
+		if (error)
+			SLSI_ERR(sdev, "Err Setting MIB failed. error = %d\n", error);
+		kfree(mib_data.data);
+	}
+}
+
+/* Read a single integer-like MIB (@psid) from the firmware over MLME-GET.
+ *
+ * On success (return 0) *mib_value holds the decoded INT/UINT/BOOL value.
+ * Fix vs. previous version: if the firmware returns an unexpected type,
+ * *mib_value is now forced to 0 before it is logged/consumed instead of
+ * being left uninitialised (the old code only did a WARN_ON and then read
+ * the caller's uninitialised int for the debug print).
+ * Returns a negative errno or MLME error on failure.
+ */
+int slsi_get_mib_roam(struct slsi_dev *sdev, u16 psid, int *mib_value)
+{
+	struct slsi_mib_data mibreq = { 0, NULL };
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	int rx_len = 0;
+	int r;
+	struct slsi_mib_get_entry get_values[] = { { psid, { 0, 0 } } };
+
+	r = slsi_mib_encode_get_list(&mibreq, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+	if (r != SLSI_MIB_STATUS_SUCCESS)
+		return -ENOMEM;
+
+	/* Single PSID: 64 bytes is ample for the encoded response. */
+	mibrsp.dataLength = 64;
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		kfree(mibreq.data);
+		return -ENOMEM;
+	}
+
+	r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data, mibrsp.dataLength, &rx_len);
+	kfree(mibreq.data);
+
+	if (r == 0) {
+		struct slsi_mib_value *values;
+
+		mibrsp.dataLength = (u32)rx_len;
+
+		values = slsi_mib_decode_get_list(&mibrsp, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+		if (!values) {
+			kfree(mibrsp.data);
+			return -EINVAL;
+		}
+
+		switch (values[0].type) {
+		case SLSI_MIB_TYPE_INT:
+			*mib_value = (int)(values->u.intValue);
+			break;
+		case SLSI_MIB_TYPE_UINT:
+			*mib_value = (int)(values->u.uintValue);
+			break;
+		case SLSI_MIB_TYPE_BOOL:
+			*mib_value = (int)(values->u.boolValue);
+			break;
+		default:
+			/* OCTET/NONE or anything else is unexpected here;
+			 * define the output before anyone reads it.
+			 */
+			WARN_ON(1);
+			*mib_value = 0;
+			break;
+		}
+
+		SLSI_DBG2(sdev, SLSI_MLME, "MIB value = %d\n", *mib_value);
+		kfree(values);
+	} else {
+		SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
+	}
+
+	kfree(mibrsp.data);
+	return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+/* Fill @cap with the firmware's gscan limits, read in one MLME-GET batch.
+ * Fields whose MIB is missing (type NONE) keep their previous contents.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+int slsi_mib_get_gscan_cap(struct slsi_dev *sdev, struct slsi_nl_gscan_capabilities *cap)
+{
+	struct slsi_mib_data mibreq = { 0, NULL };
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_value *values;
+	int rx_len = 0;
+	int r;
+	static const struct slsi_mib_get_entry get_values[] = {
+		{ SLSI_PSID_UNIFI_GOOGLE_MAX_NUMBER_OF_PERIODIC_SCANS, { 0, 0 } },
+		{ SLSI_PSID_UNIFI_GOOGLE_MAX_RSSI_SAMPLE_SIZE, { 0, 0 } },
+		{ SLSI_PSID_UNIFI_GOOGLE_MAX_HOTLIST_APS, { 0, 0 } },
+		{ SLSI_PSID_UNIFI_GOOGLE_MAX_SIGNIFICANT_WIFI_CHANGE_APS, { 0, 0 } },
+		{ SLSI_PSID_UNIFI_GOOGLE_MAX_BSSID_HISTORY_ENTRIES, { 0, 0 } },
+	};
+
+	r = slsi_mib_encode_get_list(&mibreq, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+	if (r != SLSI_MIB_STATUS_SUCCESS)
+		return -ENOMEM;
+
+	mibrsp.dataLength = 64;
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		kfree(mibreq.data);
+		return -ENOMEM;
+	}
+
+	r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data, mibrsp.dataLength, &rx_len);
+	kfree(mibreq.data);
+	if (r != 0) {
+		kfree(mibrsp.data);
+		return r;
+	}
+
+	mibrsp.dataLength = (u32)rx_len;
+	values = slsi_mib_decode_get_list(&mibrsp, sizeof(get_values) / sizeof(struct slsi_mib_get_entry), get_values);
+	if (!values) {
+		kfree(mibrsp.data);
+		return -EINVAL;
+	}
+
+	/* Copy out each limit that the firmware actually reported. */
+	if (values[0].type != SLSI_MIB_TYPE_NONE) {
+		SLSI_CHECK_TYPE(sdev, values[0].type, SLSI_MIB_TYPE_UINT);
+		cap->max_scan_buckets = values[0].u.uintValue;
+	}
+	if (values[1].type != SLSI_MIB_TYPE_NONE) {
+		SLSI_CHECK_TYPE(sdev, values[1].type, SLSI_MIB_TYPE_UINT);
+		cap->max_rssi_sample_size = values[1].u.uintValue;
+	}
+	if (values[2].type != SLSI_MIB_TYPE_NONE) {
+		SLSI_CHECK_TYPE(sdev, values[2].type, SLSI_MIB_TYPE_UINT);
+		cap->max_hotlist_aps = values[2].u.uintValue;
+	}
+	if (values[3].type != SLSI_MIB_TYPE_NONE) {
+		SLSI_CHECK_TYPE(sdev, values[3].type, SLSI_MIB_TYPE_UINT);
+		cap->max_significant_wifi_change_aps = values[3].u.uintValue;
+	}
+	if (values[4].type != SLSI_MIB_TYPE_NONE) {
+		SLSI_CHECK_TYPE(sdev, values[4].type, SLSI_MIB_TYPE_UINT);
+		cap->max_bssid_history_entries = values[4].u.uintValue;
+	}
+
+	kfree(values);
+	kfree(mibrsp.data);
+	return r;
+}
+#endif
+
+/* Query the firmware's Android Packet Filter capabilities (version and
+ * maximum program length) and cache them in sdev->device_config.apf_cap.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+int slsi_mib_get_apf_cap(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct slsi_mib_data mibreq = { 0, NULL };
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_value *values;
+	int data_length = 0;
+	int r;
+	static const struct slsi_mib_get_entry get_values[] = {
+		{ SLSI_PSID_UNIFI_APF_VERSION, { 0, 0 } }, /* to get the supported APF version*/
+		{ SLSI_PSID_UNIFI_APF_MAX_SIZE, { 0, 0 } } /* to get APF_MAX_SIZE*/
+	};
+
+	r = slsi_mib_encode_get_list(&mibreq, (sizeof(get_values) / sizeof(struct slsi_mib_get_entry)),
+				     get_values);
+	if (r != SLSI_MIB_STATUS_SUCCESS)
+		return -ENOMEM;
+
+	/* 15*2 bytes for 2 Mib's */
+	mibrsp.dataLength = 30;
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "failed to allocate memory\n");
+		kfree(mibreq.data);
+		return -ENOMEM;
+	}
+
+	r = slsi_mlme_get(sdev, dev, mibreq.data, mibreq.dataLength, mibrsp.data,
+			  mibrsp.dataLength, &data_length);
+	kfree(mibreq.data);
+	if (r != 0) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_get_req failed(result:0x%4x)\n", r);
+		kfree(mibrsp.data);
+		return r;
+	}
+
+	mibrsp.dataLength = (u32)data_length;
+	values = slsi_mib_decode_get_list(&mibrsp,
+					  (sizeof(get_values) / sizeof(struct slsi_mib_get_entry)), get_values);
+	if (!values) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "mib decode list failed\n");
+		kfree(mibrsp.data);
+		return -ENOMEM;
+	}
+
+	if (values[0].type == SLSI_MIB_TYPE_UINT)
+		sdev->device_config.apf_cap.version = values[0].u.uintValue; /* supported APF version */
+	else
+		SLSI_ERR(sdev, "invalid type. index:%d\n", 0);
+	if (values[1].type == SLSI_MIB_TYPE_UINT)
+		sdev->device_config.apf_cap.max_length = values[1].u.uintValue; /* APF_MAX_LENGTH */
+	else
+		SLSI_ERR(sdev, "invalid type. index:%d\n", 1);
+
+	kfree(mibrsp.data);
+	kfree(values);
+	return r;
+}
+
+/* Read SLSI_PSID_UNIFI_RTT_CAPABILITIES and unpack the 8-octet blob into
+ * @cap.
+ *
+ * Fixes vs. previous version:
+ *  - removed the redundant kfree(NULL) in the allocation-failure path;
+ *  - the octet value's length is now validated before the 8 fixed-offset
+ *    reads below, preventing an out-of-bounds read on a short/malformed
+ *    firmware response.
+ * Returns 0 on success, a negative errno otherwise.
+ */
+int slsi_mib_get_rtt_cap(struct slsi_dev *sdev, struct net_device *dev, struct slsi_rtt_capabilities *cap)
+{
+	struct slsi_mib_data supported_rtt_capab = { 0, NULL };
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_value *values = NULL;
+
+	struct slsi_mib_get_entry get_values[] = { { SLSI_PSID_UNIFI_RTT_CAPABILITIES, { 0, 0 } } };
+
+	mibrsp.dataLength = 64;
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+		return -ENOMEM;
+	}
+
+	values = slsi_read_mibs(sdev, dev, get_values, 1, &mibrsp);
+	if (!values) {
+		kfree(mibrsp.data);
+		return -EINVAL;
+	}
+
+	if (values[0].type != SLSI_MIB_TYPE_OCTET) {
+		SLSI_ERR(sdev, "Invalid type (%d) for SLSI_PSID_UNIFI_RTT_CAPABILITIES", values[0].type);
+		kfree(mibrsp.data);
+		kfree(values);
+		return -EINVAL;
+	}
+	supported_rtt_capab = values[0].u.octetValue;
+	/* All 8 capability octets below must exist; reject short responses
+	 * rather than reading past the end of the buffer.
+	 */
+	if (!supported_rtt_capab.data || supported_rtt_capab.dataLength < 8) {
+		SLSI_ERR(sdev, "Invalid length (%u) for SLSI_PSID_UNIFI_RTT_CAPABILITIES", supported_rtt_capab.dataLength);
+		kfree(mibrsp.data);
+		kfree(values);
+		return -EINVAL;
+	}
+	cap->rtt_one_sided_supported = supported_rtt_capab.data[0];
+	cap->rtt_ftm_supported = supported_rtt_capab.data[1];
+	cap->lci_support = supported_rtt_capab.data[2];
+	cap->lcr_support = supported_rtt_capab.data[3];
+	cap->responder_supported = supported_rtt_capab.data[4];
+	cap->preamble_support = supported_rtt_capab.data[5];
+	cap->bw_support = supported_rtt_capab.data[6];
+	cap->mc_version = supported_rtt_capab.data[7];
+
+	kfree(values);
+	kfree(mibrsp.data);
+	return 0;
+}
+
+/* Initialise the peer record for @peer_address with association id @aid
+ * and attach it to @dev's vif.
+ *
+ * Caller must hold ndev_vif->vif_mutex and the vif must be activated.
+ * Returns the peer record on success, or NULL on any validation or
+ * queue-set initialisation failure (duplicate peer, aid 0, ...).
+ *
+ * Fix vs. previous version: the CONFIG_SCSC_WLAN_DEBUG and non-debug
+ * branches of the unicast qset init were byte-identical, so the #ifdef
+ * split has been collapsed into a single call.
+ */
+struct slsi_peer *slsi_peer_add(struct slsi_dev *sdev, struct net_device *dev, u8 *peer_address, u16 aid)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_peer *peer = NULL;
+	u16 queueset = 0;
+
+	if (WARN_ON(!aid)) {
+		SLSI_NET_ERR(dev, "Invalid aid(0) received\n");
+		return NULL;
+	}
+	queueset = MAP_AID_TO_QS(aid);
+
+	/* MUST only be called from the control path that has acquired the lock */
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+	if (WARN_ON(!ndev_vif->activated))
+		return NULL;
+
+	if (!peer_address) {
+		SLSI_NET_WARN(dev, "Peer without address\n");
+		return NULL;
+	}
+
+	peer = slsi_get_peer_from_mac(sdev, dev, peer_address);
+	if (peer) {
+		/* An existing record on the station queueset is tolerated when
+		 * TDLS is active (the peer migrates to a TDLS queueset).
+		 */
+		if (ndev_vif->sta.tdls_enabled && (peer->queueset == 0)) {
+			SLSI_NET_DBG3(dev, SLSI_CFG80211, "TDLS enabled and its station queueset\n");
+		} else {
+			SLSI_NET_WARN(dev, "Peer (MAC:%pM) already exists\n", peer_address);
+			return NULL;
+		}
+	}
+
+	if (slsi_get_peer_from_qs(sdev, dev, queueset)) {
+		SLSI_NET_WARN(dev, "Peer (queueset:%d) already exists\n", queueset);
+		return NULL;
+	}
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "%pM, aid:%d\n", peer_address, aid);
+
+	peer = ndev_vif->peer_sta_record[queueset];
+	if (!peer) {
+		/* If it reaches here, something has gone wrong */
+		SLSI_NET_ERR(dev, "Peer (queueset:%d) is NULL\n", queueset);
+		return NULL;
+	}
+
+	peer->aid = aid;
+	peer->queueset = queueset;
+	SLSI_ETHER_COPY(peer->address, peer_address);
+	peer->assoc_ie = NULL;
+	peer->assoc_resp_ie = NULL;
+	peer->is_wps = false;
+	peer->connected_state = SLSI_STA_CONN_STATE_DISCONNECTED;
+	/* Initialise the Station info */
+	slsi_peer_reset_stats(sdev, dev, peer);
+	ratelimit_state_init(&peer->sinfo_mib_get_rs, SLSI_SINFO_MIB_ACCESS_TIMEOUT, 0);
+
+	if (scsc_wifi_fcq_ctrl_q_init(&peer->ctrl_q) < 0) {
+		SLSI_NET_ERR(dev, "scsc_wifi_fcq_ctrl_q_init failed\n");
+		return NULL;
+	}
+
+	if (scsc_wifi_fcq_unicast_qset_init(dev, &peer->data_qs, peer->queueset, sdev, ndev_vif->ifnum, peer) < 0) {
+		SLSI_NET_ERR(dev, "scsc_wifi_fcq_unicast_qset_init failed\n");
+		scsc_wifi_fcq_ctrl_q_deinit(&peer->ctrl_q);
+		return NULL;
+	}
+
+	/* A peer is only valid once all the data is initialised
+	 * otherwise a process could check the flag and start to read
+	 * uninitialised data.
+	 */
+
+	if (ndev_vif->sta.tdls_enabled)
+		ndev_vif->sta.tdls_peer_sta_records++;
+	else
+		ndev_vif->peer_sta_records++;
+
+	ndev_vif->cfg80211_sinfo_generation++;
+	skb_queue_head_init(&peer->buffered_frames);
+
+	/* For TDLS this flag will be set while moving the packets from STAQ to TDLSQ */
+	/* TODO: changes for moving packets is removed for now. Enable this when these data path changes go in*/
+/*	if (!ndev_vif->sta.tdls_enabled)
+ *		peer->valid = true;
+ */
+	peer->valid = true;
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "created station peer %pM AID:%d\n", peer->address, aid);
+	return peer;
+}
+
+/* Reset the cached cfg80211 station statistics for @peer.
+ *
+ * Clears peer->sinfo and re-arms the 'filled' bitmap so cfg80211 will
+ * report the counters this driver maintains. Caller must hold
+ * ndev_vif->vif_mutex (asserted below).
+ */
+void slsi_peer_reset_stats(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "Peer:%pM\n", peer->address);
+
+ /* MUST only be called from the control path that has acquired the lock */
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ memset(&peer->sinfo, 0x00, sizeof(peer->sinfo));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+ /* Kernel v4.0 replaced the STATION_INFO_* flags with NL80211_STA_INFO_*
+  * bit positions, hence the two equivalent variants below.
+  */
+ peer->sinfo.filled = BIT(NL80211_STA_INFO_RX_BYTES) |
+ BIT(NL80211_STA_INFO_TX_BYTES) |
+ BIT(NL80211_STA_INFO_RX_PACKETS) |
+ BIT(NL80211_STA_INFO_TX_PACKETS) |
+ BIT(NL80211_STA_INFO_RX_DROP_MISC) |
+ BIT(NL80211_STA_INFO_TX_FAILED) |
+ BIT(NL80211_STA_INFO_SIGNAL) |
+ BIT(NL80211_STA_INFO_BSS_PARAM);
+#else
+ peer->sinfo.filled = STATION_INFO_RX_BYTES |
+ STATION_INFO_TX_BYTES |
+ STATION_INFO_RX_PACKETS |
+ STATION_INFO_TX_PACKETS |
+ STATION_INFO_RX_DROP_MISC |
+ STATION_INFO_TX_FAILED |
+ STATION_INFO_SIGNAL |
+ STATION_INFO_BSS_PARAM;
+#endif
+}
+
+/* Log driver-level failure statistics; currently only the number of
+ * "hanged" vendor events sent since the driver was loaded.
+ */
+void slsi_dump_stats(struct net_device *dev)
+{
+ SLSI_UNUSED_PARAMETER(dev);
+
+ SLSI_INFO_NODEV("slsi_hanged_event_count: %d\n", slsi_hanged_event_count);
+}
+
+/* Netlink attribute IDs carried in SLSI_NL80211_VENDOR_HANGED_EVENT. */
+enum slsi_wlan_vendor_attr_hanged_event {
+ SLSI_WLAN_VENDOR_ATTR_HANGED_EVENT_PANIC_CODE = 1, /* u16 SCSC panic code */
+ SLSI_WLAN_VENDOR_ATTR_HANGED_EVENT_MAX
+};
+
+/* Broadcast SLSI_NL80211_VENDOR_HANGED_EVENT to userspace carrying the
+ * firmware panic code, so the framework can recover the subsystem.
+ * No-op (returns 0) on kernels older than 3.14 that lack vendor events.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_send_hanged_vendor_event(struct slsi_dev *sdev, u16 scsc_panic_code)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	int payload_len = sizeof(scsc_panic_code);
+	struct sk_buff *skb;
+
+	slsi_hanged_event_count++;
+	SLSI_INFO(sdev, "Sending SLSI_NL80211_VENDOR_HANGED_EVENT , count: %d, reason =0x%2x\n", slsi_hanged_event_count, scsc_panic_code);
+
+	/* v4.1 added the wdev argument to the allocator. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, payload_len, SLSI_NL80211_VENDOR_HANGED_EVENT, GFP_KERNEL);
+#else
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, payload_len, SLSI_NL80211_VENDOR_HANGED_EVENT, GFP_KERNEL);
+#endif
+	if (!skb) {
+		SLSI_ERR_NODEV("Failed to allocate SKB for vendor hanged event");
+		return -ENOMEM;
+	}
+
+	if (nla_put(skb, SLSI_WLAN_VENDOR_ATTR_HANGED_EVENT_PANIC_CODE, payload_len, &scsc_panic_code)) {
+		SLSI_ERR_NODEV("Failed nla_put for panic code\n");
+		slsi_kfree_skb(skb);
+		return -EINVAL;
+	}
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+/* Forward a received beacon's identity (SSID/BSSID/channel/interval and
+ * timestamps) to userspace via SLSI_NL80211_VENDOR_FORWARD_BEACON.
+ *
+ * Fixes vs. previous version:
+ *  - slsi_get_netdev() can return NULL; that was dereferenced unchecked
+ *    through netdev_priv(). Now fails with -ENODEV instead of crashing.
+ *  - 'err' was a u8 accumulating negative int returns from nla_put*();
+ *    it is now an int so no truncation can occur.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_send_forward_beacon_vendor_event(struct slsi_dev *sdev, const u8 *ssid, const int ssid_len, const u8 *bssid,
+					  u8 channel, const u16 beacon_int, const u64 timestamp, const u64 sys_time)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct sk_buff *skb;
+	int err = 0;
+	struct net_device *dev;
+	struct netdev_vif *ndev_vif;
+
+	dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+	if (WARN_ON(!dev))
+		return -ENODEV;
+	ndev_vif = netdev_priv(dev);
+
+	SLSI_DBG2(sdev, SLSI_CFG80211, "Sending SLSI_NL80211_VENDOR_FORWARD_BEACON\n");
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, &ndev_vif->wdev, NLMSG_DEFAULT_SIZE,
+					  SLSI_NL80211_VENDOR_FORWARD_BEACON, GFP_KERNEL);
+#else
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE,
+					  SLSI_NL80211_VENDOR_FORWARD_BEACON, GFP_KERNEL);
+#endif
+	if (!skb) {
+		SLSI_ERR_NODEV("Failed to allocate SKB for vendor forward_beacon event\n");
+		return -ENOMEM;
+	}
+
+	err |= nla_put(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_SSID, ssid_len, ssid);
+	err |= nla_put(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_BSSID, ETH_ALEN, bssid);
+	err |= nla_put_u8(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_CHANNEL, channel);
+	err |= nla_put_u16(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_BCN_INTERVAL, beacon_int);
+	/* 64-bit beacon timestamp is split over two u32 attributes. */
+	err |= nla_put_u32(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_TIME_STAMP1, (timestamp & 0x00000000FFFFFFFF));
+	err |= nla_put_u32(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_TIME_STAMP2,
+			   ((timestamp >> 32) & 0x00000000FFFFFFFF));
+	err |= nla_put_u64_64bit(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_SYS_TIME, sys_time, 0);
+
+	if (err) {
+		SLSI_ERR_NODEV("Failed nla_put for forward_beacon\n");
+		slsi_kfree_skb(skb);
+		return -EINVAL;
+	}
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+
+#endif
+	return 0;
+}
+
+/* Tell userspace that beacon forwarding stopped, with @reason_code, via
+ * SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT.
+ *
+ * Fix vs. previous version: slsi_get_netdev() can return NULL and was
+ * dereferenced unchecked through netdev_priv(); now fails with -ENODEV.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_send_forward_beacon_abort_vendor_event(struct slsi_dev *sdev, u16 reason_code)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	struct sk_buff *skb;
+	int err = 0;
+	struct net_device *dev;
+	struct netdev_vif *ndev_vif;
+
+	dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+	if (WARN_ON(!dev))
+		return -ENODEV;
+	ndev_vif = netdev_priv(dev);
+
+	SLSI_INFO(sdev, "Sending SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT\n");
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, &ndev_vif->wdev, sizeof(reason_code),
+					  SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT, GFP_KERNEL);
+#else
+	skb = cfg80211_vendor_event_alloc(sdev->wiphy, sizeof(reason_code),
+					  SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT, GFP_KERNEL);
+#endif
+	if (!skb) {
+		SLSI_ERR_NODEV("Failed to allocate SKB for vendor forward_beacon_abort event\n");
+		return -ENOMEM;
+	}
+
+	err = nla_put_u16(skb, SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_ABORT, reason_code);
+
+	if (err) {
+		SLSI_ERR_NODEV("Failed nla_put for beacon_recv_abort\n");
+		slsi_kfree_skb(skb);
+		return -EINVAL;
+	}
+	cfg80211_vendor_event(skb, GFP_KERNEL);
+
+#endif
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_HANG_TEST
+/* Debug hook: fake a firmware hang by emitting the hanged vendor event.
+ * Only built under CONFIG_SCSC_WLAN_HANG_TEST.
+ */
+int slsi_test_send_hanged_vendor_event(struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_INFO(ndev_vif->sdev, "Test FORCE HANG\n");
+ /* SCSC_PANIC_CODE_HOST << 15 marks the panic as host-originated */
+ return slsi_send_hanged_vendor_event(ndev_vif->sdev, SCSC_PANIC_CODE_HOST << 15);
+}
+#endif
+
+/* Return true if the IE buffer (@ies, @ies_len) indicates a QoS-capable
+ * peer: an HT capability, a VHT capability, or a Microsoft WMM vendor IE.
+ * Checked in that order; first match wins.
+ */
+static bool slsi_search_ies_for_qos_indicators(struct slsi_dev *sdev, u8 *ies, int ies_len)
+{
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ if (cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len)) {
+ SLSI_DBG1(sdev, SLSI_CFG80211, "QOS enabled due to WLAN_EID_HT_CAPABILITY\n");
+ return true;
+ }
+ if (cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len)) {
+ SLSI_DBG1(sdev, SLSI_CFG80211, "QOS enabled due to WLAN_EID_VHT_CAPABILITY\n");
+ return true;
+ }
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, ies, ies_len)) {
+ SLSI_DBG1(sdev, SLSI_CFG80211, "QOS enabled due to WLAN_OUI_TYPE_MICROSOFT_WMM\n");
+ return true;
+ }
+ return false;
+}
+
+/* Record the (re)association request frame in @skb against @peer.
+ *
+ * On the accepted paths the skb is stripped down to the IEs and its
+ * ownership transfers to peer->assoc_ie; on all rejected paths the skb
+ * is freed here. Caller must hold ndev_vif->vif_mutex.
+ *
+ * Fix vs. previous version: removed the inner 'ndev_vif' declaration
+ * that shadowed the function-scope one (-Wshadow); both referred to the
+ * same netdev_priv(dev), so behaviour is unchanged.
+ */
+void slsi_peer_update_assoc_req(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, struct sk_buff *skb)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	u16 id = fapi_get_u16(skb, id);
+
+	/* MUST only be called from the control path that has acquired the lock */
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	switch (id) {
+	case MLME_CONNECTED_IND:
+	case MLME_PROCEDURE_STARTED_IND:
+		if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_AP &&
+			    ndev_vif->vif_type != FAPI_VIFTYPE_STATION)) {
+			slsi_kfree_skb(skb);
+			return;
+		}
+		break;
+	default:
+		slsi_kfree_skb(skb);
+		WARN_ON(1);
+		return;
+	}
+
+	slsi_kfree_skb(peer->assoc_ie);
+	peer->assoc_ie = NULL;
+	peer->capabilities = 0;
+
+	if (fapi_get_datalen(skb)) {
+		int mgmt_hdr_len;
+		struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+
+		/* Update the skb to just point to the frame */
+		skb_pull(skb, fapi_get_siglen(skb));
+
+		if (ieee80211_is_assoc_req(mgmt->frame_control)) {
+			mgmt_hdr_len = (mgmt->u.assoc_req.variable - (u8 *)mgmt);
+			if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
+				peer->capabilities = le16_to_cpu(mgmt->u.assoc_req.capab_info);
+		} else if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
+			mgmt_hdr_len = (mgmt->u.reassoc_req.variable - (u8 *)mgmt);
+			if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
+				peer->capabilities = le16_to_cpu(mgmt->u.reassoc_req.capab_info);
+		} else {
+			WARN_ON(1);
+			slsi_kfree_skb(skb);
+			return;
+		}
+
+		/* Strip the 802.11 management header; only the IEs remain. */
+		skb_pull(skb, mgmt_hdr_len);
+
+		peer->assoc_ie = skb;
+		peer->sinfo.assoc_req_ies = skb->data;
+		peer->sinfo.assoc_req_ies_len = skb->len;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+		peer->sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+#endif
+		peer->qos_enabled = slsi_search_ies_for_qos_indicators(sdev, skb->data, skb->len);
+	}
+}
+
+/* Record the (re)association response frame in @skb against @peer.
+ *
+ * Only meaningful on a STATION vif for connect/roam/reassociate
+ * indications. On the accepted paths the skb is stripped down to the IEs
+ * and ownership transfers to peer->assoc_resp_ie; every rejected path
+ * funnels through exit_with_warnon, which frees the skb.
+ * Caller must hold ndev_vif->vif_mutex.
+ */
+void slsi_peer_update_assoc_rsp(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 id = fapi_get_u16(skb, id);
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ /* MUST only be called from the control path that has acquired the lock */
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
+ goto exit_with_warnon;
+
+ if (id != MLME_CONNECT_IND && id != MLME_ROAMED_IND && id != MLME_REASSOCIATE_IND) {
+ SLSI_NET_ERR(dev, "Unexpected id =0x%4x\n", id);
+ goto exit_with_warnon;
+ }
+
+ /* Drop any previously stored response before replacing it. */
+ slsi_kfree_skb(peer->assoc_resp_ie);
+ peer->assoc_resp_ie = NULL;
+ peer->capabilities = 0;
+ if (fapi_get_datalen(skb)) {
+ int mgmt_hdr_len;
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+
+ /* Update the skb to just point to the frame */
+ skb_pull(skb, fapi_get_siglen(skb));
+
+ if (ieee80211_is_assoc_resp(mgmt->frame_control)) {
+ mgmt_hdr_len = (mgmt->u.assoc_resp.variable - (u8 *)mgmt);
+ peer->capabilities = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ } else if (ieee80211_is_reassoc_resp(mgmt->frame_control)) {
+ mgmt_hdr_len = (mgmt->u.reassoc_resp.variable - (u8 *)mgmt);
+ peer->capabilities = le16_to_cpu(mgmt->u.reassoc_resp.capab_info);
+ } else {
+ goto exit_with_warnon;
+ }
+
+ /* Strip the 802.11 management header; only the IEs remain. */
+ skb_pull(skb, mgmt_hdr_len);
+ peer->assoc_resp_ie = skb;
+ }
+ return;
+
+exit_with_warnon:
+ WARN_ON(1);
+ slsi_kfree_skb(skb);
+}
+
+/* Tear down @peer: flush buffered frames, stop block-ack sessions,
+ * invalidate the record under the peer spinlock (so the TX path stops
+ * using it), release its queue sets and IE buffers, and zero the record.
+ *
+ * Caller must hold ndev_vif->vif_mutex. Returns 0, or -EINVAL if @peer
+ * is NULL.
+ *
+ * Fix vs. previous version: dropped the stale SLSI_UNUSED_PARAMETER(sdev)
+ * marker — sdev IS used (scsc_wifi_fcq_qset_deinit below).
+ */
+int slsi_peer_remove(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *buff_frame;
+
+	/* MUST only be called from the control path that has acquired the lock */
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (!peer) {
+		SLSI_NET_WARN(dev, "peer=NULL");
+		return -EINVAL;
+	}
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "%pM\n", peer->address);
+
+	buff_frame = slsi_skb_dequeue(&peer->buffered_frames);
+	while (buff_frame) {
+		SLSI_NET_DBG3(dev, SLSI_MLME, "FLUSHING BUFFERED FRAMES\n");
+		slsi_kfree_skb(buff_frame);
+		buff_frame = slsi_skb_dequeue(&peer->buffered_frames);
+	}
+
+	slsi_rx_ba_stop_all(dev, peer);
+
+	/* Take the peer lock to protect the transmit data path
+	 * when accessing peer records.
+	 */
+	slsi_spinlock_lock(&ndev_vif->peer_lock);
+
+	/* The information is no longer valid so first update the flag to ensure that
+	 * another process doesn't try to use it any more.
+	 */
+	peer->valid = false;
+	peer->is_wps = false;
+	peer->connected_state = SLSI_STA_CONN_STATE_DISCONNECTED;
+
+	if (slsi_is_tdls_peer(dev, peer))
+		ndev_vif->sta.tdls_peer_sta_records--;
+	else
+		ndev_vif->peer_sta_records--;
+
+	slsi_spinlock_unlock(&ndev_vif->peer_lock);
+
+	ndev_vif->cfg80211_sinfo_generation++;
+
+	scsc_wifi_fcq_qset_deinit(dev, &peer->data_qs, sdev, ndev_vif->ifnum, peer);
+	scsc_wifi_fcq_ctrl_q_deinit(&peer->ctrl_q);
+
+	slsi_kfree_skb(peer->assoc_ie);
+	slsi_kfree_skb(peer->assoc_resp_ie);
+	memset(peer, 0x00, sizeof(*peer));
+
+	return 0;
+}
+
+/* Mark @dev's vif active and initialise per-mode state: the multicast
+ * queue set for AP vifs, and TDLS/roam/keepalive state for STATION vifs.
+ *
+ * Caller must hold ndev_vif->vif_mutex. Returns 0 on success, -EALREADY
+ * if already activated, -EFAULT if the AP multicast qset init fails.
+ *
+ * Fixes vs. previous version: dropped the stale SLSI_UNUSED_PARAMETER(sdev)
+ * marker (sdev IS used for the multicast qset init), and braced the
+ * multi-line nested AP 'if' to remove the dangling-else hazard.
+ */
+int slsi_vif_activated(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+	/* MUST only be called from the control path that has acquired the lock */
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	/* MUST have cleared any peer records previously */
+	WARN_ON(ndev_vif->peer_sta_records);
+
+	if (WARN_ON(ndev_vif->activated))
+		return -EALREADY;
+
+	if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+		/* Enable the Multicast queue set for AP mode */
+		if (scsc_wifi_fcq_multicast_qset_init(dev, &ndev_vif->ap.group_data_qs, sdev, ndev_vif->ifnum) < 0)
+			return -EFAULT;
+	}
+
+	if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+		/* MUST have cleared any tdls peer records previously */
+		WARN_ON(ndev_vif->sta.tdls_peer_sta_records);
+
+		ndev_vif->sta.tdls_peer_sta_records = 0;
+		ndev_vif->sta.tdls_enabled = false;
+		ndev_vif->sta.roam_in_progress = false;
+		ndev_vif->sta.nd_offload_enabled = true;
+
+		memset(ndev_vif->sta.keepalive_host_tag, 0, sizeof(ndev_vif->sta.keepalive_host_tag));
+	}
+
+	ndev_vif->cfg80211_sinfo_generation = 0;
+	ndev_vif->peer_sta_records = 0;
+	ndev_vif->activated = true;
+	ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+	return 0;
+}
+
+/* Deactivate @dev's vif: clear per-mode state, purge queued RX data,
+ * remove all peers (notifying cfg80211 for AP-mode stations), release the
+ * AP multicast queue set and reset P2P/power-mode bookkeeping.
+ * Caller must hold ndev_vif->vif_mutex.
+ */
+void slsi_vif_deactivated(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int i;
+
+ /* MUST only be called from the control path that has acquired the lock */
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ /* The station type VIF is deactivated when the AP connection is lost */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ ndev_vif->sta.group_key_set = false;
+ ndev_vif->sta.vif_status = SLSI_VIF_STATUS_UNSPECIFIED;
+ memset(ndev_vif->sta.keepalive_host_tag, 0, sizeof(ndev_vif->sta.keepalive_host_tag));
+
+ /* delete the TSPEC entries (if any) if it is a STA vif */
+ if (ndev_vif->iftype == NL80211_IFTYPE_STATION)
+ cac_delete_tspec_list(sdev);
+
+ if (ndev_vif->sta.tdls_enabled)
+ WARN(ndev_vif->sta.tdls_peer_sta_records, "vif:%d, tdls_peer_sta_records:%d", ndev_vif->ifnum, ndev_vif->sta.tdls_peer_sta_records);
+
+ if (ndev_vif->sta.sta_bss) {
+ slsi_cfg80211_put_bss(sdev->wiphy, ndev_vif->sta.sta_bss);
+ ndev_vif->sta.sta_bss = NULL;
+ }
+ ndev_vif->sta.tdls_enabled = false;
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ ndev_vif->is_wips_running = false;
+#endif
+ }
+
+ /* MUST be done first to ensure that other code doesn't treat the VIF as still active */
+ ndev_vif->activated = false;
+ slsi_skb_queue_purge(&ndev_vif->rx_data.queue);
+
+ /* Remove every remaining peer; AP-mode stations with stored assoc IEs
+  * are also reported as departed to cfg80211.
+  */
+ for (i = 0; i < (SLSI_ADHOC_PEER_CONNECTIONS_MAX); i++) {
+ struct slsi_peer *peer = ndev_vif->peer_sta_record[i];
+
+ if (peer && peer->valid) {
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && peer->assoc_ie)
+ cfg80211_del_sta(dev, peer->address, GFP_KERNEL);
+ slsi_peer_remove(sdev, dev, peer);
+ }
+ }
+
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+ memset(&ndev_vif->ap.last_disconnected_sta, 0, sizeof(ndev_vif->ap.last_disconnected_sta));
+ scsc_wifi_fcq_qset_deinit(dev, &ndev_vif->ap.group_data_qs, sdev, ndev_vif->ifnum, NULL);
+ }
+
+ if ((ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) || (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO)) {
+ SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_NO_VIF);
+ sdev->p2p_group_exp_frame = SLSI_P2P_PA_INVALID;
+ }
+
+ /* MUST be done last as lots of code is dependent on checking the vif_type */
+ ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
+ ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
+ if (slsi_is_rf_test_mode_enabled()) {
+ SLSI_NET_ERR(dev, "*#rf# rf test mode set is enabled.\n");
+ ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
+ } else {
+ ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
+ }
+ ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+
+ /* SHOULD have cleared any peer records */
+ WARN(ndev_vif->peer_sta_records, "vif:%d, peer_sta_records:%d", ndev_vif->ifnum, ndev_vif->peer_sta_records);
+
+ /* NOTE(review): vif_type was just reset to SLSI_VIFTYPE_UNSPECIFIED
+  * above, so this STATION branch appears unreachable (dead duplicate of
+  * the cleanup at the top of the function) — confirm and consider
+  * removing.
+  */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ if (ndev_vif->sta.tdls_enabled)
+ WARN(ndev_vif->sta.tdls_peer_sta_records, "vif:%d, tdls_peer_sta_records:%d",
+ ndev_vif->ifnum, ndev_vif->sta.tdls_peer_sta_records);
+
+ if (ndev_vif->sta.sta_bss) {
+ slsi_cfg80211_put_bss(sdev->wiphy, ndev_vif->sta.sta_bss);
+ ndev_vif->sta.sta_bss = NULL;
+ }
+ ndev_vif->sta.tdls_enabled = false;
+ }
+
+ sdev->device_config.qos_info = -1;
+}
+
+/* Classify the connected BSS's 802.11 mode from its IEs, most capable
+ * first: VHT operation -> 11ac, HT operation -> 11n, any 5 GHz channel
+ * -> 11a, else derive from the supported-rates IE. Returns an
+ * SLSI_80211_MODE_* value or -EINVAL if nothing matches.
+ */
+static int slsi_sta_ieee80211_mode(struct net_device *dev, u16 current_bss_channel_frequency)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	const u8 *bss_ies = ndev_vif->sta.sta_bss->ies->data;
+	int bss_ies_len = ndev_vif->sta.sta_bss->ies->len;
+	const u8 *rates_ie;
+
+	if (cfg80211_find_ie(WLAN_EID_VHT_OPERATION, bss_ies, bss_ies_len))
+		return SLSI_80211_MODE_11AC;
+
+	if (cfg80211_find_ie(WLAN_EID_HT_OPERATION, bss_ies, bss_ies_len))
+		return SLSI_80211_MODE_11N;
+
+	if (current_bss_channel_frequency > 5000)
+		return SLSI_80211_MODE_11A;
+
+	rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, bss_ies, bss_ies_len);
+	if (rates_ie)
+		return slsi_get_supported_mode(rates_ie);
+	return -EINVAL;
+}
+
+/* Determine the 802.11 mode of the departing/last station identified by
+ * @last_peer_mac, capped by the AP's own mode, from its stored assoc IEs.
+ * Also records a coarse support_mode (3 = VHT, 1 = HT) in
+ * ap.last_disconnected_sta. Returns an SLSI_80211_MODE_* value or
+ * -EINVAL if the peer or its association IEs are unavailable.
+ *
+ * Fix vs. previous version: peer->assoc_ie starts out NULL (set in
+ * slsi_peer_add) and was dereferenced here unchecked; now validated.
+ */
+static int slsi_get_sta_mode(struct net_device *dev, const u8 *last_peer_mac)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	struct slsi_peer *last_peer;
+	const u8 *peer_ie;
+
+	last_peer = slsi_get_peer_from_mac(sdev, dev, last_peer_mac);
+
+	if (!last_peer) {
+		SLSI_NET_ERR(dev, "Peer not found\n");
+		return -EINVAL;
+	}
+
+	/* All classification below parses the stored (re)assoc request IEs. */
+	if (!last_peer->assoc_ie) {
+		SLSI_NET_ERR(dev, "Peer assoc IEs not available\n");
+		return -EINVAL;
+	}
+
+	ndev_vif->ap.last_disconnected_sta.support_mode = 0;
+	if (cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, last_peer->assoc_ie->data,
+			     last_peer->assoc_ie->len))
+		ndev_vif->ap.last_disconnected_sta.support_mode = 3;
+	else if (cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, last_peer->assoc_ie->data,
+				  last_peer->assoc_ie->len))
+		ndev_vif->ap.last_disconnected_sta.support_mode = 1;
+
+	if (ndev_vif->ap.mode == SLSI_80211_MODE_11AC) { /*AP supports VHT*/
+		peer_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, last_peer->assoc_ie->data,
+					   last_peer->assoc_ie->len);
+		if (peer_ie)
+			return SLSI_80211_MODE_11AC;
+
+		peer_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, last_peer->assoc_ie->data,
+					   last_peer->assoc_ie->len);
+		if (peer_ie)
+			return SLSI_80211_MODE_11N;
+		return SLSI_80211_MODE_11A;
+	}
+	if (ndev_vif->ap.mode == SLSI_80211_MODE_11N) { /*AP supports HT*/
+		peer_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, last_peer->assoc_ie->data,
+					   last_peer->assoc_ie->len);
+		if (peer_ie)
+			return SLSI_80211_MODE_11N;
+		if (ndev_vif->ap.channel_freq > 5000)
+			return SLSI_80211_MODE_11A;
+		peer_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, last_peer->assoc_ie->data,
+					   last_peer->assoc_ie->len);
+		if (peer_ie)
+			return slsi_get_supported_mode(peer_ie);
+	}
+
+	if (ndev_vif->ap.channel_freq > 5000)
+		return SLSI_80211_MODE_11A;
+
+	if (ndev_vif->ap.mode == SLSI_80211_MODE_11G) { /*AP supports 11g mode */
+		peer_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, last_peer->assoc_ie->data,
+					   last_peer->assoc_ie->len);
+		if (peer_ie)
+			return slsi_get_supported_mode(peer_ie);
+	}
+
+	return SLSI_80211_MODE_11B;
+}
+
+/* Populate ndev_vif->sta.last_connected_bss with "big data" statistics
+ * about the BSS this STA vif was connected to: a batch of firmware MIBs
+ * (frequency, bandwidth, NSS, MIMO use, SNR, RSSI, roaming count, TX
+ * data rate, roaming AKM) plus 802.11k/v capability bits parsed from
+ * the cached scan entry (sta_bss) IEs.
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
+ * MIB response cannot be decoded or sta_bss is missing.
+ */
+int slsi_populate_bss_record(struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_value *values = NULL;
+	const u8 *ie, *ext_capab, *rm_capab, *ext_data, *rm_data, *bss_load;
+	u8 ext_capab_ie_len, rm_capab_ie_len;
+	bool neighbor_report_bit = 0, btm = 0;
+	u16 fw_tx_rate;
+	/* Order matters: values[] below is decoded positionally against
+	 * this table, so the index comments must stay in sync with it.
+	 */
+	struct slsi_mib_get_entry get_values[] = { { SLSI_PSID_UNIFI_CURRENT_BSS_CHANNEL_FREQUENCY, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_CURRENT_BSS_BANDWIDTH, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_CURRENT_BSS_NSS, {0, 0} },
+						   { SLSI_PSID_UNIFI_AP_MIMO_USED, {0, 0} },
+						   { SLSI_PSID_UNIFI_LAST_BSS_SNR, {0, 0} },
+						   { SLSI_PSID_UNIFI_LAST_BSS_RSSI, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_ROAMING_COUNT, {0, 0} },
+						   { SLSI_PSID_UNIFI_LAST_BSS_TX_DATA_RATE, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_ROAMING_AKM, {0, 0} } };
+
+	/* 10 bytes per requested PSID is a sizing heuristic for the encoded
+	 * response buffer - NOTE(review): confirm against the MIB codec's
+	 * worst-case entry size.
+	 */
+	mibrsp.dataLength = 10 * ARRAY_SIZE(get_values);
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		SLSI_ERR(sdev, "Cannot kmalloc %d bytes for interface MIBs\n", mibrsp.dataLength);
+		return -ENOMEM;
+	}
+
+	values = slsi_read_mibs(sdev, dev, get_values, ARRAY_SIZE(get_values), &mibrsp);
+
+	/* Start from a clean record; fields not reported stay zero. */
+	memset(&ndev_vif->sta.last_connected_bss, 0, sizeof(ndev_vif->sta.last_connected_bss));
+
+	if (!values) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "mib decode list failed\n");
+		kfree(values); /* kfree(NULL) is a no-op; kept for symmetry */
+		kfree(mibrsp.data);
+		return -EINVAL;
+	}
+
+	/* The Below sequence of reading the BSS Info related Mibs is very important */
+	if (values[0].type != SLSI_MIB_TYPE_NONE) { /* CURRENT_BSS_CHANNEL_FREQUENCY */
+		SLSI_CHECK_TYPE(sdev, values[0].type, SLSI_MIB_TYPE_UINT);
+		/* NOTE(review): firmware value is halved here - presumably it
+		 * reports frequency in 0.5 MHz units; confirm against the MIB
+		 * definition.
+		 */
+		ndev_vif->sta.last_connected_bss.channel_freq = ((values[0].u.uintValue) / 2);
+	}
+
+	if (values[1].type != SLSI_MIB_TYPE_NONE) { /* CURRENT_BSS_BANDWIDTH */
+		SLSI_CHECK_TYPE(sdev, values[1].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.bandwidth = values[1].u.uintValue;
+	}
+
+	if (values[2].type != SLSI_MIB_TYPE_NONE) { /* CURRENT_BSS_NSS */
+		SLSI_CHECK_TYPE(sdev, values[2].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.antenna_mode = values[2].u.uintValue;
+	}
+
+	if (values[3].type != SLSI_MIB_TYPE_NONE) { /* AP_MIMO_USED */
+		SLSI_CHECK_TYPE(sdev, values[3].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.mimo_used = values[3].u.uintValue;
+	}
+
+	if (values[4].type != SLSI_MIB_TYPE_NONE) { /* SNR */
+		SLSI_CHECK_TYPE(sdev, values[4].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.snr = values[4].u.uintValue;
+	}
+
+	if (values[5].type != SLSI_MIB_TYPE_NONE) { /* RSSI */
+		SLSI_CHECK_TYPE(sdev, values[5].type, SLSI_MIB_TYPE_INT);
+		ndev_vif->sta.last_connected_bss.rssi = values[5].u.intValue;
+	}
+
+	if (values[6].type != SLSI_MIB_TYPE_NONE) { /* ROAMING_COUNT */
+		SLSI_CHECK_TYPE(sdev, values[6].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.roaming_count = values[6].u.uintValue;
+	}
+
+	if (values[7].type != SLSI_MIB_TYPE_NONE) { /* TX_DATA_RATE */
+		SLSI_CHECK_TYPE(sdev, values[7].type, SLSI_MIB_TYPE_UINT);
+		fw_tx_rate = values[7].u.uintValue;
+		/* NOTE(review): cast assumes tx_data_rate has the size and
+		 * alignment of unsigned long - confirm the field type.
+		 */
+		slsi_fw_tx_rate_calc(fw_tx_rate, NULL,
+				     (unsigned long *)(&ndev_vif->sta.last_connected_bss.tx_data_rate));
+	}
+
+	if (values[8].type != SLSI_MIB_TYPE_NONE) { /* ROAMING_AKM */
+		SLSI_CHECK_TYPE(sdev, values[8].type, SLSI_MIB_TYPE_UINT);
+		ndev_vif->sta.last_connected_bss.roaming_akm = values[8].u.uintValue;
+	}
+
+	kfree(values);
+	kfree(mibrsp.data);
+
+	/* The remaining fields come from the cached scan entry. */
+	if (!ndev_vif->sta.sta_bss) {
+		SLSI_WARN(sdev, "Bss missing due to out of order msg from firmware!! Cannot collect Big Data\n");
+		return -EINVAL;
+	}
+
+	SLSI_ETHER_COPY(ndev_vif->sta.last_connected_bss.address, ndev_vif->sta.sta_bss->bssid);
+
+	ndev_vif->sta.last_connected_bss.mode = slsi_sta_ieee80211_mode(dev,
+									ndev_vif->sta.last_connected_bss.channel_freq);
+	if (ndev_vif->sta.last_connected_bss.mode == -EINVAL) {
+		SLSI_ERR(sdev, "slsi_get_bss_info : Supported Rates IE is null");
+		return -EINVAL;
+	}
+
+	/* Hotspot 2.0 indication vendor IE: upper nibble of the byte at
+	 * offset 6 carries the release. NOTE(review): assumes the IE is at
+	 * least 7 bytes long - the length is not checked here.
+	 */
+	ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, SLSI_WLAN_OUI_TYPE_WFA_HS20_IND,
+				     ndev_vif->sta.sta_bss->ies->data, ndev_vif->sta.sta_bss->ies->len);
+	if (ie) {
+		if ((ie[6] >> 4) == 0)
+			ndev_vif->sta.last_connected_bss.passpoint_version = 1;
+		else
+			ndev_vif->sta.last_connected_bss.passpoint_version = 2;
+	}
+
+	/* Noise floor estimate derived from the reported RSSI and SNR. */
+	ndev_vif->sta.last_connected_bss.noise_level = (ndev_vif->sta.last_connected_bss.rssi -
+							ndev_vif->sta.last_connected_bss.snr);
+
+	ext_capab = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ndev_vif->sta.sta_bss->ies->data,
+				     ndev_vif->sta.sta_bss->ies->len);
+	rm_capab = cfg80211_find_ie(WLAN_EID_RRM_ENABLED_CAPABILITIES, ndev_vif->sta.sta_bss->ies->data,
+				    ndev_vif->sta.sta_bss->ies->len);
+	bss_load = cfg80211_find_ie(WLAN_EID_QBSS_LOAD, ndev_vif->sta.sta_bss->ies->data,
+				    ndev_vif->sta.sta_bss->ies->len);
+
+	/* Build the kvie bitmap from the Extended Capabilities IE. */
+	if (ext_capab) {
+		ext_capab_ie_len = ext_capab[1];
+		ext_data = &ext_capab[2];
+		if ((ext_capab_ie_len >= 2) && (ext_data[1] &
+						SLSI_WLAN_EXT_CAPA1_PROXY_ARP_ENABLED)) /*check bit12 is set or not */
+			ndev_vif->sta.last_connected_bss.kvie |= 1 << 1;
+		if (ext_capab_ie_len >= 3) {
+			if (ext_data[2] & SLSI_WLAN_EXT_CAPA2_TFS_ENABLED) /*check bit16 is set or not */
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 2;
+			if (ext_data[2] & SLSI_WLAN_EXT_CAPA2_WNM_SLEEP_ENABLED) /*check bit17 is set or not */
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 3;
+			if (ext_data[2] & SLSI_WLAN_EXT_CAPA2_TIM_ENABLED) /*check bit18 is set or not */
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 4;
+			/*check bit19 is set or not */
+			if (ext_data[2] & SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED) {
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 5;
+				btm = 1;
+			}
+			if (ext_data[2] & SLSI_WLAN_EXT_CAPA2_DMS_ENABLED) /*check bit20 is set or not */
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 6;
+		}
+	}
+	if (bss_load)
+		ndev_vif->sta.last_connected_bss.kvie |= 1;
+	/* Fold the RM Enabled Capabilities IE into the same bitmap. */
+	if (rm_capab) {
+		rm_capab_ie_len = rm_capab[1];
+		rm_data = &rm_capab[2];
+		if (rm_capab_ie_len >= 1) {
+			neighbor_report_bit = SLSI_WLAN_RM_CAPA0_NEIGHBOR_REPORT_ENABLED & rm_data[0];
+			if (SLSI_WLAN_RM_CAPA0_LINK_MEASUREMENT_ENABLED & rm_data[0])
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 7;
+			if (neighbor_report_bit)
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 8;
+			if (SLSI_WLAN_RM_CAPA0_PASSIVE_MODE_ENABLED & rm_data[0])
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 9;
+			if (SLSI_WLAN_RM_CAPA0_ACTIVE_MODE_ENABLED & rm_data[0])
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 10;
+			if (SLSI_WLAN_RM_CAPA0_TABLE_MODE_ENABLED & rm_data[0])
+				ndev_vif->sta.last_connected_bss.kvie |= 1 << 11;
+		}
+	}
+	/* Summarise 11k/11v support: 0 = neither, 1 = 11k only,
+	 * 2 = 11v only, 3 = both.
+	 */
+	if (!neighbor_report_bit && !btm && !bss_load)
+		ndev_vif->sta.last_connected_bss.kv = 0;
+	else if (neighbor_report_bit != 0 && (!btm && !bss_load))
+		ndev_vif->sta.last_connected_bss.kv = 1; /*11k support */
+	else if (!neighbor_report_bit && (btm || bss_load))
+		ndev_vif->sta.last_connected_bss.kv = 2; /*11v support */
+	else
+		ndev_vif->sta.last_connected_bss.kv = 3; /*11kv support */
+
+	return 0;
+}
+
+/* Record details of the peer STA that is disconnecting from this AP vif
+ * in ndev_vif->ap.last_disconnected_sta: its MAC, disconnect reason,
+ * operating mode, capabilities, and per-peer MIB stats (bandwidth, NSS,
+ * RSSI, TX rate). Defaults are written first so a failed MIB read still
+ * leaves a usable record.
+ * Returns 0 on success, -EINVAL if the peer is unknown or the MIB
+ * response cannot be decoded, -ENOMEM on allocation failure.
+ */
+static int slsi_fill_last_disconnected_sta_info(struct slsi_dev *sdev, struct net_device *dev,
+						const u8 *last_peer_mac, const u16 reason_code)
+{
+	int i;
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_peer *last_peer;
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_value *values = NULL;
+	u16 fw_tx_rate;
+	/* Decoded positionally below - keep order in sync with the index
+	 * comments.
+	 */
+	struct slsi_mib_get_entry get_values[] = { { SLSI_PSID_UNIFI_PEER_BANDWIDTH, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_CURRENT_PEER_NSS, {0, 0} },
+						   { SLSI_PSID_UNIFI_PEER_RSSI, { 0, 0 } },
+						   { SLSI_PSID_UNIFI_PEER_TX_DATA_RATE, { 0, 0 } } };
+
+	SLSI_ETHER_COPY(ndev_vif->ap.last_disconnected_sta.address,
+			last_peer_mac);
+	ndev_vif->ap.last_disconnected_sta.reason = reason_code;
+	ndev_vif->ap.last_disconnected_sta.mode = slsi_get_sta_mode(dev, last_peer_mac);
+	last_peer = slsi_get_peer_from_mac(sdev, dev, last_peer_mac);
+	if (!last_peer) {
+		SLSI_NET_ERR(dev, "Peer not found\n");
+		return -EINVAL;
+	}
+	/* Per-peer MIBs are indexed by the peer's association ID. */
+	for (i = 0; i < ARRAY_SIZE(get_values); i++)
+		get_values[i].index[0] = last_peer->aid;
+
+	/* Defaults in case any of the MIB reads below fail or are absent. */
+	ndev_vif->ap.last_disconnected_sta.rx_retry_packets = SLSI_DEFAULT_UNIFI_PEER_RX_RETRY_PACKETS;
+	ndev_vif->ap.last_disconnected_sta.rx_bc_mc_packets = SLSI_DEFAULT_UNIFI_PEER_RX_BC_MC_PACKETS;
+	ndev_vif->ap.last_disconnected_sta.capabilities = last_peer->capabilities;
+	ndev_vif->ap.last_disconnected_sta.bandwidth = SLSI_DEFAULT_UNIFI_PEER_BANDWIDTH;
+	ndev_vif->ap.last_disconnected_sta.antenna_mode = SLSI_DEFAULT_UNIFI_PEER_NSS;
+	ndev_vif->ap.last_disconnected_sta.rssi = SLSI_DEFAULT_UNIFI_PEER_RSSI;
+	ndev_vif->ap.last_disconnected_sta.tx_data_rate = SLSI_DEFAULT_UNIFI_PEER_TX_DATA_RATE;
+
+	/* 15 bytes per requested PSID is a sizing heuristic for the encoded
+	 * response - NOTE(review): confirm against the MIB codec.
+	 */
+	mibrsp.dataLength = 15 * ARRAY_SIZE(get_values);
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+
+	if (!mibrsp.data) {
+		SLSI_ERR(sdev, "Cannot kmalloc %d bytes for interface MIBs\n", mibrsp.dataLength);
+		return -ENOMEM;
+	}
+
+	values = slsi_read_mibs(sdev, dev, get_values, ARRAY_SIZE(get_values), &mibrsp);
+
+	if (!values) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "mib decode list failed\n");
+		kfree(values); /* kfree(NULL) is a no-op; kept for symmetry */
+		kfree(mibrsp.data);
+		return -EINVAL;
+	}
+	if (values[0].type != SLSI_MIB_TYPE_NONE) { /* LAST_PEER_BANDWIDTH */
+		SLSI_CHECK_TYPE(sdev, values[0].type, SLSI_MIB_TYPE_INT);
+		ndev_vif->ap.last_disconnected_sta.bandwidth = values[0].u.intValue;
+	}
+
+	if (values[1].type != SLSI_MIB_TYPE_NONE) { /*LAST_PEER_NSS*/
+		SLSI_CHECK_TYPE(sdev, values[1].type, SLSI_MIB_TYPE_INT);
+		ndev_vif->ap.last_disconnected_sta.antenna_mode = values[1].u.intValue;
+	}
+
+	if (values[2].type != SLSI_MIB_TYPE_NONE) { /* LAST_PEER_RSSI*/
+		SLSI_CHECK_TYPE(sdev, values[2].type, SLSI_MIB_TYPE_INT);
+		ndev_vif->ap.last_disconnected_sta.rssi = values[2].u.intValue;
+	}
+
+	if (values[3].type != SLSI_MIB_TYPE_NONE) { /* LAST_PEER_TX_DATA_RATE */
+		SLSI_CHECK_TYPE(sdev, values[3].type, SLSI_MIB_TYPE_UINT);
+		fw_tx_rate = values[3].u.uintValue;
+		/* NOTE(review): cast assumes tx_data_rate has the size and
+		 * alignment of unsigned long - confirm the field type.
+		 */
+		slsi_fw_tx_rate_calc(fw_tx_rate, NULL,
+				     (unsigned long *)&ndev_vif->ap.last_disconnected_sta.tx_data_rate);
+	}
+
+	kfree(values);
+	kfree(mibrsp.data);
+
+	return 0;
+}
+
+/* Handle an MLME disconnection (local or peer initiated) for @dev.
+ * STA vif: notify cfg80211, collect the "big data" BSS record and
+ * deactivate the vif. AP vif: tear down the disconnecting peer record.
+ * Always returns 0; failures are logged, not propagated.
+ * Caller must hold ndev_vif->vif_mutex (asserted below).
+ */
+int slsi_handle_disconnect(struct slsi_dev *sdev, struct net_device *dev, u8 *peer_address, u16 reason)
+{
+	struct netdev_vif *ndev_vif;
+
+	/* Validate dev BEFORE dereferencing it: the previous code called
+	 * netdev_priv(dev) ahead of this WARN_ON, which would crash on a
+	 * NULL dev before the guard could catch it.
+	 */
+	if (WARN_ON(!dev))
+		goto exit;
+
+	ndev_vif = netdev_priv(dev);
+
+	SLSI_NET_DBG3(dev, SLSI_MLME, "slsi_handle_disconnect(vif:%d)\n", ndev_vif->ifnum);
+
+	/* MUST only be called from somewhere that has acquired the lock */
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (!ndev_vif->activated) {
+		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
+		goto exit;
+	}
+
+	switch (ndev_vif->vif_type) {
+	case FAPI_VIFTYPE_STATION:
+	{
+		netif_carrier_off(dev);
+
+		/* MLME-DISCONNECT-IND could indicate the completion of a MLME-DISCONNECT-REQ or
+		 * the connection with the AP has been lost
+		 */
+		if (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTING) {
+			if (!peer_address)
+				SLSI_NET_WARN(dev, "Connection failure\n");
+		} else if (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED) {
+			/* Map firmware-private reason codes to values cfg80211
+			 * understands before reporting the disconnect.
+			 */
+			if (reason == FAPI_REASONCODE_SYNCHRONISATION_LOSS)
+				reason = 0; /*reason code to recognise beacon loss */
+			else if (reason == FAPI_REASONCODE_KEEP_ALIVE_FAILURE)
+				reason = WLAN_REASON_DEAUTH_LEAVING;/* Change to a standard reason code */
+			else if (reason >= 0x8200 && reason <= 0x82FF)
+				reason = reason & 0x00FF;
+
+			if (ndev_vif->sta.is_wps) /* Ignore sending deauth or disassoc event to cfg80211 during WPS session */
+				SLSI_NET_INFO(dev, "Ignoring Deauth notification to cfg80211 from the peer during WPS procedure\n");
+			else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+				cfg80211_disconnected(dev, reason, NULL, 0, false, GFP_KERNEL);
+#else
+				cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+#endif
+				SLSI_NET_DBG3(dev, SLSI_MLME, "Received disconnect from AP, reason = %d\n", reason);
+			}
+		} else if (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_DISCONNECTING) {
+			/* Change keep alive and sync_loss reason code while sending to supplicant to a standard reason code */
+			if (reason == FAPI_REASONCODE_KEEP_ALIVE_FAILURE ||
+			    reason == FAPI_REASONCODE_SYNCHRONISATION_LOSS)
+				reason = WLAN_REASON_DEAUTH_LEAVING;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+			cfg80211_disconnected(dev, reason, NULL, 0, true, GFP_KERNEL);
+#else
+			cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+#endif
+			SLSI_NET_DBG3(dev, SLSI_MLME, "Completion of disconnect from AP\n");
+		} else {
+			/* Vif status is in erronus state.*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+			cfg80211_disconnected(dev, reason, NULL, 0, false, GFP_KERNEL);
+#else
+			cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+#endif
+			SLSI_NET_WARN(dev, "disconnect in wrong state vif_status(%d)\n", ndev_vif->sta.vif_status);
+		}
+
+		ndev_vif->sta.is_wps = false;
+
+		/* Populate bss records on incase of disconnection.
+		 * For connection failure its not required.
+		 */
+		if (!(ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTING ||
+		      ndev_vif->sta.vif_status == SLSI_VIF_STATUS_UNSPECIFIED))
+			slsi_populate_bss_record(dev);
+
+		/* Free any cached extra assoc-request IEs and always reset
+		 * the pointer/length pair together, so a freed pointer can
+		 * never be paired with a stale length (the old code only
+		 * reset them when the pointer happened to be non-NULL).
+		 */
+		kfree(ndev_vif->sta.assoc_req_add_info_elem);
+		ndev_vif->sta.assoc_req_add_info_elem = NULL;
+		ndev_vif->sta.assoc_req_add_info_elem_len = 0;
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+		memset(&ndev_vif->enhanced_arp_stats, 0, sizeof(ndev_vif->enhanced_arp_stats));
+#endif
+		slsi_mlme_del_vif(sdev, dev);
+		slsi_vif_deactivated(sdev, dev);
+		break;
+	}
+	case FAPI_VIFTYPE_AP:
+	{
+		struct slsi_peer *peer = NULL;
+
+		peer = slsi_get_peer_from_mac(sdev, dev, peer_address);
+		if (!peer) {
+			SLSI_NET_DBG1(dev, SLSI_MLME, "peer NOT found by MAC address\n");
+			goto exit;
+		}
+
+		SLSI_NET_DBG3(dev, SLSI_MLME, "MAC:%pM\n", peer_address);
+		slsi_fill_last_disconnected_sta_info(sdev, dev, peer_address, reason);
+		slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+		if ((peer->connected_state == SLSI_STA_CONN_STATE_CONNECTED) || (peer->connected_state == SLSI_STA_CONN_STATE_DOING_KEY_CONFIG))
+			cfg80211_del_sta(dev, peer->address, GFP_KERNEL);
+
+		slsi_peer_remove(sdev, dev, peer);
+
+		/* If last client disconnects (after WPA2 handshake) then take wakelock till group is removed
+		 * to avoid possibility of delay in group removal if platform suspends at this point.
+		 */
+		if (ndev_vif->ap.p2p_gc_keys_set && (ndev_vif->peer_sta_records == 0)) {
+			SLSI_NET_DBG2(dev, SLSI_MLME, "P2PGO - Acquire wakelock after last client disconnection\n");
+			slsi_wakelock(&sdev->wlan_wl);
+		}
+		break;
+	}
+	default:
+		SLSI_NET_WARN(dev, "mlme_disconnect_ind(vif:%d, unexpected vif type:%d)\n", ndev_vif->ifnum, ndev_vif->vif_type);
+		break;
+	}
+exit:
+	return 0;
+}
+
+/* Open or block the 802.1x controlled-port queues for @peer according
+ * to the new connection state @s, mirroring the result into
+ * peer->authorized. On an AP vif the shared group-data queues are
+ * updated too (blocked only once no peer records remain).
+ * Returns the result of updating the peer's own data queues.
+ * Caller must hold ndev_vif->vif_mutex (asserted below).
+ */
+int slsi_ps_port_control(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, enum slsi_sta_conn_state s)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+	SLSI_UNUSED_PARAMETER(sdev);
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (s == SLSI_STA_CONN_STATE_CONNECTED) {
+		/* Fully authorised: open the controlled port. */
+		SLSI_NET_DBG1(dev, SLSI_TX, "STA connected, SET : FCQ - Enabled\n");
+		peer->authorized = true;
+		if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
+			(void)scsc_wifi_fcq_8021x_port_state(dev, &ndev_vif->ap.group_data_qs, SCSC_WIFI_FCQ_8021x_STATE_OPEN);
+		return scsc_wifi_fcq_8021x_port_state(dev, &peer->data_qs, SCSC_WIFI_FCQ_8021x_STATE_OPEN);
+	}
+
+	/* Every other state leaves the port blocked; the branches differ
+	 * only in the diagnostic message.
+	 */
+	if (s == SLSI_STA_CONN_STATE_DISCONNECTED)
+		SLSI_NET_DBG1(dev, SLSI_TX, "STA disconnected, SET : FCQ - Disabled\n");
+	else if (s == SLSI_STA_CONN_STATE_DOING_KEY_CONFIG)
+		SLSI_NET_DBG1(dev, SLSI_TX, "STA doing KEY config, SET : FCQ - Disabled\n");
+	else
+		SLSI_NET_DBG1(dev, SLSI_TX, "SET : FCQ - Disabled\n");
+
+	peer->authorized = false;
+	if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && !ndev_vif->peer_sta_records)
+		(void)scsc_wifi_fcq_8021x_port_state(dev, &ndev_vif->ap.group_data_qs, SCSC_WIFI_FCQ_8021x_STATE_BLOCKED);
+	return scsc_wifi_fcq_8021x_port_state(dev, &peer->data_qs, SCSC_WIFI_FCQ_8021x_STATE_BLOCKED);
+}
+
+/* Encode and send a single unsigned-integer MIB set request for @psid.
+ * Returns the MIB encode status on encode failure, otherwise the result
+ * of the MLME set exchange (0 on success).
+ */
+int slsi_set_uint_mib(struct slsi_dev *sdev, struct net_device *dev, u16 psid, int value)
+{
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int r;
+
+	SLSI_DBG2(sdev, SLSI_MLME, "UINT MIB Set Request (PSID = 0x%04X, Value = %d)\n", psid, value);
+
+	r = slsi_mib_encode_uint(&mib_data, psid, value, 0);
+	if (r != SLSI_MIB_STATUS_SUCCESS || !mib_data.dataLength)
+		return r;
+
+	r = slsi_mlme_set(sdev, dev, mib_data.data, mib_data.dataLength);
+	if (r != 0)
+		SLSI_ERR(sdev, "MIB (PSID = 0x%04X) set error = %d\n", psid, r);
+	kfree(mib_data.data);
+	return r;
+}
+
+/* Push dot11MaxTransmitMsduLifetime to the firmware.
+ * The MIB path is compiled out unless CCX_MSDU_LIFETIME_MIB_NA is
+ * defined (the firmware does not provide this MIB yet), in which case
+ * the function is a successful no-op.
+ */
+int slsi_send_max_transmit_msdu_lifetime(struct slsi_dev *dev, struct net_device *ndev, u32 msdu_lifetime)
+{
+#ifdef CCX_MSDU_LIFETIME_MIB_NA
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int error = 0;
+
+	if (slsi_mib_encode_uint(&mib_data, SLSI_PSID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME, msdu_lifetime, 0) == SLSI_MIB_STATUS_SUCCESS &&
+	    mib_data.dataLength) {
+		error = slsi_mlme_set(dev, ndev, mib_data.data, mib_data.dataLength);
+		if (error)
+			SLSI_ERR(dev, "Err Sending max msdu lifetime failed. error = %d\n", error);
+		kfree(mib_data.data);
+	}
+	return error;
+#endif
+	/* TODO: current firmware do not have this MIB yet */
+	return 0;
+}
+
+/* Read dot11MaxTransmitMsduLifetime from the firmware into
+ * @msdu_lifetime. The MIB path is compiled out unless
+ * CCX_MSDU_LIFETIME_MIB_NA is defined (firmware lacks the MIB), in
+ * which case this is a successful no-op that leaves *msdu_lifetime
+ * untouched.
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a
+ * malformed response, or the MLME error code.
+ */
+int slsi_read_max_transmit_msdu_lifetime(struct slsi_dev *dev, struct net_device *ndev, u32 *msdu_lifetime)
+{
+#ifdef CCX_MSDU_LIFETIME_MIB_NA
+	struct slsi_mib_data mib_data = { 0, NULL };
+	struct slsi_mib_data mib_res = { 0, NULL };
+	struct slsi_mib_entry mib_val;
+	int error = 0;
+	int mib_rx_len = 0;
+	size_t len;
+
+	SLSI_UNUSED_PARAMETER(ndev);
+
+	mib_res.dataLength = 10; /* PSID header(5) + dot11MaxReceiveLifetime 4 bytes + status(1) */
+	mib_res.data = kmalloc(mib_res.dataLength, GFP_KERNEL);
+
+	if (!mib_res.data)
+		return -ENOMEM;
+
+	slsi_mib_encode_get(&mib_data, SLSI_PSID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME, 0);
+	error = slsi_mlme_get(dev, NULL, mib_data.data, mib_data.dataLength,
+			      mib_res.data, mib_res.dataLength, &mib_rx_len);
+	kfree(mib_data.data);
+
+	if (error) {
+		SLSI_ERR(dev, "Err Reading max msdu lifetime failed. error = %d\n", error);
+		kfree(mib_res.data);
+		return error;
+	}
+
+	len = slsi_mib_decode(&mib_res, &mib_val);
+
+	/* NOTE(review): 8 is the expected encoded length of this single
+	 * entry - confirm against the MIB codec if the PSID ever changes.
+	 */
+	if (len != 8) {
+		kfree(mib_res.data);
+		return -EINVAL;
+	}
+	*msdu_lifetime = mib_val.value.u.uintValue;
+
+	kfree(mib_res.data);
+
+	return error;
+#endif
+	/* TODO: current firmware do not have this MIB yet */
+	return 0;
+}
+
+/* Publish the wiphy band tables matching the requested band restriction
+ * (slot 0 = 2.4 GHz, slot 1 = 5 GHz) and re-apply the custom regulatory
+ * domain over the result. An unrecognised @band leaves the tables as
+ * they were but still refreshes the regulatory flags.
+ * TODO: lock scan_mutex (carried over from the original code).
+ */
+void slsi_band_cfg_update(struct slsi_dev *sdev, int band)
+{
+	if (band == SLSI_FREQ_BAND_AUTO) {
+		sdev->wiphy->bands[0] = sdev->device_config.band_2G;
+		sdev->wiphy->bands[1] = sdev->device_config.band_5G;
+	} else if (band == SLSI_FREQ_BAND_5GHZ) {
+		sdev->wiphy->bands[0] = NULL;
+		sdev->wiphy->bands[1] = sdev->device_config.band_5G;
+	} else if (band == SLSI_FREQ_BAND_2GHZ) {
+		sdev->wiphy->bands[0] = sdev->device_config.band_2G;
+		sdev->wiphy->bands[1] = NULL;
+	}
+
+	wiphy_apply_custom_regulatory(sdev->wiphy, sdev->device_config.domain_info.regdomain);
+	slsi_update_supported_channels_regd_flags(sdev);
+}
+
+/* Apply a new band restriction. Updates the device config and wiphy
+ * band tables, then walks all interfaces and force-disconnects any STA
+ * vif connected (or connecting) on a band the new setting rejects.
+ * Returns 0 on success or -EINVAL if a disconnect is needed but the
+ * cached sta_bss is unavailable.
+ * Locking: takes device_config_mutex, then netdev_add_remove_mutex and
+ * each vif_mutex in turn (never simultaneously with device_config_mutex).
+ */
+int slsi_band_update(struct slsi_dev *sdev, int band)
+{
+	int i;
+	struct net_device *dev;
+	struct netdev_vif *ndev_vif;
+
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+	SLSI_DBG3(sdev, SLSI_CFG80211, "supported_band:%d\n", band);
+
+	/* No change requested - nothing to do. */
+	if (band == sdev->device_config.supported_band) {
+		SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+		return 0;
+	}
+
+	sdev->device_config.supported_band = band;
+
+	slsi_band_cfg_update(sdev, band);
+
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	/* If new band is auto(2.4GHz + 5GHz, no need to check for station connection.*/
+	if (band == 0)
+		return 0;
+
+	/* If station is connected on any rejected band, disconnect the station. */
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	for (i = 1; i < (CONFIG_SCSC_WLAN_MAX_INTERFACES + 1); i++) {
+		dev = slsi_get_netdev_locked(sdev, i);
+		/* NOTE(review): the walk stops at the first missing netdev -
+		 * this assumes interface slots are allocated contiguously;
+		 * confirm, otherwise later vifs would be skipped.
+		 */
+		if (!dev)
+			break;
+		ndev_vif = netdev_priv(dev);
+		SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+		/**
+		 * 1. vif should be activated and vif type should be station.
+		 * 2. Station should be either in connecting or connected state.
+		 * 3. if (new band is 5G and connection is on 2.4) or (new band is 2.4 and connection is 5)
+		 * when all the above conditions are true drop the connection
+		 * Do not wait for disconnect ind.
+		 */
+		if ((ndev_vif->activated) && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+		    (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTING || ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED) &&
+		    (ndev_vif->chan->hw_value <= 14 ? band == SLSI_FREQ_BAND_5GHZ : band == SLSI_FREQ_BAND_2GHZ)) {
+			int r;
+
+			if (!ndev_vif->sta.sta_bss) {
+				SLSI_ERR(sdev, "slsi_mlme_disconnect failed, sta_bss is not available\n");
+				SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+				SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+				return -EINVAL;
+			}
+
+			/* Tell the firmware, then perform the local teardown
+			 * immediately rather than waiting for the indication.
+			 */
+			r = slsi_mlme_disconnect(sdev, dev, ndev_vif->sta.sta_bss->bssid, WLAN_REASON_DEAUTH_LEAVING, true);
+			LOG_CONDITIONALLY(r != 0, SLSI_ERR(sdev, "slsi_mlme_disconnect(%pM) failed with %d\n", ndev_vif->sta.sta_bss->bssid, r));
+
+			r = slsi_handle_disconnect(sdev, dev, ndev_vif->sta.sta_bss->bssid, 0);
+			LOG_CONDITIONALLY(r != 0, SLSI_ERR(sdev, "slsi_handle_disconnect(%pM) failed with %d\n", ndev_vif->sta.sta_bss->bssid, r));
+		}
+		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	}
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+	return 0;
+}
+
+/* Build and transmit a gratuitous ARP (broadcast ARP request for our
+ * own IPv4 address) on a connected STA vif. A no-op if no IPv4 address
+ * is configured. This takes care to free the SKB on failure.
+ * Returns 0 on success or a negative errno.
+ * Caller must hold ndev_vif->vif_mutex (asserted below).
+ */
+int slsi_send_gratuitous_arp(struct slsi_dev *sdev, struct net_device *dev)
+{
+	int ret = 0;
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *arp;
+	struct ethhdr *ehdr;
+	/* Fixed ARP header: HTYPE=Ethernet(1), PTYPE=IPv4(0x0800),
+	 * HLEN=6, PLEN=4, OPER=request(1).
+	 */
+	static const u8 arp_hdr[] = { 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01 };
+	/* Payload after the fixed header: SHA + SPA + THA + TPA */
+	int arp_size = sizeof(arp_hdr) + ETH_ALEN + sizeof(ndev_vif->ipaddress) + ETH_ALEN + sizeof(ndev_vif->ipaddress);
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "\n");
+
+	/* Nothing to announce without a configured IPv4 address. */
+	if (!ndev_vif->ipaddress)
+		return 0;
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (WARN_ON(!ndev_vif->activated))
+		return -EINVAL;
+	if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION))
+		return -EINVAL;
+	if (WARN_ON(ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED))
+		return -EINVAL;
+
+	SLSI_NET_DBG2(dev, SLSI_CFG80211, "IP:%pI4\n", &ndev_vif->ipaddress);
+
+	arp = slsi_alloc_skb_headroom(sizeof(struct ethhdr) + arp_size, GFP_KERNEL);
+	if (WARN_ON(!arp))
+		return -ENOMEM;
+
+	/* The Ethernet header is accessed in the stack. */
+	skb_reset_mac_header(arp);
+
+	/* Ethernet Header: broadcast destination, our MAC as source. */
+	ehdr = (struct ethhdr *)skb_put(arp, sizeof(struct ethhdr));
+	memset(ehdr->h_dest, 0xFF, ETH_ALEN);
+	SLSI_ETHER_COPY(ehdr->h_source, dev->dev_addr);
+	ehdr->h_proto = cpu_to_be16(ETH_P_ARP);
+
+	/* Arp Data: SHA=our MAC, SPA=our IP, THA=ff:ff.., TPA=our IP
+	 * (gratuitous: sender and target IP are the same).
+	 */
+	memcpy(skb_put(arp, sizeof(arp_hdr)), arp_hdr, sizeof(arp_hdr));
+	SLSI_ETHER_COPY(skb_put(arp, ETH_ALEN), dev->dev_addr);
+	memcpy(skb_put(arp, sizeof(ndev_vif->ipaddress)), &ndev_vif->ipaddress, sizeof(ndev_vif->ipaddress));
+	memset(skb_put(arp, ETH_ALEN), 0xFF, ETH_ALEN);
+	memcpy(skb_put(arp, sizeof(ndev_vif->ipaddress)), &ndev_vif->ipaddress, sizeof(ndev_vif->ipaddress));
+
+	arp->dev = dev;
+	/* NOTE(review): skb->protocol is conventionally big-endian
+	 * (htons(ETH_P_ARP)); ETH_P_ARP is assigned here in host order -
+	 * confirm whether slsi_tx_data relies on this field.
+	 */
+	arp->protocol = ETH_P_ARP;
+	arp->ip_summed = CHECKSUM_UNNECESSARY;
+	arp->queue_mapping = slsi_netif_get_peer_queue(0, 0); /* Queueset 0 AC 0 */
+
+	ret = slsi_tx_data(sdev, dev, arp);
+	if (ret)
+		slsi_kfree_skb(arp);
+
+	return ret;
+}
+
+/* Match-all MAC mask: filters on the full 6-byte destination address. */
+const u8 addr_mask[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+/* IPv6 solicited-node multicast MAC prefix (33:33:ff:...); when used,
+ * the trailing bytes are taken from the local IPv6 address.
+ */
+const u8 solicited_node_addr_mask[6] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
+
+/* Assemble one packet-filter element: a Samsung vendor-specific IE
+ * header (id/mode filled in) followed by @num_pattern_desc pattern
+ * descriptors copied from @pattern_desc. The element's length byte and
+ * the running total in @pkt_filters_len are updated to account for the
+ * descriptors (each costing SLSI_PKT_DESC_FIXED_LEN plus mask+pattern
+ * bytes).
+ */
+static void slsi_create_packet_filter_element(u8 filterid,
+					      u8 pkt_filter_mode,
+					      u8 num_pattern_desc,
+					      struct slsi_mlme_pattern_desc *pattern_desc,
+					      struct slsi_mlme_pkt_filter_elem *pkt_filter_elem,
+					      u8 *pkt_filters_len)
+{
+	/* Vendor IE: tag 0xdd, length (patched below), Samsung OUI
+	 * 00:16:32, subtype 0x02, then filter id and mode.
+	 */
+	u8 elem_hdr[SLSI_PKT_FILTER_ELEM_HDR_LEN] = { 0xdd,
+						      0x00,
+						      0x00, 0x16, 0x32,
+						      0x02,
+						      filterid,
+						      pkt_filter_mode
+	};
+	u8 idx, desc_bytes = 0;
+
+	WARN_ON(num_pattern_desc > SLSI_MAX_PATTERN_DESC);
+
+	memcpy(pkt_filter_elem->header, elem_hdr, SLSI_PKT_FILTER_ELEM_HDR_LEN);
+	pkt_filter_elem->num_pattern_desc = num_pattern_desc;
+
+	for (idx = 0; idx < num_pattern_desc; idx++) {
+		/* struct assignment copies the descriptor wholesale */
+		pkt_filter_elem->pattern_desc[idx] = pattern_desc[idx];
+		desc_bytes += SLSI_PKT_DESC_FIXED_LEN + 2 * pattern_desc[idx].mask_length;
+	}
+
+	/*Update the length in the header*/
+	pkt_filter_elem->header[1] = SLSI_PKT_FILTER_ELEM_FIXED_LEN + desc_bytes;
+	*pkt_filters_len += SLSI_PKT_FILTER_ELEM_HDR_LEN + desc_bytes;
+
+	SLSI_DBG3_NODEV(SLSI_MLME, "filterid=0x%x,pkt_filter_mode=0x%x,num_pattern_desc=0x%x\n",
+			filterid, pkt_filter_mode, num_pattern_desc);
+}
+
+#define SLSI_SCREEN_OFF_FILTERS_COUNT 1
+
+/* Install the baseline filter: opt out of all group-addressed
+ * (broadcast/multicast) frames, both while awake and while asleep, by
+ * matching the I/G bit of the destination MAC address.
+ * Returns the result of the MLME set-packet-filter exchange.
+ */
+static int slsi_set_common_packet_filters(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct slsi_mlme_pattern_desc ig_bit_pattern;
+	struct slsi_mlme_pkt_filter_elem filter_elem[1];
+	u8 filters_len = 0;
+	u8 filter_count = 0;
+
+	/* First byte of the destination MAC; mask/pattern 0x01 selects
+	 * exactly the I/G (group) bit.
+	 */
+	ig_bit_pattern.offset = 0;
+	ig_bit_pattern.mask_length = 1;
+	ig_bit_pattern.mask[0] = 0x01;
+	ig_bit_pattern.pattern[0] = 0x01;
+
+	slsi_create_packet_filter_element(SLSI_ALL_BC_MC_FILTER_ID,
+					  FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP | FAPI_PACKETFILTERMODE_OPT_OUT,
+					  1, &ig_bit_pattern, &filter_elem[filter_count], &filters_len);
+	filter_count++;
+
+	return slsi_mlme_set_packet_filter(sdev, dev, filters_len, filter_count, filter_elem);
+}
+
+/* Install suspend-time ARP/NS filters on a connected STA vif: opt in
+ * broadcast ARP requests targeting our IPv4 address and (unless IPv6 is
+ * blocked) multicast Neighbor Solicitations for our solicited-node
+ * address. Also pushes the local IP addresses to the firmware for
+ * ARP/NDP offload. Skipped entirely when the AP advertises Proxy ARP.
+ * Returns 0 on success or a negative/MLME error.
+ */
+int slsi_set_arp_packet_filter(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct slsi_mlme_pattern_desc pattern_desc[SLSI_MAX_PATTERN_DESC];
+	int num_pattern_desc = 0;
+	u8 pkt_filters_len = 0, num_filters = 0;
+	struct slsi_mlme_pkt_filter_elem pkt_filter_elem[2];
+	int ret;
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+
+	if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION))
+		return -EINVAL;
+
+	if (WARN_ON(!peer))
+		return -EINVAL;
+
+	/* AP answers ARP on our behalf - no local filter needed. */
+	if (slsi_is_proxy_arp_supported_on_ap(peer->assoc_resp_ie))
+		return 0;
+
+	/*Set the IP address while suspending as this will be used by firmware for ARP/NDP offloading*/
+	slsi_mlme_set_ip_address(sdev, dev);
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+	slsi_mlme_set_ipv6_address(sdev, dev);
+#endif
+
+	SLSI_NET_DBG2(dev, SLSI_MLME, "Set ARP filter\n");
+
+	/*Opt in the broadcast ARP packets for Local IP address*/
+	num_pattern_desc = 0;
+	pattern_desc[num_pattern_desc].offset = 0; /*filtering on MAC destination Address*/
+	pattern_desc[num_pattern_desc].mask_length = ETH_ALEN;
+	/* NOTE(review): pattern is set to addr_mask (ff:ff:ff:ff:ff:ff),
+	 * i.e. this matches the Ethernet broadcast destination.
+	 */
+	SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].mask, addr_mask);
+	SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].pattern, addr_mask);
+	num_pattern_desc++;
+
+	/*filter on ethertype ARP*/
+	SET_ETHERTYPE_PATTERN_DESC(pattern_desc[num_pattern_desc], ETH_P_ARP);
+	num_pattern_desc++;
+
+	/* 0x26 = 38 = 14-byte Ethernet header + offset 24 of the ARP
+	 * Target Protocol Address field.
+	 */
+	pattern_desc[num_pattern_desc].offset = 0x26; /*filtering on Target IP Address*/
+	pattern_desc[num_pattern_desc].mask_length = 4;
+	memcpy(pattern_desc[num_pattern_desc].mask, addr_mask, pattern_desc[num_pattern_desc].mask_length);
+	memcpy(pattern_desc[num_pattern_desc].pattern, &ndev_vif->ipaddress, pattern_desc[num_pattern_desc].mask_length);
+	num_pattern_desc++;
+
+	slsi_create_packet_filter_element(SLSI_LOCAL_ARP_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_IN,
+					  num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters], &pkt_filters_len);
+	num_filters++;
+
+	ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+	if (ret)
+		return ret;
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+	pkt_filters_len = 0;
+	num_filters = 0;
+
+	/*Opt in the multicast NS packets for Local IP address in active mode*/
+	num_pattern_desc = 0;
+	pattern_desc[num_pattern_desc].offset = 0; /*filtering on MAC destination Address*/
+	pattern_desc[num_pattern_desc].mask_length = ETH_ALEN;
+	SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].mask, addr_mask);
+	/* Solicited-node multicast MAC: 33:33:ff + last 3 bytes of the
+	 * local IPv6 address.
+	 */
+	memcpy(pattern_desc[num_pattern_desc].pattern, solicited_node_addr_mask, 3);
+	memcpy(&pattern_desc[num_pattern_desc].pattern[3], &ndev_vif->ipv6address.s6_addr[13], 3); /* last 3 bytes of IPv6 address*/
+	num_pattern_desc++;
+
+	/* filter on ethertype IPv6 (0x86DD) */
+	SET_ETHERTYPE_PATTERN_DESC(pattern_desc[num_pattern_desc], 0x86DD);
+	num_pattern_desc++;
+
+	/* 0x14 = 20 = 14-byte Ethernet header + IPv6 Next Header field at
+	 * offset 6; 0x3a = ICMPv6.
+	 */
+	pattern_desc[num_pattern_desc].offset = 0x14; /*filtering on next header*/
+	pattern_desc[num_pattern_desc].mask_length = 1;
+	pattern_desc[num_pattern_desc].mask[0] = 0xff;
+	pattern_desc[num_pattern_desc].pattern[0] = 0x3a;
+	num_pattern_desc++;
+
+	/* 0x36 = 54 = 14-byte Ethernet header + 40-byte IPv6 header:
+	 * the ICMPv6 Type field.
+	 */
+	pattern_desc[num_pattern_desc].offset = 0x36; /*filtering on ICMP6 packet type*/
+	pattern_desc[num_pattern_desc].mask_length = 1;
+	pattern_desc[num_pattern_desc].mask[0] = 0xff;
+	pattern_desc[num_pattern_desc].pattern[0] = 0x87; /* Neighbor Solicitation type in ICMPv6 */
+	num_pattern_desc++;
+
+	slsi_create_packet_filter_element(SLSI_LOCAL_NS_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_IN,
+					  num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters], &pkt_filters_len);
+	num_filters++;
+
+	ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+	if (ret)
+		return ret;
+#endif
+
+	return ret;
+}
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+/* Enable or disable the enhanced packet filter flag on the device.
+ * Refused with -EPERM while the host is in early-suspend, since the
+ * setting must be applied before suspend takes effect.
+ * Returns 0 on success.
+ */
+int slsi_set_enhanced_pkt_filter(struct net_device *dev, u8 pkt_filter_enable)
+{
+	struct netdev_vif *netdev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = netdev_vif->sdev;
+	int suspended;
+
+	/* Sample the suspend state under the config mutex. */
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	suspended = sdev->device_config.user_suspend_mode;
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+	if (suspended) {
+		SLSI_ERR(sdev, "Host is in early suspend state.\n");
+		return -EPERM; /* set_enhanced_pkt_filter should not be called after suspend */
+	}
+
+	sdev->enhanced_pkt_filter_enabled = pkt_filter_enable;
+	SLSI_INFO(sdev, "Enhanced packet filter is %s", (pkt_filter_enable ? "enabled" : "disabled"));
+	return 0;
+}
+
+/* Install a single OPT_OUT_SLEEP filter matching frames whose destination
+ * MAC address equals our own hardware address.
+ */
+static int slsi_set_opt_out_unicast_packet_filter(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct slsi_mlme_pkt_filter_elem filter_elem;
+ struct slsi_mlme_pattern_desc desc;
+ u8 filters_len = 0;
+
+ /* Pattern: destination MAC (offset 0, full 6-byte mask) == sdev->hw_addr */
+ desc.offset = 0;
+ desc.mask_length = ETH_ALEN;
+ memset(desc.mask, 0xff, ETH_ALEN);
+ memcpy(desc.pattern, sdev->hw_addr, ETH_ALEN);
+
+ slsi_create_packet_filter_element(SLSI_OPT_OUT_ALL_FILTER_ID,
+ FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP,
+ 1, &desc,
+ &filter_elem, &filters_len);
+
+ return slsi_mlme_set_packet_filter(sdev, dev, filters_len, 1, &filter_elem);
+}
+
+/* Install an OPT_IN_SLEEP filter that keeps IPv4 TCP packets while the
+ * platform is suspended: pattern 0 matches ethertype IPv4 (0x0800),
+ * pattern 1 matches iphdr->protocol == TCP.
+ */
+static int slsi_set_opt_in_tcp4_packet_filter(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct slsi_mlme_pattern_desc pattern_desc[2];
+ u8 pkt_filters_len = 0;
+ int ret = 0;
+ struct slsi_mlme_pkt_filter_elem pkt_filter_elem;
+
+ /* IPv4 packet */
+ pattern_desc[0].offset = ETH_ALEN + ETH_ALEN; /* ethhdr->h_proto */
+ pattern_desc[0].mask_length = 2;
+ pattern_desc[0].mask[0] = 0xff; /* Big endian 0xffff */
+ pattern_desc[0].mask[1] = 0xff;
+ pattern_desc[0].pattern[0] = 0x08; /* Big endian 0x0800 */
+ pattern_desc[0].pattern[1] = 0x00;
+
+ /* dest.addr(6) + src.addr(6) + Protocol(2) = sizeof(struct ethhdr) = 14 */
+ /* VER(1) + Svc(1) + TotalLen(2) + ID(2) + Flag&Fragmentation(2) + TTL(1) = 9 */
+ pattern_desc[1].offset = 23; /* iphdr->protocol */
+ pattern_desc[1].mask_length = 1;
+ pattern_desc[1].mask[0] = 0xff;
+ pattern_desc[1].pattern[0] = IPPROTO_TCP; /* 0x06 (previous comment said 0x11, which is UDP) */
+ slsi_create_packet_filter_element(SLSI_OPT_IN_TCP4_FILTER_ID,
+ FAPI_PACKETFILTERMODE_OPT_IN_SLEEP,
+ 2,
+ pattern_desc,
+ &pkt_filter_elem,
+ &pkt_filters_len);
+
+ ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, 1, &pkt_filter_elem);
+
+ return ret;
+}
+
+/* Install an OPT_IN_SLEEP filter that keeps IPv6 TCP packets while the
+ * platform is suspended: pattern 0 matches ethertype IPv6 (0x86DD),
+ * pattern 1 matches the IPv6 next-header field == TCP.
+ */
+static int slsi_set_opt_in_tcp6_packet_filter(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct slsi_mlme_pkt_filter_elem filter_elem;
+ struct slsi_mlme_pattern_desc descs[2];
+ u8 filters_len = 0;
+
+ /* Pattern 0: ethhdr->h_proto == 0x86DD, big endian on the wire */
+ descs[0].offset = ETH_ALEN + ETH_ALEN;
+ descs[0].mask_length = 2;
+ descs[0].mask[0] = 0xff;
+ descs[0].mask[1] = 0xff;
+ descs[0].pattern[0] = 0x86;
+ descs[0].pattern[1] = 0xdd;
+
+ /* Pattern 1: IPv6 next header (6 bytes into the IPv6 header) == TCP */
+ descs[1].offset = sizeof(struct ethhdr) + 6;
+ descs[1].mask_length = 1;
+ descs[1].mask[0] = 0xff;
+ descs[1].pattern[0] = IPPROTO_TCP;
+
+ slsi_create_packet_filter_element(SLSI_OPT_IN_TCP6_FILTER_ID,
+ FAPI_PACKETFILTERMODE_OPT_IN_SLEEP,
+ 2, descs,
+ &filter_elem, &filters_len);
+
+ return slsi_mlme_set_packet_filter(sdev, dev, filters_len, 1, &filter_elem);
+}
+#endif
+
+/* Opt in (on screen off) the multicast addresses registered on this STA vif.
+ * One filter element is built per registered address, starting at
+ * SLSI_REGD_MC_FILTER_ID. Returns 0 if no addresses are registered.
+ */
+static int slsi_set_multicast_packet_filters(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct slsi_mlme_pattern_desc pattern_desc;
+ u8 pkt_filters_len = 0, i, num_filters = 0;
+ int ret = 0;
+ struct slsi_mlme_pkt_filter_elem *pkt_filter_elem = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u8 mc_filter_id, mc_filter_count;
+
+ /* Multicast packets for registered multicast addresses to be opted in on screen off*/
+ SLSI_NET_DBG2(dev, SLSI_MLME, "Set mc filters ,count =%d\n", ndev_vif->sta.regd_mc_addr_count);
+
+ mc_filter_count = ndev_vif->sta.regd_mc_addr_count;
+ if (!mc_filter_count)
+ return 0;
+
+ /* kmalloc_array() checks the count * size multiplication for overflow */
+ pkt_filter_elem = kmalloc_array(mc_filter_count, sizeof(*pkt_filter_elem), GFP_KERNEL);
+ if (!pkt_filter_elem) {
+ SLSI_NET_ERR(dev, "ERROR Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ /* All elements share the same pattern shape: full-mask match on the
+  * destination MAC at offset 0; only the pattern bytes change per address.
+  */
+ pattern_desc.offset = 0;
+ pattern_desc.mask_length = ETH_ALEN;
+ SLSI_ETHER_COPY(pattern_desc.mask, addr_mask);
+
+ for (i = 0; i < mc_filter_count; i++) {
+ SLSI_ETHER_COPY(pattern_desc.pattern, ndev_vif->sta.regd_mc_addr[i]);
+ mc_filter_id = SLSI_REGD_MC_FILTER_ID + i;
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ /* With enhanced filtering, multicast is opted in for active mode only */
+ if (sdev->enhanced_pkt_filter_enabled)
+ slsi_create_packet_filter_element(mc_filter_id,
+ FAPI_PACKETFILTERMODE_OPT_IN,
+ 1, &pattern_desc,
+ &pkt_filter_elem[num_filters], &pkt_filters_len);
+ else
+#endif
+ slsi_create_packet_filter_element(mc_filter_id,
+ FAPI_PACKETFILTERMODE_OPT_IN |
+ FAPI_PACKETFILTERMODE_OPT_IN_SLEEP,
+ 1, &pattern_desc,
+ &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ }
+
+ ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+ kfree(pkt_filter_elem);
+
+ return ret;
+}
+
+/* Remove the screen-off packet filters (registered multicast, local ARP/NS,
+ * broadcast/multicast and optional enhanced TCP filters) when the screen
+ * turns back on. Caller must hold ndev_vif->vif_mutex; STA vif only.
+ */
+int slsi_clear_packet_filters(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+
+ u8 i, pkt_filters_len = 0;
+ int num_filters = 0;
+ int ret = 0;
+ struct slsi_mlme_pkt_filter_elem *pkt_filter_elem;
+ u8 mc_filter_id;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION))
+ return -EINVAL;
+
+ if (WARN_ON(!peer))
+ return -EINVAL;
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "Clear filters on Screen on");
+
+ /*calculate number of filters*/
+ num_filters = ndev_vif->sta.regd_mc_addr_count + SLSI_SCREEN_OFF_FILTERS_COUNT;
+ if ((slsi_is_proxy_arp_supported_on_ap(peer->assoc_resp_ie)) == false) {
+ num_filters++;
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ num_filters++;
+#endif
+ }
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ if (sdev->enhanced_pkt_filter_enabled) {
+ num_filters++; /*All OPT OUT*/
+ num_filters++; /*TCP IPv4 OPT IN*/
+ num_filters++; /*TCP IPv6 OPT IN*/
+ }
+#endif
+
+ /* kmalloc_array() checks the count * size multiplication for overflow */
+ pkt_filter_elem = kmalloc_array(num_filters, sizeof(*pkt_filter_elem), GFP_KERNEL);
+ if (!pkt_filter_elem) {
+ SLSI_NET_ERR(dev, "ERROR Memory allocation failure");
+ return -ENOMEM;
+ }
+
+ /* Elements with mode 0 and no patterns are used here to clear the
+  * corresponding filter IDs (mirrors how they were installed).
+  */
+ num_filters = 0;
+ for (i = 0; i < ndev_vif->sta.regd_mc_addr_count; i++) {
+ mc_filter_id = SLSI_REGD_MC_FILTER_ID + i;
+ slsi_create_packet_filter_element(mc_filter_id, 0, 0, NULL, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ }
+ if ((slsi_is_proxy_arp_supported_on_ap(peer->assoc_resp_ie)) == false) {
+ slsi_create_packet_filter_element(SLSI_LOCAL_ARP_FILTER_ID, 0, 0, NULL, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ slsi_create_packet_filter_element(SLSI_LOCAL_NS_FILTER_ID, 0, 0, NULL, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+#endif
+ }
+
+ slsi_create_packet_filter_element(SLSI_ALL_BC_MC_FILTER_ID, 0, 0, NULL, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ if (sdev->enhanced_pkt_filter_enabled) {
+ slsi_create_packet_filter_element(SLSI_OPT_OUT_ALL_FILTER_ID, 0, 0, NULL,
+ &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ slsi_create_packet_filter_element(SLSI_OPT_IN_TCP4_FILTER_ID, 0, 0, NULL,
+ &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ slsi_create_packet_filter_element(SLSI_OPT_IN_TCP6_FILTER_ID, 0, 0, NULL,
+ &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ }
+#endif
+ ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+ kfree(pkt_filter_elem);
+ return ret;
+}
+
+/* Re-install the full screen-off filter set (multicast, ARP, optional
+ * enhanced TCP filters, then common filters). Stops at the first failure
+ * and returns its error. Caller must hold ndev_vif->vif_mutex; STA vif.
+ */
+int slsi_update_packet_filters(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int r;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION);
+
+ r = slsi_set_multicast_packet_filters(sdev, dev);
+ if (r)
+ return r;
+
+ r = slsi_set_arp_packet_filter(sdev, dev);
+ if (r)
+ return r;
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+ if (sdev->enhanced_pkt_filter_enabled) {
+ r = slsi_set_opt_out_unicast_packet_filter(sdev, dev);
+ if (!r)
+ r = slsi_set_opt_in_tcp4_packet_filter(sdev, dev);
+ if (!r)
+ r = slsi_set_opt_in_tcp6_packet_filter(sdev, dev);
+ if (r)
+ return r;
+ }
+#endif
+ return slsi_set_common_packet_filters(sdev, dev);
+}
+
+#define IPV6_PF_PATTERN_MASK 0xf0
+#define IPV6_PF_PATTERN 0x60
+
+#ifdef CONFIG_SCSC_WLAN_DISABLE_NAT_KA
+#define SLSI_ON_CONNECT_FILTERS_COUNT 2
+#else
+#define SLSI_ON_CONNECT_FILTERS_COUNT 3
+#endif
+
+/* Install the on-connect packet filters for a STA vif:
+ * - with CONFIG_SCSC_WLAN_BLOCK_IPV6: opt out all IPv6 packets (active and
+ *   suspended);
+ * - when connected to a Hotspot 2.0 AP that advertises Proxy ARP service:
+ *   opt out gratuitous ARP and (if IPv6 is enabled) unsolicited Neighbor
+ *   Advertisements in active mode;
+ * - unless CONFIG_SCSC_WLAN_DISABLE_NAT_KA: opt out NAT-T keepalives while
+ *   suspended.
+ * mlme errors are not reported to the caller (void return).
+ */
+void slsi_set_packet_filters(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct slsi_mlme_pattern_desc pattern_desc[SLSI_MAX_PATTERN_DESC];
+ int num_pattern_desc = 0;
+ u8 pkt_filters_len = 0;
+ int num_filters = 0;
+
+ struct slsi_mlme_pkt_filter_elem pkt_filter_elem[SLSI_ON_CONNECT_FILTERS_COUNT];
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+ const u8 *ie;
+
+ if (WARN_ON(!ndev_vif->activated))
+ return;
+
+ if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION))
+ return;
+
+ if (WARN_ON(!peer))
+ return;
+
+ if (WARN_ON(!peer->assoc_resp_ie))
+ return;
+
+#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
+
+ /*Opt out all IPv6 packets in active and suspended mode (ipv6 filtering)*/
+ num_pattern_desc = 0;
+ pattern_desc[num_pattern_desc].offset = 0x0E; /*filtering on IP Protocol version*/
+ pattern_desc[num_pattern_desc].mask_length = 1;
+ pattern_desc[num_pattern_desc].mask[0] = IPV6_PF_PATTERN_MASK;
+ pattern_desc[num_pattern_desc].pattern[0] = IPV6_PF_PATTERN;
+ num_pattern_desc++;
+
+ slsi_create_packet_filter_element(SLSI_ALL_IPV6_PKTS_FILTER_ID,
+ FAPI_PACKETFILTERMODE_OPT_OUT | FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP,
+ num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+
+#endif
+
+ /* HS2.0 indication element in the BSS IEs tells us this is a Passpoint AP */
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, SLSI_WLAN_OUI_TYPE_WFA_HS20_IND,
+ ndev_vif->sta.sta_bss->ies->data, ndev_vif->sta.sta_bss->ies->len);
+
+ if (ie) {
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Connected to HS2 AP ");
+
+ if (slsi_is_proxy_arp_supported_on_ap(peer->assoc_resp_ie)) {
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Proxy ARP service supported on HS2 AP ");
+
+ /* Opt out Gratuitous ARP packets (ARP Announcement) in active and suspended mode.
+ * For suspended mode, gratituous ARP is dropped by "opt out all broadcast" that will be
+ * set in slsi_set_common_packet_filters on screen off
+ */
+ num_pattern_desc = 0;
+ pattern_desc[num_pattern_desc].offset = 0; /*filtering on MAC destination Address*/
+ pattern_desc[num_pattern_desc].mask_length = ETH_ALEN;
+ SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].mask, addr_mask);
+ SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].pattern, addr_mask);
+ num_pattern_desc++;
+
+ SET_ETHERTYPE_PATTERN_DESC(pattern_desc[num_pattern_desc], ETH_P_ARP);
+ num_pattern_desc++;
+
+ slsi_create_packet_filter_element(SLSI_PROXY_ARP_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_OUT,
+ num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters],
+ &pkt_filters_len);
+ num_filters++;
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+ /* Opt out unsolicited Neighbor Advertisement packets .For suspended mode, NA is dropped by
+ * "opt out all IPv6 multicast" already set in slsi_create_common_packet_filters
+ */
+
+ num_pattern_desc = 0;
+
+ pattern_desc[num_pattern_desc].offset = 0; /*filtering on MAC destination Address*/
+ pattern_desc[num_pattern_desc].mask_length = ETH_ALEN;
+ SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].mask, addr_mask);
+ SLSI_ETHER_COPY(pattern_desc[num_pattern_desc].pattern, solicited_node_addr_mask);
+ num_pattern_desc++;
+
+ SET_ETHERTYPE_PATTERN_DESC(pattern_desc[num_pattern_desc], 0x86DD);
+ num_pattern_desc++;
+
+ pattern_desc[num_pattern_desc].offset = 0x14; /*filtering on next header*/
+ pattern_desc[num_pattern_desc].mask_length = 1;
+ pattern_desc[num_pattern_desc].mask[0] = 0xff;
+ pattern_desc[num_pattern_desc].pattern[0] = 0x3a;
+ num_pattern_desc++;
+
+ pattern_desc[num_pattern_desc].offset = 0x36; /*filtering on ICMP6 packet type*/
+ pattern_desc[num_pattern_desc].mask_length = 1;
+ pattern_desc[num_pattern_desc].mask[0] = 0xff;
+ pattern_desc[num_pattern_desc].pattern[0] = 0x88; /* Neighbor Advertisement type in ICMPv6 */
+ num_pattern_desc++;
+
+ slsi_create_packet_filter_element(SLSI_PROXY_ARP_NA_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_OUT,
+ num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters],
+ &pkt_filters_len);
+ num_filters++;
+#endif
+ }
+ }
+
+#ifndef CONFIG_SCSC_WLAN_DISABLE_NAT_KA
+ {
+ /* Pattern bytes at offset 0x24 cover the UDP destination port (0x1194 =
+  * 4500, NAT-T) and UDP length 0x0009 — presumably the 1-byte NAT
+  * keepalive payload; confirm against the NAT-T keepalive format.
+  */
+ const u8 nat_ka_pattern[4] = { 0x11, 0x94, 0x00, 0x09 };
+ /*Opt out the NAT T for IPsec*/
+ num_pattern_desc = 0;
+ pattern_desc[num_pattern_desc].offset = 0x24; /*filtering on destination port number*/
+ pattern_desc[num_pattern_desc].mask_length = 4;
+ memcpy(pattern_desc[num_pattern_desc].mask, addr_mask, 4);
+ memcpy(pattern_desc[num_pattern_desc].pattern, nat_ka_pattern, 4);
+ num_pattern_desc++;
+
+ slsi_create_packet_filter_element(SLSI_NAT_IPSEC_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP,
+ num_pattern_desc, pattern_desc, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ }
+#endif
+
+ if (num_filters)
+ slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+}
+
+/* Handle an IPv4 address change notification for this vif.
+ * - Always caches the new address in ndev_vif->ipaddress.
+ * - AP vif: pushes the address to firmware and opts out IPv6 packets while
+ *   platform-suspended.
+ * - Connected STA vif: sends a gratuitous ARP (immediately if the link is
+ *   open or keys are in place, otherwise deferred via gratuitous_arp_needed)
+ *   and re-applies the configured power mode.
+ * Returns 0 or a negative error from the mlme calls / missing peer.
+ */
+int slsi_ip_address_changed(struct slsi_dev *sdev, struct net_device *dev, __be32 ipaddress)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int ret = 0;
+
+ /* Store the IP address outside the check for vif being active
+ * as we get the same notification in case of static IP
+ */
+ if (ndev_vif->ipaddress != ipaddress)
+ ndev_vif->ipaddress = ipaddress;
+
+ if (ndev_vif->activated && (ndev_vif->vif_type == FAPI_VIFTYPE_AP)) {
+ struct slsi_mlme_pattern_desc pattern_desc[1];
+ u8 num_patterns = 0;
+ struct slsi_mlme_pkt_filter_elem pkt_filter_elem[1];
+ u8 pkt_filters_len = 0;
+ u8 num_filters = 0;
+
+ /* NOTE(review): redundant re-store — ipaddress was already cached above */
+ ndev_vif->ipaddress = ipaddress;
+ ret = slsi_mlme_set_ip_address(sdev, dev);
+ if (ret != 0)
+ SLSI_NET_ERR(dev, "slsi_mlme_set_ip_address ERROR. ret=%d", ret);
+
+ /* Opt out IPv6 packets in platform suspended mode */
+ pattern_desc[num_patterns].offset = 0x0E;
+ pattern_desc[num_patterns].mask_length = 0x01;
+ pattern_desc[num_patterns].mask[0] = IPV6_PF_PATTERN_MASK;
+ pattern_desc[num_patterns++].pattern[0] = IPV6_PF_PATTERN;
+
+ slsi_create_packet_filter_element(SLSI_AP_ALL_IPV6_PKTS_FILTER_ID, FAPI_PACKETFILTERMODE_OPT_OUT_SLEEP,
+ num_patterns, pattern_desc, &pkt_filter_elem[num_filters], &pkt_filters_len);
+ num_filters++;
+ ret = slsi_mlme_set_packet_filter(sdev, dev, pkt_filters_len, num_filters, pkt_filter_elem);
+ if (ret != 0)
+ SLSI_NET_ERR(dev, "slsi_mlme_set_packet_filter (return :%d) ERROR\n", ret);
+ } else if ((ndev_vif->activated) &&
+ (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ struct slsi_peer *peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+
+ if (WARN_ON(!peer))
+ return -EINVAL;
+
+ /* Send the gratuitous ARP now only if no privacy or both keys are set;
+  * otherwise defer until key installation completes.
+  */
+ if (!(peer->capabilities & WLAN_CAPABILITY_PRIVACY) ||
+ (ndev_vif->sta.group_key_set && peer->pairwise_key_set))
+ slsi_send_gratuitous_arp(sdev, dev);
+ else
+ ndev_vif->sta.gratuitous_arp_needed = true;
+
+ slsi_mlme_powermgt(sdev, dev, ndev_vif->set_power_mode);
+ }
+
+ return ret;
+}
+
+#define SLSI_AP_AUTO_CHANLS_LIST_FROM_HOSTAPD_MAX 3
+
+/* Blocking AP auto-channel-selection scan on the main VIF.
+ * Scores the three candidate channels (indices 0, 5, 10 of channels[] —
+ * presumably 2.4 GHz channels 1/6/11; confirm with the caller): each BSS on
+ * the exact channel adds 5, each BSS within 20 MHz adds 5 - freqdiff/5.
+ * The lowest-scoring candidate is stored in device_config.ap_auto_chan.
+ * Returns 0 on success, -EBUSY if a HW scan is in flight, -EOPNOTSUPP in
+ * WlanLite mode, or an mlme error.
+ */
+int slsi_auto_chan_select_scan(struct slsi_dev *sdev, int n_channels, struct ieee80211_channel *channels[])
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct sk_buff_head unique_scan_results;
+ int scan_result_count[SLSI_AP_AUTO_CHANLS_LIST_FROM_HOSTAPD_MAX] = { 0, 0, 0 };
+ int i, j;
+ int r = 0;
+ int selected_index = 0;
+ int min_index = 0;
+ u32 freqdiff = 0;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_WARN(sdev, "not supported in WlanLite mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ skb_queue_head_init(&unique_scan_results);
+
+ dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN); /* use the main VIF */
+ if (!dev) {
+ r = -EINVAL;
+ return r;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
+ r = -EBUSY;
+ goto exit_with_vif;
+ }
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = true;
+ r = slsi_mlme_add_scan(sdev,
+ dev,
+ FAPI_SCANTYPE_AP_AUTO_CHANNEL_SELECTION,
+ FAPI_REPORTMODE_REAL_TIME,
+ 0, /* n_ssids */
+ NULL, /* ssids */
+ n_channels,
+ channels,
+ NULL,
+ NULL, /* ie */
+ 0, /* ie_len */
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan);
+
+ if (r == 0) {
+ struct sk_buff *unique_scan;
+ struct sk_buff *scan;
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
+ scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
+ while (scan) {
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(scan);
+ struct ieee80211_channel *channel;
+
+ /* make sure this BSSID has not already been used */
+ skb_queue_walk(&unique_scan_results, unique_scan) {
+ struct ieee80211_mgmt *unique_mgmt = fapi_get_mgmt(unique_scan);
+
+ if (compare_ether_addr(mgmt->bssid, unique_mgmt->bssid) == 0) {
+ slsi_kfree_skb(scan);
+ goto next_scan;
+ }
+ }
+
+ slsi_skb_queue_head(&unique_scan_results, scan);
+
+ /* channel_frequency is reported in half-MHz units, hence / 2 */
+ channel = slsi_find_scan_channel(sdev, mgmt, fapi_get_mgmtlen(scan), fapi_get_u16(scan, u.mlme_scan_ind.channel_frequency) / 2);
+ if (!channel)
+ goto next_scan;
+
+ /* check for interfering channels for 1, 6 and 11 */
+ for (i = 0, j = 0; i < SLSI_AP_AUTO_CHANLS_LIST_FROM_HOSTAPD_MAX && channels[j]; i++, j = j + 5) {
+ if (channel->center_freq == channels[j]->center_freq) {
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "exact match:%d\n", i);
+ scan_result_count[i] += 5;
+ goto next_scan;
+ }
+ freqdiff = abs((int)channel->center_freq - (channels[j]->center_freq));
+ if (freqdiff <= 20) {
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "overlapping:%d, freqdiff:%d\n", i, freqdiff);
+ scan_result_count[i] += (5 - (freqdiff / 5));
+ }
+ }
+
+next_scan:
+ scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
+
+ /* Select the channel to use */
+ for (i = 0, j = 0; i < SLSI_AP_AUTO_CHANLS_LIST_FROM_HOSTAPD_MAX; i++, j = j + 5) {
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "score[%d]:%d\n", i, scan_result_count[i]);
+ if (scan_result_count[i] <= scan_result_count[min_index]) {
+ min_index = i;
+ selected_index = j;
+ }
+ }
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "selected:%d with score:%d\n", selected_index, scan_result_count[min_index]);
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ sdev->device_config.ap_auto_chan = channels[selected_index]->hw_value & 0xFF;
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ SLSI_INFO(sdev, "Channel selected = %d", sdev->device_config.ap_auto_chan);
+ }
+ slsi_skb_queue_purge(&unique_scan_results);
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = false;
+
+exit_with_vif:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+}
+
+/* Program the roam RSSI boost MIB for both bands (index 1 = 2.4 GHz,
+ * index 2 = 5 GHz) from the cached device config. Both writes are always
+ * attempted; the return value is the result of the 5 GHz write.
+ */
+int slsi_set_boost(struct slsi_dev *sdev, struct net_device *dev)
+{
+ int err;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+
+ err = slsi_set_mib_rssi_boost(sdev, dev, SLSI_PSID_UNIFI_ROAM_RSSI_BOOST, 1,
+ sdev->device_config.rssi_boost_2g);
+ if (err)
+ SLSI_ERR(sdev, "Err setting boost value For 2g after adding vif. error = %d\n", err);
+
+ err = slsi_set_mib_rssi_boost(sdev, dev, SLSI_PSID_UNIFI_ROAM_RSSI_BOOST, 2,
+ sdev->device_config.rssi_boost_5g);
+ if (err)
+ SLSI_ERR(sdev, "Err setting boost value for 5g after adding vif . error = %d\n", err);
+
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return err;
+}
+
+/**
+ * Work to be done when ROC retention duration expires:
+ * Send ROC expired event to cfg80211 and queue work to delete unsync vif after retention timeout.
+ */
+static void slsi_p2p_roc_duration_expiry_work(struct work_struct *work)
+{
+ struct netdev_vif *ndev_vif = container_of((struct delayed_work *)work, struct netdev_vif, unsync.roc_expiry_work);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* There can be a race condition of this work function waiting for ndev_vif->vif_mutex and meanwhile the vif is deleted (due to net_stop).
+ * In such cases ndev_vif->chan would have been cleared.
+ */
+ if (ndev_vif->sdev->p2p_state == P2P_IDLE_NO_VIF) {
+ SLSI_NET_DBG1(ndev_vif->wdev.netdev, SLSI_CFG80211, "P2P unsync vif is not present\n");
+ goto exit;
+ }
+
+ SLSI_NET_DBG3(ndev_vif->wdev.netdev, SLSI_CFG80211, "Send ROC expired event\n");
+
+ /* If action frame tx is in progress don't schedule work to delete vif */
+ if (ndev_vif->sdev->p2p_state != P2P_ACTION_FRAME_TX_RX) {
+ /* After sucessful frame transmission, we will move to LISTENING or VIF ACTIVE state.
+ * Unset channel should not be sent down during p2p procedure.
+ */
+ if (!ndev_vif->drv_in_p2p_procedure) {
+ /* Restart the unset-channel timer rather than letting a stale one fire */
+ if (delayed_work_pending(&ndev_vif->unsync.unset_channel_expiry_work))
+ cancel_delayed_work(&ndev_vif->unsync.unset_channel_expiry_work);
+ queue_delayed_work(ndev_vif->sdev->device_wq, &ndev_vif->unsync.unset_channel_expiry_work,
+ msecs_to_jiffies(SLSI_P2P_UNSET_CHANNEL_EXTRA_MSEC));
+ }
+ slsi_p2p_queue_unsync_vif_del_work(ndev_vif, SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC);
+ SLSI_P2P_STATE_CHANGE(ndev_vif->sdev, P2P_IDLE_VIF_ACTIVE);
+ }
+
+ /* cfg80211 ROC-expired API changed signature in kernel 3.10.9 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ cfg80211_remain_on_channel_expired(&ndev_vif->wdev, ndev_vif->unsync.roc_cookie, ndev_vif->chan, GFP_KERNEL);
+#else
+ cfg80211_remain_on_channel_expired(ndev_vif->wdev.netdev, ndev_vif->unsync.roc_cookie,
+ ndev_vif->chan, ndev_vif->channel_type, GFP_KERNEL);
+#endif
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/**
+ * Delayed work: the retention period for the P2P unsync vif has expired,
+ * so deactivate (delete) the vif under vif_mutex.
+ */
+static void slsi_p2p_unsync_vif_delete_work(struct work_struct *work)
+{
+ struct netdev_vif *vif = container_of((struct delayed_work *)work, struct netdev_vif, unsync.del_vif_work);
+ struct net_device *dev = vif->wdev.netdev;
+
+ SLSI_MUTEX_LOCK(vif->vif_mutex);
+ SLSI_NET_DBG1(dev, SLSI_CFG80211, "Delete vif duration expired - Deactivate unsync vif\n");
+ slsi_p2p_vif_deactivate(vif->sdev, dev, true);
+ SLSI_MUTEX_UNLOCK(vif->vif_mutex);
+}
+
+/**
+ * Work to be done after roc expiry or cancel remain on channel:
+ * Unset channel to be sent to Fw.
+ * Also clears the Probe Response IEs (firmware and driver copies) when no
+ * P2P procedure is in progress, since the supplicant has stopped FIND/LISTEN.
+ */
+static void slsi_p2p_unset_channel_expiry_work(struct work_struct *work)
+{
+ struct netdev_vif *ndev_vif = container_of((struct delayed_work *)work, struct netdev_vif,
+ unsync.unset_channel_expiry_work);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct net_device *dev = ndev_vif->wdev.netdev;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (ndev_vif->activated) {
+ SLSI_NET_DBG1(ndev_vif->wdev.netdev, SLSI_CFG80211, "Unset channel expiry work-Send Unset Channel\n");
+ if (!ndev_vif->drv_in_p2p_procedure) {
+ /* Supplicant has stopped FIND/LISTEN. Clear Probe Response IEs in firmware and driver */
+ if (slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_RESPONSE, NULL, 0) != 0)
+ SLSI_NET_ERR(dev, "Clearing Probe Response IEs failed for unsync vif\n");
+ slsi_unsync_vif_set_probe_rsp_ie(ndev_vif, NULL, 0);
+
+ /* Send Unset Channel */
+ if (ndev_vif->driver_channel != 0) {
+ slsi_mlme_unset_channel_req(sdev, dev);
+ ndev_vif->driver_channel = 0;
+ }
+ }
+ } else {
+ SLSI_NET_ERR(dev, "P2P vif is not activated\n");
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/* Initializations for P2P: reset P2P state, mark the vif as unsynchronised
+ * and initialise the three unsync delayed works. Always returns 0.
+ */
+int slsi_p2p_init(struct slsi_dev *sdev, struct netdev_vif *ndev_vif)
+{
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Initialize P2P - Init P2P state to P2P_IDLE_NO_VIF\n");
+
+ sdev->p2p_state = P2P_IDLE_NO_VIF;
+ sdev->p2p_group_exp_frame = SLSI_P2P_PA_INVALID;
+ ndev_vif->vif_type = FAPI_VIFTYPE_UNSYNCHRONISED;
+ ndev_vif->unsync.slsi_p2p_continuous_fullscan = false;
+
+ INIT_DELAYED_WORK(&ndev_vif->unsync.roc_expiry_work, slsi_p2p_roc_duration_expiry_work);
+ INIT_DELAYED_WORK(&ndev_vif->unsync.del_vif_work, slsi_p2p_unsync_vif_delete_work);
+ INIT_DELAYED_WORK(&ndev_vif->unsync.unset_channel_expiry_work, slsi_p2p_unset_channel_expiry_work);
+ return 0;
+}
+
+/* De-initializations for P2P: reset the vif type and cancel any unsync
+ * delayed work still pending (it should have completed by now - WARN if not).
+ */
+void slsi_p2p_deinit(struct slsi_dev *sdev, struct netdev_vif *ndev_vif)
+{
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "De-initialize P2P\n");
+
+ ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
+
+ if (WARN_ON(delayed_work_pending(&ndev_vif->unsync.del_vif_work)))
+ cancel_delayed_work(&ndev_vif->unsync.del_vif_work);
+ if (WARN_ON(delayed_work_pending(&ndev_vif->unsync.roc_expiry_work)))
+ cancel_delayed_work(&ndev_vif->unsync.roc_expiry_work);
+}
+
+/**
+ * P2P vif activation:
+ * Add unsync vif, register for action frames, configure Probe Rsp IEs if required and set channel.
+ * Caller must hold ndev_vif->vif_mutex. On any failure after the vif was
+ * added, the vif is torn down again via slsi_p2p_vif_deactivate().
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int slsi_p2p_vif_activate(struct slsi_dev *sdev, struct net_device *dev, struct ieee80211_channel *chan, u16 duration, bool set_probe_rsp_ies)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u32 af_bmap_active = SLSI_ACTION_FRAME_PUBLIC;
+ u32 af_bmap_suspended = SLSI_ACTION_FRAME_PUBLIC;
+ int r = 0;
+
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Activate P2P unsync vif\n");
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ /* Interface address and device address are same for P2P unsync vif */
+ if (slsi_mlme_add_vif(sdev, dev, dev->dev_addr, dev->dev_addr) != 0) {
+ SLSI_NET_ERR(dev, "slsi_mlme_add_vif failed for unsync vif\n");
+ goto exit_with_error;
+ }
+
+ ndev_vif->activated = true;
+ SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
+
+ if (slsi_mlme_register_action_frame(sdev, dev, af_bmap_active, af_bmap_suspended) != 0) {
+ SLSI_NET_ERR(dev, "Action frame registration failed for unsync vif\n");
+ goto exit_with_vif;
+ }
+
+ if (set_probe_rsp_ies) {
+ u16 purpose = FAPI_PURPOSE_PROBE_RESPONSE;
+
+ /* IEs are cached by slsi_p2p_dev_probe_rsp_ie(); without them a ROC
+  * listen cannot answer probes, so fail the activation.
+  */
+ if (!ndev_vif->unsync.probe_rsp_ies) {
+ SLSI_NET_ERR(dev, "Probe Response IEs not available for ROC\n");
+ goto exit_with_vif;
+ }
+
+ if (slsi_mlme_add_info_elements(sdev, dev, purpose, ndev_vif->unsync.probe_rsp_ies, ndev_vif->unsync.probe_rsp_ies_len) != 0) {
+ SLSI_NET_ERR(dev, "Setting Probe Response IEs for unsync vif failed\n");
+ goto exit_with_vif;
+ }
+ ndev_vif->unsync.ies_changed = false;
+ }
+
+ if (slsi_mlme_set_channel(sdev, dev, chan, SLSI_FW_CHANNEL_DURATION_UNSPECIFIED, 0, 0) != 0) {
+ SLSI_NET_ERR(dev, "Set channel failed for unsync vif\n");
+ goto exit_with_vif;
+ } else {
+ ndev_vif->chan = chan;
+ ndev_vif->driver_channel = chan->hw_value;
+ }
+
+ /* No action frame response is expected yet */
+ ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+ goto exit;
+
+exit_with_vif:
+ slsi_p2p_vif_deactivate(sdev, dev, true);
+exit_with_error:
+ r = -EINVAL;
+exit:
+ return r;
+}
+
+/* Delete unsync vif - DON'T update the vif type.
+ * Caller must hold ndev_vif->vif_mutex. Safe to call when already
+ * deactivated (returns early if p2p_state == P2P_IDLE_NO_VIF).
+ * @hw_available: when false the firmware is gone, so the mlme del_vif
+ * request is skipped and only driver state is cleaned up.
+ */
+void slsi_p2p_vif_deactivate(struct slsi_dev *sdev, struct net_device *dev, bool hw_available)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "De-activate P2P unsync vif\n");
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (sdev->p2p_state == P2P_IDLE_NO_VIF) {
+ SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "P2P unsync vif already deactivated\n");
+ return;
+ }
+
+ /* Indicate failure using cfg80211_mgmt_tx_status() if frame TX is not completed during VIF delete */
+ if (ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID) {
+ ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+ /* cfg80211_mgmt_tx_status() takes a wdev from kernel 3.6 onwards */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ cfg80211_mgmt_tx_status(&ndev_vif->wdev, ndev_vif->mgmt_tx_data.cookie, ndev_vif->mgmt_tx_data.buf, ndev_vif->mgmt_tx_data.buf_len, false, GFP_KERNEL);
+#else
+ cfg80211_mgmt_tx_status(dev, ndev_vif->mgmt_tx_data.cookie, ndev_vif->mgmt_tx_data.buf, ndev_vif->mgmt_tx_data.buf_len, false, GFP_KERNEL);
+#endif
+ }
+
+ cancel_delayed_work(&ndev_vif->unsync.del_vif_work);
+ cancel_delayed_work(&ndev_vif->unsync.roc_expiry_work);
+
+ if (hw_available)
+ slsi_mlme_del_vif(sdev, dev);
+
+ SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_NO_VIF);
+
+ /* slsi_vif_deactivated is not used here after del_vif as it modifies vif type as well */
+
+ ndev_vif->activated = false;
+ ndev_vif->chan = NULL;
+
+ if (WARN_ON(ndev_vif->unsync.listen_offload))
+ ndev_vif->unsync.listen_offload = false;
+
+ /* Drop cached Probe Response IEs and any pending mgmt TX bookkeeping */
+ slsi_unsync_vif_set_probe_rsp_ie(ndev_vif, NULL, 0);
+ (void)slsi_set_mgmt_tx_data(ndev_vif, 0, 0, NULL, 0);
+
+ SLSI_NET_DBG2(dev, SLSI_INIT_DEINIT, "P2P unsync vif deactivated\n");
+}
+
+/**
+ * Delete the unsync vif when a P2P group role is being started.
+ * The caller's net_device belongs to the group interface (ap_start/connect),
+ * so look up the P2P net_device by index, take its vif_mutex and deactivate.
+ */
+void slsi_p2p_group_start_remove_unsync_vif(struct slsi_dev *sdev)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Starting P2P Group - Remove unsync vif\n");
+
+ dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2P);
+ if (!dev) {
+ SLSI_ERR(sdev, "Failed to deactivate p2p vif as dev is not found\n");
+ return;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ if (WARN_ON(!(SLSI_IS_P2P_UNSYNC_VIF(ndev_vif))))
+ return;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ slsi_p2p_vif_deactivate(sdev, dev, true);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/**
+ * Called only for P2P Device mode (p2p0 interface) to store the Probe Response IEs
+ * which would be used in Listen (ROC) state.
+ * If the IEs are received in Listen Offload mode, then configure the IEs in firmware.
+ * Ownership: this function takes ownership of @probe_rsp_ie - it is either
+ * handed to slsi_unsync_vif_set_probe_rsp_ie() or kfree()d here (on error,
+ * or when the IEs are unchanged).
+ */
+int slsi_p2p_dev_probe_rsp_ie(struct slsi_dev *sdev, struct net_device *dev, u8 *probe_rsp_ie, size_t probe_rsp_ie_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int ret = 0;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ if (!SLSI_IS_P2P_UNSYNC_VIF(ndev_vif)) {
+ SLSI_NET_ERR(dev, "Incorrect vif type - Not unsync vif\n");
+ kfree(probe_rsp_ie);
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Received Probe Rsp IE len = %zu, Current IE len = %zu\n", probe_rsp_ie_len, ndev_vif->unsync.probe_rsp_ies_len);
+
+ if (!ndev_vif->unsync.listen_offload) { /* ROC */
+ /* Store the IEs. Upon receiving it on subsequent occassions, store only if IEs have changed */
+ if (ndev_vif->unsync.probe_rsp_ies_len != probe_rsp_ie_len) /* Check if IE length changed */
+ ndev_vif->unsync.ies_changed = true;
+ else if (memcmp(ndev_vif->unsync.probe_rsp_ies, probe_rsp_ie, probe_rsp_ie_len) != 0) /* Check if IEs changed */
+ ndev_vif->unsync.ies_changed = true;
+ else { /* No change in IEs */
+ kfree(probe_rsp_ie);
+ goto exit;
+ }
+
+ slsi_unsync_vif_set_probe_rsp_ie(ndev_vif, probe_rsp_ie, probe_rsp_ie_len);
+ } else { /* P2P Listen Offloading */
+ if (sdev->p2p_state == P2P_LISTENING) {
+ ret = slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_RESPONSE, probe_rsp_ie, probe_rsp_ie_len);
+ if (ret != 0) {
+ SLSI_NET_ERR(dev, "Listen Offloading: Setting Probe Response IEs for unsync vif failed\n");
+ ndev_vif->unsync.listen_offload = false;
+ slsi_p2p_vif_deactivate(sdev, dev, true);
+ }
+ }
+ }
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return ret;
+}
+
/* Handle NULL (empty) Probe Response IEs for P2P Device mode (p2p0).
 *
 * NULL IEs are deliberately not stored in the driver, to avoid reprogramming
 * the Probe Response IEs into firmware on every ROC. Instead this call is
 * used as a cue that the supplicant has stopped P2P_FIND (there is no
 * user-space API to cancel a scan): abort any ongoing P2P scan, flush cached
 * results up to cfg80211, and clear the cached/firmware Probe Response IEs.
 * If an ROC was in progress as part of P2P_FIND, a Cancel ROC will follow.
 *
 * Returns 0, or -EINVAL when called on a non-unsync vif.
 */
int slsi_p2p_dev_null_ies(struct slsi_dev *sdev, struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct cfg80211_scan_info info = {.aborted = true};

	if (!SLSI_IS_P2P_UNSYNC_VIF(ndev_vif)) {
		SLSI_NET_ERR(dev, "Incorrect vif type - Not unsync vif\n");
		return -EINVAL;
	}

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG3(dev, SLSI_CFG80211, "Probe Rsp NULL IEs\n");

	if (sdev->p2p_state == P2P_SCANNING) {
		struct sk_buff *scan_result;

		SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);

		SLSI_NET_DBG1(dev, SLSI_CFG80211, "Stop Find - Abort ongoing P2P scan\n");

		/* Ask firmware to drop the scan; result intentionally ignored */
		(void)slsi_mlme_del_scan(sdev, dev, ((ndev_vif->ifnum << 8) | SLSI_SCAN_HW_ID), false);

		/* Deliver any already-cached results before completing the scan */
		SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
		scan_result = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
		while (scan_result) {
			slsi_rx_scan_pass_to_cfg80211(sdev, dev, scan_result);
			scan_result = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
		}
		SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);

		WARN_ON(!ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req);

		/* Complete the pending cfg80211 scan request as aborted */
		if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
			cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, &info);
#else
			cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, true);
#endif

		ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req = NULL;

		SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);

		if (ndev_vif->activated) {
			/* Supplicant has stopped FIND. Also clear Probe Response IEs in firmware and driver
			 * as Cancel ROC will not be sent as driver was not in Listen
			 */
			SLSI_NET_DBG1(dev, SLSI_CFG80211, "Stop Find - Clear Probe Response IEs in firmware\n");
			if (slsi_mlme_add_info_elements(sdev, dev, FAPI_PURPOSE_PROBE_RESPONSE, NULL, 0) != 0)
				SLSI_NET_ERR(dev, "Clearing Probe Response IEs failed for unsync vif\n");
			slsi_unsync_vif_set_probe_rsp_ie(ndev_vif, NULL, 0);

			SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
		} else {
			SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_NO_VIF);
		}
	}

	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return 0;
}
+
+/**
+ * Returns the P2P public action frame subtype.
+ * Returns SLSI_P2P_PA_INVALID if it is not a P2P public action frame.
+ */
+int slsi_p2p_get_public_action_subtype(const struct ieee80211_mgmt *mgmt)
+{
+ int subtype = SLSI_P2P_PA_INVALID;
+ /* Vendor specific Public Action (0x09), P2P OUI (0x50, 0x6f, 0x9a), P2P Subtype (0x09) */
+ u8 p2p_pa_frame[5] = { 0x09, 0x50, 0x6f, 0x9a, 0x09 };
+ u8 *action = (u8 *)&mgmt->u.action.u;
+
+ if (memcmp(&action[0], p2p_pa_frame, 5) == 0) {
+ subtype = action[5];
+ } else {
+ /* For service discovery action frames dummy subtype is used */
+ switch (action[0]) {
+ case SLSI_PA_GAS_INITIAL_REQ:
+ case SLSI_PA_GAS_INITIAL_RSP:
+ case SLSI_PA_GAS_COMEBACK_REQ:
+ case SLSI_PA_GAS_COMEBACK_RSP:
+ subtype = (action[0] | SLSI_PA_GAS_DUMMY_SUBTYPE_MASK);
+ break;
+ }
+ }
+
+ return subtype;
+}
+
/**
 * Extract the P2P status code from the Status attribute of a GO Negotiation
 * Response frame.
 *
 * Walks vendor-specific IEs starting 7 bytes into the action body
 * (1 action field + 4 OUI/type + 1 OUI subtype + 1 dialog token), finds the
 * P2P IE by OUI/type, then walks its attributes, each laid out as
 * 1-byte id, 2-byte little-endian length, value.
 *
 * Returns the status value, or -1 if no Status attribute is found.
 *
 * NOTE(review): ie_length/elem_idx are not validated against the frame
 * length — assumes the caller hands in a well-formed frame; confirm upstream
 * validation.
 */
int slsi_p2p_get_go_neg_rsp_status(struct net_device *dev, const struct ieee80211_mgmt *mgmt)
{
	int status = -1;
	u8 p2p_oui_type[4] = { 0x50, 0x6f, 0x9a, 0x09 };
	u8 *action = (u8 *)&mgmt->u.action.u;
	u8 *vendor_ie = &action[7]; /* 1 (0x09), 4 (0x50, 0x6f, 0x9a, 0x09), 1 (0x01), 1 (Dialog Token) */
	u8 ie_length, elem_idx;
	u16 attr_length;

	while (vendor_ie && (*vendor_ie == SLSI_WLAN_EID_VENDOR_SPECIFIC)) {
		ie_length = vendor_ie[1];

		if (memcmp(&vendor_ie[2], p2p_oui_type, 4) == 0) {
			elem_idx = 6; /* 1 (Id - 0xdd) + 1 (Length) + 4 (OUI and Type) */

			/* Attribute walk: id(1) + length(2, LE) + value(attr_length) */
			while (ie_length > elem_idx) {
				attr_length = ((vendor_ie[elem_idx + 1]) | (vendor_ie[elem_idx + 2] << 8));

				if (vendor_ie[elem_idx] == SLSI_P2P_STATUS_ATTR_ID) {
					SLSI_NET_DBG3(dev, SLSI_CFG80211, "Status Attribute Found, attr_length = %d, value (%u %u %u %u)\n",
						      attr_length, vendor_ie[elem_idx], vendor_ie[elem_idx + 1], vendor_ie[elem_idx + 2], vendor_ie[elem_idx + 3]);
					status = vendor_ie[elem_idx + 3];
					break;
				}
				elem_idx += 3 + attr_length;
			}

			break;
		}
		/* Not the P2P IE - skip to the next vendor-specific IE */
		vendor_ie += 2 + ie_length;
	}

	SLSI_UNUSED_PARAMETER(dev);

	return status;
}
+
+/**
+ * Returns the next expected P2P public action frame subtype for input subtype.
+ * Returns SLSI_P2P_PA_INVALID if no frame is expected.
+ */
+u8 slsi_p2p_get_exp_peer_frame_subtype(u8 subtype)
+{
+ switch (subtype) {
+ /* Peer response is expected for following frames */
+ case SLSI_P2P_PA_GO_NEG_REQ:
+ case SLSI_P2P_PA_GO_NEG_RSP:
+ case SLSI_P2P_PA_INV_REQ:
+ case SLSI_P2P_PA_DEV_DISC_REQ:
+ case SLSI_P2P_PA_PROV_DISC_REQ:
+ case SLSI_PA_GAS_INITIAL_REQ_SUBTYPE:
+ case SLSI_PA_GAS_COMEBACK_REQ_SUBTYPE:
+ return subtype + 1;
+ default:
+ return SLSI_P2P_PA_INVALID;
+ }
+}
+
+void slsi_wlan_dump_public_action_subtype(struct slsi_dev *sdev, struct ieee80211_mgmt *mgmt, bool tx)
+{
+ u8 action_code = ((u8 *)&mgmt->u.action.u)[0];
+ u8 action_category = mgmt->u.action.category;
+ char *tx_rx_string = "Received";
+ char wnm_action_fields[28][35] = { "Event Request", "Event Report", "Diagnostic Request",
+ "Diagnostic Report", "Location Configuration Request",
+ "Location Configuration Response", "BSS Transition Management Query",
+ "BSS Transition Management Request",
+ "BSS Transition Management Response", "FMS Request", "FMS Response",
+ "Collocated Interference Request", "Collocated Interference Report",
+ "TFS Request", "TFS Response", "TFS Notify", "WNM Sleep Mode Request",
+ "WNM Sleep Mode Response", "TIM Broadcast Request",
+ "TIM Broadcast Response", "QoS Traffic Capability Update",
+ "Channel Usage Request", "Channel Usage Response", "DMS Request",
+ "DMS Response", "Timing Measurement Request",
+ "WNM Notification Request", "WNM Notification Response" };
+
+ if (tx)
+ tx_rx_string = "Send";
+
+ switch (action_category) {
+ case WLAN_CATEGORY_RADIO_MEASUREMENT:
+ switch (action_code) {
+ case SLSI_RM_RADIO_MEASUREMENT_REQ:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Radio Measurement Req)\n", tx_rx_string);
+ break;
+ case SLSI_RM_RADIO_MEASUREMENT_REP:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Radio Measurement Rep)\n", tx_rx_string);
+ break;
+ case SLSI_RM_LINK_MEASUREMENT_REQ:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Link Measurement Req)\n", tx_rx_string);
+ break;
+ case SLSI_RM_LINK_MEASUREMENT_REP:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Link Measurement Rep)\n", tx_rx_string);
+ break;
+ case SLSI_RM_NEIGH_REP_REQ:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Neighbor Report Req)\n", tx_rx_string);
+ break;
+ case SLSI_RM_NEIGH_REP_RSP:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Neighbor Report Resp)\n", tx_rx_string);
+ break;
+ default:
+ SLSI_INFO(sdev, "%s Radio Measurement Frame (Reserved)\n", tx_rx_string);
+ }
+ break;
+ case WLAN_CATEGORY_PUBLIC:
+ switch (action_code) {
+ case SLSI_PA_GAS_INITIAL_REQ:
+ SLSI_DBG1_NODEV(SLSI_CFG80211, "%s: GAS Initial Request\n", tx ? "TX" : "RX");
+ break;
+ case SLSI_PA_GAS_INITIAL_RSP:
+ SLSI_DBG1_NODEV(SLSI_CFG80211, "%s: GAS Initial Response\n", tx ? "TX" : "RX");
+ break;
+ case SLSI_PA_GAS_COMEBACK_REQ:
+ SLSI_DBG1_NODEV(SLSI_CFG80211, "%s: GAS Comeback Request\n", tx ? "TX" : "RX");
+ break;
+ case SLSI_PA_GAS_COMEBACK_RSP:
+ SLSI_DBG1_NODEV(SLSI_CFG80211, "%s: GAS Comeback Response\n", tx ? "TX" : "RX");
+ break;
+ default:
+ SLSI_DBG1_NODEV(SLSI_CFG80211, "Unknown GAS Frame : %d\n", action_code);
+ }
+ break;
+ case WLAN_CATEGORY_WNM:
+ if (action_code >= SLSI_WNM_ACTION_FIELD_MIN && action_code <= SLSI_WNM_ACTION_FIELD_MAX)
+ SLSI_INFO(sdev, "%s WNM Frame (%s)\n", tx_rx_string, wnm_action_fields[action_code]);
+ else
+ SLSI_INFO(sdev, "%s WNM Frame (Reserved)\n", tx_rx_string);
+ break;
+ }
+}
+
/* Abort any ongoing scan on the WLAN (station) interface: tell firmware to
 * delete the scan, flush cached results to cfg80211, and complete the pending
 * cfg80211 scan request as aborted.
 */
void slsi_abort_sta_scan(struct slsi_dev *sdev)
{
	struct net_device *wlan_net_dev = NULL;
	struct netdev_vif *ndev_vif;
	struct cfg80211_scan_info info = {.aborted = true};

	wlan_net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);

	if (!wlan_net_dev) {
		SLSI_ERR(sdev, "Dev not found\n");
		return;
	}

	ndev_vif = netdev_priv(wlan_net_dev);
	/* Lock order: vif_mutex then scan_mutex (then scan_result_mutex below) */
	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);

	if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
		struct sk_buff *scan_result;

		SLSI_DBG2(sdev, SLSI_CFG80211, "Abort ongoing WLAN scan\n");
		/* Firmware-side cancel; result intentionally ignored */
		(void)slsi_mlme_del_scan(sdev, wlan_net_dev, ((ndev_vif->ifnum << 8) | SLSI_SCAN_HW_ID), false);
		/* Deliver already-cached results before signalling completion */
		SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
		scan_result = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
		while (scan_result) {
			slsi_rx_scan_pass_to_cfg80211(sdev, wlan_net_dev, scan_result);
			scan_result = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
		}
		SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
		cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, &info);
#else
		cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, true);
#endif

		ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req = NULL;
		ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
	}
	SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
}
+
+/**
+ * Returns a slsi_dhcp_tx enum value after verifying whether the 802.11 packet in skb
+ * is a DHCP packet (identified by UDP port numbers)
+ */
+int slsi_is_dhcp_packet(u8 *data)
+{
+ u8 *p;
+ int ret = SLSI_TX_IS_NOT_DHCP;
+
+ p = data + SLSI_IP_TYPE_OFFSET;
+
+ if (*p == SLSI_IP_TYPE_UDP) {
+ u16 source_port, dest_port;
+
+ p = data + SLSI_IP_SOURCE_PORT_OFFSET;
+ source_port = p[0] << 8 | p[1];
+ p = data + SLSI_IP_DEST_PORT_OFFSET;
+ dest_port = p[0] << 8 | p[1];
+ if ((source_port == SLSI_DHCP_CLIENT_PORT) && (dest_port == SLSI_DHCP_SERVER_PORT))
+ ret = SLSI_TX_IS_DHCP_CLIENT;
+ else if ((source_port == SLSI_DHCP_SERVER_PORT) && (dest_port == SLSI_DHCP_CLIENT_PORT))
+ ret = SLSI_TX_IS_DHCP_SERVER;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
/* Return 1 if @skb carries a TCP SYN segment, else 0.
 * Used to prioritise connection-setup frames on transmit.
 */
int slsi_is_tcp_sync_packet(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	/* for AP type (AP or P2P Go) check if the packet is local or intra BSS. If intra BSS then
	 * the IP header and TCP header are not set; so return 0
	 */
	if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (compare_ether_addr(eth_hdr(skb)->h_source, dev->dev_addr) != 0))
		return 0;
	if (be16_to_cpu(eth_hdr(skb)->h_proto) != ETH_P_IP)
		return 0;
	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return 0;
	/* Guard against reading a transport header that was never parsed */
	if (!skb_transport_header_was_set(skb))
		return 0;
	if (tcp_hdr(skb)->syn)
		return 1;

	return 0;
}
+
+int slsi_is_dns_packet(u8 *data)
+{
+ u8 *p;
+
+ p = data + SLSI_IP_TYPE_OFFSET;
+
+ if (*p == SLSI_IP_TYPE_UDP) {
+ u16 dest_port;
+
+ p = data + SLSI_IP_DEST_PORT_OFFSET;
+ dest_port = p[0] << 8 | p[1];
+ if (dest_port == SLSI_DNS_DEST_PORT) /* 0x0035 */
+ return 1;
+ }
+
+ return 0;
+}
+
+int slsi_is_mdns_packet(u8 *data)
+{
+ u8 *p;
+
+ p = data + SLSI_IP_TYPE_OFFSET;
+
+ if (*p == SLSI_IP_TYPE_UDP) {
+ u16 dest_port;
+
+ p = data + SLSI_IP_DEST_PORT_OFFSET;
+ dest_port = p[0] << 8 | p[1];
+ if (dest_port == SLSI_MDNS_DEST_PORT)
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+int slsi_ap_prepare_add_info_ies(struct netdev_vif *ndev_vif, const u8 *ies, size_t ies_len)
+{
+ const u8 *wps_p2p_ies = NULL;
+ size_t wps_p2p_ie_len = 0;
+
+ /* The ies may contain Extended Capability followed by WPS IE. The Extended capability IE needs to be excluded. */
+ wps_p2p_ies = cfg80211_find_ie(SLSI_WLAN_EID_VENDOR_SPECIFIC, ies, ies_len);
+ if (wps_p2p_ies) {
+ size_t temp_len = wps_p2p_ies - ies;
+
+ wps_p2p_ie_len = ies_len - temp_len;
+ }
+
+ SLSI_NET_DBG2(ndev_vif->wdev.netdev, SLSI_MLME, "WPA IE len = %zu, WMM IE len = %zu, IEs len = %zu, WPS_P2P IEs len = %zu\n",
+ ndev_vif->ap.wpa_ie_len, ndev_vif->ap.wmm_ie_len, ies_len, wps_p2p_ie_len);
+
+ ndev_vif->ap.add_info_ies_len = ndev_vif->ap.wpa_ie_len + ndev_vif->ap.wmm_ie_len + wps_p2p_ie_len;
+ ndev_vif->ap.add_info_ies = kmalloc(ndev_vif->ap.add_info_ies_len, GFP_KERNEL); /* Caller needs to free this */
+
+ if (!ndev_vif->ap.add_info_ies) {
+ SLSI_NET_DBG1(ndev_vif->wdev.netdev, SLSI_MLME, "Failed to allocate memory for IEs\n");
+ ndev_vif->ap.add_info_ies_len = 0;
+ return -ENOMEM;
+ }
+
+ if (ndev_vif->ap.cache_wpa_ie) {
+ memcpy(ndev_vif->ap.add_info_ies, ndev_vif->ap.cache_wpa_ie, ndev_vif->ap.wpa_ie_len);
+ ndev_vif->ap.add_info_ies += ndev_vif->ap.wpa_ie_len;
+ }
+
+ if (ndev_vif->ap.cache_wmm_ie) {
+ memcpy(ndev_vif->ap.add_info_ies, ndev_vif->ap.cache_wmm_ie, ndev_vif->ap.wmm_ie_len);
+ ndev_vif->ap.add_info_ies += ndev_vif->ap.wmm_ie_len;
+ }
+
+ if (wps_p2p_ies) {
+ memcpy(ndev_vif->ap.add_info_ies, wps_p2p_ies, wps_p2p_ie_len);
+ ndev_vif->ap.add_info_ies += wps_p2p_ie_len;
+ }
+
+ ndev_vif->ap.add_info_ies -= ndev_vif->ap.add_info_ies_len;
+
+ return 0;
+}
+
+/* Set the correct bit in the channel sets */
+static void slsi_roam_channel_cache_add_channel(struct slsi_roaming_network_map_entry *network_map, u8 channel)
+{
+ if (channel <= 14)
+ network_map->channels_24_ghz |= (1 << channel);
+ else if (channel >= 36 && channel <= 64) /* Uni1 */
+ network_map->channels_5_ghz |= (1 << ((channel - 36) / 4));
+ else if (channel >= 100 && channel <= 140) /* Uni2 */
+ network_map->channels_5_ghz |= (1 << (8 + ((channel - 100) / 4)));
+ else if (channel >= 149 && channel <= 165) /* Uni3 */
+ network_map->channels_5_ghz |= (1 << (24 + ((channel - 149) / 4)));
+}
+
/* Add or refresh an entry in the per-vif roaming channel cache.
 * @ssid points at the raw SSID IE (ssid[1] = length, ssid[2..] = value).
 * An existing entry (matched by SSID) gets its timestamp refreshed, the
 * channel bit set, and only_one_ap_seen cleared once a second BSSID appears.
 * Otherwise a new entry is allocated with GFP_ATOMIC (may run in atomic RX
 * context) and linked into ndev_vif->sta.network_map.
 */
void slsi_roam_channel_cache_add_entry(struct slsi_dev *sdev, struct net_device *dev, const u8 *ssid, const u8 *bssid, u8 channel)
{
	struct list_head *pos;
	int found = 0;
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	list_for_each(pos, &ndev_vif->sta.network_map) {
		struct slsi_roaming_network_map_entry *network_map = list_entry(pos, struct slsi_roaming_network_map_entry, list);

		if (network_map->ssid.ssid_len == ssid[1] &&
		    memcmp(network_map->ssid.ssid, &ssid[2], ssid[1]) == 0) {
			found = 1;
			network_map->last_seen_jiffies = jiffies;
			/* Second distinct BSSID for this SSID: no longer a single-AP network */
			if (network_map->only_one_ap_seen && memcmp(network_map->initial_bssid, bssid, ETH_ALEN) != 0)
				network_map->only_one_ap_seen = false;
			slsi_roam_channel_cache_add_channel(network_map, channel);
			break;
		}
	}
	if (!found) {
		struct slsi_roaming_network_map_entry *network_map;

		SLSI_NET_DBG3(dev, SLSI_MLME, "New Entry : Channel: %d : %.*s\n", channel, ssid[1], &ssid[2]);
		network_map = kmalloc(sizeof(*network_map), GFP_ATOMIC);
		if (network_map) {
			network_map->ssid.ssid_len = ssid[1];
			memcpy(network_map->ssid.ssid, &ssid[2], ssid[1]);
			network_map->channels_24_ghz = 0;
			network_map->channels_5_ghz = 0;
			network_map->last_seen_jiffies = jiffies;
			SLSI_ETHER_COPY(network_map->initial_bssid, bssid);
			network_map->only_one_ap_seen = true;
			slsi_roam_channel_cache_add_channel(network_map, channel);
			list_add(&network_map->list, &ndev_vif->sta.network_map);
		} else {
			SLSI_ERR(sdev, "New Entry : %.*s kmalloc() failed\n", ssid[1], &ssid[2]);
		}
	}
}
+
/* Parse a beacon/probe-response carried in an mlme_scan_ind and feed its
 * SSID/BSSID/channel into the roaming channel cache.
 * The DS Params or HT Operation IE is preferred over the RX frequency,
 * because off-channel scan results make the reported frequency unreliable.
 * Hidden/invalid SSIDs (missing, empty or oversized SSID IE) are skipped.
 */
void slsi_roam_channel_cache_add(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	size_t mgmt_len = fapi_get_mgmtlen(skb);
	int ielen = mgmt_len - (mgmt->u.beacon.variable - (u8 *)mgmt);
	/* Halved: FAPI appears to report channel_frequency in 0.5 MHz units — confirm against FAPI spec */
	u32 freq = fapi_get_u16(skb, u.mlme_scan_ind.channel_frequency) / 2;
	const u8 *scan_ds = cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable, ielen);
	const u8 *scan_ht = cfg80211_find_ie(WLAN_EID_HT_OPERATION, mgmt->u.beacon.variable, ielen);
	const u8 *scan_ssid = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable, ielen);
	u8 chan = 0;

	/* Use the DS or HT channel as the Offchannel results mean the RX freq is not reliable */
	if (scan_ds)
		chan = scan_ds[2];
	else if (scan_ht)
		chan = scan_ht[2];
	else
		chan = ieee80211_frequency_to_channel(freq);

	if (chan) {
		enum nl80211_band band = NL80211_BAND_2GHZ;

		if (chan > 14)
			band = NL80211_BAND_5GHZ;

#ifdef CONFIG_SCSC_WLAN_DEBUG
		/* Debug-only: flag results whose IE channel disagrees with the RX frequency's band */
		if (freq != (u32)ieee80211_channel_to_frequency(chan, band)) {
			if (band == NL80211_BAND_5GHZ && freq < 3000)
				SLSI_NET_DBG2(dev, SLSI_MLME, "Off Band Result : mlme_scan_ind(freq:%d) != DS(freq:%d)\n", freq, ieee80211_channel_to_frequency(chan, band));

			if (band == NL80211_BAND_2GHZ && freq > 3000)
				SLSI_NET_DBG2(dev, SLSI_MLME, "Off Band Result : mlme_scan_ind(freq:%d) != DS(freq:%d)\n", freq, ieee80211_channel_to_frequency(chan, band));
		}
#endif
	}

	if (!scan_ssid || !scan_ssid[1] || scan_ssid[1] > 32) {
		SLSI_NET_DBG3(dev, SLSI_MLME, "SSID not defined : Could not find SSID ie or Hidden\n");
		return;
	}

	slsi_roam_channel_cache_add_entry(sdev, dev, scan_ssid, mgmt->bssid, chan);
}
+
+void slsi_roam_channel_cache_prune(struct net_device *dev, int seconds)
+{
+ struct slsi_roaming_network_map_entry *network_map;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct list_head *pos, *q;
+ unsigned long now = jiffies;
+ unsigned long age;
+
+ list_for_each_safe(pos, q, &ndev_vif->sta.network_map) {
+ network_map = list_entry(pos, struct slsi_roaming_network_map_entry, list);
+ age = (now - network_map->last_seen_jiffies) / HZ;
+
+ if (time_after_eq(now, network_map->last_seen_jiffies + (seconds * HZ))) {
+ list_del(pos);
+ kfree(network_map);
+ }
+ }
+}
+
+int slsi_roam_channel_cache_get_channels_int(struct net_device *dev, struct slsi_roaming_network_map_entry *network_map, u8 *channels)
+{
+ int index = 0;
+ int i;
+
+ SLSI_UNUSED_PARAMETER(dev);
+
+ /* 2.4 Ghz Channels */
+ for (i = 1; i <= 14; i++)
+ if (network_map->channels_24_ghz & (1 << i)) {
+ channels[index] = i;
+ index++;
+ }
+
+ /* 5 Ghz Uni1 Channels */
+ for (i = 36; i <= 64; i += 4)
+ if (network_map->channels_5_ghz & (1 << ((i - 36) / 4))) {
+ channels[index] = i;
+ index++;
+ }
+
+ /* 5 Ghz Uni2 Channels */
+ for (i = 100; i <= 140; i += 4)
+ if (network_map->channels_5_ghz & (1 << (8 + ((i - 100) / 4)))) {
+ channels[index] = i;
+ index++;
+ }
+
+ /* 5 Ghz Uni3 Channels */
+ for (i = 149; i <= 165; i += 4)
+ if (network_map->channels_5_ghz & (1 << (24 + ((i - 149) / 4)))) {
+ channels[index] = i;
+ index++;
+ }
+ return index;
+}
+
+static struct slsi_roaming_network_map_entry *slsi_roam_channel_cache_get(struct net_device *dev, const u8 *ssid)
+{
+ struct slsi_roaming_network_map_entry *network_map = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct list_head *pos;
+
+ if (WARN_ON(!ssid))
+ return NULL;
+
+ list_for_each(pos, &ndev_vif->sta.network_map) {
+ network_map = list_entry(pos, struct slsi_roaming_network_map_entry, list);
+ if (network_map->ssid.ssid_len == ssid[1] &&
+ memcmp(network_map->ssid.ssid, &ssid[2], ssid[1]) == 0)
+ break;
+ }
+ return network_map;
+}
+
+u32 slsi_roam_channel_cache_get_channels(struct net_device *dev, const u8 *ssid, u8 *channels)
+{
+ u32 channels_count = 0;
+ struct slsi_roaming_network_map_entry *network_map;
+
+ network_map = slsi_roam_channel_cache_get(dev, ssid);
+ if (network_map)
+ channels_count = slsi_roam_channel_cache_get_channels_int(dev, network_map, channels);
+
+ return channels_count;
+}
+
+static bool slsi_roam_channel_cache_single_ap(struct net_device *dev, const u8 *ssid)
+{
+ bool only_one_ap_seen = true;
+ struct slsi_roaming_network_map_entry *network_map;
+
+ network_map = slsi_roam_channel_cache_get(dev, ssid);
+ if (network_map)
+ only_one_ap_seen = network_map->only_one_ap_seen;
+
+ return only_one_ap_seen;
+}
+
/* Build the channel list for a roaming scan from the cached scan results.
 * @channels is filled with the cached channel numbers for @ssid; returns the
 * count, or 0 when only one AP has ever been seen for the SSID (a roaming
 * scan would be pointless). Caller must hold vif_mutex on an activated
 * station vif.
 */
int slsi_roaming_scan_configure_channels(struct slsi_dev *sdev, struct net_device *dev, const u8 *ssid, u8 *channels)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	u32 cached_channels_count;

	SLSI_UNUSED_PARAMETER(sdev);

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
	WARN_ON(!ndev_vif->activated);
	WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION);

	/* channels[] is filled even for the single-AP case; only the count is suppressed */
	cached_channels_count = slsi_roam_channel_cache_get_channels(dev, ssid, channels);
	if (slsi_roam_channel_cache_single_ap(dev, ssid)) {
		SLSI_NET_DBG3(dev, SLSI_MLME, "Skip Roaming Scan for Single AP %.*s\n", ssid[1], &ssid[2]);
		return 0;
	}

	SLSI_NET_DBG3(dev, SLSI_MLME, "Roaming Scan Channels. %d cached\n", cached_channels_count);

	return cached_channels_count;
}
+
+int slsi_send_acs_event(struct slsi_dev *sdev, struct slsi_acs_selected_channels acs_selected_channels)
+{
+ struct sk_buff *skb = NULL;
+ u8 err = 0;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE,
+ SLSI_NL80211_VENDOR_ACS_EVENT, GFP_KERNEL);
+#else
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE,
+ SLSI_NL80211_VENDOR_ACS_EVENT, GFP_KERNEL);
+#endif
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for VENDOR ACS event\n");
+ return -ENOMEM;
+ }
+ err |= nla_put_u8(skb, SLSI_ACS_ATTR_PRIMARY_CHANNEL, acs_selected_channels.pri_channel);
+ err |= nla_put_u8(skb, SLSI_ACS_ATTR_SECONDARY_CHANNEL, acs_selected_channels.sec_channel);
+ err |= nla_put_u8(skb, SLSI_ACS_ATTR_VHT_SEG0_CENTER_CHANNEL, acs_selected_channels.vht_seg0_center_ch);
+ err |= nla_put_u8(skb, SLSI_ACS_ATTR_VHT_SEG1_CENTER_CHANNEL, acs_selected_channels.vht_seg1_center_ch);
+ err |= nla_put_u16(skb, SLSI_ACS_ATTR_CHWIDTH, acs_selected_channels.ch_width);
+ err |= nla_put_u8(skb, SLSI_ACS_ATTR_HW_MODE, acs_selected_channels.hw_mode);
+ SLSI_DBG3(sdev, SLSI_MLME, "pri_channel=%d,sec_channel=%d,vht_seg0_center_ch=%d,"
+ "vht_seg1_center_ch=%d, ch_width=%d, hw_mode=%d\n",
+ acs_selected_channels.pri_channel, acs_selected_channels.sec_channel,
+ acs_selected_channels.vht_seg0_center_ch, acs_selected_channels.vht_seg1_center_ch,
+ acs_selected_channels.ch_width, acs_selected_channels.hw_mode);
+ if (err) {
+ SLSI_ERR_NODEV("Failed nla_put err=%d\n", err);
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+ SLSI_INFO(sdev, "Event: SLSI_NL80211_VENDOR_ACS_EVENT(%d)\n", SLSI_NL80211_VENDOR_ACS_EVENT);
+ cfg80211_vendor_event(skb, GFP_KERNEL);
+ return 0;
+}
+
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+int slsi_is_wes_action_frame(const struct ieee80211_mgmt *mgmt)
+{
+ int r = 0;
+ /* Vendor specific Action (0x7f), SAMSUNG OUI (0x00, 0x00, 0xf0) */
+ u8 wes_vs_action_frame[4] = { 0x7f, 0x00, 0x00, 0xf0 };
+ u8 *action = (u8 *)&mgmt->u.action;
+
+ if (memcmp(action, wes_vs_action_frame, 4) == 0)
+ r = 1;
+
+ return r;
+}
+#endif
+
+static u32 slsi_remap_reg_rule_flags(u8 flags)
+{
+ u32 remapped_flags = 0;
+
+ if (flags & SLSI_REGULATORY_DFS)
+ remapped_flags |= NL80211_RRF_DFS;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ if (flags & SLSI_REGULATORY_NO_OFDM)
+ remapped_flags |= NL80211_RRF_NO_OFDM;
+#endif
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 13, 0))
+ if (flags & SLSI_REGULATORY_NO_IR)
+ remapped_flags |= NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS;
+#endif
+ if (flags & SLSI_REGULATORY_NO_INDOOR)
+ remapped_flags |= NL80211_RRF_NO_INDOOR;
+ if (flags & SLSI_REGULATORY_NO_OUTDOOR)
+ remapped_flags |= NL80211_RRF_NO_OUTDOOR;
+
+ return remapped_flags;
+}
+
+static void slsi_reg_mib_to_regd(struct slsi_mib_data *mib, struct slsi_802_11d_reg_domain *domain_info)
+{
+ int i = 0;
+ int num_rules = 0;
+ u16 freq;
+ u8 byte_val;
+ struct ieee80211_reg_rule *reg_rule;
+
+ domain_info->regdomain->alpha2[0] = *(u8 *)(&mib->data[i]);
+ i++;
+
+ domain_info->regdomain->alpha2[1] = *(u8 *)(&mib->data[i]);
+ i++;
+
+ domain_info->regdomain->dfs_region = *(u8 *)(&mib->data[i]);
+ i++;
+
+ while (i < mib->dataLength) {
+ reg_rule = &domain_info->regdomain->reg_rules[num_rules];
+
+ /* start freq 2 bytes */
+ freq = __le16_to_cpu(*(u16 *)(&mib->data[i]));
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(freq);
+
+ /* end freq 2 bytes */
+ freq = __le16_to_cpu(*(u16 *)(&mib->data[i + 2]));
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(freq);
+
+ /* Max Bandwidth 1 byte */
+ byte_val = *(u8 *)(&mib->data[i + 4]);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(byte_val);
+
+ /* max_antenna_gain is obsolute now.*/
+ reg_rule->power_rule.max_antenna_gain = 0;
+
+ /* Max Power 1 byte */
+ byte_val = *(u8 *)(&mib->data[i + 5]);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(byte_val);
+
+ /* Flags 1 byte */
+ reg_rule->flags = slsi_remap_reg_rule_flags(*(u8 *)(&mib->data[i + 6]));
+
+ i += 7;
+
+ num_rules++; /* Num of reg rules */
+ }
+
+ domain_info->regdomain->n_reg_rules = num_rules;
+}
+
/* Clear the cfg80211 flags on every channel of every registered band,
 * removing restrictions (DFS/NO_IR/disabled/...) before new regulatory
 * rules are applied.
 */
void slsi_reset_channel_flags(struct slsi_dev *sdev)
{
	enum nl80211_band band;
	struct ieee80211_channel *chan;
	int i;
	struct wiphy *wiphy = sdev->wiphy;

	/* Band enum was renamed in 4.9 (IEEE80211_NUM_BANDS -> NUM_NL80211_BANDS) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
#else
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
#endif
		if (!wiphy->bands[band])
			continue;
		for (i = 0; i < wiphy->bands[band]->n_channels; i++) {
			chan = &wiphy->bands[band]->channels[i];
			chan->flags = 0;
		}
	}
}
+
/* Read the regulatory rules for country @alpha2 from firmware
 * (SLSI_PSID_UNIFI_REGULATORY_PARAMETERS, row = country index) and decode
 * them into domain_info->regdomain.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the result of slsi_mib_encode_get() is not checked —
 * presumably it cannot fail for this PSID; confirm, otherwise mibreq.data
 * could be NULL below.
 */
int slsi_read_regulatory_rules(struct slsi_dev *sdev, struct slsi_802_11d_reg_domain *domain_info, const char *alpha2)
{
	struct slsi_mib_data mibreq = { 0, NULL };
	struct slsi_mib_data mibrsp = { 0, NULL };
	struct slsi_mib_entry mib_val;
	int r = 0;
	int rx_len = 0;
	int len = 0;
	int index;

	index = slsi_country_to_index(domain_info, alpha2);

	if (index == -1) {
		SLSI_ERR(sdev, "Unsupported index\n");
		return -EINVAL;
	}

	slsi_mib_encode_get(&mibreq, SLSI_PSID_UNIFI_REGULATORY_PARAMETERS, index);

	/* Max of 6 regulatory constraints.
	 * each constraint start_freq(2 byte), end_freq(2 byte), Band width(1 byte), Max power(1 byte),
	 * rules flag (1 byte)
	 * firmware can have a max of 6 rules for a country.
	 */
	/* PSID header (5 bytes) + ((3 bytes) alpha2 code + dfs) + (max of 50 regulatory rules * 7 bytes each row) + MIB status(1) */
	mibrsp.dataLength = 5 + 3 + (SLSI_MIB_REG_RULES_MAX * 7) + 1;
	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);

	if (!mibrsp.data) {
		SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
		kfree(mibreq.data);
		return -ENOMEM;
	}

	r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength,
			  mibrsp.data, mibrsp.dataLength, &rx_len);
	kfree(mibreq.data);

	if (r == 0) {
		mibrsp.dataLength = rx_len;

		len = slsi_mib_decode(&mibrsp, &mib_val);

		if (len == 0) {
			kfree(mibrsp.data);
			SLSI_ERR(sdev, "Mib decode error\n");
			return -EINVAL;
		}
		slsi_reg_mib_to_regd(&mib_val.value.u.octetValue, domain_info);
	} else {
		SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
	}

	kfree(mibrsp.data);
	return r;
}
+
+static int slsi_country_to_index(struct slsi_802_11d_reg_domain *domain_info, const char *alpha2)
+{
+ int index = 0;
+ bool index_found = false;
+
+ SLSI_DBG3_NODEV(SLSI_MLME, "\n");
+ if (domain_info->countrylist) {
+ for (index = 0; index < domain_info->country_len; index += 2) {
+ if (memcmp(&domain_info->countrylist[index], alpha2, 2) == 0) {
+ index_found = true;
+ break;
+ }
+ }
+
+ /* If the set country is not present in the country list, fall back to
+ * world domain i.e. regulatory rules index = 1
+ */
+ if (index_found)
+ return (index / 2) + 1;
+ else
+ return 1;
+ }
+
+ return -1;
+}
+
+/* Set the rssi boost value of a particular band as set in the SETJOINPREFER command*/
+int slsi_set_mib_rssi_boost(struct slsi_dev *sdev, struct net_device *dev, u16 psid, int index, int boost)
+{
+ struct slsi_mib_data mib_data = { 0, NULL };
+ int error = SLSI_MIB_STATUS_FAILURE;
+
+ SLSI_DBG2(sdev, SLSI_MLME, "Set rssi boost: %d\n", boost);
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->device_config_mutex));
+ if (slsi_mib_encode_int(&mib_data, psid, boost, index) == SLSI_MIB_STATUS_SUCCESS)
+ if (mib_data.dataLength) {
+ error = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
+ if (error)
+ SLSI_ERR(sdev, "Err Setting MIB failed. error = %d\n", error);
+ kfree(mib_data.data);
+ }
+
+ return error;
+}
+
+#ifdef CONFIG_SCSC_WLAN_LOW_LATENCY_MODE
+int slsi_set_mib_soft_roaming_enabled(struct slsi_dev *sdev, struct net_device *dev, bool enable)
+{
+ struct slsi_mib_data mib_data = { 0, NULL };
+ int error = SLSI_MIB_STATUS_FAILURE;
+
+ if (slsi_mib_encode_bool(&mib_data, SLSI_PSID_UNIFI_ROAM_SOFT_ROAMING_ENABLED,
+ enable, 0) == SLSI_MIB_STATUS_SUCCESS)
+ if (mib_data.dataLength) {
+ error = slsi_mlme_set(sdev, dev, mib_data.data, mib_data.dataLength);
+ if (error)
+ SLSI_ERR(sdev, "Err Setting MIB failed. error = %d\n", error);
+ kfree(mib_data.data);
+ }
+
+ return error;
+}
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
/* Read the firmware's ARP-response RX counter (MIB @psid) and store it in
 * ndev_vif->enhanced_arp_stats.arp_rsp_rx_count_by_lower_mac.
 * Returns 0 on success or a negative errno.
 */
int slsi_read_enhanced_arp_rx_count_by_lower_mac(struct slsi_dev *sdev, struct net_device *dev, u16 psid)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_mib_data mibreq = { 0, NULL };
	struct slsi_mib_data mibrsp = { 0, NULL };
	struct slsi_mib_entry mib_val;
	int r = 0;
	int rx_len = 0;
	int len = 0;

	SLSI_DBG3(sdev, SLSI_MLME, "\n");

	slsi_mib_encode_get(&mibreq, psid, 0);

	mibrsp.dataLength = 10; /* PSID header(5) + uint 4 bytes + status(1) */
	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);

	if (!mibrsp.data) {
		SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
		kfree(mibreq.data);
		return -ENOMEM;
	}

	r = slsi_mlme_get(sdev, dev, mibreq.data, mibreq.dataLength, mibrsp.data,
			  mibrsp.dataLength, &rx_len);
	kfree(mibreq.data);

	if (r == 0) {
		mibrsp.dataLength = rx_len;
		len = slsi_mib_decode(&mibrsp, &mib_val);

		if (len == 0) {
			kfree(mibrsp.data);
			SLSI_ERR(sdev, "Mib decode error\n");
			return -EINVAL;
		}
		ndev_vif->enhanced_arp_stats.arp_rsp_rx_count_by_lower_mac = mib_val.value.u.uintValue;
	} else {
		SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
	}

	kfree(mibrsp.data);
	return r;
}
+
/* Inspect a received frame and, if it is a non-gratuitous ARP reply whose
 * sender IP matches the monitored target (gateway) IP, bump the
 * out-of-order-drop counter in enhanced_arp_stats.
 */
void slsi_fill_enhanced_arp_out_of_order_drop_counter(struct netdev_vif *ndev_vif,
						      struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u8 *frame;
	u16 arp_opcode;

#ifdef CONFIG_SCSC_SMAPPER
	/* Check if the payload is in the SMAPPER entry */
	if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
		frame = slsi_hip_get_skb_data_from_smapper(ndev_vif->sdev, skb);
		eth_hdr = (struct ethhdr *)frame;
		if (!(eth_hdr)) {
			SLSI_DBG2(ndev_vif->sdev, SLSI_RX, "SKB from SMAPPER is NULL\n");
			return;
		}
		frame = frame + sizeof(struct ethhdr);
	} else {
		frame = fapi_get_data(skb) + sizeof(struct ethhdr);
		eth_hdr = (struct ethhdr *)fapi_get_data(skb);
	}
#else
	frame = fapi_get_data(skb) + sizeof(struct ethhdr);
	eth_hdr = (struct ethhdr *)fapi_get_data(skb);
#endif

	/* ARP opcode is big-endian on the wire */
	arp_opcode = frame[SLSI_ARP_OPCODE_OFFSET] << 8 | frame[SLSI_ARP_OPCODE_OFFSET + 1];
	/* check if sender ip = gateway ip and it is an ARP response*/
	if ((ntohs(eth_hdr->h_proto) == ETH_P_ARP) &&
	    (arp_opcode == SLSI_ARP_REPLY_OPCODE) &&
	    !SLSI_IS_GRATUITOUS_ARP(frame) &&
	    !memcmp(&frame[SLSI_ARP_SRC_IP_ADDR_OFFSET], &ndev_vif->target_ip_addr, 4))
		ndev_vif->enhanced_arp_stats.arp_rsp_count_out_of_order_drop++;
}
+#endif
+
/* Rewrite the channel number carried in the beacon IEs after a channel
 * switch: DS Params in the beacon head and HT Operation in the beacon tail
 * are patched (at value offset 2) to the new operating channel.
 * NOTE(review): ds_params_ie and ht_operation_ie are unused — the IEs are
 * located again inside slsi_modify_ies(); parameters appear to be kept for
 * the existing callers.
 */
void slsi_modify_ies_on_channel_switch(struct net_device *dev, struct cfg80211_ap_settings *settings,
				       u8 *ds_params_ie, u8 *ht_operation_ie, struct ieee80211_mgmt *mgmt,
				       u16 beacon_ie_head_len)
{
	slsi_modify_ies(dev, WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable,
			beacon_ie_head_len, 2, ieee80211_frequency_to_channel(settings->chandef.chan->center_freq));

	slsi_modify_ies(dev, WLAN_EID_HT_OPERATION, (u8 *)settings->beacon.tail,
			settings->beacon.tail_len, 2,
			ieee80211_frequency_to_channel(settings->chandef.chan->center_freq));
}
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
/* Expand the 5-byte wifi_sharing_5ghz_channel bitmap into
 * sdev->valid_5g_freq[]: walking bytes from index 4 down to 0 and bits MSB
 * first, each set bit maps to the corresponding frequency from
 * slsi_5ghz_all_channels[] (walked from the top down); clear bits produce 0.
 * The (i == 4 && j == 0) skip drops the top bit of the last byte —
 * presumably padding, so exactly SLSI_MAX_CHAN_5G_BAND slots are filled;
 * confirm against the bitmap producer.
 */
void slsi_extract_valid_wifi_sharing_channels(struct slsi_dev *sdev)
{
	int i, j;
	int p = 0;                            /* write index into valid_5g_freq[] */
	int k = (SLSI_MAX_CHAN_5G_BAND - 1);  /* read index into slsi_5ghz_all_channels[] */
	int flag = 0;

	for (i = 4; i >= 0 ; i--) {
		for (j = 0; j <= 7 ; j++) {
			if ((i == 4) && (j == 0))
				j = 1;
			if (sdev->wifi_sharing_5ghz_channel[i] & (u8)(1 << (7 - j)))
				sdev->valid_5g_freq[p] = slsi_5ghz_all_channels[k];
			else
				sdev->valid_5g_freq[p] = 0;
			p++;
			k--;
			/* Stop once every output slot has been filled */
			if (p == SLSI_MAX_CHAN_5G_BAND) {
				flag = 1;
				break;
			}
		}
		if (flag == 1)
			break;
	}
}
+
+bool slsi_if_valid_wifi_sharing_channel(struct slsi_dev *sdev, int freq)
+{
+ int i;
+
+ for (i = 0; i <= (SLSI_MAX_CHAN_5G_BAND - 1) ; i++) {
+ if (sdev->valid_5g_freq[i] == freq)
+ return 1;
+ }
+ return 0;
+}
+
+int slsi_check_if_non_indoor_non_dfs_channel(struct slsi_dev *sdev, int freq)
+{
+ struct ieee80211_channel *channel = NULL;
+ u32 chan_flags = 0;
+
+ channel = ieee80211_get_channel(sdev->wiphy, freq);
+ if (!channel) {
+ SLSI_ERR(sdev, "Invalid frequency %d used to start AP. Channel not found\n", freq);
+ return 0;
+ }
+
+ chan_flags = (IEEE80211_CHAN_INDOOR_ONLY | IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_DISABLED |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 10, 13)
+ IEEE80211_CHAN_PASSIVE_SCAN
+#else
+ IEEE80211_CHAN_NO_IR
+#endif
+ );
+
+ if ((channel->flags) & chan_flags)
+ return 0;
+
+ return 1;
+}
+
/* Pick the Wi-Fi Sharing AP operating channel based on where the STA link
 * is connected. Same-channel operation (SCC) is preferred; with dual
 * antennas the requested AP channel may be kept if it is valid,
 * unrestricted and non-indoor/non-DFS, otherwise 2.4 GHz channel 1 is the
 * fallback. Sets *wifi_sharing_channel_switched when the requested channel
 * is overridden.
 * Returns 0 on success, or 1 to reject the AP start (single antenna with
 * the STA on an indoor/DFS channel).
 */
int slsi_select_wifi_sharing_ap_channel(struct wiphy *wiphy, struct net_device *dev,
					struct cfg80211_ap_settings *settings,
					struct slsi_dev *sdev, int *wifi_sharing_channel_switched)
{
	struct net_device *sta_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
	struct netdev_vif *ndev_sta_vif = netdev_priv(sta_dev);
	int sta_frequency = ndev_sta_vif->chan->center_freq;

	SLSI_DBG1(sdev, SLSI_CFG80211, "Station connected on frequency: %d\n", sta_frequency);

	if (((sta_frequency) / 1000) == 2) { /*For 2.4GHz */
		/*if single antenna*/
#ifdef CONFIG_SCSC_WLAN_SINGLE_ANTENNA
		/* Single antenna: AP must follow the STA channel (SCC). */
		if ((settings->chandef.chan->center_freq) != (sta_frequency)) {
			*wifi_sharing_channel_switched = 1;
			settings->chandef.chan = ieee80211_get_channel(wiphy, sta_frequency);
			settings->chandef.center_freq1 = sta_frequency;
		}
#else
		/* if dual antenna */
		/* Dual antenna: a requested 5 GHz AP channel is kept when it is
		 * valid, not user-restricted and non-indoor/non-DFS; otherwise
		 * the AP follows the STA onto 2.4 GHz.
		 */
		if ((((settings->chandef.chan->center_freq) / 1000) == 5) &&
		    !(slsi_check_if_channel_restricted_already(sdev,
		    ieee80211_frequency_to_channel(settings->chandef.chan->center_freq))) &&
		    slsi_if_valid_wifi_sharing_channel(sdev, settings->chandef.chan->center_freq) &&
		    slsi_check_if_non_indoor_non_dfs_channel(sdev, settings->chandef.chan->center_freq)) {
			settings->chandef.chan = ieee80211_get_channel(wiphy, settings->chandef.chan->center_freq);
			settings->chandef.center_freq1 = settings->chandef.chan->center_freq;
		} else {
			if ((settings->chandef.chan->center_freq) != (sta_frequency)) {
				*wifi_sharing_channel_switched = 1;
				settings->chandef.chan = ieee80211_get_channel(wiphy, sta_frequency);
				settings->chandef.center_freq1 = sta_frequency;
			}
		}
#endif
	}

	else { /* For 5GHz */
		/* For single antenna */
#ifdef CONFIG_SCSC_WLAN_SINGLE_ANTENNA
		if (!slsi_check_if_non_indoor_non_dfs_channel(sdev, sta_frequency))
			return 1; /*AP cannot start on indoor channel so we will reject request from the host*/
		if ((settings->chandef.chan->center_freq) != (sta_frequency)) {
			*wifi_sharing_channel_switched = 1;
			settings->chandef.chan = ieee80211_get_channel(wiphy, sta_frequency);
			settings->chandef.center_freq1 = sta_frequency;
		}
		/* Single antenna end */
#else
		/* For Dual Antenna */
		/* Dual antenna: follow the STA onto 5 GHz when its channel is
		 * usable for the AP, otherwise fall back to 2.4 GHz channel 1.
		 */
		if (((settings->chandef.chan->center_freq) / 1000) == 5) {
			if (!(slsi_check_if_channel_restricted_already(sdev,
							ieee80211_frequency_to_channel(sta_frequency))) &&
			    slsi_if_valid_wifi_sharing_channel(sdev, sta_frequency) &&
			    slsi_check_if_non_indoor_non_dfs_channel(sdev, sta_frequency)) {
				if ((settings->chandef.chan->center_freq) != (sta_frequency)) {
					*wifi_sharing_channel_switched = 1;
					settings->chandef.chan = ieee80211_get_channel(wiphy, sta_frequency);
				}
			} else {
				*wifi_sharing_channel_switched = 1;
				settings->chandef.chan = ieee80211_get_channel(wiphy, SLSI_2G_CHANNEL_ONE);
				settings->chandef.center_freq1 = SLSI_2G_CHANNEL_ONE;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
				/* Firmware without 2.4 GHz 40 MHz support must narrow to 20 MHz. */
				if (!sdev->fw_2g_40mhz_enabled && (settings->chandef.width == NL80211_CHAN_WIDTH_40))
					settings->chandef.width = NL80211_CHAN_WIDTH_20;
#endif
			}
		}
#endif
	}

	SLSI_DBG1(sdev, SLSI_CFG80211, "AP frequency chosen: %d\n", settings->chandef.chan->center_freq);
	return 0;
}
+
/* Map a channel-bitmap bit index onto the byte that holds it.
 * Bits 0..7 and any index outside 8..38 resolve to byte 0, matching the
 * original if/else ladder exactly.
 */
int slsi_get_byte_position(int bit)
{
	if (bit >= 8 && bit <= 38)
		return bit / 8;

	return 0;
}
+
+int slsi_check_if_channel_restricted_already(struct slsi_dev *sdev, int channel)
+{
+ int i;
+
+ for (i = 0; i < sdev->num_5g_restricted_channels; i++)
+ if (sdev->wifi_sharing_5g_restricted_channels[i] == channel)
+ return 1;
+
+ return 0;
+}
+
+int slsi_set_mib_wifi_sharing_5ghz_channel(struct slsi_dev *sdev, u16 psid, int res,
+ int offset, int readbyte, char *arg)
+{
+ struct slsi_mib_entry mib_entry;
+ struct slsi_mib_data buffer = { 0, NULL };
+ int error = SLSI_MIB_STATUS_FAILURE;
+ int i;
+ int bit = 0; /* find which bit to set */
+ int byte_pos = 0; /* which index to set bit among 8 larger set*/
+ int freq;
+ int j;
+ int bit_mask;
+ int num_channels;
+ int p = 0;
+ int new_channels = 0;
+ int freq_to_be_checked = 0;
+
+ mib_entry.value.type = SLSI_MIB_TYPE_OCTET;
+ mib_entry.value.u.octetValue.dataLength = 8;
+ mib_entry.value.u.octetValue.data = kmalloc(64, GFP_KERNEL);
+
+ if (!mib_entry.value.u.octetValue.data) {
+ error = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < 8; i++)
+ mib_entry.value.u.octetValue.data[i] = sdev->wifi_sharing_5ghz_channel[i];
+
+ if (res == 0) {
+ for (i = 0; i < 25 ; i++)
+ sdev->wifi_sharing_5g_restricted_channels[i] = 0;
+ sdev->num_5g_restricted_channels = 0;
+ new_channels = 1;
+ } else if (res == -1) {
+ for (i = 0; i < 8; i++)
+ mib_entry.value.u.octetValue.data[i] = 0x00;
+
+ for (i = 0; i < 25 ; i++)
+ sdev->wifi_sharing_5g_restricted_channels[i] = 0;
+
+ for (i = 24; i >= 0 ; i--) {
+ if (sdev->valid_5g_freq[i] != 0)
+ sdev->wifi_sharing_5g_restricted_channels[p++] =
+ ieee80211_frequency_to_channel(sdev->valid_5g_freq[i]);
+ }
+ sdev->num_5g_restricted_channels = p;
+ new_channels = 1;
+ } else {
+ num_channels = res;
+
+ for (i = 0; i < num_channels; i++) {
+ offset = offset + readbyte + 1;
+ readbyte = slsi_str_to_int(&arg[offset], &res);
+ /*if channel is not already present , then only add it*/
+ freq_to_be_checked = ieee80211_channel_to_frequency(res, NL80211_BAND_5GHZ);
+ if (slsi_if_valid_wifi_sharing_channel(sdev, freq_to_be_checked) &&
+ (!slsi_check_if_channel_restricted_already(sdev, res))) {
+ if ((sdev->num_5g_restricted_channels) > 24)
+ break;
+ new_channels = 1;
+ sdev->wifi_sharing_5g_restricted_channels[(sdev->num_5g_restricted_channels)++] = res;
+ }
+ }
+
+ if (new_channels) {
+ for (i = 0; i < (sdev->num_5g_restricted_channels); i++) {
+ freq = ieee80211_channel_to_frequency(sdev->wifi_sharing_5g_restricted_channels[i],
+ NL80211_BAND_5GHZ);
+ for (j = 0; j < 25; j++) {
+ if (slsi_5ghz_all_channels[j] == freq) {
+ bit = j + 14;
+ break;
+ }
+ }
+ byte_pos = slsi_get_byte_position(bit);
+ bit_mask = (bit % 8);
+ mib_entry.value.u.octetValue.data[byte_pos] &= (u8)(~(1 << (bit_mask)));
+ }
+ }
+ }
+
+ if (new_channels) {
+ error = slsi_mib_encode_octet(&buffer, psid, mib_entry.value.u.octetValue.dataLength,
+ mib_entry.value.u.octetValue.data, 0);
+ if (error != SLSI_MIB_STATUS_SUCCESS) {
+ error = -ENOMEM;
+ goto exit;
+ }
+
+ if (WARN_ON(buffer.dataLength == 0)) {
+ error = -EINVAL;
+ goto exit;
+ }
+
+ error = slsi_mlme_set(sdev, NULL, buffer.data, buffer.dataLength);
+ kfree(buffer.data);
+
+ if (!error)
+ return 0;
+
+exit:
+ SLSI_ERR(sdev, "Error in setting wifi sharing 5ghz channel. error = %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
/* Program the MAC address randomisation mask MIB used for scan requests.
 * If writing the mask MIB fails (e.g. older firmware) and scan address
 * randomisation was requested, fall back to enabling plain (unmasked)
 * randomisation instead.
 * Returns 0 on success or a negative error code.
 */
int slsi_set_mac_randomisation_mask(struct slsi_dev *sdev, u8 *mac_address_mask)
{
	int r = 0;
	struct slsi_mib_data mib_data = { 0, NULL };

	SLSI_DBG1(sdev, SLSI_CFG80211, "Mask is :%pM\n", mac_address_mask);
	r = slsi_mib_encode_octet(&mib_data, SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION_MASK, ETH_ALEN,
				  mac_address_mask, 0);
	if (r != SLSI_MIB_STATUS_SUCCESS) {
		SLSI_ERR(sdev, "Err setting unifiMacAddrRandomistaionMask MIB. error = %d\n", r);
		/* Fallback path: the mask MIB could not even be encoded;
		 * if randomisation was requested, at least switch it on unmasked.
		 */
		if (sdev->scan_addr_set) {
			struct slsi_mib_data mib_data_randomization_activated = { 0, NULL };

			r = slsi_mib_encode_bool(&mib_data_randomization_activated,
						 SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION, 1, 0);
			if (r != SLSI_MIB_STATUS_SUCCESS) {
				SLSI_ERR(sdev, "UNIFI_MAC_ADDRESS_RANDOMISATION_ACTIVATED: no mem for MIB\n");
				return -ENOMEM;
			}

			r = slsi_mlme_set(sdev, NULL, mib_data_randomization_activated.data,
					  mib_data_randomization_activated.dataLength);

			kfree(mib_data_randomization_activated.data);

			if (r)
				SLSI_ERR(sdev, "Err setting unifiMacAddrRandomistaionActivated MIB. error = %d\n", r);
			return r;
		}
		return -ENOMEM;
	}
	if (mib_data.dataLength == 0) {
		SLSI_WARN(sdev, "Mib Data length is Zero\n");
		return -EINVAL;
	}
	r = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
	if (r)
		SLSI_ERR(sdev, "Err setting Randomized mac mask= %d\n", r);
	kfree(mib_data.data);
	return r;
}
+#endif
+/* Set the new country code and read the regulatory parameters of updated country. */
/* Set the firmware default country and re-read regulatory rules for it.
 * @alpha2_code: country code string; @size: its length including terminator
 * (assumes callers pass 3 or 4 - TODO confirm no other sizes reach here).
 * Returns 0 on success (or when the country is already current), -1 when
 * the MIB write fails, or a negative error code on encode failure.
 */
int slsi_set_country_update_regd(struct slsi_dev *sdev, const char *alpha2_code, int size)
{
	struct slsi_mib_data mib_data = { 0, NULL };
	char alpha2[4];
	int error = 0;

	SLSI_DBG2(sdev, SLSI_MLME, "Set country code: %c%c\n", alpha2_code[0], alpha2_code[1]);

	if (size == 4) {
		memcpy(alpha2, alpha2_code, 4);
	} else {
		memcpy(alpha2, alpha2_code, 3);
		alpha2[3] = '\0';
	}

	/* Skip the firmware round-trip when nothing changes. */
	if (memcmp(alpha2, sdev->device_config.domain_info.regdomain->alpha2, 2) == 0) {
		SLSI_DBG3(sdev, SLSI_MLME, "Country is already set to the requested country code\n");
		return 0;
	}

	SLSI_MUTEX_LOCK(sdev->device_config_mutex);

	/* Only the 3-byte country string (without any 4th byte) is written. */
	error = slsi_mib_encode_octet(&mib_data, SLSI_PSID_UNIFI_DEFAULT_COUNTRY, 3, alpha2, 0);
	if (error != SLSI_MIB_STATUS_SUCCESS) {
		error = -ENOMEM;
		goto exit;
	}

	if (WARN_ON(mib_data.dataLength == 0)) {
		error = -EINVAL;
		goto exit;
	}

	error = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);

	kfree(mib_data.data);

	if (error) {
		SLSI_ERR(sdev, "Err setting country error = %d\n", error);
		SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
		return -1;
	}

	/* Read the regulatory params for the country */
	if (slsi_read_regulatory_rules(sdev, &sdev->device_config.domain_info, alpha2) == 0) {
		slsi_reset_channel_flags(sdev);
		wiphy_apply_custom_regulatory(sdev->wiphy, sdev->device_config.domain_info.regdomain);
		slsi_update_supported_channels_regd_flags(sdev);
	}

exit:
	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
	return error;
}
+
+/* Read unifiDisconnectTimeOut MIB */
+int slsi_read_disconnect_ind_timeout(struct slsi_dev *sdev, u16 psid)
+{
+ struct slsi_mib_data mibreq = { 0, NULL };
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_entry mib_val;
+ int r = 0;
+ int rx_len = 0;
+ int len = 0;
+
+ SLSI_DBG3(sdev, SLSI_MLME, "\n");
+
+ slsi_mib_encode_get(&mibreq, psid, 0);
+
+ mibrsp.dataLength = 10; /* PSID header(5) + uint 4 bytes + status(1) */
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
+ kfree(mibreq.data);
+ return -ENOMEM;
+ }
+
+ r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data,
+ mibrsp.dataLength, &rx_len);
+ kfree(mibreq.data);
+
+ if (r == 0) {
+ mibrsp.dataLength = rx_len;
+ len = slsi_mib_decode(&mibrsp, &mib_val);
+
+ if (len == 0) {
+ kfree(mibrsp.data);
+ SLSI_ERR(sdev, "Mib decode error\n");
+ return -EINVAL;
+ }
+ /* Add additional 1 sec delay */
+ sdev->device_config.ap_disconnect_ind_timeout = ((mib_val.value.u.uintValue + 1) * 1000);
+ } else {
+ SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
+ }
+
+ kfree(mibrsp.data);
+ return r;
+}
+
+/* Read unifiDefaultCountry MIB */
+int slsi_read_default_country(struct slsi_dev *sdev, u8 *alpha2, u16 index)
+{
+ struct slsi_mib_data mibreq = { 0, NULL };
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_entry mib_val;
+ int r = 0;
+ int rx_len = 0;
+ int len = 0;
+
+ slsi_mib_encode_get(&mibreq, SLSI_PSID_UNIFI_DEFAULT_COUNTRY, index);
+
+ mibrsp.dataLength = 11; /* PSID header(5) + index(1) + country code alpha2 3 bytes + status(1) */
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
+ kfree(mibreq.data);
+ return -ENOMEM;
+ }
+
+ r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data,
+ mibrsp.dataLength, &rx_len);
+
+ kfree(mibreq.data);
+
+ if (r == 0) {
+ mibrsp.dataLength = rx_len;
+ len = slsi_mib_decode(&mibrsp, &mib_val);
+
+ if (len == 0) {
+ kfree(mibrsp.data);
+ SLSI_ERR(sdev, "Mib decode error\n");
+ return -EINVAL;
+ }
+ memcpy(alpha2, mib_val.value.u.octetValue.data, 2);
+ } else {
+ SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
+ }
+
+ kfree(mibrsp.data);
+ return r;
+}
+
+int slsi_copy_country_table(struct slsi_dev *sdev, struct slsi_mib_data *mib, int len)
+{
+ SLSI_DBG3(sdev, SLSI_MLME, "\n");
+
+ kfree(sdev->device_config.domain_info.countrylist);
+ sdev->device_config.domain_info.countrylist = kmalloc(len, GFP_KERNEL);
+
+ if (!sdev->device_config.domain_info.countrylist) {
+ SLSI_ERR(sdev, "kmalloc failed\n");
+ return -EINVAL;
+ }
+
+ if (!mib || !mib->data) {
+ SLSI_ERR(sdev, "Invalid MIB country table\n");
+ return -EINVAL;
+ }
+
+ memcpy(sdev->device_config.domain_info.countrylist, mib->data, len);
+ sdev->device_config.domain_info.country_len = len;
+
+ return 0;
+}
+
+/* Read unifi country list */
+int slsi_read_unifi_countrylist(struct slsi_dev *sdev, u16 psid)
+{
+ struct slsi_mib_data mibreq = { 0, NULL };
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_entry mib_val;
+ int r = 0;
+ int rx_len = 0;
+ int len = 0;
+ int ret;
+
+ slsi_mib_encode_get(&mibreq, psid, 0);
+
+ /* Fixed fields len (5) : 2 bytes(PSID) + 2 bytes (Len) + 1 byte (status)
+ * Data : 148 countries??? for SLSI_PSID_UNIFI_COUNTRY_LIST
+ */
+ mibrsp.dataLength = 5 + (NUM_COUNTRY * 2);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
+ kfree(mibreq.data);
+ return -ENOMEM;
+ }
+
+ r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength, mibrsp.data,
+ mibrsp.dataLength, &rx_len);
+
+ kfree(mibreq.data);
+
+ if (r == 0) {
+ mibrsp.dataLength = rx_len;
+ len = slsi_mib_decode(&mibrsp, &mib_val);
+
+ if (len == 0) {
+ kfree(mibrsp.data);
+ return -EINVAL;
+ }
+ ret = slsi_copy_country_table(sdev, &mib_val.value.u.octetValue, len);
+ if (ret < 0) {
+ kfree(mibrsp.data);
+ return ret;
+ }
+ } else {
+ SLSI_ERR(sdev, "Mib read failed (error: %d)\n", r);
+ }
+
+ kfree(mibrsp.data);
+ return r;
+}
+
+void slsi_regd_deinit(struct slsi_dev *sdev)
+{
+ SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "slsi_regd_deinit\n");
+
+ kfree(sdev->device_config.domain_info.countrylist);
+}
+
/* Clear P2P off-channel (dwell) state on the group vif and reset the
 * expected-frame marker.
 * @acquire_lock: take the vif mutex around the MLME call; pass false when
 * the caller already holds it.
 */
void slsi_clear_offchannel_data(struct slsi_dev *sdev, bool acquire_lock)
{
	struct net_device *dev = NULL;
	struct netdev_vif *ndev_vif = NULL;

	dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2PX_SWLAN);
	if (WARN_ON(!dev)) {
		SLSI_ERR(sdev, "No Group net dev found\n");
		return;
	}
	ndev_vif = netdev_priv(dev);

	if (acquire_lock)
		SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	/* Reset dwell time should be sent on group vif */
	(void)slsi_mlme_reset_dwell_time(sdev, dev);

	if (acquire_lock)
		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

	sdev->p2p_group_exp_frame = SLSI_P2P_PA_INVALID;
}
+
/* Delayed-work handler: tears down the HS2 (wlan unsync) vif once its
 * retention period expires. Scheduled by the mgmt-tx path via
 * unsync.hs2_del_vif_work.
 */
static void slsi_hs2_unsync_vif_delete_work(struct work_struct *work)
{
	struct netdev_vif *ndev_vif = container_of((struct delayed_work *)work, struct netdev_vif, unsync.hs2_del_vif_work);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	SLSI_NET_DBG1(ndev_vif->wdev.netdev, SLSI_CFG80211, "Delete HS vif duration expired - Deactivate unsync vif\n");
	slsi_wlan_unsync_vif_deactivate(ndev_vif->sdev, ndev_vif->wdev.netdev, true);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
}
+
/* Bring up the WLAN unsynchronised vif on @chan (used for HS2 / off-channel
 * mgmt-tx). Takes a wakelock for the lifetime of the vif; caller must hold
 * the vif mutex. Returns 0 on success, -EINVAL on any failure (the vif is
 * torn down before returning).
 * @wait: caller-requested wait duration; not referenced in this function.
 */
int slsi_wlan_unsync_vif_activate(struct slsi_dev *sdev, struct net_device *dev,
				  struct ieee80211_channel *chan, u16 wait)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	int r = 0;
	u8 device_address[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u32 action_frame_bmap;

	SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Activate wlan unsync vif\n");

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	ndev_vif->vif_type = FAPI_VIFTYPE_UNSYNCHRONISED;

	/* Avoid suspend when wlan unsync VIF is active */
	slsi_wakelock(&sdev->wlan_wl);

	/* Interface address and device address are same for unsync vif */
	if (slsi_mlme_add_vif(sdev, dev, dev->dev_addr, device_address) != 0) {
		SLSI_NET_ERR(dev, "add vif failed for wlan unsync vif\n");
		goto exit_with_error;
	}

	if (slsi_vif_activated(sdev, dev) != 0) {
		SLSI_NET_ERR(dev, "vif activate failed for wlan unsync vif\n");
		slsi_mlme_del_vif(sdev, dev);
		goto exit_with_error;
	}
	sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_ACTIVE;
	INIT_DELAYED_WORK(&ndev_vif->unsync.hs2_del_vif_work, slsi_hs2_unsync_vif_delete_work);
	action_frame_bmap = SLSI_ACTION_FRAME_PUBLIC | SLSI_ACTION_FRAME_RADIO_MEASUREMENT;

	/* Register for the action frames this vif must receive. */
	r = slsi_mlme_register_action_frame(sdev, dev, action_frame_bmap, action_frame_bmap);
	if (r != 0) {
		SLSI_NET_ERR(dev, "slsi_mlme_register_action_frame failed: resultcode = %d, action_frame_bmap:%d\n",
			     r, action_frame_bmap);
		goto exit_with_vif;
	}

	if (slsi_mlme_set_channel(sdev, dev, chan, SLSI_FW_CHANNEL_DURATION_UNSPECIFIED, 0, 0) != 0) {
		SLSI_NET_ERR(dev, "Set channel failed for wlan unsync vif\n");
		goto exit_with_vif;
	}
	ndev_vif->chan = chan;
	ndev_vif->driver_channel = chan->hw_value;
	return r;

exit_with_vif:
	/* NOTE(review): slsi_wlan_unsync_vif_deactivate() also calls
	 * slsi_wakeunlock(), and control then falls through to
	 * exit_with_error which unlocks again - confirm the wakelock is
	 * reference-counted or this path releases it twice.
	 */
	slsi_wlan_unsync_vif_deactivate(sdev, dev, true);
exit_with_error:
	slsi_wakeunlock(&sdev->wlan_wl);
	return -EINVAL;
}
+
+/* Delete unsync vif - DON'T update the vif type */
/* Tear down the WLAN unsync vif: cancel the delete work, delete the vif in
 * firmware (only when @hw_available), release the wakelock and reset the
 * driver-side state. Deliberately does NOT change vif_type (see comment
 * below). Caller must hold the vif mutex; safe to call when already
 * deactivated.
 */
void slsi_wlan_unsync_vif_deactivate(struct slsi_dev *sdev, struct net_device *dev, bool hw_available)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "De-activate wlan unsync vif\n");

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	if (sdev->wlan_unsync_vif_state == WLAN_UNSYNC_NO_VIF) {
		SLSI_NET_DBG1(dev, SLSI_INIT_DEINIT, "wlan unsync vif already deactivated\n");
		return;
	}

	cancel_delayed_work(&ndev_vif->unsync.hs2_del_vif_work);

	/* slsi_vif_deactivated is not used here after slsi_mlme_del_vif
	 * as it modifies vif type as well
	 */
	if (hw_available)
		slsi_mlme_del_vif(sdev, dev);

	slsi_wakeunlock(&sdev->wlan_wl);

	sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
	ndev_vif->activated = false;
	ndev_vif->chan = NULL;

	/* Clear any pending mgmt-tx bookkeeping for this vif. */
	(void)slsi_set_mgmt_tx_data(ndev_vif, 0, 0, NULL, 0);
}
+
/* Delayed-work handler for a hardware scan that did not complete in time.
 * If a requeue was requested, re-arm the timeout once; otherwise abort the
 * scan in firmware and complete it (unsuccessfully) towards cfg80211.
 */
void slsi_scan_ind_timeout_handle(struct work_struct *work)
{
	struct netdev_vif *ndev_vif = container_of((struct delayed_work *)work, struct netdev_vif, scan_timeout_work);
	struct net_device *dev = slsi_get_netdev(ndev_vif->sdev, ndev_vif->ifnum);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
	if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
		if (ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work) {
			/* One more grace period was requested; re-arm once. */
			queue_delayed_work(ndev_vif->sdev->device_wq, &ndev_vif->scan_timeout_work,
					   msecs_to_jiffies(SLSI_FW_SCAN_DONE_TIMEOUT_MSEC));
			ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
		} else {
			SLSI_WARN(ndev_vif->sdev, "Mlme_scan_done_ind not received\n");
			/* scan id encodes ifnum in the high byte */
			(void)slsi_mlme_del_scan(ndev_vif->sdev, dev, ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID, true);
			slsi_scan_complete(ndev_vif->sdev, dev, SLSI_SCAN_HW_ID, false);
		}
	}
	SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
}
+
+void slsi_update_supported_channels_regd_flags(struct slsi_dev *sdev)
+{
+ int i = 0;
+ struct wiphy *wiphy = sdev->wiphy;
+ struct ieee80211_channel *chan;
+
+ /* If all channels are supported by chip no need disable any channel
+ * So return
+ */
+ if (sdev->enabled_channel_count == 39)
+ return;
+ if (wiphy->bands[0]) {
+ for (i = 0; i < ARRAY_SIZE(sdev->supported_2g_channels); i++) {
+ if (sdev->supported_2g_channels[i] == 0) {
+ chan = &wiphy->bands[0]->channels[i];
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ }
+ }
+ if (sdev->band_5g_supported && wiphy->bands[1]) {
+ for (i = 0; i < ARRAY_SIZE(sdev->supported_5g_channels); i++) {
+ if (sdev->supported_5g_channels[i] == 0) {
+ chan = &wiphy->bands[1]->channels[i];
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ }
+ }
+}
+
+int slsi_find_chan_idx(u16 chan, u8 hw_mode)
+{
+ int idx = 0, i = 0;
+ u16 slsi_5ghz_channels_list[25] = {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132,
+ 136, 140, 144, 149, 153, 157, 161, 165};
+
+ if (hw_mode == SLSI_ACS_MODE_IEEE80211B || hw_mode == SLSI_ACS_MODE_IEEE80211G) {
+ idx = chan - 1;
+ return idx;
+ }
+ for (i = 0; i < 25; i++) {
+ if (chan == slsi_5ghz_channels_list[i]) {
+ idx = i;
+ break;
+ }
+ }
+ return idx;
+}
+
+#ifdef CONFIG_SCSC_WLAN_SET_NUM_ANTENNAS
+/* Note : netdev_vif lock should be taken care by caller. */
+int slsi_set_num_antennas(struct net_device *dev, const u16 num_of_antennas)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_dev *sdev = ndev_vif->sdev;
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int ret = 0;
+ const bool is_sta = (ndev_vif->iftype == NL80211_IFTYPE_STATION);
+ const bool is_softap = (ndev_vif->iftype == NL80211_IFTYPE_AP);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (num_of_antennas > 2 || num_of_antennas == 0) {
+ SLSI_NET_ERR(dev, "Invalid num_of_antennas %hu\n", num_of_antennas);
+ return -EINVAL;
+ }
+ if (!is_sta && !is_softap) {
+ SLSI_NET_ERR(dev, "Invalid interface type %s\n", dev->name);
+ return -EPERM;
+ }
+ if (is_sta && (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED)) {
+ SLSI_NET_ERR(dev, "sta is not in connected state\n");
+ return -EPERM;
+ }
+ SLSI_NET_INFO(dev, "mlme_set_num_antennas_req(vif:%u num_of_antennas:%u)\n", ndev_vif->ifnum, num_of_antennas);
+ /* TODO: Change signal name to MLME_SET_NUM_ANTENNAS_REQ and MLME_SET_NUM_ANTENNAS_CFM. */
+ req = fapi_alloc(mlme_set_nss_req, MLME_SET_NSS_REQ, ndev_vif->ifnum, 0);
+ fapi_set_u16(req, u.mlme_set_nss_req.vif, ndev_vif->ifnum);
+ fapi_set_u16(req, u.mlme_set_nss_req.rx_nss, num_of_antennas);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_NSS_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_set_nss_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_set_nss_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_set_nss_cfm.result_code));
+ ret = -EINVAL;
+ }
+ slsi_kfree_skb(cfm);
+ return ret;
+}
+#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef __SLSI_MGT_H__
+#define __SLSI_MGT_H__
+
+#include <linux/mutex.h>
+
+#include "dev.h"
+#include "debug.h"
+
+/* For 3.4.11 kernel support */
+#ifndef WLAN_OUI_MICROSOFT
+#define WLAN_OUI_MICROSOFT 0x0050f2
+#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
+#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
+#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
+#endif
+
+#define SLSI_COUNTRY_CODE_LEN 3
+
+#define SLSI_EAPOL_TYPE_RSN_KEY (2)
+#define SLSI_EAPOL_TYPE_WPA_KEY (254)
+
+#define SLSI_IEEE8021X_TYPE_EAPOL_KEY 3
+#define SLSI_IEEE8021X_TYPE_EAP_PACKET 0
+
+#define SLSI_EAPOL_KEY_INFO_KEY_TYPE_BIT_IN_LOWER_BYTE BIT(3) /* Group = 0, Pairwise = 1 */
+#define SLSI_EAPOL_KEY_INFO_MIC_BIT_IN_HIGHER_BYTE BIT(0)
+#define SLSI_EAPOL_KEY_INFO_SECURE_BIT_IN_HIGHER_BYTE BIT(1)
+/* pkt_data would start from 802.1X Authentication field (pkt_data[0] = Version).
+ * For M4 packet, it will be something as below... member(size, position)
+ * Version (1, 0) + Type (1, 1) + Length (2, 2:3) + Descriptor Type (1, 4) + Key Information (2, 5:6) +
+ * key_length(2, 7:8) + replay_counter(8, 9:16) + key_nonce(32, 17:48) + key_iv(16, 49:64) +
+ * key_rsc (8, 65:72) + key_id(16, 73:80) + key_mic (16, 81:96) + key_data_length(2, 97:98) +
+ * keydata(key_data_length, 99:99+key_data_length)
+ */
+#define SLSI_EAPOL_IEEE8021X_TYPE_POS (1)
+#define SLSI_EAPOL_TYPE_POS (4)
+#define SLSI_EAPOL_KEY_INFO_HIGHER_BYTE_POS (5)
+#define SLSI_EAPOL_KEY_INFO_LOWER_BYTE_POS (6)
+#define SLSI_EAPOL_KEY_DATA_LENGTH_HIGHER_BYTE_POS (97)
+#define SLSI_EAPOL_KEY_DATA_LENGTH_LOWER_BYTE_POS (98)
+
+#define SLSI_EAP_CODE_POS (4)
+#define SLSI_EAP_PACKET_REQUEST (1)
+#define SLSI_EAP_PACKET_RESPONSE (2)
+#define SLSI_EAP_PACKET_SUCCESS (3)
+#define SLSI_EAP_PACKET_FAILURE (4)
+#define SLSI_EAP_TYPE_POS (8)
+#define SLSI_EAP_TYPE_EXPANDED (254)
+#define SLSI_EAP_OPCODE_POS (16)
+#define SLSI_EAP_OPCODE_WSC_MSG (4)
+#define SLSI_EAP_OPCODE_WSC_START (1)
+#define SLSI_EAP_MSGTYPE_POS (27)
+#define SLSI_EAP_MSGTYPE_M8 (12)
+#define SLSI_EAP_WPS_DWELL_TIME (100000) /*100 ms */
+#define SLSI_EAP_TYPE_IDENTITY (1)
+
+#define SLSI_80211_AC_VO 0
+#define SLSI_80211_AC_VI 1
+#define SLSI_80211_AC_BE 2
+#define SLSI_80211_AC_BK 3
+
+/* IF Number (Index) based checks */
+#define SLSI_IS_VIF_INDEX_WLAN(ndev_vif) (ndev_vif->ifnum == SLSI_NET_INDEX_WLAN)
+#define SLSI_IS_VIF_INDEX_P2P(ndev_vif) (ndev_vif->ifnum == SLSI_NET_INDEX_P2P)
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#define SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif) ((ndev_vif->ifnum == SLSI_NET_INDEX_P2PX_SWLAN) &&\
+ (sdev->netdev_ap != sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]))
+#define SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif) ((ndev_vif->ifnum == SLSI_NET_INDEX_P2PX_SWLAN) &&\
+ (sdev->netdev_ap == sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]))
+#else
+#define SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif) (ndev_vif->ifnum == SLSI_NET_INDEX_P2PX_SWLAN)
+#endif
+#define SLSI_IS_VIF_INDEX_NAN(ndev_vif) ((ndev_vif)->ifnum == SLSI_NET_INDEX_NAN)
+
+/* Check for P2P unsync vif type */
+#define SLSI_IS_P2P_UNSYNC_VIF(ndev_vif) ((ndev_vif->ifnum == SLSI_NET_INDEX_P2P) && (ndev_vif->vif_type == FAPI_VIFTYPE_UNSYNCHRONISED))
+
+/* Check for HS unsync vif type */
+#define SLSI_IS_HS2_UNSYNC_VIF(ndev_vif) ((ndev_vif->ifnum == SLSI_NET_INDEX_WLAN) && (ndev_vif->vif_type == FAPI_VIFTYPE_UNSYNCHRONISED))
+
+/* Check for P2P Group role */
+#define SLSI_IS_P2P_GROUP_STATE(sdev) ((sdev->p2p_state == P2P_GROUP_FORMED_GO) || (sdev->p2p_state == P2P_GROUP_FORMED_CLI))
+
+/* Extra delay to wait after MLME-Roam.Response before obtaining roam reports */
+#define SLSI_STA_ROAM_REPORT_EXTRA_DELAY_MSEC 50
+
+/* Extra duration in addition to ROC duration - For any workqueue scheduling delay */
+#define SLSI_P2P_ROC_EXTRA_MSEC 10
+
+/* Extra duration to retain unsync vif even after ROC/mgmt_tx completes */
+#define SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC 2000
+/* Extra duration to retain HS2 unsync vif even after mgmt_tx completes */
+#define SLSI_HS2_UNSYNC_VIF_EXTRA_MSEC 1000
+
+/* Increased wait duration to retain unsync vif for GO-Negotiated to complete
+ * due to delayed response or, to allow peer to retry GO-Negotiation
+ */
+#define SLSI_P2P_NEG_PROC_UNSYNC_VIF_RETAIN_DURATION 3000
+
+/* Increased wait duration to send unset channel to Fw.
+ * This would increase the listen time.
+ */
+#define SLSI_P2P_UNSET_CHANNEL_EXTRA_MSEC 600
+/* Extra duration in addition to mgmt tx wait */
+#define SLSI_P2P_MGMT_TX_EXTRA_MSEC 100
+
+#define SLSI_FORCE_SCHD_ACT_FRAME_MSEC 100
+#define SLSI_P2PGO_KEEP_ALIVE_PERIOD_SEC 10
+#define SLSI_P2PGC_CONN_TIMEOUT_MSEC 10000
+
+/* P2P Public Action Frames */
+#define SLSI_P2P_PA_GO_NEG_REQ 0
+#define SLSI_P2P_PA_GO_NEG_RSP 1
+#define SLSI_P2P_PA_GO_NEG_CFM 2
+#define SLSI_P2P_PA_INV_REQ 3
+#define SLSI_P2P_PA_INV_RSP 4
+#define SLSI_P2P_PA_DEV_DISC_REQ 5
+#define SLSI_P2P_PA_DEV_DISC_RSP 6
+#define SLSI_P2P_PA_PROV_DISC_REQ 7
+#define SLSI_P2P_PA_PROV_DISC_RSP 8
+#define SLSI_P2P_PA_INVALID 0xFF
+
+/* Service discovery public action frame types */
+#define SLSI_PA_GAS_INITIAL_REQ (10)
+#define SLSI_PA_GAS_INITIAL_RSP (11)
+#define SLSI_PA_GAS_COMEBACK_REQ (12)
+#define SLSI_PA_GAS_COMEBACK_RSP (13)
+
+/*Radio Measurement action frames types */
+#define SLSI_RM_RADIO_MEASUREMENT_REQ (0)
+#define SLSI_RM_RADIO_MEASUREMENT_REP (1)
+#define SLSI_RM_LINK_MEASUREMENT_REQ (2)
+#define SLSI_RM_LINK_MEASUREMENT_REP (3)
+#define SLSI_RM_NEIGH_REP_REQ (4)
+#define SLSI_RM_NEIGH_REP_RSP (5)
+
+#define SLSI_WNM_ACTION_FIELD_MIN (0)
+#define SLSI_WNM_ACTION_FIELD_MAX (27)
+
+/* For service discovery action frames dummy subtype is used by setting the 7th bit */
+#define SLSI_PA_GAS_DUMMY_SUBTYPE_MASK 0x80
+#define SLSI_PA_GAS_INITIAL_REQ_SUBTYPE (SLSI_PA_GAS_INITIAL_REQ | SLSI_PA_GAS_DUMMY_SUBTYPE_MASK)
+#define SLSI_PA_GAS_INITIAL_RSP_SUBTYPE (SLSI_PA_GAS_INITIAL_RSP | SLSI_PA_GAS_DUMMY_SUBTYPE_MASK)
+#define SLSI_PA_GAS_COMEBACK_REQ_SUBTYPE (SLSI_PA_GAS_COMEBACK_REQ | SLSI_PA_GAS_DUMMY_SUBTYPE_MASK)
+#define SLSI_PA_GAS_COMEBACK_RSP_SUBTYPE (SLSI_PA_GAS_COMEBACK_RSP | SLSI_PA_GAS_DUMMY_SUBTYPE_MASK)
+
+#define SLSI_P2P_STATUS_ATTR_ID 0
+#define SLSI_P2P_STATUS_CODE_SUCCESS 0
+
+#define SLSI_ROAMING_CHANNEL_CACHE_TIMEOUT (5 * 60)
+
+#define SLSI_RX_SEQ_NUM_MASK 0xFFF
+#define SLSI_RX_VIA_TDLS_LINK 0x8000
+
/* Fill a wake pattern descriptor matching an Ethertype at frame offset
 * 0x0C. Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe inside unbraced if/else), with arguments parenthesized
 * against operator-precedence surprises.
 */
#define SET_ETHERTYPE_PATTERN_DESC(pd, ethertype) \
	do { \
		(pd).offset = 0x0C; \
		(pd).mask_length = 2; \
		(pd).mask[0] = 0xff; \
		(pd).mask[1] = 0xff; \
		(pd).pattern[0] = (ethertype) >> 8; \
		(pd).pattern[1] = (ethertype) & 0xFF; \
	} while (0)
+
+/* For checking DHCP frame */
+#define SLSI_IP_TYPE_UDP 0x11
+#define SLSI_IP_TYPE_OFFSET 23
+#define SLSI_IP_SOURCE_PORT_OFFSET 34
+#define SLSI_IP_DEST_PORT_OFFSET 36
+#define SLSI_DHCP_SERVER_PORT 67
+#define SLSI_DHCP_CLIENT_PORT 68
+#define SLSI_DNS_DEST_PORT 53
+#define SLSI_MDNS_DEST_PORT 5353
+
+#define SLSI_DHCP_MSG_MAGIC_OFFSET 278
+#define SLSI_DHCP_OPTION 53
+#define SLSI_DHCP_MESSAGE_TYPE_DISCOVER 0x01
+#define SLSI_DHCP_MESSAGE_TYPE_OFFER 0x02
+#define SLSI_DHCP_MESSAGE_TYPE_REQUEST 0x03
+#define SLSI_DHCP_MESSAGE_TYPE_DECLINE 0x04
+#define SLSI_DHCP_MESSAGE_TYPE_ACK 0x05
+#define SLSI_DHCP_MESSAGE_TYPE_NAK 0x06
+#define SLSI_DHCP_MESSAGE_TYPE_RELEASE 0x07
+#define SLSI_DHCP_MESSAGE_TYPE_INFORM 0x08
+#define SLSI_DHCP_MESSAGE_TYPE_FORCERENEW 0x09
+#define SLSI_DHCP_MESSAGE_TYPE_INVALID 0x0A
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+#define SLSI_MAX_ARP_SEND_FRAME 8
+#endif
+#define SLSI_ARP_SRC_IP_ADDR_OFFSET 14
+#define SLSI_ARP_DEST_IP_ADDR_OFFSET 24
+#define SLSI_IS_GRATUITOUS_ARP(frame) (!memcmp(&frame[SLSI_ARP_SRC_IP_ADDR_OFFSET],\
+ &frame[SLSI_ARP_DEST_IP_ADDR_OFFSET], 4))
+#define SLSI_ARP_REPLY_OPCODE 2
+#define SLSI_ARP_REQUEST_OPCODE 1
+#define SLSI_ARP_OPCODE_OFFSET 6
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 4, 0))
+ #define WLAN_CATEGORY_WNM 10
+#endif
+
/* Classification of an outgoing frame for DHCP-aware transmit handling. */
enum slsi_dhcp_tx {
	SLSI_TX_IS_NOT_DHCP,
	SLSI_TX_IS_DHCP_SERVER,
	SLSI_TX_IS_DHCP_CLIENT
};

/* Bit flags attached to a firmware regulatory rule. */
enum slsi_fw_regulatory_rule_flags {
	SLSI_REGULATORY_NO_IR = 1 << 0,
	SLSI_REGULATORY_DFS = 1 << 1,
	SLSI_REGULATORY_NO_OFDM = 1 << 2,
	SLSI_REGULATORY_NO_INDOOR = 1 << 3,
	SLSI_REGULATORY_NO_OUTDOOR = 1 << 4
};

/* STA connection progress as tracked by the driver. */
enum slsi_sta_conn_state {
	SLSI_STA_CONN_STATE_DISCONNECTED = 0,
	SLSI_STA_CONN_STATE_CONNECTING = 1,
	SLSI_STA_CONN_STATE_DOING_KEY_CONFIG = 2,
	SLSI_STA_CONN_STATE_CONNECTED = 3
};
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0))
+static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ return !ether_addr_equal(addr1, addr2);
+}
+#endif
+
+/**
+ * Peer record handling:
+ * Records are created/destroyed by the control path eg cfg80211 connect or
+ * when handling a MLME-CONNECT-IND when the VIA is an AP.
+ *
+ * However peer records are also currently accessed from the data path in both
+ * Tx and Rx directions:
+ * Tx - to determine the queueset
+ * Rx - for routing received packets back out to peers
+ *
+ * So the interactions required for the data path:
+ * 1. can NOT block
+ * 2. needs to be as quick as possible
+ */
+static inline struct slsi_peer *slsi_get_peer_from_mac(struct slsi_dev *sdev, struct net_device *dev, const u8 *mac)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ (void)sdev; /* unused */
+
+ /* Accesses the peer records but doesn't block as called from the data path.
+ * MUST check the valid flag on the record before accessing any other data in the record.
+ * Records are static, so having obtained a pointer the pointer will remain valid
+ * it just maybe the data that it points to gets set to ZERO.
+ */
+
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ if (ndev_vif->sta.tdls_enabled) {
+ int i;
+
+ for (i = 1; i < SLSI_TDLS_PEER_INDEX_MAX; i++)
+ if (ndev_vif->peer_sta_record[i] && ndev_vif->peer_sta_record[i]->valid &&
+ compare_ether_addr(ndev_vif->peer_sta_record[i]->address, mac) == 0)
+ return ndev_vif->peer_sta_record[i];
+ }
+ if (ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET] && ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET]->valid)
+ return ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET];
+ } else if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+ int i = 0;
+
+ for (i = 0; i < SLSI_PEER_INDEX_MAX; i++)
+ if (ndev_vif->peer_sta_record[i] && ndev_vif->peer_sta_record[i]->valid &&
+ compare_ether_addr(ndev_vif->peer_sta_record[i]->address, mac) == 0)
+ return ndev_vif->peer_sta_record[i];
+ }
+ return NULL;
+}
+
+static inline struct slsi_peer *slsi_get_peer_from_qs(struct slsi_dev *sdev, struct net_device *dev, u16 queueset)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ (void)sdev; /* unused */
+
+ if (!ndev_vif->peer_sta_record[queueset] || !ndev_vif->peer_sta_record[queueset]->valid)
+ return NULL;
+
+ return ndev_vif->peer_sta_record[queueset];
+}
+
+static inline bool slsi_is_tdls_peer(struct net_device *dev, struct slsi_peer *peer)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ return (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) && (peer->aid >= SLSI_TDLS_PEER_INDEX_MIN);
+}
+
+static inline bool slsi_is_proxy_arp_supported_on_ap(struct sk_buff *assoc_resp_ie)
+{
+ const u8 *ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, assoc_resp_ie->data, assoc_resp_ie->len);
+
+ if ((ie) && (ie[1] > 1))
+ return ie[3] & 0x10; /*0: eid, 1: len; 3: proxy arp is 12th bit*/
+
+ return 0;
+}
+
+static inline int slsi_cache_ies(const u8 *src_ie, size_t src_ie_len, u8 **dest_ie, size_t *dest_ie_len)
+{
+ *dest_ie = kmalloc(src_ie_len, GFP_KERNEL);
+ if (*dest_ie == NULL)
+ return -ENOMEM;
+
+ memcpy(*dest_ie, src_ie, src_ie_len);
+ *dest_ie_len = src_ie_len;
+
+ return 0;
+}
+
+static inline void slsi_clear_cached_ies(u8 **ie, size_t *ie_len)
+{
+ if (*ie_len != 0)
+ kfree(*ie);
+ *ie = NULL;
+ *ie_len = 0;
+}
+
+/* P2P Public Action frame subtype in text format for debug purposes */
+static inline char *slsi_p2p_pa_subtype_text(int subtype)
+{
+ switch (subtype) {
+ case SLSI_P2P_PA_GO_NEG_REQ:
+ return "GO_NEG_REQ";
+ case SLSI_P2P_PA_GO_NEG_RSP:
+ return "GO_NEG_RSP";
+ case SLSI_P2P_PA_GO_NEG_CFM:
+ return "GO_NEG_CFM";
+ case SLSI_P2P_PA_INV_REQ:
+ return "INV_REQ";
+ case SLSI_P2P_PA_INV_RSP:
+ return "INV_RSP";
+ case SLSI_P2P_PA_DEV_DISC_REQ:
+ return "DEV_DISC_REQ";
+ case SLSI_P2P_PA_DEV_DISC_RSP:
+ return "DEV_DISC_RSP";
+ case SLSI_P2P_PA_PROV_DISC_REQ:
+ return "PROV_DISC_REQ";
+ case SLSI_P2P_PA_PROV_DISC_RSP:
+ return "PROV_DISC_RSP";
+ case SLSI_PA_GAS_INITIAL_REQ_SUBTYPE:
+ return "GAS_INITIAL_REQUEST";
+ case SLSI_PA_GAS_INITIAL_RSP_SUBTYPE:
+ return "GAS_INITIAL_RESPONSE";
+ case SLSI_PA_GAS_COMEBACK_REQ_SUBTYPE:
+ return "GAS_COMEBACK_REQUEST";
+ case SLSI_PA_GAS_COMEBACK_RSP_SUBTYPE:
+ return "GAS_COMEBACK_RESPONSE";
+ case SLSI_P2P_PA_INVALID:
+ return "PA_INVALID";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/* Cookie generation and assignment for user space ROC and mgmt_tx request from supplicant */
+static inline void slsi_assign_cookie_id(u64 *cookie, u64 *counter)
+{
+ (*cookie) = ++(*counter);
+ if ((*cookie) == 0)
+ (*cookie) = ++(*counter);
+}
+
+/* Update P2P Probe Response IEs in driver */
+static inline void slsi_unsync_vif_set_probe_rsp_ie(struct netdev_vif *ndev_vif, u8 *ies, size_t ies_len)
+{
+ if (ndev_vif->unsync.probe_rsp_ies_len)
+ kfree(ndev_vif->unsync.probe_rsp_ies);
+ ndev_vif->unsync.probe_rsp_ies = ies;
+ ndev_vif->unsync.probe_rsp_ies_len = ies_len;
+}
+
+/* Set management frame tx data of vif */
+static inline int slsi_set_mgmt_tx_data(struct netdev_vif *ndev_vif, u64 cookie, u16 host_tag, const u8 *buf, size_t buf_len)
+{
+ u8 *tx_frame = NULL;
+
+ if (buf_len != 0) {
+ tx_frame = kmalloc(buf_len, GFP_KERNEL);
+ if (!tx_frame) {
+ SLSI_NET_ERR(ndev_vif->wdev.netdev, "FAILED to allocate memory for Tx frame\n");
+ return -ENOMEM;
+ }
+ SLSI_NET_DBG3(ndev_vif->wdev.netdev, SLSI_CFG80211, "Copy buffer for tx_status\n");
+ memcpy(tx_frame, buf, buf_len);
+ } else if (ndev_vif->mgmt_tx_data.buf) {
+ SLSI_NET_DBG3(ndev_vif->wdev.netdev, SLSI_CFG80211, "Free buffer of tx_status\n");
+ kfree(ndev_vif->mgmt_tx_data.buf);
+ }
+
+ ndev_vif->mgmt_tx_data.cookie = cookie;
+ ndev_vif->mgmt_tx_data.host_tag = host_tag;
+ ndev_vif->mgmt_tx_data.buf = tx_frame;
+ ndev_vif->mgmt_tx_data.buf_len = buf_len;
+
+ return 0;
+}
+
+/**
+ * Handler to queue P2P unsync vif deletion work.
+ */
+static inline void slsi_p2p_queue_unsync_vif_del_work(struct netdev_vif *ndev_vif, unsigned int delay)
+{
+ cancel_delayed_work(&ndev_vif->unsync.del_vif_work);
+ queue_delayed_work(ndev_vif->sdev->device_wq, &ndev_vif->unsync.del_vif_work, msecs_to_jiffies(delay));
+}
+
/* Update the new state for P2P. Also log the state change for debug purpose.
 * Macro arguments are parenthesised so expression arguments expand safely;
 * note @next_state is evaluated twice.
 */
#define SLSI_P2P_STATE_CHANGE(sdev, next_state) \
	do { \
		SLSI_DBG1((sdev), SLSI_CFG80211, "P2P state change: %s -> %s\n", slsi_p2p_state_text((sdev)->p2p_state), slsi_p2p_state_text(next_state)); \
		(sdev)->p2p_state = (next_state); \
	} while (0)
+
+void slsi_purge_scan_results(struct netdev_vif *ndev_vif, u16 scan_id);
+void slsi_purge_scan_results_locked(struct netdev_vif *ndev_vif, u16 scan_id);
+struct sk_buff *slsi_dequeue_cached_scan_result(struct slsi_scan *scan, int *count);
+void slsi_get_hw_mac_address(struct slsi_dev *sdev, u8 *addr);
+int slsi_start(struct slsi_dev *sdev);
+int slsi_start_monitor_mode(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_stop_net_dev(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_stop(struct slsi_dev *sdev);
+void slsi_stop_locked(struct slsi_dev *sdev);
+struct slsi_peer *slsi_peer_add(struct slsi_dev *sdev, struct net_device *dev, u8 *peer_address, u16 aid);
+void slsi_peer_update_assoc_req(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, struct sk_buff *skb);
+void slsi_peer_update_assoc_rsp(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, struct sk_buff *skb);
+void slsi_peer_reset_stats(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer);
+int slsi_peer_remove(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer);
+int slsi_ps_port_control(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, enum slsi_sta_conn_state s);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+int slsi_del_station(struct wiphy *wiphy, struct net_device *dev,
+ struct station_del_parameters *del_params);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+int slsi_del_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac);
+#else
+int slsi_del_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac);
+#endif
+
+int slsi_vif_activated(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_vif_deactivated(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_handle_disconnect(struct slsi_dev *sdev, struct net_device *dev, u8 *peer_address, u16 reason);
+int slsi_band_update(struct slsi_dev *sdev, int band);
+int slsi_ip_address_changed(struct slsi_dev *sdev, struct net_device *dev, __be32 ipaddress);
+int slsi_send_gratuitous_arp(struct slsi_dev *sdev, struct net_device *dev);
+struct ieee80211_channel *slsi_find_scan_channel(struct slsi_dev *sdev, struct ieee80211_mgmt *mgmt, size_t mgmt_len, u16 freq);
+int slsi_auto_chan_select_scan(struct slsi_dev *sdev, int chan_count, struct ieee80211_channel *channels[]);
+int slsi_set_uint_mib(struct slsi_dev *dev, struct net_device *ndev, u16 psid, int value);
+int slsi_update_regd_rules(struct slsi_dev *sdev, bool country_check);
+int slsi_set_boost(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_p2p_init(struct slsi_dev *sdev, struct netdev_vif *ndev_vif);
+void slsi_p2p_deinit(struct slsi_dev *sdev, struct netdev_vif *ndev_vif);
+int slsi_p2p_vif_activate(struct slsi_dev *sdev, struct net_device *dev, struct ieee80211_channel *chan, u16 duration, bool set_probe_rsp_ies);
+void slsi_p2p_vif_deactivate(struct slsi_dev *sdev, struct net_device *dev, bool hw_available);
+void slsi_p2p_group_start_remove_unsync_vif(struct slsi_dev *sdev);
+int slsi_p2p_dev_probe_rsp_ie(struct slsi_dev *sdev, struct net_device *dev, u8 *probe_rsp_ie, size_t probe_rsp_ie_len);
+int slsi_p2p_dev_null_ies(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_p2p_get_public_action_subtype(const struct ieee80211_mgmt *mgmt);
+int slsi_p2p_get_go_neg_rsp_status(struct net_device *dev, const struct ieee80211_mgmt *mgmt);
+u8 slsi_p2p_get_exp_peer_frame_subtype(u8 subtype);
+int slsi_send_txq_params(struct slsi_dev *sdev, struct net_device *ndev);
+void slsi_abort_sta_scan(struct slsi_dev *sdev);
+int slsi_is_dhcp_packet(u8 *data);
+
+#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
+int slsi_is_dns_packet(u8 *data);
+int slsi_is_mdns_packet(u8 *data);
+int slsi_is_tcp_sync_packet(struct net_device *dev, struct sk_buff *skb);
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_PKT_FILTER
+int slsi_set_enhanced_pkt_filter(struct net_device *dev, u8 pkt_filter_enable);
+#endif
+void slsi_set_packet_filters(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_update_packet_filters(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_clear_packet_filters(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_ap_prepare_add_info_ies(struct netdev_vif *ndev_vif, const u8 *ies, size_t ies_len);
+int slsi_set_mib_roam(struct slsi_dev *dev, struct net_device *ndev, u16 psid, int value);
+#ifdef CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA
+int slsi_set_mib_preferred_antenna(struct slsi_dev *dev, u16 value);
+#endif
+void slsi_reset_throughput_stats(struct net_device *dev);
+int slsi_set_mib_rssi_boost(struct slsi_dev *sdev, struct net_device *dev, u16 psid, int index, int boost);
+#ifdef CONFIG_SCSC_WLAN_LOW_LATENCY_MODE
+int slsi_set_mib_soft_roaming_enabled(struct slsi_dev *sdev, struct net_device *dev, bool enable);
+#endif
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+int slsi_read_enhanced_arp_rx_count_by_lower_mac(struct slsi_dev *sdev, struct net_device *dev, u16 psid);
+void slsi_fill_enhanced_arp_out_of_order_drop_counter(struct netdev_vif *ndev_vif,
+ struct sk_buff *skb);
+#endif
+void slsi_modify_ies_on_channel_switch(struct net_device *dev, struct cfg80211_ap_settings *settings,
+ u8 *ds_params_ie, u8 *ht_operation_ie, struct ieee80211_mgmt *mgmt,
+ u16 beacon_ie_head_len);
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+bool slsi_if_valid_wifi_sharing_channel(struct slsi_dev *sdev, int freq);
+void slsi_extract_valid_wifi_sharing_channels(struct slsi_dev *sdev);
+int slsi_check_if_non_indoor_non_dfs_channel(struct slsi_dev *sdev, int freq);
+int slsi_select_wifi_sharing_ap_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ap_settings *settings, struct slsi_dev *sdev,
+ int *wifi_sharing_channel_switched);
+int slsi_set_mib_wifi_sharing_5ghz_channel(struct slsi_dev *sdev, u16 psid, int value,
+ int offset, int readbyte, char *arg);
+int slsi_get_byte_position(int bit);
+int slsi_check_if_channel_restricted_already(struct slsi_dev *sdev, int channel);
+#endif
+struct net_device *slsi_dynamic_interface_create(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+ struct vif_params *params);
+int slsi_get_mib_roam(struct slsi_dev *sdev, u16 psid, int *mib_value);
+void slsi_roam_channel_cache_add(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_roam_channel_cache_prune(struct net_device *dev, int seconds);
+int slsi_roaming_scan_configure_channels(struct slsi_dev *sdev, struct net_device *dev, const u8 *ssid, u8 *channels);
+int slsi_send_max_transmit_msdu_lifetime(struct slsi_dev *dev, struct net_device *ndev, u32 msdu_lifetime);
+int slsi_read_max_transmit_msdu_lifetime(struct slsi_dev *dev, struct net_device *ndev, u32 *msdu_lifetime);
+int slsi_read_unifi_countrylist(struct slsi_dev *sdev, u16 psid);
+int slsi_read_default_country(struct slsi_dev *sdev, u8 *alpha2, u16 index);
+int slsi_read_disconnect_ind_timeout(struct slsi_dev *sdev, u16 psid);
+int slsi_read_regulatory_rules(struct slsi_dev *sdev, struct slsi_802_11d_reg_domain *domain_info, const char *alpha2);
+int slsi_send_acs_event(struct slsi_dev *sdev, struct slsi_acs_selected_channels acs_selected_channels);
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+int slsi_set_mac_randomisation_mask(struct slsi_dev *sdev, u8 *mac_address_mask);
+#endif
+int slsi_set_country_update_regd(struct slsi_dev *sdev, const char *alpha2_code, int size);
+void slsi_clear_offchannel_data(struct slsi_dev *sdev, bool acquire_lock);
+int slsi_wlan_unsync_vif_activate(struct slsi_dev *sdev, struct net_device *dev,
+ struct ieee80211_channel *chan, u16 duration);
+void slsi_wlan_unsync_vif_deactivate(struct slsi_dev *sdev, struct net_device *devbool, bool hw_available);
+int slsi_is_wes_action_frame(const struct ieee80211_mgmt *mgmt);
+void slsi_scan_ind_timeout_handle(struct work_struct *work);
+void slsi_vif_cleanup(struct slsi_dev *sdev, struct net_device *dev, bool hw_available);
+void slsi_scan_cleanup(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_dump_stats(struct net_device *dev);
+int slsi_send_hanged_vendor_event(struct slsi_dev *sdev, u16 scsc_panic_code);
+void slsi_update_supported_channels_regd_flags(struct slsi_dev *sdev);
+#ifdef CONFIG_SCSC_WLAN_HANG_TEST
+int slsi_test_send_hanged_vendor_event(struct net_device *dev);
+#endif
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+int slsi_send_forward_beacon_vendor_event(struct slsi_dev *sdev, const u8 *ssid, const int ssid_len, const u8 *bssid,
+ u8 channel, const u16 beacon_int, const u64 timestamp, const u64 sys_time);
+int slsi_send_forward_beacon_abort_vendor_event(struct slsi_dev *sdev, u16 reason_code);
+#endif
+void slsi_wlan_dump_public_action_subtype(struct slsi_dev *sdev, struct ieee80211_mgmt *mgmt, bool tx);
+void slsi_reset_channel_flags(struct slsi_dev *sdev);
+
+/* Sysfs based mac address override */
+void slsi_create_sysfs_macaddr(void);
+void slsi_destroy_sysfs_macaddr(void);
+int slsi_find_chan_idx(u16 chan, u8 hw_mode);
+#ifdef CONFIG_SCSC_WLAN_SET_NUM_ANTENNAS
+int slsi_set_num_antennas(struct net_device *dev, const u16 num_of_antennas);
+#endif
+#endif /*__SLSI_MGT_H__*/
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd and its Licensors.
+ * All rights reserved.
+ *
+ *****************************************************************************/
+
+#include "const.h"
+#include "mib.h"
+#include "debug.h"
+
+#define SLSI_MIB_MORE_MASK 0x80
+#define SLSI_MIB_SIGN_MASK 0x40
+#define SLSI_MIB_TYPE_MASK 0x20
+#define SLSI_MIB_LENGTH_MASK 0x1FU
+/**
+ * @brief
+ * Append a buffer to an existing buffer.
+ * This will kmalloc a new buffer and kfree the old one
+ */
+void slsi_mib_buf_append(struct slsi_mib_data *dst, size_t buffer_length, u8 *buffer)
+{
+ u8 *new_buffer = kmalloc(dst->dataLength + buffer_length, GFP_KERNEL);
+
+ if (!new_buffer) {
+ SLSI_ERR_NODEV("kmalloc(%d) failed\n", (int)(dst->dataLength + buffer_length));
+ return;
+ }
+
+ memcpy(new_buffer, dst->data, dst->dataLength);
+ memcpy(&new_buffer[dst->dataLength], buffer, buffer_length);
+
+ dst->dataLength += (u16)buffer_length;
+ kfree(dst->data);
+ dst->data = new_buffer;
+}
+
+size_t slsi_mib_encode_uint32(u8 *buffer, u32 value)
+{
+ u8 i;
+ u8 write_count = 0;
+
+ if (value < 64) {
+ buffer[0] = (u8)value;
+ return 1;
+ }
+
+ /* Encode the Integer
+ * 0xABFF0055 = [0xAB, 0xFF, 0x00, 0x55]
+ * 0xAB0055 = [0xAB, 0x00, 0x55]
+ * 0xAB55 = [0xAB, 0x55]
+ * 0x55 = [0x55]
+ */
+ for (i = 0; i < 4; i++) {
+ u8 byte_value = (value & 0xFF000000) >> 24;
+
+ if (byte_value || write_count) {
+ buffer[1 + write_count] = byte_value;
+ write_count++;
+ }
+ value = value << 8;
+ }
+
+ /* vldata Length | more bit */
+ buffer[0] = write_count | SLSI_MIB_MORE_MASK;
+
+ return 1 + write_count;
+}
+
+size_t slsi_mib_encode_int32(u8 *buffer, s32 signed_value)
+{
+ u8 i;
+ u8 write_count = 0;
+ u32 value = (u32)signed_value;
+
+ if (!(value & 0x10000000))
+ /* just use the Unsigned Encoder */
+ return slsi_mib_encode_uint32(buffer, value);
+
+ if (signed_value >= -64) {
+ buffer[0] = (u8)value & 0x7F; /* vldata Length | more bit */
+ return 1;
+ }
+
+ /* Encode the Negative Integer */
+ for (i = 0; i < 4; i++) {
+ u8 byte_value = (value & 0xFF000000) >> 24;
+
+ if (!((byte_value == 0xFF) && (value & 0x800000)) || write_count) {
+ buffer[1 + write_count] = byte_value;
+ write_count++;
+ }
+ value = value << 8;
+ }
+ /* vldata Length | more bit | sign bit*/
+ buffer[0] = write_count | SLSI_MIB_MORE_MASK | SLSI_MIB_SIGN_MASK;
+
+ return 1 + write_count;
+}
+
+size_t slsi_mib_encode_octet_str(u8 *buffer, struct slsi_mib_data *octet_value)
+{
+ u8 i;
+ u8 write_count = 0;
+ size_t length = octet_value->dataLength;
+
+ /* Encode the Length (Up to 4 bytes 32 bits worth)
+ * 0xABFF0000 = [0xAB, 0xFF, 0x00, 0x00]
+ * 0xAB0000 = [0xAB, 0x00, 0x00]
+ * 0xAB00 = [0xAB, 0x00]
+ * 0x00 = [0x00]
+ */
+ for (i = 0; i < 3; i++) {
+ u8 byte_value = (length & 0xFF000000) >> 24;
+
+ if (byte_value || write_count) {
+ buffer[1 + write_count] = byte_value;
+ write_count++;
+ }
+ length = length << 8;
+ }
+
+ buffer[0] = (1 + write_count) | SLSI_MIB_MORE_MASK | SLSI_MIB_TYPE_MASK;
+ buffer[1 + write_count] = octet_value->dataLength & 0xFF;
+ memcpy(&buffer[2 + write_count], octet_value->data, octet_value->dataLength);
+
+ return 2U + write_count + octet_value->dataLength;
+}
+
+size_t slsi_mib_decode_uint32(u8 *buffer, u32 *value)
+{
+ size_t i;
+ u32 v = 0;
+ size_t length = buffer[0] & SLSI_MIB_LENGTH_MASK;
+
+ if (!(buffer[0] & SLSI_MIB_MORE_MASK)) {
+ *value = buffer[0] & 0x7F;
+ return 1;
+ }
+
+ for (i = 0; i < length; i++) {
+ v = (v << 8);
+ v |= buffer[1 + i];
+ }
+
+ *value = v;
+
+ return 1 + length;
+}
+
+size_t slsi_mib_decodeUint64(u8 *buffer, u64 *value)
+{
+ size_t i;
+ u64 v = 0;
+ size_t length = buffer[0] & SLSI_MIB_LENGTH_MASK;
+
+ if (!(buffer[0] & SLSI_MIB_MORE_MASK)) {
+ *value = buffer[0] & 0x7F;
+ return 1;
+ }
+
+ for (i = 0; i < length; i++) {
+ v = (v << 8);
+ v |= buffer[1 + i];
+ }
+
+ *value = v;
+
+ return 1 + length;
+}
+
+size_t slsi_mib_decodeInt32(u8 *buffer, s32 *value)
+{
+ size_t i;
+ u32 v = 0xFFFFFFFF;
+ size_t length = buffer[0] & SLSI_MIB_LENGTH_MASK;
+
+ if (!(buffer[0] & SLSI_MIB_SIGN_MASK))
+ /* just use the Unsigned Decoder */
+ return slsi_mib_decode_uint32(buffer, (u32 *)value);
+
+ if (!(buffer[0] & SLSI_MIB_MORE_MASK)) {
+ *value = (s32)(0xFFFFFF80 | buffer[0]);
+ return 1;
+ }
+
+ for (i = 0; i < length; i++) {
+ v = (v << 8);
+ v |= buffer[1 + i];
+ }
+
+ *value = (s32)v;
+
+ return 1 + length;
+}
+
+size_t slsi_mib_decodeInt64(u8 *buffer, s64 *value)
+{
+ size_t i;
+ u64 v = 0xFFFFFFFFFFFFFFFFULL;
+ size_t length = buffer[0] & SLSI_MIB_LENGTH_MASK;
+
+ if (!(buffer[0] & SLSI_MIB_SIGN_MASK))
+ /* just use the Unsigned Decoder */
+ return slsi_mib_decodeUint64(buffer, (u64 *)value);
+
+ if (!(buffer[0] & SLSI_MIB_MORE_MASK)) {
+ *value = (s64)(0xFFFFFFFFFFFFFF80ULL | buffer[0]);
+ return 1;
+ }
+
+ for (i = 0; i < length; i++) {
+ v = (v << 8);
+ v |= buffer[1 + i];
+ }
+
+ *value = (s64)v;
+
+ return 1 + length;
+}
+
+/* Just references the oid in the existing buffer. No new memory is allcated */
+size_t slsi_mib_decode_octet_str(u8 *buffer, struct slsi_mib_data *octet_value)
+{
+ size_t i;
+ u32 oid_length_value = 0;
+ size_t length = buffer[0] & SLSI_MIB_LENGTH_MASK;
+
+ for (i = 0; i < length; i++) {
+ oid_length_value = (oid_length_value << 8);
+ oid_length_value |= buffer[1 + i];
+ }
+
+ octet_value->dataLength = oid_length_value;
+ octet_value->data = NULL;
+ if (oid_length_value)
+ octet_value->data = &buffer[1 + length];
+
+ return 1 + length + oid_length_value;
+}
+
/* Classify the vldata element at @buffer and report its size via *@length.
 * Returns SLSI_MIB_TYPE_INT, SLSI_MIB_TYPE_OCTET or SLSI_MIB_TYPE_UINT.
 *
 * NOTE(review): for the multi-byte INT/UINT forms *length is the count of
 * value bytes only (the header byte itself is not included); the caller's
 * bounds check in slsi_mib_decode() appears to rely on the decode functions
 * for the exact consumed size.
 */
static u8 slsi_mib_decode_type_length(u8 *buffer, size_t *length)
{
	*length = 1;
	if (buffer[0] & SLSI_MIB_MORE_MASK)
		*length = buffer[0] & SLSI_MIB_LENGTH_MASK;

	/* Sign bit set => signed integer. */
	if (buffer[0] & SLSI_MIB_SIGN_MASK)
		return SLSI_MIB_TYPE_INT;

	if ((buffer[0] & SLSI_MIB_MORE_MASK) &&
	    (buffer[0] & SLSI_MIB_TYPE_MASK)) {
		/* Octet string: *length currently holds the size of the
		 * length field; add the big-endian payload length it encodes.
		 */
		size_t i;
		size_t oid_length_value = 0;

		for (i = 0; i < *length; i++) {
			oid_length_value = (oid_length_value << 8);
			oid_length_value |= buffer[1 + i];
		}
		*length += oid_length_value;
		return SLSI_MIB_TYPE_OCTET;
	}
	return SLSI_MIB_TYPE_UINT;
}
+
+static size_t slsi_mib_encode_psid_indexs(u8 *buffer, const struct slsi_mib_get_entry *value)
+{
+ size_t i;
+
+ SLSI_U16_TO_BUFF_LE(value->psid, &buffer[0]);
+ buffer[2] = 0;
+ buffer[3] = 0;
+ for (i = 0; i < SLSI_MIB_MAX_INDEXES && value->index[i] != 0; i++)
+ buffer[2] += (u8)slsi_mib_encode_uint32(&buffer[4 + buffer[2]], value->index[i]);
+
+ if (buffer[2] % 2 == 1) {
+ /* Add a padding byte "0x00" to the encoded buffer. The Length
+ * value is NOT updated to account for this pad value. If the
+ * length is an Odd number the Pad values MUST be there if it
+ * is Even it will not be.
+ */
+ buffer[4 + buffer[2]] = 0x00;
+ return 5 + buffer[2];
+ }
+
+ return 4 + buffer[2];
+}
+
/* Encode one MIB entry (psid, indexes, typed value) and append it to
 * @buffer via slsi_mib_buf_append().
 * Returns SLSI_MIB_STATUS_SUCCESS or SLSI_MIB_STATUS_FAILURE.
 */
u16 slsi_mib_encode(struct slsi_mib_data *buffer, struct slsi_mib_entry *value)
{
	size_t i;
	/* Worst case: header + fully encoded indexes + either the octet
	 * payload or an encoded 32-bit integer (at most 5 bytes).
	 */
	size_t required_size = 5U + (5U * SLSI_MIB_MAX_INDEXES) +
			       (value->value.type == SLSI_MIB_TYPE_OCTET ? value->value.u.octetValue.dataLength : 5U);

	size_t encoded_length = 4;

	u8 *tmp_buffer = kmalloc(required_size, GFP_KERNEL);

	if (!tmp_buffer) {
		SLSI_ERR_NODEV("kmalloc(%d) failed\n", (int)required_size);
		return SLSI_MIB_STATUS_FAILURE;
	}

	/* Header: psid (LE16) + length placeholder (LE16, patched below). */
	SLSI_U16_TO_BUFF_LE(value->psid, &tmp_buffer[0]);
	tmp_buffer[2] = 0;
	tmp_buffer[3] = 0;
	/* Indexes are encoded first; tmp_buffer[2] accumulates their size. */
	for (i = 0; i < SLSI_MIB_MAX_INDEXES && value->index[i] != 0; i++)
		tmp_buffer[2] += (u8)slsi_mib_encode_uint32(&tmp_buffer[4 + tmp_buffer[2]], value->index[i]);
	encoded_length += tmp_buffer[2];

	/* Append the value itself according to its type tag. */
	switch (value->value.type) {
	case SLSI_MIB_TYPE_UINT:
		encoded_length += slsi_mib_encode_uint32(&tmp_buffer[encoded_length], value->value.u.uintValue);
		break;
	case SLSI_MIB_TYPE_INT:
		encoded_length += slsi_mib_encode_int32(&tmp_buffer[encoded_length], value->value.u.intValue);
		break;
	case SLSI_MIB_TYPE_OCTET:
		encoded_length += slsi_mib_encode_octet_str(&tmp_buffer[encoded_length], &value->value.u.octetValue);
		break;
	case SLSI_MIB_TYPE_BOOL:
		encoded_length += slsi_mib_encode_uint32(&tmp_buffer[encoded_length], value->value.u.boolValue ? true : false);
		break;
	case SLSI_MIB_TYPE_NONE:
		break;
	default:
		SLSI_WARN_NODEV("Invalid Type:%d requested\n", value->value.type);
		kfree(tmp_buffer);
		return SLSI_MIB_STATUS_FAILURE;
	}

	SLSI_U16_TO_BUFF_LE(encoded_length - 4, &tmp_buffer[2]); /* length */

	if (encoded_length % 2 == 1) {
		/* Add a padding byte "0x00" to the encoded buffer. The Length
		 * value is NOT updated to account for this pad value. If the
		 * length is an Odd number the Pad values MUST be there if it
		 * is Even it will not be.
		 */
		tmp_buffer[encoded_length] = 0x00;
		encoded_length++;
	}

	slsi_mib_buf_append(buffer, encoded_length, tmp_buffer);
	kfree(tmp_buffer);

	return SLSI_MIB_STATUS_SUCCESS;
}
+
/* Decode one MIB record from @data into *@value.
 * Leading UINT elements are treated as indexes; the final element is the
 * value. @value references (does not copy) octet payloads inside @data.
 * Returns the number of bytes consumed, or 0 on malformed input (in which
 * case *@value is zeroed).
 */
size_t slsi_mib_decode(struct slsi_mib_data *data, struct slsi_mib_entry *value)
{
	u8 *buffer = data->data;
	u32 buffer_length = data->dataLength;
	size_t index_count = 0;
	size_t length;
	size_t decoded_length = 4;

	memset(value, 0x00, sizeof(struct slsi_mib_entry));

	/* A record is at least psid (2 bytes) + length (2 bytes). */
	if (buffer_length < 4) {
		SLSI_WARN_NODEV("Mib Decode Length:%d Must be greater than 4\n", buffer_length);
		return 0;
	}

	if (!buffer)
		return 0;

	length = SLSI_BUFF_LE_TO_U16(&buffer[2]);

	if (buffer_length < decoded_length + length) {
		SLSI_ERR_NODEV("Mib Buffer Length:%d Must be >= than decoded length:%d\n", buffer_length, (int)(decoded_length + length));
		return 0;
	}

	value->psid = SLSI_BUFF_LE_TO_U16(buffer);
	value->value.type = SLSI_MIB_TYPE_NONE;

	/* Walk the record payload element by element. */
	while (decoded_length < 4 + length) {
		size_t next_value_length;
		u8 type = slsi_mib_decode_type_length(&buffer[decoded_length], &next_value_length);

		if (buffer_length < decoded_length + next_value_length) {
			SLSI_ERR_NODEV("Mib Buffer Length:%d Must be >= than decoded length:%d\n", buffer_length, (int)(decoded_length + next_value_length));
			memset(value, 0x00, sizeof(struct slsi_mib_entry));
			return 0;
		}

		switch (type) {
		case SLSI_MIB_TYPE_UINT:
		{
			u32 v;

			decoded_length += slsi_mib_decode_uint32(&buffer[decoded_length], &v);
			/* If this is that last value then it is the "unitValue"
			 * if other values follow it is an Index Value
			 */
			if ((decoded_length < 4 + length) &&
			    (index_count != SLSI_MIB_MAX_INDEXES)) {
				value->index[index_count] = (u16)v;
				index_count++;
			} else {
				value->value.type = type;
				value->value.u.uintValue = v;
				if (decoded_length != 4 + length)
					SLSI_WARN_NODEV("Uint Decode length:%d != expected:%d\n", (u32)decoded_length, (u32)(4 + length));
			}
			break;
		}
		case SLSI_MIB_TYPE_INT:
			value->value.type = type;
			decoded_length += slsi_mib_decodeInt32(&buffer[decoded_length], &value->value.u.intValue);
			if (decoded_length != 4 + length)
				SLSI_WARN_NODEV("Int Decode length:%d != expected:%d\n", (u32)decoded_length, (u32)(4 + length));
			break;
		case SLSI_MIB_TYPE_OCTET:
			value->value.type = type;
			decoded_length += slsi_mib_decode_octet_str(&buffer[decoded_length], &value->value.u.octetValue);
			if (decoded_length != 4 + length)
				SLSI_WARN_NODEV("Octet Decode length:%d != expected:%d\n", (u32)decoded_length, (u32)(4 + length));
			break;
		default:
			SLSI_ERR_NODEV("Invalid MIB data type(%d). Possible mbulk corruption\n", type);
			memset(value, 0x00, sizeof(struct slsi_mib_entry));
			value->value.type = SLSI_MIB_TYPE_NONE;
			return 0;
		}
	}
	if (length % 2 == 1) {
		/* Remove the padding byte "0x00" in the encoded buffer.
		 * The Length value does NOT account for this pad value
		 * If the length is an Odd number the Pad values MUST be
		 * there if it is Even it will not be.
		 */
		if (buffer[decoded_length] != 0x00)
			SLSI_WARN_NODEV("psid:0x%.4X Padding Not Detected\n", value->psid);
		length++;
	}
	return 4 + length;
}
+
/* Build a concatenated "get" request for @psids_length entries into @buffer.
 * Allocates buffer->data (caller frees). Returns SLSI_MIB_STATUS_SUCCESS or
 * SLSI_MIB_STATUS_OUT_OF_MEMORY.
 */
int slsi_mib_encode_get_list(struct slsi_mib_data *buffer, u16 psids_length, const struct slsi_mib_get_entry *psids)
{
	size_t i;

	buffer->dataLength = 0;
	/* 13 bytes per get is loads of space for a psid, length field and
	 * the maximum number of encoded indexes.
	 */
	buffer->data = kmalloc((u32)(psids_length * 13), GFP_KERNEL);
	if (!buffer->data) {
		SLSI_ERR_NODEV("kmalloc(%d) failed\n", psids_length * 13);
		return SLSI_MIB_STATUS_OUT_OF_MEMORY;
	}
	/* Concatenate one encoded psid+indexes record per requested entry. */
	for (i = 0; i < psids_length; i++)
		buffer->dataLength += (u16)slsi_mib_encode_psid_indexs(&buffer->data[buffer->dataLength], &psids[i]);

	return SLSI_MIB_STATUS_SUCCESS;
}
+
+void slsi_mib_encode_get(struct slsi_mib_data *buffer, u16 psid, u16 idx)
+{
+ /* 13 Bytes per get will be loads of space for the max 3 indexes */
+ size_t size;
+ u8 tmp_buffer[13];
+ struct slsi_mib_get_entry entry;
+
+ memset(&entry, 0x00, sizeof(struct slsi_mib_get_entry));
+ entry.psid = psid;
+ entry.index[0] = idx;
+ size = slsi_mib_encode_psid_indexs(tmp_buffer, &entry);
+ slsi_mib_buf_append(buffer, size, tmp_buffer);
+}
+
+u8 *slsi_mib_find(struct slsi_mib_data *buffer, const struct slsi_mib_get_entry *entry)
+{
+ size_t buffer_length = buffer->dataLength;
+ u8 *buff = buffer->data;
+
+ if (buffer_length % 2 == 1) {
+ SLSI_WARN_NODEV("buffer_length(%d) %% 2 != 0 (Invalid Mib data Detected)\n", (int)buffer_length);
+ return NULL;
+ }
+ while (buffer_length >= 4) {
+ u16 psid = SLSI_BUFF_LE_TO_U16(buff);
+ size_t length = 4U + SLSI_BUFF_LE_TO_U16(&buff[2]);
+
+ if (entry->psid == psid) {
+ size_t i;
+ u32 idx;
+ size_t bytes_read = 0;
+
+ for (i = 0; i < SLSI_MIB_MAX_INDEXES; i++) {
+ if (!entry->index[i])
+ return buff;
+ bytes_read = slsi_mib_decode_uint32(&buff[4 + bytes_read], &idx);
+ if (entry->index[i] != idx)
+ break;
+ }
+ if (i == SLSI_MIB_MAX_INDEXES)
+ return buff;
+ }
+ if (length % 2 == 1)
+ /* Remove the padding byte "0x00" in the encoded buffer.
+ * The Length value does NOT account for this pad value
+ * If the length is an Odd number the Pad values MUST be
+ * there if it is Even it will not be.
+ */
+ length++;
+
+ buff += length;
+ buffer_length -= length;
+ }
+
+ return NULL;
+}
+
+struct slsi_mib_value *slsi_mib_decode_get_list(struct slsi_mib_data *buffer, u16 psids_length, const struct slsi_mib_get_entry *psids)
+{
+ struct slsi_mib_value *results = kmalloc_array((size_t)psids_length, sizeof(struct slsi_mib_value), GFP_KERNEL);
+ size_t i;
+ int len = 0;
+ char psids_not_found[150] = "";
+
+ if (!results) {
+ SLSI_ERR_NODEV("kmalloc(%d) failed\n", (int)(sizeof(struct slsi_mib_value) * psids_length));
+ return results;
+ }
+
+ for (i = 0; i < psids_length; i++) {
+ struct slsi_mib_entry value;
+ struct slsi_mib_data data;
+
+ data.data = slsi_mib_find(buffer, &psids[i]);
+ if (data.data) {
+ data.dataLength = buffer->dataLength - (data.data - buffer->data);
+ value.psid = psids[i].psid;
+ memcpy(value.index, psids[i].index, sizeof(value.index));
+ (void)slsi_mib_decode(&data, &value);
+
+ results[i] = value.value;
+ } else {
+ len += snprintf(&psids_not_found[0] + len, 150 - len, "%d ", psids[i].psid);
+ results[i].type = SLSI_MIB_TYPE_NONE;
+ }
+ }
+
+ if (len)
+ SLSI_ERR_NODEV("Could not find psid's: %s\n", psids_not_found);
+
+ return results;
+}
+
+u16 slsi_mib_encode_bool(struct slsi_mib_data *buffer, u16 psid, bool value, u16 idx)
+{
+ struct slsi_mib_entry v;
+
+ memset(&v, 0x00, sizeof(struct slsi_mib_entry));
+ v.psid = psid;
+ v.index[0] = idx;
+ v.value.type = SLSI_MIB_TYPE_BOOL;
+ v.value.u.boolValue = value;
+ return slsi_mib_encode(buffer, &v);
+}
+
+u16 slsi_mib_encode_int(struct slsi_mib_data *buffer, u16 psid, s32 value, u16 idx)
+{
+ struct slsi_mib_entry v;
+
+ memset(&v, 0x00, sizeof(struct slsi_mib_entry));
+ v.psid = psid;
+ v.index[0] = idx;
+ v.value.type = SLSI_MIB_TYPE_INT;
+ v.value.u.intValue = value;
+ return slsi_mib_encode(buffer, &v);
+}
+
+u16 slsi_mib_encode_uint(struct slsi_mib_data *buffer, u16 psid, u32 value, u16 idx)
+{
+ struct slsi_mib_entry v;
+
+ memset(&v, 0x00, sizeof(struct slsi_mib_entry));
+ v.psid = psid;
+ v.index[0] = idx;
+ v.value.type = SLSI_MIB_TYPE_UINT;
+ v.value.u.uintValue = value;
+ return slsi_mib_encode(buffer, &v);
+}
+
+u16 slsi_mib_encode_octet(struct slsi_mib_data *buffer, u16 psid, size_t dataLength, const u8 *data, u16 idx)
+{
+ struct slsi_mib_entry v;
+
+ memset(&v, 0x00, sizeof(struct slsi_mib_entry));
+ v.psid = psid;
+ v.index[0] = idx;
+ v.value.type = SLSI_MIB_TYPE_OCTET;
+ v.value.u.octetValue.dataLength = (u32)dataLength;
+ v.value.u.octetValue.data = (u8 *)data;
+ return slsi_mib_encode(buffer, &v);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/* Note: this is an auto-generated file. */
+
+#ifndef SLSI_MIB_H__
+#define SLSI_MIB_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Buffer of encoded MIB data: 'data' points at 'dataLength' valid bytes.
+ * Ownership of 'data' depends on the producing call — see each function's contract.
+ */
+struct slsi_mib_data {
+ u32 dataLength;
+ u8 *data;
+};
+
+#define SLSI_MIB_MAX_INDEXES 2U
+
+#define SLSI_MIB_TYPE_BOOL 0
+#define SLSI_MIB_TYPE_UINT 1
+#define SLSI_MIB_TYPE_INT 2
+#define SLSI_MIB_TYPE_OCTET 3U
+#define SLSI_MIB_TYPE_NONE 4
+
+/* Variant MIB value: 'type' (one of SLSI_MIB_TYPE_*) selects the active
+ * union member; SLSI_MIB_TYPE_NONE means no valid value (e.g. PSID not found).
+ */
+struct slsi_mib_value {
+ u8 type;
+ union {
+ bool boolValue;
+ s32 intValue;
+ u32 uintValue;
+ struct slsi_mib_data octetValue;
+ } u;
+};
+
+/* One decoded/encodable MIB attribute: PSID, optional indices and its value. */
+struct slsi_mib_entry {
+ u16 psid;
+ u16 index[SLSI_MIB_MAX_INDEXES]; /* 0 = no Index */
+ struct slsi_mib_value value;
+};
+
+/* Key for a MIB get/lookup: PSID plus optional indices, with no value attached. */
+struct slsi_mib_get_entry {
+ u16 psid;
+ u16 index[SLSI_MIB_MAX_INDEXES]; /* 0 = no Index */
+};
+
+#define SLSI_MIB_STATUS_SUCCESS 0x0000
+#define SLSI_MIB_STATUS_UNKNOWN_PSID 0x0001
+#define SLSI_MIB_STATUS_INVALID_INDEX 0x0002
+#define SLSI_MIB_STATUS_OUT_OF_RANGE 0x0003
+#define SLSI_MIB_STATUS_WRITE_ONLY 0x0004
+#define SLSI_MIB_STATUS_READ_ONLY 0x0005
+#define SLSI_MIB_STATUS_UNKNOWN_INTERFACE_TAG 0x0006
+#define SLSI_MIB_STATUS_INVALID_NUMBER_OF_INDICES 0x0007
+#define SLSI_MIB_STATUS_ERROR 0x0008
+#define SLSI_MIB_STATUS_UNSUPPORTED_ON_INTERFACE 0x0009
+#define SLSI_MIB_STATUS_UNAVAILABLE 0x000A
+#define SLSI_MIB_STATUS_NOT_FOUND 0x000B
+#define SLSI_MIB_STATUS_INCOMPATIBLE 0x000C
+#define SLSI_MIB_STATUS_OUT_OF_MEMORY 0x000D
+#define SLSI_MIB_STATUS_TO_MANY_REQUESTED_VARIABLES 0x000E
+#define SLSI_MIB_STATUS_NOT_TRIED 0x000F
+#define SLSI_MIB_STATUS_FAILURE 0xFFFF
+
+/*******************************************************************************
+ *
+ * NAME
+ * slsi_mib_encode_get Functions
+ *
+ * DESCRIPTION
+ * For use when getting data from the Wifi Stack.
+ * These functions append the encoded data to the "buffer".
+ *
+ * index == 0 where there is no index required
+ *
+ * EXAMPLE
+ * {
+ * static const struct slsi_mib_get_entry getValues[] = {
+ * { PSID1, { 0, 0 } },
+ * { PSID2, { 3, 0 } },
+ * };
+ * struct slsi_mib_data buffer;
+ * slsi_mib_encode_get_list(&buffer,
+ * sizeof(getValues) / sizeof(struct slsi_mib_get_entry),
+ * getValues);
+ * }
+ * or
+ * {
+ * struct slsi_mib_data buffer = {0, NULL};
+ * slsi_mib_encode_get(&buffer, PSID1, 0);
+ * slsi_mib_encode_get(&buffer, PSID2, 3);
+ * }
+ * RETURN
+ * SlsiResult: See SLSI_MIB_STATUS_*
+ *
+ *******************************************************************************/
+void slsi_mib_encode_get(struct slsi_mib_data *buffer, u16 psid, u16 index);
+int slsi_mib_encode_get_list(struct slsi_mib_data *buffer, u16 psidsLength, const struct slsi_mib_get_entry *psids);
+
+/*******************************************************************************
+ *
+ * NAME
+ *  slsi_mib_decode Functions
+ *
+ * DESCRIPTION
+ * For use when getting data from the Wifi Stack.
+ *
+ * index == 0 where there is no index required
+ *
+ * EXAMPLE
+ * {
+ * static const struct slsi_mib_get_entry getValues[] = {
+ * { PSID1, { 0, 0 } },
+ * { PSID2, { 3, 0 } },
+ * };
+ * struct slsi_mib_data buffer = rxMibData; # Buffer with encoded Mib Data
+ *
+ * getValues = slsi_mib_decode_get_list(&buffer,
+ * sizeof(getValues) / sizeof(struct slsi_mib_get_entry),
+ * getValues);
+ *
+ * print("PSID1 = %d\n", getValues[0].u.uintValue);
+ * print("PSID2.3 = %s\n", getValues[1].u.boolValue?"TRUE":"FALSE");
+ *
+ * kfree(getValues);
+ *
+ * }
+ * or
+ * {
+ * u8* buffer = rxMibData; # Buffer with encoded Mib Data
+ * size_t offset=0;
+ * struct slsi_mib_entry value;
+ *
+ * offset += slsi_mib_decode(&buffer[offset], &value);
+ * print("PSID1 = %d\n", value.u.uintValue);
+ *
+ * offset += slsi_mib_decode(&buffer[offset], &value);
+ * print("PSID2.3 = %s\n", value.u.boolValue?"TRUE":"FALSE");
+ *
+ * }
+ *
+ *******************************************************************************/
+size_t slsi_mib_decode(struct slsi_mib_data *buffer, struct slsi_mib_entry *value);
+struct slsi_mib_value *slsi_mib_decode_get_list(struct slsi_mib_data *buffer, u16 psidsLength, const struct slsi_mib_get_entry *psids);
+
+/*******************************************************************************
+ *
+ * NAME
+ * slsi_mib_encode Functions
+ *
+ * DESCRIPTION
+ * For use when setting data in the Wifi Stack.
+ * These functions append the encoded data to the "buffer".
+ *
+ * index == 0 where there is no index required
+ *
+ * EXAMPLE
+ * {
+ * u8 octets[2] = {0x00, 0x01};
+ * struct slsi_mib_data buffer = {0, NULL};
+ * slsi_mib_encode_bool(&buffer, PSID1, TRUE, 0); # Boolean set with no index
+ *      slsi_mib_encode_int(&buffer, PSID2, -1234, 1);                     # Signed Integer set on index 1
+ *      slsi_mib_encode_uint(&buffer, PSID2, 1234, 3);                     # Unsigned Integer set on index 3
+ * slsi_mib_encode_octet(&buffer, PSID3, sizeof(octets), octets, 0); # Octet set with no index
+ * }
+ * or
+ * {
+ *      # Unsigned Integer set on index 3
+ *      struct slsi_mib_data buffer = {0, NULL};
+ *      struct slsi_mib_entry value;
+ *      value.psid = psid;
+ *      value.index[0] = 3;
+ *      value.index[1] = 0;
+ *      value.value.type = SLSI_MIB_TYPE_UINT;
+ *      value.value.u.uintValue = 1234;
+ *      slsi_mib_encode(buffer, &value);
+ *  }
+ * RETURN
+ *  See SLSI_MIB_STATUS_*
+ *
+ *******************************************************************************/
+u16 slsi_mib_encode(struct slsi_mib_data *buffer, struct slsi_mib_entry *value);
+u16 slsi_mib_encode_bool(struct slsi_mib_data *buffer, u16 psid, bool value, u16 index);
+u16 slsi_mib_encode_int(struct slsi_mib_data *buffer, u16 psid, s32 value, u16 index);
+u16 slsi_mib_encode_uint(struct slsi_mib_data *buffer, u16 psid, u32 value, u16 index);
+u16 slsi_mib_encode_octet(struct slsi_mib_data *buffer, u16 psid, size_t dataLength, const u8 *data, u16 index);
+
+/*******************************************************************************
+ *
+ * NAME
+ * SlsiWifiMib Low level Encode/Decode functions
+ *
+ *******************************************************************************/
+size_t slsi_mib_encode_uint32(u8 *buffer, u32 value);
+size_t slsi_mib_encode_int32(u8 *buffer, s32 signedValue);
+size_t slsi_mib_encode_octet_str(u8 *buffer, struct slsi_mib_data *octetValue);
+
+size_t slsi_mib_decodeUint32(u8 *buffer, u32 *value);
+size_t slsi_mib_decodeInt32(u8 *buffer, s32 *value);
+size_t slsi_mib_decodeUint64(u8 *buffer, u64 *value);
+size_t slsi_mib_decodeInt64(u8 *buffer, s64 *value);
+size_t slsi_mib_decode_octet_str(u8 *buffer, struct slsi_mib_data *octetValue);
+
+/*******************************************************************************
+ *
+ * NAME
+ * SlsiWifiMib Helper Functions
+ *
+ *******************************************************************************/
+
+/* Find the offset to psid data in an encoded buffer
+ * {
+ *      struct slsi_mib_data buffer = rxMibData;                 # Buffer with encoded Mib Data
+ *      struct slsi_mib_get_entry value = {PSID1, {0x01, 0x00}}; # Find value for PSID1.1
+ *      u8 *mibdata = slsi_mib_find(&buffer, &value);
+ *      if (mibdata)
+ *              print("Mib Data for PSID1.1 Found\n");
+ * }
+ */
+u8 *slsi_mib_find(struct slsi_mib_data *buffer, const struct slsi_mib_get_entry *entry);
+
+/* Append data to a Buffer */
+void slsi_mib_buf_append(struct slsi_mib_data *dst, size_t bufferLength, u8 *buffer);
+
+/*******************************************************************************
+ *
+ * PSID Definitions
+ *
+ *******************************************************************************/
+
+/*******************************************************************************
+ * NAME : Dot11TdlsPeerUapsdIndicationWindow
+ * PSID : 53 (0x0035)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : beacon intervals
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * The minimum time after the last TPU SP, before a RAME_TPU_SP indication
+ * can be issued.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_TDLS_PEER_UAPSD_INDICATION_WINDOW 0x0035
+
+/*******************************************************************************
+ * NAME : Dot11AssociationSaQueryMaximumTimeout
+ * PSID : 100 (0x0064)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * Timeout (in TUs) before giving up on a Peer that has not responded to a
+ * SA Query frame.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_ASSOCIATION_SA_QUERY_MAXIMUM_TIMEOUT 0x0064
+
+/*******************************************************************************
+ * NAME : Dot11AssociationSaQueryRetryTimeout
+ * PSID : 101 (0x0065)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 201
+ * DESCRIPTION :
+ * Timeout (in TUs) before trying a Query Request frame.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_ASSOCIATION_SA_QUERY_RETRY_TIMEOUT 0x0065
+
+/*******************************************************************************
+ * NAME : Dot11RtsThreshold
+ * PSID : 121 (0x0079)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : octet
+ * MIN : 0
+ * MAX : 65536
+ * DEFAULT : 65536
+ * DESCRIPTION :
+ * Size of an MPDU, below which an RTS/CTS handshake shall not be performed,
+ * except as RTS/CTS is used as a cross modulation protection mechanism as
+ * defined in 9.10. An RTS/CTS handshake shall be performed at the beginning
+ * of any frame exchange sequence where the MPDU is of type Data or
+ * Management, the MPDU has an individual address in the Address1 field, and
+ * the length of the MPDU is greater than this threshold. (For additional
+ * details, refer to Table 21 in 9.7.) Setting larger than the maximum MSDU
+ * size shall have the effect of turning off the RTS/CTS handshake for
+ * frames of Data or Management type transmitted by this STA. Setting to
+ * zero shall have the effect of turning on the RTS/CTS handshake for all
+ * frames of Data or Management type transmitted by this STA.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RTS_THRESHOLD 0x0079
+
+/*******************************************************************************
+ * NAME : Dot11ShortRetryLimit
+ * PSID : 122 (0x007A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 255
+ * DEFAULT : 32
+ * DESCRIPTION :
+ * Maximum number of transmission attempts of a frame, the length of which
+ * is less than or equal to dot11RTSThreshold, that shall be made before a
+ * failure condition is indicated.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_SHORT_RETRY_LIMIT 0x007A
+
+/*******************************************************************************
+ * NAME : Dot11LongRetryLimit
+ * PSID : 123 (0x007B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 255
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * Maximum number of transmission attempts of a frame, the length of which
+ * is greater than dot11RTSThreshold, that shall be made before a failure
+ * condition is indicated.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_LONG_RETRY_LIMIT 0x007B
+
+/*******************************************************************************
+ * NAME : Dot11FragmentationThreshold
+ * PSID : 124 (0x007C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 256
+ * MAX : 11500
+ * DEFAULT : 3000
+ * DESCRIPTION :
+ * Current maximum size, in octets, of the MPDU that may be delivered to the
+ * security encapsulation. This maximum size does not apply when an MSDU is
+ * transmitted using an HT-immediate or HTdelayed Block Ack agreement, or
+ * when an MSDU or MMPDU is carried in an AMPDU that does not contain a VHT
+ * single MPDU. Fields added to the frame by security encapsulation are not
+ * counted against the limit specified. Except as described above, an MSDU
+ * or MMPDU is fragmented when the resulting frame has an individual address
+ * in the Address1 field, and the length of the frame is larger than this
+ * threshold, excluding security encapsulation fields. The default value is
+ * the lesser of 11500 or the aMPDUMaxLength or the aPSDUMaxLength of the
+ * attached PHY and the value never exceeds the lesser of 11500 or the
+ * aMPDUMaxLength or the aPSDUMaxLength of the attached PHY.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_FRAGMENTATION_THRESHOLD 0x007C
+
+/*******************************************************************************
+ * NAME : Dot11RtsSuccessCount
+ * PSID : 146 (0x0092)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * This counter shall increment when a CTS is received in response to an
+ * RTS.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RTS_SUCCESS_COUNT 0x0092
+
+/*******************************************************************************
+ * NAME : Dot11AckFailureCount
+ * PSID : 148 (0x0094)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * This counter shall increment when an ACK is not received when expected.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_ACK_FAILURE_COUNT 0x0094
+
+/*******************************************************************************
+ * NAME : Dot11MulticastReceivedFrameCount
+ * PSID : 150 (0x0096)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * This counter shall increment when a MSDU is received with the multicast
+ * bit set in the destination MAC address.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_MULTICAST_RECEIVED_FRAME_COUNT 0x0096
+
+/*******************************************************************************
+ * NAME : Dot11FcsErrorCount
+ * PSID : 151 (0x0097)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : -9223372036854775808
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * This counter shall increment when an FCS error is detected in a received
+ * MPDU.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_FCS_ERROR_COUNT 0x0097
+
+/*******************************************************************************
+ * NAME : Dot11WepUndecryptableCount
+ * PSID : 153 (0x0099)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * This counter shall increment when a frame is received with the WEP
+ * subfield of the Frame Control field set to one and the WEPOn value for
+ * the key mapped to the transmitter's MAC address indicates that the
+ * frame should not have been encrypted or that frame is discarded due to
+ * the receiving STA not implementing the privacy option.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_WEP_UNDECRYPTABLE_COUNT 0x0099
+
+/*******************************************************************************
+ * NAME : Dot11ManufacturerProductVersion
+ * PSID : 183 (0x00B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 300
+ * DEFAULT :
+ * DESCRIPTION :
+ * Printable string used to identify the manufacturer's product version
+ * of the resource.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_MANUFACTURER_PRODUCT_VERSION 0x00B7
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsStaAddress
+ * PSID : 430 (0x01AE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The MAC address of the STA to which the statistics in this conceptual row
+ * belong.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_STA_ADDRESS 0x01AE
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsTkipicvErrors
+ * PSID : 433 (0x01B1)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counts the number of TKIP ICV errors encountered when decrypting packets
+ * for the STA.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_TKIPICV_ERRORS 0x01B1
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsTkipLocalMicFailures
+ * PSID : 434 (0x01B2)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counts the number of MIC failures encountered when checking the integrity
+ * of packets received from the STA at this entity.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_TKIP_LOCAL_MIC_FAILURES 0x01B2
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsTkipRemoteMicFailures
+ * PSID : 435 (0x01B3)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counts the number of MIC failures encountered by the STA identified by
+ * dot11RSNAStatsSTAAddress and reported back to this entity.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_TKIP_REMOTE_MIC_FAILURES 0x01B3
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsCcmpReplays
+ * PSID : 436 (0x01B4)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of received CCMP MPDUs discarded by the replay mechanism.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_CCMP_REPLAYS 0x01B4
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsCcmpDecryptErrors
+ * PSID : 437 (0x01B5)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of received MPDUs discarded by the CCMP decryption algorithm.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_CCMP_DECRYPT_ERRORS 0x01B5
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsTkipReplays
+ * PSID : 438 (0x01B6)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counts the number of TKIP replay errors detected.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_TKIP_REPLAYS 0x01B6
+
+/*******************************************************************************
+ * NAME : Dot11RsnaStatsRobustMgmtCcmpReplays
+ * PSID : 441 (0x01B9)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of received Robust Management frame MPDUs discarded due to
+ * CCMP replay errors
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_RSNA_STATS_ROBUST_MGMT_CCMP_REPLAYS 0x01B9
+
+/*******************************************************************************
+ * NAME : UnifiMlmeConnectionTimeout
+ * PSID : 2000 (0x07D0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_CONNECTION_TIMEOUT 0x07D0
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanChannelMaxScanTime
+ * PSID : 2001 (0x07D1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 14
+ * MAX : 14
+ * DEFAULT : { 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Test only: overrides max_scan_time. 0 indicates not used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_CHANNEL_MAX_SCAN_TIME 0x07D1
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanChannelProbeInterval
+ * PSID : 2002 (0x07D2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 14
+ * MAX : 14
+ * DEFAULT : { 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Test only: overrides probe interval. 0 indicates not used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_CHANNEL_PROBE_INTERVAL 0x07D2
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanChannelRule
+ * PSID : 2003 (0x07D3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 4
+ * MAX : 4
+ * DEFAULT : { 0X00, 0X01, 0X00, 0X01 }
+ * DESCRIPTION :
+ * Rules for channel scanners.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_CHANNEL_RULE 0x07D3
+
+/*******************************************************************************
+ * NAME : UnifiMlmeDataReferenceTimeout
+ * PSID : 2005 (0x07D5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65534
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum time, in TU, allowed for the data in data references
+ * corresponding to MLME primitives to be made available to the firmware.
+ * The special value 0 specifies an infinite timeout.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_DATA_REFERENCE_TIMEOUT 0x07D5
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanProbeInterval
+ * PSID : 2007 (0x07D7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_PROBE_INTERVAL 0x07D7
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanHighRssiThreshold
+ * PSID : 2008 (0x07D8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -90
+ * DESCRIPTION :
+ * Minimum RSSI, in dB, for a scan indication to be kept.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_HIGH_RSSI_THRESHOLD 0x07D8
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanDeltaRssiThreshold
+ * PSID : 2010 (0x07DA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 255
+ * DEFAULT : 20
+ * DESCRIPTION :
+ * Magnitude of the change in RSSI for which a scan result will be issued.
+ * In dB.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_DELTA_RSSI_THRESHOLD 0x07DA
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanMaximumAge
+ * PSID : 2014 (0x07DE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_MAXIMUM_AGE 0x07DE
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanMaximumResults
+ * PSID : 2015 (0x07DF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * Max number of scan results, per sps, which will be stored before the
+ * oldest result is discarded, irrespective of its age. The value 0
+ * specifies no maximum.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_MAXIMUM_RESULTS 0x07DF
+
+/*******************************************************************************
+ * NAME : UnifiMlmeAutonomousScanNoisy
+ * PSID : 2016 (0x07E0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_AUTONOMOUS_SCAN_NOISY 0x07E0
+
+/*******************************************************************************
+ * NAME : UnifiChannelBusyThreshold
+ * PSID : 2018 (0x07E2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 100
+ * DEFAULT : 25
+ * DESCRIPTION :
+ * The threshold in percentage of CCA busy time when a channel would be
+ * considered busy
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CHANNEL_BUSY_THRESHOLD 0x07E2
+
+/*******************************************************************************
+ * NAME : UnifiMacSequenceNumberRandomisationActivated
+ * PSID : 2020 (0x07E4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enabling Sequence Number Randomisation to be applied for Probe Requests
+ * when scanning. Note: Randomisation only happens, if mac address gets
+ * randomised.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_SEQUENCE_NUMBER_RANDOMISATION_ACTIVATED 0x07E4
+
+/*******************************************************************************
+ * NAME : UnifiFirmwareBuildId
+ * PSID : 2021 (0x07E5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Numeric build identifier for this firmware build. This should normally be
+ * displayed in decimal. The textual build identifier is available via the
+ * standard dot11manufacturerProductVersion MIB attribute.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FIRMWARE_BUILD_ID 0x07E5
+
+/*******************************************************************************
+ * NAME : UnifiChipVersion
+ * PSID : 2022 (0x07E6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Numeric identifier for the UniFi silicon revision (as returned by the
+ * GBL_CHIP_VERSION hardware register). Other than being different for each
+ * design variant (but not for alternative packaging options), the
+ * particular values returned do not have any significance.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CHIP_VERSION 0x07E6
+
+/*******************************************************************************
+ * NAME : UnifiFirmwarePatchBuildId
+ * PSID : 2023 (0x07E7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Numeric build identifier for the patch set that has been applied to this
+ * firmware image. This should normally be displayed in decimal. For a
+ * patched ROM build there will be two build identifiers, the first will
+ * correspond to the base ROM image, the second will correspond to the patch
+ * set that has been applied.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FIRMWARE_PATCH_BUILD_ID 0x07E7
+
+/*******************************************************************************
+ * NAME : UnifiHtCapabilitiesSoftAp
+ * PSID : 2028 (0x07EC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 21
+ * MAX : 21
+ * DEFAULT : { 0XEF, 0X0A, 0X17, 0XFF, 0XFF, 0X00, 0X00, 0X01, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * HT capabilities of the chip. See SC-503520-SP for further details. NOTE:
+ * Greenfield has been disabled due to interoperability issues with SGI.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HT_CAPABILITIES_SOFT_AP 0x07EC
+
+/*******************************************************************************
+ * NAME : UnifiSoftAp40MhzOn24g
+ * PSID : 2029 (0x07ED)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables 40MHz operation on 2.4GHz band for SoftAP.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SOFT_AP40_MHZ_ON24G 0x07ED
+
+/*******************************************************************************
+ * NAME : UnifiBasicCapabilities
+ * PSID : 2030 (0x07EE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X0730
+ * DESCRIPTION :
+ * The 16-bit field follows the coding of IEEE 802.11 Capability
+ * Information.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BASIC_CAPABILITIES 0x07EE
+
+/*******************************************************************************
+ * NAME : UnifiExtendedCapabilities
+ * PSID : 2031 (0x07EF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 9
+ * MAX : 9
+ * DEFAULT : { 0X01, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X40, 0X80 }
+ * DESCRIPTION :
+ * Extended capabilities. Bit field definition and coding follows IEEE
+ * 802.11 Extended Capability Information Element, with spare subfields for
+ * capabilities that are independent from chip/firmware implementation.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_EXTENDED_CAPABILITIES 0x07EF
+
+/*******************************************************************************
+ * NAME : UnifiHtCapabilities
+ * PSID : 2032 (0x07F0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 21
+ * MAX : 21
+ * DEFAULT : { 0XEF, 0X0A, 0X17, 0XFF, 0XFF, 0X00, 0X00, 0X01, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * HT capabilities of the chip. See SC-503520-SP for further details. NOTE:
+ * Greenfield has been disabled due to interoperability issues with SGI.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HT_CAPABILITIES 0x07F0
+
+/*******************************************************************************
+ * NAME : UnifiRsnCapabilities
+ * PSID : 2034 (0x07F2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSN_CAPABILITIES 0x07F2
+
+/*******************************************************************************
+ * NAME : Unifi24G40MhzChannels
+ * PSID : 2035 (0x07F3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables 40Mz wide channels in the 2.4G band for STA.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI24_G40_MHZ_CHANNELS 0x07F3
+
+/*******************************************************************************
+ * NAME : UnifiExtendedCapabilitiesDisabled
+ * PSID : 2036 (0x07F4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Suppress
+ * extended capabilities IE being sent in the association request. Please
+ * note that this may fix IOP issues with Aruba APs in WMMAC. Signed Decimal
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_EXTENDED_CAPABILITIES_DISABLED 0x07F4
+
+/*******************************************************************************
+ * NAME : UnifiSupportedDataRates
+ * PSID : 2041 (0x07F9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * UNITS : 500 kbps
+ * MIN : 2
+ * MAX : 16
+ * DEFAULT : { 0X02, 0X04, 0X0B, 0X0C, 0X12, 0X16, 0X18, 0X24, 0X30, 0X48, 0X60, 0X6C }
+ * DESCRIPTION :
+ * Defines the supported non-HT data rates. It is encoded as N+1 octets
+ * where the first octet is N and the subsequent octets each describe a
+ * single supported rate.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SUPPORTED_DATA_RATES 0x07F9
+
+/*******************************************************************************
+ * NAME : UnifiRadioMeasurementActivated
+ * PSID : 2043 (0x07FB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * When TRUE Radio Measurements are supported.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_MEASUREMENT_ACTIVATED 0x07FB
+
+/*******************************************************************************
+ * NAME : UnifiRadioMeasurementCapabilities
+ * PSID : 2044 (0x07FC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 5
+ * MAX : 5
+ * DEFAULT : { 0X71, 0X00, 0X00, 0X00, 0X04 }
+ * DESCRIPTION :
+ * RM Enabled capabilities of the chip. See SC-503520-SP for further
+ * details.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_MEASUREMENT_CAPABILITIES 0x07FC
+
+/*******************************************************************************
+ * NAME : UnifiVhtActivated
+ * PSID : 2045 (0x07FD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables VHT mode.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_VHT_ACTIVATED 0x07FD
+
+/*******************************************************************************
+ * NAME : UnifiHtActivated
+ * PSID : 2046 (0x07FE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables HT mode.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HT_ACTIVATED 0x07FE
+
+/*******************************************************************************
+ * NAME : UnifiEnableTwoSimultaneousPassiveScansSameBand
+ * PSID : 2047 (0x07FF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable two passive scans to be simultaneously scheduled on two distinct
+ * channels at the same time.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ENABLE_TWO_SIMULTANEOUS_PASSIVE_SCANS_SAME_BAND 0x07FF
+
+/*******************************************************************************
+ * NAME : UnifiRoamingEnabled
+ * PSID : 2049 (0x0801)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enable Roaming functionality
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAMING_ENABLED 0x0801
+
+/*******************************************************************************
+ * NAME : UnifiRssiRoamScanTrigger
+ * PSID : 2050 (0x0802)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -75
+ * DESCRIPTION :
+ * The RSSI value, in dBm, below which roaming scan shall start.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_ROAM_SCAN_TRIGGER 0x0802
+
+/*******************************************************************************
+ * NAME : UnifiRoamDeltaTrigger
+ * PSID : 2051 (0x0803)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 255
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Hysteresis value, in dBm, for UnifiRssiRoamScanTrigger and
+ * unifiCURoamScanTrigger. i.e.: If the current AP RSSI is greater than
+ * UnifiRssiRoamScanTrigger+ UnifiRssiRoamDeltaTrigger, soft roaming scan
+ * can be terminated.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_DELTA_TRIGGER 0x0803
+
+/*******************************************************************************
+ * NAME : UnifiRoamCachedChannelScanPeriod
+ * PSID : 2052 (0x0804)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 1
+ * MAX : 4294967295
+ * DEFAULT : 20000000
+ * DESCRIPTION :
+ * The scan period for cached channels background roaming (microseconds)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_CACHED_CHANNEL_SCAN_PERIOD 0x0804
+
+/*******************************************************************************
+ * NAME : UnifiFullRoamScanPeriod
+ * PSID : 2053 (0x0805)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 1
+ * MAX : 4294967295
+ * DEFAULT : 30000000
+ * DESCRIPTION :
+ * NCHO: For certification and Host use only.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FULL_ROAM_SCAN_PERIOD 0x0805
+
+/*******************************************************************************
+ * NAME : UnifiRoamSoftRoamingEnabled
+ * PSID : 2054 (0x0806)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables CASE_1 and CASE_2 scans. Host writes to the key, firmware reads
+ * it. See MFW-183 for details.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SOFT_ROAMING_ENABLED 0x0806
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanBand
+ * PSID : 2055 (0x0807)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 2
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Indicates whether only intra-band or all-band should be used for roaming
+ * scan. 2 - Roaming across band 1 - Roaming within band
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_BAND 0x0807
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanMaxActiveChannelTime
+ * PSID : 2057 (0x0809)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 120
+ * DESCRIPTION :
+ * NCHO: Name confusion for Host compatibility.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_MAX_ACTIVE_CHANNEL_TIME 0x0809
+
+/*******************************************************************************
+ * NAME : UnifiRoamFullChannelScanFrequency
+ * PSID : 2058 (0x080A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 9
+ * DESCRIPTION :
+ * Every how many cached channel scans run a full channel scan.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_FULL_CHANNEL_SCAN_FREQUENCY 0x080A
+
+/*******************************************************************************
+ * NAME : UnifiRoamMode
+ * PSID : 2060 (0x080C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Enable/Disable host resume when roaming. 0: Wake up the host all the
+ * time. 1: Only wakeup the host if the AP is not white-listed. 2: Don't
+ * wake up the host.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_MODE 0x080C
+
+/*******************************************************************************
+ * NAME : UnifiRssiRoamScanNoCandidateDeltaTrigger
+ * PSID : 2064 (0x0810)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 255
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * The value, in dBm, by which unifiRssiRoamScanTrigger is lowered when no
+ * roaming candidates are found.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_ROAM_SCAN_NO_CANDIDATE_DELTA_TRIGGER 0x0810
+
+/*******************************************************************************
+ * NAME : UnifiRoamEapTimeout
+ * PSID : 2065 (0x0811)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * Timeout, in ms, for receiving the first EAP/EAPOL frame from the AP
+ * during roaming
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_EAP_TIMEOUT 0x0811
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanControl
+ * PSID : 2067 (0x0813)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * NCHO: Enable MCD NCHO feature.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_CONTROL 0x0813
+
+/*******************************************************************************
+ * NAME : UnifiRoamDfsScanMode
+ * PSID : 2068 (0x0814)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * NCHO: For certification and Host use only.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_DFS_SCAN_MODE 0x0814
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanHomeTime
+ * PSID : 2069 (0x0815)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 40
+ * MAX : 65535
+ * DEFAULT : 45
+ * DESCRIPTION :
+ * NCHO: The time, in TU, to spend NOT scanning during a HomeAway scan.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_HOME_TIME 0x0815
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanHomeAwayTime
+ * PSID : 2070 (0x0816)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 40
+ * MAX : 65535
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * NCHO: The time, in TU, to spend scanning during a HomeAway scan.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_HOME_AWAY_TIME 0x0816
+
+/*******************************************************************************
+ * NAME : UnifiRoamScanNProbe
+ * PSID : 2072 (0x0818)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * NCHO: The number of ProbeRequest frames per channel.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_SCAN_NPROBE 0x0818
+
+/*******************************************************************************
+ * NAME : UnifiApOlbcDuration
+ * PSID : 2076 (0x081C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 300
+ * DESCRIPTION :
+ * How long, in milliseconds, the AP enables reception of BEACON frames to
+ * perform Overlapping Legacy BSS Condition(OLBC). If set to 0 then OLBC is
+ * disabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_OLBC_DURATION 0x081C
+
+/*******************************************************************************
+ * NAME : UnifiApOlbcInterval
+ * PSID : 2077 (0x081D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2000
+ * DESCRIPTION :
+ * How long, in milliseconds, between periods of receiving BEACON frames to
+ * perform Overlapping Legacy BSS Condition(OLBC). This value MUST exceed
+ * the OLBC duration MIB unifiApOlbcDuration. If set to 0 then OLBC is
+ * disabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_OLBC_INTERVAL 0x081D
+
+/*******************************************************************************
+ * NAME : UnifiMlmednsSupportEnabled
+ * PSID : 2078 (0x081E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * This MIB enables support for transmitting DNS frame via MLME.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMEDNS_SUPPORT_ENABLED 0x081E
+
+/*******************************************************************************
+ * NAME : UnifiOffchannelScheduleTimeout
+ * PSID : 2079 (0x081F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * Maximum timeout in ms the Offchannel FSM will wait until the complete
+ * dwell time is scheduled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OFFCHANNEL_SCHEDULE_TIMEOUT 0x081F
+
+/*******************************************************************************
+ * NAME : UnifiFrameResponseTimeout
+ * PSID : 2080 (0x0820)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 500
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * Timeout, in TU, to wait for a frame(Auth, Assoc, ReAssoc) after Rame
+ * replies to a send frame request
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAME_RESPONSE_TIMEOUT 0x0820
+
+/*******************************************************************************
+ * NAME : UnifiConnectionFailureTimeout
+ * PSID : 2081 (0x0821)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 20000
+ * DEFAULT : 10000
+ * DESCRIPTION :
+ * Timeout, in TU, for a frame retry before giving up.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CONNECTION_FAILURE_TIMEOUT 0x0821
+
+/*******************************************************************************
+ * NAME : UnifiConnectingProbeTimeout
+ * PSID : 2082 (0x0822)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * How long, in TU, to wait for a ProbeRsp when synchronising before
+ * resending a ProbeReq
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CONNECTING_PROBE_TIMEOUT 0x0822
+
+/*******************************************************************************
+ * NAME : UnifiDisconnectTimeout
+ * PSID : 2083 (0x0823)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 3000
+ * DEFAULT : 1500
+ * DESCRIPTION :
+ * Timeout, in milliseconds, to perform a disconnect or disconnect all STAs
+ * (triggered by MLME_DISCONNECT-REQ or MLME_DISCONNECT-REQ
+ * 00:00:00:00:00:00) before responding with MLME-DISCONNECT-IND and
+ * aborting the disconnection attempt. This is particularly important when a
+ * SoftAP is attempting to disconnect associated stations which might have
+ * "silently" left the ESS.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DISCONNECT_TIMEOUT 0x0823
+
+/*******************************************************************************
+ * NAME : UnifiFrameResponseCfmTxLifetimeTimeout
+ * PSID : 2084 (0x0824)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Timeout, in TU, to wait to retry a frame (Auth, Assoc, ReAssoc) after TX
+ * Cfm transmission_status = TxLifetime.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAME_RESPONSE_CFM_TX_LIFETIME_TIMEOUT 0x0824
+
+/*******************************************************************************
+ * NAME : UnifiFrameResponseCfmFailureTimeout
+ * PSID : 2085 (0x0825)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 40
+ * DESCRIPTION :
+ * Timeout, in TU, to wait to retry a frame (Auth, Assoc, ReAssoc) after TX
+ * Cfm transmission_status != Successful | TxLifetime.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAME_RESPONSE_CFM_FAILURE_TIMEOUT 0x0825
+
+/*******************************************************************************
+ * NAME : UnifiForceActiveDuration
+ * PSID : 2086 (0x0826)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 1000
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * How long, in milliseconds, the firmware temporarily extends PowerSave for
+ * STA as a workaround for wonky APs such as D-link.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FORCE_ACTIVE_DURATION 0x0826
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanMaxNumberOfProbeSets
+ * PSID : 2087 (0x0827)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_MAX_NUMBER_OF_PROBE_SETS 0x0827
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanStopIfLessThanXFrames
+ * PSID : 2088 (0x0828)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * Stop scanning on a channel if less than X Beacons or Probe Responses are
+ * received.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_STOP_IF_LESS_THAN_XFRAMES 0x0828
+
+/*******************************************************************************
+ * NAME : UnifiApAssociationTimeout
+ * PSID : 2089 (0x0829)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2000
+ * DESCRIPTION :
+ * SoftAP: Permitted time for a station to complete association with FW
+ * acting as AP in milliseconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_ASSOCIATION_TIMEOUT 0x0829
+
+/*******************************************************************************
+ * NAME : UnifiHostNumAntennaControlActivated
+ * PSID : 2091 (0x082B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Host has a control of number of antenna to use
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HOST_NUM_ANTENNA_CONTROL_ACTIVATED 0x082B
+
+/*******************************************************************************
+ * NAME : UnifiPeerBandwidth
+ * PSID : 2094 (0x082E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The bandwidth used with peer station prior it disconnects
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PEER_BANDWIDTH 0x082E
+
+/*******************************************************************************
+ * NAME : UnifiCurrentPeerNss
+ * PSID : 2095 (0x082F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of spatial streams used with peer station prior it disconnects
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_PEER_NSS 0x082F
+
+/*******************************************************************************
+ * NAME : UnifiPeerTxDataRate
+ * PSID : 2096 (0x0830)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The tx rate that was used for transmissions prior disconnection
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PEER_TX_DATA_RATE 0x0830
+
+/*******************************************************************************
+ * NAME : UnifiPeerRssi
+ * PSID : 2097 (0x0831)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * The recorded RSSI from peer station prior it disconnects
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PEER_RSSI 0x0831
+
+/*******************************************************************************
+ * NAME : UnifiMlmeStationInactivityTimeout
+ * PSID : 2098 (0x0832)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * Timeout, in seconds, for instigating ConnectionFailure procedures. Setting
+ * it to less than 3 seconds may result in frequent disconnection or roaming
+ * with the AP. Disable with Zero. Values lower than
+ * INACTIVITY_MINIMUM_TIMEOUT becomes INACTIVITY_MINIMUM_TIMEOUT.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_STATION_INACTIVITY_TIMEOUT 0x0832
+
+/*******************************************************************************
+ * NAME : UnifiMlmeCliInactivityTimeout
+ * PSID : 2099 (0x0833)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Timeout, in seconds, for instigating ConnectionFailure procedures. Zero
+ * value disables the feature. Any value written lower than
+ * INACTIVITY_MINIMUM_TIMEOUT becomes INACTIVITY_MINIMUM_TIMEOUT.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_CLI_INACTIVITY_TIMEOUT 0x0833
+
+/*******************************************************************************
+ * NAME : UnifiMlmeStationInitialKickTimeout
+ * PSID : 2100 (0x0834)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Timeout, in
+ * milliseconds, for sending the AP a NULL frame to kick off the EAPOL
+ * exchange.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_STATION_INITIAL_KICK_TIMEOUT 0x0834
+
+/*******************************************************************************
+ * NAME : UnifiUartConfigure
+ * PSID : 2110 (0x083E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * UART configuration using the values of the other unifiUart* attributes.
+ * The value supplied for this attribute is ignored.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_UART_CONFIGURE 0x083E
+
+/*******************************************************************************
+ * NAME : UnifiUartPios
+ * PSID : 2111 (0x083F)
+ * PER INTERFACE?: NO
+ * TYPE : unifiUartPios
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Specification of which PIOs should be connected to the UART. Currently
+ * defined values are: 1 - UART not used; all PIOs are available for other
+ * uses. 2 - Data transmit and receive connected to PIO[12] and PIO[14]
+ * respectively. No hardware handshaking lines. 3 - Data and handshaking
+ * lines connected to PIO[12:15].
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_UART_PIOS 0x083F
+
+/*******************************************************************************
+ * NAME : UnifiClockFrequency
+ * PSID : 2140 (0x085C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : kHz
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Query the nominal frequency of the external clock source or crystal
+ * oscillator used by UniFi. The clock frequency is a system parameter and
+ * can not be modified by key.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CLOCK_FREQUENCY 0x085C
+
+/*******************************************************************************
+ * NAME : UnifiCrystalFrequencyTrim
+ * PSID : 2141 (0x085D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 63
+ * DEFAULT : 31
+ * DESCRIPTION :
+ * The IEEE 802.11 standard requires a frequency accuracy of either +/- 20
+ * ppm or +/- 25 ppm depending on the physical layer being used. If
+ * UniFi's frequency reference is a crystal then this attribute should
+ * be used to tweak the oscillating frequency to compensate for design- or
+ * device-specific variations. Each step change trims the frequency by
+ * approximately 2 ppm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CRYSTAL_FREQUENCY_TRIM 0x085D
+
+/*******************************************************************************
+ * NAME : UnifiEnableDorm
+ * PSID : 2142 (0x085E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Enable Dorm
+ * (deep sleep). When disabled, WLAN will not switch the radio power domain
+ * on/off *and* it will always veto deep sleep. Setting the value to TRUE
+ * means dorm functionality will behave normally. The intention is *not* for
+ * this value to be changed at runtime.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ENABLE_DORM 0x085E
+
+/*******************************************************************************
+ * NAME : UnifiExternalClockDetect
+ * PSID : 2146 (0x0862)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * If UniFi is running with an external fast clock source, i.e.
+ * unifiExternalFastClockRequest is set, it is common for this clock to be
+ * shared with other devices. Setting to true causes UniFi to detect when
+ * the clock is present (presumably in response to a request from another
+ * device), and to perform any pending activities at that time rather than
+ * requesting the clock again some time later. This is likely to reduce
+ * overall system power consumption by reducing the total time that the
+ * clock needs to be active.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_EXTERNAL_CLOCK_DETECT 0x0862
+
+/*******************************************************************************
+ * NAME : UnifiExternalFastClockRequest
+ * PSID : 2149 (0x0865)
+ * PER INTERFACE?: NO
+ * TYPE : unifiExternalFastClockRequest
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * It is possible to supply UniFi with an external fast reference clock, as
+ * an alternative to using a crystal. If such a clock is used then it is
+ * only required when UniFi is active. A signal can be output on PIO[2] or
+ * if the version of UniFi in use is the UF602x or later, any PIO may be
+ * used (see unifiExternalFastClockRequestPIO) to indicate when UniFi
+ * requires a fast clock. Setting makes this signal become active and
+ * determines the type of signal output. 0 - No clock request. 1 - Non
+ * inverted, totem pole. 2 - Inverted, totem pole. 3 - Open drain. 4 - Open
+ * source.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_EXTERNAL_FAST_CLOCK_REQUEST 0x0865
+
+/*******************************************************************************
+ * NAME : UnifiWatchdogTimeout
+ * PSID : 2152 (0x0868)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : ms
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 1500
+ * DESCRIPTION :
+ * Maximum time the background may be busy or locked out for. If this time
+ * is exceeded, UniFi will reset. If this key is set to 65535 then the
+ * watchdog will be disabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WATCHDOG_TIMEOUT 0x0868
+
+/*******************************************************************************
+ * NAME : UnifiScanParameters
+ * PSID : 2154 (0x086A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 18
+ * MAX : 18
+ * DEFAULT :
+ * DESCRIPTION :
+ * Scan parameters. Each row of the table contains 2 entries for a scan:
+ * first entry when there is 0 registered VIFs, second - when there is 1 or
+ * more registered VIFs. Entry has the following structure: octet 0 - Scan
+ * priority (uint8) octet 1 - Scan Flags (uint8) (see unifiScanFlags) bit 0
+ * - Enable Early Channel Exit (bool) bit 1 - Disable Scan (bool) bit 2 -
+ * Enable NCHO (bool) bit 3 - Enable MAC Randomization (bool) octet 2 ~ 3 -
+ * Probe Interval in Time Units (uint16) octet 4 ~ 5 - Max Active Channel
+ * Time in Time Units (uint16) octet 6 ~ 7 - Max Passive Channel Time in
+ * Time Units (uint16) octet 8 - Scan Policy (uint8) Size of each entry is 9
+ * octets, row size is 18 octets. A Time Units value specifies a time
+ * interval as a multiple of TU (1024 us).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SCAN_PARAMETERS 0x086A
+
+/*******************************************************************************
+ * NAME : UnifiOverrideEdcaParamActivated
+ * PSID : 2155 (0x086B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * To enable / disable, override STA edca config parameters with
+ * unifiOverrideEDCAParam. default: True - for volcano, and False - for
+ * others
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_EDCA_PARAM_ACTIVATED 0x086B
+
+/*******************************************************************************
+ * NAME : UnifiOverrideEdcaParam
+ * PSID : 2156 (0x086C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * EDCA Parameters to be used if unifiOverrideEDCAParamEnable is true,
+ * indexed by unifiAccessClassIndex octet 0 - AIFSN octet 1 - [7:4] ECW MAX
+ * [3:0] ECW MIN octet 2 ~ 3 - TXOP[7:0] TXOP[15:8] in 32 usec units for
+ * both non-HT and HT connections.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_EDCA_PARAM 0x086C
+
+/*******************************************************************************
+ * NAME : UnifiExternalFastClockRequestPio
+ * PSID : 2158 (0x086E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 15
+ * DEFAULT : 9
+ * DESCRIPTION :
+ * If an external fast reference clock is being supplied to UniFi as an
+ * alternative to a crystal (see unifiExternalFastClockRequest) and the
+ * version of UniFi in use is the UF602x or later, any PIO may be used as
+ * the external fast clock request output from UniFi. key determines the PIO
+ * to use.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_EXTERNAL_FAST_CLOCK_REQUEST_PIO 0x086E
+
+/*******************************************************************************
+ * NAME : UnifiRxDataRate
+ * PSID : 2196 (0x0894)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The bit rate of the last received frame on this VIF.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_DATA_RATE 0x0894
+
+/*******************************************************************************
+ * NAME : UnifiPeerRxRetryCount
+ * PSID : 2198 (0x0896)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of retry packets from peer station
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PEER_RX_RETRY_COUNT 0x0896
+
+/*******************************************************************************
+ * NAME : UnifiPeerRxMulticastCount
+ * PSID : 2199 (0x0897)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of multicast and broadcast packets received from peer station
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PEER_RX_MULTICAST_COUNT 0x0897
+
+/*******************************************************************************
+ * NAME : UnifiRssi
+ * PSID : 2200 (0x0898)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Running average of the Received Signal Strength Indication (RSSI) for
+ * packets received by UniFi's radio. The value should only be treated
+ * as an indication of the signal strength; it is not an accurate
+ * measurement. The result is only meaningful if the unifiRxExternalGain
+ * attribute is set to the correct calibration value. If UniFi is part of a
+ * BSS, only frames originating from devices in the BSS are reported (so far
+ * as this can be determined). The average is reset when UniFi joins or
+ * starts a BSS or is reset.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI 0x0898
+
+/*******************************************************************************
+ * NAME : UnifiLastBssRssi
+ * PSID : 2201 (0x0899)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Last BSS RSSI. See unifiRSSI description.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAST_BSS_RSSI 0x0899
+
+/*******************************************************************************
+ * NAME : UnifiSnr
+ * PSID : 2202 (0x089A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Provides a running average of the Signal to Noise Ratio (dB) for packets
+ * received by UniFi's radio.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SNR 0x089A
+
+/*******************************************************************************
+ * NAME : UnifiLastBssSnr
+ * PSID : 2203 (0x089B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Last BSS SNR. See unifiSNR description.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAST_BSS_SNR 0x089B
+
+/*******************************************************************************
+ * NAME : UnifiSwTxTimeout
+ * PSID : 2204 (0x089C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : second
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * Maximum time in seconds for a frame to be queued in firmware, ready to be
+ * sent, but not yet actually pumped to hardware.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SW_TX_TIMEOUT 0x089C
+
+/*******************************************************************************
+ * NAME : UnifiHwTxTimeout
+ * PSID : 2205 (0x089D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : milliseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 512
+ * DESCRIPTION :
+ * Maximum time in milliseconds for a frame to be queued in the
+ * hardware/DPIF.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HW_TX_TIMEOUT 0x089D
+
+/*******************************************************************************
+ * NAME : UnifiRateStatsRxSuccessCount
+ * PSID : 2206 (0x089E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of successful receptions of complete management and data
+ * frames at the rate indexed by unifiRateStatsIndex. This number will wrap
+ * to zero after the range is exceeded.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RATE_STATS_RX_SUCCESS_COUNT 0x089E
+
+/*******************************************************************************
+ * NAME : UnifiRateStatsTxSuccessCount
+ * PSID : 2207 (0x089F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of successful (acknowledged) unicast transmissions of complete
+ * data or management frames at the rate indexed by unifiRateStatsIndex. This
+ * number will wrap to zero after the range is exceeded.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RATE_STATS_TX_SUCCESS_COUNT 0x089F
+
+/*******************************************************************************
+ * NAME : UnifiTxDataRate
+ * PSID : 2208 (0x08A0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The bit rate currently in use for transmissions of unicast data frames;
+ * On an infrastructure BSS, this is the data rate used in communicating
+ * with the associated access point, if there is none, an error is returned
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_DATA_RATE 0x08A0
+
+/*******************************************************************************
+ * NAME : UnifiSnrExtraOffsetCck
+ * PSID : 2209 (0x08A1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dB
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 8
+ * DESCRIPTION :
+ * This offset is added to SNR values received at 802.11b data rates. This
+ * accounts for differences in the RF pathway between 802.11b and 802.11g
+ * demodulators. The offset applies to values of unifiSNR as well as SNR
+ * values in scan indications. Not used in 5GHz mode.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SNR_EXTRA_OFFSET_CCK 0x08A1
+
+/*******************************************************************************
+ * NAME : UnifiRssiMaxAveragingPeriod
+ * PSID : 2210 (0x08A2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 3000
+ * DESCRIPTION :
+ * Limits the period over which the value of unifiRSSI is averaged. If no
+ * more than unifiRSSIMinReceivedFrames frames have been received in the
+ * period, then the value of unifiRSSI is reset to the value of the next
+ * measurement and the rolling average is restarted. This ensures that the
+ * value is timely (although possibly poorly averaged) when little data is
+ * being received.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_MAX_AVERAGING_PERIOD 0x08A2
+
+/*******************************************************************************
+ * NAME : UnifiRssiMinReceivedFrames
+ * PSID : 2211 (0x08A3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * See the description of unifiRSSIMaxAveragingPeriod for how the
+ * combination of attributes is used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_MIN_RECEIVED_FRAMES 0x08A3
+
+/*******************************************************************************
+ * NAME : UnifiRateStatsRate
+ * PSID : 2212 (0x08A4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : 500 kbps
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The rate corresponding to the current table entry. The value is rounded
+ * to the nearest number of units where necessary. Most rates do not require
+ * rounding, but when short guard interval is in effect the rates are no
+ * longer multiples of the base unit. Note that there may be two occurrences
+ * of the value 130: the first corresponds to MCS index 7, and the second,
+ * if present, to MCS index 6 with short guard interval.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RATE_STATS_RATE 0x08A4
+
+/*******************************************************************************
+ * NAME : UnifiLastBssTxDataRate
+ * PSID : 2213 (0x08A5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Last BSS Tx DataRate. See unifiTxDataRate description.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAST_BSS_TX_DATA_RATE 0x08A5
+
+/*******************************************************************************
+ * NAME : UnifiDiscardedFrameCount
+ * PSID : 2214 (0x08A6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * This is a counter that indicates the number of data and management frames
+ * that have been processed by the UniFi hardware but were discarded before
+ * being processed by the firmware. It does not include frames not processed
+ * by the hardware because they were not addressed to the local device, nor
+ * does it include frames discarded by the firmware in the course of normal
+ * MAC processing (which include, for example, frames in an appropriate
+ * encryption state and multicast frames not requested by the host).
+ * Typically this counter indicates lost data frames for which there was no
+ * buffer space; however, other cases may cause the counter to increment,
+ * such as receiving a retransmitted frame that was already successfully
+ * processed. Hence this counter should not be treated as a reliable guide
+ * to lost frames. The counter wraps to 0 after 65535.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DISCARDED_FRAME_COUNT 0x08A6
+
+/*******************************************************************************
+ * NAME : UnifiMacrameDebugStats
+ * PSID : 2215 (0x08A7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * MACRAME debug stats readout key. Use set to write a debug readout, then
+ * read the same key to get the actual readout.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MACRAME_DEBUG_STATS 0x08A7
+
+/*******************************************************************************
+ * NAME : UnifiCurrentTsfTime
+ * PSID : 2218 (0x08AA)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : -9223372036854775808
+ * MAX : 9223372036854775807
+ * DEFAULT :
+ * DESCRIPTION :
+ * Get TSF time (last 32 bits) for the specified VIF. VIF index can't be 0
+ * as that is treated as global VIF. For station VIF - correct BSS TSF will
+ * only be reported after MLME-CONNECT.indication(success) indication to
+ * host. Note that if MAC Hardware is switched off then TSF returned is an
+ * estimated value
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_TSF_TIME 0x08AA
+
+/*******************************************************************************
+ * NAME : UnifiBaRxEnableTid
+ * PSID : 2219 (0x08AB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X1555
+ * DESCRIPTION :
+ * Configure Block Ack RX on a per-TID basis. Bit mask is two bits per TID
+ * (B1 = Not Used, B0 = enable).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BA_RX_ENABLE_TID 0x08AB
+
+/*******************************************************************************
+ * NAME : UnifiBaTxEnableTid
+ * PSID : 2221 (0x08AD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X0557
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Configure
+ * Block Ack TX on a per-TID basis. Bit mask is two bits per TID (B1 =
+ * autosetup, B0 = enable).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BA_TX_ENABLE_TID 0x08AD
+
+/*******************************************************************************
+ * NAME : UnifiTrafficThresholdToSetupBa
+ * PSID : 2222 (0x08AE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * Sets the default Threshold (as packet count) to setup BA agreement per
+ * TID.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TRAFFIC_THRESHOLD_TO_SETUP_BA 0x08AE
+
+/*******************************************************************************
+ * NAME : UnifiDplaneTxAmsduHwCapability
+ * PSID : 2223 (0x08AF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Returns 0 if A-MSDU size is limited to 4K. Returns 1 if A-MSDU size is
+ * limited to 8K. This value is chip specific and limited by HW.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPLANE_TX_AMSDU_HW_CAPABILITY 0x08AF
+
+/*******************************************************************************
+ * NAME : UnifiDplaneTxAmsduSubframeCountMax
+ * PSID : 2224 (0x08B0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 4
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * Defines the maximum number of A-MSDU sub-frames per A-MSDU. A value of 1
+ * indicates A-MSDU aggregation has been disabled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPLANE_TX_AMSDU_SUBFRAME_COUNT_MAX 0x08B0
+
+/*******************************************************************************
+ * NAME : UnifiBaConfig
+ * PSID : 2225 (0x08B1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X3FFF01
+ * DESCRIPTION :
+ * Block Ack Configuration. It is composed of A-MSDU supported, TX MPDU per
+ * A-MPDU, RX Buffer size, TX Buffer size and Block Ack Timeout.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BA_CONFIG 0x08B1
+
+/*******************************************************************************
+ * NAME : UnifiBaTxMaxNumber
+ * PSID : 2226 (0x08B2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X10
+ * DESCRIPTION :
+ * Block Ack Configuration. Maximum number of BAs. Limited by HW.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BA_TX_MAX_NUMBER 0x08B2
+
+/*******************************************************************************
+ * NAME : UnifiMoveBKtoBe
+ * PSID : 2227 (0x08B3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated. Golden Certification MIB don't delete, change PSID or name
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MOVE_BKTO_BE 0x08B3
+
+/*******************************************************************************
+ * NAME : UnifiBeaconReceived
+ * PSID : 2228 (0x08B4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Access point beacon received count from connected AP
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BEACON_RECEIVED 0x08B4
+
+/*******************************************************************************
+ * NAME : UnifiAcRetries
+ * PSID : 2229 (0x08B5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * It represents the number of retransmitted frames under each ac priority
+ * (indexed by unifiAccessClassIndex). This number will wrap to zero after
+ * the range is exceeded.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AC_RETRIES 0x08B5
+
+/*******************************************************************************
+ * NAME : UnifiRadioOnTime
+ * PSID : 2230 (0x08B6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * msecs the radio is awake (32 bits number accruing over time). On
+ * multi-radio platforms an index to the radio instance is required
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_ON_TIME 0x08B6
+
+/*******************************************************************************
+ * NAME : UnifiRadioTxTime
+ * PSID : 2231 (0x08B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * msecs the radio is transmitting (32 bits number accruing over time). On
+ * multi-radio platforms an index to the radio instance is required
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_TX_TIME 0x08B7
+
+/*******************************************************************************
+ * NAME : UnifiRadioRxTime
+ * PSID : 2232 (0x08B8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * msecs the radio is in active receive (32 bits number accruing over time).
+ * On multi-radio platforms an index to the radio instance is required
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_RX_TIME 0x08B8
+
+/*******************************************************************************
+ * NAME : UnifiRadioScanTime
+ * PSID : 2233 (0x08B9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * msecs the radio is awake due to all scan (32 bits number accruing over
+ * time). On multi-radio platforms an index to the radio instance is
+ * required
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_SCAN_TIME 0x08B9
+
+/*******************************************************************************
+ * NAME : UnifiPsLeakyAp
+ * PSID : 2234 (0x08BA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Indicates that this AP typically leaks packets beyond the guard time
+ * (5msecs).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PS_LEAKY_AP 0x08BA
+
+/*******************************************************************************
+ * NAME : UnifiTqamActivated
+ * PSID : 2235 (0x08BB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables Vendor VHT IE for 256-QAM mode on 2.4GHz.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TQAM_ACTIVATED 0x08BB
+
+/*******************************************************************************
+ * NAME : UnifiRadioOnTimeNan
+ * PSID : 2236 (0x08BC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * msecs the radio is awake due to NAN operations (32 bits number accruing
+ * over time). On multi-radio platforms an index to the radio instance is
+ * required
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_ON_TIME_NAN 0x08BC
+
+/*******************************************************************************
+ * NAME : UnifiOutputRadioInfoToKernelLog
+ * PSID : 2239 (0x08BF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Print messages about the radio status to the Android Kernel Log. See
+ * document SC-508266-TC.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OUTPUT_RADIO_INFO_TO_KERNEL_LOG 0x08BF
+
+/*******************************************************************************
+ * NAME : UnifiNoAckActivationCount
+ * PSID : 2240 (0x08C0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of frames that are discarded due to HW No-ack activated during
+ * test. This number will wrap to zero after the range is exceeded.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NO_ACK_ACTIVATION_COUNT 0x08C0
+
+/*******************************************************************************
+ * NAME : UnifiRxFcsErrorCount
+ * PSID : 2241 (0x08C1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of received frames that are discarded due to bad FCS (CRC).
+ * This number will wrap to zero after the range is exceeded.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_FCS_ERROR_COUNT 0x08C1
+
+/*******************************************************************************
+ * NAME : UnifiBeaconsReceivedPercentage
+ * PSID : 2245 (0x08C5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Percentage of beacons received, calculated as received / expected. The
+ * percentage is scaled to an integer value between 0 (0%) and 1000 (100%).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BEACONS_RECEIVED_PERCENTAGE 0x08C5
+
+/*******************************************************************************
+ * NAME : UnifiArpDetectActivated
+ * PSID : 2246 (0x08C6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable feature support for Enhanced ARP Detect. This is required by
+ * Volcano.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ARP_DETECT_ACTIVATED 0x08C6
+
+/*******************************************************************************
+ * NAME : UnifiArpDetectResponseCounter
+ * PSID : 2247 (0x08C7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counter used to track ARP Response frame for Enhanced ARP Detect. This is
+ * required by Volcano.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ARP_DETECT_RESPONSE_COUNTER 0x08C7
+
+/*******************************************************************************
+ * NAME : UnifiEnableMgmtTxPacketStats
+ * PSID : 2249 (0x08C9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Consider management packets for TX stats counters
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ENABLE_MGMT_TX_PACKET_STATS 0x08C9
+
+/*******************************************************************************
+ * NAME : UnifiSwToHwQueueStats
+ * PSID : 2250 (0x08CA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The timing statistics of packets being queued between SW-HW
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SW_TO_HW_QUEUE_STATS 0x08CA
+
+/*******************************************************************************
+ * NAME : UnifiHostToSwQueueStats
+ * PSID : 2251 (0x08CB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The timing statistics of packets being queued between HOST-SW
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HOST_TO_SW_QUEUE_STATS 0x08CB
+
+/*******************************************************************************
+ * NAME : UnifiQueueStatsEnable
+ * PSID : 2252 (0x08CC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables recording timing statistics of packets being queued between
+ * HOST-SW-HW
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_QUEUE_STATS_ENABLE 0x08CC
+
+/*******************************************************************************
+ * NAME : UnifiTxDataConfirm
+ * PSID : 2253 (0x08CD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Allows to request on a per access class basis that an MA_UNITDATA.confirm
+ * be generated after each packet transfer. The default value is applied for
+ * all ACs.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_DATA_CONFIRM 0x08CD
+
+/*******************************************************************************
+ * NAME : UnifiThroughputDebug
+ * PSID : 2254 (0x08CE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * is used to access throughput related counters that can help diagnose
+ * throughput problems. The index of the MIB will access different counters,
+ * as described in SC-506328-DD. Setting any index for a VIF to any value,
+ * clears all DPLP debug stats for the MAC instance used by the VIF. This is
+ * useful mainly for debugging LAA or small scale throughput issues that
+ * require short term collection of the statistics.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_THROUGHPUT_DEBUG 0x08CE
+
+/*******************************************************************************
+ * NAME : UnifiLoadDpdLut
+ * PSID : 2255 (0x08CF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 147
+ * MAX : 147
+ * DEFAULT :
+ * DESCRIPTION :
+ * Write a static DPD LUT to the FW, read DPD LUT from hardware
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LOAD_DPD_LUT 0x08CF
+
+/*******************************************************************************
+ * NAME : UnifiDpdMasterSwitch
+ * PSID : 2256 (0x08D0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Enables Digital Pre-Distortion
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_MASTER_SWITCH 0x08D0
+
+/*******************************************************************************
+ * NAME : UnifiDpdPredistortGains
+ * PSID : 2257 (0x08D1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 14
+ * MAX : 14
+ * DEFAULT :
+ * DESCRIPTION :
+ * DPD pre-distort gains. Takes a range of frequencies, where f_min <=
+ * f_channel < f_max. The format is [freq_min_msb, freq_min_lsb,
+ * freq_max_msb, freq_max_lsb, DPD policy bitmap, bandwidth_bitmap,
+ * power_trim_enable, OFDM0_gain, OFDM1_gain, CCK_gain, TR_gain, CCK PSAT
+ * gain, OFDM PSAT gain].
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_PREDISTORT_GAINS 0x08D1
+
+/*******************************************************************************
+ * NAME : UnifiOverrideDpdLut
+ * PSID : 2258 (0x08D2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 147
+ * MAX : 147
+ * DEFAULT :
+ * DESCRIPTION :
+ * Write a DPD LUT directly to the HW
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_DPD_LUT 0x08D2
+
+/*******************************************************************************
+ * NAME : UnifiGoogleMaxNumberOfPeriodicScans
+ * PSID : 2260 (0x08D4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GOOGLE_MAX_NUMBER_OF_PERIODIC_SCANS 0x08D4
+
+/*******************************************************************************
+ * NAME : UnifiGoogleMaxRssiSampleSize
+ * PSID : 2261 (0x08D5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GOOGLE_MAX_RSSI_SAMPLE_SIZE 0x08D5
+
+/*******************************************************************************
+ * NAME : UnifiGoogleMaxHotlistAPs
+ * PSID : 2262 (0x08D6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GOOGLE_MAX_HOTLIST_APS 0x08D6
+
+/*******************************************************************************
+ * NAME : UnifiGoogleMaxSignificantWifiChangeAPs
+ * PSID : 2263 (0x08D7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GOOGLE_MAX_SIGNIFICANT_WIFI_CHANGE_APS 0x08D7
+
+/*******************************************************************************
+ * NAME : UnifiGoogleMaxBssidHistoryEntries
+ * PSID : 2264 (0x08D8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GOOGLE_MAX_BSSID_HISTORY_ENTRIES 0x08D8
+
+/*******************************************************************************
+ * NAME : UnifiMacBeaconTimeout
+ * PSID : 2270 (0x08DE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 128
+ * DESCRIPTION :
+ * The maximum time in microseconds we want to stall TX data when expecting
+ * a beacon at EBRT time as a station.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_BEACON_TIMEOUT 0x08DE
+
+/*******************************************************************************
+ * NAME : UnifiStaUsesOneAntennaWhenIdle
+ * PSID : 2274 (0x08E2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Allow the platform to downgrade antenna usage for STA VIFs to 1 if the
+ * VIF is idle. Only valid for multi-radio platforms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_USES_ONE_ANTENNA_WHEN_IDLE 0x08E2
+
+/*******************************************************************************
+ * NAME : UnifiStaUsesMultiAntennasDuringConnect
+ * PSID : 2275 (0x08E3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Allow the platform to use multiple antennas for STA VIFs during the
+ * connect phase. Only valid for multi-radio platforms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_USES_MULTI_ANTENNAS_DURING_CONNECT 0x08E3
+
+/*******************************************************************************
+ * NAME : UnifiApUsesOneAntennaWhenPeersIdle
+ * PSID : 2276 (0x08E4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Allow the platform to downgrade antenna usage for AP VIFs when all
+ * connected peers are idle. Only valid for multi-radio platforms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_USES_ONE_ANTENNA_WHEN_PEERS_IDLE 0x08E4
+
+/*******************************************************************************
+ * NAME : deprecated_unifiUpdateAntennaCapabilitiesWhenScanning
+ * PSID : 2277 (0x08E5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Specify whether antenna scan activities will be allowed to cause an
+ * update of VIF capability. Only valid for multi-radio platforms. WARNING:
+ * Changing this value after system start-up will have no effect.
+ *******************************************************************************/
+#define SLSI_PSID_DEPRECATED_UNIFI_UPDATE_ANTENNA_CAPABILITIES_WHEN_SCANNING 0x08E5
+
+/*******************************************************************************
+ * NAME : UnifiPreferredAntennaBitmap
+ * PSID : 2278 (0x08E6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Specify the preferred antenna(s) to use. A value of 0 means that the FW
+ * will decide on the antenna(s) to use. Only valid for multi-radio
+ * platforms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PREFERRED_ANTENNA_BITMAP 0x08E6
+
+/*******************************************************************************
+ * NAME : UnifiMaxConcurrentMaCs
+ * PSID : 2279 (0x08E7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Specify the maximum number of MACs that may be used for the platform. For
+ * multi-MAC platforms that value *could* be greater than 1. WARNING:
+ * Changing this value after system start-up will have no effect.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAX_CONCURRENT_MA_CS 0x08E7
+
+/*******************************************************************************
+ * NAME : UnifiLoadDpdLutPerRadio
+ * PSID : 2280 (0x08E8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 147
+ * MAX : 147
+ * DEFAULT :
+ * DESCRIPTION :
+ * Write a static DPD LUT to the FW, read DPD LUT from hardware (for devices
+ * that support multiple radios)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LOAD_DPD_LUT_PER_RADIO 0x08E8
+
+/*******************************************************************************
+ * NAME : UnifiOverrideDpdLutPerRadio
+ * PSID : 2281 (0x08E9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 147
+ * MAX : 147
+ * DEFAULT :
+ * DESCRIPTION :
+ * Write a DPD LUT directly to the HW (for devices that support multiple
+ * radios)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_DPD_LUT_PER_RADIO 0x08E9
+
+/*******************************************************************************
+ * NAME : UnifiRoamDeauthReason
+ * PSID : 2294 (0x08F6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * A deauthentication reason for which the STA will trigger a roaming scan
+ * rather than disconnect directly.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_DEAUTH_REASON 0x08F6
+
+/*******************************************************************************
+ * NAME : UnifiCuRoamfactor
+ * PSID : 2295 (0x08F7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Table allocating CUfactor to Channel Utilisation values range.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CU_ROAMFACTOR 0x08F7
+
+/*******************************************************************************
+ * NAME : UnifiRoamCuHighLowPoints
+ * PSID : 2296 (0x08F8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Table allocating the high and low points for computing the linear
+ * CUfactor.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_CU_HIGH_LOW_POINTS 0x08F8
+
+/*******************************************************************************
+ * NAME : UnifiRoamRssiHighLowPoints
+ * PSID : 2297 (0x08F9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Table allocating the high and low points for computing the linear RSSI
+ * factor.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_RSSI_HIGH_LOW_POINTS 0x08F9
+
+/*******************************************************************************
+ * NAME : UnifiRoamRssiBoost
+ * PSID : 2298 (0x08FA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * The value in dBm of the RSSI boost for each band
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_RSSI_BOOST 0x08FA
+
+/*******************************************************************************
+ * NAME : UnifiRoamTrackingScanPeriod
+ * PSID : 2299 (0x08FB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 1
+ * MAX : 4294967295
+ * DEFAULT : 5000000
+ * DESCRIPTION :
+ * The scan period for tracking not yet suitable candidate(s) (microseconds)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_TRACKING_SCAN_PERIOD 0x08FB
+
+/*******************************************************************************
+ * NAME : UnifiRoamCuLocal
+ * PSID : 2300 (0x08FC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Channel utilisation for the STA VIF, value 255=100% channel utilisation.
+ * - used for roaming
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_CU_LOCAL 0x08FC
+
+/*******************************************************************************
+ * NAME : UnifiCuRoamScanNoCandidateDeltaTrigger
+ * PSID : 2301 (0x08FD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 15
+ * DESCRIPTION :
+ * The delta, in percentage points, to apply to unifiCuRoamScanTrigger when
+ * no candidate found during first cycle of cached channel soft scan,
+ * triggered by channel utilization.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CU_ROAM_SCAN_NO_CANDIDATE_DELTA_TRIGGER 0x08FD
+
+/*******************************************************************************
+ * NAME : UnifiRoamApSelectDeltaFactor
+ * PSID : 2302 (0x08FE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 20
+ * DESCRIPTION :
+ * How much higher, in percentage points, does a candidate's score need to
+ * be in order to be considered an eligible candidate? A "0" value renders
+ * all candidates eligible. Please note this applies only to soft roams.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_AP_SELECT_DELTA_FACTOR 0x08FE
+
+/*******************************************************************************
+ * NAME : UnifiCuRoamweight
+ * PSID : 2303 (0x08FF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 30
+ * DESCRIPTION :
+ * Weight of CUfactor, in percentage points, in AP selection algorithm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CU_ROAMWEIGHT 0x08FF
+
+/*******************************************************************************
+ * NAME : UnifiRssiRoamweight
+ * PSID : 2305 (0x0901)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 70
+ * DESCRIPTION :
+ * Weight of RSSI factor, in percentage points, in AP selection algorithm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_ROAMWEIGHT 0x0901
+
+/*******************************************************************************
+ * NAME : UnifiRssiRoamfactor
+ * PSID : 2306 (0x0902)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Table allocating RSSIfactor to RSSI values range.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSI_ROAMFACTOR 0x0902
+
+/*******************************************************************************
+ * NAME : UnifiRssicuRoamScanTrigger
+ * PSID : 2307 (0x0903)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * The current channel Averaged RSSI value below which a soft roaming scan
+ * shall initially start, providing high channel utilisation (see
+ * unifiCURoamScanTrigger). This is a table indexed by frequency band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RSSICU_ROAM_SCAN_TRIGGER 0x0903
+
+/*******************************************************************************
+ * NAME : UnifiCuRoamScanTrigger
+ * PSID : 2308 (0x0904)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * BSS Load / Channel Utilisation doesn't need to be monitored more often
+ * than every 10th Beacon. This is a table indexed by frequency band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CU_ROAM_SCAN_TRIGGER 0x0904
+
+/*******************************************************************************
+ * NAME : UnifiRoamBssLoadMonitoringFrequency
+ * PSID : 2309 (0x0905)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * How often, in received beacons, should the BSS load be monitored? - used
+ * for roaming
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_BSS_LOAD_MONITORING_FREQUENCY 0x0905
+
+/*******************************************************************************
+ * NAME : UnifiRoamBlacklistSize
+ * PSID : 2310 (0x0906)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : entries
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * Do not remove! Read by the host! And then passed up to the framework.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_BLACKLIST_SIZE 0x0906
+
+/*******************************************************************************
+ * NAME : UnifiCuMeasurementInterval
+ * PSID : 2311 (0x0907)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 1
+ * MAX : 1000
+ * DEFAULT : 500
+ * DESCRIPTION :
+ * The interval in ms to perform the channel usage update
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CU_MEASUREMENT_INTERVAL 0x0907
+
+/*******************************************************************************
+ * NAME : UnifiCurrentBssNss
+ * PSID : 2312 (0x0908)
+ * PER INTERFACE?: NO
+ * TYPE : unifiAntennaMode
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * specifies current AP antenna mode: 0 = SISO, 1 = MIMO (2x2), 2 = MIMO
+ * (3x3), 3 = MIMO (4x4)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_BSS_NSS 0x0908
+
+/*******************************************************************************
+ * NAME : UnifiApMimoUsed
+ * PSID : 2313 (0x0909)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * AP uses MU-MIMO
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_MIMO_USED 0x0909
+
+/*******************************************************************************
+ * NAME : UnifiRoamEapolTimeout
+ * PSID : 2314 (0x090A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Maximum time, in seconds, allowed for an offloaded Eapol (4 way
+ * handshake).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_EAPOL_TIMEOUT 0x090A
+
+/*******************************************************************************
+ * NAME : UnifiRoamingCount
+ * PSID : 2315 (0x090B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Number of roams
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAMING_COUNT 0x090B
+
+/*******************************************************************************
+ * NAME : UnifiRoamingAkm
+ * PSID : 2316 (0x090C)
+ * PER INTERFACE?: NO
+ * TYPE : unifiRoamingAKM
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * specifies current AKM 0 = None 1 = OKC 2 = FT (FT_1X) 3 = PSK 4 = FT_PSK
+ * 5 = PMKSA Caching
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAMING_AKM 0x090C
+
+/*******************************************************************************
+ * NAME : UnifiCurrentBssBandwidth
+ * PSID : 2317 (0x090D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Current bandwidth the STA is operating on channel_bw_20_mhz = 20,
+ * channel_bw_40_mhz = 40, channel_bw_80_mhz = 80, channel_bw_160_mhz = 160
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_BSS_BANDWIDTH 0x090D
+
+/*******************************************************************************
+ * NAME : UnifiCurrentBssChannelFrequency
+ * PSID : 2318 (0x090E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Centre frequency for the connected channel
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_BSS_CHANNEL_FREQUENCY 0x090E
+
+/*******************************************************************************
+ * NAME : UnifiLoggerEnabled
+ * PSID : 2320 (0x0910)
+ * PER INTERFACE?: NO
+ * TYPE : unifiWifiLogger
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Enable reporting of the following events for Android logging: - firmware
+ * connectivity events - fate of management frames sent by the host through
+ * the MLME SAP. It can take the following values: - 0: reporting for
+ * non-mandatory triggers disabled. EAPOL, security, btm frames and roam
+ * triggers are reported. - 1: partial reporting is enabled. Beacon frames
+ * will not be reported. - 2: full reporting is enabled. Beacon frames are
+ * included.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LOGGER_ENABLED 0x0910
+
+/*******************************************************************************
+ * NAME : UnifiMaPacketFateEnabled
+ * PSID : 2321 (0x0911)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable reporting of the fate of the TX packets sent by the host. This mib
+ * value will be updated if "unifiRameUpdateMibs" mib is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MA_PACKET_FATE_ENABLED 0x0911
+
+/*******************************************************************************
+ * NAME : UnifiFrameRxCounters
+ * PSID : 2326 (0x0916)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Frame RX Counters used by the host. These are required by MCD.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAME_RX_COUNTERS 0x0916
+
+/*******************************************************************************
+ * NAME : UnifiFrameTxCounters
+ * PSID : 2327 (0x0917)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Frame TX Counters used by the host. These are required by MCD.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAME_TX_COUNTERS 0x0917
+
+/*******************************************************************************
+ * NAME : UnifiLaaNssSpeculationIntervalSlotTime
+ * PSID : 2330 (0x091A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 300
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the repeatable amount of time,
+ * in ms, that firmware will start to send speculation frames for spatial
+ * streams.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_NSS_SPECULATION_INTERVAL_SLOT_TIME 0x091A
+
+/*******************************************************************************
+ * NAME : UnifiLaaNssSpeculationIntervalSlotMaxNum
+ * PSID : 2331 (0x091B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the maximum number of
+ * speculation time slot for spatial stream.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_NSS_SPECULATION_INTERVAL_SLOT_MAX_NUM 0x091B
+
+/*******************************************************************************
+ * NAME : UnifiLaaBwSpeculationIntervalSlotTime
+ * PSID : 2332 (0x091C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 300
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the repeatable amount of time,
+ * in ms, that firmware will start to send speculation frames for bandwidth.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_BW_SPECULATION_INTERVAL_SLOT_TIME 0x091C
+
+/*******************************************************************************
+ * NAME : UnifiLaaBwSpeculationIntervalSlotMaxNum
+ * PSID : 2333 (0x091D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 8
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the maximum number of
+ * speculation time slot for bandwidth.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_BW_SPECULATION_INTERVAL_SLOT_MAX_NUM 0x091D
+
+/*******************************************************************************
+ * NAME : UnifiLaaMcsSpeculationIntervalSlotTime
+ * PSID : 2334 (0x091E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the repeatable amount of time,
+ * in ms, that firmware will start to send speculation frames for MCS or
+ * rate index.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_MCS_SPECULATION_INTERVAL_SLOT_TIME 0x091E
+
+/*******************************************************************************
+ * NAME : UnifiLaaMcsSpeculationIntervalSlotMaxNum
+ * PSID : 2335 (0x091F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the maximum number of
+ * speculation time slot for MCS or rate index.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_MCS_SPECULATION_INTERVAL_SLOT_MAX_NUM 0x091F
+
+/*******************************************************************************
+ * NAME : UnifiLaaGiSpeculationIntervalSlotTime
+ * PSID : 2336 (0x0920)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the repeatable amount of time,
+ * in ms, that firmware will start to send speculation frames for guard
+ * interval.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_GI_SPECULATION_INTERVAL_SLOT_TIME 0x0920
+
+/*******************************************************************************
+ * NAME : UnifiLaaGiSpeculationIntervalSlotMaxNum
+ * PSID : 2337 (0x0921)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It defines the maximum number of
+ * speculation time slot for guard interval.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_GI_SPECULATION_INTERVAL_SLOT_MAX_NUM 0x0921
+
+/*******************************************************************************
+ * NAME : UnifiLaaTxDiversityBeamformEnabled
+ * PSID : 2350 (0x092E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It is used to enable or disable TX
+ * beamformer functionality.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_TX_DIVERSITY_BEAMFORM_ENABLED 0x092E
+
+/*******************************************************************************
+ * NAME : UnifiLaaTxDiversityBeamformMinMcs
+ * PSID : 2351 (0x092F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. TX Beamform is applied when MCS is same or
+ * larger than this threshold value.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_TX_DIVERSITY_BEAMFORM_MIN_MCS 0x092F
+
+/*******************************************************************************
+ * NAME : UnifiLaaTxDiversityFixMode
+ * PSID : 2352 (0x0930)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * For Link Adaptation Algorithm. It is used to fix TX diversity mode. With
+ * two antennas available and only one spatial stream used, then one of the
+ * following modes can be selected: - 0 : Not fixed. Tx diversity mode is
+ * automatically selected by LAA. - 1 : CDD fixed mode - 2 : Beamforming
+ * fixed mode - 3 : STBC fixed mode
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_TX_DIVERSITY_FIX_MODE 0x0930
+
+/*******************************************************************************
+ * NAME : UnifiLaaProtectionConfigOverride
+ * PSID : 2356 (0x0934)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 6
+ * DESCRIPTION :
+ * Overrides the default Protection configuration. Only valid flags are
+ * DPIF_PEER_INFO_PROTECTION_TXOP_AMPDU and
+ * DPIF_PEER_INFO_PROTECTION_ALLOWED. Default allows protection code to work
+ * out the rules based on VIF configuration. If
+ * DPIF_PEER_INFO_PROTECTION_ALLOWED is unset, all protection, for this vif,
+ * is disabled. If DPIF_PEER_INFO_PROTECTION_TXOP_AMPDU is unset then, for
+ * the specified vif, the first A-MPDU in the TxOp is no longer protected.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LAA_PROTECTION_CONFIG_OVERRIDE 0x0934
+
+/*******************************************************************************
+ * NAME : UnifiRateStatsRtsErrorCount
+ * PSID : 2358 (0x0936)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of successive RTS failures.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RATE_STATS_RTS_ERROR_COUNT 0x0936
+
+/*******************************************************************************
+ * NAME : UnifiCsrOnlyEifsDuration
+ * PSID : 2362 (0x093A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 12
+ * DESCRIPTION :
+ * Specifies time that is used for EIFS. A value of 0 causes the built-in
+ * value to be used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CSR_ONLY_EIFS_DURATION 0x093A
+
+/*******************************************************************************
+ * NAME : UnifiOverrideDefaultBetxopForHt
+ * PSID : 2364 (0x093C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 171
+ * DESCRIPTION :
+ * When set to non-zero value then this will override the BE TXOP for 11n
+ * and higher modulations (in 32 usec units) to the value specified here.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_DEFAULT_BETXOP_FOR_HT 0x093C
+
+/*******************************************************************************
+ * NAME : UnifiOverrideDefaultBetxop
+ * PSID : 2365 (0x093D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 78
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: When set to
+ * non-zero value then this will override the BE TXOP for 11g (in 32 usec
+ * units) to the value specified here.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_DEFAULT_BETXOP 0x093D
+
+/*******************************************************************************
+ * NAME : UnifiRxabbTrimSettings
+ * PSID : 2366 (0x093E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Various settings to change RX ABB filter trim behavior.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RXABB_TRIM_SETTINGS 0x093E
+
+/*******************************************************************************
+ * NAME : UnifiRadioTrimsEnable
+ * PSID : 2367 (0x093F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X0FF5
+ * DESCRIPTION :
+ * A bitmap for enabling/disabling trims at runtime. Check unifiEnabledTrims
+ * enum for description of the possible values.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_TRIMS_ENABLE 0x093F
+
+/*******************************************************************************
+ * NAME : UnifiRadioCcaThresholds
+ * PSID : 2368 (0x0940)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * The wideband CCA ED thresholds so that the CCA-ED triggers at the
+ * regulatory value of -62 dBm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_CCA_THRESHOLDS 0x0940
+
+/*******************************************************************************
+ * NAME : UnifiHardwarePlatform
+ * PSID : 2369 (0x0941)
+ * PER INTERFACE?: NO
+ * TYPE : unifiHardwarePlatform
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware platform. This is necessary so we can apply tweaks to specific
+ * revisions, even though they might be running the same baseband and RF
+ * chip combination. Check unifiHardwarePlatform enum for description of the
+ * possible values.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_HARDWARE_PLATFORM 0x0941
+
+/*******************************************************************************
+ * NAME : UnifiForceChannelBw
+ * PSID : 2370 (0x0942)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Test only: Force channel bandwidth to specified value. This can also be
+ * used to allow emulator/silicon back to back connection to communicate at
+ * bandwidth other than default (20 MHz) Setting it to 0 uses the default
+ * bandwidth as selected by firmware channel_bw_20_mhz = 20,
+ * channel_bw_40_mhz = 40, channel_bw_80_mhz = 80 This mib value will be
+ * updated if "unifiRameUpdateMibs" mib is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FORCE_CHANNEL_BW 0x0942
+
+/*******************************************************************************
+ * NAME : UnifiDpdTrainingDuration
+ * PSID : 2371 (0x0943)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Duration of DPD training (in ms).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_TRAINING_DURATION 0x0943
+
+/*******************************************************************************
+ * NAME : UnifiTxFtrimSettings
+ * PSID : 2372 (0x0944)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter frequency compensation settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_FTRIM_SETTINGS 0x0944
+
+/*******************************************************************************
+ * NAME : UnifiDpdTrainPacketConfig
+ * PSID : 2373 (0x0945)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 8
+ * MAX : 8
+ * DEFAULT :
+ * DESCRIPTION :
+ * This MIB allows the dummy packet training bandwidth and rates to be
+ * overridden. Typically the bandwidth would be the same as the channel
+ * bandwidth (for example 80 MHz packets for an 80 Mhz channel) and rates
+ * MCS1 and MCS5. With this MIB you can set, for example, an 80 MHz channel
+ * to be trained using 20 MHz bandwidth (centered or not) with MCS2 and MCS7
+ * packets. The MIB index dictates what channel bandwidth the configuration
+ * is for (1 for 20 MHz, 2 for 40 MHz and so on). The format is: - octet 0:
+ * train bandwidth (this basically follows the rice_channel_bw_t enum). -
+ * octet 1: train primary channel position - octet 2-3: OFDM 0 rate - octet
+ * 4-5: OFDM 1 rate - octet 6-7: CCK rate (unused) The rates are encoded in
+ * host(FAPI) format, see SC-506179, section 4.41.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_TRAIN_PACKET_CONFIG 0x0945
+
+/*******************************************************************************
+ * NAME : UnifiTxPowerTrimCommonConfig
+ * PSID : 2374 (0x0946)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 3
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Common transmitter power trim settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_POWER_TRIM_COMMON_CONFIG 0x0946
+
+/*******************************************************************************
+ * NAME : UnifiIqDebugEnabled
+ * PSID : 2375 (0x0947)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Send IQ capture data to host for IQ debug
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IQ_DEBUG_ENABLED 0x0947
+
+/*******************************************************************************
+ * NAME : UnifiCoexDebugOverrideBt
+ * PSID : 2425 (0x0979)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables overriding of all BT activities by WLAN.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_COEX_DEBUG_OVERRIDE_BT 0x0979
+
+/*******************************************************************************
+ * NAME : UnifiLteMailbox
+ * PSID : 2430 (0x097E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 36
+ * MAX : 40
+ * DEFAULT :
+ * DESCRIPTION :
+ * Set modem status to simulate lte status updates. See SC-505775-SP for API
+ * description. Defined as array of uint32 represented by the octet string
+ * FOR TEST PURPOSES ONLY
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_MAILBOX 0x097E
+
+/*******************************************************************************
+ * NAME : UnifiLteMwsSignal
+ * PSID : 2431 (0x097F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Set modem status to simulate lte status updates. See SC-505775-SP for API
+ * description. See unifiLteSignalsBitField for enum bitmap. FOR TEST
+ * PURPOSES ONLY
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_MWS_SIGNAL 0x097F
+
+/*******************************************************************************
+ * NAME : UnifiLteEnableChannelAvoidance
+ * PSID : 2432 (0x0980)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables channel avoidance scheme for LTE Coex
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_ENABLE_CHANNEL_AVOIDANCE 0x0980
+
+/*******************************************************************************
+ * NAME : UnifiLteEnablePowerBackoff
+ * PSID : 2433 (0x0981)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables power backoff scheme for LTE Coex
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_ENABLE_POWER_BACKOFF 0x0981
+
+/*******************************************************************************
+ * NAME : UnifiLteEnableTimeDomain
+ * PSID : 2434 (0x0982)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables TDD scheme for LTE Coex
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_ENABLE_TIME_DOMAIN 0x0982
+
+/*******************************************************************************
+ * NAME : UnifiLteEnableLteCoex
+ * PSID : 2435 (0x0983)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables LTE Coex support
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_ENABLE_LTE_COEX 0x0983
+
+/*******************************************************************************
+ * NAME : UnifiLteBand40PowerBackoffChannelMask
+ * PSID : 2436 (0x0984)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * Channel Mask defining channels on which to apply power backoff when LTE
+ * operating on Band40. Defined as a 16 bit bitmask, as only 2G4 channels
+ * are impacted by this feature.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_BAND40_POWER_BACKOFF_CHANNEL_MASK 0x0984
+
+/*******************************************************************************
+ * NAME : UnifiLteBand40PowerBackoffRsrpLow
+ * PSID : 2437 (0x0985)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -140
+ * MAX : -77
+ * DEFAULT : -100
+ * DESCRIPTION :
+ * WLAN Power Reduction shall be applied when RSRP of LTE operating on band
+ * 40 falls below this level
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_BAND40_POWER_BACKOFF_RSRP_LOW 0x0985
+
+/*******************************************************************************
+ * NAME : UnifiLteBand40PowerBackoffRsrpHigh
+ * PSID : 2438 (0x0986)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -140
+ * MAX : -77
+ * DEFAULT : -95
+ * DESCRIPTION :
+ * WLAN Power Reduction shall be restored when RSRP of LTE operating on band
+ * 40 climbs above this level
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_BAND40_POWER_BACKOFF_RSRP_HIGH 0x0986
+
+/*******************************************************************************
+ * NAME : UnifiLteBand40PowerBackoffRsrpAveragingAlpha
+ * PSID : 2439 (0x0987)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : percentage
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Weighting applied when calculating the average RSRP when considering Power
+ * Back Off Specifies the percentage weighting (alpha) to give to the most
+ * recent value when calculating the moving average. ma_new = alpha *
+ * new_sample + (1-alpha) * ma_old.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_BAND40_POWER_BACKOFF_RSRP_AVERAGING_ALPHA 0x0987
+
+/*******************************************************************************
+ * NAME : UnifiLteSetChannel
+ * PSID : 2440 (0x0988)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Enables LTE Coex support
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_SET_CHANNEL 0x0988
+
+/*******************************************************************************
+ * NAME : UnifiLteSetPowerBackoff
+ * PSID : 2441 (0x0989)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * MIB to force WLAN Power Backoff for LTE COEX testing purposes
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_SET_POWER_BACKOFF 0x0989
+
+/*******************************************************************************
+ * NAME : UnifiLteSetTddDebugMode
+ * PSID : 2442 (0x098A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * MIB to enable LTE TDD COEX simulation for testing purposes
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_SET_TDD_DEBUG_MODE 0x098A
+
+/*******************************************************************************
+ * NAME : UnifiApScanAbsenceDuration
+ * PSID : 2480 (0x09B0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : beacon intervals
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 7
+ * DESCRIPTION :
+ * Duration of the Absence time to use when protecting AP VIFs from scan
+ * operations. A value of 0 disables the feature.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_SCAN_ABSENCE_DURATION 0x09B0
+
+/*******************************************************************************
+ * NAME : UnifiApScanAbsencePeriod
+ * PSID : 2481 (0x09B1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : beacon intervals
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 14
+ * DESCRIPTION :
+ * Period of the Absence/Presence times cycles to use when protecting AP
+ * VIFs from scan operations.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_SCAN_ABSENCE_PERIOD 0x09B1
+
+/*******************************************************************************
+ * NAME : UnifiMlmestaKeepAliveTimeoutCheck
+ * PSID : 2485 (0x09B5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * DO NOT SET TO A VALUE HIGHER THAN THE TIMEOUT. How long before keepalive
+ * timeout to start polling, in seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMESTA_KEEP_ALIVE_TIMEOUT_CHECK 0x09B5
+
+/*******************************************************************************
+ * NAME : UnifiMlmeapKeepAliveTimeoutCheck
+ * PSID : 2486 (0x09B6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * DO NOT SET TO A VALUE HIGHER THAN THE TIMEOUT. How long before keepalive
+ * timeout to start polling, in seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMEAP_KEEP_ALIVE_TIMEOUT_CHECK 0x09B6
+
+/*******************************************************************************
+ * NAME : UnifiMlmegoKeepAliveTimeoutCheck
+ * PSID : 2487 (0x09B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * DO NOT SET TO A VALUE HIGHER THAN THE TIMEOUT. How long before keepalive
+ * timeout to start polling, in seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMEGO_KEEP_ALIVE_TIMEOUT_CHECK 0x09B7
+
+/*******************************************************************************
+ * NAME : UnifiBssMaxIdlePeriod
+ * PSID : 2488 (0x09B8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : second
+ * MIN : 0
+ * MAX : 300
+ * DEFAULT : 300
+ * DESCRIPTION :
+ * BSS Idle MAX Period. Used to cap the value coming from BSS Max Idle
+ * Period IE, in seconds
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BSS_MAX_IDLE_PERIOD 0x09B8
+
+/*******************************************************************************
+ * NAME : UnifiIdlemodeListenIntervalSkippingDtim
+ * PSID : 2495 (0x09BF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : DTIM intervals
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X00054645
+ * DESCRIPTION :
+ * Listen interval of beacons when in single-vif power saving mode,
+ * receiving DTIMs is enabled and idle mode enabled. No DTIMs are skipped
+ * during MVIF operation. A maximum of the listen interval beacons are
+ * skipped, which may be less than the number of DTIMs that can be skipped.
+ * The value is a lookup table for DTIM counts. Each 4bits, in LSB order,
+ * represent DTIM1, DTIM2, DTIM3, DTIM4, DTIM5, (unused). This key is only
+ * used for STA VIF, connected to an AP. For P2P group client intervals,
+ * refer to unifiIdlemodeP2PListenIntervalSkippingDTIM, PSID=2496.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IDLEMODE_LISTEN_INTERVAL_SKIPPING_DTIM 0x09BF
+
+/*******************************************************************************
+ * NAME : UnifiIdlemodeP2PListenIntervalSkippingDtim
+ * PSID : 2496 (0x09C0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : DTIM intervals
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X00000002
+ * DESCRIPTION :
+ * Listen interval of beacons when in single-vif, P2P client power saving
+ * mode, receiving DTIMs and idle mode enabled. No DTIMs are skipped during
+ * MVIF operation. A maximum of (listen interval - 1) beacons are skipped,
+ * which may be less than the number of DTIMs that can be skipped. The value
+ * is a lookup table for DTIM counts. Each 4bits, in LSB order, represent
+ * DTIM1, DTIM2, DTIM3, DTIM4, DTIM5, (unused). This key is only used for
+ * P2P group client. For STA connected to an AP, refer to
+ * unifiIdlemodeListenIntervalSkippingDTIM, PSID=2495.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IDLEMODE_P2_PLISTEN_INTERVAL_SKIPPING_DTIM 0x09C0
+
+/*******************************************************************************
+ * NAME : UnifiApIdleModeEnabled
+ * PSID : 2497 (0x09C1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables AP Idle mode which can transmit beacons in MIFLess mode, if
+ * softAP is active, and there has been no activity for a time. This mib has
+ * priority over unifiIdleModeLiteEnabled. If unifiAPIdleEnabled is enabled,
+ * Idle Mode Lite won't be activated. This mib value will be runtime
+ * (post-wlan enable) applied only if "unifiRameUpdateMibs" mib is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_IDLE_MODE_ENABLED 0x09C1
+
+/*******************************************************************************
+ * NAME : UnifiFastPowerSaveTimeout
+ * PSID : 2500 (0x09C4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 400000
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: UniFi
+ * implements a proprietary power management mode called Fast Power Save
+ * that balances network performance against power consumption. In this mode
+ * UniFi delays entering power save mode until it detects that there has
+ * been no exchange of data for the duration of time specified.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FAST_POWER_SAVE_TIMEOUT 0x09C4
+
+/*******************************************************************************
+ * NAME : UnifiFastPowerSaveTimeoutSmall
+ * PSID : 2501 (0x09C5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 200000
+ * DESCRIPTION :
+ * UniFi implements a proprietary power management mode called Fast Power
+ * Save that balances network performance against power consumption. In this
+ * mode UniFi delays entering power save mode until it detects that there
+ * has been no exchange of data for the duration of time specified. The
+ * unifiFastPowerSaveTimeOutSmall aims to improve the power consumption by
+ * setting a lower bound for the Fast Power Save Timeout. If set with a
+ * value above unifiFastPowerSaveTimeOut it will default to
+ * unifiFastPowerSaveTimeOut.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FAST_POWER_SAVE_TIMEOUT_SMALL 0x09C5
+
+/*******************************************************************************
+ * NAME : UnifiMlmestaKeepAliveTimeout
+ * PSID : 2502 (0x09C6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2147
+ * DEFAULT : 30
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Timeout
+ * before disconnecting in seconds. 0 = Disabled. Capped to greater than 6
+ * seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMESTA_KEEP_ALIVE_TIMEOUT 0x09C6
+
+/*******************************************************************************
+ * NAME : UnifiMlmeapKeepAliveTimeout
+ * PSID : 2503 (0x09C7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2147
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Timeout before disconnecting in seconds. 0 = Disabled. Capped to greater
+ * than 6 seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMEAP_KEEP_ALIVE_TIMEOUT 0x09C7
+
+/*******************************************************************************
+ * NAME : UnifiMlmegoKeepAliveTimeout
+ * PSID : 2504 (0x09C8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2147
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Timeout before disconnecting in seconds. 0 = Disabled. Capped to greater
+ * than 6 seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLMEGO_KEEP_ALIVE_TIMEOUT 0x09C8
+
+/*******************************************************************************
+ * NAME : UnifiStaRouterAdvertisementMinimumIntervalToForward
+ * PSID : 2505 (0x09C9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 60
+ * MAX : 4294967285
+ * DEFAULT : 60
+ * DESCRIPTION :
+ * STA Mode: Minimum interval to forward Router Advertisement frames to
+ * Host. Minimum value = 60 secs.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_ROUTER_ADVERTISEMENT_MINIMUM_INTERVAL_TO_FORWARD 0x09C9
+
+/*******************************************************************************
+ * NAME : UnifiRoamConnectionQualityCheckWaitAfterConnect
+ * PSID : 2506 (0x09CA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : ms
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * The amount of time a STA will wait after connection before starting to
+ * check the MLME-installed connection quality trigger thresholds
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ROAM_CONNECTION_QUALITY_CHECK_WAIT_AFTER_CONNECT 0x09CA
+
+/*******************************************************************************
+ * NAME : UnifiApBeaconMaxDrift
+ * PSID : 2507 (0x09CB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0XFFFF
+ * DESCRIPTION :
+ * The maximum drift in microseconds we will allow for each beacon sent when
+ * we're trying to move it to get a 50% duty cycle between GO and STA in
+ * multiple VIF scenario. We'll delay our TX beacon by a maximum of this
+ * value until we reach our target TBTT. We have 3 possible cases for this
+ * value: a) ap_beacon_max_drift = 0x0000 - Feature disabled b)
+ * ap_beacon_max_drift between 0x0001 and 0xFFFE - Each time we transmit the
+ * beacon we'll move it a little bit forward but never more than this. (Not
+ * implemented yet) c) ap_beacon_max_drift = 0xFFFF - Move the beacon to the
+ * desired position in one shot.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AP_BEACON_MAX_DRIFT 0x09CB
+
+/*******************************************************************************
+ * NAME : UnifiBssMaxIdlePeriodEnabled
+ * PSID : 2508 (0x09CC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * If set STA will configure keep-alive with options specified in a received
+ * BSS max idle period IE
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BSS_MAX_IDLE_PERIOD_ENABLED 0x09CC
+
+/*******************************************************************************
+ * NAME : UnifiVifIdleMonitorTime
+ * PSID : 2509 (0x09CD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : second
+ * MIN : 0
+ * MAX : 1800
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * In Fast Power Save mode, the STA will decide whether it is idle based on
+ * monitoring its traffic class. If the traffic class is continuously
+ * "occasional" for equal or longer than the specified value (in seconds),
+ * then the VIF is marked as idle. Traffic class monitoring is based on the
+ * interval specified in the "unifiExitPowerSavePeriod" MIB
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_VIF_IDLE_MONITOR_TIME 0x09CD
+
+/*******************************************************************************
+ * NAME : UnifiDisableLegacyPowerSave
+ * PSID : 2510 (0x09CE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: This affects
+ * Station VIF power save behaviour. Setting it to true will disable legacy
+ * power save (i.e. we wil use fast power save to retrieve data) Note that
+ * actually disables full power save mode (i.e sending trigger to retrieve
+ * frames which will be PS-POLL for legacy and QOS-NULL for UAPSD)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DISABLE_LEGACY_POWER_SAVE 0x09CE
+
+/*******************************************************************************
+ * NAME : UnifiDebugForceActive
+ * PSID : 2511 (0x09CF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Force station
+ * power save mode to be active (when scheduled). VIF scheduling, coex and
+ * other non-VIF specific reasons could still force power save on the VIF.
+ * Applies to all VIFs of type station (includes P2P client). Changes to the
+ * mib will only get applied after next host/mlme power management request.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_FORCE_ACTIVE 0x09CF
+
+/*******************************************************************************
+ * NAME : UnifiStationActivityIdleTime
+ * PSID : 2512 (0x09D0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : milliseconds
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 500
+ * DESCRIPTION :
+ * Time since last station activity when it can be considered to be idle.
+ * Only used in SoftAP mode when determining if all connected stations are
+ * idle (not active).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STATION_ACTIVITY_IDLE_TIME 0x09D0
+
+/*******************************************************************************
+ * NAME : UnifiDmsEnabled
+ * PSID : 2513 (0x09D1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables Directed Multicast Service (DMS)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DMS_ENABLED 0x09D1
+
+/*******************************************************************************
+ * NAME : UnifiPowerManagementDelayTimeout
+ * PSID : 2514 (0x09D2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 30000
+ * DESCRIPTION :
+ * When UniFi enters power save mode it signals the new state by setting the
+ * power management bit in the frame control field of a NULL frame. It then
+ * remains active for the period since the previous unicast reception, or
+ * since the transmission of the NULL frame, whichever is later. This entry
+ * controls the maximum time during which UniFi will continue to listen for
+ * data. This allows any buffered data on a remote device to be cleared.
+ * Specifies an upper limit on the timeout. UniFi internally implements a
+ * proprietary algorithm to adapt the timeout depending upon the
+ * situation. This is used by firmware when current station VIF is only
+ * station VIF which can be scheduled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_POWER_MANAGEMENT_DELAY_TIMEOUT 0x09D2
+
+/*******************************************************************************
+ * NAME : UnifiApsdServicePeriodTimeout
+ * PSID : 2515 (0x09D3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 20000
+ * DESCRIPTION :
+ * During Unscheduled Automated Power Save Delivery (U-APSD), UniFi may
+ * trigger a service period in order to fetch data from the access point.
+ * The service period is normally terminated by a frame from the access
+ * point with the EOSP (End Of Service Period) flag set, at which point
+ * UniFi returns to sleep. However, if the access point is temporarily
+ * inaccessible, UniFi would stay awake indefinitely. Specifies a timeout
+ * starting from the point where the trigger frame has been sent. If the
+ * timeout expires and no data has been received from the access point,
+ * UniFi will behave as if the service period had been ended normally and
+ * return to sleep. This timeout takes precedence over
+ * unifiPowerSaveExtraListenTime if both would otherwise be applicable.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_APSD_SERVICE_PERIOD_TIMEOUT 0x09D3
+
+/*******************************************************************************
+ * NAME : UnifiConcurrentPowerManagementDelayTimeout
+ * PSID : 2516 (0x09D4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 10000
+ * DESCRIPTION :
+ * When UniFi enters power save mode it signals the new state by setting the
+ * power management bit in the frame control field of a NULL frame. It then
+ * remains active for the period since the previous unicast reception, or
+ * since the transmission of the NULL frame, whichever is later. This entry
+ * controls the maximum time during which UniFi will continue to listen for
+ * data. This allows any buffered data on a remote device to be cleared.
+ * This is same as unifiPowerManagementDelayTimeout but this value is
+ * considered only when we are doing multivif operations and other VIFs are
+ * waiting to be scheduled.Note that firmware automatically chooses one of
+ * unifiPowerManagementDelayTimeout and
+ * unifiConcurrentPowerManagementDelayTimeout depending upon the current
+ * situation.It is sensible to set unifiPowerManagementDelayTimeout to be
+ * always more thanunifiConcurrentPowerManagementDelayTimeout.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CONCURRENT_POWER_MANAGEMENT_DELAY_TIMEOUT 0x09D4
+
+/*******************************************************************************
+ * NAME : UnifiStationQosInfo
+ * PSID : 2517 (0x09D5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * QoS capability for a non-AP Station, and is encoded as per IEEE 802.11
+ * QoS Capability.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STATION_QOS_INFO 0x09D5
+
+/*******************************************************************************
+ * NAME : UnifiListenIntervalSkippingDtim
+ * PSID : 2518 (0x09D6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : DTIM intervals
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X000A89AA
+ * DESCRIPTION :
+ * Listen interval of beacons when in single-vif power saving mode, receiving
+ * DTIMs is enabled and idle mode disabled. No DTIMs are skipped during MVIF
+ * operation. A maximum of the listen interval beacons are skipped, which
+ * may be less than the number of DTIMs that can be skipped. The value is a
+ * lookup table for DTIM counts. Each 4bits, in LSB order, represent DTIM1,
+ * DTIM2, DTIM3, DTIM4, DTIM5, (unused). This key is only used for STA VIF,
+ * connected to an AP. For P2P group client intervals, refer to
+ * unifiP2PListenIntervalSkippingDTIM, PSID=2523.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LISTEN_INTERVAL_SKIPPING_DTIM 0x09D6
+
+/*******************************************************************************
+ * NAME : UnifiListenInterval
+ * PSID : 2519 (0x09D7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Association request listen interval parameter in beacon intervals. Not
+ * used for any other purpose.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LISTEN_INTERVAL 0x09D7
+
+/*******************************************************************************
+ * NAME : UnifiLegacyPsPollTimeout
+ * PSID : 2520 (0x09D8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 15000
+ * DESCRIPTION :
+ * Time we try to stay awake after sending a PS-POLL to receive data.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LEGACY_PS_POLL_TIMEOUT 0x09D8
+
+/*******************************************************************************
+ * NAME : UnifiBeaconSkippingControl
+ * PSID : 2521 (0x09D9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X00010103
+ * DESCRIPTION :
+ * Control beacon skipping behaviour within firmware with bit flags. 1
+ * defines enabled, with 0 showing the case disabled. If beacon skipping is
+ * enabled, further determine if DTIM beacons can be skipped, or only
+ * non-DTIM beacons. The following applies: bit 0: station skipping on host
+ * suspend bit 1: station skipping on host awake bit 2: station skipping on
+ * LCD on bit 3: station skipping with multivif bit 4: station skipping with
+ * BT active. bit 8: station skip dtim on host suspend bit 9: station skip
+ * dtim on host awake bit 10: station skip dtim on LCD on bit 11: station
+ * skip dtim on multivif bit 12: station skip dtim with BT active bit 16:
+ * p2p-gc skipping on host suspend bit 17: p2p-gc skipping on host awake bit
+ * 18: p2p-gc skipping on LCD on bit 19: p2p-gc skipping with multivif bit
+ * 20: p2p-gc skipping with BT active bit 24: p2p-gc skip dtim on host
+ * suspend bit 25: p2p-gc skip dtim on host awake bit 26: p2p-gc skip dtim
+ * on LCD on bit 27: p2p-gc skip dtim on multivif bit 28: p2p-gc skip dtim
+ * with BT active
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BEACON_SKIPPING_CONTROL 0x09D9
+
+/*******************************************************************************
+ * NAME : UnifiTogglePowerDomain
+ * PSID : 2522 (0x09DA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Toggle WLAN power domain when entering dorm mode (deep sleep). When
+ * entering deep sleep and this value it true, then the WLAN power domain is
+ * disabled for the deep sleep duration. When false, the power domain is
+ * left turned on. This is to work around issues with WLAN rx, and is
+ * considered temporary until the root cause is found and fixed.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TOGGLE_POWER_DOMAIN 0x09DA
+
+/*******************************************************************************
+ * NAME : UnifiP2PListenIntervalSkippingDtim
+ * PSID : 2523 (0x09DB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : DTIM intervals
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X00000002
+ * DESCRIPTION :
+ * Listen interval of beacons when in single-vif, P2P client power saving
+ * mode, receiving DTIMs and idle mode disabled. No DTIMs are skipped during
+ * MVIF operation. A maximum of (listen interval - 1) beacons are skipped,
+ * which may be less than the number of DTIMs that can be skipped. The value
+ * is a lookup table for DTIM counts. Each 4bits, in LSB order, represent
+ * DTIM1, DTIM2, DTIM3, DTIM4, DTIM5, (unused). This key is only used for
+ * P2P group client. For STA connected to an AP, refer to
+ * unifiListenIntervalSkippingDTIM, PSID=2518.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_P2_PLISTEN_INTERVAL_SKIPPING_DTIM 0x09DB
+
+/*******************************************************************************
+ * NAME : UnifiFragmentationDuration
+ * PSID : 2524 (0x09DC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * A limit on transmission time for a data frame. If the data payload would
+ * take longer than unifiFragmentationDuration to transmit, UniFi will
+ * attempt to fragment the frame to ensure that the data portion of each
+ * fragment is within the limit. The limit imposed by the fragmentation
+ * threshold is also respected, and no more than 16 fragments may be
+ * generated. If the value is zero no limit is imposed. The value may be
+ * changed dynamically during connections. Note that the limit is a
+ * guideline and may not always be respected. In particular, the data rate
+ * is finalised after fragmentation in order to ensure responsiveness to
+ * conditions, the calculation is not performed to high accuracy, and octets
+ * added during encryption are not included in the duration calculation.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FRAGMENTATION_DURATION 0x09DC
+
+/*******************************************************************************
+ * NAME : UnifiIdleModeLiteEnabled
+ * PSID : 2526 (0x09DE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables Idle Mode Lite, if softAP is active, and there has been no
+ * activity for a time. Idle mode lite should not be active if host has sent
+ * a command to change key. This mib value will be runtime (post-wlan
+ * enable) applied only if "unifiRameUpdateMibs" mib is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IDLE_MODE_LITE_ENABLED 0x09DE
+
+/*******************************************************************************
+ * NAME : UnifiIdleModeEnabled
+ * PSID : 2527 (0x09DF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enables Idle Mode, if single vif station is active or there is no vif,
+ * and there has been no activity for a time. This mib value will be runtime
+ * (post-wlan enable) applied only if "unifiRameUpdateMibs" mib is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IDLE_MODE_ENABLED 0x09DF
+
+/*******************************************************************************
+ * NAME : UnifiDtimWaitTimeout
+ * PSID : 2529 (0x09E1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50000
+ * DESCRIPTION :
+ * If UniFi is in power save and receives a Traffic Indication Map from its
+ * associated access point with a DTIM indication, it will wait a maximum
+ * time given by this attribute for succeeding broadcast or multicast
+ * traffic, or until it receives such traffic with the 'more data'
+ * flag clear. Any reception of broadcast or multicast traffic with the
+ * 'more data' flag set, or any reception of unicast data, resets
+ * the timeout. The timeout can be turned off by setting the value to zero;
+ * in that case UniFi will remain awake indefinitely waiting for broadcast
+ * or multicast data. Otherwise, the value should be larger than that of
+ * unifiPowerSaveExtraListenTime.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DTIM_WAIT_TIMEOUT 0x09E1
+
+/*******************************************************************************
+ * NAME : UnifiListenIntervalMaxTime
+ * PSID : 2530 (0x09E2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * Maximum length of time, in Time Units (1TU = 1024us), that can be
+ * used as a beacon listen interval. This will limit how many beacons may be
+ * skipped, and affects the DTIM beacon skipping count; DTIM skipping (if
+ * enabled) will be such that skipped count = (unifiListenIntervalMaxTime /
+ * DTIM_period).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LISTEN_INTERVAL_MAX_TIME 0x09E2
+
+/*******************************************************************************
+ * NAME : UnifiScanMaxProbeTransmitLifetime
+ * PSID : 2531 (0x09E3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 1
+ * MAX : 4294967295
+ * DEFAULT : 64
+ * DESCRIPTION :
+ * In TU. If non-zero, used during active scans as the maximum lifetime for
+ * probe requests. It is the elapsed time after the initial transmission at
+ * which further attempts to transmit the probe are terminated.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SCAN_MAX_PROBE_TRANSMIT_LIFETIME 0x09E3
+
+/*******************************************************************************
+ * NAME : UnifiPowerSaveTransitionPacketThreshold
+ * PSID : 2532 (0x09E4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name:If VIF has
+ * this many packets queued/transmitted/received in last
+ * unifiFastPowerSaveTransitionPeriod then firmware may decide to come out
+ * of aggressive power save mode. This is applicable to STA/CLI and AP/GO
+ * VIFs. Note that this is only a guideline. Firmware internal factors may
+ * override this MIB. Also see unifiTrafficAnalysisPeriod and
+ * unifiAggressivePowerSaveTransitionPeriod.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_POWER_SAVE_TRANSITION_PACKET_THRESHOLD 0x09E4
+
+/*******************************************************************************
+ * NAME : UnifiProbeResponseLifetime
+ * PSID : 2533 (0x09E5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * Lifetime of proberesponse frame in unit of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PROBE_RESPONSE_LIFETIME 0x09E5
+
+/*******************************************************************************
+ * NAME : UnifiProbeResponseMaxRetry
+ * PSID : 2534 (0x09E6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * Number of retries of probe response frame.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PROBE_RESPONSE_MAX_RETRY 0x09E6
+
+/*******************************************************************************
+ * NAME : UnifiTrafficAnalysisPeriod
+ * PSID : 2535 (0x09E7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * Period in TUs over which firmware counts number of packet
+ * transmitted/queued/received to make decisions like coming out of
+ * aggressive power save mode or setting up BlockAck. This is applicable to
+ * STA/CLI and AP/GO VIFs. Note that this is only a guideline. Firmware
+ * internal factors may override this MIB. Also see
+ * unifiPowerSaveTransitionPacketThreshold,
+ * unifiAggressivePowerSaveTransitionPeriod and
+ * unifiTrafficThresholdToSetupBA.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TRAFFIC_ANALYSIS_PERIOD 0x09E7
+
+/*******************************************************************************
+ * NAME : UnifiAggressivePowerSaveTransitionPeriod
+ * PSID : 2536 (0x09E8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * Defines how many unifiExitPowerSavePeriod firmware should wait in which
+ * VIF had received/transmitted/queued less than
+ * unifiPowerSaveTransitionPacketThreshold packets - before entering
+ * aggressive power save mode (when not in aggressive power save mode) This
+ * is applicable to STA/CLI and AP/GO VIFs. Note that this is only a
+ * guideline. Firmware internal factors may override this MIB. Also see
+ * unifiPowerSaveTransitionPacketThreshold and unifiTrafficAnalysisPeriod.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AGGRESSIVE_POWER_SAVE_TRANSITION_PERIOD 0x09E8
+
+/*******************************************************************************
+ * NAME : UnifiActiveTimeAfterMoreBit
+ * PSID : 2537 (0x09E9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 30
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: After seeing
+ * the "more" bit set in a message from the AP, the STA will go to active
+ * mode for this duration of time. After this time, traffic information is
+ * evaluated to determine whether the STA should stay active or go to
+ * powersave. Setting this value to 0 means that the described functionality
+ * is disabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_ACTIVE_TIME_AFTER_MORE_BIT 0x09E9
+
+/*******************************************************************************
+ * NAME : UnifiDefaultDwellTime
+ * PSID : 2538 (0x09EA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Dwell time, in TU, for frames that need a response but have no dwell time
+ * associated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEFAULT_DWELL_TIME 0x09EA
+
+/*******************************************************************************
+ * NAME : UnifiVhtCapabilities
+ * PSID : 2540 (0x09EC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 12
+ * MAX : 12
+ * DEFAULT : { 0XB1, 0X7A, 0X11, 0X03, 0XFA, 0XFF, 0X00, 0X00, 0XFA, 0XFF, 0X00, 0X00 }
+ * DESCRIPTION :
+ * VHT capabilities of the chip. see SC-503520-SP.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_VHT_CAPABILITIES 0x09EC
+
+/*******************************************************************************
+ * NAME : UnifiMaxVifScheduleDuration
+ * PSID : 2541 (0x09ED)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Default time for which a non-scan VIF can be scheduled. Applies to
+ * multiVIF scenario. Internal firmware logic or BSS state (e.g. NOA) may
+ * cut short the schedule.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAX_VIF_SCHEDULE_DURATION 0x09ED
+
+/*******************************************************************************
+ * NAME : UnifiVifLongIntervalTime
+ * PSID : 2542 (0x09EE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 60
+ * DESCRIPTION :
+ * When the scheduler expects a VIF to schedule for time longer than this
+ * parameter (specified in TUs), then the VIF may come out of powersave.
+ * Only valid for STA VIFs.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_VIF_LONG_INTERVAL_TIME 0x09EE
+
+/*******************************************************************************
+ * NAME : UnifiDisallowSchedRelinquish
+ * PSID : 2543 (0x09EF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * When enabled the VIFs will not relinquish their assigned schedules when
+ * they have nothing left to do.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DISALLOW_SCHED_RELINQUISH 0x09EF
+
+/*******************************************************************************
+ * NAME : UnifiRameDplaneOperationTimeout
+ * PSID : 2544 (0x09F0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : milliseconds
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * Timeout for requests sent from MACRAME to Data Plane. Any value below
+ * 1000ms will be capped at 1000ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RAME_DPLANE_OPERATION_TIMEOUT 0x09F0
+
+/*******************************************************************************
+ * NAME : UnifiDebugKeepRadioOn
+ * PSID : 2545 (0x09F1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Keep the radio on. For debug purposes only. Setting the value to FALSE
+ * means radio on/off functionality will behave normally. Note that setting
+ * this value to TRUE will automatically disable dorm. The intention is
+ * *not* for this value to be changed at runtime.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_KEEP_RADIO_ON 0x09F1
+
+/*******************************************************************************
+ * NAME : UnifiForceFixedDurationSchedule
+ * PSID : 2546 (0x09F2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : TU
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * For schedules with fixed duration e.g. scan, unsync VIF, the schedule
+ * will be forced after this time to avoid VIF starving
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FORCE_FIXED_DURATION_SCHEDULE 0x09F2
+
+/*******************************************************************************
+ * NAME : UnifiRameUpdateMibs
+ * PSID : 2547 (0x09F3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * When this mib is called/toggled MACRAME mibs will be read and compared
+ * with mib values in ramedata.mibs and updated if the value changes
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RAME_UPDATE_MIBS 0x09F3
+
+/*******************************************************************************
+ * NAME : UnifiGoScanAbsenceDuration
+ * PSID : 2548 (0x09F4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : beacon intervals
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 7
+ * DESCRIPTION :
+ * Duration of the Absence time to use when protecting P2PGO VIFs from scan
+ * operations. A value of 0 disables the feature.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GO_SCAN_ABSENCE_DURATION 0x09F4
+
+/*******************************************************************************
+ * NAME : UnifiGoScanAbsencePeriod
+ * PSID : 2549 (0x09F5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : beacon intervals
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 14
+ * DESCRIPTION :
+ * Period of the Absence/Presence times cycles to use when protecting P2PGO
+ * VIFs from scan operations.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_GO_SCAN_ABSENCE_PERIOD 0x09F5
+
+/*******************************************************************************
+ * NAME : UnifiMaxClient
+ * PSID : 2550 (0x09F6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 10
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Restricts the maximum number of associated STAs for SoftAP.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAX_CLIENT 0x09F6
+
+/*******************************************************************************
+ * NAME : UnifiTdlsInP2pActivated
+ * PSID : 2556 (0x09FC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enable TDLS in P2P.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_IN_P2P_ACTIVATED 0x09FC
+
+/*******************************************************************************
+ * NAME : UnifiTdlsActivated
+ * PSID : 2558 (0x09FE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Enable TDLS.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_ACTIVATED 0x09FE
+
+/*******************************************************************************
+ * NAME : UnifiTdlsTpThresholdPktSecs
+ * PSID : 2559 (0x09FF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * Used for "throughput_threshold_pktsecs" of
+ * RAME-MLME-ENABLE-PEER-TRAFFIC-REPORTING.request.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_TP_THRESHOLD_PKT_SECS 0x09FF
+
+/*******************************************************************************
+ * NAME : UnifiTdlsRssiThreshold
+ * PSID : 2560 (0x0A00)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -75
+ * DESCRIPTION :
+ * FW initiated TDLS Discovery/Setup procedure. If the RSSI of a received
+ * TDLS Discovery Response frame is greater than this value, initiate the
+ * TDLS Setup procedure.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_RSSI_THRESHOLD 0x0A00
+
+/*******************************************************************************
+ * NAME : UnifiTdlsMaximumRetry
+ * PSID : 2561 (0x0A01)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_MAXIMUM_RETRY 0x0A01
+
+/*******************************************************************************
+ * NAME : UnifiTdlsTpMonitorSecs
+ * PSID : 2562 (0x0A02)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Measurement period for recording the number of packets sent to a peer
+ * over a TDLS link.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_TP_MONITOR_SECS 0x0A02
+
+/*******************************************************************************
+ * NAME : UnifiTdlsBasicHtMcsSet
+ * PSID : 2563 (0x0A03)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_BASIC_HT_MCS_SET 0x0A03
+
+/*******************************************************************************
+ * NAME : UnifiTdlsBasicVhtMcsSet
+ * PSID : 2564 (0x0A04)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_BASIC_VHT_MCS_SET 0x0A04
+
+/*******************************************************************************
+ * NAME : Dot11TdlsDiscoveryRequestWindow
+ * PSID : 2565 (0x0A05)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Time to gate Discovery Request frame (in DTIM intervals) after
+ * transmitting a Discovery Request frame.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_TDLS_DISCOVERY_REQUEST_WINDOW 0x0A05
+
+/*******************************************************************************
+ * NAME : Dot11TdlsResponseTimeout
+ * PSID : 2566 (0x0A06)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * If a valid Setup Response frame is not received within (seconds), the
+ * initiator STA shall terminate the setup procedure and discard any Setup
+ * Response frames.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_TDLS_RESPONSE_TIMEOUT 0x0A06
+
+/*******************************************************************************
+ * NAME : Dot11TdlsChannelSwitchActivated
+ * PSID : 2567 (0x0A07)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_TDLS_CHANNEL_SWITCH_ACTIVATED 0x0A07
+
+/*******************************************************************************
+ * NAME : UnifiTdlsDesignForTestMode
+ * PSID : 2568 (0x0A08)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_DESIGN_FOR_TEST_MODE 0x0A08
+
+/*******************************************************************************
+ * NAME : UnifiTdlsWiderBandwidthProhibited
+ * PSID : 2569 (0x0A09)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Wider bandwidth prohibited flag.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_WIDER_BANDWIDTH_PROHIBITED 0x0A09
+
+/*******************************************************************************
+ * NAME : UnifiTdlsKeyLifeTimeInterval
+ * PSID : 2577 (0x0A11)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X000FFFFF
+ * DESCRIPTION :
+ * Build the Key Lifetime Interval in the TDLS Setup Request frame.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_KEY_LIFE_TIME_INTERVAL 0x0A11
+
+/*******************************************************************************
+ * NAME : UnifiTdlsTeardownFrameTxTimeout
+ * PSID : 2578 (0x0A12)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 500
+ * DESCRIPTION :
+ * Allowed time in milliseconds for a Teardown frame to be transmitted.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TDLS_TEARDOWN_FRAME_TX_TIMEOUT 0x0A12
+
+/*******************************************************************************
+ * NAME : UnifiWifiSharingEnabled
+ * PSID : 2580 (0x0A14)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables WiFi Sharing feature
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WIFI_SHARING_ENABLED 0x0A14
+
+/*******************************************************************************
+ * NAME : UnifiWiFiSharing5GHzChannel
+ * PSID : 2582 (0x0A16)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 8
+ * MAX : 8
+ * DEFAULT : { 0X00, 0XC0, 0XFF, 0XFF, 0X7F, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Applicable 5GHz Primary Channels mask. Defined in a uint64 represented by
+ * the octet string. First byte of the octet string maps to LSB. Bits 0-13
+ * representing 2.4G channels are always set to 0. Mapping defined in
+ * ChannelisationRules; i.e. Bit 14 maps to channel 36.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WI_FI_SHARING5_GHZ_CHANNEL 0x0A16
+
+/*******************************************************************************
+ * NAME : UnifiWifiSharingChannelSwitchCount
+ * PSID : 2583 (0x0A17)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 3
+ * MAX : 10
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Channel switch announcement count which will be used in the Channel
+ * announcement IE when using wifi sharing
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WIFI_SHARING_CHANNEL_SWITCH_COUNT 0x0A17
+
+/*******************************************************************************
+ * NAME : UnifiChannelAnnouncementCount
+ * PSID : 2584 (0x0A18)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Channel switch announcement count which will be used in the Channel
+ * announcement IE
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CHANNEL_ANNOUNCEMENT_COUNT 0x0A18
+
+/*******************************************************************************
+ * NAME : UnifiRaTestStoredSa
+ * PSID : 2585 (0x0A19)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X00000000
+ * DESCRIPTION :
+ * Test only: Source address of router contained in virtual router
+ * advertisement packet, specified in chapter '6.2 Forward Received RA frame
+ * to Host' in SC-506393-TE
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RA_TEST_STORED_SA 0x0A19
+
+/*******************************************************************************
+ * NAME : UnifiRaTestStoreFrame
+ * PSID : 2586 (0x0A1A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X00000000
+ * DESCRIPTION :
+ * Test only: Virtual router advertisement packet. Specified in chapter '6.2
+ * Forward Received RA frame to Host' in SC-506393-TE
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RA_TEST_STORE_FRAME 0x0A1A
+
+/*******************************************************************************
+ * NAME : Dot11TdlsPeerUapsdBufferStaActivated
+ * PSID : 2587 (0x0A1B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enable TDLS peer U-APSD.
+ *******************************************************************************/
+#define SLSI_PSID_DOT11_TDLS_PEER_UAPSD_BUFFER_STA_ACTIVATED 0x0A1B
+
+/*******************************************************************************
+ * NAME : UnifiProbeResponseLifetimeP2p
+ * PSID : 2600 (0x0A28)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 500
+ * DESCRIPTION :
+ * Lifetime of proberesponse frame in unit of ms for P2P.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PROBE_RESPONSE_LIFETIME_P2P 0x0A28
+
+/*******************************************************************************
+ * NAME : UnifiStaChannelSwitchSlowApActivated
+ * PSID : 2601 (0x0A29)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: ChannelSwitch:
+ * Enable waiting for a slow AP.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_CHANNEL_SWITCH_SLOW_AP_ACTIVATED 0x0A29
+
+/*******************************************************************************
+ * NAME : UnifiStaChannelSwitchSlowApMaxTime
+ * PSID : 2604 (0x0A2C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 70
+ * DESCRIPTION :
+ * ChannelSwitch delay for Slow APs. In Seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_CHANNEL_SWITCH_SLOW_AP_MAX_TIME 0x0A2C
+
+/*******************************************************************************
+ * NAME : UnifiStaChannelSwitchSlowApPollInterval
+ * PSID : 2605 (0x0A2D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * ChannelSwitch polling interval for Slow APs. In Seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_CHANNEL_SWITCH_SLOW_AP_POLL_INTERVAL 0x0A2D
+
+/*******************************************************************************
+ * NAME : UnifiStaChannelSwitchSlowApProcedureTimeoutIncrement
+ * PSID : 2606 (0x0A2E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * ChannelSwitch procedure timeout increment for Slow APs. In Seconds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STA_CHANNEL_SWITCH_SLOW_AP_PROCEDURE_TIMEOUT_INCREMENT 0x0A2E
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanMaxAerials
+ * PSID : 2607 (0x0A2F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Limit the number of Aerials that Scan will use.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_MAX_AERIALS 0x0A2F
+
+/*******************************************************************************
+ * NAME : UnifiApfEnabled
+ * PSID : 2650 (0x0A5A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * It is used to enable or disable Android Packet Filter(APF).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_APF_ENABLED 0x0A5A
+
+/*******************************************************************************
+ * NAME : UnifiApfVersion
+ * PSID : 2651 (0x0A5B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * APF version currently supported by the FW.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_APF_VERSION 0x0A5B
+
+/*******************************************************************************
+ * NAME : UnifiApfMaxSize
+ * PSID : 2652 (0x0A5C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2048
+ * DEFAULT : 1024
+ * DESCRIPTION :
+ * Max size in bytes supported by FW per VIF. Includes both program len and
+ * data len.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_APF_MAX_SIZE 0x0A5C
+
+/*******************************************************************************
+ * NAME : UnifiApfActiveMode
+ * PSID : 2653 (0x0A5D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Indicates if APF is supported in host active mode. Applicable to only
+ * group addressed frames.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_APF_ACTIVE_MODE 0x0A5D
+
+/*******************************************************************************
+ * NAME : UnifiCsrOnlyMibShield
+ * PSID : 4001 (0x0FA1)
+ * PER INTERFACE?: NO
+ * TYPE : unifiCSROnlyMIBShield
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Each element of the MIB has a set of read/write access constraints that
+ * may be applied when the element is accessed by the host. For most
+ * elements the constants are derived from their MAX-ACCESS clauses.
+ * unifiCSROnlyMIBShield controls the access mechanism. If this entry is set
+ * to 'warn', when the host makes an inappropriate access to a MIB
+ * variable (e.g., writing to a 'read-only' entry) then the
+ * firmware attempts to send a warning message to the host, but access is
+ * allowed to the MIB variable. If this entry is set to 'guard'
+ * then inappropriate accesses from the host are prevented. If this entry is
+ * set to 'alarm' then inappropriate accesses from the host are
+ * prevented and the firmware attempts to send warning messages to the host.
+ * If this entry is set to 'open' then no access constraints are
+ * applied and no warnings are issued. Note that certain MIB entries have
+ * further protection schemes. In particular, the MIB prevents the host from
+ * reading some security keys (WEP keys, etc.).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CSR_ONLY_MIB_SHIELD 0x0FA1
+
+/*******************************************************************************
+ * NAME : UnifiPrivateBbbTxFilterConfig
+ * PSID : 4071 (0x0FE7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X17
+ * DESCRIPTION :
+ * This entry is written directly to the BBB_TX_FILTER_CONFIG register. Only
+ * the lower eight bits of this register are implemented. Bits 0-3 are the
+ * 'Tx Gain', bits 6-8 are the 'Tx Delay'. This register
+ * should only be changed by an expert.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PRIVATE_BBB_TX_FILTER_CONFIG 0x0FE7
+
+/*******************************************************************************
+ * NAME : UnifiPrivateSwagcFrontEndGain
+ * PSID : 4075 (0x0FEB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * Gain of the path between chip and antenna when LNA is on.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PRIVATE_SWAGC_FRONT_END_GAIN 0x0FEB
+
+/*******************************************************************************
+ * NAME : UnifiPrivateSwagcFrontEndLoss
+ * PSID : 4076 (0x0FEC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * Loss of the path between chip and antenna when LNA is off.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PRIVATE_SWAGC_FRONT_END_LOSS 0x0FEC
+
+/*******************************************************************************
+ * NAME : UnifiPrivateSwagcExtThresh
+ * PSID : 4077 (0x0FED)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -25
+ * DESCRIPTION :
+ * Signal level at which external LNA will be used for AGC purposes.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PRIVATE_SWAGC_EXT_THRESH 0x0FED
+
+/*******************************************************************************
+ * NAME : UnifiCsrOnlyPowerCalDelay
+ * PSID : 4078 (0x0FEE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Delay applied at each step of the power calibration routine used with an
+ * external PA.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CSR_ONLY_POWER_CAL_DELAY 0x0FEE
+
+/*******************************************************************************
+ * NAME : UnifiRxAgcControl
+ * PSID : 4079 (0x0FEF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 9
+ * MAX : 11
+ * DEFAULT :
+ * DESCRIPTION :
+ * Override the AGC by adjusting the Rx minimum and maximum gains of each
+ * stage. Set requests write the values to a static structure in
+ * mac/hal/halradio/halradio_agc.c. The saved values are written to the Jar
+ * register WLRF_RADIO_AGC_CONFIG2 and to the Night registers
+ * WL_RADIO_AGC_CONFIG2 and WL_RADIO_AGC_CONFIG3. The saved values are also
+ * used to configure the AGC whenever halradio_agc_setup() is called. Get
+ * requests read the values from the static structure in
+ * mac/hal/halradio/halradio_agc.c. AGC enables are not altered. Fixed gain
+ * may be tested by setting the minimums and maximums to the same value.
+ * Version. octet 0 - Version number for this mib. Gain values. Default in
+ * brackets. octet 1 - 5G LNA minimum gain (0). octet 2 - 5G LNA maximum
+ * gain (4). octet 3 - 2G LNA minimum gain (0). octet 4 - 2G LNA maximum
+ * gain (5). octet 5 - Mixer minimum gain (0). octet 6 - Mixer maximum gain
+ * (2). octet 7 - ABB minimum gain (0). octet 8 - ABB maximum gain (27).
+ * octet 9 - Digital minimum gain (0). octet 10 - Digital maximum gain (7).
+ * For Rock / Hopper the saved values are written to the Hopper register
+ * WLRF_RADIO_AGC_CONFIG2_I0, WLRF_RADIO_AGC_CONFIG2_I1 and Rock registers
+ * WL_RADIO_AGC_CONFIG3_I0, WL_RADIO_AGC_CONFIG3_I1 Version. octet 0 -
+ * Version number for this mib. Gain values. Default in brackets. octet 1 -
+ * 5G FE minimum gain (1). octet 2 - 5G FE maximum gain (8). octet 3 - 2G FE
+ * minimum gain (0). octet 4 - 2G FE maximum gain (8). octet 5 - ABB minimum
+ * gain (0). octet 6 - ABB maximum gain (8). octet 7 - Digital minimum gain
+ * (0). octet 8 - Digital maximum gain (17).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_AGC_CONTROL 0x0FEF
+
+/*******************************************************************************
+ * NAME : UnifiWapiQosMask
+ * PSID : 4130 (0x1022)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 15
+ * DESCRIPTION :
+ * Forces the WAPI encryption hardware to use the QoS mask specified. This
+ * MIB value will be updated if the "unifiRameUpdateMibs" MIB is toggled
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WAPI_QOS_MASK 0x1022
+
+/*******************************************************************************
+ * NAME : UnifiWmmStallEnable
+ * PSID : 4139 (0x102B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Enable
+ * workaround to stall WMM traffic if the admitted time has been used up;
+ * used for certification.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WMM_STALL_ENABLE 0x102B
+
+/*******************************************************************************
+ * NAME : UnifiRaaTxHostRate
+ * PSID : 4148 (0x1034)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 16385
+ * DESCRIPTION :
+ * Fixed TX rate set by Host. Ideally this should be done by the driver. 0
+ * means "host did not specify any rate".
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RAA_TX_HOST_RATE 0x1034
+
+/*******************************************************************************
+ * NAME : UnifiFallbackShortFrameRetryDistribution
+ * PSID : 4149 (0x1035)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 6
+ * MAX : 5
+ * DEFAULT : {0X3, 0X2, 0X2, 0X2, 0X1, 0X0}
+ * DESCRIPTION :
+ * Configure the retry distribution for fallback for short frames octet 0 -
+ * Number of retries for starting rate. octet 1 - Number of retries for next
+ * rate. octet 2 - Number of retries for next rate. octet 3 - Number of
+ * retries for next rate. octet 4 - Number of retries for next rate. octet 5
+ * - Number of retries for last rate. If 0 is written to an entry then the
+ * retries for that rate will be the short retry limit minus the sum of the
+ * retries for each rate above that entry (e.g. 15 - 5). Therefore, this
+ * should always be the value for octet 4. Also, when the starting rate has
+ * short guard enabled, the number of retries in octet 1 will be used and
+ * for the next rate in the fallback table (same MCS value, but with sgi
+ * disabled) octet 0 number of retries will be used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FALLBACK_SHORT_FRAME_RETRY_DISTRIBUTION 0x1035
+
+/*******************************************************************************
+ * NAME : UnifiRxthroughputlow
+ * PSID : 4150 (0x1036)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 37500000
+ * DESCRIPTION :
+ * Lower threshold for number of bytes received in a second - default value
+ * based on 300Mbps
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RXTHROUGHPUTLOW 0x1036
+
+/*******************************************************************************
+ * NAME : UnifiRxthroughputhigh
+ * PSID : 4151 (0x1037)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 50000000
+ * DESCRIPTION :
+ * Upper threshold for number of bytes received in a second - default value
+ * based on 400Mbps
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RXTHROUGHPUTHIGH 0x1037
+
+/*******************************************************************************
+ * NAME : UnifiSetFixedAmpduAggregationSize
+ * PSID : 4152 (0x1038)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * A non-zero value defines the max number of MPDUs that an AMPDU can have.
+ * A 0 value tells FW to manage the aggregation size.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SET_FIXED_AMPDU_AGGREGATION_SIZE 0x1038
+
+/*******************************************************************************
+ * NAME : UnifiThroughputDebugReportInterval
+ * PSID : 4153 (0x1039)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * dataplane reports throughput diag report every this interval in msec. 0
+ * means to disable this report.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_THROUGHPUT_DEBUG_REPORT_INTERVAL 0x1039
+
+/*******************************************************************************
+ * NAME : UnifiPreEbrtWindow
+ * PSID : 4171 (0x104B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 100
+ * DESCRIPTION :
+ * Latest time before the expected beacon reception time that UniFi will
+ * turn on its radio in order to receive the beacon. Reducing this value can
+ * reduce UniFi power consumption when using low power modes, however a
+ * value which is too small may cause beacons to be missed, requiring the
+ * radio to remain on for longer periods to ensure reception of the
+ * subsequent beacon.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PRE_EBRT_WINDOW 0x104B
+
+/*******************************************************************************
+ * NAME : UnifiPostEbrtWindow
+ * PSID : 4173 (0x104D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 2000
+ * DESCRIPTION :
+ * Minimum time after the expected beacon reception time that UniFi will
+ * continue to listen for the beacon in an infrastructure BSS before timing
+ * out. Reducing this value can reduce UniFi power consumption when using
+ * low power modes, however a value which is too small may cause beacons to
+ * be missed, requiring the radio to remain on for longer periods to ensure
+ * reception of the subsequent beacon.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_POST_EBRT_WINDOW 0x104D
+
+/*******************************************************************************
+ * NAME : UnifiPsPollThreshold
+ * PSID : 4179 (0x1053)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 30
+ * DESCRIPTION :
+ * PS Poll threshold. When Unifi chip is configured for normal power save
+ * mode and when access point does not respond to PS-Poll requests, then a
+ * fault will be generated on non-zero PS Poll threshold indicating mode has
+ * been switched from power save to fast power save. Ignored PS Poll count
+ * is given as the fault argument.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PS_POLL_THRESHOLD 0x1053
+
+/*******************************************************************************
+ * NAME : UnifiSableContainerSizeConfiguration
+ * PSID : 5000 (0x1388)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 3
+ * MAX : 3
+ * DEFAULT : {0X64}
+ * DESCRIPTION :
+ * Sable Container Size Configuration Sable WLAN reserved memory size is
+ * determined by the host. Sable TLV containers are allocated from this WLAN
+ * reserved area. Each container has different requirement on its size. For
+ * example, frame logging or IQ capture would be very greedy, requesting
+ * most of available memory. But some just need fixed size, but not large.
+ * To cope with such requirements, each container size is configured with
+ * the following rules: 1. To allocate a certain percentage of the whole
+ * wlan reserved area, put the percentage in hex format. For example,
+ * 0x28(=40) means 40% of reserved area will be assigned. The number
+ * 0x64(=100) is specially treated that all remaining space will be assigned
+ * after all the other containers are first served. 2. To request (n * 2048)
+ * bytes, put (100 + n) value in hex format. For example, 0x96 (= 150) means
+ * 50 * 2048 = 102400 bytes. Here are the list of containers: - octet 0 -
+ * WTLV_CONTAINER_ID_DPLANE_FRAME_LOG
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SABLE_CONTAINER_SIZE_CONFIGURATION 0x1388
+
+/*******************************************************************************
+ * NAME : UnifiSableFrameLogMode
+ * PSID : 5001 (0x1389)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 2
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Sable Frame Logging mode - 0: disable frame logging - 1: enable frame
+ * logging always, regardless of CPU resource state - 2: dynamically enable
+ * frame logging base on CPU resource. If CPU too busy, frame logging is
+ * disabled. Logging is enabled when CPU resource gets recovered.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SABLE_FRAME_LOG_MODE 0x1389
+
+/*******************************************************************************
+ * NAME : UnifiSableFrameLogCpuThresPercent
+ * PSID : 5002 (0x138A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 95
+ * DESCRIPTION :
+ * CPU target in percent. When CPU usage is higher than this target, frame
+ * logging will be disabled by firmware. Firmware will check if CPU resource
+ * is recovered every 1 second. If CPU resource recovered, then frame
+ * logging is re-enabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SABLE_FRAME_LOG_CPU_THRES_PERCENT 0x138A
+
+/*******************************************************************************
+ * NAME : UnifiSableFrameLogCpuOverheadPercent
+ * PSID : 5003 (0x138B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * Expected CPU overhead introduced by frame logging.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SABLE_FRAME_LOG_CPU_OVERHEAD_PERCENT 0x138B
+
+/*******************************************************************************
+ * NAME : UnifiDebugSvcModeStackHighWaterMark
+ * PSID : 5010 (0x1392)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read the SVC mode stack high water mark in bytes
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_SVC_MODE_STACK_HIGH_WATER_MARK 0x1392
+
+/*******************************************************************************
+ * NAME : UnifiOverrideEdcaParamBe
+ * PSID : 5023 (0x139F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 4
+ * MAX : 4
+ * DEFAULT : { 0X03, 0XA4, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Override the BE EDCA parameters. octet 0 - AIFS octet 1 - [7:4] ECW MAX
+ * [3:0] ECW MIN octet 2 ~ 3 - TXOP[7:0] TXOP[15:8] in 32 usec units for
+ * both non-HT and HT connections.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_EDCA_PARAM_BE 0x139F
+
+/*******************************************************************************
+ * NAME : UnifiOverrideEdcaParamBeEnable
+ * PSID : 5024 (0x13A0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Override the STA BE EDCA parameters using the values in
+ * unifiOverrideEDCAParamBE; used for certification.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OVERRIDE_EDCA_PARAM_BE_ENABLE 0x13A0
+
+/*******************************************************************************
+ * NAME : UnifiPanicSubSystemControl
+ * PSID : 5026 (0x13A2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * PANIC levels for WLAN SubSystems. Panic level is used to filter Panic
+ * sent to the host. 4 levels of Panic per subsystem are available
+ * (FAILURE_LEVEL_T): a. 0 FATAL - Always reported to host b. 1 ERROR c. 2
+ * WARNING d. 3 DEBUG NOTE: If Panic level of a subsystem is configured to
+ * FATAL, all the Panics within that subsystem configured to FATAL will be
+ * effective, panics with ERROR, WARNING and Debug level will be converted
+ * to faults. If Panic level of a subsystem is configured to WARNING, all
+ * the panics within that subsystem configured to FATAL, ERROR and WARNING
+ * will be issued to host, panics with Debug level will be converted to
+ * faults.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PANIC_SUB_SYSTEM_CONTROL 0x13A2
+
+/*******************************************************************************
+ * NAME : UnifiFaultEnable
+ * PSID : 5027 (0x13A3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Send Fault to host state.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FAULT_ENABLE 0x13A3
+
+/*******************************************************************************
+ * NAME : UnifiFaultSubSystemControl
+ * PSID : 5028 (0x13A4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Fault levels for WLAN SubSystems. Fault level is used to filter faults
+ * sent to the host. 4 levels of faults per subsystem are available
+ * (FAILURE_LEVEL_T): a. 0 ERROR b. 1 WARNING c. 2 INFO_1 d. 3 INFO_2
+ * Modifying Fault Levels at run time: 1. Set the fault level for the
+ * subsystems in unifiFaultConfigTable 2. Set unifiFaultEnable NOTE: If
+ * fault level of a subsystem is configured to ERROR, all the faults within
+ * that subsystem configured to ERROR will only be issued to host, faults
+ * with WARNING, INFO_1 and INFO_2 level will be converted to debug message
+ * If fault level of a subsystem is configured to WARNING, all the faults
+ * within that subsystem configured to ERROR and WARNING will be issued to
+ * host, faults with INFO_1 and INFO_2 level will be converted to debug
+ * message
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FAULT_SUB_SYSTEM_CONTROL 0x13A4
+
+/*******************************************************************************
+ * NAME : UnifiDebugModuleControl
+ * PSID : 5029 (0x13A5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Debug Module levels for all modules. Module debug level is used to filter
+ * debug messages sent to the host. Only 6 levels of debug per module are
+ * available: a. -1 No debug created. b. 0 Debug if compiled in. Should not
+ * cause Buffer Full in normal testing. c. 1 - 3 Levels to allow sensible
+ * setting of the .hcf file while running specific tests or debugging d. 4
+ * Debug will harm normal execution due to excessive levels or processing
+ * time required. Only used in emergency debugging. Additional control for
+ * FSM transition and FSM signals logging is provided. Debug module level
+ * and 2 boolean flags are encoded within a uint16: Function | Is sending
+ * FSM signals | Is sending FSM transitions | Is sending FSM Timers |
+ * Reserved | Module level (signed int)
+ * -_-_-_-_-_+-_-_-_-_-_-_-_-_-_-_-_-_-_+-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_+-_-_-_-_-_-_-_-_-_-_-_-_-_+-_-_-_-_-_-_+-_-_-_-_-_-_-_-_-_-_-_-_-_- Bits | 15 | 14 | 13 | 12 - 8 | 7 - 0 Note: 0x00FF disables any debug for a module 0xE004 enables all debug for a module
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_MODULE_CONTROL 0x13A5
+
+/*******************************************************************************
+ * NAME : UnifiTxUsingLdpcEnabled
+ * PSID : 5030 (0x13A6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * LDPC will be used to code packets, for transmit only. If disabled, chip
+ * will not send LDPC coded packets even if peer supports it. To advertise
+ * reception of LDPC coded packets,enable bit 0 of unifiHtCapabilities, and
+ * bit 4 of unifiVhtCapabilities.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_USING_LDPC_ENABLED 0x13A6
+
+/*******************************************************************************
+ * NAME : UnifiTxSettings
+ * PSID : 5031 (0x13A7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_SETTINGS 0x13A7
+
+/*******************************************************************************
+ * NAME : UnifiTxGainSettings
+ * PSID : 5032 (0x13A8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter gain settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_GAIN_SETTINGS 0x13A8
+
+/*******************************************************************************
+ * NAME : UnifiTxAntennaConnectionLossFrequency
+ * PSID : 5033 (0x13A9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 3940
+ * MAX : 12000
+ * DEFAULT :
+ * DESCRIPTION :
+ * The corresponding set of frequency values for
+ * TxAntennaConnectionLossTable
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_ANTENNA_CONNECTION_LOSS_FREQUENCY 0x13A9
+
+/*******************************************************************************
+ * NAME : UnifiTxAntennaConnectionLoss
+ * PSID : 5034 (0x13AA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * The set of Antenna Connection Loss value (qdB), which is used for
+ * TPO/EIRP conversion
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_ANTENNA_CONNECTION_LOSS 0x13AA
+
+/*******************************************************************************
+ * NAME : UnifiTxAntennaMaxGainFrequency
+ * PSID : 5035 (0x13AB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 3940
+ * MAX : 12000
+ * DEFAULT :
+ * DESCRIPTION :
+ * The corresponding set of frequency values for TxAntennaMaxGain
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_ANTENNA_MAX_GAIN_FREQUENCY 0x13AB
+
+/*******************************************************************************
+ * NAME : UnifiTxAntennaMaxGain
+ * PSID : 5036 (0x13AC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * The set of Antenna Max Gain value (qdB), which is used for TPO/EIRP
+ * conversion
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_ANTENNA_MAX_GAIN 0x13AC
+
+/*******************************************************************************
+ * NAME : UnifiRxExternalGainFrequency
+ * PSID : 5037 (0x13AD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 3940
+ * MAX : 12000
+ * DEFAULT :
+ * DESCRIPTION :
+ * The set of RSSI offset value
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_EXTERNAL_GAIN_FREQUENCY 0x13AD
+
+/*******************************************************************************
+ * NAME : UnifiRxExternalGain
+ * PSID : 5038 (0x13AE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * The table giving frequency-dependent RSSI offset value
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_EXTERNAL_GAIN 0x13AE
+
+/*******************************************************************************
+ * NAME : UnifiTxSgI20Enabled
+ * PSID : 5040 (0x13B0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * SGI 20MHz will be used to code packets for transmit only. If disabled,
+ * chip will not send SGI 20MHz packets even if peer supports it. To
+ * advertise reception of SGI 20MHz packets, enable bit 5 of
+ * unifiHtCapabilities.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_SG_I20_ENABLED 0x13B0
+
+/*******************************************************************************
+ * NAME : UnifiTxSgI40Enabled
+ * PSID : 5041 (0x13B1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * SGI 40MHz will be used to code packets, for transmit only. If disabled,
+ * chip will not send SGI 40MHz packets even if peer supports it. To
+ * advertise reception of SGI 40MHz packets, enable bit 6 of
+ * unifiHtCapabilities.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_SG_I40_ENABLED 0x13B1
+
+/*******************************************************************************
+ * NAME : UnifiTxSgI80Enabled
+ * PSID : 5042 (0x13B2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * SGI 80MHz will be used to code packets, for transmit only. If disabled,
+ * chip will not send SGI 80MHz packets even if peer supports it. To
+ * advertise reception of SGI 80MHz packets, enable bit 5 of
+ * unifiVhtCapabilities.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_SG_I80_ENABLED 0x13B2
+
+/*******************************************************************************
+ * NAME : UnifiTxSgI160Enabled
+ * PSID : 5043 (0x13B3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * SGI 160/80+80MHz will be used to code packets, for transmit only. If
+ * disabled, chip will not send SGI 160/80+80MHz packets even if peer
+ * supports it. To advertise reception of SGI 160/80+80MHz packets, enable
+ * bit 6 of unifiVhtCapabilities.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_SG_I160_ENABLED 0x13B3
+
+/*******************************************************************************
+ * NAME : UnifiMacAddressRandomisation
+ * PSID : 5044 (0x13B4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Golden Certification MIB don't delete, change PSID or name: Enable MAC
+ * Address Randomisation for Probe Requests when scanning.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION 0x13B4
+
+/*******************************************************************************
+ * NAME : UnifiMacAddressRandomisationMask
+ * PSID : 5047 (0x13B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 6
+ * MAX : 6
+ * DEFAULT : { 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * FW randomises MAC Address bits that have a corresponding bit set to 0 in
+ * the MAC Mask for Probe Requests. This excludes U/L and I/G bits which
+ * will be set to Local and Individual respectively.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_ADDRESS_RANDOMISATION_MASK 0x13B7
+
+/*******************************************************************************
+ * NAME : UnifiWipsActivated
+ * PSID : 5050 (0x13BA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables Wips.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_WIPS_ACTIVATED 0x13BA
+
+/*******************************************************************************
+ * NAME : UnifiRfTestModeEnabled
+ * PSID : 5054 (0x13BE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Test only: Set to true when running in RF Test mode. Setting this MIB key
+ * to true prevents setting mandatory HT MCS Rates.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RF_TEST_MODE_ENABLED 0x13BE
+
+/*******************************************************************************
+ * NAME : UnifiTxPowerDetectorResponse
+ * PSID : 5055 (0x13BF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter detector response settings. 2G settings
+ * before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_POWER_DETECTOR_RESPONSE 0x13BF
+
+/*******************************************************************************
+ * NAME : UnifiTxDetectorTemperatureCompensation
+ * PSID : 5056 (0x13C0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter detector temperature compensation settings.
+ * 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_DETECTOR_TEMPERATURE_COMPENSATION 0x13C0
+
+/*******************************************************************************
+ * NAME : UnifiTxDetectorFrequencyCompensation
+ * PSID : 5057 (0x13C1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter detector frequency compensation settings.
+ * 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_DETECTOR_FREQUENCY_COMPENSATION 0x13C1
+
+/*******************************************************************************
+ * NAME : UnifiTxOpenLoopTemperatureCompensation
+ * PSID : 5058 (0x13C2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter open-loop temperature compensation
+ * settings. 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_OPEN_LOOP_TEMPERATURE_COMPENSATION 0x13C2
+
+/*******************************************************************************
+ * NAME : UnifiTxOpenLoopFrequencyCompensation
+ * PSID : 5059 (0x13C3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter open-loop frequency compensation settings.
+ * 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_OPEN_LOOP_FREQUENCY_COMPENSATION 0x13C3
+
+/*******************************************************************************
+ * NAME : UnifiTxOfdmSelect
+ * PSID : 5060 (0x13C4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 4
+ * MAX : 8
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter OFDM selection settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_OFDM_SELECT 0x13C4
+
+/*******************************************************************************
+ * NAME : UnifiTxDigGain
+ * PSID : 5061 (0x13C5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 16
+ * MAX : 48
+ * DEFAULT :
+ * DESCRIPTION :
+ * Specify gain specific modulation power optimisation.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_DIG_GAIN 0x13C5
+
+/*******************************************************************************
+ * NAME : UnifiChipTemperature
+ * PSID : 5062 (0x13C6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : celsius
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read the chip temperature as seen by WLAN radio firmware.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CHIP_TEMPERATURE 0x13C6
+
+/*******************************************************************************
+ * NAME : UnifiBatteryVoltage
+ * PSID : 5063 (0x13C7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * UNITS : millivolt
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Battery voltage
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BATTERY_VOLTAGE 0x13C7
+
+/*******************************************************************************
+ * NAME : UnifiTxOobConstraints
+ * PSID : 5064 (0x13C8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * OOB constraints table.
+ * | octets | description |
+ * |--------+-------------|
+ * | 0     | DPD applicability bitmask: 0 = no DPD, 1 = dynamic DPD,
+ *         2 = static DPD, 3 = applies to both static and dynamic DPD |
+ * | 1-2   | Bitmask indicating which regulatory domains this rule applies to
+ *         FCC=bit0, ETSI=bit1, JAPAN=bit2 |
+ * | 3-4   | Bitmask indicating which band edges this rule applies to
+ *         RICE_BAND_EDGE_ISM_24G_LOWER = bit 0, RICE_BAND_EDGE_ISM_24G_UPPER = bit 1,
+ *         RICE_BAND_EDGE_U_NII_1_LOWER = bit 2, RICE_BAND_EDGE_U_NII_1_UPPER = bit 3,
+ *         RICE_BAND_EDGE_U_NII_2_LOWER = bit 4, RICE_BAND_EDGE_U_NII_2_UPPER = bit 5,
+ *         RICE_BAND_EDGE_U_NII_2E_LOWER = bit 6, RICE_BAND_EDGE_U_NII_2E_UPPER = bit 7,
+ *         RICE_BAND_EDGE_U_NII_3_LOWER = bit 8, RICE_BAND_EDGE_U_NII_3_UPPER = bit 9 |
+ * | 5     | Bitmask indicating which modulation types this rule applies to
+ *         (LSB/b0=DSSS/CCK, b1= OFDM0 modulation group, b2= OFDM1 modulation group) |
+ * | 6     | Bitmask indicating which channel bandwidths this rule applies to
+ *         (LSB/b0=20MHz, b1=40MHz, b2=80MHz) |
+ * | 7     | Minimum distance to nearest band edge in 500 kHz units for which
+ *         this constraint becomes applicable. |
+ * | 8     | Maximum power (EIRP) for this particular constraint - specified
+ *         in units of quarter dBm. |
+ * | 9-32  | Spectral shaping configuration to be used for this particular
+ *         constraint. The value is specific to the radio hardware and should
+ *         only be altered under advice from the IC supplier. |
+ * | 33-56 | Tx DPD Spectral shaping configuration to be used for this
+ *         particular constraint. The value is specific to the radio hardware
+ *         and should only be altered under advice from the IC supplier. |
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_OOB_CONSTRAINTS 0x13C8
+
+/*******************************************************************************
+ * NAME : UnifiTxPaGainDpdTemperatureCompensation
+ * PSID : 5066 (0x13CA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter PA gain for DPD temperature compensation
+ * settings. 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_PA_GAIN_DPD_TEMPERATURE_COMPENSATION 0x13CA
+
+/*******************************************************************************
+ * NAME : UnifiTxPaGainDpdFrequencyCompensation
+ * PSID : 5067 (0x13CB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter PA gain for DPD frequency compensation
+ * settings. 2G settings before 5G. Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_PA_GAIN_DPD_FREQUENCY_COMPENSATION 0x13CB
+
+/*******************************************************************************
+ * NAME : UnifiTxPowerTrimConfig
+ * PSID : 5072 (0x13D0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 25
+ * MAX : 25
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter power trim settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_POWER_TRIM_CONFIG 0x13D0
+
+/*******************************************************************************
+ * NAME : UnifiForceShortSlotTime
+ * PSID : 5080 (0x13D8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * If set to true, forces FW to use short slot times for all VIFs.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FORCE_SHORT_SLOT_TIME 0x13D8
+
+/*******************************************************************************
+ * NAME : UnifiTxGainStepSettings
+ * PSID : 5081 (0x13D9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * Hardware specific transmitter gain step settings. 2G settings before 5G.
+ * Increasing order within band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TX_GAIN_STEP_SETTINGS 0x13D9
+
+/*******************************************************************************
+ * NAME : UnifiDebugDisableRadioNannyActions
+ * PSID : 5082 (0x13DA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Bitmap to disable the radio nanny actions. B0==radio 0, B1==radio 1
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_DISABLE_RADIO_NANNY_ACTIONS 0x13DA
+
+/*******************************************************************************
+ * NAME : UnifiRxCckModemSensitivity
+ * PSID : 5083 (0x13DB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 6
+ * MAX : 6
+ * DEFAULT :
+ * DESCRIPTION :
+ * Specify values of CCK modem sensitivity for scan, normal and low
+ * sensitivity.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_CCK_MODEM_SENSITIVITY 0x13DB
+
+/*******************************************************************************
+ * NAME : UnifiDpdPerBandwidth
+ * PSID : 5084 (0x13DC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 63
+ * DESCRIPTION :
+ * Bitmask to enable Digital Pre-Distortion per bandwidth
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_PER_BANDWIDTH 0x13DC
+
+/*******************************************************************************
+ * NAME : UnifiBbVersion
+ * PSID : 5085 (0x13DD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Baseband chip version number determined by reading BBIC version
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_BB_VERSION 0x13DD
+
+/*******************************************************************************
+ * NAME : UnifiRfVersion
+ * PSID : 5086 (0x13DE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * RF chip version number determined by reading RFIC version
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RF_VERSION 0x13DE
+
+/*******************************************************************************
+ * NAME : UnifiReadHardwareCounter
+ * PSID : 5087 (0x13DF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read a value from a hardware packet counter for a specific radio_id and
+ * return it. The firmware will convert the radio_id to the associated
+ * mac_instance.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_READ_HARDWARE_COUNTER 0x13DF
+
+/*******************************************************************************
+ * NAME : UnifiClearRadioTrimCache
+ * PSID : 5088 (0x13E0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Clears the radio trim cache. The parameter is ignored.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CLEAR_RADIO_TRIM_CACHE 0x13E0
+
+/*******************************************************************************
+ * NAME : UnifiRadioTxSettingsRead
+ * PSID : 5089 (0x13E1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read value from Tx settings.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_TX_SETTINGS_READ 0x13E1
+
+/*******************************************************************************
+ * NAME : UnifiModemSgiOffset
+ * PSID : 5090 (0x13E2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Overwrite SGI sampling offset. Indexed by Band and Bandwidth. Defaults
+ * currently defined in fw.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MODEM_SGI_OFFSET 0x13E2
+
+/*******************************************************************************
+ * NAME : UnifiRadioTxPowerOverride
+ * PSID : 5091 (0x13E3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT :
+ * DESCRIPTION :
+ * Option in radio code to override the power requested by the upper layer
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_TX_POWER_OVERRIDE 0x13E3
+
+/*******************************************************************************
+ * NAME : UnifiRxRadioCsMode
+ * PSID : 5092 (0x13E4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * OBSOLETE. Configures RX Radio CS detection for 80MHz bandwidth.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_RADIO_CS_MODE 0x13E4
+
+/*******************************************************************************
+ * NAME : UnifiRxPriEnergyDetThreshold
+ * PSID : 5093 (0x13E5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * OBSOLETE. Energy detection threshold for primary 20MHz channel.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_PRI_ENERGY_DET_THRESHOLD 0x13E5
+
+/*******************************************************************************
+ * NAME : UnifiRxSecEnergyDetThreshold
+ * PSID : 5094 (0x13E6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * OBSOLETE. Energy detection threshold for secondary 20MHz channel.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_SEC_ENERGY_DET_THRESHOLD 0x13E6
+
+/*******************************************************************************
+ * NAME : UnifiAgcThresholds
+ * PSID : 5095 (0x13E7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * AGC Thresholds settings
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_AGC_THRESHOLDS 0x13E7
+
+/*******************************************************************************
+ * NAME : UnifiRadioRxSettingsRead
+ * PSID : 5096 (0x13E8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read value from Rx settings.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_RX_SETTINGS_READ 0x13E8
+
+/*******************************************************************************
+ * NAME : UnifiStaticDpdGain
+ * PSID : 5097 (0x13E9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 11
+ * MAX : 27
+ * DEFAULT :
+ * DESCRIPTION :
+ * Specify modulation-specific gains for static DPD optimisation.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_STATIC_DPD_GAIN 0x13E9
+
+/*******************************************************************************
+ * NAME : UnifiIqBufferSize
+ * PSID : 5098 (0x13EA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Buffer Size for IQ capture to allow CATs to read it.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_IQ_BUFFER_SIZE 0x13EA
+
+/*******************************************************************************
+ * NAME : UnifiNarrowbandCcaThresholds
+ * PSID : 5099 (0x13EB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT :
+ * DESCRIPTION :
+ * The narrowband CCA ED thresholds so that the CCA-ED triggers at the
+ * regulatory value of -62 dBm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NARROWBAND_CCA_THRESHOLDS 0x13EB
+
+/*******************************************************************************
+ * NAME : UnifiRadioCcaDebug
+ * PSID : 5100 (0x13EC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read values from Radio CCA settings.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_CCA_DEBUG 0x13EC
+
+/*******************************************************************************
+ * NAME : UnifiCcacsThresh
+ * PSID : 5101 (0x13ED)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Configures CCA CS thresholds.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CCACS_THRESH 0x13ED
+
+/*******************************************************************************
+ * NAME : UnifiCcaMasterSwitch
+ * PSID : 5102 (0x13EE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X00540050
+ * DESCRIPTION :
+ * Enables CCA
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CCA_MASTER_SWITCH 0x13EE
+
+/*******************************************************************************
+ * NAME : UnifiRxSyncCcaCfg
+ * PSID : 5103 (0x13EF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Configures CCA per 20 MHz sub-band.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RX_SYNC_CCA_CFG 0x13EF
+
+/*******************************************************************************
+ * NAME : UnifiMacCcaBusyTime
+ * PSID : 5104 (0x13F0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Counts the time CCA indicates busy
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_CCA_BUSY_TIME 0x13F0
+
+/*******************************************************************************
+ * NAME : UnifiMacSecChanClearTime
+ * PSID : 5105 (0x13F1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Configures PIFS
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MAC_SEC_CHAN_CLEAR_TIME 0x13F1
+
+/*******************************************************************************
+ * NAME : UnifiDpdDebug
+ * PSID : 5106 (0x13F2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Debug MIBs for DPD
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPD_DEBUG 0x13F2
+
+/*******************************************************************************
+ * NAME : UnifiNarrowbandCcaDebug
+ * PSID : 5107 (0x13F3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read values from Radio CCA settings.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NARROWBAND_CCA_DEBUG 0x13F3
+
+/*******************************************************************************
+ * NAME : UnifiNannyTemperatureReportDelta
+ * PSID : 5109 (0x13F5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * A temperature difference, in degrees Celsius, above which the nanny
+ * process will generate a temperature update debug word
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NANNY_TEMPERATURE_REPORT_DELTA 0x13F5
+
+/*******************************************************************************
+ * NAME : UnifiNannyTemperatureReportInterval
+ * PSID : 5110 (0x13F6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 200
+ * DESCRIPTION :
+ * A report interval in milliseconds where temperature is checked
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NANNY_TEMPERATURE_REPORT_INTERVAL 0x13F6
+
+/*******************************************************************************
+ * NAME : UnifiRadioRxDcocDebugIqValue
+ * PSID : 5111 (0x13F7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * RX DCOC debug testing. Allows user to override LUT index IQ values in
+ * combination with unifiRadioRxDcocDebug. This MIB sets IQ value that all
+ * LUT index Is and Qs get set to.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_RX_DCOC_DEBUG_IQ_VALUE 0x13F7
+
+/*******************************************************************************
+ * NAME : UnifiRadioRxDcocDebug
+ * PSID : 5112 (0x13F8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * RX DCOC debug testing. Allows user to override LUT index IQ values in
+ * combination with unifiRadioRxDcocDebugIqValue. This MIB enables the
+ * feature.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_RX_DCOC_DEBUG 0x13F8
+
+/*******************************************************************************
+ * NAME : UnifiNannyRetrimDpdMod
+ * PSID : 5113 (0x13F9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Bitmap to selectively enable nanny retrim of DPD per modulation.
+ * B0==OFDM0, B1==OFDM1, B2==CCK
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NANNY_RETRIM_DPD_MOD 0x13F9
+
+/*******************************************************************************
+ * NAME : UnifiDisableDpdSubIteration
+ * PSID : 5114 (0x13FA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * For Engineering debug use only.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DISABLE_DPD_SUB_ITERATION 0x13FA
+
+/*******************************************************************************
+ * NAME : UnifiRttCapabilities
+ * PSID : 5300 (0x14B4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 8
+ * MAX : 8
+ * DEFAULT : { 0X01, 0X01, 0X01, 0X01, 0X00, 0X07, 0X1C, 0X32 }
+ * DESCRIPTION :
+ * RTT capabilities of the chip. see SC-506960-SW.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RTT_CAPABILITIES 0x14B4
+
+/*******************************************************************************
+ * NAME : UnifiFtmMinDeltaFrames
+ * PSID : 5301 (0x14B5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 255
+ * DEFAULT : 5
+ * DESCRIPTION :
+ * Default minimum time between consecutive FTM frames in units of 100 us.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_MIN_DELTA_FRAMES 0x14B5
+
+/*******************************************************************************
+ * NAME : UnifiFtmPerBurst
+ * PSID : 5302 (0x14B6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 1
+ * MAX : 31
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * Requested FTM frames per burst.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_PER_BURST 0x14B6
+
+/*******************************************************************************
+ * NAME : UnifiFtmBurstDuration
+ * PSID : 5303 (0x14B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 2
+ * MAX : 11
+ * DEFAULT : 6
+ * DESCRIPTION :
+ * Indicates the duration of a burst instance. Values 0, 1, 12-14 are
+ * reserved; for [2..11], the burst duration is defined as 250 x 2^(N-2),
+ * and 15 means "no preference".
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_BURST_DURATION 0x14B7
+
+/*******************************************************************************
+ * NAME : UnifiFtmNumOfBurstsExponent
+ * PSID : 5304 (0x14B8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 14
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of burst instances is 2^(Number of Bursts Exponent), value 15
+ * means "no preference".
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_NUM_OF_BURSTS_EXPONENT 0x14B8
+
+/*******************************************************************************
+ * NAME : UnifiFtmAsapModeEnabled
+ * PSID : 5305 (0x14B9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enable support for ASAP mode in FTM
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_ASAP_MODE_ENABLED 0x14B9
+
+/*******************************************************************************
+ * NAME : UnifiFtmResponderEnabled
+ * PSID : 5306 (0x14BA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable support for FTM Responder
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_RESPONDER_ENABLED 0x14BA
+
+/*******************************************************************************
+ * NAME : UnifiFtmDefaultSessionEstablishmentTimeout
+ * PSID : 5307 (0x14BB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 10
+ * MAX : 100
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Default timeout for session establishment in units of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_DEFAULT_SESSION_ESTABLISHMENT_TIMEOUT 0x14BB
+
+/*******************************************************************************
+ * NAME : UnifiFtmDefaultGapBeforeFirstBurstPerResponder
+ * PSID : 5308 (0x14BC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_DEFAULT_GAP_BEFORE_FIRST_BURST_PER_RESPONDER 0x14BC
+
+/*******************************************************************************
+ * NAME : UnifiFtmDefaultGapBetweenBursts
+ * PSID : 5309 (0x14BD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 5
+ * MAX : 50
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Interval between consecutive Bursts. In units of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_DEFAULT_GAP_BETWEEN_BURSTS 0x14BD
+
+/*******************************************************************************
+ * NAME : UnifiFtmDefaultTriggerDelay
+ * PSID : 5310 (0x14BE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Delay to account for differences in time between Initiator and Responder
+ * at start of the Burst. In units of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_DEFAULT_TRIGGER_DELAY 0x14BE
+
+/*******************************************************************************
+ * NAME : UnifiFtmDefaultEndBurstDelay
+ * PSID : 5311 (0x14BF)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Delay to account for differences in time between Initiator and Responder
+ * at the end of the Burst. In units of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_DEFAULT_END_BURST_DELAY 0x14BF
+
+/*******************************************************************************
+ * NAME : UnifiFtmRequestValidationEnabled
+ * PSID : 5312 (0x14C0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable Validation for FTM Add Range request RTT_Configs
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_REQUEST_VALIDATION_ENABLED 0x14C0
+
+/*******************************************************************************
+ * NAME : UnifiFtmResponseValidationEnabled
+ * PSID : 5313 (0x14C1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Enable Validation for FTM Response
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_RESPONSE_VALIDATION_ENABLED 0x14C1
+
+/*******************************************************************************
+ * NAME : UnifiFtmUseResponseParameters
+ * PSID : 5314 (0x14C2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Use Response burst parameters for burst
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_USE_RESPONSE_PARAMETERS 0x14C2
+
+/*******************************************************************************
+ * NAME : UnifiFtmInitialResponseTimeout
+ * PSID : 5315 (0x14C3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 10
+ * MAX : 100
+ * DEFAULT : 50
+ * DESCRIPTION :
+ * Default timeout for FtmInitialResponse in units of ms.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_FTM_INITIAL_RESPONSE_TIMEOUT 0x14C3
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanContinueIfMoreThanXAps
+ * PSID : 5410 (0x1522)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Part of Scan Algorithm: Keep scanning on a channel with lots of APs.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_CONTINUE_IF_MORE_THAN_XAPS 0x1522
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanStopIfLessThanXNewAps
+ * PSID : 5411 (0x1523)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 4
+ * DESCRIPTION :
+ * Part of Scan Algorithm: Stop scanning on a channel if less than X NEW APs
+ * are seen.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_STOP_IF_LESS_THAN_XNEW_APS 0x1523
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanMultiVifEnabled
+ * PSID : 5412 (0x1524)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Part of Scan Algorithm: Enable support for Multi Vif channel times.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_MULTI_VIF_ENABLED 0x1524
+
+/*******************************************************************************
+ * NAME : UnifiMlmeScanNewAlgorithmEnabled
+ * PSID : 5413 (0x1525)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Part of Scan Algorithm: Enable support for the new algorithm.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_MLME_SCAN_NEW_ALGORITHM_ENABLED 0x1525
+
+/*******************************************************************************
+ * NAME : UnifiTpcMinPower2Gmimo
+ * PSID : 6011 (0x177B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 52
+ * DESCRIPTION :
+ * Minimum power for 2.4GHz MIMO interface when RSSI is above
+ * unifiTPCMinPowerRSSIThreshold (quarter dBm). Should be greater than
+ * dot11PowerCapabilityMinImplemented.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MIN_POWER2_GMIMO 0x177B
+
+/*******************************************************************************
+ * NAME : UnifiTpcMinPower5Gmimo
+ * PSID : 6012 (0x177C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 52
+ * DESCRIPTION :
+ * Minimum power for 5 GHz MIMO interface when RSSI is above
+ * unifiTPCMinPowerRSSIThreshold (quarter dBm). Should be greater than
+ * dot11PowerCapabilityMinImplemented.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MIN_POWER5_GMIMO 0x177C
+
+/*******************************************************************************
+ * NAME : UnifiLnaControlEnabled
+ * PSID : 6013 (0x177D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enable dynamic switching of the LNA based on RSSI.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LNA_CONTROL_ENABLED 0x177D
+
+/*******************************************************************************
+ * NAME : UnifiLnaControlRssiThresholdLower
+ * PSID : 6014 (0x177E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -40
+ * DESCRIPTION :
+ * The lower RSSI threshold for dynamic switching of the LNA. If the RSSI
+ * avg of received frames is lower than this value for all scheduled VIFs,
+ * then the external LNA will be enabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LNA_CONTROL_RSSI_THRESHOLD_LOWER 0x177E
+
+/*******************************************************************************
+ * NAME : UnifiLnaControlRssiThresholdUpper
+ * PSID : 6015 (0x177F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -30
+ * DESCRIPTION :
+ * The upper RSSI threshold for dynamic switching of the LNA. If the RSSI
+ * avg of received frames is higher than this value for all scheduled VIFs,
+ * then the external LNA will be disabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LNA_CONTROL_RSSI_THRESHOLD_UPPER 0x177F
+
+/*******************************************************************************
+ * NAME : UnifiPowerIsGrip
+ * PSID : 6016 (0x1780)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Whether to use the Grip power cap instead of the SAR cap.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_POWER_IS_GRIP 0x1780
+
+/*******************************************************************************
+ * NAME : UnifiTpcEnabled
+ * PSID : 6019 (0x1783)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT :
+ * DESCRIPTION :
+ * Deprecated. Golden Certification MIB: do not delete or change its PSID or name.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_ENABLED 0x1783
+
+/*******************************************************************************
+ * NAME : UnifiCurrentTxpowerLevel
+ * PSID : 6020 (0x1784)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : qdBm
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum air power for the VIF. Values are expressed in 0.25 dBm units.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_CURRENT_TXPOWER_LEVEL 0x1784
+
+/*******************************************************************************
+ * NAME : UnifiUserSetTxpowerLevel
+ * PSID : 6021 (0x1785)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 127
+ * DESCRIPTION :
+ * Test only: Maximum User Set Tx Power (quarter dBm). Enable it in
+ * unifiTestTxPowerEnable.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_USER_SET_TXPOWER_LEVEL 0x1785
+
+/*******************************************************************************
+ * NAME : UnifiTpcMaxPowerRssiThreshold
+ * PSID : 6022 (0x1786)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -55
+ * DESCRIPTION :
+ * Below the (dBm) threshold, switch to the max power allowed by regulatory,
+ * if it has been previously reduced due to unifiTPCMinPowerRSSIThreshold.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD 0x1786
+
+/*******************************************************************************
+ * NAME : UnifiTpcMinPowerRssiThreshold
+ * PSID : 6023 (0x1787)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -45
+ * DESCRIPTION :
+ * Above the (dBm) threshold, switch to the minimum hardware supported -
+ * capped by unifiTPCMinPower2G/unifiTPCMinPower5G. A Zero value reverts the
+ * power to a default state.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MIN_POWER_RSSI_THRESHOLD 0x1787
+
+/*******************************************************************************
+ * NAME : UnifiTpcMinPower2g
+ * PSID : 6024 (0x1788)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 52
+ * DESCRIPTION :
+ * Minimum power for 2.4GHz SISO interface when RSSI is above
+ * unifiTPCMinPowerRSSIThreshold (quarter dBm). Should be greater than
+ * dot11PowerCapabilityMinImplemented.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MIN_POWER2G 0x1788
+
+/*******************************************************************************
+ * NAME : UnifiTpcMinPower5g
+ * PSID : 6025 (0x1789)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : 40
+ * DESCRIPTION :
+ * Minimum power for 5 GHz SISO interface when RSSI is above
+ * unifiTPCMinPowerRSSIThreshold (quarter dBm). Should be greater than
+ * dot11PowerCapabilityMinImplemented.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_MIN_POWER5G 0x1789
+
+/*******************************************************************************
+ * NAME : UnifiSarBackoff
+ * PSID : 6026 (0x178A)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Max power values per band per index(quarter dBm).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SAR_BACKOFF 0x178A
+
+/*******************************************************************************
+ * NAME : UnifiTpcUseAfterConnectRsp
+ * PSID : 6027 (0x178B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Use TPC only after MlmeConnect_Rsp has been received from the Host i.e.
+ * not during initial connection exchanges (EAPOL/DHCP operation) as RSSI
+ * readings might be inaccurate.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TPC_USE_AFTER_CONNECT_RSP 0x178B
+
+/*******************************************************************************
+ * NAME : UnifiRadioLpRxRssiThresholdLower
+ * PSID : 6028 (0x178C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -75
+ * DESCRIPTION :
+ * The lower RSSI threshold for switching between low power rx and normal
+ * rx. If the RSSI avg of received frames is lower than this value for a
+ * VIF, then that VIF will vote against using low-power radio RX. Low power
+ * rx could negatively influence the receiver sensitivity.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_LP_RX_RSSI_THRESHOLD_LOWER 0x178C
+
+/*******************************************************************************
+ * NAME : UnifiRadioLpRxRssiThresholdUpper
+ * PSID : 6029 (0x178D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * UNITS : dBm
+ * MIN : -128
+ * MAX : 127
+ * DEFAULT : -65
+ * DESCRIPTION :
+ * The upper RSSI threshold for switching between low power rx and normal
+ * rx. If the RSSI avg of received frames is higher than this value for a
+ * VIF, then that VIF will vote in favour of using low-power radio RX. Low
+ * power RX could negatively influence the receiver sensitivity.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_RADIO_LP_RX_RSSI_THRESHOLD_UPPER 0x178D
+
+/*******************************************************************************
+ * NAME : UnifiTestTxPowerEnable
+ * PSID : 6032 (0x1790)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X03DD
+ * DESCRIPTION :
+ * Test only: Bitfield to enable Control Plane Tx Power processing.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TEST_TX_POWER_ENABLE 0x1790
+
+/*******************************************************************************
+ * NAME : UnifiLteCoexMaxPowerRssiThreshold
+ * PSID : 6033 (0x1791)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -55
+ * DESCRIPTION :
+ * Below this (dBm) threshold, switch to max power allowed by regulatory, if
+ * it has been previously reduced due to unifiTPCMinPowerRSSIThreshold.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_COEX_MAX_POWER_RSSI_THRESHOLD 0x1791
+
+/*******************************************************************************
+ * NAME : UnifiLteCoexMinPowerRssiThreshold
+ * PSID : 6034 (0x1792)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -45
+ * DESCRIPTION :
+ * Above this (dBm) threshold, switch to minimum hardware supported - capped
+ * by unifiTPCMinPower2G/unifiTPCMinPower5G. Zero reverts the power to its
+ * default state.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_COEX_MIN_POWER_RSSI_THRESHOLD 0x1792
+
+/*******************************************************************************
+ * NAME : UnifiLteCoexPowerReduction
+ * PSID : 6035 (0x1793)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 127
+ * DEFAULT : 24
+ * DESCRIPTION :
+ * When LTE Coex Power Reduction provisions are met, impose a power cap of
+ * the regulatory domain less the amount specified by this MIB (quarter dB)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LTE_COEX_POWER_REDUCTION 0x1793
+
+/*******************************************************************************
+ * NAME : UnifiPmfAssociationComebackTimeDelta
+ * PSID : 6050 (0x17A2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 1100
+ * DESCRIPTION :
+ * Timeout interval, in TU, for the TimeOut IE in the SA Query request
+ * frame.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_PMF_ASSOCIATION_COMEBACK_TIME_DELTA 0x17A2
+
+/*******************************************************************************
+ * NAME : UnifiTestTspecHack
+ * PSID : 6060 (0x17AC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Test only: Hack to allow in-house tspec testing
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TEST_TSPEC_HACK 0x17AC
+
+/*******************************************************************************
+ * NAME : UnifiTestTspecHackValue
+ * PSID : 6061 (0x17AD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Test only: Saved dialog number of tspec request action frame from the
+ * Host
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TEST_TSPEC_HACK_VALUE 0x17AD
+
+/*******************************************************************************
+ * NAME : UnifiDebugInstantDelivery
+ * PSID : 6069 (0x17B5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Instant delivery control of the debug messages when set to true. Note:
+ * will not allow the host to suspend when set to True.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_INSTANT_DELIVERY 0x17B5
+
+/*******************************************************************************
+ * NAME : UnifiDebugEnable
+ * PSID : 6071 (0x17B7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Debug to host state. Debug is either sent to the host or it isn't.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEBUG_ENABLE 0x17B7
+
+/*******************************************************************************
+ * NAME : UnifiDPlaneDebug
+ * PSID : 6073 (0x17B9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X03
+ * DESCRIPTION :
+ * Bit mask for turning on individual debug entities in the data_plane that
+ * if enabled affect throughput. See DPLP_DEBUG_ENTITIES_T in
+ * dplane_dplp_debug.h for bits. Default of 0x3 means dplp and ampdu logs
+ * are enabled.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DPLANE_DEBUG 0x17B9
+
+/*******************************************************************************
+ * NAME : UnifiNanEnabled
+ * PSID : 6080 (0x17C0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Enables Neighbour Aware Networking (NAN)
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_ENABLED 0x17C0
+
+/*******************************************************************************
+ * NAME : UnifiNanBeaconCapabilities
+ * PSID : 6081 (0x17C1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X0720
+ * DESCRIPTION :
+ * The 16-bit field follows the coding of IEEE 802.11 Capability
+ * Information.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_BEACON_CAPABILITIES 0x17C1
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxConcurrentClusters
+ * PSID : 6082 (0x17C2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 1
+ * DESCRIPTION :
+ * Maximum number of concurrent NAN clusters supported.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_CLUSTERS 0x17C2
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxConcurrentPublishes
+ * PSID : 6083 (0x17C3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Maximum number of concurrent NAN Publish instances supported.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_PUBLISHES 0x17C3
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxConcurrentSubscribes
+ * PSID : 6084 (0x17C4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 2
+ * DESCRIPTION :
+ * Maximum number of concurrent NAN Subscribe instances supported.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_SUBSCRIBES 0x17C4
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxServiceNameLength
+ * PSID : 6085 (0x17C5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 255
+ * DESCRIPTION :
+ * Maximum Service Name Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_SERVICE_NAME_LENGTH 0x17C5
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxMatchFilterLength
+ * PSID : 6086 (0x17C6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 255
+ * DESCRIPTION :
+ * Maximum Match Filter Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_MATCH_FILTER_LENGTH 0x17C6
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxTotalMatchFilterLength
+ * PSID : 6087 (0x17C7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 255
+ * DESCRIPTION :
+ * Maximum Total Match Filter Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_TOTAL_MATCH_FILTER_LENGTH 0x17C7
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxServiceSpecificInfoLength
+ * PSID : 6088 (0x17C8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 255
+ * DESCRIPTION :
+ * Maximum Service Specific Info Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_SERVICE_SPECIFIC_INFO_LENGTH 0x17C8
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxVsaDataLength
+ * PSID : 6089 (0x17C9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum Vendor Specific Attribute Data Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_VSA_DATA_LENGTH 0x17C9
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxMeshDataLength
+ * PSID : 6090 (0x17CA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum Mesh Data Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_MESH_DATA_LENGTH 0x17CA
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxNdiInterfaces
+ * PSID : 6091 (0x17CB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum NDI Interfaces.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_NDI_INTERFACES 0x17CB
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxNdpSessions
+ * PSID : 6092 (0x17CC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum NDP Sessions.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_NDP_SESSIONS 0x17CC
+
+/*******************************************************************************
+ * NAME : UnifiNanMaxAppInfoLength
+ * PSID : 6093 (0x17CD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Maximum App Info Length.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NAN_MAX_APP_INFO_LENGTH 0x17CD
+
+/*******************************************************************************
+ * NAME : ReservedForNan
+ * PSID : 6094 (0x17CE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 3
+ * DESCRIPTION :
+ * Enables low power radio RX for idle STA and AP VIFs respectively.
+ * Setting/clearing bit 0 enables/disables LP RX for (all) STA/Cli VIFs.
+ * Setting/clearing bit 1 enables/disables LP RX for AP/GO VIFs. This mib
+ * value will be updated if "unifiRameUpdateMibs" mib is toggled.
+ *******************************************************************************/
+#define SLSI_PSID_RESERVED_FOR_NAN 0x17CE
+
+/*******************************************************************************
+ * NAME : hutsReadWriteDataElementInt32
+ * PSID : 6100 (0x17D4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 1000
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of int32 type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_DATA_ELEMENT_INT32 0x17D4
+
+/*******************************************************************************
+ * NAME : hutsReadWriteDataElementBoolean
+ * PSID : 6101 (0x17D5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of boolean type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_DATA_ELEMENT_BOOLEAN 0x17D5
+
+/*******************************************************************************
+ * NAME : hutsReadWriteDataElementOctetString
+ * PSID : 6102 (0x17D6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 9
+ * MAX : 9
+ * DEFAULT : { 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of octet string
+ * type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_DATA_ELEMENT_OCTET_STRING 0x17D6
+
+/*******************************************************************************
+ * NAME : hutsReadWriteTableInt16Row
+ * PSID : 6103 (0x17D7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry table of int16
+ * type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_TABLE_INT16_ROW 0x17D7
+
+/*******************************************************************************
+ * NAME : hutsReadWriteTableOctetStringRow
+ * PSID : 6104 (0x17D8)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 6
+ * MAX : 73
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry table of octet
+ * string type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_TABLE_OCTET_STRING_ROW 0x17D8
+
+/*******************************************************************************
+ * NAME : hutsReadWriteRemoteProcedureCallInt32
+ * PSID : 6105 (0x17D9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT : 0X000A0001
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Remote Procedure call read/write entry of int32
+ * type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_REMOTE_PROCEDURE_CALL_INT32 0x17D9
+
+/*******************************************************************************
+ * NAME : hutsReadWriteRemoteProcedureCallOctetString
+ * PSID : 6107 (0x17DB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 144
+ * MAX : 144
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Remote Procedure call read/write entry of octet
+ * string type.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_REMOTE_PROCEDURE_CALL_OCTET_STRING 0x17DB
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiInt16
+ * PSID : 6108 (0x17DC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT : -55
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of int16 type via
+ * internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_INT16 0x17DC
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiUint16
+ * PSID : 6109 (0x17DD)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 0X0730
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of unsigned int16
+ * type via internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_UINT16 0x17DD
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiUint32
+ * PSID : 6110 (0x17DE)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * UNITS : microseconds
+ * MIN : 0
+ * MAX : 2147483647
+ * DEFAULT : 30000
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of unsigned int32
+ * type via internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_UINT32 0x17DE
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiInt64
+ * PSID : 6111 (0x17DF)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : -9223372036854775808
+ * MAX : 9223372036854775807
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of int64 type via
+ * internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_INT64 0x17DF
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiBoolean
+ * PSID : 6112 (0x17E0)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : TRUE
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of boolean type
+ * via internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_BOOLEAN 0x17E0
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiOctetString
+ * PSID : 6113 (0x17E1)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 8
+ * MAX : 8
+ * DEFAULT : { 0X00, 0X18, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Data element read/write entry of octet string
+ * type via internal API.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_OCTET_STRING 0x17E1
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixedSizeTableRow
+ * PSID : 6114 (0x17E2)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : 0
+ * MAX : 100
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Fixed size table rows of int16 type via
+ * internal API
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIXED_SIZE_TABLE_ROW 0x17E2
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiVarSizeTableRow
+ * PSID : 6115 (0x17E3)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 6
+ * MAX : 73
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Variable size table rows of octet string type
+ * via internal API
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_VAR_SIZE_TABLE_ROW 0x17E3
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixSizeTableKey1Row
+ * PSID : 6116 (0x17E4)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Fixed size table rows of int16 type via
+ * internal API
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIX_SIZE_TABLE_KEY1_ROW 0x17E4
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixSizeTableKey2Row
+ * PSID : 6117 (0x17E5)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Reserved for HUTS tests - Fixed size table rows of int16 type via
+ * internal API
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIX_SIZE_TABLE_KEY2_ROW 0x17E5
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixVarSizeTableKey1Row
+ * PSID : 6118 (0x17E6)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The values stored in hutsReadWriteInternalAPIFixVarSizeTableKeys
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIX_VAR_SIZE_TABLE_KEY1_ROW 0x17E6
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixVarSizeTableKey2Row
+ * PSID : 6119 (0x17E7)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * The values stored in hutsReadWriteInternalAPIFixVarSizeTableKeys
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIX_VAR_SIZE_TABLE_KEY2_ROW 0x17E7
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiFixSizeTableKeyRow
+ * PSID : 6120 (0x17E8)
+ * PER INTERFACE?: NO
+ * TYPE : INT64
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * The number of received MPDUs discarded by the CCMP decryption algorithm.
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_FIX_SIZE_TABLE_KEY_ROW 0x17E8
+
+/*******************************************************************************
+ * NAME : hutsReadWriteInternalApiVarSizeTableKeyRow
+ * PSID : 6121 (0x17E9)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 144
+ * MAX : 144
+ * DEFAULT :
+ * DESCRIPTION :
+ * Write a DPD LUT entry
+ *******************************************************************************/
+#define SLSI_PSID_HUTS_READ_WRITE_INTERNAL_API_VAR_SIZE_TABLE_KEY_ROW 0x17E9
+
+/*******************************************************************************
+ * NAME : UnifiTestScanNoMedium
+ * PSID : 6122 (0x17EA)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
 * Test only: Stop Scan from using the Medium to allow throughput testing.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_TEST_SCAN_NO_MEDIUM 0x17EA
+
+/*******************************************************************************
+ * NAME : UnifiDualBandConcurrency
+ * PSID : 6123 (0x17EB)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Identify whether the chip supports dualband concurrency or not (RSDB vs.
+ * VSDB). Set in the respective platform htf file.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DUAL_BAND_CONCURRENCY 0x17EB
+
+/*******************************************************************************
+ * NAME : UnifiLoggerMaxDelayedEvents
+ * PSID : 6124 (0x17EC)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT : 10
+ * DESCRIPTION :
+ * Maximum number of events to keep when host is suspended.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_LOGGER_MAX_DELAYED_EVENTS 0x17EC
+
+/*******************************************************************************
+ * NAME : UnifiRegulatoryParameters
+ * PSID : 8011 (0x1F4B)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 3
+ * MAX : 73
+ * DEFAULT :
+ * DESCRIPTION :
+ * Regulatory parameters. Each row of the table contains the regulatory
+ * rules for one country: octet 0 - first character of alpha2 code for
+ * country octet 1 - second character of alpha2 code for country octet 2 -
+ * regulatory domain for the country Followed by the rules for the country,
+ * numbered 0..n in this description octet 7n+3 - LSB start frequency octet
+ * 7n+4 - MSB start frequency octet 7n+5 - LSB end frequency octet 7n+6 -
+ * MSB end frequency octet 7n+7 - maximum bandwidth octet 7n+8 - maximum
+ * power octet 7n+9 - rule flags
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_REGULATORY_PARAMETERS 0x1F4B
+
+/*******************************************************************************
+ * NAME : UnifiSupportedChannels
+ * PSID : 8012 (0x1F4C)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 20
+ * DEFAULT : {0X01,0X0D,0X24,0X04,0X34,0X04,0X64,0X0C,0X95,0X05}
+ * DESCRIPTION :
+ * Supported 20MHz channel centre frequency grouped in sub-bands. For each
+ * sub-band: starting channel number, followed by number of channels
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_SUPPORTED_CHANNELS 0x1F4C
+
+/*******************************************************************************
+ * NAME : UnifiDefaultCountry
+ * PSID : 8013 (0x1F4D)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 3
+ * MAX : 3
+ * DEFAULT : {0X30, 0X30, 0X20}
+ * DESCRIPTION :
 * Host sets the default country code.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEFAULT_COUNTRY 0x1F4D
+
+/*******************************************************************************
+ * NAME : UnifiCountryList
+ * PSID : 8014 (0x1F4E)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 2
+ * MAX : 270
+ * DEFAULT : (Too Large to display)
+ * DESCRIPTION :
 * Defines the ordered list of countries present in unifiRegulatoryTable.
 * Each country is coded as 2 ASCII characters. If unifiRegulatoryTable is
 * modified - e.g. a country is added, deleted, or its relative location
 * changed - this list has to be updated as well.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_COUNTRY_LIST 0x1F4E
+
+/*******************************************************************************
+ * NAME : UnifiOperatingClassParamters
+ * PSID : 8015 (0x1F4F)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 1
+ * MAX : 73
+ * DEFAULT :
+ * DESCRIPTION :
+ * Supported Operating Class parameters.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_OPERATING_CLASS_PARAMTERS 0x1F4F
+
+/*******************************************************************************
+ * NAME : UnifiVifCountry
+ * PSID : 8016 (0x1F50)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Per VIf: Each VIF updates its Country Code for the Host to read
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_VIF_COUNTRY 0x1F50
+
+/*******************************************************************************
+ * NAME : UnifiNoCellMaxPower
+ * PSID : 8017 (0x1F51)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiInt16
+ * MIN : -32768
+ * MAX : 32767
+ * DEFAULT :
+ * DESCRIPTION :
+ * Max power values for included channels (quarter dBm).
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NO_CELL_MAX_POWER 0x1F51
+
+/*******************************************************************************
+ * NAME : UnifiNoCellIncludedChannels
+ * PSID : 8018 (0x1F52)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint8
+ * MIN : 8
+ * MAX : 8
+ * DEFAULT : { 0X00, 0X18, 0X00, 0X00, 0X00, 0X00, 0X00, 0X00 }
+ * DESCRIPTION :
+ * Channels applicable. Defined in a uint64 represented by the octet string.
+ * First byte of the octet string maps to LSB. Bit 0 maps to channel 1.
+ * Mapping defined in ChannelisationRules.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_NO_CELL_INCLUDED_CHANNELS 0x1F52
+
+/*******************************************************************************
+ * NAME : UnifiRegDomVersion
+ * PSID : 8019 (0x1F53)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint16
+ * MIN : 0
+ * MAX : 65535
+ * DEFAULT :
+ * DESCRIPTION :
+ * Regulatory domain version encoded into 2 bytes, major version as MSB and
+ * minor version as LSB
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_REG_DOM_VERSION 0x1F53
+
+/*******************************************************************************
+ * NAME : UnifiDefaultCountryWithoutCH12CH13
+ * PSID : 8020 (0x1F54)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiBool
+ * MIN : 0
+ * MAX : 1
+ * DEFAULT : FALSE
+ * DESCRIPTION :
+ * Update the default country code to ensure CH12 and CH13 are not used.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_DEFAULT_COUNTRY_WITHOUT_CH12_CH13 0x1F54
+
+/*******************************************************************************
+ * NAME : UnifiReadReg
+ * PSID : 8051 (0x1F73)
+ * PER INTERFACE?: NO
+ * TYPE : SlsiUint32
+ * MIN : 0
+ * MAX : 4294967295
+ * DEFAULT :
+ * DESCRIPTION :
+ * Read value from a register and return it.
+ *******************************************************************************/
+#define SLSI_PSID_UNIFI_READ_REG 0x1F73
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* SLSI_MIB_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd and its Licensors.
+ * All rights reserved.
+ *
+ *****************************************************************************/
+
+#include "debug.h"
+#include "mib_text_convert.h"
+
+#define CSR_TOUPPER(character) (((character) >= 'a') && ((character) <= 'z') ? ((character) - 0x20) : (character))
+
+static inline bool CsrIsSpace(u8 c)
+{
+ switch (c) {
+ case '\t':
+ case '\n':
+ case '\f':
+ case '\r':
+ case ' ':
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline char *CsrStrDup(const char *string)
+{
+ if (string != NULL) {
+ u32 len = strlen(string) + 1;
+
+ return memcpy(kmalloc(len, GFP_KERNEL), string, len);
+ }
+ return NULL;
+}
+
/* Case-insensitive comparison of up to @count characters of two strings.
 * Returns 0 when equal, 1 when @string1 sorts after @string2 (ignoring
 * case), -1 when before. Stops early at the end of @string1. */
static int CsrStrNICmp(const char *string1,
		       const char *string2,
		       size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		int c1 = CSR_TOUPPER(string1[i]);
		int c2 = CSR_TOUPPER(string2[i]);

		if (c1 != c2)
			return (c1 > c2) ? 1 : -1;
		if (string1[i] == '\0')
			break;
	}
	return 0;
}
+
+static bool CsrHexStrToUint8(const char *string, u8 *returnValue)
+{
+ u16 currentIndex = 0;
+
+ *returnValue = 0;
+ if ((string[currentIndex] == '0') && (CSR_TOUPPER(string[currentIndex + 1]) == 'X'))
+ string += 2;
+ if (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ while (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ *returnValue = (u8)(*returnValue * 16 + (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) ? string[currentIndex] - '0' : CSR_TOUPPER(string[currentIndex]) - 'A' + 10));
+ currentIndex++;
+ if (currentIndex >= 2)
+ break;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool CsrHexStrToUint16(const char *string, u16 *returnValue)
+{
+ u16 currentIndex = 0;
+
+ *returnValue = 0;
+ if ((string[currentIndex] == '0') && (CSR_TOUPPER(string[currentIndex + 1]) == 'X'))
+ string += 2;
+ if (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ while (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ *returnValue = (u16)(*returnValue * 16 + (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) ? string[currentIndex] - '0' : CSR_TOUPPER(string[currentIndex]) - 'A' + 10));
+ currentIndex++;
+ if (currentIndex >= 4)
+ break;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool CsrHexStrToUint32(const char *string, u32 *returnValue)
+{
+ u16 currentIndex = 0;
+
+ *returnValue = 0;
+ if ((string[currentIndex] == '0') && (CSR_TOUPPER(string[currentIndex + 1]) == 'X'))
+ string += 2;
+ if (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ while (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) || ((CSR_TOUPPER(string[currentIndex]) >= 'A') && (CSR_TOUPPER(string[currentIndex]) <= 'F'))) {
+ *returnValue = *returnValue * 16 + (((string[currentIndex] >= '0') && (string[currentIndex] <= '9')) ? string[currentIndex] - '0' : CSR_TOUPPER(string[currentIndex]) - 'A' + 10);
+ currentIndex++;
+ if (currentIndex >= 8)
+ break;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool CsrWifiMibConvertStrToUint16(const char *str, u16 *returnValue)
+{
+ u16 currentIndex = 0;
+
+ if (str[1] == 'x')
+ return CsrHexStrToUint16(str, returnValue);
+
+ *returnValue = 0;
+ if ((str[currentIndex] >= '0') && (str[currentIndex] <= '9')) {
+ while (str[currentIndex] >= '0' && str[currentIndex] <= '9') {
+ *returnValue *= 10;
+ *returnValue += (u8)str[currentIndex++] - '0';
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool CsrWifiMibConvertStrToUint32(const char *str, u32 *returnValue)
+{
+ u16 currentIndex = 0;
+
+ if (str[1] == 'x')
+ return CsrHexStrToUint32(str, returnValue);
+
+ *returnValue = 0;
+ if ((str[currentIndex] >= '0') && (str[currentIndex] <= '9')) {
+ while (str[currentIndex] >= '0' && str[currentIndex] <= '9') {
+ *returnValue *= 10;
+ *returnValue += (u8)str[currentIndex++] - '0';
+ }
+ return true;
+ }
+ return false;
+}
+
/* Parse one mib text line of the form
 *   <psid>[.<index1>[.<index2>]][=<value>]   (optionally ending in a '#' comment)
 * A line containing '=' is encoded as a SET entry into @mibDataSet; a line
 * without '=' is encoded as a GET request into @mibDataGet.
 * Returns true when the line is blank/comment-only or converts cleanly.
 */
static bool CsrWifiMibConvertTextParseLine(const char *linestr, struct slsi_mib_data *mibDataSet, struct slsi_mib_data *mibDataGet)
{
	struct slsi_mib_entry entry;
	bool result = true;
	size_t equals = 0;	/* position of '=' in trimmed[]; 0 means "not found" */
	size_t dot1 = 0;	/* position of first '.' in trimmed[]; 0 means "not found" */
	size_t dot2 = 0;	/* position of second '.' in trimmed[]; 0 means "not found" */
	size_t trimmedIndex = 0;
	char *trimmed = kmalloc(strlen(linestr) + 1, GFP_KERNEL);	/* NOTE(review): kmalloc result unchecked - OOM would crash below */
	const char *current_char = linestr;
	bool processingStr = false;	/* true while inside a "..." quoted value */

	memset(&entry, 0x00, sizeof(entry));
	/* First pass: copy linestr into trimmed[], dropping whitespace and '#'
	 * comments (outside quotes) and recording separator positions. */
	while (current_char[0] != '\0') {
		if (current_char[0] == '"')
			processingStr = !processingStr;
		if (!processingStr) {
			if (current_char[0] == '#')
				break;
			if (CsrIsSpace((u8)current_char[0])) {
				current_char++;
				continue;
			}
			/* Dots only act as index separators left of '='. */
			if (!equals && (current_char[0] == '.')) {
				if (!dot1) {
					dot1 = trimmedIndex;
				} else if (!dot2) {
					dot2 = trimmedIndex;
				} else {
					SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') only 2 indexes supported", trimmed);
					result = false;
				}
			}
			if (!equals && (current_char[0] == '='))
				equals = trimmedIndex;
		}
		trimmed[trimmedIndex++] = current_char[0];

		current_char++;
	}
	trimmed[trimmedIndex] = '\0';

	/* Blank or comment-only line: nothing to encode, not an error. */
	if (strlen(trimmed) == 0) {
		kfree(trimmed);
		return result;
	}

	if (result) {
		/* NOTE(review): sep is saved, the separator overwritten with '\0'
		 * and then immediately restored, so the temporary termination has
		 * no effect - looks like leftover from an earlier revision. The
		 * psid parse below simply stops at the non-digit separator. */
		char sep = trimmed[dot1 ? dot1 : equals];

		if (dot1 || equals)
			trimmed[dot1 ? dot1 : equals] = '\0';
		trimmed[dot1 ? dot1 : equals] = sep;

		if (!CsrWifiMibConvertStrToUint16(trimmed, &entry.psid)) {
			SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert <psid> failed", trimmed);
			result = false;
		}
		if (dot1 && !CsrWifiMibConvertStrToUint16(&trimmed[dot1 + 1], &entry.index[0])) {
			SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert <index 1> failed", trimmed);
			result = false;
		}
		if (dot2 && !CsrWifiMibConvertStrToUint16(&trimmed[dot2 + 1], &entry.index[1])) {
			SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert <index 2> failed", trimmed);
			result = false;
		}

		/* No '=': encode a GET (type NONE) if a GET buffer was supplied. */
		if (result && !equals && mibDataGet) {
			entry.value.type = SLSI_MIB_TYPE_NONE;
			(void)slsi_mib_encode(mibDataGet, &entry);
		}

		/* '=': decode the right-hand side and encode a SET entry. */
		if (result && equals && mibDataSet) {
			char *data = &trimmed[equals + 1];
			/*SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') psid:%d, index1:%d, index2:%d, data '%s'", trimmed, entry.psid, entry.index[0], entry.index[1], data); */
			if (CsrStrNICmp(data, "true", 4) == 0) {
				entry.value.type = SLSI_MIB_TYPE_BOOL;
				entry.value.u.boolValue = 1;
			} else if (CsrStrNICmp(data, "false", 5) == 0) {
				entry.value.type = SLSI_MIB_TYPE_BOOL;
				entry.value.u.boolValue = 0;
			} else if (data[0] == '"') {
				/* Null Terminated String: the length includes the closing
				 * quote, which is then overwritten with '\0' below.
				 * NOTE(review): CsrStrDup()'s allocation is not checked. */
				entry.value.type = SLSI_MIB_TYPE_OCTET;
				entry.value.u.octetValue.dataLength = (u32)strlen(&data[1]);
				entry.value.u.octetValue.data = (u8 *)CsrStrDup(&data[1]);
				entry.value.u.octetValue.data[entry.value.u.octetValue.dataLength - 1] = '\0';
			} else if (data[0] == '[') {
				/* Octet String: hex byte pairs; assumes a trailing ']'. */
				size_t i;
				u16 octetLen = ((u16)strlen(&data[1]) - 1) / 2;

				entry.value.type = SLSI_MIB_TYPE_OCTET;
				entry.value.u.octetValue.dataLength = octetLen;
				entry.value.u.octetValue.data = kmalloc(entry.value.u.octetValue.dataLength + 1, GFP_KERNEL);
				for (i = 0; i < octetLen; i++)
					if (!CsrHexStrToUint8(&data[1 + (i * 2)], &entry.value.u.octetValue.data[i])) {
						SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert Hex Bytes <data> failed", trimmed);
						result = false;
						break;
					}
				entry.value.u.octetValue.data[octetLen] = '\0'; /* Make sure the Octet Stream is NULL terminated in case it is interpreted as a String */
			} else if (data[0] == '-') {
				/* Negative Integer Value: parse magnitude, then negate. */
				entry.value.type = SLSI_MIB_TYPE_INT;
				if (!CsrWifiMibConvertStrToUint32(&data[1], &entry.value.u.uintValue)) {
					SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert Integer <data> failed", trimmed);
					result = false;
				} else {
					entry.value.u.intValue = (s32)(0 - (u32)entry.value.u.uintValue);
				}
			} else if (!CsrWifiMibConvertStrToUint32(data, &entry.value.u.uintValue)) {
				SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine('%s') Convert Unsigned Integer <data> failed", trimmed);
				result = false;
			} else {
				entry.value.type = SLSI_MIB_TYPE_UINT;
			}
			if (result)
				(void)slsi_mib_encode(mibDataSet, &entry);
			/* slsi_mib_encode() copies octet data, so free our buffer. */
			if (entry.value.type == SLSI_MIB_TYPE_OCTET)
				kfree(entry.value.u.octetValue.data);
		}
	}

	kfree(trimmed);

	return result;
}
+
+static bool CsrWifiMibConvertTextAppend(const char *mibText, struct slsi_mib_data *mibDataSet, struct slsi_mib_data *mibDataGet)
+{
+ bool result = true;
+ const char *lineStart = mibText;
+ const char *lineEnd = mibText;
+
+ if (mibText == NULL)
+ return false;
+
+ while (lineEnd[0] != '\0') {
+ if ((lineEnd[0] == '\n') || (lineEnd[0] == '\r') || (lineEnd[1] == '\0')) {
+ size_t strSize = (size_t)(lineEnd - lineStart);
+
+ if ((lineEnd[1] == '\0'))
+ strSize++;
+ if (strSize > 2) {
+ char *line = kmalloc(strSize + 1, GFP_KERNEL);
+ (void)strncpy(line, lineStart, strSize);
+ line[strSize] = '\0';
+ if (!CsrWifiMibConvertTextParseLine(line, mibDataSet, mibDataGet)) {
+ SLSI_ERR_NODEV("CsrWifiMibConvertTextParseLine() Failed for line '%s'", line);
+ result = false;
+ }
+ kfree(line);
+ }
+ lineEnd++;
+ lineStart = lineEnd;
+ continue;
+ }
+ lineEnd++;
+ }
+
+ return result;
+}
+
+bool CsrWifiMibConvertText(const char *mibText, struct slsi_mib_data *mibDataSet, struct slsi_mib_data *mibDataGet)
+{
+ if (mibDataSet) {
+ mibDataSet->data = NULL;
+ mibDataSet->dataLength = 0;
+ }
+ if (mibDataGet) {
+ mibDataGet->data = NULL;
+ mibDataGet->dataLength = 0;
+ }
+
+ return CsrWifiMibConvertTextAppend(mibText, mibDataSet, mibDataGet);
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd and its Licensors.
+ * All rights reserved.
+ *
+ *****************************************************************************/
+
+#ifndef MIB_TEXT_CONVERT_H__
+#define MIB_TEXT_CONVERT_H__
+
+#include "mib.h"
+
+/*******************************************************************************
+ *
+ * NAME
+ * CsrWifiMibConvertText
+ *
+ * DESCRIPTION
+ * Converts a simple text mib file format into an encoded mib data buffer
+ *
+ * TEXT FILE FORMAT
+ # Comment
+ # <psid><indexes>=<value>
+ #
+ # 123 = 0x01 # Unsigned Integer
+ # 123 = 1 # Unsigned Integer
+ # 123 = -1 # Integer
+ # 123 = true # Boolean (Case Sensitive)
+ # 123 = false # Boolean (Case Sensitive)
+ # 123 = [12183984] # Octet String (Hex Format)
+ # 123 = "ABCDEFGH" # Octet String (Null Terminated Ascii)
+ #
+ # Indexes (Max 2 indexes supported)
+ # 123.1 = 1 # Single Index
+ # 123.1.2 = 1 # Multi Index
+ #
+ # Hex Format (All Unsigned Integer values can use the alternative 0x[0-F] format)
+ # 0xF12E.0x1.0x2 = 0x01 # Unsigned Integer
+ #
+ # RETURN
+ # bool: true = No errors in conversion
+ #
+ *******************************************************************************/
+bool CsrWifiMibConvertText(const char *mibText, struct slsi_mib_data *mibDataSet, struct slsi_mib_data *mibDataGet);
+
+#endif /* MIB_TEXT_CONVERT_H__ */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/delay.h>
+#include <net/cfg80211.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <scsc/scsc_log_collector.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "mlme.h"
+#include "mib.h"
+#include "mgt.h"
+#include "cac.h"
+
+#define SLSI_SCAN_PRIVATE_IE_CHANNEL_LIST_HEADER_LEN 7
+#define SLSI_SCAN_PRIVATE_IE_SSID_FILTER_HEADER_LEN 7
+#define SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE 3
+#define SLSI_CHANN_INFO_HT_SCB 0x0100
+
+#define SLSI_NOA_CONFIG_REQUEST_ID (1)
+#define SLSI_MLME_ARP_DROP_FREE_SLOTS_COUNT 16
+
+static bool missing_cfm_ind_panic = true;
+module_param(missing_cfm_ind_panic, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(missing_cfm_ind_panic, "Panic on missing confirm or indication from the chip");
+
+struct slsi_mlme_rsse {
+ u8 group_cs_count;
+ const u8 *group_cs;
+ u8 pairwise_cs_count;
+ const u8 *pairwise_cs;
+ u8 akm_suite_count;
+ const u8 *akm_suite;
+ u8 pmkid_count;
+ const u8 *pmkid;
+ const u8 *group_mgmt_cs; /* used for PMF*/
+};
+
/* Wait for the confirm signal armed in @sig_wait (cfm_id set by the caller)
 * or until the configured cfm timeout expires. Returns the confirm skb
 * (ownership passes to the caller) or NULL on timeout/loss.
 * Must be called without send_signal_lock held; it takes and releases it.
 */
static struct sk_buff *slsi_mlme_wait_for_cfm(struct slsi_dev *sdev, struct slsi_sig_send *sig_wait)
{
	struct sk_buff *cfm = NULL;
	int tm;

	/* tm == 0 means wait_for_completion_timeout() expired. */
	tm = wait_for_completion_timeout(&sig_wait->completion, msecs_to_jiffies(*sdev->sig_wait_cfm_timeout));
	spin_lock_bh(&sig_wait->send_signal_lock);

	/* Confirm timed out? */
	if (!sig_wait->cfm) {
		SLSI_ERR(sdev, "No cfm(0x%.4X) for req(0x%04X) senderid=0x%x\n", sig_wait->cfm_id, sig_wait->req_id, sig_wait->process_id);
		if (tm == 0) {
			char reason[80];

			WARN(1, "Timeout - confirm 0x%04x not received from chip\n", sig_wait->cfm_id);
			if (missing_cfm_ind_panic) {
				snprintf(reason, sizeof(reason), "Timed out while waiting for the cfm(0x%.4x) for req(0x%04x)",
					 sig_wait->cfm_id, sig_wait->req_id);

				/* Drop the lock while triggering the service-failure
				 * recovery path, then reacquire it for the cleanup below. */
				spin_unlock_bh(&sig_wait->send_signal_lock);
				slsi_sm_service_failed(sdev, reason);
				spin_lock_bh(&sig_wait->send_signal_lock);
			}
		} else {
			/* Completion fired but no cfm skb was stored for us. */
			WARN(1, "Confirm 0x%04x lost\n", sig_wait->cfm_id);
		}
	} else {
		/* Sanity: the stored confirm must match what we armed. */
		WARN_ON(fapi_get_u16(sig_wait->cfm, receiver_pid) != sig_wait->process_id);
		WARN_ON(fapi_get_u16(sig_wait->cfm, id) != sig_wait->cfm_id);
	}

	sig_wait->cfm_id = 0;
	cfm = sig_wait->cfm;
	sig_wait->cfm = NULL;
	/* No confirm arrived: stop waiting for any paired indication too. */
	if (!cfm)
		sig_wait->ind_id = 0;

	spin_unlock_bh(&sig_wait->send_signal_lock);

	return cfm;
}
+
+static int panic_on_lost_ind(u16 ind_id)
+{
+ if (ind_id == MLME_SCAN_DONE_IND)
+ return 0;
+ return 1;
+}
+
/* Wait for the indication armed in @sig_wait (ind_id set by the caller),
 * using an indication-specific timeout. Returns the indication skb
 * (ownership passes to the caller) or NULL on timeout/loss.
 * NOTE(review): netdev_priv(dev) is taken unconditionally, and ndev_vif is
 * dereferenced on the MLME_DISCONNECT_IND branch - callers reaching that
 * branch must pass a non-NULL dev; confirm against slsi_mlme_tx_rx() usage.
 */
static struct sk_buff *slsi_mlme_wait_for_ind(struct slsi_dev *sdev, struct net_device *dev, struct slsi_sig_send *sig_wait, u16 ind_id)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *ind = NULL;
	int tm = 0;

	/* The indication and confirm may have been received in the same HIP read.
	 * The HIP receive buffer processes all received signals in one thread whilst the
	 * waiting process may not be scheduled even if the "complete" call is made.
	 * In this scenario, the complete() call has already been made for this object
	 * and the wait will return immediately.
	 */
	if (ind_id == MLME_SCAN_DONE_IND)
		/* To handle the coex scenario where BTscan has high priority increasing the wait time to 40 secs */
		tm = wait_for_completion_timeout(&sig_wait->completion, msecs_to_jiffies(SLSI_SCAN_DONE_IND_WAIT_TIMEOUT));
	else if ((ind_id == MLME_DISCONNECT_IND) && (ndev_vif->vif_type == FAPI_VIFTYPE_AP))
		/* AP disconnect uses its own (configurable) timeout. */
		tm = wait_for_completion_timeout(&sig_wait->completion, msecs_to_jiffies(sdev->device_config.ap_disconnect_ind_timeout));
	else
		tm = wait_for_completion_timeout(&sig_wait->completion, msecs_to_jiffies(*sdev->sig_wait_cfm_timeout));

	spin_lock_bh(&sig_wait->send_signal_lock);

	/* Indication timed out? */
	if (!sig_wait->ind) {
		SLSI_ERR(sdev, "No ind(0x%.4X) for req(0x%04X) senderid=0x%x\n", sig_wait->ind_id, sig_wait->req_id, sig_wait->process_id);
		if (tm == 0) {
			char reason[80];

			WARN(1, "Timeout - indication 0x%04x not received from chip\n", sig_wait->ind_id);
			if (missing_cfm_ind_panic && panic_on_lost_ind(ind_id)) {
				snprintf(reason, sizeof(reason), "Timed out while waiting for the ind(0x%.4x) for req(0x%04x)",
					 sig_wait->ind_id, sig_wait->req_id);

				/* Drop the lock while triggering the service-failure
				 * recovery path, then reacquire it for the cleanup below. */
				spin_unlock_bh(&sig_wait->send_signal_lock);
				slsi_sm_service_failed(sdev, reason);
				spin_lock_bh(&sig_wait->send_signal_lock);
			}
		} else {
			/* Completion fired but no ind skb was stored for us. */
			WARN(1, "Indication 0x%04x lost\n", sig_wait->ind_id);
		}
	} else {
		/* Sanity: the stored indication must match what we armed. */
		WARN_ON(fapi_get_u16(sig_wait->ind, receiver_pid) != sig_wait->process_id);
		WARN_ON(fapi_get_u16(sig_wait->ind, id) != sig_wait->ind_id);
	}

	sig_wait->ind_id = 0;
	ind = sig_wait->ind;
	sig_wait->ind = NULL;

	spin_unlock_bh(&sig_wait->send_signal_lock);

	return ind;
}
+
/* mib_error: NULL when not required
 * ind: 0 when not required, if used validate_cfm_wait_ind MUST be supplied
 * validate_cfm_wait_ind: NULL when not required, if used ind MUST not be 0
 * NOTE: dev can be NULL!
 */
static struct sk_buff *slsi_mlme_tx_rx(struct slsi_dev *sdev,
				       struct net_device *dev,
				       struct sk_buff *skb,
				       u16 cfm_id,
				       struct sk_buff **mib_error,
				       u16 ind_id,
				       bool (*validate_cfm_wait_ind)(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *cfm))
{
	struct sk_buff *rx = NULL;
	int err;
	u16 req_id = fapi_get_u16(skb, id);
	struct slsi_sig_send *sig_wait = &sdev->sig_wait;

	/* Per-VIF requests wait on the netdev's signal context; device-wide
	 * requests (dev == NULL) use the global one. */
	if (dev) {
		struct netdev_vif *ndev_vif = netdev_priv(dev);

		sig_wait = &ndev_vif->sig_wait;
	}

	/* Reject outright while MLME traffic is blocked (e.g. during recovery). */
	if (sdev->mlme_blocked) {
		SLSI_DBG3(sdev, SLSI_TX, "Rejected. mlme_blocked=%d", sdev->mlme_blocked);
		slsi_kfree_skb(skb);
		return NULL;
	}

	/* Hold a wakelock for the whole exchange; the mutex serialises
	 * requests so only one req/cfm/ind exchange is in flight per context. */
	slsi_wakelock(&sdev->wlan_wl);
	SLSI_MUTEX_LOCK(sig_wait->mutex);

	spin_lock_bh(&sig_wait->send_signal_lock);
	/* Allocate the next sender PID, wrapping within the TX process-id range. */
	if (++sig_wait->process_id > SLSI_TX_PROCESS_ID_MAX)
		sig_wait->process_id = SLSI_TX_PROCESS_ID_MIN;

	/* Stale responses from a previous exchange should never be present;
	 * warn if they are, then free them before arming the new wait. */
	WARN_ON(sig_wait->cfm);
	WARN_ON(sig_wait->ind);
	slsi_kfree_skb(sig_wait->cfm);
	slsi_kfree_skb(sig_wait->ind);
	slsi_kfree_skb(sig_wait->mib_error);
	sig_wait->cfm = NULL;
	sig_wait->ind = NULL;
	sig_wait->mib_error = NULL;
	sig_wait->req_id = req_id;
	sig_wait->cfm_id = cfm_id;
	sig_wait->ind_id = ind_id;

	fapi_set_u16(skb, sender_pid, sig_wait->process_id);
	spin_unlock_bh(&sig_wait->send_signal_lock);

	err = slsi_tx_control(sdev, dev, skb);
	if (err != 0) {
		SLSI_ERR(sdev, "Failed to send mlme signal:0x%.4X, err=%d\n", req_id, err);
		slsi_kfree_skb(skb);
		goto clean_exit;
	}

	if (cfm_id) {
		rx = slsi_mlme_wait_for_cfm(sdev, sig_wait);
		if (rx && ind_id) {
			/* The cfm skb is owned by the validate_cfm_wait_ind() function and MUST be freed or saved there */
			if (validate_cfm_wait_ind(sdev, dev, rx)) {
				rx = slsi_mlme_wait_for_ind(sdev, dev, sig_wait, ind_id);
			} else {
				sig_wait->ind_id = 0; /* Reset as there is no wait for indication */
				rx = NULL;
			}
		}
	} else if (ind_id) {
		rx = slsi_mlme_wait_for_ind(sdev, dev, sig_wait, ind_id);
	}

	/* The cfm_id and ind_id should ALWAYS be 0 at this point */
	WARN_ON(sig_wait->cfm_id);
	WARN_ON(sig_wait->ind_id);
	WARN_ON(sig_wait->cfm);
	WARN_ON(sig_wait->ind);
clean_exit:

	spin_lock_bh(&sig_wait->send_signal_lock);

	/* Disarm the wait and drop any response that raced in after a
	 * timeout, so it cannot be mistaken for the next request's reply. */
	sig_wait->req_id = 0;
	sig_wait->cfm_id = 0;
	sig_wait->ind_id = 0;
	slsi_kfree_skb(sig_wait->cfm);
	slsi_kfree_skb(sig_wait->ind);
	sig_wait->cfm = NULL;
	sig_wait->ind = NULL;

	/* Any MIB error report transfers to the caller when requested;
	 * otherwise it is freed here. */
	if (mib_error)
		*mib_error = sig_wait->mib_error;
	else
		slsi_kfree_skb(sig_wait->mib_error);
	sig_wait->mib_error = NULL;
	spin_unlock_bh(&sig_wait->send_signal_lock);

	SLSI_MUTEX_UNLOCK(sig_wait->mutex);

	slsi_wakeunlock(&sdev->wlan_wl);
	return rx;
}
+
+/**
+ * NOTE: dev can be NULL!
+ */
+int slsi_mlme_req(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct slsi_sig_send *sig_wait = &sdev->sig_wait;
+
+ if (dev) {
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ sig_wait = &ndev_vif->sig_wait;
+ }
+ spin_lock_bh(&sig_wait->send_signal_lock);
+ if (++sig_wait->process_id > SLSI_TX_PROCESS_ID_MAX)
+ sig_wait->process_id = SLSI_TX_PROCESS_ID_MIN;
+ fapi_set_u16(skb, sender_pid, sig_wait->process_id);
+ spin_unlock_bh(&sig_wait->send_signal_lock);
+
+ ret = slsi_tx_control(sdev, dev, skb);
+ if (ret)
+ slsi_kfree_skb(skb);
+ return ret;
+}
+
+struct sk_buff *slsi_mlme_req_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 ind_id)
+{
+ if (WARN_ON(!ind_id))
+ goto err;
+ return slsi_mlme_tx_rx(sdev, dev, skb, 0, NULL, ind_id, NULL);
+err:
+ slsi_kfree_skb(skb);
+ return NULL;
+}
+
+struct sk_buff *slsi_mlme_req_no_cfm(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ return slsi_mlme_tx_rx(sdev, dev, skb, 0, NULL, 0, NULL);
+}
+
+struct sk_buff *slsi_mlme_req_cfm(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 cfm_id)
+{
+ if (WARN_ON(!cfm_id))
+ goto err;
+ return slsi_mlme_tx_rx(sdev, dev, skb, cfm_id, NULL, 0, NULL);
+err:
+ slsi_kfree_skb(skb);
+ return NULL;
+}
+
+/* NOTE: dev can be NULL! */
+static inline struct sk_buff *slsi_mlme_req_cfm_mib(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 cfm_id, struct sk_buff **mib_error)
+{
+ if (WARN_ON(!cfm_id))
+ goto err;
+ if (WARN_ON(!mib_error))
+ goto err;
+ return slsi_mlme_tx_rx(sdev, dev, skb, cfm_id, mib_error, 0, NULL);
+err:
+ slsi_kfree_skb(skb);
+ return NULL;
+}
+
+/* NOTE: dev can be NULL! */
+static inline struct sk_buff *slsi_mlme_req_cfm_ind(struct slsi_dev *sdev,
+ struct net_device *dev,
+ struct sk_buff *skb,
+ u16 cfm_id,
+ u16 ind_id,
+ bool (*validate_cfm_wait_ind)(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *cfm))
+{
+ if (WARN_ON(!cfm_id))
+ goto err;
+ if (WARN_ON(!ind_id))
+ goto err;
+ if (WARN_ON(!validate_cfm_wait_ind))
+ goto err;
+
+ return slsi_mlme_tx_rx(sdev, dev, skb, cfm_id, NULL, ind_id, validate_cfm_wait_ind);
+
+err:
+ slsi_kfree_skb(skb);
+ return NULL;
+}
+
+static struct ieee80211_reg_rule *slsi_get_reg_rule(u32 center_freq, struct slsi_802_11d_reg_domain *domain_info)
+{
+ struct ieee80211_reg_rule *rule;
+ int i;
+
+ for (i = 0; i < domain_info->regdomain->n_reg_rules; i++) {
+ rule = &domain_info->regdomain->reg_rules[i];
+
+ /* Consider 10Mhz on both side from the center frequency */
+ if (((center_freq - MHZ_TO_KHZ(10)) >= rule->freq_range.start_freq_khz) &&
+ ((center_freq + MHZ_TO_KHZ(10)) <= rule->freq_range.end_freq_khz))
+ return rule;
+ }
+
+ return NULL;
+}
+
+u16 slsi_compute_chann_info(struct slsi_dev *sdev, u16 width, u16 center_freq0, u16 channel_freq)
+{
+ u16 chann_info;
+ u16 prim_chan_pos = 0;
+
+ SLSI_DBG3(sdev, SLSI_MLME, "compute channel info\n");
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20:
+ chann_info = 20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ chann_info = 40;
+ /* Check HT Minus */
+ if (center_freq0 < channel_freq)
+ chann_info |= SLSI_CHANN_INFO_HT_SCB;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ /* F = { F1-30, ... F1+30 } => { 0x0000, ... 0x0300} */
+ prim_chan_pos = ((30 + channel_freq - center_freq0) / 20);
+ if (prim_chan_pos > 3) {
+ SLSI_ERR(sdev, "Invalid center_freq0 in chandef : %u, primary channel = %u,"
+ "primary chan pos calculated = %d\n", center_freq0, channel_freq, prim_chan_pos);
+ prim_chan_pos = 0;
+ }
+ prim_chan_pos = 0xFFFF & (prim_chan_pos << 8);
+ chann_info = 80 | prim_chan_pos;
+ break;
+ default:
+ SLSI_WARN(sdev, "Invalid chandef.width(0x%x)\n", width);
+ chann_info = 0;
+ break;
+ }
+
+ SLSI_DBG3(sdev, SLSI_MLME, "channel_width:%u, chann_info:0x%x\n", width, chann_info);
+ return chann_info;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+u16 slsi_get_chann_info(struct slsi_dev *sdev, struct cfg80211_chan_def *chandef)
+{
+ u16 chann_info = 0;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_20 || chandef->width == NL80211_CHAN_WIDTH_20_NOHT) {
+ chann_info = 20;
+ SLSI_DBG3(sdev, SLSI_MLME, "channel_width:%u, chann_info:0x%x\n", chandef->width, chann_info);
+ } else if (chandef->chan) {
+ chann_info = slsi_compute_chann_info(sdev, chandef->width, chandef->center_freq1,
+ chandef->chan->center_freq);
+ }
+ return chann_info;
+}
+
/* Validate that the channel/width combination in @chandef may be used to
 * start an AP under the current regulatory domain.
 * @wifi_sharing_channel_switched == 1 relaxes the check: radar channels are
 * then permitted (only disabled/passive-scan channels are rejected).
 * Returns 0 when allowed, -EINVAL otherwise.
 */
int slsi_check_channelization(struct slsi_dev *sdev, struct cfg80211_chan_def *chandef,
			      int wifi_sharing_channel_switched)
{
	u8 width;
	struct ieee80211_reg_rule *rule = NULL;
	struct ieee80211_channel *channel = NULL;
	u32 ref_flags;

	/* Only 20/40/80 MHz widths are supported for AP operation here. */
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20:
	case NL80211_CHAN_WIDTH_20_NOHT:
		width = 20;
		break;
	case NL80211_CHAN_WIDTH_40:
		width = 40;
		break;
	case NL80211_CHAN_WIDTH_80:
		width = 80;
		break;
	default:
		SLSI_ERR(sdev, "Invalid chandef.width(0x%x)\n", chandef->width);
		return -EINVAL;
	}

	channel = ieee80211_get_channel(sdev->wiphy, chandef->chan->center_freq);
	if (!channel) {
		SLSI_ERR(sdev, "Invalid channel %d used to start AP. Channel not found\n", chandef->chan->center_freq);
		return -EINVAL;
	}

	/* Build the set of channel flags that forbid AP start. Radar channels
	 * are allowed when the channel came from a wifi-sharing switch. */
	if (wifi_sharing_channel_switched == 1) {
		ref_flags = IEEE80211_CHAN_DISABLED
#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 10, 13)
			    | IEEE80211_CHAN_PASSIVE_SCAN
#endif
			    ;
	} else {
		ref_flags = IEEE80211_CHAN_DISABLED |
			    IEEE80211_CHAN_RADAR
#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 10, 13)
			    | IEEE80211_CHAN_PASSIVE_SCAN
#endif
			    ;
	}

	if (channel->flags & ref_flags) {
		SLSI_ERR(sdev, "Invalid channel %d used to start AP\n", chandef->chan->center_freq);
		return -EINVAL;
	}
	rule = slsi_get_reg_rule(MHZ_TO_KHZ(chandef->center_freq1), &sdev->device_config.domain_info);
	if (!rule) {
		SLSI_ERR(sdev, "Invalid channel %d used to start AP. No reg rule found for this channel\n", chandef->chan->center_freq);
		return -EINVAL;
	}

	/* The whole occupied bandwidth must fit inside the rule's range. */
	if (MHZ_TO_KHZ(width) <= rule->freq_range.max_bandwidth_khz) {
		u32 width_boundary1, width_boundary2;

		width_boundary1 = MHZ_TO_KHZ(chandef->center_freq1 - width / 2);
		width_boundary2 = MHZ_TO_KHZ(chandef->center_freq1 + width / 2);
		if ((width_boundary1 >= rule->freq_range.start_freq_khz) && (width_boundary2 <= rule->freq_range.end_freq_khz))
			return 0;
		SLSI_ERR(sdev, "Invalid channel %d used to start AP. Channel not within frequency range of the reg rule\n", chandef->chan->center_freq);
		return -EINVAL;
	}
	return -EINVAL;
}
+
+#else
+u16 slsi_get_chann_info(struct slsi_dev *sdev, enum nl80211_channel_type channel_type)
+{
+ u16 chann_info;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ /* Channel Info
+ * bits 0 ~ 7 : Channel Width (5, 10, 20, 40)
+ * bit 8 : Set to 1 if primary channel is greater than secondary channel (HT Minus)
+ */
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chann_info = 20;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chann_info = 40 | SLSI_CHANN_INFO_HT_SCB;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chann_info = 40;
+ break;
+ default:
+ SLSI_WARN(sdev, "Unknown channel_type: %d\n", channel_type);
+ chann_info = 0;
+ break;
+ }
+
+ SLSI_DBG3(sdev, SLSI_MLME, "channel_type:%d, chann_info:0x%x\n", channel_type, chann_info);
+ return chann_info;
+}
+
+int slsi_check_channelization(struct slsi_dev *sdev, enum nl80211_channel_type channel_type)
+{
+ return 0;
+}
+
+#endif
+
+/* Called in the case of MIB SET errors.
+ * Decode and print a MIB buffer to the log for debug purposes.
+ */
+static void mib_buffer_dump_to_log(struct slsi_dev *sdev, u8 *mib_buffer, unsigned int mib_buffer_len)
+{
+ size_t mib_decode_result;
+ size_t offset = 0;
+ struct slsi_mib_entry decoded_mib_value;
+ struct slsi_mib_data mibdata;
+ int error_out_len = mib_buffer_len * 3;
+ int error_out_pos = 0;
+ char *error_out;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ FUNC_ENTER(sdev);
+ SLSI_ERR(sdev, "MIB buffer length: %u. MIB Error (decoded):", mib_buffer_len);
+
+ if (!mib_buffer) {
+ SLSI_ERR(sdev, "MIB buffer pointer is NULL - can not decode MIB keys\n");
+ return;
+ }
+ error_out = kmalloc(error_out_len, GFP_KERNEL);
+
+ while (offset < mib_buffer_len) {
+ error_out_pos = 0;
+ mibdata.data = &mib_buffer[offset];
+ mibdata.dataLength = mib_buffer_len - offset;
+
+ mib_decode_result = slsi_mib_decode(&mibdata, &decoded_mib_value);
+ if (!mib_decode_result) {
+ SLSI_ERR_HEX(sdev, mibdata.data, mibdata.dataLength, "slsi_mib_decode() Failed to Decode:\n");
+ break;
+ }
+
+ offset += mib_decode_result;
+ /* Time for some eye candy - output the decoded MIB key at error level in the log */
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "%d", (int)(decoded_mib_value.psid));
+ if (decoded_mib_value.index[0]) {
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, ".%d", (int)(decoded_mib_value.index[0]));
+ if (decoded_mib_value.index[1])
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, ".%d", (int)(decoded_mib_value.index[1]));
+ }
+
+ switch (decoded_mib_value.value.type) {
+ case SLSI_MIB_TYPE_BOOL:
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "=%s\n", decoded_mib_value.value.u.boolValue ? "TRUE" : "FALSE");
+ break;
+ case SLSI_MIB_TYPE_UINT:
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "=%d\n", (int)decoded_mib_value.value.u.uintValue);
+ break;
+ case SLSI_MIB_TYPE_INT:
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "=%d\n", (int)decoded_mib_value.value.u.intValue);
+ break;
+ case SLSI_MIB_TYPE_OCTET:
+ {
+ u32 i;
+
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "=[");
+ for (i = 0; i < decoded_mib_value.value.u.octetValue.dataLength; i++)
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "%.2X", (int)decoded_mib_value.value.u.octetValue.data[i]);
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "]\n");
+ break;
+ }
+ default:
+ error_out_pos += snprintf(error_out + error_out_pos, error_out_len - error_out_pos, "=Can not decode MIB key type\n");
+ break;
+ }
+
+ SLSI_INFO_NODEV("%s", error_out);
+ }
+ kfree(error_out);
+ FUNC_EXIT(sdev);
+}
+
/* Send MLME_SET_IP_ADDRESS.request carrying the vif's IPv4 address plus the
 * broadcast MAC as the multicast address.
 * Returns 0 on success, -EOPNOTSUPP in WlanLite test mode, -ENOMEM on
 * allocation failure, -EIO if no confirm arrives, -EINVAL on a firmware
 * error result code. Caller must hold ndev_vif->vif_mutex.
 */
int slsi_mlme_set_ip_address(struct slsi_dev *sdev, struct net_device *dev)
{
	struct sk_buff *req;
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *cfm;
	int r = 0;
	u32 ipaddr;
	u8 multicast_add[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_SET_IP_ADDRESS.request\n");
		return -EOPNOTSUPP;
	}

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	/* Bulk data area carries just the 4-byte IPv4 address. */
	req = fapi_alloc(mlme_set_ip_address_req, MLME_SET_IP_ADDRESS_REQ, ndev_vif->ifnum, sizeof(ndev_vif->ipaddress));
	if (!req)
		return -ENOMEM;

	fapi_set_u16(req, u.mlme_set_ip_address_req.ip_version, 4);
	fapi_set_memcpy(req, u.mlme_set_ip_address_req.multicast_address, multicast_add);

	/* be32_to_cpu then htonl round-trips the stored big-endian address,
	 * so the bytes appended below are in network byte order.
	 */
	ipaddr = htonl(be32_to_cpu(ndev_vif->ipaddress));
	fapi_append_data(req, (const u8 *)(&ipaddr), sizeof(ipaddr));

	SLSI_DBG2(sdev, SLSI_MLME, "slsi_mlme_set_ip_address(vif: %d, IP: %pI4)\n", ndev_vif->ifnum, &ndev_vif->ipaddress);
	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_IP_ADDRESS_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_set_ip_address_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_set_ip_address_cfm(result:0x%04x) ERROR\n", fapi_get_u16(cfm, u.mlme_set_ip_address_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);
	return r;
}
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+
/* Send MLME_SET_IP_ADDRESS.request (version 6) for the vif.
 * When ND offload is enabled the solicited-node multicast address is derived
 * from the last 3 bytes of the IPv6 address and the real address is sent;
 * otherwise an all-zero address is sent (which disables the feature in
 * firmware - NOTE(review): inferred from the log text, confirm against FAPI
 * spec). Returns 0, -EOPNOTSUPP (WlanLite), -ENOMEM, -EIO or -EINVAL.
 * Caller must hold ndev_vif->vif_mutex.
 */
int slsi_mlme_set_ipv6_address(struct slsi_dev *sdev, struct net_device *dev)
{
	struct sk_buff *req;
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *cfm;
	int r = 0;
	/* 33:33:ff:XX:XX:XX - IPv6 solicited-node multicast MAC prefix. */
	u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x00 };

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_SET_IP_ADDRESS.request\n");
		return -EOPNOTSUPP;
	}

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	/* Bulk data area carries the 16-byte IPv6 address. */
	req = fapi_alloc(mlme_set_ip_address_req, MLME_SET_IP_ADDRESS_REQ, ndev_vif->ifnum, 16);
	if (!req)
		return -ENOMEM;

	fapi_set_u16(req, u.mlme_set_ip_address_req.ip_version, 6);

	if (ndev_vif->sta.nd_offload_enabled == 1) {
		/* ipv6address is updated elsewhere under ipv6addr_lock; copy
		 * its low 3 bytes into the solicited-node MAC atomically.
		 */
		slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
		memcpy(&solicited_node_addr[3], &ndev_vif->ipv6address.s6_addr[13], 3);
		slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);

		fapi_set_memcpy(req, u.mlme_set_ip_address_req.multicast_address, solicited_node_addr);
		fapi_append_data(req, ndev_vif->ipv6address.s6_addr, 16);
		SLSI_DBG2(sdev, SLSI_MLME, "mlme_set_ip_address_req(vif: %d, IP: %pI6)\n", ndev_vif->ifnum,
			  &ndev_vif->ipv6address);
	} else {
		u8 node_addr_nd_disable[16];

		/* ND offload disabled: send an all-zero IPv6 address. */
		memset(&node_addr_nd_disable, 0, sizeof(node_addr_nd_disable));
		fapi_append_data(req, node_addr_nd_disable, 16);
		SLSI_DBG2(sdev, SLSI_MLME, "mlme_set_ip_address_req(vif: %d, IP-setting ip address to all zeros)\n",
			  ndev_vif->ifnum);
	}

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_IP_ADDRESS_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_set_ip_address_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_set_ip_address_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_set_ip_address_cfm.result_code));
		r = -EINVAL;
	}
	slsi_kfree_skb(cfm);
	return r;
}
+#endif
+
+int slsi_mlme_set(struct slsi_dev *sdev, struct net_device *dev, u8 *mib, int mib_len)
+{
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 ifnum = 0;
+
+ if (dev) {
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ ifnum = ndev_vif->ifnum;
+ }
+
+ req = fapi_alloc(mlme_set_req, MLME_SET_REQ, ifnum, mib_len);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_append_data(req, mib, mib_len);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_datalen(cfm)) {
+ mib_buffer_dump_to_log(sdev, fapi_get_data(cfm), fapi_get_datalen(cfm));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+
+ return r;
+}
+
/* Send MLME_GET.request with an encoded MIB buffer and copy the confirm's
 * payload into resp (length stored in *resp_len).
 * When the firmware reports per-PSID errors (err skb) those are logged, but
 * successfully-read values in cfm are still copied out: a multi-PSID
 * request can legitimately return a mix of errors and values, in which case
 * r is reset to 0 below. Returns 0, -ENOMEM, -EIO, or -EINVAL when the
 * response does not fit resp_buf_len.
 */
int slsi_mlme_get(struct slsi_dev *sdev, struct net_device *dev, u8 *mib, int mib_len, u8 *resp,
		  int resp_buf_len, int *resp_len)
{
	struct sk_buff *req;
	struct sk_buff *err = NULL;
	struct sk_buff *cfm;
	int r = 0;
	u16 ifnum = 0;

	*resp_len = 0;

	/* NULL dev means a global (vif 0) get. */
	if (dev) {
		struct netdev_vif *ndev_vif = netdev_priv(dev);

		ifnum = ndev_vif->ifnum;
	}
	req = fapi_alloc(mlme_get_req, MLME_GET_REQ, ifnum, mib_len);
	if (!req)
		return -ENOMEM;
	fapi_append_data(req, mib, mib_len);

	cfm = slsi_mlme_req_cfm_mib(sdev, dev, req, MLME_GET_CFM, &err);
	if (!cfm)
		return -EIO;

	if (err) {
		SLSI_DBG1(sdev, SLSI_MLME, "ERROR: mlme_get_cfm with mib error\n");
		mib_buffer_dump_to_log(sdev, fapi_get_data(err), fapi_get_datalen(err));
		LOG_CONDITIONALLY(fapi_get_datalen(cfm) > resp_buf_len,
				  SLSI_ERR(sdev, "Insufficient resp_buf_len(%d). mlme_get_cfm(%d)\n",
					   resp_buf_len, fapi_get_datalen(cfm)));
		r = -EINVAL;
	}

	/* if host has requested for multiple PSIDs in same request, we can get a
	 * combination of error and success
	 */
	if (fapi_get_datalen(cfm) <= resp_buf_len) {
		*resp_len = fapi_get_datalen(cfm);
		memcpy(resp, fapi_get_data(cfm), fapi_get_datalen(cfm));
		r = 0;
	} else {
		SLSI_WARN(sdev, "Insufficient length (%d) to read MIB values, expected =%d\n", resp_buf_len, fapi_get_datalen(cfm));
		r = -EINVAL;
	}

	slsi_kfree_skb(err);
	slsi_kfree_skb(cfm);

	return r;
}
+
+int slsi_mlme_add_vif(struct slsi_dev *sdev, struct net_device *dev, u8 *interface_address, u8 *device_address)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0, i;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_ADD_VIF.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ /* reset host stats */
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++) {
+ ndev_vif->tx_no_ack[i] = 0;
+ ndev_vif->tx_packets[i] = 0;
+ ndev_vif->rx_packets[i] = 0;
+ }
+ req = fapi_alloc(mlme_add_vif_req, MLME_ADD_VIF_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+ fapi_set_u16(req, u.mlme_add_vif_req.virtual_interface_type, ndev_vif->vif_type);
+ fapi_set_memcpy(req, u.mlme_add_vif_req.interface_address, interface_address);
+ fapi_set_memcpy(req, u.mlme_add_vif_req.device_address, device_address);
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_add_vif_req(vif:%d)\n", ndev_vif->ifnum);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_ADD_VIF_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_add_vif_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_add_vif_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_add_vif_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ /* By default firmware vif will be in active mode */
+ ndev_vif->power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
/* Destroy the firmware vif for this netdev and release vif-scoped host
 * state (cached probe-request IEs, P2P procedure flag). Errors are logged
 * but not propagated - teardown proceeds best-effort.
 * Caller must hold ndev_vif->vif_mutex.
 */
void slsi_mlme_del_vif(struct slsi_dev *sdev, struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *cfm;

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_DEL_VIF.request\n");
		return;
	}

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_del_vif_req(vif:%d)\n", ndev_vif->ifnum);
	req = fapi_alloc(mlme_del_vif_req, MLME_DEL_VIF_REQ, ndev_vif->ifnum, 0);
	if (!req)
		return;

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_DEL_VIF_CFM);
	if (!cfm)
		return;

	if (fapi_get_u16(cfm, u.mlme_del_vif_cfm.result_code) != FAPI_RESULTCODE_SUCCESS)
		SLSI_NET_ERR(dev, "mlme_del_vif_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_del_vif_cfm.result_code));

	/* Free cached probe-request IEs for STA/P2P-client vifs when they were
	 * marked for deletion along with the vif.
	 */
	if (((ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) || (ndev_vif->iftype == NL80211_IFTYPE_STATION)) &&
	    (ndev_vif->delete_probe_req_ies)) {
		kfree(ndev_vif->probe_req_ies);
		ndev_vif->probe_req_ies = NULL;
		ndev_vif->probe_req_ie_len = 0;
		ndev_vif->delete_probe_req_ies = false;
	}
	/* Any in-flight P2P procedure ends with the vif. */
	if (SLSI_IS_VIF_INDEX_P2P(ndev_vif))
		ndev_vif->drv_in_p2p_procedure = false;

	slsi_kfree_skb(cfm);
}
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+int slsi_mlme_set_forward_beacon(struct slsi_dev *sdev, struct net_device *dev, int action)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "wlanlite does not support mlme_forward_bacon_req\n");
+ return -EOPNOTSUPP;
+ }
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_forward_beacon_req(action = %s(%d))\n", action ? "start" : "stop", action);
+
+ req = fapi_alloc(mlme_forward_beacon_req, MLME_FORWARD_BEACON_REQ, ndev_vif->ifnum, 0);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc for mlme_forward_beacon_req is failed\n");
+ return -ENOMEM;
+ }
+
+ fapi_set_u16(req, u.mlme_forward_beacon_req.wips_action, action);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_FORWARD_BEACON_CFM);
+ if (!cfm) {
+ SLSI_NET_ERR(dev, "receiving mlme_forward_beacon_cfm is failed\n");
+ return -EIO;
+ }
+
+ if (fapi_get_u16(cfm, u.mlme_forward_beacon_cfm.result_code) != FAPI_RESULTCODE_HOST_REQUEST_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_forward_beacon_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_forward_beacon_cfm.result_code));
+ return -EINVAL;
+ }
+
+ ndev_vif->is_wips_running = (action ? true : false);
+
+ slsi_kfree_skb(cfm);
+ return 0;
+}
+#endif
+
+int slsi_mlme_set_channel(struct slsi_dev *sdev, struct net_device *dev, struct ieee80211_channel *chan, u16 duration, u16 interval, u16 count)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_INFO(dev, "wlanlite does not support MLME_SET_CHANNEL.request\n");
+ return -EOPNOTSUPP;
+ }
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_channel_req(freq:%u, duration:%u, interval:%u, count:%u)\n", chan->center_freq, duration, interval, count);
+
+ req = fapi_alloc(mlme_set_channel_req, MLME_SET_CHANNEL_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_set_channel_req.availability_duration, duration);
+ fapi_set_u16(req, u.mlme_set_channel_req.availability_interval, interval);
+ fapi_set_u16(req, u.mlme_set_channel_req.count, count);
+ fapi_set_u16(req, u.mlme_set_channel_req.channel_frequency, SLSI_FREQ_HOST_TO_FW(chan->center_freq));
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_CHANNEL_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_set_channel_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_set_channel_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_set_channel_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_unset_channel_req(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "slsi_mlme_unset_channel_req\n");
+
+ req = fapi_alloc(mlme_unset_channel_req, MLME_UNSET_CHANNEL_REQ, ndev_vif->ifnum, 0);
+
+ if (!req)
+ return -ENOMEM;
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_UNSET_CHANNEL_CFM);
+
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_unset_channel_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_unset_channel_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_unset_channel_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+void slsi_ap_obss_scan_done_ind(struct net_device *dev, struct netdev_vif *ndev_vif)
+{
+ struct sk_buff *scan_res;
+ u16 scan_id = SLSI_SCAN_HW_ID;
+
+ SLSI_UNUSED_PARAMETER(dev);
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "Scan before AP start completed\n");
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex));
+ SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
+
+ scan_res = slsi_dequeue_cached_scan_result(&ndev_vif->scan[scan_id], NULL);
+ while (scan_res) {
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(scan_res);
+ size_t mgmt_len = fapi_get_mgmtlen(scan_res);
+ size_t ie_len = mgmt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable); /* ieee80211_mgmt structure is similar for Probe Response and Beacons */
+
+ SLSI_NET_DBG4(dev, SLSI_MLME, "OBSS scan result (scan_id:%d, %pM, freq:%d, rssi:%d, ie_len = %zu)\n",
+ fapi_get_u16(scan_res, u.mlme_scan_ind.scan_id),
+ fapi_get_mgmt(scan_res)->bssid,
+ fapi_get_u16(scan_res, u.mlme_scan_ind.channel_frequency) / 2,
+ fapi_get_s16(scan_res, u.mlme_scan_ind.rssi),
+ ie_len);
+
+ if (!cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, mgmt->u.beacon.variable, ie_len)) {
+ SLSI_NET_DBG1(dev, SLSI_MLME, "Non HT BSS detected on primary channel\n");
+ ndev_vif->ap.non_ht_bss_present = true;
+ }
+
+ slsi_kfree_skb(scan_res);
+ scan_res = slsi_dequeue_cached_scan_result(&ndev_vif->scan[scan_id], NULL);
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
+}
+
+/* Null check for cfm done in caller function */
+static bool slsi_scan_cfm_validate(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *cfm)
+{
+ bool r = true;
+
+ if (fapi_get_u16(cfm, u.mlme_add_scan_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_ERR_NODEV("mlme_add_scan_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_add_scan_cfm.result_code));
+ r = false;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+int slsi_mlme_append_gscan_channel_list(struct slsi_dev *sdev,
+ struct net_device *dev,
+ struct sk_buff *req,
+ struct slsi_nl_bucket_param *nl_bucket)
+{
+ u16 channel_freq;
+ u8 i;
+ u8 *p;
+ const u8 channels_list_ie_header[] = {
+ 0xDD, /* Element ID: Vendor Specific */
+ 0x05, /* Length: actual length will be updated later */
+ 0x00, 0x16, 0x32, /* OUI: Samsung Electronics Co. */
+ 0x01, /* OUI Type: Scan parameters */
+ 0x02 /* OUI Subtype: channel list */
+ };
+ u8 *channels_list_ie = fapi_append_data(req, channels_list_ie_header, sizeof(channels_list_ie_header));
+
+ if (!channels_list_ie) {
+ SLSI_WARN(sdev, "channel list IE append failed\n");
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+
+ if (nl_bucket->band == WIFI_BAND_UNSPECIFIED)
+ /* channel list is added only if band is UNSPECIFIED */
+ for (i = 0; i < nl_bucket->num_channels; i++) {
+ p = fapi_append_data(req, NULL, SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
+ if (!p) {
+ SLSI_ERR(sdev, "chan desc[%d] append failed\n", i);
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+ channel_freq = SLSI_FREQ_HOST_TO_FW(nl_bucket->channels[i].channel);
+ channel_freq = cpu_to_le16(channel_freq);
+ memcpy(p, &channel_freq, sizeof(channel_freq));
+ p[2] = FAPI_SCANPOLICY_ANY_RA;
+ channels_list_ie[1] += SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE; /* Length */
+ }
+ else {
+ p = fapi_append_data(req, NULL, SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
+ if (!p) {
+ SLSI_ERR(sdev, "chan desc(band specific)append failed\n");
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+ /* Channel frequency set to 0 for all channels allowed by the corresponding regulatory domain and scan policy */
+ channel_freq = 0;
+ memcpy(p, &channel_freq, sizeof(channel_freq));
+ p[2] = slsi_gscan_get_scan_policy(nl_bucket->band);
+ channels_list_ie[1] += SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE;
+ }
+
+ return 0;
+}
+#endif
+
/* Append the Samsung vendor-specific channel-list IE plus one descriptor per
 * scannable channel to an mlme_add_scan request.
 * P2P full scans get a single wildcard descriptor (all non-DFS 2.4/5 GHz
 * channels). Channels outside the configured supported_band are filtered
 * out; if none survive, the scan is rejected. On any failure req is freed
 * here and -EINVAL returned, so the caller must not touch req afterwards.
 * NOTE(review): channels_list_ie[1] is a u8 IE length - assumes the caller
 * never passes enough channels to overflow it; confirm upper bound.
 */
static int slsi_mlme_append_channel_list(struct slsi_dev *sdev,
					 struct net_device *dev,
					 struct sk_buff *req,
					 u32 num_channels,
					 struct ieee80211_channel *channels[],
					 u16 scan_type,
					 bool passive_scan)
{
	int chann;
	u16 freq_fw_unit;
	u8 i;
	int n_valid_channels = 0;
	u8 *p;

	const u8 channels_list_ie_header[] = {
		0xDD,			/* Element ID: vendor specific */
		0x05,			/* Length: actual length will be updated later */
		0x00, 0x16, 0x32,	/* OUI: Samsung Electronics Co. */
		0x01,			/* OUI Type: scan parameters */
		0x02			/* OUI Subtype: channel list */
	};

	u8 *channels_list_ie = fapi_append_data(req, channels_list_ie_header, sizeof(channels_list_ie_header));

	if (!channels_list_ie) {
		SLSI_WARN(sdev, "channel list IE append failed\n");
		slsi_kfree_skb(req);
		return -EINVAL;
	}

	/* For P2P Full Scan, Setting Channel Frequency = 0x0000, Scan Policy = 2.4GHz, 5GHz and Non-Dfs. */
	if (scan_type == FAPI_SCANTYPE_P2P_SCAN_FULL) {
		p = fapi_append_data(req, NULL, SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
		if (!p) {
			SLSI_WARN(sdev, "scan channel descriptor append failed\n");
			slsi_kfree_skb(req);
			return -EINVAL;
		}
		p[0] = 0;
		p[1] = 0;
		p[2] = FAPI_SCANPOLICY_2_4GHZ | FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_NON_DFS;
		channels_list_ie[1] += SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE;
		return 0;
	}

	for (i = 0; i < num_channels; i++) {
		chann = channels[i]->hw_value & 0xFF;

		/* Skip channels outside the configured single-band restriction. */
		if (sdev->device_config.supported_band) {
			if (channels[i]->band == NL80211_BAND_2GHZ && sdev->device_config.supported_band != SLSI_FREQ_BAND_2GHZ)
				continue;
			if (channels[i]->band == NL80211_BAND_5GHZ && sdev->device_config.supported_band != SLSI_FREQ_BAND_5GHZ)
				continue;
		}

		n_valid_channels++;
		p = fapi_append_data(req, NULL, SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
		if (!p) {
			SLSI_WARN(sdev, "scan channel descriptor append failed\n");
			slsi_kfree_skb(req);
			return -EINVAL;
		}
		/* Firmware frequency unit is 0.5 MHz (hence the factor 2). */
		freq_fw_unit = 2 * ieee80211_channel_to_frequency(chann, (chann <= 14) ?
								  NL80211_BAND_2GHZ : NL80211_BAND_5GHZ);
		freq_fw_unit = cpu_to_le16(freq_fw_unit);
		memcpy(p, &freq_fw_unit, sizeof(freq_fw_unit));

		if (passive_scan && (scan_type != FAPI_SCANTYPE_AP_AUTO_CHANNEL_SELECTION))
			p[2] = FAPI_SCANPOLICY_PASSIVE;
		else
			p[2] = 0;

		channels_list_ie[1] += SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE;
	}
	if (n_valid_channels == 0) {
		SLSI_NET_ERR(dev, "no valid channels to Scan\n");
		slsi_kfree_skb(req);
		return -EINVAL;
	}
	return 0;
}
+
/* Fill the fixed mlme_add_scan fields (scan id/type/report mode/device
 * address) and append one SSID element per requested SSID.
 * On append failure req is freed here and -EINVAL returned; otherwise 0.
 */
static inline int slsi_set_scan_params(
	struct net_device *dev,
	u16 scan_id,
	u16 scan_type,
	u16 report_mode,
	int num_ssids,
	struct cfg80211_ssid *ssids,
	struct sk_buff *req)
{
	u8 *p = NULL;
	u8 i;
	struct cfg80211_ssid *pssid = ssids;
#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
	struct netdev_vif *netdev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = netdev_vif->sdev;
#endif

	fapi_set_u16(req, u.mlme_add_scan_req.scan_id, scan_id);
	fapi_set_u16(req, u.mlme_add_scan_req.scan_type, scan_type);
	fapi_set_u16(req, u.mlme_add_scan_req.report_mode_bitmap, report_mode);

	/* Scan with the randomised MAC when one has been configured;
	 * otherwise the netdev's real address. Note the #ifdef'd else:
	 * the fapi_set_memcpy below is the else-branch when randomisation
	 * is compiled in, and unconditional when it is not.
	 */
#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
	if (sdev->scan_addr_set)
		fapi_set_memcpy(req, u.mlme_add_scan_req.device_address, sdev->scan_mac_addr);
	else
#endif
		fapi_set_memcpy(req, u.mlme_add_scan_req.device_address, dev->dev_addr);

	/* One 802.11 SSID element (EID, length, payload) per requested SSID;
	 * a zero-length SSID yields a wildcard element.
	 */
	for (i = 0; i < num_ssids; i++, pssid++) {
		p = fapi_append_data(req, NULL, 2 + pssid->ssid_len);
		if (!p) {
			slsi_kfree_skb(req);
			SLSI_NET_WARN(dev, "fail to append SSID element to scan request\n");
			return -EINVAL;
		}

		*p++ = WLAN_EID_SSID;
		*p++ = pssid->ssid_len;

		if (pssid->ssid_len)
			memcpy(p, pssid->ssid, pssid->ssid_len);
	}
	return 0;
}
+
+#define SLSI_MAX_SSID_DESC_IN_SSID_FILTER_ELEM 7
/* Program a firmware scheduled scan from a cfg80211 sched-scan request:
 * fixed parameters and SSID elements, a scan-timing IE built from the
 * requested interval, the caller-supplied extra IEs, optional SSID-filter
 * IEs from the match sets, and the channel list.
 * Returns 0 on success, -EOPNOTSUPP (WlanLite), -ENOMEM, -EIO or -EINVAL.
 * Caller must hold ndev_vif->scan_mutex. On helper failure req has already
 * been freed by the helper.
 */
int slsi_mlme_add_sched_scan(struct slsi_dev *sdev,
			     struct net_device *dev,
			     struct cfg80211_sched_scan_request *request,
			     const u8 *ies,
			     u16 ies_len)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *rx;
	int r = 0;
	size_t alloc_data_size = 0;
	u32 i, j;
	u32 num_ssid_filter_elements = 0;

	/* Scan Timing IE: default values */
	u8 scan_timing_ie[] = {
		0xdd,			/* Element ID: Vendor Specific */
		0x11,			/* Length */
		0x00, 0x16, 0x32,	/* OUI: Samsung Electronics Co. */
		0x01,			/* OUI Type: Scan parameters */
		0x01,			/* OUI Subtype: Scan timing */
		0x00, 0x00, 0x00, 0x00,	/* Min_Period: filled later in the function */
		0x00, 0x00, 0x00, 0x00,	/* Max_Period: filled later in the function */
		0x01,			/* Exponent */
		0x01,			/* Step count */
		0x00, 0x01		/* Skip first period: true for scheduled scans*/
	};

	u8 ssid_filter_ie_hdr[] = {
		0xdd,			/* Element ID: Vendor Specific */
		0x05,			/* Length */
		0x00, 0x16, 0x32,	/* OUI: Samsung Electronics Co. */
		0x01,			/* OUI Type: Scan parameters */
		0x04			/* OUI Subtype: SSID Filter */
	};

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_WARN(dev, "not supported in WlanLite mode\n");
		return -EOPNOTSUPP;
	}

	if (WARN_ON(!(dev->dev_addr)))
		return -EINVAL;

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex));

	/* Reserve bulk-data room for the timing IE, caller IEs and the
	 * channel-list IE with all its descriptors.
	 */
	alloc_data_size += sizeof(scan_timing_ie) + ies_len + SLSI_SCAN_PRIVATE_IE_CHANNEL_LIST_HEADER_LEN +
			   (request->n_channels * SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);

	for (i = 0; i < request->n_ssids; i++) {
		/* 2 bytes for SSID EID and length field + variable length SSID */
		alloc_data_size += (2 + request->ssids[i].ssid_len);
	}

	if (request->n_match_sets) {
		/* Up to 7 SSID descriptors fit one filter IE.
		 * NOTE(review): "/ 7 + 1" produces one extra (empty) filter
		 * element when n_match_sets is an exact multiple of 7 -
		 * apparently harmless, but confirm against firmware parsing.
		 */
		num_ssid_filter_elements = (request->n_match_sets / SLSI_MAX_SSID_DESC_IN_SSID_FILTER_ELEM) + 1;
		/* EID(1) + len(1) + oui(3) + type/subtype(2) + 7 ssid descriptors(7 * 33) */
		alloc_data_size += 238 * num_ssid_filter_elements;
	}

	req = fapi_alloc(mlme_add_scan_req, MLME_ADD_SCAN_REQ, 0, alloc_data_size);
	if (!req)
		return -ENOMEM;

	/* Scan id encodes the vif in the high byte and the sched-scan slot
	 * in the low byte. Helper frees req on failure.
	 */
	r = slsi_set_scan_params(dev, (ndev_vif->ifnum << 8 | SLSI_SCAN_SCHED_ID),
				 FAPI_SCANTYPE_SCHEDULED_SCAN,
				 FAPI_REPORTMODE_REAL_TIME,
				 request->n_ssids,
				 request->ssids,
				 req);
	if (r)
		return r;

	/* Fill min/max period little-endian. NOTE(review): the >=4.4 branch
	 * multiplies the scan-plan interval (seconds) by 1e6, the legacy
	 * branch multiplies request->interval (ms) by 1e3 - both presumably
	 * target microseconds; confirm against the FAPI spec.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	SLSI_U32_TO_BUFF_LE(request->scan_plans->interval * 1000 * 1000, &scan_timing_ie[7]);
	SLSI_U32_TO_BUFF_LE(request->scan_plans->interval * 1000 * 1000, &scan_timing_ie[11]);
#else
	SLSI_U32_TO_BUFF_LE(request->interval * 1000, &scan_timing_ie[7]);
	SLSI_U32_TO_BUFF_LE(request->interval * 1000, &scan_timing_ie[11]);
#endif

	fapi_append_data(req, scan_timing_ie, sizeof(scan_timing_ie));
	fapi_append_data(req, ies, ies_len);

	if (request->n_match_sets) {
		struct cfg80211_match_set *match_sets = request->match_sets;
		u8 *ssid_filter_ie;

		/* Pack match-set SSIDs into filter IEs, 7 per element, and
		 * grow each IE's length byte as SSIDs are appended.
		 */
		for (j = 0; j < num_ssid_filter_elements; j++) {
			ssid_filter_ie = fapi_append_data(req, ssid_filter_ie_hdr, sizeof(ssid_filter_ie_hdr));
			if (!ssid_filter_ie) {
				slsi_kfree_skb(req);
				SLSI_ERR(sdev, "ssid_filter_ie append failed\n");
				return -EIO;
			}
			for (i = 0; i < SLSI_MAX_SSID_DESC_IN_SSID_FILTER_ELEM; i++, match_sets++) {
				if ((j * SLSI_MAX_SSID_DESC_IN_SSID_FILTER_ELEM) + i >= request->n_match_sets)
					break;
				SLSI_NET_DBG2(dev, SLSI_MLME, "SSID: %.*s",
					      match_sets->ssid.ssid_len, match_sets->ssid.ssid);
				ssid_filter_ie[1] += (1 + match_sets->ssid.ssid_len);
				fapi_append_data(req, &match_sets->ssid.ssid_len, 1);
				fapi_append_data(req, match_sets->ssid.ssid, match_sets->ssid.ssid_len);
			}
		}
	}

	if (request->n_channels) {
		/* Passive scan when no SSIDs requested; helper frees req on failure. */
		r = slsi_mlme_append_channel_list(sdev, dev, req, request->n_channels, request->channels,
						  FAPI_SCANTYPE_SCHEDULED_SCAN, request->n_ssids == 0);
		if (r)
			return r;
	}

	rx = slsi_mlme_req_cfm(sdev, NULL, req, MLME_ADD_SCAN_CFM);
	if (!rx)
		return -EIO;

	if (fapi_get_u16(rx, u.mlme_add_scan_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_add_scan_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(rx, u.mlme_add_scan_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(rx);
	return r;
}
+
/* Build and send an MLME_ADD_SCAN.request for a one-shot or gscan scan.
 * For the non-gscan path the scan id is (vif << 8 | HW slot); for gscan it
 * comes from the bucket, and the timing IE carries the bucket's period /
 * max period / exponent / step count. When wait_for_ind is true the call
 * blocks until MLME_SCAN_DONE.ind (confirm checked by
 * slsi_scan_cfm_validate); otherwise only the confirm is awaited.
 * Returns 0, -EOPNOTSUPP (WlanLite), -ENOMEM, -EIO or -EINVAL. Caller must
 * hold ndev_vif->scan_mutex. Helpers free req on their failure paths.
 */
int slsi_mlme_add_scan(
	struct slsi_dev *sdev,
	struct net_device *dev,
	u16 scan_type,
	u16 report_mode,
	u32 n_ssids,
	struct cfg80211_ssid *ssids,
	u32 n_channels,
	struct ieee80211_channel *channels[],
	void *gscan,
	const u8 *ies,
	u16 ies_len,
	bool wait_for_ind)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *rx;
	int r = 0;
	size_t alloc_data_size = 0;
	u32 i;

	/* Scan Timing IE: default values */
	u8 scan_timing_ie[] = {
		0xdd,			/* Element ID: Vendor Specific */
		0x11,			/* Length */
		0x00, 0x16, 0x32,	/* OUI: Samsung Electronics Co. */
		0x01,			/* OUI Type: Scan parameters */
		0x01,			/* OUI Subtype: Scan timing */
		0x00, 0x00, 0x00, 0x00,	/* Min_Period: filled later in the function */
		0x00, 0x00, 0x00, 0x00,	/* Max_Period: filled later in the function */
		0x00,			/* Exponent */
		0x00,			/* Step count */
		0x00, 0x00		/* Skip first period: false */
	};

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_WARN(dev, "not supported in WlanLite mode\n");
		return -EOPNOTSUPP;
	}

	if (WARN_ON(!(dev->dev_addr)))
		return -EINVAL;

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex));
	SLSI_INFO(sdev, "scan started for id:0x%x, n_channels:%d, n_ssids:%d, scan_type:%d\n",
		  (ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID), n_channels, n_ssids, scan_type);

	/* Bulk-data budget: timing IE + caller IEs + channel-list IE with
	 * one descriptor per channel + one SSID element per SSID.
	 */
	alloc_data_size += sizeof(scan_timing_ie) +
			   ies_len +
			   (SLSI_SCAN_PRIVATE_IE_CHANNEL_LIST_HEADER_LEN + (n_channels * SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE));

	for (i = 0; i < n_ssids; i++)
		alloc_data_size += 2 + ssids[i].ssid_len; /* 2: SSID EID + len */

	req = fapi_alloc(mlme_add_scan_req, MLME_ADD_SCAN_REQ, 0, alloc_data_size);
	if (!req)
		return -ENOMEM;

	if (!gscan) {
		/* Ordinary scan: fixed params, default timing, caller IEs,
		 * then the channel list (passive when no SSIDs requested).
		 */
		r = slsi_set_scan_params(
			dev,
			(ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID),
			scan_type,
			report_mode,
			n_ssids,
			ssids,
			req);
		if (r)
			return r;

		fapi_append_data(req, scan_timing_ie, sizeof(scan_timing_ie));
		fapi_append_data(req, ies, ies_len);

		if (n_channels) {
			r = slsi_mlme_append_channel_list(sdev, dev, req, n_channels, channels, scan_type,
							  n_ssids == 0);
			if (r)
				return r;
		}
	}
#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
	else {
		struct slsi_gscan_param *gscan_param = (struct slsi_gscan_param *)gscan;

		r = slsi_set_scan_params(
			dev,
			gscan_param->bucket->scan_id,
			scan_type,
			report_mode,
			n_ssids,
			ssids,
			req);
		if (r)
			return r;

		/* Bucket period(s) in ms -> timing IE fields (little-endian).
		 * Exponent/step enable exponential back-off up to max_period.
		 */
		SLSI_U32_TO_BUFF_LE((gscan_param->nl_bucket->period * 1000), &scan_timing_ie[7]);
		if (gscan_param->nl_bucket->exponent) {
			SLSI_U32_TO_BUFF_LE((gscan_param->nl_bucket->max_period * 1000), &scan_timing_ie[11]);
			scan_timing_ie[15] = (u8)gscan_param->nl_bucket->exponent;
			scan_timing_ie[16] = (u8)gscan_param->nl_bucket->step_count;
		}
		fapi_append_data(req, scan_timing_ie, sizeof(scan_timing_ie));

		r = slsi_mlme_append_gscan_channel_list(sdev, dev, req, gscan_param->nl_bucket);
		if (r)
			return r;
	}
#endif
	if (wait_for_ind) {
		/* Use the Global sig_wait not the Interface specific for Scan Req */
		rx = slsi_mlme_req_cfm_ind(sdev, NULL, req, MLME_ADD_SCAN_CFM, MLME_SCAN_DONE_IND, slsi_scan_cfm_validate);
		if (!rx)
			return -EIO;
		SLSI_NET_DBG3(dev, SLSI_MLME, "mlme_scan_done_ind()\n");

		/* slsi_mlme_add_scan is a generic definition for multiple handlers
		 * Any added functionality, if not generic, should not be defined here.
		 * It should be a part of calling function.
		 */
	} else {
		/* Use the Global sig_wait not the Interface specific for Scan Req */
		rx = slsi_mlme_req_cfm(sdev, NULL, req, MLME_ADD_SCAN_CFM);
		if (!rx)
			return -EIO;
		if (fapi_get_u16(rx, u.mlme_add_scan_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
			SLSI_NET_ERR(dev, "mlme_add_scan_cfm(result:0x%04x) ERROR\n",
				     fapi_get_u16(rx, u.mlme_add_scan_cfm.result_code));
			r = -EINVAL;
		}
	}
	slsi_kfree_skb(rx);
	return r;
}
+
+/* Issue an MLME-DEL-SCAN.request for the given scan id and wait for the
+ * confirm. Returns 0 on success, -EOPNOTSUPP in WlanLite mode, -ENOMEM if
+ * the request cannot be allocated, -EIO if no confirm arrives and -EINVAL
+ * if the firmware reports a failure result code.
+ */
+int slsi_mlme_del_scan(struct slsi_dev *sdev, struct net_device *dev, u16 scan_id, bool scan_timed_out)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff    *req;
+	struct sk_buff    *cfm;
+	u16                result;
+	int                ret = 0;
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_WARN(dev, "not supported in WlanLite mode\n");
+		return -EOPNOTSUPP;
+	}
+
+	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_del_scan_req(scan_id:%d)\n", scan_id);
+
+	/* A pending HW-scan timeout no longer applies once the scan is deleted */
+	if (!scan_timed_out &&
+	    (scan_id & 0xFF) == SLSI_SCAN_HW_ID && ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req)
+		cancel_delayed_work(&ndev_vif->scan_timeout_work);
+
+	req = fapi_alloc(mlme_del_scan_req, MLME_DEL_SCAN_REQ, 0, 0);
+	if (!req)
+		return -ENOMEM;
+	fapi_set_u16(req, u.mlme_del_scan_req.scan_id, scan_id);
+
+	/* Use the Global sig_wait not the Interface specific for Scan Req */
+	cfm = slsi_mlme_req_cfm(sdev, NULL, req, MLME_DEL_SCAN_CFM);
+	if (!cfm)
+		return -EIO;
+
+	result = fapi_get_u16(cfm, u.mlme_del_scan_cfm.result_code);
+	if (result != FAPI_RESULTCODE_SUCCESS) {
+		SLSI_NET_ERR(dev, "mlme_del_scan_cfm(result:0x%04x) ERROR\n", result);
+		ret = -EINVAL;
+	}
+
+	slsi_kfree_skb(cfm);
+	return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+/* Build and append an Extended Capabilities IE of fixed size
+ * SLSI_AP_EXT_CAPAB_IE_LEN_MAX, preserving any capability octets from an
+ * existing Ext Capab IE (prev_ext, may be NULL) and setting the VHT
+ * Operating Mode Notification bit (bit 62, i.e. bit 6 of the 8th
+ * capability octet).
+ */
+static void slsi_ap_add_ext_capab_ie(struct sk_buff *req, struct netdev_vif *ndev_vif, const u8 *prev_ext)
+{
+	u8  ext_capa_ie[SLSI_AP_EXT_CAPAB_IE_LEN_MAX];
+	int i;
+	int prev_len = 0;
+
+	ext_capa_ie[0] = WLAN_EID_EXT_CAPABILITY;
+	ext_capa_ie[1] = SLSI_AP_EXT_CAPAB_IE_LEN_MAX - 1 - 1; /* IE length excludes EID and length octets */
+	if (prev_ext) {
+		prev_len = prev_ext[1];
+		/* Clamp to the destination: an over-long received IE must not
+		 * overflow ext_capa_ie (only the payload area after the two
+		 * header octets is available).
+		 */
+		if (prev_len > SLSI_AP_EXT_CAPAB_IE_LEN_MAX - 2)
+			prev_len = SLSI_AP_EXT_CAPAB_IE_LEN_MAX - 2;
+		for (i = 2; i < prev_len + 2; i++)
+			ext_capa_ie[i] = prev_ext[i];
+	}
+	/* Zero-fill any remaining capability octets */
+	for (i = prev_len + 2; i < SLSI_AP_EXT_CAPAB_IE_LEN_MAX; i++)
+		ext_capa_ie[i] = 0x00;
+	SLSI_DBG3(ndev_vif->sdev, SLSI_MLME, "New Ext capab Added\n");
+	/* For VHT, set the Operating Mode Notification field - Bit 62 (8th Octet) */
+	ext_capa_ie[9] |= 0x40;
+
+	fapi_append_data(req, &ext_capa_ie[0], SLSI_AP_EXT_CAPAB_IE_LEN_MAX);
+}
+#endif
+
+/* Build a replacement Country IE for the AP beacon from the regulatory
+ * domain currently configured in the driver (rather than the channel list
+ * supplied by hostapd). On success *new_country_ie points to a kmalloc'd
+ * IE that the caller must kfree. Returns 0 or -ENOMEM.
+ */
+static int slsi_prepare_country_ie(struct slsi_dev *sdev, u16 center_freq, u8 *country_ie, u8 **new_country_ie)
+{
+	struct ieee80211_supported_band band;
+	struct ieee80211_reg_rule *rule;
+	struct ieee80211_channel *channels;
+	u8 *ie;
+	u8 offset = 0;
+	int i;
+
+	/* Select frequency band (5180 MHz is the first 5 GHz channel centre) */
+	if (center_freq < 5180)
+		band = slsi_band_2ghz;
+	else
+		band = slsi_band_5ghz;
+
+	/* Allocate memory for the new country IE - EID(1) + Len(1) + CountryString(3) + ChannelInfo (n * 3) */
+	ie = kmalloc(5 + (band.n_channels * 3), GFP_KERNEL);
+	if (!ie) {
+		SLSI_ERR(sdev, "Failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Prepare the new country IE */
+	ie[offset++] = country_ie[0];                                        /* Element IE */
+	ie[offset++] = 0;                                                    /* IE Length - initialized at the end of this function */
+	ie[offset++] = sdev->device_config.domain_info.regdomain->alpha2[0]; /* Country code */
+	ie[offset++] = sdev->device_config.domain_info.regdomain->alpha2[1]; /* Country code */
+	ie[offset++] = country_ie[4];                                        /* CountryString: 3rd octet */
+
+	channels = band.channels;
+	for (i = 0; i < band.n_channels; i++, channels++) {
+		/* Get the regulatory rule for the channel; channels with no
+		 * matching rule are simply omitted from the IE.
+		 */
+		rule = slsi_get_reg_rule(MHZ_TO_KHZ(channels->center_freq), &sdev->device_config.domain_info);
+		if (rule) {
+			ie[offset++] = channels->hw_value;                    /* Channel number */
+			ie[offset++] = 1;                                     /* Number of channels */
+			ie[offset++] = MBM_TO_DBM(rule->power_rule.max_eirp); /* Max TX power */
+		}
+	}
+
+	ie[1] = offset - 2; /* Length of IE (excludes the EID and length octets) */
+	*new_country_ie = ie;
+
+	return 0;
+}
+
+/* Patch one octet of an IE already present in the ies buffer.
+ * For HT/VHT Capability the octet is OR-ed; for DS Params / HT Operation
+ * the first payload octet (index 2) is overwritten and any other octet is
+ * OR-ed. Returns true if the IE was found and patched, false otherwise.
+ */
+int slsi_modify_ies(struct net_device *dev, u8 eid, u8 *ies, int ies_len, u8 ie_index, u8 ie_value)
+{
+	u8 *ie;
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "eid: %d, ie_value = 0x%x\n", eid, ie_value);
+
+	ie = (u8 *)cfg80211_find_ie(eid, ies, ies_len);
+	if (ie) {
+		/* Never write outside the found IE: the target octet must fall
+		 * inside EID(1) + Len(1) + payload(ie[1]) and not in the header.
+		 */
+		if (ie_index < 2 || ie_index >= ie[1] + 2) {
+			SLSI_NET_WARN(dev, "slsi_modify_ies: index %d out of bounds for IE %d (len %d)\n", ie_index, eid, ie[1]);
+			return false;
+		}
+		switch (eid) {
+		case WLAN_EID_HT_CAPABILITY:
+		case WLAN_EID_VHT_CAPABILITY:
+			ie[ie_index] |= ie_value;
+			break;
+		case WLAN_EID_DS_PARAMS:
+		case WLAN_EID_HT_OPERATION:
+			if (ie_index == 2)
+				ie[ie_index] = ie_value;
+			else
+				ie[ie_index] |= ie_value;
+			break;
+		default:
+			SLSI_NET_WARN(dev, "slsi_modify_ies: IE type mismatch : %d\n", eid);
+			return false;
+		}
+		return true;
+	}
+	SLSI_NET_WARN(dev, "slsi_modify_ies: IE not found : %d\n", eid);
+	return false;
+}
+
+/* Append the filtered and fixed-up beacon tail IEs to an MLME-START.request:
+ *  - replaces the hostapd Country IE with a driver-built one,
+ *  - patches the HT Operation IE when the OBSS scan saw non-HT BSSs,
+ *  - strips the vendor IEs (WPA, WMM, WPS, P2P),
+ *  - for 80 MHz width, inserts an Ext Capab IE before the VHT IEs and
+ *    skips Ext Capab / VHT Operation found in the tail.
+ */
+static void slsi_mlme_start_prepare_ies(struct sk_buff *req, struct netdev_vif *ndev_vif, struct cfg80211_ap_settings *settings, const u8 *wpa_ie_pos, const u8 *wmm_ie_pos)
+{
+	const u8 *wps_ie, *vht_capab_ie, *tail_pos = NULL, *ext_capab_ie;
+	size_t beacon_ie_len = 0, tail_length = 0;
+	u8 *country_ie;
+	const u8 *beacon_tail = settings->beacon.tail;
+	size_t beacon_tail_len = settings->beacon.tail_len;
+
+	/**
+	 * The channel list of the Country IE prepared by hostapd is wrong, so the driver needs to remove the existing country IE and prepare a correct one.
+	 * Hostapd adds the country IE at the beginning of the tail; beacon_tail is moved to the next IE to skip the default country IE.
+	 */
+	country_ie = (u8 *)cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_tail, beacon_tail_len);
+	if (country_ie) {
+		u8 *new_country_ie = NULL;
+
+		SLSI_DBG3(ndev_vif->sdev, SLSI_MLME, "Country IE found, length = %d", country_ie[1]);
+
+		/* Prepare the new country IE */
+		if (slsi_prepare_country_ie(ndev_vif->sdev, ndev_vif->chan->center_freq, country_ie, &new_country_ie) != 0)
+			SLSI_ERR(ndev_vif->sdev, "Failed to prepare country IE");
+
+		/* Add the new country IE */
+		if (new_country_ie) {
+			/* new_country_ie[1] contains the length of the IE payload */
+			fapi_append_data(req, new_country_ie, (new_country_ie[1] + 2));
+
+			/* Free the memory allocated for the new country IE */
+			kfree(new_country_ie);
+
+			/* Remove the default country IE from the beacon_tail.
+			 * NOTE(review): this assumes the Country IE is the FIRST
+			 * IE in the tail - confirm hostapd always orders it first.
+			 */
+			beacon_tail += (country_ie[1] + 2);
+			beacon_tail_len -= (country_ie[1] + 2);
+		}
+	}
+
+	/* Modify HT IE based on OBSS scan data */
+	if (ndev_vif->ap.non_ht_bss_present) {
+		u8 op_mode = 1;	/* value OR-ed into octet 4 of the HT Operation IE */
+
+		SLSI_NET_DBG1(ndev_vif->wdev.netdev, SLSI_MLME, "Modify Operating mode of BSS in HT IE\n");
+		slsi_modify_ies(ndev_vif->wdev.netdev, WLAN_EID_HT_OPERATION, (u8 *)settings->beacon.tail, settings->beacon.tail_len, 4, op_mode);
+		ndev_vif->ap.non_ht_bss_present = false;
+	}
+
+	/* Vendor IEs are excluded from start_req. Currently WPA IE, WMM IE, WPS IE and P2P IE need to be excluded.
+	 * From hostapd, order of IEs are - WPA, WMM, WPS and P2P
+	 * Of these the WMM, WPS and P2P IE are usually at the end.
+	 * Note: There can be "eid_p2p_manage" and "eid_hs20" after WPS and P2P IE. Both of these are currently not supported.
+	 */
+
+	/* Exclude WMM or WPS IE */
+	if (wmm_ie_pos) /* WMM IE is present. Remove from this position onwards, i.e. copy only till this data. WPS and P2P IE will also get removed. */
+		beacon_ie_len = wmm_ie_pos - beacon_tail;
+	else {
+		/* WMM IE is not present. Check for WPS IE (and thereby P2P IE) and exclude it */
+		wps_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, beacon_tail, beacon_tail_len);
+		if (wps_ie)
+			beacon_ie_len = wps_ie - beacon_tail;
+		else
+			beacon_ie_len = beacon_tail_len;
+	}
+
+	/* Exclude WPA IE if present */
+	if (wpa_ie_pos) {
+		size_t len_before, len;
+
+		/* Copy everything before the WPA IE ... */
+		len_before = wpa_ie_pos - beacon_tail;
+		fapi_append_data(req, beacon_tail, len_before);
+
+		/* ... then skip over it (ap.wpa_ie_len covers the whole IE) */
+		len = len_before + ndev_vif->ap.wpa_ie_len;
+
+		if (beacon_ie_len > len) { /* More IEs to go */
+			tail_length = beacon_ie_len - len;
+			tail_pos = (beacon_tail + len);
+		} else /* No more IEs, don't add Ext Capab IE as no HT/VHT */
+			return;
+	} else {
+		tail_length = beacon_ie_len;
+		tail_pos = beacon_tail;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	/* Add Ext Capab IE only for VHT mode for now */
+	if (ndev_vif->chandef->width == NL80211_CHAN_WIDTH_80) {
+		/* Ext Capab should be before VHT IEs */
+		vht_capab_ie = (cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, tail_pos, tail_length));
+		ext_capab_ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, tail_pos, tail_length);
+		while (tail_length > 2) {
+			if (tail_pos[0] == WLAN_EID_VHT_CAPABILITY)
+				/* Insert the (possibly merged) Ext Capab IE just before
+				 * the VHT Capability IE; the VHT Capability itself is
+				 * not copied here.
+				 * NOTE(review): presumably the caller re-adds the VHT
+				 * IEs via slsi_prepare_vht_ies - confirm.
+				 */
+				slsi_ap_add_ext_capab_ie(req, ndev_vif, ext_capab_ie);
+			else if (tail_pos[0] != WLAN_EID_EXT_CAPABILITY && tail_pos[0] != WLAN_EID_VHT_OPERATION)
+				fapi_append_data(req, tail_pos, tail_pos[1] + 2);
+
+			tail_length -= tail_pos[1] + 2;
+			tail_pos += tail_pos[1] + 2;
+		}
+		if (!vht_capab_ie)
+			slsi_ap_add_ext_capab_ie(req, ndev_vif, ext_capab_ie);
+	} else {
+		fapi_append_data(req, tail_pos, tail_length);
+	}
+#else
+	fapi_append_data(req, tail_pos, tail_length);
+#endif
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+/*EID + LEN + CAPABILITIES + MCS */
+/* 1+1+4+8 */
+#define SLSI_VHT_CAPABILITIES_IE_LEN 14
+
+/* EID + LEN + WIDTH + SEG0 + SEG1 + MCS */
+/* 1+1+1+1+1+2 */
+#define SLSI_VHT_OPERATION_IE_LEN 7
+
+/* Allocate and fill VHT Capabilities and VHT Operation IEs for 80 MHz AP
+ * operation. On success both *vht_ie_capab and *vht_ie_operation point to
+ * kmalloc'd buffers that the caller must kfree; on failure nothing is
+ * left allocated. Returns 0 on success, -ENOMEM if allocation fails.
+ */
+static int slsi_prepare_vht_ies(struct net_device *dev, u8 **vht_ie_capab, u8 **vht_ie_operation)
+{
+	u32 capabs;
+	u16 mcs;
+	u8 *p_cap, *p_oper;
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+	*vht_ie_capab = kmalloc(SLSI_VHT_CAPABILITIES_IE_LEN, GFP_KERNEL);
+	if (!(*vht_ie_capab))
+		return -ENOMEM;	/* was -EINVAL: allocation failure is -ENOMEM */
+	*vht_ie_operation = kmalloc(SLSI_VHT_OPERATION_IE_LEN, GFP_KERNEL);
+	if (!(*vht_ie_operation)) {
+		kfree(*vht_ie_capab);
+		*vht_ie_capab = NULL;	/* don't leave a dangling pointer behind */
+		return -ENOMEM;
+	}
+
+	p_cap = *vht_ie_capab;
+	p_oper = *vht_ie_operation;
+
+	/* VHT Capabilities: EID, Len, 4-octet capability info, 8-octet MCS set */
+	*p_cap++ = WLAN_EID_VHT_CAPABILITY;
+	*p_cap++ = SLSI_VHT_CAPABILITIES_IE_LEN - 1 - 1;
+	capabs = cpu_to_le32(slsi_vht_cap.cap);
+	memcpy(p_cap, &capabs, sizeof(capabs));
+	p_cap += sizeof(capabs);
+	memcpy(p_cap, &slsi_vht_cap.vht_mcs, sizeof(slsi_vht_cap.vht_mcs));
+
+	/* VHT Operation: 80 MHz, centre segment 0 from the chandef, seg1 = 0 */
+	*p_oper++ = WLAN_EID_VHT_OPERATION;
+	*p_oper++ = SLSI_VHT_OPERATION_IE_LEN - 1 - 1;
+	*p_oper++ = IEEE80211_VHT_CHANWIDTH_80MHZ;
+	*p_oper++ = ieee80211_frequency_to_channel(ndev_vif->chandef->center_freq1);
+	*p_oper++ = 0;
+	mcs = cpu_to_le16(0xfffc);	/* basic VHT-MCS map: MCS 0-7 for one stream, others unsupported */
+	memcpy(p_oper, &mcs, sizeof(mcs));
+
+	return 0;
+}
+#endif
+
+/* Build and send an MLME-START.request to start AP/GO operation on the
+ * vif. The request carries the BSS parameters plus: an SSID IE (hidden
+ * SSID cases), the beacon head IEs, the filtered beacon tail IEs
+ * (slsi_mlme_start_prepare_ies) and, when append_vht_ies is set,
+ * driver-built VHT Capability/Operation IEs.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_mlme_start(struct slsi_dev *sdev, struct net_device *dev, u8 *bssid, struct cfg80211_ap_settings *settings, const u8 *wpa_ie_pos, const u8 *wmm_ie_pos, bool append_vht_ies)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *req;
+	struct sk_buff *cfm;
+	struct ieee80211_mgmt *mgmt;
+	int r = 0;
+	u8 *p;
+	enum nl80211_auth_type auth_type = settings->auth_type;
+	u16 beacon_ie_head_len;
+	u16 chan_info;
+	u16 fw_freq;
+	u16 vht_ies_len = 0;
+	u8 ext_capab_len = 0;
+	const u8 *recv_vht_capab_ie, *recv_vht_operation_ie;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	u8 *vht_ie_capab, *vht_ie_operation;
+#endif
+	SLSI_UNUSED_PARAMETER(bssid);
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_START.request\n");
+		return -EOPNOTSUPP;
+	}
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	/* Beacon head = mgmt header + fixed beacon fields; the IEs start at
+	 * u.beacon.variable, so the head IE length is what remains after them.
+	 */
+	mgmt = (struct ieee80211_mgmt *)settings->beacon.head;
+	beacon_ie_head_len = settings->beacon.head_len - ((u8 *)mgmt->u.beacon.variable - (u8 *)mgmt);
+
+	/* For port enabling, save the privacy bit used in assoc response or beacon */
+	ndev_vif->ap.privacy = (mgmt->u.beacon.capab_info & WLAN_CAPABILITY_PRIVACY);
+	ndev_vif->ap.qos_enabled = (mgmt->u.beacon.capab_info & WLAN_CAPABILITY_QOS);
+
+	switch (auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		/* Privacy with no negotiated group cipher implies WEP -> Shared Key */
+		auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
+		if (settings->privacy && settings->crypto.cipher_group == 0)
+			auth_type = NL80211_AUTHTYPE_SHARED_KEY;
+		break;
+	default:
+		SLSI_NET_ERR(dev, "Unsupported auth_type: %d\n", auth_type);
+		return -EOPNOTSUPP;
+	}
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_start_req(vif:%u, bssid:%pM, ssid:%.*s, hidden:%d)\n", ndev_vif->ifnum, bssid, (int)settings->ssid_len, settings->ssid, settings->hidden_ssid);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	if (append_vht_ies)
+		vht_ies_len = SLSI_VHT_CAPABILITIES_IE_LEN + SLSI_VHT_OPERATION_IE_LEN;
+
+	/* VHT IEs already present in the tail are skipped when appending, so
+	 * their size is subtracted from the extra allocation.
+	 * NOTE(review): these subtractions also run when append_vht_ies is
+	 * false; if the tail contained VHT IEs in that case, vht_ies_len (u16)
+	 * would wrap around - confirm callers never pass that combination.
+	 */
+	recv_vht_capab_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, settings->beacon.tail,
+					     settings->beacon.tail_len);
+	if (recv_vht_capab_ie)
+		vht_ies_len -= (recv_vht_capab_ie[1] + 2);
+
+	recv_vht_operation_ie = cfg80211_find_ie(WLAN_EID_VHT_OPERATION, settings->beacon.tail,
+						 settings->beacon.tail_len);
+	if (recv_vht_operation_ie)
+		vht_ies_len -= (recv_vht_operation_ie[1] + 2);
+
+	if (ndev_vif->chandef->width == NL80211_CHAN_WIDTH_80) {
+		/* Ext Capab are not advertised by driver and so the IE would not be sent by hostapd.
+		 * Frame the IE in driver and set the required bit(s).
+		 */
+		SLSI_NET_DBG1(dev, SLSI_MLME, "VHT - Ext Capab IE to be included\n");
+		ext_capab_len = SLSI_AP_EXT_CAPAB_IE_LEN_MAX;
+	}
+#endif
+
+	/* hidden_ssid == 1 (zero-length SSID in beacon): the real SSID is sent
+	 * as an extra IE, so allocate room for it on top of head + tail.
+	 */
+	if (settings->hidden_ssid == 1)
+		req = fapi_alloc(mlme_start_req, MLME_START_REQ, ndev_vif->ifnum, settings->ssid_len + beacon_ie_head_len + settings->beacon.tail_len + vht_ies_len + ext_capab_len);
+	else
+		req = fapi_alloc(mlme_start_req, MLME_START_REQ, ndev_vif->ifnum, beacon_ie_head_len + settings->beacon.tail_len + vht_ies_len + ext_capab_len);
+
+	if (!req)
+		return -ENOMEM;
+	fapi_set_memcpy(req, u.mlme_start_req.bssid, dev->dev_addr);
+	fapi_set_u16(req, u.mlme_start_req.beacon_period, settings->beacon_interval);
+	fapi_set_u16(req, u.mlme_start_req.dtim_period, settings->dtim_period);
+	fapi_set_u16(req, u.mlme_start_req.capability_information, le16_to_cpu(mgmt->u.beacon.capab_info));
+	fapi_set_u16(req, u.mlme_start_req.authentication_type, auth_type);
+	fapi_set_u16(req, u.mlme_start_req.hidden_ssid, settings->hidden_ssid < 3 ? settings->hidden_ssid : NL80211_HIDDEN_SSID_ZERO_LEN);
+
+	fw_freq = ndev_vif->chan->center_freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	chan_info = slsi_get_chann_info(sdev, ndev_vif->chandef);
+#else
+	chan_info = slsi_get_chann_info(sdev, ndev_vif->channel_type);
+#endif
+	/* NOTE(review): 20 is decimal here - presumably the low bits of
+	 * chan_info encode a 20 MHz primary width and anything wider uses the
+	 * centre frequency instead; confirm against slsi_get_chann_info() /
+	 * slsi_get_center_freq1().
+	 */
+	if ((chan_info & 20) != 20)
+		fw_freq = slsi_get_center_freq1(sdev, chan_info, fw_freq);
+
+	/* Need to double the freq for the firmware */
+	fapi_set_u16(req, u.mlme_start_req.channel_frequency, (2 * fw_freq));
+	fapi_set_u16(req, u.mlme_start_req.channel_information, chan_info);
+	ndev_vif->ap.channel_freq = fw_freq;
+
+	/* Addition of SSID IE in mlme_start_req for hiddenSSID case */
+	if (settings->hidden_ssid != 0) {
+		p = fapi_append_data(req, NULL, 2 + settings->ssid_len);
+		if (!p) {
+			slsi_kfree_skb(req);
+			return -EINVAL;
+		}
+		*p++ = WLAN_EID_SSID;
+		*p++ = settings->ssid_len;
+		memcpy(p, settings->ssid, settings->ssid_len);
+	}
+
+	/* Append the head IEs, skipping the beacon's own SSID IE when hidden
+	 * (case 1: zero-length SSID IE = 2 octets; case 2: full-length SSID IE).
+	 */
+	if (beacon_ie_head_len && settings->hidden_ssid == 0)
+		fapi_append_data(req, mgmt->u.beacon.variable, beacon_ie_head_len);
+	else if (beacon_ie_head_len && settings->hidden_ssid == 1)
+		fapi_append_data(req, mgmt->u.beacon.variable + 2, beacon_ie_head_len - 2);
+	else if (beacon_ie_head_len && settings->hidden_ssid == 2)
+		fapi_append_data(req, mgmt->u.beacon.variable + 2 + settings->ssid_len, beacon_ie_head_len - (2 + settings->ssid_len));
+
+	if (settings->beacon.tail_len)
+		slsi_mlme_start_prepare_ies(req, ndev_vif, settings, wpa_ie_pos, wmm_ie_pos);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+	if ((append_vht_ies) && !slsi_prepare_vht_ies(dev, &vht_ie_capab, &vht_ie_operation)) {
+		fapi_append_data(req, vht_ie_capab, SLSI_VHT_CAPABILITIES_IE_LEN);
+		fapi_append_data(req, vht_ie_operation, SLSI_VHT_OPERATION_IE_LEN);
+		kfree(vht_ie_capab);
+		kfree(vht_ie_operation);
+	}
+#endif
+
+	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_START_CFM);
+	if (!cfm)
+		return -EIO;
+	if (fapi_get_u16(cfm, u.mlme_start_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+		SLSI_NET_ERR(dev, "mlme_start_cfm(result:0x%04x) ERROR\n",
+			     fapi_get_u16(cfm, u.mlme_start_cfm.result_code));
+		r = -EINVAL;
+	}
+
+	slsi_kfree_skb(cfm);
+	return r;
+}
+
+/* Locate the security IE for the connection (WAPI when wpa_versions == 0,
+ * RSN when wpa_versions == 2) in the connect parameters and sanity-check
+ * its version field. Returns a pointer to the IE (or NULL) and writes the
+ * total IE length (header included, 0 when absent) to *sec_ie_len.
+ */
+static const u8 *slsi_mlme_connect_get_sec_ie(struct cfg80211_connect_params *sme, int *sec_ie_len)
+{
+	const u8 *ie = NULL;
+	u16       ver;
+
+	if (sme->crypto.wpa_versions == 0) {
+		/* WAPI */
+		ie = cfg80211_find_ie(SLSI_WLAN_EID_WAPI, sme->ie, sme->ie_len);
+		if (ie) {
+			ver = (ie[3] << 8) | ie[2];
+			if (ver != 1) {
+				SLSI_ERR_NODEV("Unexpected version (%d) in WAPI ie\n", ver);
+				return NULL;
+			}
+		}
+	} else if (sme->crypto.wpa_versions == 2) {
+		/* RSN: the 16-bit version field is the first payload octet pair (index 2) */
+		ie = cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len);
+		if (ie) {
+			ver = (ie[2 + 1] << 8) | ie[2];
+			if (ver != 1) {
+				SLSI_ERR_NODEV("Unexpected version (%d) in rsn ie\n", ver);
+				return NULL;
+			}
+		}
+	}
+
+	*sec_ie_len = ie ? ie[1] + 2 : 0;
+	return ie;
+}
+
+/* If is_copy is true copy the required IEs from connect_ie to ie_dest,
+ * else calculate the required IE length.
+ * The IEs handled are: the Interworking IE (EID 107), all vendor-specific
+ * IEs from the first one to the end of the buffer, and a driver-built WMM
+ * Information Element when device_config.qos_info is set.
+ * Returns the total length (length pass), 0 (successful copy pass) or
+ * -EINVAL on bad arguments / insufficient destination space.
+ */
+static int slsi_mlme_connect_info_elems_ie_prep(struct slsi_dev *sdev, const u8 *connect_ie,
+						const size_t connect_ie_len, bool is_copy, u8 *ie_dest, int ie_dest_len)
+{
+	const u8 *ie_pos = NULL;
+	int info_elem_length = 0;
+	u16 curr_ie_len;
+
+	if (is_copy && (!ie_dest || ie_dest_len == 0))
+		return -EINVAL;
+
+	/* find interworking ie id:107 */
+	ie_pos = cfg80211_find_ie(SLSI_WLAN_EID_INTERWORKING, connect_ie, connect_ie_len);
+	if (ie_pos) {
+		curr_ie_len = *(ie_pos + 1) + 2; /* EID + length octet + payload */
+		if (is_copy) {
+			if (ie_dest_len >= curr_ie_len) {
+				memcpy(ie_dest, ie_pos, curr_ie_len);
+				ie_dest += curr_ie_len;
+				/* free space avail in ie_dest for next ie*/
+				ie_dest_len -= curr_ie_len;
+			} else {
+				SLSI_ERR_NODEV("interwork ie extract error (ie_copy_l:%d, c_ie_l:%d):\n", ie_dest_len, curr_ie_len);
+				return -EINVAL;
+			}
+		} else {
+			info_elem_length = curr_ie_len;
+		}
+	}
+
+	/* vendor specific IEs will be the last elements.
+	 * NOTE(review): assumes every IE from the first vendor-specific IE to
+	 * the end of connect_ie is vendor-specific - confirm against callers.
+	 */
+	ie_pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, connect_ie, connect_ie_len);
+	if (ie_pos) {
+		/* length of all the vendor specific IEs */
+		curr_ie_len = connect_ie_len - (ie_pos - connect_ie);
+		if (is_copy) {
+			if (ie_dest_len >= curr_ie_len) {
+				memcpy(ie_dest, ie_pos, curr_ie_len);
+				ie_dest += curr_ie_len;
+				ie_dest_len -= curr_ie_len;
+			} else {
+				SLSI_ERR_NODEV("vendor ie extract error (ie_copy_l:%d, c_ie_l:%d):\n", ie_dest_len, curr_ie_len);
+				return -EINVAL;
+			}
+		} else {
+			info_elem_length += curr_ie_len;
+		}
+	}
+
+	/* Driver-built 9-octet WMM Information Element carrying qos_info */
+	if (sdev->device_config.qos_info != -1) {
+		if (is_copy) {
+			if (ie_dest_len >= 9) {
+				int pos = 0;
+
+				ie_dest[pos++] = SLSI_WLAN_EID_VENDOR_SPECIFIC;
+				ie_dest[pos++] = 0x07;	/* payload length */
+				ie_dest[pos++] = 0x00;	/* Microsoft OUI 00:50:F2 */
+				ie_dest[pos++] = 0x50;
+				ie_dest[pos++] = 0xf2;
+				ie_dest[pos++] = WLAN_OUI_TYPE_MICROSOFT_WMM;
+				ie_dest[pos++] = WMM_OUI_SUBTYPE_INFORMATION_ELEMENT;
+				ie_dest[pos++] = WMM_VERSION;
+				ie_dest[pos++] = sdev->device_config.qos_info & 0x0F; /* only the QoS Info nibble */
+				ie_dest += pos;
+				ie_dest_len -= pos;
+			} else {
+				SLSI_ERR_NODEV("Required 9bytes but left:%d\n", ie_dest_len);
+				return -EINVAL;
+			}
+		} else {
+			info_elem_length += 9;
+		}
+	}
+	return info_elem_length;
+}
+
+/* Pass the additional association-request information elements from the
+ * connect parameters to the firmware via MLME-ADD-INFO-ELEMENTS.request,
+ * keeping a copy on the WLAN vif for later reporting.
+ * Returns 0 on success (also when nothing needs adding) or a negative
+ * errno.
+ */
+static int slsi_mlme_connect_info_elements(struct slsi_dev *sdev, struct net_device *dev, struct cfg80211_connect_params *sme)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int info_elem_length = 0;
+	struct sk_buff *req;
+	struct sk_buff *cfm;
+	int r = 0;
+	u8 *p;
+
+	/* First pass only computes the required length */
+	info_elem_length = slsi_mlme_connect_info_elems_ie_prep(sdev, sme->ie, sme->ie_len, false, NULL, 0);
+
+	/* NO IE required in MLME-ADD-INFO-ELEMENTS (or length pass failed) */
+	if (info_elem_length <= 0)
+		return info_elem_length;
+
+	req = fapi_alloc(mlme_add_info_elements_req, MLME_ADD_INFO_ELEMENTS_REQ,
+			 ndev_vif->ifnum, info_elem_length);
+	if (!req)
+		return -ENOMEM;
+
+	fapi_set_u16(req, u.mlme_add_info_elements_req.purpose, FAPI_PURPOSE_ASSOCIATION_REQUEST);
+
+	p = fapi_append_data(req, NULL, info_elem_length);
+	if (!p) {
+		slsi_kfree_skb(req);
+		return -EINVAL;
+	}
+
+	/* Second pass copies the IEs straight into the request buffer */
+	(void)slsi_mlme_connect_info_elems_ie_prep(sdev, sme->ie, sme->ie_len, true, p, info_elem_length);
+
+	/* backup ies */
+	if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+		if (ndev_vif->sta.assoc_req_add_info_elem_len)
+			kfree(ndev_vif->sta.assoc_req_add_info_elem);
+		ndev_vif->sta.assoc_req_add_info_elem = NULL; /* avoid a dangling pointer if kmalloc below fails */
+		ndev_vif->sta.assoc_req_add_info_elem_len = 0;
+
+		ndev_vif->sta.assoc_req_add_info_elem = kmalloc(info_elem_length, GFP_KERNEL);
+		if (ndev_vif->sta.assoc_req_add_info_elem) {
+			memcpy(ndev_vif->sta.assoc_req_add_info_elem, p, info_elem_length);
+			ndev_vif->sta.assoc_req_add_info_elem_len = info_elem_length;
+		} else {
+			SLSI_WARN(sdev, "No mem for ndev_vif->sta.assoc_req_add_info_elem size %d\n", info_elem_length);
+		}
+	}
+
+	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_add_info_elements_req(vif:%u)\n", ndev_vif->ifnum);
+	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_ADD_INFO_ELEMENTS_CFM);
+	if (!cfm)
+		return -EIO;
+
+	if (fapi_get_u16(cfm, u.mlme_add_info_elements_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+		/* Bug fix: this error was previously logged from the wrong signal
+		 * union member (u.mlme_connect_cfm); read the add_info_elements
+		 * cfm result instead.
+		 */
+		SLSI_NET_ERR(dev, "mlme_add_info_elements_cfm(result:0x%04x) ERROR\n",
+			     fapi_get_u16(cfm, u.mlme_add_info_elements_cfm.result_code));
+		r = -EINVAL;
+	}
+
+	if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, sme->ie, sme->ie_len))
+		ndev_vif->sta.is_wps = true;
+
+	slsi_kfree_skb(cfm);
+	return r;
+}
+
+/* Build and send an MLME-CONNECT.request to join the given BSS.
+ * Adds assoc-request info elements first (slsi_mlme_connect_info_elements),
+ * programs a WEP key when Shared Key authentication is used, and appends
+ * the SSID IE plus any WPA/WPA2/WAPI security IE to the request.
+ * Returns 0 on success or a negative errno.
+ */
+int slsi_mlme_connect(struct slsi_dev *sdev, struct net_device *dev, struct cfg80211_connect_params *sme, struct ieee80211_channel *channel, const u8 *bssid)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *req;
+	struct sk_buff *cfm;
+	int r = 0;
+	u8 *p;
+	enum nl80211_auth_type auth_type = sme->auth_type;
+	u8 mac_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; /* broadcast address used when programming the WEP key */
+	struct key_params slsi_key;
+	const u8 *sec_ie = NULL;
+	int sec_ie_len = 0;
+
+	memset(&slsi_key, 0, sizeof(slsi_key));
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_CONNECT.request\n");
+		return -EOPNOTSUPP;
+	}
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	if (WARN(!bssid, "BSSID is Null"))
+		return -EINVAL;
+
+	if (WARN(!sme->ssid_len, "SSID is Null"))
+		return -EINVAL;
+
+	switch (auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		break;
+	case NL80211_AUTHTYPE_SAE:
+		/* SAE is signalled to the firmware as "network EAP" */
+		auth_type = NL80211_AUTHTYPE_NETWORK_EAP;
+		break;
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		/* In case of WEP, need to try both open and shared.
+		 * FW does this if auth is shared_key. So set it to shared.
+		 */
+		if (sme->privacy &&
+		    (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40 ||
+		     sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104))
+			auth_type = NL80211_AUTHTYPE_SHARED_KEY;
+		else
+			auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
+		break;
+	default:
+		SLSI_NET_ERR(dev, "Unsupported auth_type: %d\n", auth_type);
+		return -EOPNOTSUPP;
+	}
+
+	/* We save the WEP key for shared authentication. */
+	if ((auth_type == NL80211_AUTHTYPE_SHARED_KEY) &&
+	    ((sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) ||
+	     (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104)) &&
+	    (ndev_vif->vif_type == FAPI_VIFTYPE_STATION)) {
+		SLSI_NET_DBG3(dev, SLSI_MLME, "key len (%d)\n", sme->key_len);
+		slsi_key.key = (u8 *)sme->key;
+		if (!slsi_key.key)
+			return -EINVAL;
+		slsi_key.key_len = sme->key_len;
+		slsi_key.seq_len = 0;
+		if (sme->crypto.n_ciphers_pairwise)
+			slsi_key.cipher = sme->crypto.ciphers_pairwise[0];
+
+		r = slsi_mlme_set_key(sdev, dev, sme->key_idx, FAPI_KEYTYPE_WEP, mac_addr, &slsi_key);
+		if (r != 0) {
+			SLSI_NET_ERR(dev, "Error Setting Shared key (%d)", r);
+			return r;
+		}
+	}
+
+	/* Do not check sme->ie as wpa_supplicant sends some invalid value in it even if ie_len is zero. */
+	if (sme->ie_len) {
+		r = slsi_mlme_connect_info_elements(sdev, dev, sme);
+		if (r)
+			return r;
+
+		sec_ie = slsi_mlme_connect_get_sec_ie(sme, &sec_ie_len);
+		/* NOTE(review): the current slsi_mlme_connect_get_sec_ie() never
+		 * produces a negative length; check kept for safety.
+		 */
+		if (sec_ie_len < 0) {
+			SLSI_NET_ERR(dev, "ERROR preparing Security IEs\n");
+			return sec_ie_len;
+		}
+	}
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_connect_req(vif:%u, bssid:%pM, ssid:%.*s)\n", ndev_vif->ifnum, bssid, (int)sme->ssid_len, sme->ssid);
+	req = fapi_alloc(mlme_connect_req, MLME_CONNECT_REQ, ndev_vif->ifnum,
+			 2 + sme->ssid_len + /*SSID IE*/
+			 sec_ie_len); /*WPA/WPA2/WAPI/OSEN*/
+	if (!req)
+		return -ENOMEM;
+
+	fapi_set_memcpy(req, u.mlme_connect_req.bssid, bssid);
+	fapi_set_u16(req, u.mlme_connect_req.authentication_type, auth_type);
+	/* Need to double the freq for the firmware */
+	fapi_set_u16(req, u.mlme_connect_req.channel_frequency, (2 * channel->center_freq));
+
+	p = fapi_append_data(req, NULL, 2 + sme->ssid_len + sec_ie_len);
+	if (!p) {
+		slsi_kfree_skb(req);
+		return -EINVAL;
+	}
+	*p++ = WLAN_EID_SSID;
+	*p++ = sme->ssid_len;
+	memcpy(p, sme->ssid, sme->ssid_len);
+	p += sme->ssid_len;
+
+	if (sec_ie_len)
+		memcpy(p, sec_ie, sec_ie_len);
+	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_CONNECT_CFM);
+	if (!cfm)
+		return -EIO;
+
+	if (fapi_get_u16(cfm, u.mlme_connect_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+		SLSI_NET_ERR(dev, "mlme_connect_cfm(result:0x%04x) ERROR\n",
+			     fapi_get_u16(cfm, u.mlme_connect_cfm.result_code));
+		r = -EINVAL;
+	}
+
+	slsi_kfree_skb(cfm);
+	return r;
+}
+
+/* Send MLME-CONNECT.response to the firmware; a .response signal has no
+ * confirm, so anything returned is unexpected.
+ */
+void slsi_mlme_connect_resp(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff    *req;
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_CONNECT_RESP\n");
+		return;
+	}
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_connect_resp(vif:%u)\n", ndev_vif->ifnum);
+
+	req = fapi_alloc(mlme_connect_res, MLME_CONNECT_RES, ndev_vif->ifnum, 0);
+	if (!req)
+		return;
+
+	WARN_ON(slsi_mlme_req_no_cfm(sdev, dev, req));
+}
+
+/* Send MLME-CONNECTED.response for a newly connected peer; no confirm is
+ * expected for this signal.
+ */
+void slsi_mlme_connected_resp(struct slsi_dev *sdev, struct net_device *dev, u16 peer_index)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *req;
+
+	if (slsi_is_test_mode_enabled()) {
+		/* Fixed copy-paste: this path sends MLME_CONNECTED_RESP, not MLME_CONNECT_RESP */
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_CONNECTED_RESP\n");
+		return;
+	}
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_connected_resp(vif:%u, peer_index:%d)\n", ndev_vif->ifnum, peer_index);
+	req = fapi_alloc(mlme_connected_res, MLME_CONNECTED_RES, ndev_vif->ifnum, 0);
+	if (!req) {
+		SLSI_NET_ERR(dev, "mlme-connected-response :: memory allocation failed\n");
+		return;
+	}
+
+	fapi_set_u16(req, u.mlme_connected_res.peer_index, peer_index);
+	slsi_mlme_req_no_cfm(sdev, dev, req);
+}
+
+/* Send MLME-ROAMED.response after a roam indication; a .response signal
+ * has no confirm, so anything returned is unexpected.
+ */
+void slsi_mlme_roamed_resp(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff    *req;
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_ROAMED_RESP\n");
+		return;
+	}
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_roamed_resp\n");
+
+	req = fapi_alloc(mlme_roamed_res, MLME_ROAMED_RES, ndev_vif->ifnum, 0);
+	if (!req)
+		return;
+
+	WARN_ON(slsi_mlme_req_no_cfm(sdev, dev, req));
+}
+
+/* Validate an MLME-DISCONNECT.confirm. Success and "not present" (peer
+ * already disconnected - possible race, deliberately not logged as an
+ * error) are acceptable outcomes. Consumes the cfm skb. Returns true only
+ * on FAPI_RESULTCODE_SUCCESS. Null check for cfm is done by the caller.
+ */
+bool slsi_disconnect_cfm_validate(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *cfm)
+{
+	int  result = fapi_get_u16(cfm, u.mlme_disconnect_cfm.result_code);
+	bool ok = false;
+
+	SLSI_UNUSED_PARAMETER(sdev);
+
+	if (!WARN_ON(!dev)) {
+		if (result == FAPI_RESULTCODE_SUCCESS)
+			ok = true;
+		else if (result != FAPI_RESULTCODE_NOT_PRESENT)
+			SLSI_NET_ERR(dev, "mlme_disconnect_cfm(result:0x%04x) ERROR\n", result);
+	}
+
+	slsi_kfree_skb(cfm);
+	return ok;
+}
+
+/* Send MLME-DISCONNECT.request (deauth) for the given peer, or broadcast
+ * when mac is NULL. When wait_ind is set, wait for MLME-DISCONNECT.ind
+ * with the cfm validated by slsi_disconnect_cfm_validate; otherwise only
+ * the cfm result code is checked. Returns 0 or a negative errno.
+ */
+int slsi_mlme_disconnect(struct slsi_dev *sdev, struct net_device *dev, u8 *mac, u16 reason_code, bool wait_ind)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff    *req;
+	struct sk_buff    *rx;
+	int                ret = 0;
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_DISCONNECT.request\n");
+		return -EOPNOTSUPP;
+	}
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_disconnect_req(vif:%u, bssid:%pM, reason:%d)\n", ndev_vif->ifnum, mac, reason_code);
+
+	/* No data reference required */
+	req = fapi_alloc(mlme_disconnect_req, MLME_DISCONNECT_REQ, ndev_vif->ifnum, 0);
+	if (!req)
+		return -ENOMEM;
+
+	SLSI_INFO(sdev, "Send DEAUTH, reason = %d\n", reason_code);
+	fapi_set_u16(req, u.mlme_disconnect_req.reason_code, reason_code);
+	if (mac)
+		fapi_set_memcpy(req, u.mlme_disconnect_req.peer_sta_address, mac);
+	else
+		fapi_set_memset(req, u.mlme_disconnect_req.peer_sta_address, 0);
+
+	if (wait_ind) {
+		rx = slsi_mlme_req_cfm_ind(sdev, dev, req, MLME_DISCONNECT_CFM, MLME_DISCONNECT_IND, slsi_disconnect_cfm_validate);
+		if (!rx) {
+			SLSI_NET_ERR(dev, "mlme_disconnect_cfm() ERROR\n");
+			ret = -EINVAL;
+		}
+	} else {
+		rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_DISCONNECT_CFM);
+		if (!rx) {
+			ret = -EIO;
+		} else if (fapi_get_u16(rx, u.mlme_disconnect_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+			SLSI_NET_ERR(dev, "mlme_disconnect_cfm(result:0x%04x) ERROR\n",
+				     fapi_get_u16(rx, u.mlme_disconnect_cfm.result_code));
+			ret = -EINVAL;
+		}
+	}
+
+	slsi_kfree_skb(rx);
+	return ret;
+}
+
+/* Program a key into the firmware via MLME-SETKEYS.request.
+ * key_id/key_type/address identify the key; key carries the cipher, key
+ * bytes and optional receive sequence counter. For WEP the key index
+ * octet is prepended to the key data. Returns 0 or a negative errno.
+ */
+int slsi_mlme_set_key(struct slsi_dev *sdev, struct net_device *dev, u16 key_id, u16 key_type, const u8 *address, struct key_params *key)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *req;
+	struct sk_buff *cfm;
+	int r = 0;
+
+	if (slsi_is_test_mode_enabled()) {
+		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_SETKEYS.request\n");
+		return -EOPNOTSUPP;
+	}
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_setkeys_req(key_id:%d, key_type:%d, address:%pM, length:%d, cipher:0x%.8X)\n", key_id, key_type, address, key->key_len, key->cipher);
+	req = fapi_alloc(mlme_setkeys_req, MLME_SETKEYS_REQ, ndev_vif->ifnum, key->key_len + 1); /* + 1 for the wep key index */
+	if (!req)
+		return -ENOMEM;
+	/* The length field is expressed in bits */
+	fapi_set_u16(req, u.mlme_setkeys_req.length, key->key_len * 8);
+	fapi_set_u16(req, u.mlme_setkeys_req.key_id, key_id);
+	fapi_set_u16(req, u.mlme_setkeys_req.key_type, key_type);
+	fapi_set_memcpy(req, u.mlme_setkeys_req.address, address);
+	fapi_set_memset(req, u.mlme_setkeys_req.sequence_number, 0x00);
+
+	if (key->seq_len && key->seq) {
+		int i;
+		u16 temp_seq;
+
+		SLSI_NET_DBG3(dev, SLSI_MLME, "mlme_setkeys_req(key->seq_len:%d)\n", key->seq_len);
+
+		/* Sequence would be in little endian format
+		 * If sequence is say key->seq is
+		 * 04 03 02 01 00 00 00 00, it would be encoded as :
+		 * 0x0304 0x0102 0x0000 0x0000 for firmware
+		 */
+		for (i = 0; i < key->seq_len; i += 2) {
+			temp_seq = (u16)(key->seq[i + 1] << 8) | (u16)(key->seq[i]);
+			fapi_set_u16(req, u.mlme_setkeys_req.sequence_number[i / 2], temp_seq);
+		}
+	}
+
+	fapi_set_u32(req, u.mlme_setkeys_req.cipher_suite_selector, key->cipher);
+
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+		u8 wep_key_id = (u8)key_id;
+
+		if (key_id > 3)
+			SLSI_NET_WARN(dev, "Key ID is greater than 3");
+		/* Incase of WEP the key index is appended before the key.
+		 * So increase the length by one octet (8 bits).
+		 */
+		fapi_set_u16(req, u.mlme_setkeys_req.length, (key->key_len + 1) * 8);
+		fapi_append_data(req, &wep_key_id, 1);
+	}
+	fapi_append_data(req, key->key, key->key_len);
+
+	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SETKEYS_CFM);
+	if (!cfm)
+		return -EIO;
+
+	if (fapi_get_u16(cfm, u.mlme_setkeys_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+		SLSI_NET_ERR(dev, "mlme_setkeys_cfm(result:0x%04x) ERROR\n",
+			     fapi_get_u16(cfm, u.mlme_setkeys_cfm.result_code));
+		r = -EINVAL;
+	}
+	slsi_kfree_skb(cfm);
+	return r;
+}
+
+/* Read back the receive sequence counter of a key via
+ * MLME-GET-KEY-SEQUENCE.request. On success the first 8 octets of the
+ * counter are written to seq (little endian) and *seq_len is set to 8.
+ * Returns 0 on success, -ENOMEM/-EIO on request failure, or -ENOENT when
+ * the firmware reports an error for the key.
+ */
+int slsi_mlme_get_key(struct slsi_dev *sdev, struct net_device *dev, u16 key_id, u16 key_type, u8 *seq, int *seq_len)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct sk_buff *req;
+	struct sk_buff *cfm;
+	int r = 0;
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_get_key_sequence_req(key_id:%d, key_type:%d)\n", key_id, key_type);
+	req = fapi_alloc(mlme_get_key_sequence_req, MLME_GET_KEY_SEQUENCE_REQ, ndev_vif->ifnum, 0);
+	if (!req)
+		return -ENOMEM;
+	fapi_set_u16(req, u.mlme_get_key_sequence_req.key_id, key_id);
+	fapi_set_u16(req, u.mlme_get_key_sequence_req.key_type, key_type);
+
+	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_GET_KEY_SEQUENCE_CFM);
+	if (!cfm)
+		return -EIO;
+
+	if (fapi_get_u16(cfm, u.mlme_get_key_sequence_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+		SLSI_NET_ERR(dev, "mlme_get_key_sequence_cfm(result:0x%04x) ERROR\n",
+			     fapi_get_u16(cfm, u.mlme_get_key_sequence_cfm.result_code));
+		r = -ENOENT;
+	} else {
+		int i;
+		u16 temp_seq;
+
+		/* For WPA2 Key RSC - 8 octets. For WPAI, it would be 16 octets (code would need to be updated)
+		 * Length is not available in cfm but even if max length 8 is assigned, it should be ok as other octets
+		 * would be padded with 0s
+		 */
+		*seq_len = 8;
+
+		/* Sequence from firmware is of a[8] type u16 (16 octets) and only 8 octets are required for WPA/WPA2.
+		 * If sequence is say 0x01 0x02 0x03 0x04 with 0x01 as MSB and 0x04 as LSB then
+		 * it would be encoded as: 0x0304 0x0102 by firmware.
+		 * Sequence is expected to be returned in little endian
+		 */
+
+		for (i = 0; i < *seq_len / 2; i++) {
+			temp_seq = fapi_get_u16(cfm, u.mlme_get_key_sequence_cfm.sequence_number[i]);
+			*seq = (u8)(temp_seq & 0xFF);
+			*(seq + 1) = (u8)((temp_seq >> 8) & 0xFF);
+
+			seq += 2;
+		}
+	}
+
+	slsi_kfree_skb(cfm);
+	return r;
+}
+
/* slsi_fw_tx_rate_calc() - decode a firmware rate word into cfg80211 form.
 *
 * @fw_rate:        raw rate word from firmware; the selector bits
 *                  (SLSI_FW_API_RATE_HT_SELECTOR_FIELD) choose non-HT, HT
 *                  or VHT decoding
 * @tx_rate:        optional cfg80211 rate_info output (may be NULL)
 * @data_rate_mbps: optional data rate output in Mb/s (may be NULL)
 *
 * Both fw_rate_idx_to_80211_rate and slsi_rates_table hold values in units
 * of 100 kb/s, hence the "/ 10" conversions to Mb/s below.
 */
void slsi_fw_tx_rate_calc(u16 fw_rate, struct rate_info *tx_rate, unsigned long *data_rate_mbps)
{
    /* Legacy (non-HT) rate index -> rate in 100 kb/s (1, 2, 5.5 ... 54 Mb/s). */
    const int fw_rate_idx_to_80211_rate[] = { 0, 10, 20, 55, 60, 90, 110, 120, 180, 240, 360, 480, 540 };

    /* Reset outputs before decoding. */
    if (tx_rate) {
        tx_rate->flags = 0;
        tx_rate->legacy = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
        tx_rate->bw = 0;
#endif
    }
    if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_NON_HT_SELECTED) {
        u16 fw_rate_idx = fw_rate & SLSI_FW_API_RATE_INDEX_FIELD;

        if (fw_rate > 0 && fw_rate_idx < ARRAY_SIZE(fw_rate_idx_to_80211_rate)) {
            if (tx_rate)
                tx_rate->legacy = fw_rate_idx_to_80211_rate[fw_rate_idx];
            if (data_rate_mbps)
                *data_rate_mbps = fw_rate_idx_to_80211_rate[fw_rate_idx] / 10;
        }

    } else if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_HT_SELECTED) {
        /* HT: MCS in the low bits, NSS in bits 7:6 (hence ">> 6"). */
        u8 mcs = SLSI_FW_API_RATE_HT_MCS_FIELD & fw_rate;
        u8 nss = ((SLSI_FW_API_RATE_HT_NSS_FIELD & fw_rate) >> 6) + 1;

        if (tx_rate) {
            tx_rate->flags |= RATE_INFO_FLAGS_MCS;
            tx_rate->mcs = mcs;

            if ((fw_rate & SLSI_FW_API_RATE_BW_FIELD) == SLSI_FW_API_RATE_BW_40MHZ)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
                tx_rate->bw |= RATE_INFO_BW_40;
#else
                tx_rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
#endif
            if (fw_rate & SLSI_FW_API_RATE_SGI)
                tx_rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
        }

        if (data_rate_mbps) {
            int chan_bw_idx;
            int gi_idx;
            int mcs_idx;

            /* Bandwidth field sits at bits 10:9 of the rate word. */
            chan_bw_idx = (fw_rate & SLSI_FW_API_RATE_BW_FIELD) >> 9;
            gi_idx = ((fw_rate & SLSI_FW_API_RATE_SGI) == SLSI_FW_API_RATE_SGI) ? 1 : 0;
            mcs_idx = SLSI_FW_API_RATE_HT_MCS_FIELD & fw_rate;

            if ((chan_bw_idx < 2) && (mcs_idx <= 7)) {
                *data_rate_mbps = (unsigned long)(nss * slsi_rates_table[chan_bw_idx][gi_idx][mcs_idx]) / 10;
            } else if (mcs == 32 && chan_bw_idx == 1) {
                /* MCS32 (40 MHz only): 6.7 Mb/s with short GI, 6 Mb/s otherwise. */
                if (gi_idx == 1)
                    *data_rate_mbps = (unsigned long)(nss * 67)/10;
                else
                    *data_rate_mbps = nss * 6;
            } else {
                SLSI_WARN_NODEV("FW DATA RATE decode error fw_rate:%x, bw:%x, mcs_idx:%x, nss : %d\n",
                                fw_rate, chan_bw_idx, mcs_idx, nss);
            }
        }

    } else if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_VHT_SELECTED) {
        int chan_bw_idx;
        int gi_idx;
        int mcs_idx;
        u8 nss;

        /* report vht rate in legacy units and not as mcs index. reason: upper layers may still be not
         * updated with vht msc table.
         */
        chan_bw_idx = (fw_rate & SLSI_FW_API_RATE_BW_FIELD) >> 9;
        gi_idx = ((fw_rate & SLSI_FW_API_RATE_SGI) == SLSI_FW_API_RATE_SGI) ? 1 : 0;
        /* Calculate NSS --> bits 6 to 4*/
        nss = ((SLSI_FW_API_RATE_VHT_NSS_FIELD & fw_rate) >> 4) + 1;
        mcs_idx = SLSI_FW_API_RATE_VHT_MCS_FIELD & fw_rate;
        /* Bandwidth (BW): 0x0= 20 MHz, 0x1= 40 MHz, 0x2= 80 MHz, 0x3= 160/ 80+80 MHz. 0x3 is not supported */
        if ((chan_bw_idx <= 2) && (mcs_idx <= 9)) {
            if (tx_rate)
                tx_rate->legacy = nss * slsi_rates_table[chan_bw_idx][gi_idx][mcs_idx];
            if (data_rate_mbps)
                *data_rate_mbps = (unsigned long)(nss * slsi_rates_table[chan_bw_idx][gi_idx][mcs_idx]) / 10;
        } else {
            SLSI_WARN_NODEV("FW DATA RATE decode error fw_rate:%x, bw:%x, mcs_idx:%x,nss : %d\n", fw_rate, chan_bw_idx, mcs_idx, nss);
        }
    }
}
+
/* slsi_mlme_get_sinfo_mib() - populate @peer->sinfo from firmware MIBs.
 *
 * Reads TX rate, RSSI, RX error counters and TX/RX frame counters (plus TX
 * retries when CONFIG_SCSC_ENHANCED_PACKET_STATS) in a single MLME-GET and
 * fills in the cfg80211 station_info fields accordingly.
 *
 * Rate-limited via peer->sinfo_mib_get_rs: a call inside the limit window
 * returns 0 without touching the firmware.
 *
 * Caller must hold ndev_vif->vif_mutex (WARN_ON enforced).
 *
 * Return: 0 on success (or when rate-limited), -EINVAL if @peer is NULL,
 * -ENOMEM on allocation/decode failure, otherwise the slsi_mlme_get() error.
 *
 * NOTE: the order of entries in get_values[] fixes the values[0..8] indices
 * used below — keep them in sync.
 */
int slsi_mlme_get_sinfo_mib(struct slsi_dev *sdev, struct net_device *dev,
                            struct slsi_peer *peer)
{
    struct netdev_vif *ndev_vif = netdev_priv(dev);
    struct slsi_mib_data mibreq = { 0, NULL };
    struct slsi_mib_data mibrsp = { 0, NULL };
    struct slsi_mib_value *values = NULL;
    int data_length = 0;
    int r = 0;
    static const struct slsi_mib_get_entry get_values[] = {
        { SLSI_PSID_UNIFI_TX_DATA_RATE, { 0, 0 } },         /* to get STATION_INFO_TX_BITRATE*/
        { SLSI_PSID_UNIFI_RSSI, { 0, 0 } },                 /* to get STATION_INFO_SIGNAL_AVG*/
        { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 3, 0 } },     /* bad_fcs_count*/
        { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 25, 0 } },    /* mac_bad_sig_count*/
        { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 30, 0 } },    /* rx_error_count*/
        { SLSI_PSID_UNIFI_FRAME_TX_COUNTERS, { 1, 0 } },    /*tx good count*/
        { SLSI_PSID_UNIFI_FRAME_TX_COUNTERS, { 2, 0 } },    /*tx bad count*/
        { SLSI_PSID_UNIFI_FRAME_RX_COUNTERS, { 1, 0 } },    /*rx good count*/
#ifdef CONFIG_SCSC_ENHANCED_PACKET_STATS
        { SLSI_PSID_UNIFI_FRAME_TX_COUNTERS, { 3, 0 } },    /*tx retry count*/
#endif
    };
    int rx_counter = 0;      /* accumulates the three RX error counters */

    WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));

    if (!peer) {
        SLSI_WARN(sdev, "Peer Not available\n");
        return -EINVAL;
    }

    /*check if function is called within given period*/
    if (__ratelimit(&peer->sinfo_mib_get_rs))
        return 0;

    r = slsi_mib_encode_get_list(&mibreq, (sizeof(get_values) / sizeof(struct slsi_mib_get_entry)),
                                 get_values);
    if (r != SLSI_MIB_STATUS_SUCCESS)
        return -ENOMEM;

    /* Fixed fields len (5) : 2 bytes(PSID) + 2 bytes (Len) + 1 byte (VLDATA header ) [10 for 2 PSIDs]
     * Data : 3 bytes for SLSI_PSID_UNIFI_TX_DATA_RATE , 1 byte for SLSI_PSID_UNIFI_RSSI
     * 10*7 bytes for 3 Throughput Mib's and 4 counter Mib's
     */
    mibrsp.dataLength = 84;
    mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);

    if (!mibrsp.data) {
        SLSI_NET_DBG1(dev, SLSI_MLME, "failed to allocate memory\n");
        kfree(mibreq.data);
        return -ENOMEM;
    }

    r = slsi_mlme_get(sdev, dev, mibreq.data, mibreq.dataLength, mibrsp.data,
                      mibrsp.dataLength, &data_length);
    kfree(mibreq.data);

    if (r == 0) {
        mibrsp.dataLength = (u32)data_length;
        values = slsi_mib_decode_get_list(&mibrsp,
                                          (sizeof(get_values) / sizeof(struct slsi_mib_get_entry)), get_values);
        if (!values) {
            SLSI_NET_DBG1(dev, SLSI_MLME, "mib decode list failed\n");
            kfree(mibrsp.data);
            return -ENOMEM;
        }

        /* values[0]: TX data rate (raw firmware rate word). */
        if (values[0].type != SLSI_MIB_TYPE_NONE) {
            SLSI_CHECK_TYPE(sdev, values[0].type, SLSI_MIB_TYPE_UINT);
            slsi_fw_tx_rate_calc((u16)values[0].u.uintValue, &peer->sinfo.txrate, NULL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
            peer->sinfo.filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
#else
            peer->sinfo.filled |= STATION_INFO_TX_BITRATE;
#endif
            SLSI_DBG3(sdev, SLSI_MLME, "SLSI_PSID_UNIFI_TX_DATA_RATE = %d\n",
                      values[0].u.uintValue);
        }

        /* values[1]: RSSI — non-negative readings are clamped to -1 dBm. */
        if (values[1].type != SLSI_MIB_TYPE_NONE) {
            SLSI_CHECK_TYPE(sdev, values[1].type, SLSI_MIB_TYPE_INT);
            if (values[1].u.intValue >= 0)
                peer->sinfo.signal = -1;
            else
                peer->sinfo.signal = (s8)values[1].u.intValue;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
            peer->sinfo.filled |= BIT(NL80211_STA_INFO_SIGNAL);
#else
            peer->sinfo.filled |= STATION_INFO_SIGNAL;
#endif
            SLSI_DBG3(sdev, SLSI_MLME, "SLSI_PSID_UNIFI_RSSI = %d\n",
                      values[1].u.intValue);
        }

        if (values[2].type == SLSI_MIB_TYPE_UINT)
            rx_counter += values[2].u.uintValue; /*bad_fcs_count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 2);
        if (values[3].type == SLSI_MIB_TYPE_UINT)
            rx_counter += values[3].u.uintValue; /*mac_bad_sig_count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 3);
        if (values[4].type == SLSI_MIB_TYPE_UINT)
            rx_counter += values[4].u.uintValue; /*rx_error_count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 4);
        if (values[5].type == SLSI_MIB_TYPE_UINT)
            peer->sinfo.tx_packets = values[5].u.uintValue; /*tx good count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 5);
        if (values[6].type == SLSI_MIB_TYPE_UINT)
            peer->sinfo.tx_failed = values[6].u.uintValue; /*tx bad count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 6);
        if (values[7].type == SLSI_MIB_TYPE_UINT)
            peer->sinfo.rx_packets = values[7].u.uintValue; /*rx good count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 7);
#ifdef CONFIG_SCSC_ENHANCED_PACKET_STATS
        if (values[8].type == SLSI_MIB_TYPE_UINT)
            peer->sinfo.tx_retries = values[8].u.uintValue; /*tx retry count*/
        else
            SLSI_ERR(sdev, "invalid type. iter:%d", 8);
#endif

        peer->sinfo.rx_dropped_misc = rx_counter;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
        peer->sinfo.filled |= BIT(NL80211_STA_INFO_TX_FAILED) | BIT(NL80211_STA_INFO_RX_DROP_MISC) |
                              BIT(NL80211_STA_INFO_TX_PACKETS) | BIT(NL80211_STA_INFO_RX_PACKETS);
#ifdef CONFIG_SCSC_ENHANCED_PACKET_STATS
        peer->sinfo.filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
#endif
#endif
    } else {
        SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_get_req failed(result:0x%4x)\n", r);
    }

    kfree(mibrsp.data);
    kfree(values);
    return r;
}
+
/* slsi_mlme_connect_scan() - blocking scan performed ahead of a connection.
 *
 * Aborts any scan already in flight (reporting it to cfg80211 as aborted),
 * then runs a FULL_SCAN. If @channel is NULL the scan covers every enabled
 * channel of every supported band; otherwise only @channel is scanned.
 * Cached results are drained to cfg80211 afterwards.
 *
 * Return: 0 on success, -ENOMEM if the channel array cannot be allocated,
 * otherwise the slsi_mlme_add_scan() result.
 */
int slsi_mlme_connect_scan(struct slsi_dev *sdev, struct net_device *dev,
                           u32 n_ssids, struct cfg80211_ssid *ssids, struct ieee80211_channel *channel)
{
    struct netdev_vif *ndev_vif = netdev_priv(dev);
    int r = 0;
    struct ieee80211_channel **scan_channels = NULL;    /* owned here, freed before return */
    struct ieee80211_channel **add_scan_channels;
    int n_channels = 0;
    struct sk_buff *scan;
    struct cfg80211_scan_info info = {.aborted = true};  /* only used on kernels >= 4.9 */

    SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);

    /* Abort an in-flight HW scan and tell cfg80211 it was aborted. */
    if (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req) {
        SLSI_NET_DBG3(dev, SLSI_MLME, "stop on-going Scan\n");
        (void)slsi_mlme_del_scan(sdev, dev, ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID, false);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
        cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, &info);
#else
        cfg80211_scan_done(ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req, true);
#endif

        ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req = NULL;
    }

    if (!channel) {
        enum nl80211_band band;
        struct wiphy *wiphy = sdev->wiphy;

        /* First pass: count channels across all bands to size the array. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
#else
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
#endif
            if (!wiphy->bands[band])
                continue;
            n_channels += wiphy->bands[band]->n_channels;
        }

        WARN_ON(n_channels == 0);
        scan_channels = kmalloc_array((size_t)n_channels, sizeof(*scan_channels), GFP_KERNEL);
        if (!scan_channels) {
            SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
            return -ENOMEM;
        }
        /* Second pass: collect every channel that is not disabled. */
        n_channels = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
#else
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
#endif
            int j;

            if (!wiphy->bands[band])
                continue;
            for (j = 0; j < wiphy->bands[band]->n_channels; j++)
                if (!(wiphy->bands[band]->channels[j].flags & IEEE80211_CHAN_DISABLED)) {
                    scan_channels[n_channels] = &wiphy->bands[band]->channels[j];
                    n_channels++;
                }
        }
        add_scan_channels = scan_channels;
    } else {
        n_channels = 1;
        add_scan_channels = &channel;
    }
    ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = true;
    r = slsi_mlme_add_scan(sdev,
                           dev,
                           FAPI_SCANTYPE_FULL_SCAN,
                           FAPI_REPORTMODE_REAL_TIME,
                           n_ssids,
                           ssids,
                           n_channels,
                           add_scan_channels,
                           NULL,
                           ndev_vif->probe_req_ies,
                           ndev_vif->probe_req_ie_len,
                           ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan);

    /* Drain any cached results to cfg80211. */
    SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
    scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
    while (scan) {
        slsi_rx_scan_pass_to_cfg80211(sdev, dev, scan);
        scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
    }
    SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);

    kfree(scan_channels);
    ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = false;

    SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
    return r;
}
+
+/**
+ * The powermgt_lock mutex is to ensure atomic update of the power management state.
+ */
+DEFINE_MUTEX(powermgt_lock);
+/**
+ * The slsi_mlme_powermgt_unlocked() must be called from a context that is synchronised
+ * with ndev_vif. if called without the ndev_vif mutex already taken, other mechanisms
+ * must ensure that ndev_vif will exist for the duration of the function.
+ */
+int slsi_mlme_powermgt_unlocked(struct slsi_dev *sdev, struct net_device *dev, u16 power_mode)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+
+ mutex_lock(&powermgt_lock);
+
+ if (WARN_ON(!ndev_vif->activated)) {
+ mutex_unlock(&powermgt_lock);
+ return -EINVAL;
+ }
+
+ if (ndev_vif->power_mode == power_mode) {
+ mutex_unlock(&powermgt_lock);
+ SLSI_NET_DBG3(dev, SLSI_MLME, "power management mode is same as requested. No changes done\n");
+ return 0;
+ }
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_powermgt_req(vif:%d, power_management_mode:%d)\n", ndev_vif->ifnum, power_mode);
+ req = fapi_alloc(mlme_powermgt_req, MLME_POWERMGT_REQ, ndev_vif->ifnum, 0);
+ if (!req) {
+ mutex_unlock(&powermgt_lock);
+ return -ENOMEM;
+ }
+ fapi_set_u16(req, u.mlme_powermgt_req.power_management_mode, power_mode);
+
+ rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_POWERMGT_CFM);
+ if (!rx) {
+ mutex_unlock(&powermgt_lock);
+ return -EIO;
+ }
+
+ if (fapi_get_u16(rx, u.mlme_powermgt_cfm.result_code) == FAPI_RESULTCODE_SUCCESS) {
+ ndev_vif->power_mode = power_mode;
+ } else {
+ SLSI_NET_ERR(dev, "mlme_powermgt_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(rx, u.mlme_powermgt_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(rx);
+ mutex_unlock(&powermgt_lock);
+ return r;
+}
+
+int slsi_mlme_powermgt(struct slsi_dev *sdev, struct net_device *dev, u16 power_mode)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ return slsi_mlme_powermgt_unlocked(sdev, dev, power_mode);
+}
+
#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
/* slsi_mlme_synchronised_response() - forward the external-auth (SAE)
 * result to the firmware via MLME-SYNCHRONISED.response.
 *
 * Fire-and-forget: no confirm is expected, so this always returns 0 unless
 * the request buffer cannot be allocated (-ENOMEM). A no-op when the vif is
 * not active.
 */
int slsi_mlme_synchronised_response(struct slsi_dev *sdev, struct net_device *dev,
                                    struct cfg80211_external_auth_params *params)
{
    struct netdev_vif *ndev_vif = netdev_priv(dev);
    struct sk_buff *req;
    struct sk_buff *cfm;

    if (!ndev_vif->activated) {
        SLSI_NET_DBG1(dev, SLSI_MLME, "vif is not active");
        return 0;
    }

    SLSI_NET_DBG3(dev, SLSI_MLME, "MLME_SYNCHRONISED_RES\n");

    req = fapi_alloc(mlme_synchronised_res, MLME_SYNCHRONISED_RES, ndev_vif->ifnum, 0);
    if (!req)
        return -ENOMEM;

    fapi_set_u16(req, u.mlme_synchronised_res.result_code, params->status);
    fapi_set_memcpy(req, u.mlme_synchronised_res.bssid, params->bssid);

    SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_synchronised_response(vif:%d) status:%d\n",
                  ndev_vif->ifnum, params->status);
    /* A confirm for a .response is unexpected — log if one arrives. */
    cfm = slsi_mlme_req_no_cfm(sdev, dev, req);
    if (cfm)
        SLSI_NET_ERR(dev, "Received cfm for MLME_SYNCHRONISED_RES\n");

    return 0;
}
#endif
+
+int slsi_mlme_register_action_frame(struct slsi_dev *sdev, struct net_device *dev, u32 af_bitmap_active, u32 af_bitmap_suspended)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ req = fapi_alloc(mlme_register_action_frame_req, MLME_REGISTER_ACTION_FRAME_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u32(req, u.mlme_register_action_frame_req.action_frame_category_bitmap_active, af_bitmap_active);
+ fapi_set_u32(req, u.mlme_register_action_frame_req.action_frame_category_bitmap_suspended, af_bitmap_suspended);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_register_action_frame(vif:%d, active:%d, suspended:%d)\n", ndev_vif->ifnum, af_bitmap_active, af_bitmap_suspended);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_REGISTER_ACTION_FRAME_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_register_action_frame_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_register_action_frame_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_register_action_frame_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_channel_switch(struct slsi_dev *sdev, struct net_device *dev, u16 center_freq, u16 chan_info)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_channel_switch_req(vif:%d, freq: %d, channel info: 0x%x)\n", ndev_vif->ifnum, center_freq, chan_info);
+ req = fapi_alloc(mlme_channel_switch_req, MLME_CHANNEL_SWITCH_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_channel_switch_req.channel_frequency, SLSI_FREQ_HOST_TO_FW(center_freq));
+ fapi_set_u16(req, u.mlme_channel_switch_req.channel_information, chan_info);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_CHANNEL_SWITCH_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_channel_switch_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_channel_switch_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_channel_switch_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_add_info_elements(struct slsi_dev *sdev, struct net_device *dev, u16 purpose, const u8 *ies, const u16 ies_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u8 *p;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ req = fapi_alloc(mlme_add_info_elements_req, MLME_ADD_INFO_ELEMENTS_REQ, ndev_vif->ifnum, ies_len);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_add_info_elements_req.purpose, purpose);
+
+ if (ies_len != 0) {
+ p = fapi_append_data(req, ies, ies_len);
+ if (!p) {
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+ }
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_add_info_elements_req(vif:%d, ies_len:%d)\n", ndev_vif->ifnum, ies_len);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_ADD_INFO_ELEMENTS_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_add_info_elements_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_add_info_elements_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_add_info_elements_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_send_frame_data(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 msg_type,
+ u16 host_tag, u32 dwell_time, u32 period)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 len = skb->len;
+ struct sk_buff *original_skb = 0;
+ int ret;
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ int is_enhanced_arp_request_frame = 0;
+#endif
+
+ /* don't let ARP frames exhaust all the control slots */
+ if (msg_type == FAPI_MESSAGETYPE_ARP) {
+ int free_slots = 0;
+
+ free_slots = hip4_free_ctrl_slots_count(&sdev->hip4_inst);
+
+ if (free_slots < 0) {
+ SLSI_DBG1(sdev, SLSI_MLME, "drop ARP (error in getting free slot count)\n");
+ return free_slots;
+ }
+
+ if (free_slots < SLSI_MLME_ARP_DROP_FREE_SLOTS_COUNT) {
+ SLSI_DBG1(sdev, SLSI_MLME, "drop ARP (running out of Control slots:%d)\n", free_slots);
+ slsi_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ if (ndev_vif->enhanced_arp_detect_enabled && (msg_type == FAPI_MESSAGETYPE_ARP)) {
+ u8 *frame = skb->data + sizeof(struct ethhdr);
+ u16 arp_opcode = frame[SLSI_ARP_OPCODE_OFFSET] << 8 | frame[SLSI_ARP_OPCODE_OFFSET + 1];
+
+ if ((arp_opcode == SLSI_ARP_REQUEST_OPCODE) &&
+ !SLSI_IS_GRATUITOUS_ARP(frame) &&
+ !memcmp(&frame[SLSI_ARP_DEST_IP_ADDR_OFFSET], &ndev_vif->target_ip_addr, 4))
+ is_enhanced_arp_request_frame = 1;
+ }
+#endif
+ }
+
+ /* check for headroom to push signal header; if not available, re-alloc headroom */
+ if (skb_headroom(skb) < (fapi_sig_size(mlme_send_frame_req))) {
+ struct sk_buff *skb2 = NULL;
+
+ skb2 = slsi_skb_realloc_headroom(skb, fapi_sig_size(mlme_send_frame_req));
+ if (!skb2)
+ return -EINVAL;
+ original_skb = skb;
+ skb = skb2;
+ }
+ len = skb->len;
+ (void)skb_push(skb, (fapi_sig_size(mlme_send_frame_req)));
+
+ /* fill the signal header */
+ fapi_set_u16(skb, id, MLME_SEND_FRAME_REQ);
+ fapi_set_u16(skb, receiver_pid, 0);
+ fapi_set_u16(skb, sender_pid, SLSI_TX_PROCESS_ID_MIN);
+ fapi_set_u16(skb, fw_reference, 0);
+
+ /* fill in signal parameters */
+ fapi_set_u16(skb, u.mlme_send_frame_req.vif, ndev_vif->ifnum);
+
+ if (host_tag == 0)
+ host_tag = slsi_tx_mgmt_host_tag(sdev);
+
+ fapi_set_u16(skb, u.mlme_send_frame_req.host_tag, host_tag);
+ fapi_set_u16(skb, u.mlme_send_frame_req.data_unit_descriptor, FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME);
+ fapi_set_u16(skb, u.mlme_send_frame_req.message_type, msg_type);
+ fapi_set_u16(skb, u.mlme_send_frame_req.channel_frequency, 0);
+ fapi_set_u32(skb, u.mlme_send_frame_req.dwell_time, dwell_time);
+ fapi_set_u32(skb, u.mlme_send_frame_req.period, period);
+
+ SLSI_DBG2(sdev, SLSI_MLME, "mlme_send_frame_req(vif:%d, message_type:%d, host_tag:%d)\n", ndev_vif->ifnum, msg_type, host_tag);
+ /* slsi_tx_control frees the skb. Do not use it after this call. */
+ ret = slsi_tx_control(sdev, dev, skb);
+ if (ret != 0) {
+ SLSI_WARN(sdev, "failed to send MLME signal(err=%d)\n", ret);
+ return ret;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ if (is_enhanced_arp_request_frame) {
+ int i;
+
+ ndev_vif->enhanced_arp_stats.arp_req_count_to_lower_mac++;
+ for (i = 0; i < SLSI_MAX_ARP_SEND_FRAME; i++) {
+ if (!ndev_vif->enhanced_arp_host_tag[i]) {
+ ndev_vif->enhanced_arp_host_tag[i] = host_tag;
+ break;
+ }
+ }
+ }
+#endif
+
+ if (original_skb)
+ slsi_kfree_skb(original_skb);
+
+ /* as the frame is queued to HIP for transmission, store the host tag of the frames
+ * to validate the transmission status in MLME-Frame-Transmission.indication.
+ * Take necessary action based on the type of frame and status of it's transmission
+ */
+ if (msg_type == FAPI_MESSAGETYPE_EAPOL_KEY_M4) {
+ ndev_vif->sta.m4_host_tag = host_tag;
+ SLSI_NET_DBG1(dev, SLSI_MLME, "EAPOL-Key M4 frame (host_tag:%d)\n", ndev_vif->sta.m4_host_tag);
+ } else if (msg_type == FAPI_MESSAGETYPE_EAP_MESSAGE) {
+ if (!ndev_vif->sta.is_wps && (ndev_vif->iftype == NL80211_IFTYPE_STATION)) {
+ /* In case of non-P2P station and Enterprise security store the host_tag.
+ * If transmission of such frame fails, inform supplicant to disconnect.
+ */
+ ndev_vif->sta.eap_hosttag = host_tag;
+ SLSI_NET_DBG1(dev, SLSI_MLME, "EAP frame (host_tag:%d)\n", ndev_vif->sta.eap_hosttag);
+ }
+ }
+ return ret;
+}
+
+int slsi_mlme_send_frame_mgmt(struct slsi_dev *sdev, struct net_device *dev, const u8 *frame, int frame_len,
+ u16 data_desc, u16 msg_type, u16 host_tag, u16 freq, u32 dwell_time, u32 period)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u8 *p;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ req = fapi_alloc(mlme_send_frame_req, MLME_SEND_FRAME_REQ, ndev_vif->ifnum, frame_len);
+ if (!req) {
+ SLSI_WARN(sdev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ fapi_set_u16(req, u.mlme_send_frame_req.host_tag, host_tag);
+ fapi_set_u16(req, u.mlme_send_frame_req.data_unit_descriptor, data_desc);
+ fapi_set_u16(req, u.mlme_send_frame_req.message_type, msg_type);
+ fapi_set_u16(req, u.mlme_send_frame_req.channel_frequency, freq);
+ fapi_set_u32(req, u.mlme_send_frame_req.dwell_time, dwell_time);
+ fapi_set_u32(req, u.mlme_send_frame_req.period, period);
+
+ p = fapi_append_data(req, frame, frame_len);
+ if (!p) {
+ slsi_kfree_skb(req);
+ SLSI_WARN(sdev, "failed to append data\n");
+ return -EINVAL;
+ }
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_send_frame_req(vif:%d, message_type:%d,host_tag:%d)\n", ndev_vif->ifnum, msg_type, host_tag);
+ slsi_debug_frame(sdev, dev, req, "TX");
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SEND_FRAME_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_send_frame_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_send_frame_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_send_frame_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_reset_dwell_time(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_reset_dwell_time_req (vif:%d)\n", ndev_vif->ifnum);
+
+ req = fapi_alloc(mlme_reset_dwell_time_req, MLME_RESET_DWELL_TIME_REQ, ndev_vif->ifnum, 0);
+
+ if (!req)
+ return -ENOMEM;
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_RESET_DWELL_TIME_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_reset_dwell_time_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_reset_dwell_time_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_reset_dwell_time_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
/* slsi_mlme_set_packet_filter() - serialise @num_filters packet-filter
 * elements into an MLME-SET-PACKET-FILTER.request.
 *
 * @pkt_filter_len:   total serialised length; caller must have computed it
 *                    to match the elements (headers + per-pattern
 *                    offset/mask_length/mask/pattern) packed below
 * @pkt_filter_elems: array of @num_filters filter elements
 *
 * Wire format per element: SLSI_PKT_FILTER_ELEM_HDR_LEN header bytes, then
 * for each pattern descriptor: offset (1), mask_length (1), mask
 * (mask_length), pattern (mask_length).
 *
 * Returns 0 on success, -EINVAL if the vif is inactive / no filters / append
 * fails, -ENOMEM on allocation failure, -EIO if no confirm arrives.
 *
 * NOTE(review): unlike the sibling mlme helpers, the confirm's result code
 * is not checked here, so firmware rejection is reported as success —
 * confirm whether MLME_SET_PACKET_FILTER_CFM carries a result code.
 */
int slsi_mlme_set_packet_filter(struct slsi_dev *sdev, struct net_device *dev,
                                int pkt_filter_len,
                                u8 num_filters,
                                struct slsi_mlme_pkt_filter_elem *pkt_filter_elems)
{
    struct netdev_vif *ndev_vif = netdev_priv(dev);
    struct sk_buff *req;
    struct sk_buff *cfm;
    int r = 0, i = 0, j = 0;
    u8 *p;
    u8 index = 0;       /* write cursor into the reserved bulk data */

    if (WARN_ON(!ndev_vif->activated))
        return -EINVAL;

    if (WARN_ON(!num_filters))
        return -EINVAL;

    req = fapi_alloc(mlme_set_packet_filter_req, MLME_SET_PACKET_FILTER_REQ, ndev_vif->ifnum, pkt_filter_len);
    if (!req)
        return -ENOMEM;

    /* Reserve the whole payload up front, then pack into it. */
    p = fapi_append_data(req, NULL, pkt_filter_len);
    if (!p) {
        slsi_kfree_skb(req);
        return -EINVAL;
    }

    for (i = 0; i < num_filters; i++) {
        struct slsi_mlme_pkt_filter_elem pkt_filter_elem = pkt_filter_elems[i];

        memcpy(&p[index], pkt_filter_elem.header, SLSI_PKT_FILTER_ELEM_HDR_LEN);
        index += SLSI_PKT_FILTER_ELEM_HDR_LEN;

        for (j = 0; j < pkt_filter_elem.num_pattern_desc; j++) {
            /* mask and pattern share the same length (mask_length) */
            p[index++] = pkt_filter_elem.pattern_desc[j].offset;
            p[index++] = pkt_filter_elem.pattern_desc[j].mask_length;
            memcpy(&p[index], pkt_filter_elem.pattern_desc[j].mask, pkt_filter_elem.pattern_desc[j].mask_length);
            index += pkt_filter_elem.pattern_desc[j].mask_length;
            memcpy(&p[index], pkt_filter_elem.pattern_desc[j].pattern, pkt_filter_elem.pattern_desc[j].mask_length);
            index += pkt_filter_elem.pattern_desc[j].mask_length;
        }
    }

    SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_packet_filter_req(vif:%d, num_filters:%d)\n", ndev_vif->ifnum, num_filters);
    cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_PACKET_FILTER_CFM);
    if (!cfm)
        return -EIO;

    slsi_kfree_skb(cfm);
    return r;
}
+
+int slsi_mlme_set_pmk(struct slsi_dev *sdev, struct net_device *dev, const u8 *pmk, u16 pmklen)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (WARN_ON(!ndev_vif->activated))
+ return -EINVAL;
+ if (pmk)
+ req = fapi_alloc(mlme_set_pmk_req, MLME_SET_PMK_REQ, ndev_vif->ifnum, pmklen);
+ else
+ req = fapi_alloc(mlme_set_pmk_req, MLME_SET_PMK_REQ, ndev_vif->ifnum, 0);
+
+ if (!req)
+ return -ENOMEM;
+ if (pmk)
+ fapi_append_data(req, pmk, pmklen);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_pmk_req(vif:%u, pmklen:%d)\n", ndev_vif->ifnum, pmklen);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_PMK_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_set_pmk_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_set_pmk_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_set_pmk_cfm.result_code));
+ r = -EINVAL;
+ }
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_roam(struct slsi_dev *sdev, struct net_device *dev, const u8 *bssid, u16 freq)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ if (WARN_ON(!ndev_vif->activated))
+ return -EINVAL;
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_roam_req(vif:%u, bssid:%pM, freq:%d)\n", ndev_vif->ifnum, bssid, freq);
+ req = fapi_alloc(mlme_roam_req, MLME_ROAM_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+ fapi_set_memcpy(req, u.mlme_roam_req.bssid, bssid);
+ fapi_set_u16(req, u.mlme_roam_req.channel_frequency, SLSI_FREQ_HOST_TO_FW(freq));
+ atomic_set(&ndev_vif->sta.drop_roamed_ind, 1);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_ROAM_CFM);
+ atomic_set(&ndev_vif->sta.drop_roamed_ind, 0);
+ if (!cfm)
+ return -EIO;
+ if (fapi_get_u16(cfm, u.mlme_roam_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_roam_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_roam_cfm.result_code));
+ r = -EINVAL;
+ }
+ slsi_kfree_skb(cfm);
+ ndev_vif->sta.roam_in_progress = true;
+ return r;
+}
+
+int slsi_mlme_set_cached_channels(struct slsi_dev *sdev, struct net_device *dev, u32 channels_count, u8 *channels)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ u8 *p;
+ int r = 0;
+ size_t channel_ie = 0;
+ int i;
+ const u8 channels_list_ie_header[] = {
+ 0xDD, /* Element ID: Vendor Specific */
+ 0x05, /* Length: actual length will be updated later */
+ 0x00, 0x16, 0x32, /* OUI: Samsung Electronics Co. */
+ 0x01, /* OUI Type: Scan parameters */
+ 0x02 /* OUI Subtype: channel list */
+ };
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ if (WARN_ON(!ndev_vif->activated))
+ return -EINVAL;
+
+ if (channels_count) {
+ channel_ie += 6 + (channels_count * SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
+ req = fapi_alloc(mlme_set_cached_channels_req, MLME_SET_CACHED_CHANNELS_REQ, ndev_vif->ifnum, channel_ie);
+ } else {
+ req = fapi_alloc(mlme_set_cached_channels_req, MLME_SET_CACHED_CHANNELS_REQ, ndev_vif->ifnum, 0);
+ }
+ if (!req)
+ return -ENOMEM;
+
+ if (channels_count) {
+ u16 freq_fw_unit;
+ u8 *channels_list_ie = fapi_append_data(req, channels_list_ie_header, sizeof(channels_list_ie_header));
+
+ if (!channels_list_ie) {
+ SLSI_WARN(sdev, "channel list IE append failed\n");
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < channels_count; i++) {
+ SLSI_NET_DBG3(dev, SLSI_MLME, "request for channels %d\n", channels[i]);
+ p = fapi_append_data(req, NULL, SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE);
+ if (!p) {
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+ freq_fw_unit = 2 * ieee80211_channel_to_frequency(channels[i], (channels[i] <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ);
+ freq_fw_unit = cpu_to_le16(freq_fw_unit);
+ memcpy(p, &freq_fw_unit, sizeof(freq_fw_unit));
+
+ p[2] = FAPI_SCANPOLICY_2_4GHZ | FAPI_SCANPOLICY_5GHZ;
+
+ channels_list_ie[1] += SLSI_SCAN_CHANNEL_DESCRIPTOR_SIZE;
+ }
+ }
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_cached_channels_req(vif:%d)\n", ndev_vif->ifnum);
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_CACHED_CHANNELS_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_set_cached_channels_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_set_cached_channels_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_set_cached_channels_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
/* Program the MAC Access Control List (allow/deny policy + address list)
 * into the firmware via MLME_SET_ACL_REQ and wait for the confirm.
 *
 * @sdev:   driver instance
 * @dev:    net_device the ACL applies to (used for logging / signal routing)
 * @ifnum:  firmware vif index; 0 routes the request on the global stream
 * @params: cfg80211 ACL data (policy, entry count, MAC address array)
 *
 * Returns 0 on success, -ENOMEM on alloc failure, -EIO if no confirm
 * arrived, -EINVAL if the firmware rejected the request.
 */
int slsi_mlme_set_acl(struct slsi_dev *sdev, struct net_device *dev, u16 ifnum, const struct cfg80211_acl_data *params)
{
	struct sk_buff *req;
	struct sk_buff *cfm;
	size_t mac_acl_size = 0;
	int i, r = 0;

	/* Bulk payload: one mac_address struct (6 bytes) per ACL entry */
	mac_acl_size = sizeof((params->mac_addrs[0])) * (params->n_acl_entries);
	req = fapi_alloc(mlme_set_acl_req, MLME_SET_ACL_REQ, ifnum, mac_acl_size);
	if (!req) {
		SLSI_NET_ERR(dev, "fapi alloc failure\n");
		return -ENOMEM;
	}
	fapi_set_u16(req, u.mlme_set_acl_req.entries, params->n_acl_entries);
	fapi_set_u16(req, u.mlme_set_acl_req.acl_policy, params->acl_policy);

	for (i = 0; i < params->n_acl_entries; i++)
		fapi_append_data(req, params->mac_addrs[i].addr, sizeof((params->mac_addrs[i])));

	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_acl_req(vif:%u, n_acl_entries:%d)\n", ifnum, params->n_acl_entries);

	/* ifnum 0 means no per-vif signal stream: use the global one (dev = NULL) */
	if (ifnum)
		cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_ACL_CFM);
	else
		cfm = slsi_mlme_req_cfm(sdev, NULL, req, MLME_SET_ACL_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_set_acl_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_set_acl_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_set_acl_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);
	return r;
}
+#endif
+
+int slsi_mlme_set_traffic_parameters(struct slsi_dev *sdev, struct net_device *dev, u16 user_priority, u16 medium_time, u16 minimun_data_rate, u8 *mac)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (WARN_ON(!ndev_vif->activated))
+ return -EINVAL;
+
+ if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION && ndev_vif->iftype == NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ req = fapi_alloc(mlme_set_traffic_parameters_req, MLME_SET_TRAFFIC_PARAMETERS_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_set_traffic_parameters_req.user_priority, user_priority);
+ fapi_set_u16(req, u.mlme_set_traffic_parameters_req.medium_time, medium_time);
+ fapi_set_u16(req, u.mlme_set_traffic_parameters_req.minimum_data_rate, minimun_data_rate);
+
+ if (mac)
+ fapi_set_memcpy(req, u.mlme_set_traffic_parameters_req.peer_address, mac);
+ else
+ fapi_set_memset(req, u.mlme_set_traffic_parameters_req.peer_address, 0);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_set_traffic_parameters_req(vif:%u, user_priority:%d, medium_time:%d)\n", ndev_vif->ifnum, user_priority, medium_time);
+ rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_TRAFFIC_PARAMETERS_CFM);
+ if (!rx)
+ return -EIO;
+
+ if (fapi_get_u16(rx, u.mlme_set_traffic_parameters_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_set_traffic_parameters_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(rx, u.mlme_set_traffic_parameters_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int slsi_mlme_del_traffic_parameters(struct slsi_dev *sdev, struct net_device *dev, u16 user_priority)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ if (WARN_ON(!ndev_vif->activated))
+ return -EINVAL;
+
+ if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION && ndev_vif->iftype == NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ req = fapi_alloc(mlme_del_traffic_parameters_req, MLME_DEL_TRAFFIC_PARAMETERS_REQ, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_del_traffic_parameters_req.user_priority, user_priority);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_del_traffic_parameters_req(vif:%u, user_priority:%d)\n", ndev_vif->ifnum, user_priority);
+ rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_DEL_TRAFFIC_PARAMETERS_CFM);
+ if (!rx)
+ return -EIO;
+
+ if (fapi_get_u16(rx, u.mlme_del_traffic_parameters_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_del_traffic_parameters_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(rx, u.mlme_del_traffic_parameters_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int slsi_mlme_set_ext_capab(struct slsi_dev *sdev, struct net_device *dev, struct slsi_mib_value *mib_val)
+{
+ struct slsi_mib_data mib_data = { 0, NULL };
+ int error = 0;
+
+ error = slsi_mib_encode_octet(&mib_data, SLSI_PSID_UNIFI_EXTENDED_CAPABILITIES, mib_val->u.octetValue.dataLength, mib_val->u.octetValue.data, 0);
+ if (error != SLSI_MIB_STATUS_SUCCESS) {
+ error = -ENOMEM;
+ goto exit;
+ }
+
+ if (WARN_ON(mib_data.dataLength == 0)) {
+ error = -EINVAL;
+ goto exit;
+ }
+
+ error = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
+ kfree(mib_data.data);
+
+ if (!error)
+ return 0;
+
+exit:
+ SLSI_ERR(sdev, "Error in setting ext capab. error = %d\n", error);
+ return error;
+}
+
/* Read the firmware's Extended Capabilities MIB, merge in the Hotspot 2.0
 * relevant bits (BSS Transition, Interworking, QoS Map, WNM-Notification)
 * taken from the supplied Extended Capability IE, and write the merged
 * value back.
 *
 * @ies/@ie_len: IE buffer to search for WLAN_EID_EXT_CAPABILITY; if absent,
 *               all four HS2 bits are cleared.
 *
 * Returns 0 on success, negative errno on MIB read/decode/set failure.
 *
 * NOTE(review): the code assumes the decoded octetValue has at least 6
 * bytes (data[2]..data[5] are written unconditionally) — presumably
 * guaranteed by the firmware MIB definition; confirm before relying on it.
 */
int slsi_mlme_set_hs2_ext_cap(struct slsi_dev *sdev, struct net_device *dev, const u8 *ies, int ie_len)
{
	struct slsi_mib_entry mib_entry;
	struct slsi_mib_data mibreq = { 0, NULL };
	struct slsi_mib_data mibrsp = { 0, NULL };
	const u8 *ext_capab_ie;
	int r = 0;
	int rx_length = 0;
	int len = 0;

	slsi_mib_encode_get(&mibreq, SLSI_PSID_UNIFI_EXTENDED_CAPABILITIES, 0);

	/* 5 (header) + 9 (data) + 2 (mlme expects 16 (??))*/
	mibrsp.dataLength = 16;
	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);

	if (!mibrsp.data) {
		SLSI_ERR(sdev, "Failed to alloc for Mib response\n");
		kfree(mibreq.data);
		return -ENOMEM;
	}

	r = slsi_mlme_get(sdev, NULL, mibreq.data, mibreq.dataLength,
			  mibrsp.data, mibrsp.dataLength, &rx_length);
	kfree(mibreq.data);

	if (r == 0) {
		mibrsp.dataLength = rx_length;
		len = slsi_mib_decode(&mibrsp, &mib_entry);
		if (len == 0) {
			SLSI_ERR(sdev, "Mib decode error\n");
			r = -EINVAL;
			goto exit;
		}
	} else {
		SLSI_NET_DBG1(dev, SLSI_MLME, "Mib read failed (error: %d)\n", r);
		goto exit;
	}

	ext_capab_ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ies, ie_len);

	if (ext_capab_ie) {
		u8 ext_capab_ie_len = ext_capab_ie[1];

		ext_capab_ie += 2; /* skip the EID and length*/

		/*BSS Transition bit is bit 19 ,ie length must be >= 3 */
		if ((ext_capab_ie_len >= 3) && (ext_capab_ie[2] & SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED))
			mib_entry.value.u.octetValue.data[2] |= SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED;
		else
			mib_entry.value.u.octetValue.data[2] &= ~SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED;

		/*interworking bit is bit 31 ,ie length must be >= 4 */
		if ((ext_capab_ie_len >= 4) && (ext_capab_ie[3] & SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED))
			mib_entry.value.u.octetValue.data[3] |= SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED;
		else
			mib_entry.value.u.octetValue.data[3] &= ~SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED;

		/*QoS MAP is bit 32 ,ie length must be >= 5 */
		if ((ext_capab_ie_len >= 5) && (ext_capab_ie[4] & SLSI_WLAN_EXT_CAPA4_QOS_MAP_ENABLED))
			mib_entry.value.u.octetValue.data[4] |= SLSI_WLAN_EXT_CAPA4_QOS_MAP_ENABLED;
		else
			mib_entry.value.u.octetValue.data[4] &= ~SLSI_WLAN_EXT_CAPA4_QOS_MAP_ENABLED;

		/*WNM- Notification bit is bit 46 ,ie length must be >= 6 */
		if ((ext_capab_ie_len >= 6) && (ext_capab_ie[5] & SLSI_WLAN_EXT_CAPA5_WNM_NOTIF_ENABLED))
			mib_entry.value.u.octetValue.data[5] |= SLSI_WLAN_EXT_CAPA5_WNM_NOTIF_ENABLED;
		else
			mib_entry.value.u.octetValue.data[5] &= ~SLSI_WLAN_EXT_CAPA5_WNM_NOTIF_ENABLED;
	} else {
		/* No Extended Capability IE supplied: clear all HS2 bits */
		mib_entry.value.u.octetValue.data[2] &= ~SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED;
		mib_entry.value.u.octetValue.data[3] &= ~SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED;
		mib_entry.value.u.octetValue.data[4] &= ~SLSI_WLAN_EXT_CAPA4_QOS_MAP_ENABLED;
		mib_entry.value.u.octetValue.data[5] &= ~SLSI_WLAN_EXT_CAPA5_WNM_NOTIF_ENABLED;
	}

	r = slsi_mlme_set_ext_capab(sdev, dev, &mib_entry.value);
exit:
	kfree(mibrsp.data);
	return r;
}
+
+int slsi_mlme_tdls_peer_resp(struct slsi_dev *sdev, struct net_device *dev, u16 pid, u16 tdls_event)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+
+ req = fapi_alloc(mlme_tdls_peer_res, MLME_TDLS_PEER_RES, ndev_vif->ifnum, 0);
+ if (!req)
+ return -ENOMEM;
+
+ fapi_set_u16(req, u.mlme_tdls_peer_res.peer_index, pid);
+ fapi_set_u16(req, u.mlme_tdls_peer_res.tdls_event, tdls_event);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_tdls_peer_res(vif:%d)\n", ndev_vif->ifnum);
+ cfm = slsi_mlme_req_no_cfm(sdev, dev, req);
+ WARN_ON(cfm);
+
+ return 0;
+}
+
/* Request a TDLS action (setup/teardown/channel-switch etc. — the action
 * code is defined by FAPI) towards the given peer.
 *
 * @center_freq: host-units channel frequency; converted to firmware units
 *               (half-MHz, see SLSI_FREQ_HOST_TO_FW) before sending.
 *
 * Returns 0 on success, -ENOMEM on alloc failure, -EIO if no confirm,
 * -EINVAL if the firmware rejected the action.
 */
int slsi_mlme_tdls_action(struct slsi_dev *sdev, struct net_device *dev, const u8 *peer, int action, u16 center_freq, u16 chan_info)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_tdls_action_req(action:%u)\n", action);
	req = fapi_alloc(mlme_tdls_action_req, MLME_TDLS_ACTION_REQ, ndev_vif->ifnum, 0);
	if (!req)
		return -ENOMEM;

	center_freq = SLSI_FREQ_HOST_TO_FW(center_freq);

	fapi_set_memcpy(req, u.mlme_tdls_action_req.peer_sta_address, peer);
	fapi_set_u16(req, u.mlme_tdls_action_req.tdls_action, action);
	fapi_set_u16(req, u.mlme_tdls_action_req.channel_frequency, center_freq);
	fapi_set_u16(req, u.mlme_tdls_action_req.channel_information, chan_info);

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_TDLS_ACTION_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_tdls_action_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_tdls_action_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_tdls_action_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);

	return r;
}
+
/* Ask the firmware to reassociate the station vif with its current AP
 * via MLME_REASSOCIATE_REQ.
 *
 * Note the success code here is FAPI_RESULTCODE_HOST_REQUEST_SUCCESS
 * (not the plain FAPI_RESULTCODE_SUCCESS used by most other confirms).
 *
 * Caller must hold ndev_vif->vif_mutex on an activated vif.
 * Returns 0 on success, -EINVAL/-ENOMEM/-EIO on failure.
 */
int slsi_mlme_reassociate(struct slsi_dev *sdev, struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
	if (WARN_ON(!ndev_vif->activated))
		return -EINVAL;

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_reassoc_req(vif:%u)\n", ndev_vif->ifnum);
	req = fapi_alloc(mlme_reassociate_req, MLME_REASSOCIATE_REQ, ndev_vif->ifnum, 0);
	if (!req)
		return -ENOMEM;

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_REASSOCIATE_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_reassociate_cfm.result_code) == FAPI_RESULTCODE_HOST_REQUEST_SUCCESS) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_reassoc_cfm(result:0x%04x)\n",
			      fapi_get_u16(cfm, u.mlme_reassociate_cfm.result_code));
	} else {
		SLSI_NET_ERR(dev, "mlme_reassoc_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_reassociate_cfm.result_code));
		r = -EINVAL;
	}
	slsi_kfree_skb(cfm);
	return r;
}
+
/* Send the host's response to a reassociation indication
 * (MLME_REASSOCIATE_RES). Fire-and-forget: no confirm expected.
 * Skipped entirely in WlanLite test mode, which does not support it.
 */
void slsi_mlme_reassociate_resp(struct slsi_dev *sdev, struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *cfm;

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "Skip sending signal, WlanLite FW does not support MLME_REASSOCIATE_RESP\n");
		return;
	}

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_reassociate_resp(vif:%d)\n", ndev_vif->ifnum);
	req = fapi_alloc(mlme_reassociate_res, MLME_REASSOCIATE_RES, ndev_vif->ifnum, 0);
	if (!req)
		return;

	/* slsi_mlme_req_no_cfm() should return NULL; anything else is a bug */
	cfm = slsi_mlme_req_no_cfm(sdev, dev, req);
	WARN_ON(cfm);
}
+
+int slsi_mlme_add_range_req(struct slsi_dev *sdev, u8 count,
+ struct slsi_rtt_config *nl_rtt_params, u16 rtt_id, u16 vif_idx, u8 *source_addr)
+{
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0, i;
+ size_t alloc_data_size = 0;
+ u8 fapi_ie_generic[] = { 0xdd, 0x1c, 0x00, 0x16, 0x32, 0x0a, 0x01 };
+ /* calculate data size */
+ alloc_data_size += count * (fapi_ie_generic[1] + 2);
+
+ req = fapi_alloc(mlme_add_range_req, MLME_ADD_RANGE_REQ, 0, alloc_data_size);
+ if (!req) {
+ SLSI_ERR(sdev, "failed to alloc %zd\n", alloc_data_size);
+ return -ENOMEM;
+ }
+ SLSI_DBG2(sdev, SLSI_MLME, "count:%d allocated data size: %d, source_addr:%pM\n",
+ count, alloc_data_size, source_addr);
+ /*fill the data */
+ fapi_set_u16(req, u.mlme_add_range_req.vif, vif_idx);
+ fapi_set_u16(req, u.mlme_add_range_req.rtt_id, rtt_id);
+ fapi_set_memcpy(req, u.mlme_add_range_req.device_address, source_addr);
+ for (i = 0; i < count; i++) {
+ fapi_append_data(req, fapi_ie_generic, sizeof(fapi_ie_generic));
+ fapi_append_data(req, nl_rtt_params[i].peer_addr, ETH_ALEN);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].type, 2);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].channel_freq, 2);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].burst_period, 1);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].num_burst, 1);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].num_frames_per_burst, 1);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].num_retries_per_ftmr, 1);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].burst_duration, 1);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].preamble, 2);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].bw, 2);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].LCI_request, 2);
+ fapi_append_data(req, (u8 *)&nl_rtt_params[i].LCR_request, 2);
+ }
+ rx = slsi_mlme_req_cfm(sdev, NULL, req, MLME_ADD_RANGE_CFM);
+ SLSI_DBG2(sdev, SLSI_MLME, "(After mlme req cfm for rtt config)\n");
+ if (!rx)
+ return -EIO;
+ if (fapi_get_u16(rx, u.mlme_add_range_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_ERR(sdev, "mlme_add_range_cfm(ERROR:0x%04x)",
+ fapi_get_u16(rx, u.mlme_add_range_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(rx);
+ return r;
+}
+
+bool slsi_del_range_cfm_validate(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *cfm)
+{
+ int result = fapi_get_u16(cfm, u.mlme_del_range_cfm.result_code);
+ bool r = false;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ if (WARN_ON(!dev))
+ goto exit;
+
+ if (result == FAPI_RESULTCODE_SUCCESS)
+ r = true;
+ else
+ SLSI_NET_ERR(dev, "mlme_del_range_cfm(result:0x%04x) ERROR\n", result);
+
+exit:
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+int slsi_mlme_del_range_req(struct slsi_dev *sdev, struct net_device *dev, u16 count,
+ u8 *addr, u16 rtt_id)
+{
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ u16 *rtt_vif_idx = sdev->rtt_vif;
+ int r = 0, i;
+ size_t alloc_data_size = 0;
+
+ /* calculate data size-->2 bytes for vif */
+ alloc_data_size += count * sizeof(ETH_ALEN);
+ /* Alloc data size */
+ req = fapi_alloc(mlme_del_range_req, MLME_DEL_RANGE_REQ, 0, alloc_data_size);
+ if (!req) {
+ SLSI_ERR(sdev, "failed to alloc %zd\n", alloc_data_size);
+ return -ENOMEM;
+ }
+ /*fill the data */
+ fapi_set_u16(req, u.mlme_del_range_req.vif, rtt_vif_idx[rtt_id]);
+ fapi_set_u16(req, u.mlme_del_range_req.rtt_id, rtt_id);
+ fapi_set_u16(req, u.mlme_del_range_req.entries, count);
+ for (i = 0; i < count; i++)
+ fapi_append_data(req, &addr[i * ETH_ALEN], ETH_ALEN);
+
+ rx = slsi_mlme_req_cfm_ind(sdev, dev, req, MLME_DEL_RANGE_CFM, MLME_RANGE_IND, slsi_del_range_cfm_validate);
+ sdev->rtt_vif[rtt_id] = -1;
+ if (!rx) {
+ SLSI_NET_ERR(dev, "mlme_del_range_cfm() ERROR\n");
+ slsi_kfree_skb(rx);
+ return -EINVAL;
+ }
+ slsi_kfree_skb(rx);
+ return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+#define SLSI_FAPI_EPNO_NETWORK_MIN_SIZE (3)
+int slsi_mlme_set_pno_list(struct slsi_dev *sdev, int count,
+ struct slsi_epno_param *epno_param, struct slsi_epno_hs2_param *epno_hs2_param)
+{
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+ size_t alloc_data_size = 0;
+ u32 i, j;
+ u8 fapi_ie_generic[] = { 0xdd, 0, 0x00, 0x16, 0x32, 0x01, 0x00 };
+ u8 *buff_ptr, *ie_start_pos;
+
+ if (count) {
+ /* calculate data size */
+ if (epno_param) {
+ alloc_data_size += sizeof(fapi_ie_generic) + SLSI_FAPI_EPNO_NETWORK_MIN_SIZE * count + 11;
+ for (i = 0; i < count; i++)
+ alloc_data_size += epno_param->epno_ssid[i].ssid_len;
+ } else if (epno_hs2_param) {
+ for (i = 0; i < count; i++) {
+ /* fapi_ie_generic + Network_block_ID(1) + Realm_length(1) + realm_data(x)
+ * + Roaming_Consortium_Count(1) + Roaming Consortium data(16 * 8) +
+ * PLMN length(1) + PLMN data(6)
+ */
+ if (strlen(epno_hs2_param->realm))
+ alloc_data_size += sizeof(fapi_ie_generic) + 1 + 1 + (strlen(epno_hs2_param->realm) + 1)
+ + 1 + 16 * 8 + 1 + 6;
+ else
+ alloc_data_size += sizeof(fapi_ie_generic) + 1 + 1 + 0
+ + 1 + 16 * 8 + 1 + 6;
+ }
+ }
+ }
+
+ /* Alloc data size */
+ req = fapi_alloc(mlme_set_pno_list_req, MLME_SET_PNO_LIST_REQ, 0, alloc_data_size);
+ if (!req) {
+ SLSI_ERR(sdev, "failed to alloc %zd\n", alloc_data_size);
+ return -ENOMEM;
+ }
+ if (count) {
+ /* Fill data */
+ if (epno_param) {
+ fapi_ie_generic[1] = alloc_data_size - 2;
+ fapi_ie_generic[6] = 9; /* OUI */
+ fapi_append_data(req, fapi_ie_generic, sizeof(fapi_ie_generic));
+ fapi_append_data(req, (u8 *)epno_param, (sizeof(*epno_param) - 1));
+ for (i = 0; i < count; i++) {
+ fapi_append_data(req, (u8 *)&epno_param->epno_ssid[i].flags, 2);
+ fapi_append_data(req, (u8 *)&epno_param->epno_ssid[i].ssid_len, 1);
+ fapi_append_data(req, (u8 *)epno_param->epno_ssid[i].ssid,
+ epno_param->epno_ssid[i].ssid_len);
+ }
+ } else if (epno_hs2_param) {
+ u8 realm_length;
+ u8 roaming_consortium_count = 16;
+ u8 plmn_length = 6;
+ u8 plmn_digit[6];
+
+ fapi_ie_generic[6] = 0x10; /* OUI subtype = Passpoint Network */
+ for (i = 0; i < count; i++) {
+ buff_ptr = fapi_append_data(req, fapi_ie_generic, sizeof(fapi_ie_generic));
+ if (!buff_ptr) {
+ SLSI_ERR(sdev, "failed append data\n");
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+ ie_start_pos = buff_ptr;
+
+ fapi_append_data(req, (u8 *)&epno_hs2_param[i].id, 1);
+
+ realm_length = strlen(epno_hs2_param[i].realm);
+ if (realm_length) {
+ realm_length++;
+ fapi_append_data(req, &realm_length, 1);
+ fapi_append_data(req, epno_hs2_param[i].realm, realm_length);
+ } else {
+ fapi_append_data(req, &realm_length, 1);
+ }
+
+ fapi_append_data(req, &roaming_consortium_count, 1);
+ fapi_append_data(req, (u8 *)&epno_hs2_param[i].roaming_consortium_ids, 16 * 8);
+
+ fapi_append_data(req, &plmn_length, 1);
+ for (j = 0; j < 3; j++) {
+ plmn_digit[j * 2] = epno_hs2_param[i].plmn[i] & 0x0F;
+ plmn_digit[(j * 2) + 1] = epno_hs2_param[i].plmn[i] & 0xF0 >> 4;
+ }
+ buff_ptr = fapi_append_data(req, plmn_digit, sizeof(plmn_digit));
+ if (!buff_ptr) {
+ SLSI_ERR(sdev, "failed append data\n");
+ slsi_kfree_skb(req);
+ return -EINVAL;
+ }
+
+ buff_ptr += sizeof(plmn_digit);
+ ie_start_pos[1] = buff_ptr - ie_start_pos - 2; /* fill ie length field */
+ }
+ }
+ }
+
+ /* Send signal */
+ /* Use the Global sig_wait not the Interface specific for mlme-set-pno.list */
+ rx = slsi_mlme_req_cfm(sdev, NULL, req, MLME_SET_PNO_LIST_CFM);
+ if (!rx)
+ return -EIO;
+ if (fapi_get_u16(rx, u.mlme_set_pno_list_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_ERR(sdev, "mlme_set_pno_list_cfm(ERROR:0x%04x)",
+ fapi_get_u16(rx, u.mlme_set_pno_list_cfm.result_code));
+ r = -EINVAL;
+ }
+ slsi_kfree_skb(rx);
+ return r;
+}
+
/* Start firmware link-layer statistics gathering
 * (MLME_START_LINK_STATISTICS_REQ on the global stream).
 *
 * @mpdu_size_threshold:      MPDU size above which per-packet stats apply
 * @aggressive_stats_enabled: enable more detailed (costlier) gathering
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_start_link_stats_req(struct slsi_dev *sdev, u16 mpdu_size_threshold, bool aggressive_stats_enabled)
{
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	req = fapi_alloc(mlme_start_link_statistics_req, MLME_START_LINK_STATISTICS_REQ, 0, 0);
	if (!req) {
		SLSI_ERR(sdev, "memory allocation failed for signal\n");
		return -ENOMEM;
	}

	fapi_set_u16(req, u.mlme_start_link_statistics_req.mpdu_size_threshold, mpdu_size_threshold);
	fapi_set_u16(req, u.mlme_start_link_statistics_req.aggressive_statistics_gathering_enabled,
		     aggressive_stats_enabled);

	SLSI_DBG2(sdev, SLSI_MLME, "(mpdu_size_threshold:%d, aggressive_stats_enabled:%d)\n",
		  mpdu_size_threshold, aggressive_stats_enabled);
	cfm = slsi_mlme_req_cfm(sdev, NULL, req, MLME_START_LINK_STATISTICS_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_start_link_statistics_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_ERR(sdev, "mlme_start_link_statistics_cfm (result:0x%04x) ERROR\n",
			 fapi_get_u16(cfm, u.mlme_start_link_statistics_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);
	return r;
}
+
/* Stop firmware link-layer statistics gathering.
 *
 * @stats_stop_mask: bitmap selecting which statistics streams to stop.
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_stop_link_stats_req(struct slsi_dev *sdev, u16 stats_stop_mask)
{
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	req = fapi_alloc(mlme_stop_link_statistics_req, MLME_STOP_LINK_STATISTICS_REQ, 0, 0);
	if (!req) {
		SLSI_ERR(sdev, "memory allocation failed for signal\n");
		return -ENOMEM;
	}

	fapi_set_u16(req, u.mlme_stop_link_statistics_req.statistics_stop_bitmap, stats_stop_mask);

	SLSI_DBG2(sdev, SLSI_MLME, "statistics_stop_bitmap:%d\n", stats_stop_mask);
	cfm = slsi_mlme_req_cfm(sdev, NULL, req, MLME_STOP_LINK_STATISTICS_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_stop_link_statistics_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_ERR(sdev, "mlme_stop_link_statistics_cfm (result:0x%04x) ERROR\n",
			 fapi_get_u16(cfm, u.mlme_stop_link_statistics_cfm.result_code));
		r = -EINVAL;
	}
	slsi_kfree_skb(cfm);
	return r;
}
+#endif
+
/* Enable/disable firmware RSSI monitoring with low/high thresholds
 * (MLME_MONITOR_RSSI_REQ). The firmware reports crossings asynchronously.
 *
 * Note the signed s8 thresholds are carried in u16 signal fields; the
 * firmware presumably reinterprets them — TODO confirm against FAPI spec.
 *
 * Caller must hold ndev_vif->vif_mutex.
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_set_rssi_monitor(struct slsi_dev *sdev, struct net_device *dev, u8 enable, s8 low_rssi_threshold, s8 high_rssi_threshold)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_set_rssi_monitor(vif:%u), enable =%d, low_rssi_threshold = %d,high_rssi_threshold =%d\n",
		      ndev_vif->ifnum, enable, low_rssi_threshold, high_rssi_threshold);

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
	req = fapi_alloc(mlme_monitor_rssi_req, MLME_MONITOR_RSSI_REQ, ndev_vif->ifnum, 0);
	if (!req) {
		SLSI_NET_ERR(dev, "fapi alloc failure\n");
		return -ENOMEM;
	}

	fapi_set_u16(req, u.mlme_monitor_rssi_req.rssi_monitoring_enabled, enable);
	fapi_set_u16(req, u.mlme_monitor_rssi_req.low_rssi_threshold, low_rssi_threshold);
	fapi_set_u16(req, u.mlme_monitor_rssi_req.high_rssi_threshold, high_rssi_threshold);

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_MONITOR_RSSI_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_monitor_rssi_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_monitor_rssi_cfm(result:0x%04x) ERROR\n", fapi_get_u16(cfm, u.mlme_monitor_rssi_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);
	return r;
}
+
/* Read a batch of MIB entries from the firmware and decode them.
 *
 * @mib_entries/@mib_count: PSIDs to query
 * @mibrsp: caller-allocated response buffer; on success its dataLength is
 *          overwritten with the actual number of bytes received.
 *
 * Returns a decoded value array (caller owns and must kfree it) or NULL
 * on encode/get/decode failure.
 */
struct slsi_mib_value *slsi_read_mibs(struct slsi_dev *sdev, struct net_device *dev,
				      struct slsi_mib_get_entry *mib_entries, int mib_count, struct slsi_mib_data *mibrsp)
{
	struct slsi_mib_data mibreq = { 0, NULL };
	struct slsi_mib_value *values;
	int rx_length, r;

	r = slsi_mib_encode_get_list(&mibreq, mib_count, mib_entries);
	if (r != SLSI_MIB_STATUS_SUCCESS) {
		SLSI_WARN(sdev, "slsi_mib_encode_get_list fail %d\n", r);
		return NULL;
	}

	r = slsi_mlme_get(sdev, dev, mibreq.data, mibreq.dataLength, mibrsp->data, mibrsp->dataLength, &rx_length);
	kfree(mibreq.data);

	if (r != 0) {
		SLSI_ERR(sdev, "Mib (err:%d)\n", r);
		return NULL;
	}

	mibrsp->dataLength = (u32)rx_length;
	values = slsi_mib_decode_get_list(mibrsp, mib_count, mib_entries);
	if (!values)
		SLSI_WARN(sdev, "decode error\n");
	return values;
}
+
/* Set the P2P GO Client Traffic (CT) window via MLME_SET_CTWINDOW_REQ.
 *
 * @ct_param: CT window value passed straight through to the firmware.
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_set_ctwindow(struct slsi_dev *sdev, struct net_device *dev, unsigned int ct_param)
{
	struct netdev_vif *ndev_vif;
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_set_ctwindow(ct_param = %d)\n", ct_param);

	ndev_vif = netdev_priv(dev);

	req = fapi_alloc(mlme_set_ctwindow_req, MLME_SET_CTWINDOW_REQ, ndev_vif->ifnum, 0);
	if (!req)
		return -ENOMEM;

	fapi_set_u16(req, u.mlme_set_ctwindow_req.ctwindow, ct_param);

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_CTWINDOW_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_set_ctwindow_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_set_ctwindow_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_set_ctwindow_cfm.result_code));
		r = -EINVAL;
	}
	slsi_kfree_skb(cfm);
	return r;
}
+
/* Configure P2P Notice-of-Absence via MLME_SET_NOA_REQ.
 *
 * @noa_count: number of absence intervals (0 presumably cancels NoA —
 *             TODO confirm against FAPI spec)
 * @interval:  interval in ms; 0 means "one beacon interval" (the firmware
 *             field is in microseconds: beacon_interval TUs * 1024)
 * @duration:  absence duration in ms (sent in microseconds)
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_set_p2p_noa(struct slsi_dev *sdev, struct net_device *dev, unsigned int noa_count,
			  unsigned int interval, unsigned int duration)
{
	struct netdev_vif *ndev_vif;
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_set_noa_req(noa_count = %d, interval = %d, duration = %d)\n",
		      noa_count, interval, duration);

	ndev_vif = netdev_priv(dev);

	req = fapi_alloc(mlme_set_noa_req, MLME_SET_NOA_REQ, ndev_vif->ifnum, 0);
	if (!req)
		return -ENOMEM;

	fapi_set_u16(req, u.mlme_set_noa_req.request_id, SLSI_NOA_CONFIG_REQUEST_ID);
	fapi_set_u16(req, u.mlme_set_noa_req.noa_count, noa_count);
	if (!interval)
		fapi_set_u32(req, u.mlme_set_noa_req.interval, (1 * 1024 * ndev_vif->ap.beacon_interval));
	else
		fapi_set_u32(req, u.mlme_set_noa_req.interval, interval * 1000);
	fapi_set_u32(req, u.mlme_set_noa_req.duration, duration * 1000);

	cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_SET_NOA_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_set_noa_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_set_noa_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_set_noa_cfm.result_code));
		r = -EINVAL;
	}
	slsi_kfree_skb(cfm);
	return r;
}
+
/* Report the host platform state (e.g. screen/SAR related flags) to the
 * firmware via MLME_HOST_STATE_REQ on the global stream.
 * Skipped in WlanLite mode (returns -EOPNOTSUPP).
 *
 * Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
 */
int slsi_mlme_set_host_state(struct slsi_dev *sdev, struct net_device *dev, u8 host_state)
{
	struct sk_buff *req;
	struct sk_buff *cfm;
	int r = 0;

	if (slsi_is_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "Skip MLME_HOST_STATE_REQ in wlanlite mode\n");
		return -EOPNOTSUPP;
	}

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_set_host_state(state =%d)\n", host_state);

	req = fapi_alloc(mlme_host_state_req, MLME_HOST_STATE_REQ, 0, 0);
	if (!req) {
		SLSI_NET_ERR(dev, "fapi alloc failure\n");
		return -ENOMEM;
	}

	fapi_set_u16(req, u.mlme_host_state_req.host_state, host_state);

	cfm = slsi_mlme_req_cfm(sdev, NULL, req, MLME_HOST_STATE_CFM);
	if (!cfm)
		return -EIO;

	if (fapi_get_u16(cfm, u.mlme_host_state_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_host_state_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(cfm, u.mlme_host_state_cfm.result_code));
		r = -EINVAL;
	}

	slsi_kfree_skb(cfm);
	return r;
}
+
/* Read back the installed APF (Android Packet Filter) program from the
 * firmware via MLME_READ_APF_REQ.
 *
 * On return, *host_dst points at the program bytes INSIDE the confirm skb
 * and *datalen is its length.
 *
 * NOTE(review): the confirm skb is deliberately not freed here because
 * *host_dst aliases its payload — presumably the caller consumes the data
 * immediately; verify who eventually frees it.
 * NOTE(review): *host_dst/*datalen are written even when the firmware
 * returned an error result (r == -EINVAL); callers must check r first.
 */
int slsi_mlme_read_apf_request(struct slsi_dev *sdev, struct net_device *dev, u8 **host_dst, int *datalen)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *req;
	struct sk_buff *rx;
	int r = 0;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	if (!ndev_vif->activated) {
		SLSI_ERR(sdev, "ndev_vif is not activated\n");
		r = -EINVAL;
		goto exit;
	}

	if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
		SLSI_ERR(sdev, "vif_type is not FAPI_VIFTYPE_STATION\n");
		r = -EINVAL;
		goto exit;
	}

	req = fapi_alloc(mlme_read_apf_req, MLME_READ_APF_REQ, ndev_vif->ifnum, 0);
	if (!req) {
		r = -ENOMEM;
		goto exit;
	}

	SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_read_apf_req(vif:%u)\n", ndev_vif->ifnum);
	rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_READ_APF_CFM);
	if (!rx) {
		r = -EIO;
		goto exit;
	}

	if (fapi_get_u16(rx, u.mlme_read_apf_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
		SLSI_NET_ERR(dev, "mlme_read_apf_cfm(result:0x%04x) ERROR\n",
			     fapi_get_u16(rx, u.mlme_read_apf_cfm.result_code));
		r = -EINVAL;
	}

	*datalen = fapi_get_datalen(rx);
	*host_dst = fapi_get_data(rx);

exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	return r;
}
+
+int slsi_mlme_install_apf_request(struct slsi_dev *sdev, struct net_device *dev,
+ u8 *program, u32 program_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (!ndev_vif->activated) {
+ SLSI_ERR(sdev, "ndev_vif is not activated\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ SLSI_ERR(sdev, "vif_type is not FAPI_VIFTYPE_STATION\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ req = fapi_alloc(mlme_install_apf_req, MLME_INSTALL_APF_REQ, ndev_vif->ifnum, program_len);
+ if (!req) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ /* filter_mode will be "don't care" for FW */
+ fapi_set_u16(req, u.mlme_install_apf_req.filter_mode, FAPI_APFFILTERMODE_SUSPEND);
+ fapi_append_data(req, program, program_len);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_install_apf_req(vif:%u, filter_mode:%d)\n",
+ ndev_vif->ifnum, FAPI_APFFILTERMODE_SUSPEND);
+ rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_INSTALL_APF_CFM);
+ if (!rx) {
+ r = -EIO;
+ goto exit;
+ }
+
+ if (fapi_get_u16(rx, u.mlme_install_apf_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_install_apf_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(rx, u.mlme_install_apf_cfm.result_code));
+ r = -EINVAL;
+ }
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+int slsi_mlme_arp_detect_request(struct slsi_dev *sdev, struct net_device *dev, u16 action, u8 *target_ipaddr)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *rx;
+ int r = 0;
+ u32 ipaddress = 0x0;
+ int i = 0;
+
+ if (!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex)) {
+ SLSI_ERR(sdev, "ndev_vif mutex is not locked\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ if (!ndev_vif->activated) {
+ SLSI_ERR(sdev, "ndev_vif is not activated\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ if ((ndev_vif->vif_type != FAPI_VIFTYPE_STATION) && (ndev_vif->iftype == NL80211_IFTYPE_STATION)) {
+ SLSI_ERR(sdev, "vif_type is not FAPI_VIFTYPE_STATION\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ req = fapi_alloc(mlme_arp_detect_req, MLME_ARP_DETECT_REQ, ndev_vif->ifnum, 0);
+ if (!req) {
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < 4; i++)
+ ipaddress = (ipaddress << 8) | ((unsigned char)target_ipaddr[i]);
+ ipaddress = htonl(ipaddress);
+
+ fapi_set_u16(req, u.mlme_arp_detect_req.arp_detect_action, action);
+ fapi_append_data(req, (const u8 *)&ipaddress, 4);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_arp_detect_req(vif:%u, action:%d IP Address:%d.%d.%d.%d)\n",
+ ndev_vif->ifnum, action, ndev_vif->target_ip_addr[0], ndev_vif->target_ip_addr[1],
+ ndev_vif->target_ip_addr[2], ndev_vif->target_ip_addr[3]);
+ rx = slsi_mlme_req_cfm(sdev, dev, req, MLME_ARP_DETECT_CFM);
+ if (!rx) {
+ r = -EIO;
+ goto exit;
+ }
+
+ if (fapi_get_u16(rx, u.mlme_arp_detect_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "mlme_arp_detect_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(rx, u.mlme_arp_detect_cfm.result_code));
+ r = -EINVAL;
+ }
+
+exit:
+ return r;
+}
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+#define SLSI_TEST_CONFIG_MONITOR_MODE_DESCRIPTOR_SIZE (12)
+/**
+ * slsi_test_sap_configure_monitor_mode() - send TEST_CONFIGURE_MONITOR_MODE_REQ
+ * carrying the monitor-mode channel parameters in a Samsung vendor-specific IE.
+ *
+ * The IE body is: centre frequency 1 (2), channel info (2), centre frequency 2
+ * (2), MAC address filter (6, zeros - filtering not supported yet), i.e.
+ * SLSI_TEST_CONFIG_MONITOR_MODE_DESCRIPTOR_SIZE bytes after the 7-byte header.
+ * Frequencies are converted to firmware units and little-endian byte order.
+ *
+ * Returns 0 on success, -EOPNOTSUPP in WlanLite mode, -ENOMEM/-EIO/-EINVAL on
+ * failure. Caller must hold ndev_vif->vif_mutex.
+ */
+int slsi_test_sap_configure_monitor_mode(struct slsi_dev *sdev, struct net_device *dev, struct cfg80211_chan_def *chandef)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ size_t alloc_data_size = 0;
+ u16 center_freq1;
+ u16 center_freq2;
+ u16 chan_info;
+ int r = 0;
+ const u8 zero_mac[6] = {0};
+
+ const u8 monitor_config_ie_header[] = {
+ 0xDD, /* Element ID: Vendor Specific */
+ 0x11, /* Length */
+ 0x00, 0x16, 0x32, /* OUI: Samsung Electronics Co. */
+ 0x10, /* OUI Type: Monitor mode parameters */
+ 0x01 /* OUI Subtype: configuration */
+ };
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_NET_WARN(dev, "WlanLite: NOT supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+
+ chan_info = slsi_get_chann_info(sdev, chandef);
+ SLSI_NET_DBG2(dev, SLSI_MLME, "test_configure_monitor_mode_req(center_freq1:%u, chan_info:%u, center_freq2:%u)\n",
+ chandef->center_freq1,
+ chan_info,
+ chandef->center_freq2);
+
+ /* Firmware expects its own frequency units, little endian on the wire. */
+ center_freq1 = SLSI_FREQ_HOST_TO_FW(chandef->center_freq1);
+ center_freq1 = cpu_to_le16(center_freq1);
+ center_freq2 = SLSI_FREQ_HOST_TO_FW(chandef->center_freq2);
+ center_freq2 = cpu_to_le16(center_freq2);
+
+ alloc_data_size = sizeof(monitor_config_ie_header) + SLSI_TEST_CONFIG_MONITOR_MODE_DESCRIPTOR_SIZE;
+
+ req = fapi_alloc(test_configure_monitor_mode_req, TEST_CONFIGURE_MONITOR_MODE_REQ, ndev_vif->ifnum, alloc_data_size);
+ if (!req) {
+ /* %zu: alloc_data_size is a size_t */
+ SLSI_NET_ERR(dev, "failed to alloc Monitor mode request (len:%zu)\n", alloc_data_size);
+ return -ENOMEM;
+ }
+
+ fapi_append_data(req, monitor_config_ie_header, sizeof(monitor_config_ie_header));
+ fapi_append_data(req, (const u8 *)&center_freq1, 2);
+ fapi_append_data(req, (const u8 *)&chan_info, 2);
+ fapi_append_data(req, (const u8 *)&center_freq2, 2);
+
+ /* MAC address filtering is not supported yet; append zeros so the
+ * descriptor matches the length declared in the IE header.
+ */
+ fapi_append_data(req, zero_mac, sizeof(zero_mac));
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, TEST_CONFIGURE_MONITOR_MODE_CFM);
+ if (!cfm) {
+ SLSI_NET_ERR(dev, "failed to receive Confirm\n");
+ return -EIO;
+ }
+
+ /* Check the confirm's own result field (was mlme_set_channel_cfm,
+ * a copy-paste slip).
+ */
+ if (fapi_get_u16(cfm, u.test_configure_monitor_mode_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "test_configure_monitor_mode_cfm(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.test_configure_monitor_mode_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_MLME_H__
+#define __SLSI_MLME_H__
+
+#include "dev.h"
+#include "mib.h"
+
+/* WMM access-category indices (BE, BK, VI, VO) used when indexing
+ * per-AC parameters; values are implicit 0..3 in declaration order.
+ */
+enum slsi_ac_index_wmm_pe {
+ AC_BE,
+ AC_BK,
+ AC_VI,
+ AC_VO
+};
+
+#define SLSI_FREQ_FW_TO_HOST(f) ((f) / 2)
+#define SLSI_FREQ_HOST_TO_FW(f) ((f) * 2)
+
+#define SLSI_SINFO_MIB_ACCESS_TIMEOUT (1000) /* 1 sec timeout */
+
+#define SLSI_WLAN_EID_VENDOR_SPECIFIC 0xdd
+#define SLSI_WLAN_EID_INTERWORKING 107
+
+#define SLSI_WLAN_OUI_TYPE_WFA_HS20_IND 0x10
+#define SLSI_WLAN_OUI_TYPE_WFA_OSEN 0x12
+
+/*Extended capabilities bytes*/
+#define SLSI_WLAN_EXT_CAPA2_BSS_TRANSISITION_ENABLED (1 << 3)
+#define SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED (1 << 7)
+#define SLSI_WLAN_EXT_CAPA4_QOS_MAP_ENABLED (1 << 0)
+#define SLSI_WLAN_EXT_CAPA5_WNM_NOTIF_ENABLED (1 << 6)
+#define SLSI_WLAN_EXT_CAPA2_QBSS_LOAD_ENABLED BIT(7)
+#define SLSI_WLAN_EXT_CAPA1_PROXY_ARP_ENABLED BIT(4)
+#define SLSI_WLAN_EXT_CAPA2_TFS_ENABLED BIT(0)
+#define SLSI_WLAN_EXT_CAPA2_WNM_SLEEP_ENABLED BIT(1)
+#define SLSI_WLAN_EXT_CAPA2_TIM_ENABLED BIT(2)
+#define SLSI_WLAN_EXT_CAPA2_DMS_ENABLED BIT(4)
+
+/*RM Enabled Capabilities Bytes*/
+#define SLSI_WLAN_RM_CAPA0_LINK_MEASUREMENT_ENABLED BIT(0)
+#define SLSI_WLAN_RM_CAPA0_NEIGHBOR_REPORT_ENABLED BIT(1)
+#define SLSI_WLAN_RM_CAPA0_PASSIVE_MODE_ENABLED BIT(4)
+#define SLSI_WLAN_RM_CAPA0_ACTIVE_MODE_ENABLED BIT(5)
+#define SLSI_WLAN_RM_CAPA0_TABLE_MODE_ENABLED BIT(6)
+
+
+#define SLSI_AP_EXT_CAPAB_IE_LEN_MAX 12 /* EID (1) + Len (1) + Ext Capab (8) */
+
+#define SLSI_SCAN_DONE_IND_WAIT_TIMEOUT 40000 /* 40 seconds */
+
+/* WLAN_EID_COUNTRY available from kernel version 3.7 */
+#ifndef WLAN_EID_COUNTRY
+#define WLAN_EID_COUNTRY 7
+#endif
+
+/* P2P (Wi-Fi Direct) */
+#define SLSI_P2P_WILDCARD_SSID "DIRECT-"
+#define SLSI_P2P_WILDCARD_SSID_LENGTH 7
+#define SLSI_P2P_SOCIAL_CHAN_COUNT 3
+
+/* A join scan with a P2P GO SSID can occur, hence the SSID length comparison should use >= */
+#define SLSI_IS_P2P_SSID(ssid, ssid_len) ((ssid_len >= SLSI_P2P_WILDCARD_SSID_LENGTH) && \
+ (memcmp(ssid, SLSI_P2P_WILDCARD_SSID, SLSI_P2P_WILDCARD_SSID_LENGTH) == 0))
+
+/* Action frame categories for registering with firmware */
+#define SLSI_ACTION_FRAME_PUBLIC (1 << 4)
+#define SLSI_ACTION_FRAME_VENDOR_SPEC_PROTECTED (1 << 30)
+#define SLSI_ACTION_FRAME_VENDOR_SPEC (1 << 31)
+#define SLSI_ACTION_FRAME_WMM (1 << 17)
+#define SLSI_ACTION_FRAME_WNM (1 << 10)
+#define SLSI_ACTION_FRAME_QOS (1 << 1)
+#define SLSI_ACTION_FRAME_PROTECTED_DUAL BIT(9)
+#define SLSI_ACTION_FRAME_RADIO_MEASUREMENT BIT(5)
+
+/* Firmware transmit rates */
+#define SLSI_TX_RATE_NON_HT_1MBPS 0x4001
+#define SLSI_TX_RATE_NON_HT_6MBPS 0x4004
+#define SLSI_ROAMING_CHANNELS_MAX 38
+
+#define SLSI_WLAN_EID_WAPI 68
+
+/**
+ * If availability_duration is set to SLSI_FW_CHANNEL_DURATION_UNSPECIFIED
+ * then the firmware autonomously decides how long to remain listening on
+ * the configured channel.
+ */
+#define SLSI_FW_CHANNEL_DURATION_UNSPECIFIED (0x0000)
+extern struct ieee80211_supported_band slsi_band_2ghz;
+extern struct ieee80211_supported_band slsi_band_5ghz;
+extern struct ieee80211_sta_vht_cap slsi_vht_cap;
+
+/* Packet Filtering */
+#define SLSI_MAX_PATTERN_DESC 4 /* We are not using more than 4 pattern descriptors in a pkt filter*/
+#define SLSI_PKT_DESC_FIXED_LEN 2 /* offset (1) + mask length (1)*/
+#define SLSI_PKT_FILTER_ELEM_FIXED_LEN 6 /* oui(3) + oui type(1) + filter id (1) + pkt filter mode(1)*/
+#define SLSI_PKT_FILTER_ELEM_HDR_LEN (2 + SLSI_PKT_FILTER_ELEM_FIXED_LEN) /* element id + len + SLSI_PKT_FILTER_ELEM_FIXED_LEN*/
+#define SLSI_MAX_PATTERN_LENGTH 6
+
+/*Default values of MIBS params for GET_STA_INFO driver private command */
+#define SLSI_DEFAULT_UNIFI_PEER_RX_RETRY_PACKETS 0
+#define SLSI_DEFAULT_UNIFI_PEER_RX_BC_MC_PACKETS 0
+#define SLSI_DEFAULT_UNIFI_PEER_BANDWIDTH -1
+#define SLSI_DEFAULT_UNIFI_PEER_NSS 0
+#define SLSI_DEFAULT_UNIFI_PEER_RSSI 1
+#define SLSI_DEFAULT_UNIFI_PEER_TX_DATA_RATE 0
+
+/* Warn when a received FAPI signal type does not match the expected one.
+ * Arguments are expanded exactly once (into locals); the whole body is a
+ * do { } while (0) so the macro is a single statement, and the arguments
+ * are parenthesized to survive expression arguments.
+ */
+#define SLSI_CHECK_TYPE(sdev, recv_type, exp_type) \
+ do { \
+ int var1 = (recv_type); \
+ int var2 = (exp_type); \
+ if (var1 != var2) { \
+ SLSI_WARN(sdev, "Type mismatched, expected type: %d and received type %d ", var2, var1); \
+ } \
+ } while (0)
+
+
+/* One pattern descriptor inside a packet filter element: presumably the
+ * firmware matches 'mask_length' bytes starting at 'offset' against
+ * 'pattern' under 'mask' (see SLSI_PKT_DESC_FIXED_LEN above) — confirm
+ * against the FAPI packet-filter specification.
+ */
+struct slsi_mlme_pattern_desc {
+ u8 offset;
+ u8 mask_length;
+ u8 mask[SLSI_MAX_PATTERN_LENGTH];
+ u8 pattern[SLSI_MAX_PATTERN_LENGTH];
+};
+
+/* A packet filter element as serialised for the firmware: a pre-built
+ * header (element id + len + SLSI_PKT_FILTER_ELEM_FIXED_LEN bytes)
+ * followed by up to SLSI_MAX_PATTERN_DESC pattern descriptors.
+ */
+struct slsi_mlme_pkt_filter_elem {
+ u8 header[SLSI_PKT_FILTER_ELEM_HDR_LEN];
+ u8 num_pattern_desc;
+ struct slsi_mlme_pattern_desc pattern_desc[SLSI_MAX_PATTERN_DESC];
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+u16 slsi_get_chann_info(struct slsi_dev *sdev, struct cfg80211_chan_def *chandef);
+int slsi_check_channelization(struct slsi_dev *sdev, struct cfg80211_chan_def *chandef,
+ int wifi_sharing_channel_switched);
+#else
+u16 slsi_get_chann_info(struct slsi_dev *sdev, enum nl80211_channel_type channel_type);
+int slsi_check_channelization(struct slsi_dev *sdev, enum nl80211_channel_type channel_type);
+#endif
+
+int slsi_mlme_set_ip_address(struct slsi_dev *sdev, struct net_device *dev);
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+int slsi_mlme_set_ipv6_address(struct slsi_dev *sdev, struct net_device *dev);
+#endif
+int slsi_mlme_set(struct slsi_dev *sdev, struct net_device *dev, u8 *req, int req_len);
+int slsi_mlme_get(struct slsi_dev *sdev, struct net_device *dev, u8 *req, int req_len,
+ u8 *resp, int resp_buf_len, int *resp_len);
+
+int slsi_mlme_add_vif(struct slsi_dev *sdev, struct net_device *dev, u8 *interface_address, u8 *device_address);
+void slsi_mlme_del_vif(struct slsi_dev *sdev, struct net_device *dev);
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+int slsi_mlme_set_forward_beacon(struct slsi_dev *sdev, struct net_device *dev, int action);
+#endif
+int slsi_mlme_set_channel(struct slsi_dev *sdev, struct net_device *dev, struct ieee80211_channel *chan, u16 duration, u16 interval, u16 count);
+void slsi_ap_obss_scan_done_ind(struct net_device *dev, struct netdev_vif *ndev_vif);
+
+int slsi_mlme_unset_channel_req(struct slsi_dev *sdev, struct net_device *dev);
+
+u16 slsi_compute_chann_info(struct slsi_dev *sdev, u16 width, u16 center_freq0, u16 channel_freq);
+/**
+ * slsi_mlme_add_autonomous_scan() Returns:
+ * 0 : Scan installed
+ * >0 : Scan NOT installed. Not an Error
+ * <0 : Scan NOT installed. Error
+ */
+int slsi_mlme_add_scan(struct slsi_dev *sdev,
+ struct net_device *dev,
+ u16 scan_type,
+ u16 report_mode,
+ u32 n_ssids,
+ struct cfg80211_ssid *ssids,
+ u32 n_channels,
+ struct ieee80211_channel *channels[],
+ void *gscan_param,
+ const u8 *ies,
+ u16 ies_len,
+ bool wait_for_ind);
+
+int slsi_mlme_add_sched_scan(struct slsi_dev *sdev,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request,
+ const u8 *ies,
+ u16 ies_len);
+
+int slsi_mlme_del_scan(struct slsi_dev *sdev, struct net_device *dev, u16 scan_id, bool scan_timed_out);
+int slsi_mlme_start(struct slsi_dev *sdev, struct net_device *dev, u8 *bssid, struct cfg80211_ap_settings *settings, const u8 *wpa_ie_pos, const u8 *wmm_ie_pos, bool append_vht_ies);
+int slsi_mlme_connect(struct slsi_dev *sdev, struct net_device *dev, struct cfg80211_connect_params *sme, struct ieee80211_channel *channel, const u8 *bssid);
+int slsi_mlme_set_key(struct slsi_dev *sdev, struct net_device *dev, u16 key_id, u16 key_type, const u8 *address, struct key_params *key);
+int slsi_mlme_get_key(struct slsi_dev *sdev, struct net_device *dev, u16 key_id, u16 key_type, u8 *seq, int *seq_len);
+
+/**
+ * Sends MLME-DISCONNECT-REQ and waits for the MLME-DISCONNECT-CFM
+ * MLME-DISCONNECT-CFM only indicates if the firmware has accepted the request (or not)
+ * the actual end of the disconnection is indicated by the firmware sending
+ * MLME-DISCONNECT-IND (following a successful MLME-DISCONNECT-CFM).
+ * The host has to wait for the full exchange to complete with the firmware before
+ * returning to cfg80211 if it made the disconnect request. Due to this, this function
+ * waits for both the MLME-DISCONNECT-CFM and the MLME-DISCONNECT-IND (if the
+ * MLME-DISCONNECT-CFM was successful)
+ */
+int slsi_mlme_disconnect(struct slsi_dev *sdev, struct net_device *dev, u8 *bssid, u16 reason_code, bool wait_ind);
+
+int slsi_mlme_req(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+struct sk_buff *slsi_mlme_req_no_cfm(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+
+struct sk_buff *slsi_mlme_req_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 ind_id);
+/* Reads multiple MIB data related to station info. returns 0 if success else errno*/
+int slsi_mlme_get_sinfo_mib(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_peer *peer);
+
+int slsi_mlme_connect_scan(struct slsi_dev *sdev, struct net_device *dev,
+ u32 n_ssids, struct cfg80211_ssid *ssids, struct ieee80211_channel *channel);
+int slsi_mlme_powermgt(struct slsi_dev *sdev, struct net_device *dev, u16 ps_mode);
+int slsi_mlme_powermgt_unlocked(struct slsi_dev *sdev, struct net_device *dev, u16 ps_mode);
+int slsi_mlme_register_action_frame(struct slsi_dev *sdev, struct net_device *dev, u32 af_bitmap_active, u32 af_bitmap_suspended);
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+int slsi_mlme_synchronised_response(struct slsi_dev *sdev, struct net_device *dev,
+ struct cfg80211_external_auth_params *params);
+#endif
+int slsi_mlme_channel_switch(struct slsi_dev *sdev, struct net_device *dev, u16 center_freq, u16 chan_info);
+int slsi_mlme_add_info_elements(struct slsi_dev *sdev, struct net_device *dev, u16 purpose, const u8 *ies, const u16 ies_len);
+int slsi_mlme_send_frame_mgmt(struct slsi_dev *sdev, struct net_device *dev, const u8 *frame, int frame_len, u16 data_desc, u16 msg_type, u16 host_tag, u16 freq, u32 dwell_time, u32 period);
+int slsi_mlme_send_frame_data(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 msg_type,
+ u16 host_tag, u32 dwell_time, u32 period);
+int slsi_mlme_reset_dwell_time(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_mlme_set_packet_filter(struct slsi_dev *sdev, struct net_device *dev, int pkt_filter_len, u8 num_filters, struct slsi_mlme_pkt_filter_elem *pkt_filter_elems);
+void slsi_mlme_connect_resp(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_mlme_connected_resp(struct slsi_dev *sdev, struct net_device *dev, u16 peer_index);
+void slsi_mlme_roamed_resp(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_mlme_set_pmk(struct slsi_dev *sdev, struct net_device *dev, const u8 *pmk, u16 pmklen);
+int slsi_mlme_roam(struct slsi_dev *sdev, struct net_device *dev, const u8 *bssid, u16 freq);
+int slsi_mlme_set_cached_channels(struct slsi_dev *sdev, struct net_device *dev, u32 channels_count, u8 *channels);
+int slsi_mlme_tdls_peer_resp(struct slsi_dev *sdev, struct net_device *dev, u16 pid, u16 tdls_event);
+int slsi_mlme_tdls_action(struct slsi_dev *sdev, struct net_device *dev, const u8 *peer, int action, u16 center_freq, u16 chan_info);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+int slsi_mlme_set_acl(struct slsi_dev *sdev, struct net_device *dev, u16 ifnum, const struct cfg80211_acl_data *params);
+#endif
+int slsi_mlme_set_traffic_parameters(struct slsi_dev *sdev, struct net_device *dev, u16 user_priority, u16 medium_time, u16 minimun_data_rate, u8 *mac);
+int slsi_mlme_del_traffic_parameters(struct slsi_dev *sdev, struct net_device *dev, u16 user_priority);
+
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+int slsi_mlme_set_pno_list(struct slsi_dev *sdev, int count,
+ struct slsi_epno_param *epno_param, struct slsi_epno_hs2_param *epno_hs2_param);
+int slsi_mlme_start_link_stats_req(struct slsi_dev *sdev, u16 mpdu_size_threshold, bool aggressive_statis_enabled);
+int slsi_mlme_stop_link_stats_req(struct slsi_dev *sdev, u16 stats_stop_mask);
+int slsi_mlme_nan_enable(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_enable_req *hal_req);
+int slsi_mlme_nan_publish(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_publish_req *hal_req,
+ u16 publish_id);
+int slsi_mlme_nan_subscribe(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_subscribe_req *hal_req,
+ u16 subscribe_id);
+int slsi_mlme_nan_tx_followup(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_hal_nan_transmit_followup_req *hal_req);
+int slsi_mlme_nan_set_config(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_config_req *hal_req);
+#endif
+
+int slsi_mlme_set_ext_capab(struct slsi_dev *sdev, struct net_device *dev, struct slsi_mib_value *mib_val);
+int slsi_mlme_set_hs2_ext_cap(struct slsi_dev *sdev, struct net_device *dev, const u8 *ies, int ie_len);
+int slsi_mlme_reassociate(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_mlme_reassociate_resp(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_modify_ies(struct net_device *dev, u8 eid, u8 *ies, int ies_len, u8 ie_index, u8 ie_value);
+int slsi_mlme_set_rssi_monitor(struct slsi_dev *sdev, struct net_device *dev, u8 enable, s8 low_rssi_threshold, s8 high_rssi_threshold);
+struct slsi_mib_value *slsi_read_mibs(struct slsi_dev *sdev, struct net_device *dev, struct slsi_mib_get_entry *mib_entries, int mib_count, struct slsi_mib_data *mibrsp);
+int slsi_mlme_set_host_state(struct slsi_dev *sdev, struct net_device *dev, u8 host_state);
+int slsi_mlme_read_apf_request(struct slsi_dev *sdev, struct net_device *dev, u8 **host_dst, int *datalen);
+int slsi_mlme_install_apf_request(struct slsi_dev *sdev, struct net_device *dev,
+ u8 *program, u32 program_len);
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+int slsi_mlme_arp_detect_request(struct slsi_dev *sdev, struct net_device *dev, u16 action, u8 *ipaddr);
+#endif
+int slsi_mlme_set_ctwindow(struct slsi_dev *sdev, struct net_device *dev, unsigned int ct_param);
+int slsi_mlme_set_p2p_noa(struct slsi_dev *sdev, struct net_device *dev, unsigned int noa_count,
+ unsigned int interval, unsigned int duration);
+void slsi_fw_tx_rate_calc(u16 fw_rate, struct rate_info *tx_rate, unsigned long *data_rate_mbps);
+int slsi_test_sap_configure_monitor_mode(struct slsi_dev *sdev, struct net_device *dev, struct cfg80211_chan_def *chandef);
+
+struct sk_buff *slsi_mlme_req_cfm(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 cfm_id);
+
+#endif /*__SLSI_MLME_H__*/
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "debug.h"
+#include "mlme.h"
+
+#define SLSI_FAPI_NAN_ATTRIBUTE_PUT_U8(req, attribute, val) \
+ { \
+ u16 attribute_len = 1; \
+ struct sk_buff *req_p = req; \
+ fapi_append_data((req_p), (u8 *)&(attribute), 2); \
+ fapi_append_data((req_p), (u8 *)&attribute_len, 2); \
+ fapi_append_data((req_p), (u8 *)&(val), 1); \
+ }
+
+#define SLSI_FAPI_NAN_ATTRIBUTE_PUT_U16(req, attribute, val) \
+ { \
+ u16 attribute_len = 2; \
+ __le16 le16val = cpu_to_le16(val); \
+ struct sk_buff *req_p = req; \
+ fapi_append_data((req_p), (u8 *)&(attribute), 2); \
+ fapi_append_data((req_p), (u8 *)&attribute_len, 2); \
+ fapi_append_data((req_p), (u8 *)&le16val, 2); \
+ }
+
+#define SLSI_FAPI_NAN_ATTRIBUTE_PUT_U32(req, attribute, val) \
+ { \
+ u16 attribute_len = 4; \
+ __le32 le32val = cpu_to_le32(val);\
+ struct sk_buff *req_p = req; \
+ fapi_append_data((req_p), (u8 *)&(attribute), 2); \
+ fapi_append_data((req_p), (u8 *)&attribute_len, 2); \
+ fapi_append_data((req_p), (u8 *)&le32val, 4); \
+ }
+
+#define SLSI_FAPI_NAN_ATTRIBUTE_PUT_DATA(req, attribute, val, val_len) \
+ { \
+ u16 attribute_len = (val_len); \
+ struct sk_buff *req_p = req; \
+ fapi_append_data((req_p), (u8 *)&(attribute), 2); \
+ fapi_append_data((req_p), (u8 *)&attribute_len, 2); \
+ fapi_append_data((req_p), (val), (attribute_len)); \
+ }
+
+/* Serialise the NAN configuration fields of a HAL enable request into the
+ * Samsung vendor-specific IE (OUI 00:16:32, type 0x0b, subtype 0x01) that
+ * is carried as mbulk data in MLME-NAN-START.REQ. The declared IE length
+ * (0x24 = 36) matches the 36 bytes appended after the length field, so the
+ * append order and widths below must not change independently of it.
+ *
+ * NOTE(review): the 16/32-bit values here are appended in host byte order
+ * (no cpu_to_le16), unlike other IE builders in this file — presumably
+ * little-endian hosts only; confirm.
+ */
+static void slsi_mlme_nan_enable_fapi_data(struct sk_buff *req, struct slsi_hal_nan_enable_req *hal_req)
+{
+ u8 nan_config_fields_header[] = {0xdd, 0x24, 0x00, 0x16, 0x32, 0x0b, 0x01};
+ u16 fapi_bool;
+ u8 fapi_u8 = 0;
+ /* RSSI window defaults to 8 when the HAL did not configure one. */
+ u16 rssi_window = hal_req->config_rssi_window_size ? hal_req->rssi_window_size_val : 8;
+
+ fapi_append_data(req, nan_config_fields_header, sizeof(nan_config_fields_header));
+
+ fapi_append_data(req, &hal_req->master_pref, 1);
+
+ /* publish service ID inclusion in beacon */
+ fapi_bool = hal_req->config_sid_beacon && (hal_req->sid_beacon_val & 0x01);
+ fapi_append_data(req, (u8 *)&fapi_bool, 2);
+ if (fapi_bool)
+ fapi_u8 = hal_req->sid_beacon_val >> 1; /* remaining bits of sid_beacon_val */
+ fapi_append_data(req, &fapi_u8, 1);
+
+ /* subscribe service ID inclusion in beacon */
+ fapi_bool = hal_req->config_subscribe_sid_beacon && (hal_req->subscribe_sid_beacon_val & 0x01);
+ fapi_append_data(req, (u8 *)&fapi_bool, 2);
+ if (fapi_bool)
+ fapi_u8 = hal_req->subscribe_sid_beacon_val >> 1;
+ fapi_append_data(req, &fapi_u8, 1);
+
+ fapi_append_data(req, (u8 *)&rssi_window, 2);
+ fapi_append_data(req, (u8 *)&hal_req->disc_mac_addr_rand_interval_sec, 4);
+
+ /* 2.4G NAN band specific config*/
+ fapi_u8 = hal_req->config_2dot4g_rssi_close ? hal_req->rssi_close_2dot4g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_u8 = hal_req->config_2dot4g_rssi_middle ? hal_req->rssi_middle_2dot4g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_u8 = hal_req->config_2dot4g_rssi_proximity ? hal_req->rssi_proximity_2dot4g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_append_data(req, &hal_req->scan_params_val.dwell_time[0], 1);
+ fapi_append_data(req, (u8 *)&hal_req->scan_params_val.scan_period[0], 2);
+ fapi_bool = hal_req->config_2dot4g_dw_band;
+ fapi_append_data(req, (u8 *)&fapi_bool, 2);
+ fapi_append_data(req, (u8 *)&hal_req->dw_2dot4g_interval_val, 1);
+
+ /* 5G NAN band specific config*/
+ fapi_u8 = hal_req->config_5g_rssi_close ? hal_req->rssi_close_5g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_u8 = hal_req->config_5g_rssi_middle ? hal_req->rssi_middle_5g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_u8 = hal_req->config_5g_rssi_close_proximity ? hal_req->rssi_close_proximity_5g_val : 0;
+ fapi_append_data(req, &fapi_u8, 1);
+ fapi_append_data(req, &hal_req->scan_params_val.dwell_time[1], 1);
+ fapi_append_data(req, (u8 *)&hal_req->scan_params_val.scan_period[1], 2);
+ fapi_bool = hal_req->config_5g_dw_band;
+ fapi_append_data(req, (u8 *)&fapi_bool, 2);
+ fapi_append_data(req, (u8 *)&hal_req->dw_5g_interval_val, 1);
+}
+
+/**
+ * slsi_mlme_nan_enable() - send MLME-NAN-START.REQ built from a HAL enable
+ * request and wait for MLME-NAN-START.CFM.
+ *
+ * Enables MAC-address, start-cluster and joined-cluster events in the
+ * operation control flags, then appends the NAN configuration IE as mbulk
+ * data. Returns 0 on success, -ENOMEM/-EIO/-EINVAL on failure.
+ */
+int slsi_mlme_nan_enable(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_enable_req *hal_req)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 nan_oper_ctrl = 0;
+ u16 operatein5gband = hal_req->config_support_5g && hal_req->support_5g_val;
+ u16 hopcountmax = hal_req->config_hop_count_limit ? hal_req->hop_count_limit_val : 0;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "\n");
+
+ /* max mbulk data IE info length is 0x24. So need 0x26 bytes */
+ req = fapi_alloc(mlme_nan_start_req, MLME_NAN_START_REQ, ndev_vif->ifnum, 0x26);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+
+ nan_oper_ctrl |= FAPI_NANOPERATIONCONTROL_MAC_ADDRESS_EVENT | FAPI_NANOPERATIONCONTROL_START_CLUSTER_EVENT |
+ FAPI_NANOPERATIONCONTROL_JOINED_CLUSTER_EVENT;
+
+ fapi_set_u16(req, u.mlme_nan_start_req.operatein5gband, operatein5gband);
+ fapi_set_u16(req, u.mlme_nan_start_req.hopcountmax, hopcountmax);
+ fapi_set_u16(req, u.mlme_nan_start_req.nan_operation_control_flags, nan_oper_ctrl);
+
+ slsi_mlme_nan_enable_fapi_data(req, hal_req);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_NAN_START_CFM);
+ if (!cfm)
+ return -EIO;
+
+ /* Log the same field that was checked (was mlme_host_state_cfm,
+ * a copy-paste slip).
+ */
+ if (fapi_get_u16(cfm, u.mlme_nan_start_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "MLME_NAN_START_CFM(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_nan_start_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+/* Append one TLV (type @tlv_t, length @tlv_l, value @tlv_v) to the vendor
+ * IE currently being built, splitting the data across continuation IEs
+ * whenever the current IE body would exceed the 255-byte length limit.
+ *
+ * @header_ptr: in/out — points at the current IE header (length byte at
+ *              index 1); repointed when a continuation IE is started.
+ * @header_ie_generic_len: size of the generic IE header to replay for each
+ *              continuation IE.
+ * @end_ptr:    in/out — one past the last byte written for the current IE;
+ *              used by the caller to patch the final IE length.
+ *
+ * NOTE(review): tmp_buf holds 255 + 4 bytes, so this assumes tlv_l <= 255;
+ * confirm that all callers cap their TLV value lengths accordingly.
+ */
+static void slsi_mlme_nan_append_tlv(struct sk_buff *req, __le16 tlv_t, __le16 tlv_l, u8 *tlv_v, u8 **header_ptr,
+ u8 header_ie_generic_len, u8 **end_ptr)
+{
+ u8 tmp_buf[255 + 4];
+ u8 *tmp_buf_pos;
+ int tmp_buf_len, len1, ip_ie_len;
+
+ /* Stage type(2) + length(2) + value into a contiguous buffer. */
+ memcpy(tmp_buf, &tlv_t, 2);
+ memcpy(tmp_buf + 2, &tlv_l, 2);
+ memcpy(tmp_buf + 4, tlv_v, tlv_l);
+ tmp_buf_len = 4 + tlv_l;
+ /* Bytes already in the current IE body (excluding the 2-byte id/len). */
+ ip_ie_len = *end_ptr - *header_ptr - 2;
+ tmp_buf_pos = tmp_buf;
+
+ while (tmp_buf_len + ip_ie_len > 255) {
+ /* Fill the current IE to its 255-byte cap, then continue in a
+ * fresh IE carrying a copy of the generic header.
+ */
+ len1 = 255 - ip_ie_len;
+ fapi_append_data(req, tmp_buf_pos, len1);
+ (*header_ptr)[1] = 255;
+ tmp_buf_len -= len1;
+ tmp_buf_pos += len1;
+ ip_ie_len = 0;
+ if (tmp_buf_len) {
+ *header_ptr = fapi_append_data(req, *header_ptr, header_ie_generic_len);
+ *end_ptr = *header_ptr + header_ie_generic_len;
+ } else {
+ *end_ptr = *header_ptr + header_ie_generic_len + 255;
+ }
+ }
+ if (tmp_buf_len) {
+ fapi_append_data(req, tmp_buf, tmp_buf_len);
+ *end_ptr += tmp_buf_len;
+ }
+}
+
+/* Append the NAN data-path security vendor IE (OUI 00:16:32, type 0x0b,
+ * subtype 0x07): key type (1), cipher type (1), a 32-byte PMK field (the
+ * PMK itself for key type 1, zeros otherwise), then passphrase length (1)
+ * and optional passphrase body for key type 2. The IE length byte is left
+ * 0 in the template and patched at the end: 5 (OUI/type/subtype) + 1 + 1 +
+ * 32 + 1 = 40 fixed bytes plus key_len passphrase bytes.
+ *
+ * NOTE(review): passphrase_len is appended unchecked — confirm the HAL
+ * layer bounds it so the patched length (40 + key_len) cannot exceed 255.
+ */
+static void slsi_mlme_nan_fapi_put_data_path_security_ie(struct sk_buff *req, struct slsi_nan_security_info *sec_info)
+{
+ u8 ie_header[] = {0xdd, 0x00, 0x00, 0x16, 0x32, 0x0b, 0x07};
+ u8 *header_ptr;
+ u8 u8val, i, key_len = 0;
+
+ header_ptr = fapi_append_data(req, ie_header, sizeof(ie_header));
+ /* Only key types 1 (PMK) and 2 (passphrase) are forwarded; others map to 0. */
+ u8val = sec_info->key_info.key_type == 1 || sec_info->key_info.key_type == 2 ? sec_info->key_info.key_type : 0;
+ fapi_append_data(req, &u8val, 1);
+ fapi_append_data(req, (u8 *)&sec_info->cipher_type, 1);
+ if (sec_info->key_info.key_type == 1) {
+ fapi_append_data(req, sec_info->key_info.body.pmk_info.pmk, 32);
+ u8val = 0;
+ fapi_append_data(req, &u8val, 1);
+ } else {
+ /* Zero-fill the 32-byte PMK slot. */
+ u8val = 0;
+ for (i = 0; i < 32; i++)
+ fapi_append_data(req, &u8val, 1);
+ if (sec_info->key_info.key_type == 2) {
+ key_len = sec_info->key_info.body.passphrase_info.passphrase_len;
+ fapi_append_data(req, &key_len, 1);
+ fapi_append_data(req, sec_info->key_info.body.passphrase_info.passphrase, key_len);
+ } else {
+ fapi_append_data(req, &u8val, 1);
+ }
+ }
+ /* Patch the IE length now that the variable part is known. */
+ header_ptr[1] = 40 + key_len;
+}
+
+/* Append the NAN ranging configuration vendor IE (OUI 00:16:32, type 0x0b,
+ * subtype 0x09) to @req: ranging interval (2), ranging indications (1),
+ * ingress distance (2), egress distance (2).
+ *
+ * NOTE(review): the declared IE length is 0x0b (11) but 12 bytes follow the
+ * length field (5 header + 7 payload) — confirm against the firmware parser.
+ */
+static void slsi_mlme_nan_fapi_put_nan_ranging_ie(struct sk_buff *req, struct slsi_nan_ranging_cfg *cfg)
+{
+ u8 ranging_ie_hdr[] = {0xdd, 0x0b, 0x00, 0x16, 0x32, 0x0b, 0x09};
+
+ fapi_append_data(req, ranging_ie_hdr, sizeof(ranging_ie_hdr));
+ fapi_append_data(req, (u8 *)&cfg->ranging_interval_msec, 2);
+ fapi_append_data(req, (u8 *)&cfg->config_ranging_indications, 1);
+ fapi_append_data(req, (u8 *)&cfg->distance_ingress_mm, 2);
+ fapi_append_data(req, (u8 *)&cfg->distance_egress_mm, 2);
+}
+
+/* Build the mbulk data for MLME-NAN-PUBLISH.REQ: a vendor IE (subtype 0x02)
+ * holding 12 bytes of fixed publish parameters followed by optional TLVs,
+ * then the data-path security IE and the ranging IE. header_ptr/end_ptr
+ * track the current IE so slsi_mlme_nan_append_tlv() can split it into
+ * continuation IEs when the 255-byte IE limit is reached; the final IE
+ * length is patched once all TLVs are in.
+ */
+static void slsi_mlme_nan_publish_fapi_data(struct sk_buff *req, struct slsi_hal_nan_publish_req *hal_req)
+{
+ u8 nan_publish_fields_header[] = {0xdd, 0x00, 0x00, 0x16, 0x32, 0x0b, 0x02};
+ u8 *header_ptr, *end_ptr;
+ __le16 le16val;
+ u8 u8val;
+
+ header_ptr = fapi_append_data(req, nan_publish_fields_header, sizeof(nan_publish_fields_header));
+ fapi_append_data(req, &hal_req->publish_type, 1);
+ fapi_append_data(req, &hal_req->tx_type, 1);
+
+ le16val = cpu_to_le16(hal_req->ttl);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ le16val = cpu_to_le16(hal_req->period);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ fapi_append_data(req, &hal_req->publish_count, 1);
+ fapi_append_data(req, &hal_req->publish_match_indicator, 1);
+ le16val = cpu_to_le16(hal_req->rssi_threshold_flag);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ u8val = 0;
+ fapi_append_data(req, (u8 *)&u8val, 1); /* Ranging required */
+ fapi_append_data(req, (u8 *)&u8val, 1); /* Data path required */
+
+ /* 12 = total size of the fixed fields appended above. */
+ end_ptr = header_ptr + sizeof(nan_publish_fields_header) + 12;
+
+ if (hal_req->service_name_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SERVICE_NAME),
+ cpu_to_le16 (hal_req->service_name_len), hal_req->service_name, &header_ptr,
+ sizeof(nan_publish_fields_header), &end_ptr);
+
+ if (hal_req->service_specific_info_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO),
+ cpu_to_le16 (hal_req->service_specific_info_len),
+ hal_req->service_specific_info, &header_ptr,
+ sizeof(nan_publish_fields_header), &end_ptr);
+
+ if (hal_req->sdea_service_specific_info_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SDEA),
+ cpu_to_le16 (hal_req->sdea_service_specific_info_len),
+ hal_req->sdea_service_specific_info,
+ &header_ptr, sizeof(nan_publish_fields_header), &end_ptr);
+
+ if (hal_req->rx_match_filter_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_RX_MATCH_FILTER),
+ cpu_to_le16 (hal_req->rx_match_filter_len), hal_req->rx_match_filter,
+ &header_ptr, sizeof(nan_publish_fields_header), &end_ptr);
+
+ if (hal_req->tx_match_filter_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_TX_MATCH_FILTER),
+ cpu_to_le16 (hal_req->tx_match_filter_len), hal_req->tx_match_filter,
+ &header_ptr, sizeof(nan_publish_fields_header), &end_ptr);
+
+ /* update len */
+ header_ptr[1] = end_ptr - header_ptr - 2;
+ slsi_mlme_nan_fapi_put_data_path_security_ie(req, &hal_req->sec_info);
+ slsi_mlme_nan_fapi_put_nan_ranging_ie(req, &hal_req->ranging_cfg);
+
+}
+
+/**
+ * slsi_mlme_nan_publish() - send MLME-NAN-PUBLISH.REQ to start (@hal_req
+ * set) or cancel (@hal_req NULL) the NAN publish session @publish_id.
+ *
+ * On a successful start the id is recorded in ndev_vif->nan.publish_id_map;
+ * on cancel or confirm failure the bit is cleared. Returns 0 on success,
+ * -ENOMEM/-EIO/-EINVAL on failure.
+ */
+int slsi_mlme_nan_publish(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_publish_req *hal_req,
+ u16 publish_id)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 nan_sdf_flags = 0;
+ u16 max_mbulk_data_len = 0; /* no mbulk data when cancelling */
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "\n");
+ if (hal_req) {
+ u16 length = 17; /* non tlv info in fapi publish IE */
+
+ /* Each present TLV adds its value plus 4 bytes of type/length. */
+ length += hal_req->service_name_len ? hal_req->service_name_len + 4 : 0;
+ length += hal_req->service_specific_info_len ? hal_req->service_specific_info_len + 4 : 0;
+ length += hal_req->rx_match_filter_len ? hal_req->rx_match_filter_len + 4 : 0;
+ length += hal_req->tx_match_filter_len ? hal_req->tx_match_filter_len + 4 : 0;
+ length += hal_req->sdea_service_specific_info_len ? hal_req->sdea_service_specific_info_len + 4 : 0;
+ if (length > 255)
+ /* 2 = ie_id _ie_len, 5 = oui+type+sub_type*/
+ max_mbulk_data_len = (255 + 2) * (length / (255 - (2 + 5)) + 1);
+ else
+ max_mbulk_data_len = length + 2;
+ max_mbulk_data_len += 42 + 64; /* max length for NAN Data Path Security IE */
+ max_mbulk_data_len += 14; /* NAN Ranging IE*/
+
+ /* Set/Enable corresponding bits to disable any indications
+ * that follow a publish.
+ * BIT0 - Disable publish termination indication.
+ * BIT1 - Disable match expired indication.
+ * BIT2 - Disable followUp indication received (OTA).
+ */
+ if (hal_req->recv_indication_cfg & BIT(0))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_PUBLISH_END_EVENT;
+ if (hal_req->recv_indication_cfg & BIT(1))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_MATCH_EXPIRED_EVENT;
+ if (hal_req->recv_indication_cfg & BIT(2))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_RECEIVED_FOLLOWUP_EVENT;
+ }
+
+ req = fapi_alloc(mlme_nan_publish_req, MLME_NAN_PUBLISH_REQ, ndev_vif->ifnum, max_mbulk_data_len);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+
+ fapi_set_u16(req, u.mlme_nan_publish_req.publish_id, publish_id);
+ fapi_set_u16(req, u.mlme_nan_publish_req.nan_sdf_flags, nan_sdf_flags);
+
+ if (hal_req)
+ slsi_mlme_nan_publish_fapi_data(req, hal_req);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_NAN_PUBLISH_CFM);
+ if (!cfm)
+ return -EIO;
+
+ /* Log the same field that was checked (was mlme_host_state_cfm,
+ * a copy-paste slip).
+ */
+ if (fapi_get_u16(cfm, u.mlme_nan_publish_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "MLME_NAN_PUBLISH_CFM(result:0x%04x) ERROR\n",
+ fapi_get_u16(cfm, u.mlme_nan_publish_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ if (hal_req && !r)
+ ndev_vif->nan.publish_id_map |= BIT(publish_id);
+ else
+ ndev_vif->nan.publish_id_map &= ~BIT(publish_id);
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
+/* Build the mbulk data for MLME-NAN-SUBSCRIBE.REQ: a vendor IE (subtype
+ * 0x03) holding the fixed subscribe parameters followed by optional TLVs,
+ * then the data-path security IE and the ranging IE. header_ptr/end_ptr
+ * track the current IE so slsi_mlme_nan_append_tlv() can split it into
+ * continuation IEs at the 255-byte limit; the final IE length is patched
+ * once all TLVs are in.
+ */
+static void slsi_mlme_nan_subscribe_fapi_data(struct sk_buff *req, struct slsi_hal_nan_subscribe_req *hal_req)
+{
+ u8 nan_subscribe_fields_header[] = {0xdd, 0x00, 0x00, 0x16, 0x32, 0x0b, 0x03};
+ u8 *header_ptr, *end_ptr;
+ __le16 le16val;
+ u8 u8val = 0;
+
+ header_ptr = fapi_append_data(req, nan_subscribe_fields_header, sizeof(nan_subscribe_fields_header));
+ fapi_append_data(req, &hal_req->subscribe_type, 1);
+ fapi_append_data(req, &hal_req->service_response_filter, 1);
+ fapi_append_data(req, &hal_req->service_response_include, 1);
+ fapi_append_data(req, &hal_req->use_service_response_filter, 1);
+ fapi_append_data(req, &hal_req->ssi_required_for_match_indication, 1);
+
+ le16val = cpu_to_le16(hal_req->ttl);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ le16val = cpu_to_le16(hal_req->period);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ fapi_append_data(req, &hal_req->subscribe_count, 1);
+ fapi_append_data(req, &hal_req->subscribe_match_indicator, 1);
+ le16val = cpu_to_le16(hal_req->rssi_threshold_flag);
+ fapi_append_data(req, (u8 *)&le16val, 2);
+ fapi_append_data(req, &u8val, 1); /* ranging required */
+ /* end_ptr: one past the last fixed byte; TLV appends advance it. */
+ end_ptr = fapi_append_data(req, &u8val, 1); /* datapath required */
+ end_ptr += 1;
+
+ if (hal_req->service_name_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SERVICE_NAME),
+ cpu_to_le16 (hal_req->service_name_len), hal_req->service_name, &header_ptr,
+ sizeof(nan_subscribe_fields_header), &end_ptr);
+
+ if (hal_req->service_specific_info_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO),
+ cpu_to_le16 (hal_req->service_specific_info_len),
+ hal_req->service_specific_info, &header_ptr,
+ sizeof(nan_subscribe_fields_header), &end_ptr);
+
+ if (hal_req->rx_match_filter_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_RX_MATCH_FILTER),
+ cpu_to_le16 (hal_req->rx_match_filter_len), hal_req->rx_match_filter,
+ &header_ptr, sizeof(nan_subscribe_fields_header), &end_ptr);
+
+ if (hal_req->tx_match_filter_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_TX_MATCH_FILTER),
+ cpu_to_le16 (hal_req->tx_match_filter_len), hal_req->tx_match_filter,
+ &header_ptr, sizeof(nan_subscribe_fields_header), &end_ptr);
+
+ if (hal_req->sdea_service_specific_info_len)
+ slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SDEA),
+ cpu_to_le16 (hal_req->sdea_service_specific_info_len),
+ hal_req->sdea_service_specific_info,
+ &header_ptr, sizeof(nan_subscribe_fields_header), &end_ptr);
+
+ /* update len */
+ header_ptr[1] = end_ptr - header_ptr - 2;
+ slsi_mlme_nan_fapi_put_data_path_security_ie(req, &hal_req->sec_info);
+ slsi_mlme_nan_fapi_put_nan_ranging_ie(req, &hal_req->ranging_cfg);
+}
+
+int slsi_mlme_nan_subscribe(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_subscribe_req *hal_req,
+ u16 subscribe_id)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 nan_sdf_flags = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "\n");
+ if (hal_req) {
+ /*max possible length for publish attributes: 8*255 */
+ u16 max_mbulk_data_len;
+ u16 length = 17; /* non tlv info in fapi publish IE */
+
+ length += hal_req->service_name_len ? hal_req->service_name_len + 4 : 0;
+ length += hal_req->service_specific_info_len ? hal_req->service_specific_info_len + 4 : 0;
+ length += hal_req->rx_match_filter_len ? hal_req->rx_match_filter_len + 4 : 0;
+ length += hal_req->tx_match_filter_len ? hal_req->tx_match_filter_len + 4 : 0;
+ length += hal_req->sdea_service_specific_info_len ? hal_req->sdea_service_specific_info_len + 4 : 0;
+ if (length > 255)
+ /* 2 = ie_id _ie_len, 5 = oui+type+sub_type*/
+ max_mbulk_data_len = (255 + 2) * (length / (255 - (2 + 5)) + 1);
+ else
+ max_mbulk_data_len = length + 2;
+
+ req = fapi_alloc(mlme_nan_subscribe_req, MLME_NAN_SUBSCRIBE_REQ, ndev_vif->ifnum, max_mbulk_data_len);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+ /* Set/Enable corresponding bits to disable
+ * indications that follow a subscribe.
+ * BIT0 - Disable subscribe termination indication.
+ * BIT1 - Disable match expired indication.
+ * BIT2 - Disable followUp indication received (OTA).
+ */
+ if (hal_req->recv_indication_cfg & BIT(0))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_SUBSCRIBE_END_EVENT;
+ if (hal_req->recv_indication_cfg & BIT(1))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_MATCH_EXPIRED_EVENT;
+ if (hal_req->recv_indication_cfg & BIT(2))
+ nan_sdf_flags |= FAPI_NANSDFCONTROL_RECEIVED_FOLLOWUP_EVENT;
+ } else {
+ req = fapi_alloc(mlme_nan_subscribe_req, MLME_NAN_SUBSCRIBE_REQ, ndev_vif->ifnum, 0);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+ }
+
+ fapi_set_u16(req, u.mlme_nan_subscribe_req.subscribe_id, subscribe_id);
+ fapi_set_u16(req, u.mlme_nan_subscribe_req.nan_sdf_flags, nan_sdf_flags);
+
+ if (hal_req)
+ slsi_mlme_nan_subscribe_fapi_data(req, hal_req);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_NAN_SUBSCRIBE_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_nan_subscribe_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "MLME_NAN_SUBSCRIBE_CFM(res:0x%04x)\n",
+ fapi_get_u16(cfm, u.mlme_host_state_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ if (hal_req && !r)
+ ndev_vif->nan.subscribe_id_map |= BIT(subscribe_id);
+ else
+ ndev_vif->nan.subscribe_id_map &= ~BIT(subscribe_id);
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
/* Append the NAN follow-up vendor-specific IE to an MLME-NAN-FOLLOWUP.req.
 *
 * IE layout: 0xdd (vendor specific), length byte (patched at the end),
 * OUI 00:16:32 with type/subtype 0b:05 — presumably the Samsung OUI; then
 * peer address, priority and dw_or_faw, followed by optional TLVs.
 * slsi_mlme_nan_append_tlv() may split data across additional IEs when the
 * 255-byte IE limit is reached; it advances header_ptr/end_ptr accordingly.
 */
static void slsi_mlme_nan_followup_fapi_data(struct sk_buff *req, struct slsi_hal_nan_transmit_followup_req *hal_req)
{
	u8 nan_followup_fields_header[] = {0xdd, 0x00, 0x00, 0x16, 0x32, 0x0b, 0x05};
	u8 *header_ptr, *end_ptr;

	header_ptr = fapi_append_data(req, nan_followup_fields_header, sizeof(nan_followup_fields_header));
	fapi_append_data(req, hal_req->addr, ETH_ALEN);
	fapi_append_data(req, &hal_req->priority, 1);
	end_ptr = fapi_append_data(req, &hal_req->dw_or_faw, 1);
	/* fapi_append_data returns the start of the appended byte; advance past it */
	end_ptr += 1;

	if (hal_req->service_specific_info_len)
		slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO),
					 cpu_to_le16 (hal_req->service_specific_info_len),
					 hal_req->service_specific_info, &header_ptr,
					 sizeof(nan_followup_fields_header), &end_ptr);
	if (hal_req->sdea_service_specific_info_len)
		slsi_mlme_nan_append_tlv(req, cpu_to_le16 (SLSI_FAPI_NAN_SDEA),
					 cpu_to_le16 (hal_req->sdea_service_specific_info_len),
					 hal_req->sdea_service_specific_info, &header_ptr,
					 sizeof(nan_followup_fields_header), &end_ptr);

	/* update len: the IE length byte excludes the 2-byte id/len header */
	header_ptr[1] = end_ptr - header_ptr - 2;
}
+
+int slsi_mlme_nan_tx_followup(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_hal_nan_transmit_followup_req *hal_req)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 nan_sdf_flags = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "\n");
+
+ /* max possible length for publish attributes: 5*255 */
+ req = fapi_alloc(mlme_nan_followup_req, MLME_NAN_FOLLOWUP_REQ, ndev_vif->ifnum, 5 * 255);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+
+ fapi_set_u16(req, u.mlme_nan_followup_req.publish_subscribe_id, hal_req->publish_subscribe_id);
+ fapi_set_u16(req, u.mlme_nan_followup_req.peer_id, hal_req->requestor_instance_id);
+ fapi_set_u16(req, u.mlme_nan_subscribe_req.nan_sdf_flags, nan_sdf_flags);
+
+ slsi_mlme_nan_followup_fapi_data(req, hal_req);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_NAN_FOLLOWUP_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_nan_followup_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "MLME_NAN_FOLLOWUP_CFM(res:0x%04x)\n",
+ fapi_get_u16(cfm, u.mlme_host_state_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
+
/* Build the NAN configuration vendor-specific IE into an MLME-NAN-CONFIG.req.
 *
 * IE header: 0xdd (vendor specific), fixed length 0x24, OUI 00:16:32 with
 * type/subtype 0b:01 — presumably the Samsung OUI; confirm against FAPI spec.
 * NOTE(review): multi-byte fields are appended as raw host-order memory
 * (fapi_bool, rssi_window, the u32 interval); this assumes a little-endian
 * host matches the FAPI wire format — verify against the FAPI coding rules.
 * Field order and sizes are fixed by the IE layout; do not reorder.
 */
static void slsi_mlme_nan_config_fapi_data(struct sk_buff *req, struct slsi_hal_nan_config_req *hal_req)
{
	u8 nan_config_fields_header[] = {0xdd, 0x24, 0x00, 0x16, 0x32, 0x0b, 0x01};
	u16 fapi_bool;
	u8 fapi_u8 = 0;
	/* Default RSSI window size of 8 when not configured by HAL */
	u16 rssi_window = hal_req->config_rssi_window_size ? hal_req->rssi_window_size_val : 8;

	fapi_append_data(req, nan_config_fields_header, sizeof(nan_config_fields_header));

	fapi_append_data(req, &hal_req->master_pref, 1);

	/* publish service ID inclusion in beacon */
	fapi_bool = hal_req->config_sid_beacon && (hal_req->sid_beacon & 0x01);
	fapi_append_data(req, (u8 *)&fapi_bool, 2);

	/* sid_beacon: bit0 = enable, remaining bits = count */
	fapi_u8 = fapi_bool ? hal_req->sid_beacon >> 1 : 0;
	fapi_append_data(req, &fapi_u8, 1);

	/* subscribe service ID inclusion in beacon */
	fapi_bool = hal_req->config_subscribe_sid_beacon && (hal_req->subscribe_sid_beacon_val & 0x01);
	fapi_append_data(req, (u8 *)&fapi_bool, 2);

	fapi_u8 = fapi_bool ? hal_req->subscribe_sid_beacon_val >> 1 : 0;
	fapi_append_data(req, &fapi_u8, 1);

	fapi_append_data(req, (u8 *)&rssi_window, 2);
	fapi_append_data(req, (u8 *)&hal_req->disc_mac_addr_rand_interval_sec, 4);

	/* 2.4G NAN band specific config*/
	/* First two bytes are zero placeholders (meaning not shown here) */
	fapi_u8 = 0;
	fapi_append_data(req, &fapi_u8, 1);
	fapi_append_data(req, &fapi_u8, 1);
	fapi_u8 = hal_req->config_rssi_proximity ? hal_req->rssi_proximity : 0;
	fapi_append_data(req, &fapi_u8, 1);
	fapi_append_data(req, &hal_req->scan_params_val.dwell_time[0], 1);
	fapi_append_data(req, (u8 *)&hal_req->scan_params_val.scan_period[0], 2);
	fapi_bool = hal_req->config_2dot4g_dw_band;
	fapi_append_data(req, (u8 *)&fapi_bool, 2);
	fapi_append_data(req, (u8 *)&hal_req->dw_2dot4g_interval_val, 1);

	/* 5G NAN band specific config*/
	fapi_u8 = 0;
	fapi_append_data(req, &fapi_u8, 1);
	fapi_append_data(req, &fapi_u8, 1);
	fapi_u8 = hal_req->config_5g_rssi_close_proximity ? hal_req->rssi_close_proximity_5g_val : 0;
	fapi_append_data(req, &fapi_u8, 1);
	fapi_append_data(req, &hal_req->scan_params_val.dwell_time[1], 1);
	fapi_append_data(req, (u8 *)&hal_req->scan_params_val.scan_period[1], 2);
	fapi_bool = hal_req->config_5g_dw_band;
	fapi_append_data(req, (u8 *)&fapi_bool, 2);
	fapi_append_data(req, (u8 *)&hal_req->dw_5g_interval_val, 1);
}
+
+int slsi_mlme_nan_set_config(struct slsi_dev *sdev, struct net_device *dev, struct slsi_hal_nan_config_req *hal_req)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *req;
+ struct sk_buff *cfm;
+ int r = 0;
+ u16 nan_oper_ctrl = 0;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "\n");
+ /* max possible length for publish attributes 5*255 */
+ req = fapi_alloc(mlme_nan_config_req, MLME_NAN_CONFIG_REQ, ndev_vif->ifnum, 5 * 255);
+ if (!req) {
+ SLSI_NET_ERR(dev, "fapi alloc failure\n");
+ return -ENOMEM;
+ }
+
+ nan_oper_ctrl |= FAPI_NANOPERATIONCONTROL_MAC_ADDRESS_EVENT | FAPI_NANOPERATIONCONTROL_START_CLUSTER_EVENT |
+ FAPI_NANOPERATIONCONTROL_JOINED_CLUSTER_EVENT;
+ fapi_set_u16(req, u.mlme_nan_config_req.nan_operation_control_flags, nan_oper_ctrl);
+
+ slsi_mlme_nan_config_fapi_data(req, hal_req);
+
+ cfm = slsi_mlme_req_cfm(sdev, dev, req, MLME_NAN_FOLLOWUP_CFM);
+ if (!cfm)
+ return -EIO;
+
+ if (fapi_get_u16(cfm, u.mlme_nan_followup_cfm.result_code) != FAPI_RESULTCODE_SUCCESS) {
+ SLSI_NET_ERR(dev, "MLME_NAN_FOLLOWUP_CFM(res:0x%04x)\n",
+ fapi_get_u16(cfm, u.mlme_host_state_cfm.result_code));
+ r = -EINVAL;
+ }
+
+ slsi_kfree_skb(cfm);
+ return r;
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/sch_generic.h>
+#include <linux/if_ether.h>
+#include <scsc/scsc_logring.h>
+
+#include "debug.h"
+#include "netif.h"
+#include "dev.h"
+#include "mgt.h"
+#include "scsc_wifi_fcq.h"
+#include "ioctl.h"
+#include "mib.h"
+#include "hip4_sampler.h"
+
+#define IP4_OFFSET_TO_TOS_FIELD 1
+#define IP6_OFFSET_TO_TC_FIELD_0 0
+#define IP6_OFFSET_TO_TC_FIELD_1 1
+#define FIELD_TO_DSCP 2
+
+/* DSCP */
+/* (RFC5865) */
+#define DSCP_VA 0x2C
+/* (RFC3246) */
+#define DSCP_EF 0x2E
+/* (RFC2597) */
+#define DSCP_AF43 0x26
+#define DSCP_AF42 0x24
+#define DSCP_AF41 0x22
+#define DSCP_AF33 0x1E
+#define DSCP_AF32 0x1C
+#define DSCP_AF31 0x1A
+#define DSCP_AF23 0x16
+#define DSCP_AF22 0x14
+#define DSCP_AF21 0x12
+#define DSCP_AF13 0x0E
+#define DSCP_AF12 0x0C
+#define DSCP_AF11 0x0A
+/* (RFC2474) */
+#define CS7 0x38
+#define CS6 0x30
+#define CS5 0x28
+#define CS4 0x20
+#define CS3 0x18
+#define CS2 0x10
+#define CS0 0x00
+/* (RFC3662) */
+#define CS1 0x08
+
+#ifndef CONFIG_ARM
+static bool tcp_ack_suppression_disable;
+module_param(tcp_ack_suppression_disable, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_disable, "Disable TCP ack suppression feature");
+
+static bool tcp_ack_suppression_disable_2g;
+module_param(tcp_ack_suppression_disable_2g, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_disable_2g, "Disable TCP ack suppression for only 2.4GHz band");
+
+static bool tcp_ack_suppression_monitor = true;
+module_param(tcp_ack_suppression_monitor, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_monitor, "TCP ack suppression throughput monitor: Y: enable (default), N: disable");
+
+static uint tcp_ack_suppression_monitor_interval = 500;
+module_param(tcp_ack_suppression_monitor_interval, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_monitor_interval, "Sampling interval (in ms) for throughput monitor");
+
+static uint tcp_ack_suppression_timeout = 16;
+module_param(tcp_ack_suppression_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_timeout, "Timeout (in ms) before cached TCP ack is flushed to tx");
+
+static uint tcp_ack_suppression_max = 16;
+module_param(tcp_ack_suppression_max, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_max, "Maximum number of TCP acks suppressed before latest flushed to tx");
+
+static uint tcp_ack_suppression_rate_very_high = 100;
+module_param(tcp_ack_suppression_rate_very_high, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high, "Rate (in Mbps) to apply very high degree of suppression");
+
+static uint tcp_ack_suppression_rate_very_high_timeout = 4;
+module_param(tcp_ack_suppression_rate_very_high_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in very high rate");
+
+static uint tcp_ack_suppression_rate_very_high_acks = 20;
+module_param(tcp_ack_suppression_rate_very_high_acks, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_very_high_acks, "Maximum number of TCP acks suppressed before latest flushed in very high rate");
+
+static uint tcp_ack_suppression_rate_high = 20;
+module_param(tcp_ack_suppression_rate_high, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_high, "Rate (in Mbps) to apply high degree of suppression");
+
+static uint tcp_ack_suppression_rate_high_timeout = 4;
+module_param(tcp_ack_suppression_rate_high_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_high_timeout, "Timeout (in ms) before cached TCP ack is flushed in high rate");
+
+static uint tcp_ack_suppression_rate_high_acks = 16;
+module_param(tcp_ack_suppression_rate_high_acks, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_high_acks, "Maximum number of TCP acks suppressed before latest flushed in high rate");
+
+static uint tcp_ack_suppression_rate_low = 1;
+module_param(tcp_ack_suppression_rate_low, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_low, "Rate (in Mbps) to apply low degree of suppression");
+
+static uint tcp_ack_suppression_rate_low_timeout = 4;
+module_param(tcp_ack_suppression_rate_low_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_low_timeout, "Timeout (in ms) before cached TCP ack is flushed in low rate");
+
+static uint tcp_ack_suppression_rate_low_acks = 10;
+module_param(tcp_ack_suppression_rate_low_acks, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rate_low_acks, "Maximum number of TCP acks suppressed before latest flushed in low rate");
+
+static uint tcp_ack_suppression_slow_start_acks = 512;
+module_param(tcp_ack_suppression_slow_start_acks, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_slow_start_acks, "Maximum number of Acks sent in slow start");
+
+static uint tcp_ack_suppression_rcv_window = 128;
+module_param(tcp_ack_suppression_rcv_window, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tcp_ack_suppression_rcv_window, "Receive window size (in unit of Kbytes) that triggers Ack suppression");
+
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t);
+#else
+static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data);
+#endif
+static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev);
+static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev);
+static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb);
+#endif
+
+/* Net Device callback operations */
/* ndo_open: bring a vif's net device up.
 *
 * Starts the chip (slsi_start) under a wakelock, derives the per-interface
 * MAC addresses from the hardware address the first time any netdev comes
 * up, optionally enters monitor mode (debug builds) and starts TCP ack
 * suppression, then enables the TX queues. In RF test mode some MIBs are
 * forced afterwards.
 *
 * Return: 0 on success; -EINVAL if already up, -EIO if MLME is blocked, or
 * the error from slsi_start()/slsi_start_monitor_mode().
 */
static int slsi_net_open(struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;
	int err;
	unsigned char dev_addr_zero_check[ETH_ALEN];

	if (WARN_ON(ndev_vif->is_available))
		return -EINVAL;

	if (sdev->mlme_blocked) {
		SLSI_NET_WARN(dev, "Fail: called when MLME in blocked state\n");
		return -EIO;
	}

	slsi_wakelock(&sdev->wlan_wl);

	/* check if RF test mode has been requested */
	slsi_check_rf_test_mode();

	err = slsi_start(sdev);
	if (WARN_ON(err)) {
		slsi_wakeunlock(&sdev->wlan_wl);
		return err;
	}

	/* First interface up: derive all interface MACs from the HW address */
	if (!sdev->netdev_up_count) {
		slsi_get_hw_mac_address(sdev, sdev->hw_addr);
		/* Assign Addresses */
		SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_WLAN], sdev->hw_addr);

		SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2P], sdev->hw_addr);
		sdev->netdev_addresses[SLSI_NET_INDEX_P2P][0] |= 0x02; /* Set the local bit */

		SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN], sdev->hw_addr);
		sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][0] |= 0x02; /* Set the local bit */
		sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN][4] ^= 0x80; /* EXOR 5th byte with 0x80 */
#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
		SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_NAN], sdev->hw_addr);
		sdev->netdev_addresses[SLSI_NET_INDEX_NAN][0] |= 0x02; /* Set the local bit */
		sdev->netdev_addresses[SLSI_NET_INDEX_NAN][3] ^= 0x80; /* EXOR 4th byte with 0x80 */
#endif
		sdev->initial_scan = true;
	}

	/* Only assign an address if the netdev does not already have one
	 * (it is zeroed in slsi_net_stop on non-test builds)
	 */
	memset(dev_addr_zero_check, 0, ETH_ALEN);
	if (!memcmp(dev->dev_addr, dev_addr_zero_check, ETH_ALEN)) {
#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
		if (SLSI_IS_VIF_INDEX_MHS(sdev, ndev_vif))
			SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
		else
			SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
#else
		SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
#endif
	}
	SLSI_ETHER_COPY(dev->perm_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
#ifdef CONFIG_SCSC_WLAN_DEBUG
	if (ndev_vif->iftype == NL80211_IFTYPE_MONITOR) {
		err = slsi_start_monitor_mode(sdev, dev);
		if (WARN_ON(err)) {
			slsi_wakeunlock(&sdev->wlan_wl);
			SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
			return err;
		}
	}
#endif
	SLSI_NET_INFO(dev, "ifnum:%d r:%d MAC:%pM\n", ndev_vif->ifnum, sdev->recovery_status, dev->dev_addr);
	ndev_vif->is_available = true;
	sdev->netdev_up_count++;

	/* reinit_completion() replaced INIT_COMPLETION() in 3.13 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
	reinit_completion(&ndev_vif->sig_wait.completion);
#else
	INIT_COMPLETION(ndev_vif->sig_wait.completion);
#endif
#ifndef CONFIG_ARM
	slsi_netif_tcp_ack_suppression_start(dev);
#endif
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

	netif_tx_start_all_queues(dev);
	slsi_wakeunlock(&sdev->wlan_wl);

	/* The default power mode in host*/
	/* 2511 means unifiForceActive and 1 means active */
	if (slsi_is_rf_test_mode_enabled()) {
		SLSI_NET_INFO(dev, "*#rf# rf test mode set is enabled.\n");
		slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAMING_ENABLED, 0);
		slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, 0);
		slsi_set_mib_roam(sdev, NULL, 2511, 1);
		slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, 0);
	}

	return 0;
}
+
/* ndo_stop: take a vif's net device down.
 *
 * Stops the TX queues, stops TCP ack suppression, tears the vif down via
 * slsi_stop_net_dev() and zeroes the device MAC (non-test builds) so that
 * slsi_net_open() reassigns it next time. Safe to call when the vif was
 * already torn down by a chip failure (is_available == false).
 * Always returns 0.
 */
static int slsi_net_stop(struct net_device *dev)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;

	SLSI_NET_INFO(dev, "ifnum:%d r:%d\n", ndev_vif->ifnum, sdev->recovery_status);
	slsi_wakelock(&sdev->wlan_wl);
	netif_tx_stop_all_queues(dev);
	sdev->initial_scan = false;

	if (!ndev_vif->is_available) {
		/* May have been taken out by the Chip going down */
		SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available\n");
		slsi_wakeunlock(&sdev->wlan_wl);
		return 0;
	}
#ifndef SLSI_TEST_DEV
	/* Restore the TPC threshold MIB for normal user operation */
	if (!slsi_is_rf_test_mode_enabled() && !sdev->recovery_status) {
		SLSI_NET_DBG1(dev, SLSI_NETDEV, "To user mode\n");
		slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, -55);
	}
#endif
#ifndef CONFIG_ARM
	slsi_netif_tcp_ack_suppression_stop(dev);
#endif
	slsi_stop_net_dev(sdev, dev);

#ifndef SLSI_TEST_DEV
	/* Zeroed so slsi_net_open() knows to reassign the address */
	memset(dev->dev_addr, 0, ETH_ALEN);
#endif

	sdev->allow_switch_40_mhz = true;
	sdev->allow_switch_80_mhz = true;
	sdev->acs_channel_switched = false;
	slsi_wakeunlock(&sdev->wlan_wl);
	return 0;
}
+
+/* This is called after the WE handlers */
+static int slsi_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ SLSI_NET_DBG4(dev, SLSI_NETDEV, "IOCTL cmd:0x%.4x\n", cmd);
+
+ if (cmd == SIOCDEVPRIVATE + 2) { /* 0x89f0 + 2 from wpa_supplicant */
+ return slsi_ioctl(dev, rq, cmd);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct net_device_stats *slsi_net_get_stats(struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
+ return &ndev_vif->stats;
+}
+
+#ifdef CONFIG_SCSC_USE_WMM_TOS
+static u16 slsi_get_priority_from_tos(u8 *frame, u16 proto)
+{
+ if (WARN_ON(!frame))
+ return FAPI_PRIORITY_QOS_UP0;
+
+ switch (proto) {
+ case ETH_P_IP: /* IPv4 */
+ return (u16)(((frame[IP4_OFFSET_TO_TOS_FIELD]) & 0xE0) >> 5);
+
+ case ETH_P_IPV6: /* IPv6 */
+ return (u16)((*frame & 0x0E) >> 1);
+
+ default:
+ return FAPI_PRIORITY_QOS_UP0;
+ }
+}
+
+#else
/* Map the DSCP of an IPv4/IPv6 packet to an 802.11 user priority.
 *
 * @frame points at the first byte of the IP header (after the Ethernet
 * header). IPv4: DSCP is the top 6 bits of the TOS byte. IPv6: the
 * traffic class is reassembled from the low nibble of byte 0 and the high
 * nibble of byte 1, then shifted down to the 6 DSCP bits.
 *
 * On Android 10+ an RFC 8325-based table is used; otherwise a legacy table
 * applies (note: in the legacy table all AF classes map to UP0 except AF4x,
 * which maps to UP5 — preserved legacy behaviour, presumably intentional).
 * Unknown DSCP values and non-IP protocols fall back to best effort (UP0).
 */
static u16 slsi_get_priority_from_tos_dscp(u8 *frame, u16 proto)
{
	u8 dscp;

	if (WARN_ON(!frame))
		return FAPI_PRIORITY_QOS_UP0;

	switch (proto) {
	case ETH_P_IP: /* IPv4 */
		dscp = frame[IP4_OFFSET_TO_TOS_FIELD] >> FIELD_TO_DSCP;
		break;

	case ETH_P_IPV6: /* IPv6 */
		/* Get traffic class */
		dscp = (((frame[IP6_OFFSET_TO_TC_FIELD_0] & 0x0F) << 4) |
			((frame[IP6_OFFSET_TO_TC_FIELD_1] & 0xF0) >> 4)) >> FIELD_TO_DSCP;
		break;

	default:
		return FAPI_PRIORITY_QOS_UP0;
	}
/* DSCP table based in RFC8325 from Android 10 */
#if (defined(ANDROID_VERSION) && ANDROID_VERSION >= 100000)
	switch (dscp) {
	case CS7:
		return FAPI_PRIORITY_QOS_UP7;
	case CS6:
	case DSCP_EF:
	case DSCP_VA:
		return FAPI_PRIORITY_QOS_UP6;
	case CS5:
		return FAPI_PRIORITY_QOS_UP5;
	case DSCP_AF41:
	case DSCP_AF42:
	case DSCP_AF43:
	case CS4:
	case DSCP_AF31:
	case DSCP_AF32:
	case DSCP_AF33:
	case CS3:
		return FAPI_PRIORITY_QOS_UP4;
	case DSCP_AF21:
	case DSCP_AF22:
	case DSCP_AF23:
		return FAPI_PRIORITY_QOS_UP3;
	case CS2:
	case DSCP_AF11:
	case DSCP_AF12:
	case DSCP_AF13:
	case CS0:
		return FAPI_PRIORITY_QOS_UP0;
	case CS1:
		return FAPI_PRIORITY_QOS_UP1;
	default:
		return FAPI_PRIORITY_QOS_UP0;
	}
#else
	/* Legacy (pre-Android 10) mapping */
	switch (dscp) {
	case DSCP_EF:
	case DSCP_VA:
		return FAPI_PRIORITY_QOS_UP6;
	case DSCP_AF43:
	case DSCP_AF42:
	case DSCP_AF41:
		return FAPI_PRIORITY_QOS_UP5;
	case DSCP_AF33:
	case DSCP_AF32:
	case DSCP_AF31:
	case DSCP_AF23:
	case DSCP_AF22:
	case DSCP_AF21:
	case DSCP_AF13:
	case DSCP_AF12:
	case DSCP_AF11:
		return FAPI_PRIORITY_QOS_UP0;
	case CS7:
		return FAPI_PRIORITY_QOS_UP7;
	case CS6:
		return FAPI_PRIORITY_QOS_UP6;
	case CS5:
		return FAPI_PRIORITY_QOS_UP5;
	case CS4:
		return FAPI_PRIORITY_QOS_UP4;
	case CS3:
		return FAPI_PRIORITY_QOS_UP3;
	case CS2:
		return FAPI_PRIORITY_QOS_UP2;
	case CS1:
		return FAPI_PRIORITY_QOS_UP1;
	case CS0:
		return FAPI_PRIORITY_QOS_UP0;
	default:
		return FAPI_PRIORITY_QOS_UP0;
	}
#endif
}
+
+#endif
+
+static bool slsi_net_downgrade_ac(struct net_device *dev, struct sk_buff *skb)
+{
+ SLSI_UNUSED_PARAMETER(dev);
+
+ switch (skb->priority) {
+ case 6:
+ case 7:
+ skb->priority = FAPI_PRIORITY_QOS_UP5; /* VO -> VI */
+ return true;
+ case 4:
+ case 5:
+ skb->priority = FAPI_PRIORITY_QOS_UP3; /* VI -> BE */
+ return true;
+ case 0:
+ case 3:
+ skb->priority = FAPI_PRIORITY_QOS_UP2; /* BE -> BK */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 slsi_net_up_to_ac_mapping(u8 priority)
+{
+ switch (priority) {
+ case FAPI_PRIORITY_QOS_UP6:
+ case FAPI_PRIORITY_QOS_UP7:
+ return BIT(FAPI_PRIORITY_QOS_UP6) | BIT(FAPI_PRIORITY_QOS_UP7);
+ case FAPI_PRIORITY_QOS_UP4:
+ case FAPI_PRIORITY_QOS_UP5:
+ return BIT(FAPI_PRIORITY_QOS_UP4) | BIT(FAPI_PRIORITY_QOS_UP5);
+ case FAPI_PRIORITY_QOS_UP0:
+ case FAPI_PRIORITY_QOS_UP3:
+ return BIT(FAPI_PRIORITY_QOS_UP0) | BIT(FAPI_PRIORITY_QOS_UP3);
+ default:
+ return BIT(FAPI_PRIORITY_QOS_UP1) | BIT(FAPI_PRIORITY_QOS_UP2);
+ }
+}
+
+enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority)
+{
+ switch (priority) {
+ case FAPI_PRIORITY_QOS_UP0:
+ case FAPI_PRIORITY_QOS_UP3:
+ return SLSI_TRAFFIC_Q_BE;
+ case FAPI_PRIORITY_QOS_UP1:
+ case FAPI_PRIORITY_QOS_UP2:
+ return SLSI_TRAFFIC_Q_BK;
+ case FAPI_PRIORITY_QOS_UP4:
+ case FAPI_PRIORITY_QOS_UP5:
+ return SLSI_TRAFFIC_Q_VI;
+ case FAPI_PRIORITY_QOS_UP6:
+ case FAPI_PRIORITY_QOS_UP7:
+ return SLSI_TRAFFIC_Q_VO;
+ default:
+ return SLSI_TRAFFIC_Q_BE;
+ }
+}
+
+int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids)
+{
+ switch (ac) {
+ case SLSI_TRAFFIC_Q_BE:
+ tids[0] = FAPI_PRIORITY_QOS_UP0;
+ tids[1] = FAPI_PRIORITY_QOS_UP3;
+ break;
+
+ case SLSI_TRAFFIC_Q_BK:
+ tids[0] = FAPI_PRIORITY_QOS_UP1;
+ tids[1] = FAPI_PRIORITY_QOS_UP2;
+ break;
+
+ case SLSI_TRAFFIC_Q_VI:
+ tids[0] = FAPI_PRIORITY_QOS_UP4;
+ tids[1] = FAPI_PRIORITY_QOS_UP5;
+ break;
+
+ case SLSI_TRAFFIC_Q_VO:
+ tids[0] = FAPI_PRIORITY_QOS_UP6;
+ tids[1] = FAPI_PRIORITY_QOS_UP7;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void slsi_net_downgrade_pri(struct net_device *dev, struct slsi_peer *peer,
+ struct sk_buff *skb)
+{
+ /* in case we are a client downgrade the ac if acm is
+ * set and tspec is not established
+ */
+ while (unlikely(peer->wmm_acm & BIT(skb->priority)) &&
+ !(peer->tspec_established & slsi_net_up_to_ac_mapping(skb->priority))) {
+ SLSI_NET_DBG3(dev, SLSI_NETDEV, "Downgrading from UP:%d\n", skb->priority);
+ if (!slsi_net_downgrade_ac(dev, skb))
+ break;
+ }
+ SLSI_NET_DBG4(dev, SLSI_NETDEV, "To UP:%d\n", skb->priority);
+}
+
/* ndo_select_queue: pick the netdev TX queue for an outgoing frame.
 *
 * EAP/WAI, ARP and DHCP-over-IPv4 frames go to the priority queue;
 * multicast on an AP vif goes to the per-AC multicast queues; everything
 * else is classified (802.11 QoS map, important-frame boost, or TOS/DSCP),
 * downgraded per ACM policy, and mapped to the destination peer's per-AC
 * queue. Frames with invalid addresses or no known peer are sent to the
 * discard queue. The signature varies with kernel version.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv)
#else
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb)
#endif
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_dev *sdev = ndev_vif->sdev;
	u16 netif_q = 0;
	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
	int proto = 0;
	struct slsi_peer *peer;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
	(void)accel_priv;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
	(void)fallback;
#endif
	SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");

	/* Defensive check for uninitialized mac header */
	if (!skb_mac_header_was_set(skb))
		skb_reset_mac_header(skb);

	if (is_zero_ether_addr(ehdr->h_dest) || is_zero_ether_addr(ehdr->h_source)) {
		SLSI_NET_WARN(dev, "invalid Ethernet addresses (dest:%pM,src:%pM)\n", ehdr->h_dest, ehdr->h_source);
		SCSC_BIN_TAG_INFO(BINARY, skb->data, skb->len > 128 ? 128 : skb->len);
		return SLSI_NETIF_Q_DISCARD;
	}

	proto = be16_to_cpu(eth_hdr(skb)->h_proto);

	switch (proto) {
	default:
		/* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
		break;
	case ETH_P_PAE:
	case ETH_P_WAI:
		SLSI_NET_DBG3(dev, SLSI_TX, "EAP packet. Priority Queue Selected\n");
		return SLSI_NETIF_Q_PRIORITY;
	case ETH_P_ARP:
		SLSI_NET_DBG3(dev, SLSI_TX, "ARP frame. Priority Queue Selected\n");
		return SLSI_NETIF_Q_PRIORITY;
	case ETH_P_IP:
		if (slsi_is_dhcp_packet(skb->data) == SLSI_TX_IS_NOT_DHCP)
			break;
		SLSI_NET_DBG3(dev, SLSI_TX, "DHCP packet. Priority Queue Selected\n");
		return SLSI_NETIF_Q_PRIORITY;
	}

	if (ndev_vif->vif_type == FAPI_VIFTYPE_AP)
		/* MULTICAST/BROADCAST Queue is only used for AP */
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			SLSI_NET_DBG3(dev, SLSI_TX, "Multicast AC queue will be selected\n");
#ifdef CONFIG_SCSC_USE_WMM_TOS
			skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
#else
			skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
#endif
			return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb->priority));
		}

	/* peer_lock held from lookup until the queue is derived from the peer */
	slsi_spinlock_lock(&ndev_vif->peer_lock);
	peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
	if (!peer) {
		SLSI_NET_DBG1(dev, SLSI_TX, "Discard: Peer %pM NOT found\n", ehdr->h_dest);
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		return SLSI_NETIF_Q_DISCARD;
	}

	if (peer->qos_enabled) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
		if (peer->qos_map_set) { /*802.11 QoS for interworking*/
			skb->priority = cfg80211_classify8021d(skb, &peer->qos_map);
		} else
#endif
		{
#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
			/* Boost DNS/mDNS/TCP-SYN so connections set up quickly */
			if ((proto == ETH_P_IP && slsi_is_dns_packet(skb->data)) ||
			    (proto == ETH_P_IP && slsi_is_mdns_packet(skb->data)) ||
			    (proto == ETH_P_IP && slsi_is_tcp_sync_packet(dev, skb))) {
				skb->priority = FAPI_PRIORITY_QOS_UP7;
			} else
#endif
			{
#ifdef CONFIG_SCSC_USE_WMM_TOS
				skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
#else
				skb->priority = slsi_get_priority_from_tos_dscp(skb->data + ETH_HLEN, proto);
#endif
			}
		}
	} else{
		skb->priority = FAPI_PRIORITY_QOS_UP0;
	}

	/* Downgrade the priority if acm bit is set and tspec is not established */
	slsi_net_downgrade_pri(dev, peer, skb);

	netif_q = slsi_netif_get_peer_queue(peer->queueset, slsi_frame_priority_to_ac_queue(skb->priority));
	SLSI_NET_DBG3(dev, SLSI_TX, "prio:%d queue:%u\n", skb->priority, netif_q);
	slsi_spinlock_unlock(&ndev_vif->peer_lock);
	return netif_q;
}
+
+void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection)
+{
+ struct netdev_vif *netdev_vif = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+ struct sk_buff *skb_to_free = NULL;
+#endif
+ struct ethhdr *ehdr;
+ struct Qdisc *qd;
+ u32 num_pkts;
+ u16 staq;
+ u16 tdlsq;
+ u16 netq;
+ u16 i;
+ u16 j;
+ int index;
+ struct slsi_tcp_ack_s *tcp_ack;
+
+ /* Get the netdev queue number from queueset */
+ staq = slsi_netif_get_peer_queue(sta_peer->queueset, 0);
+ tdlsq = slsi_netif_get_peer_queue(tdls_peer->queueset, 0);
+
+ SLSI_NET_DBG1(dev, SLSI_TDLS, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq: %d, tdls_netq: %d\n",
+ connection, sta_peer->queueset, tdls_peer->queueset, staq, tdlsq);
+
+ /* Pause the TDLS queues and STA netdev queues */
+ slsi_tx_pause_queues(sdev);
+
+ /* walk through frames in TCP Ack suppression queue and change mapping to TDLS queue */
+ for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
+ tcp_ack = &netdev_vif->ack_suppression[index];
+ if (!tcp_ack && !tcp_ack->state)
+ continue;
+ slsi_spinlock_lock(&tcp_ack->lock);
+ skb_queue_walk(&tcp_ack->list, skb) {
+ SLSI_NET_DBG2(dev, SLSI_TDLS, "frame in TCP Ack list (peer:%pM)\n", eth_hdr(skb)->h_dest);
+ /* is it destined to TDLS peer? */
+ if (compare_ether_addr(tdls_peer->address, eth_hdr(skb)->h_dest) == 0) {
+ if (connection) {
+ /* TDLS setup: change the queue mapping to TDLS queue */
+ skb->queue_mapping += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
+ } else {
+ /* TDLS teardown: change the queue to STA queue */
+ skb->queue_mapping -= (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
+ }
+ }
+ }
+ slsi_spinlock_unlock(&tcp_ack->lock);
+ }
+
+ slsi_spinlock_lock(&netdev_vif->peer_lock);
+ /**
+ * For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
+ * For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
+ */
+ if (connection)
+ tdls_peer->valid = true;
+ else
+ tdls_peer->valid = false;
+
+ /* Move packets from netdev queues */
+ for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+ SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
+ i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
+
+ if (connection) {
+ /* Check if any packet is already avilable in TDLS queue (most likely from last session) */
+ if (dev->_tx[tdlsq + i].qdisc->q.qlen)
+ SLSI_NET_ERR(dev, "tdls_connection: Packet present in queue %d\n", tdlsq + i);
+
+ qd = dev->_tx[staq + i].qdisc;
+ /* Get the total number of packets in STAQ */
+ num_pkts = qd->q.qlen;
+
+ /* Check all the pkt in STAQ and move the TDLS pkts to TDSLQ */
+ for (j = 0; j < num_pkts; j++) {
+ qd = dev->_tx[staq + i].qdisc;
+ /* Dequeue the pkt form STAQ. This logic is similar to kernel API dequeue_skb() */
+ skb = qd->gso_skb;
+ if (skb) {
+ qd->gso_skb = NULL;
+ qd->q.qlen--;
+ } else {
+ skb = qd->dequeue(qd);
+ }
+
+ if (!skb) {
+ SLSI_NET_ERR(dev, "tdls_connection: STA NETQ skb is NULL\n");
+ break;
+ }
+
+ /* Change the queue mapping for the TDLS packets */
+ netq = skb->queue_mapping;
+ ehdr = (struct ethhdr *)skb->data;
+ if (compare_ether_addr(tdls_peer->address, ehdr->h_dest) == 0) {
+ netq += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
+ SLSI_NET_DBG3(dev, SLSI_TDLS, "NETQ%d: Queue mapping changed from %d to %d\n",
+ i, skb->queue_mapping, netq);
+ skb_set_queue_mapping(skb, netq);
+ }
+
+ qd = dev->_tx[netq].qdisc;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+ qd->enqueue(skb, qd, &skb_to_free);
+#else
+ /* If the netdev queue is already full then enqueue() will drop the skb */
+ qd->enqueue(skb, qd);
+#endif
+ }
+ } else {
+ num_pkts = dev->_tx[tdlsq + i].qdisc->q.qlen;
+ /* Move the packets from TDLS to STA queue */
+ for (j = 0; j < num_pkts; j++) {
+				/* Dequeue the pkt from TDLS_Q. This logic is similar to kernel API dequeue_skb() */
+ qd = dev->_tx[tdlsq + i].qdisc;
+ skb = qd->gso_skb;
+ if (skb) {
+ qd->gso_skb = NULL;
+ qd->q.qlen--;
+ } else {
+ skb = qd->dequeue(qd);
+ }
+
+ if (!skb) {
+ SLSI_NET_ERR(dev, "tdls_teardown: TDLS NETQ skb is NULL\n");
+ break;
+ }
+
+ /* Update the queue mapping */
+ skb_set_queue_mapping(skb, staq + i);
+
+ /* Enqueue the packet in STA queue */
+ qd = dev->_tx[staq + i].qdisc;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+ qd->enqueue(skb, qd, &skb_to_free);
+#else
+ /* If the netdev queue is already full then enqueue() will drop the skb */
+ qd->enqueue(skb, qd);
+#endif
+ }
+ }
+ SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
+ i, dev->_tx[tdlsq + i].qdisc->q.qlen, dev->_tx[staq + i].qdisc->q.qlen);
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
+ if (unlikely(skb_to_free))
+ kfree_skb_list(skb_to_free);
+#endif
+ slsi_spinlock_unlock(&netdev_vif->peer_lock);
+
+ /* Teardown - after teardown there should not be any packet in TDLS queues */
+ if (!connection)
+ for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+ if (dev->_tx[tdlsq + i].qdisc->q.qlen)
+ SLSI_NET_ERR(dev, "tdls_teardown: Packet present in NET queue %d\n", tdlsq + i);
+ }
+
+ /* Resume the STA and TDLS netdev queues */
+ slsi_tx_unpause_queues(sdev);
+}
+
+/**
+ * This is the main TX entry point for the driver.
+ *
+ * Ownership of the skb is transferred to another function ONLY IF such
+ * function was able to deal with that skb and ended with a SUCCESS ret code.
+ * Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
+ *
+ * In the context of this function:
+ * - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
+ * SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
+ * kernel netstack will receive back NETDEV_TX_OK too.
+ * - ownership is KEPT HERE by this function when lower layers fails somehow
+ * to deal with the transmission of the skb. In this case the skb WOULD HAVE
+ * NOT BEEN FREED by lower layers that instead returns a proper ERRCODE.
+ * - intermediate lower layer functions (NOT directly involved in failure or
+ * success) will relay any retcode up to this layer for evaluation.
+ *
+ * WHAT HAPPENS THEN, is ERRCODE-dependent, and at the moment:
+ * - ENOSPC: something related to queueing happened...this should be
+ * retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
+ * requeued by the Kernel NetStack itself, using the proper queue.
+ * As a consequence SKB is NOT FREED HERE !.
+ * - ANY OTHER ERR: all other errors are considered at the moment NOT
+ * recoverable and SO skbs are dropped(FREED) HERE...Kernel will receive
+ * the proper ERRCODE and stops dealing with the packet considering it
+ * consumed by lower layer. (same behavior as NETDEV_TX_OK)
+ *
+ * BIG NOTE:
+ * As detailed in Documentation/networking/drivers.txt the above behavior
+ * of returning NETDEV_TX_BUSY to trigger requeueing by the Kernel is
+ * discouraged and should be used ONLY in case of a real HARD error(?);
+ * the advised solution is to actively STOP the queues before finishing
+ * the available space and WAKING them up again when more free buffers
+ * would have arrived.
+ */
+static netdev_tx_t slsi_net_hw_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_dev *sdev = ndev_vif->sdev;
+	int r = NETDEV_TX_OK;
+	/* Non-NULL only when the skb had to be copied for re-alignment:
+	 * 'original_skb' then points at the skb handed over by the netstack
+	 * while 'skb' is the driver's aligned private copy.
+	 */
+	struct sk_buff *original_skb = NULL;
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	int known_users = 0;
+#endif
+	/* Keep the packet length. The packet length will be used to increment
+	 * stats for the netdev if the packet was successfully transmitted.
+	 * The ownership of the SKB is passed to lower layers, so we should
+	 * not refer the SKB after this point
+	 */
+	unsigned int packet_len = skb->len;
+	enum slsi_traffic_q traffic_q = slsi_frame_priority_to_ac_queue(skb->priority);
+
+	/* Hold a wakelock for the whole TX attempt; released on every exit path. */
+	slsi_wakelock(&sdev->wlan_wl);
+	slsi_skb_cb_init(skb);
+
+	/* Check for misaligned (oddly aligned) data.
+	 * The f/w requires 16 bit aligned.
+	 * This is a corner case - for example, the kernel can generate BPDU
+	 * that are oddly aligned. Therefore it is acceptable to copy these
+	 * frames to a 16 bit alignment.
+	 */
+	if ((uintptr_t)skb->data & 0x1) {
+		struct sk_buff *skb2 = NULL;
+		/* Received a socket buffer aligned on an odd address.
+		 * Re-align by asking for headroom.
+		 */
+		skb2 = skb_copy_expand(skb, SLSI_NETIF_SKB_HEADROOM, skb_tailroom(skb), GFP_ATOMIC);
+		if (skb2 && (!(((uintptr_t)skb2->data) & 0x1))) {
+			/* We should account for this duplication */
+			original_skb = skb;
+			skb = skb2;
+			SLSI_NET_DBG3(dev, SLSI_TX, "Oddly aligned skb realigned\n");
+		} else {
+			/* Drop the packet if we can't re-align. */
+			SLSI_NET_WARN(dev, "Oddly aligned skb failed realignment, dropping\n");
+			if (skb2) {
+				SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand didn't align for us\n");
+				slsi_kfree_skb(skb2);
+			} else {
+				SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand failed when trying to align\n");
+			}
+			r = -EFAULT;
+			goto evaluate;
+		}
+	}
+	slsi_dbg_track_skb(skb, GFP_ATOMIC);
+
+	/* Be defensive about the mac_header - some kernels have a bug where a
+	 * frame can be delivered to the driver with mac_header initialised
+	 * to ~0U and this causes a crash when the pointer is dereferenced to
+	 * access part of the Ethernet header.
+	 */
+	if (!skb_mac_header_was_set(skb))
+		skb_reset_mac_header(skb);
+
+	SLSI_NET_DBG3(dev, SLSI_TX, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb)->h_proto));
+
+	if (!ndev_vif->is_available) {
+		SLSI_NET_WARN(dev, "vif NOT available\n");
+		r = -EFAULT;
+		goto evaluate;
+	}
+	/* Packets steered to the discard queue by ndo_select_queue() are dropped. */
+	if (skb->queue_mapping == SLSI_NETIF_Q_DISCARD) {
+		SLSI_NET_WARN(dev, "Discard Queue :: Packet Dropped\n");
+		r = -EIO;
+		goto evaluate;
+	}
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	/* Snapshot the refcount so we can detect an unexpected free below. */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	known_users = refcount_read(&skb->users);
+#else
+	known_users = atomic_read(&skb->users);
+#endif
+#endif
+
+#ifndef CONFIG_ARM
+	/* TCP ACK suppression may absorb the skb (queued or dropped); a NULL
+	 * return means it took ownership and there is nothing left to send.
+	 */
+	skb = slsi_netif_tcp_ack_suppression_pkt(dev, skb);
+	if (!skb) {
+		slsi_wakeunlock(&sdev->wlan_wl);
+		if (original_skb)
+			slsi_kfree_skb(original_skb);
+		return NETDEV_TX_OK;
+	}
+#endif
+
+	/* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
+	r = slsi_tx_data(sdev, dev, skb);
+evaluate:
+	if (r == 0) {
+		/**
+		 * A copy has been passed down and successfully transmitted
+		 * and freed....here we free the original coming from the
+		 * upper network layers....if a copy was passed down.
+		 */
+		if (original_skb)
+			slsi_kfree_skb(original_skb);
+		/* skb freed by lower layers on success...enjoy */
+
+		ndev_vif->tx_packets[traffic_q]++;
+		ndev_vif->stats.tx_packets++;
+		ndev_vif->stats.tx_bytes += packet_len;
+		r = NETDEV_TX_OK;
+	} else {
+		/**
+		 * Failed to send:
+		 * - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
+		 *   NOT discarded by lower layers and NETDEV_TX_BUSY should
+		 *   be returned to upper layers: this will cause the skb
+		 *   (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
+		 *   to be requeued ...
+		 *   NOTE THAT it's the original skb that will be retried
+		 *   by upper netstack.
+		 *   THIS CONDITION SHOULD NOT BE REACHED...NEVER...see in
+		 *   the following.
+		 *
+		 * - with any other -ERR instead return the error: this
+		 *   anyway let the kernel think that the SKB has
+		 *   been consumed, and we drop the frame and free it.
+		 *
+		 * - a WARN_ON() takes care to ensure the SKB has NOT been
+		 *   freed by someone despite this was NOT supposed to happen,
+		 *   just before the actual freeing.
+		 *
+		 */
+		if (r == -ENOSPC) {
+			/* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
+			ndev_vif->stats.tx_fifo_errors++;
+			/* Free the local copy if any ... the netstack retries
+			 * the skb it originally handed us (original_skb), so
+			 * only the aligned duplicate is released here.
+			 */
+			if (original_skb)
+				slsi_kfree_skb(skb);
+			r = NETDEV_TX_BUSY;
+		} else {
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+			WARN_ON(known_users && refcount_read(&skb->users) != known_users);
+#else
+			WARN_ON(known_users && atomic_read(&skb->users) != known_users);
+#endif
+#endif
+			if (original_skb)
+				slsi_kfree_skb(original_skb);
+			slsi_kfree_skb(skb);
+			ndev_vif->stats.tx_dropped++;
+			/* We return the ORIGINAL Error 'r' anyway
+			 * BUT Kernel treats them as TX complete anyway
+			 * and assumes the SKB has been consumed.
+			 */
+			/* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
+		}
+	}
+	/* SKBs are always considered consumed if the driver
+	 * returns NETDEV_TX_OK.
+	 */
+	slsi_wakeunlock(&sdev->wlan_wl);
+	return r;
+}
+
+/* ndo_fix_features: adjust the feature set negotiated with the stack to
+ * match the driver's build-time capabilities.
+ */
+static netdev_features_t slsi_net_fix_features(struct net_device *dev, netdev_features_t features)
+{
+	SLSI_UNUSED_PARAMETER(dev);
+
+#ifdef CONFIG_SCSC_WLAN_SG
+	SLSI_NET_DBG1(dev, SLSI_RX, "Scatter-gather and GSO enabled\n");
+	features |= NETIF_F_SG | NETIF_F_GSO;
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
+	SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO enabled\n");
+	features |= NETIF_F_GRO;
+#else
+	SLSI_NET_DBG1(dev, SLSI_RX, "NAPI Rx GRO disabled\n");
+	features &= ~NETIF_F_GRO;
+#endif
+
+	return features;
+}
+
+/* ndo_set_rx_mode: snapshot the interface's subscribed multicast MAC
+ * addresses into ndev_vif->sta.regd_mc_addr[] (count in regd_mc_addr_count)
+ * so they can later be registered with the firmware. STATION vifs only;
+ * mDNS and (when IPv6 is enabled) our own solicited-node group are handled
+ * separately and therefore skipped here.
+ */
+static void slsi_set_multicast_list(struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	u8 count, i = 0;
+	/* IPv4 mDNS group 224.0.0.251 mapped to its multicast MAC */
+	u8 mdns_addr[ETH_ALEN] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
+
+#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
+	/* IPv4 multicast MAC prefix 01:00:5e */
+	u8 mc_addr_prefix[3] = { 0x01, 0x00, 0x5e };
+#else
+	/* IPv6 mDNS group ff02::fb mapped to its multicast MAC */
+	u8 mdns6_addr[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
+	const u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
+	u8 ipv6addr_suffix[3];
+#endif
+	struct netdev_hw_addr *ha;
+
+	if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
+		return;
+
+	if (!ndev_vif->is_available) {
+		SLSI_NET_DBG1(dev, SLSI_NETDEV, "vif NOT available\n");
+		return;
+	}
+
+	count = netdev_mc_count(dev);
+	if (!count)
+		goto exit; /* nothing subscribed: record a count of 0 */
+
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+	/* Last 3 bytes of our IPv6 address identify our own solicited-node
+	 * multicast group in the filter below.
+	 */
+	slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
+	memcpy(ipv6addr_suffix, &ndev_vif->ipv6address.s6_addr[13], 3);
+	slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
+#endif
+
+	netdev_for_each_mc_addr(ha, dev) {
+		/* NOTE: the 'if' below deliberately spans the preprocessor
+		 * branches - each branch supplies its own filter expression
+		 * for addresses that must NOT be recorded.
+		 */
+#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
+		if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) || /*mDns is handled separately*/
+		    (memcmp(ha->addr, mc_addr_prefix, 3))) { /*only consider IPv4 multicast addresses*/
+#else
+		if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) ||
+		    (!memcmp(ha->addr, mdns6_addr, ETH_ALEN)) || /*mDns is handled separately*/
+		    (!memcmp(ha->addr, solicited_node_addr, 3) &&
+		     !memcmp(&ha->addr[3], ipv6addr_suffix, 3))) { /* local multicast addr handled separately*/
+#endif
+
+			SLSI_NET_DBG3(dev, SLSI_NETDEV, "Drop MAC %pM\n", ha->addr);
+			continue;
+		}
+		/* Firmware table is bounded; excess entries are dropped with a warning. */
+		if (i == SLSI_MC_ADDR_ENTRY_MAX) {
+			SLSI_NET_WARN(dev, "MAC list has reached max limit (%d), actual count %d\n", SLSI_MC_ADDR_ENTRY_MAX, count);
+			break;
+		}
+
+		SLSI_NET_DBG3(dev, SLSI_NETDEV, "idx %d MAC %pM\n", i, ha->addr);
+		SLSI_ETHER_COPY(ndev_vif->sta.regd_mc_addr[i++], ha->addr);
+	}
+
+exit:
+	ndev_vif->sta.regd_mc_addr_count = i;
+}
+
+/* ndo_set_mac_address: copy the requested MAC straight into the netdev.
+ * No extra validation beyond what the core has already performed.
+ */
+static int slsi_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *mac_addr = addr;
+
+	SLSI_NET_DBG1(dev, SLSI_NETDEV, "slsi_set_mac_address %pM\n", mac_addr->sa_data);
+	SLSI_ETHER_COPY(dev->dev_addr, mac_addr->sa_data);
+	return 0;
+}
+
+/* Netdev callbacks shared by every SLSI WLAN interface; installed on each
+ * net_device by slsi_if_setup().
+ */
+static const struct net_device_ops slsi_netdev_ops = {
+	.ndo_open = slsi_net_open,
+	.ndo_stop = slsi_net_stop,
+	.ndo_start_xmit = slsi_net_hw_xmit,
+	.ndo_do_ioctl = slsi_net_ioctl,
+	.ndo_get_stats = slsi_net_get_stats,
+	.ndo_select_queue = slsi_net_select_queue,
+	.ndo_fix_features = slsi_net_fix_features,
+	.ndo_set_rx_mode = slsi_set_multicast_list,
+	.ndo_set_mac_address = slsi_set_mac_address,
+};
+
+/* alloc_netdev_mqs() setup callback: apply Ethernet defaults, arrange for
+ * the core to free the netdev on unregister, and install the driver ops.
+ */
+static void slsi_if_setup(struct net_device *dev)
+{
+	/* Standard Ethernet defaults (MTU, header ops, broadcast address, ...) */
+	ether_setup(dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
+	dev->needs_free_netdev = true;
+#else
+	dev->destructor = free_netdev;
+#endif
+
+	dev->netdev_ops = &slsi_netdev_ops;
+}
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+
+#ifdef CONFIG_SOC_EXYNOS9610
+#define SCSC_NETIF_RPS_CPUS_MASK "fe"
+#else
+#define SCSC_NETIF_RPS_CPUS_MASK "0"
+#endif
+
+/* Detach and free the RPS CPU map of the device's first RX queue, if any. */
+static void slsi_netif_rps_map_clear(struct net_device *dev)
+{
+	struct rps_map *old_map = rcu_dereference_protected(dev->_rx->rps_map, 1);
+
+	if (!old_map)
+		return;
+
+	RCU_INIT_POINTER(dev->_rx->rps_map, NULL);
+	kfree_rcu(old_map, rcu);
+	SLSI_NET_INFO(dev, "clear rps_cpus map\n");
+}
+
+/* Install an RPS CPU map for the device's first RX queue from a hex CPU
+ * bitmap string (same format as sysfs rps_cpus). Mirrors the kernel's
+ * net-sysfs store_rps_map() logic: the new map is published under a local
+ * spinlock and the old one freed after an RCU grace period.
+ * Returns 'len' on success or a negative errno.
+ */
+static int slsi_netif_rps_map_set(struct net_device *dev, char *buf, size_t len)
+{
+	struct rps_map *old_map, *map;
+	cpumask_var_t mask;
+	int err, cpu, i;
+	static DEFINE_SPINLOCK(rps_map_lock);
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+	if (err) {
+		free_cpumask_var(mask);
+		SLSI_NET_WARN(dev, "CPU bitmap parse failed\n");
+		return err;
+	}
+
+	/* Map sized for the requested CPUs; at least one cache line. */
+	map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), GFP_KERNEL);
+	if (!map) {
+		free_cpumask_var(mask);
+		SLSI_NET_WARN(dev, "CPU mask alloc failed\n");
+		return -ENOMEM;
+	}
+
+	/* Only CPUs that are currently online make it into the map. */
+	i = 0;
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
+		map->cpus[i++] = cpu;
+
+	if (i) {
+		map->len = i;
+	} else {
+		/* No usable CPU: publish a NULL map (RPS disabled). */
+		kfree(map);
+		map = NULL;
+	}
+
+	/* Swap in the new map; the spinlock serialises concurrent setters. */
+	spin_lock(&rps_map_lock);
+	old_map = rcu_dereference_protected(dev->_rx->rps_map, lockdep_is_held(&rps_map_lock));
+	rcu_assign_pointer(dev->_rx->rps_map, map);
+	spin_unlock(&rps_map_lock);
+
+	/* Keep the global rps_needed static key balanced with map presence. */
+	if (map)
+		static_key_slow_inc(&rps_needed);
+	if (old_map)
+		static_key_slow_dec(&rps_needed);
+
+	/* Old map may still be read by RCU readers; free after grace period. */
+	if (old_map)
+		kfree_rcu(old_map, rcu);
+
+	free_cpumask_var(mask);
+	SLSI_NET_INFO(dev, "rps_cpus map set(%s)\n", buf);
+	return len;
+}
+#endif
+
+/* Allocate and initialise the net_device for interface slot 'ifnum' and
+ * publish it in sdev->netdev[]. Caller must hold netdev_add_remove_mutex.
+ * The device is NOT registered with the kernel here (see
+ * slsi_netif_register*()). Returns 0 on success or a negative errno; on
+ * failure everything allocated here is torn down again.
+ */
+int slsi_netif_add_locked(struct slsi_dev *sdev, const char *name, int ifnum)
+{
+	struct net_device *dev = NULL;
+	struct netdev_vif *ndev_vif;
+	struct wireless_dev *wdev;
+	int alloc_size, txq_count = 0, ret;
+
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
+
+	/* Reject a NULL sdev, an out-of-range slot or an already occupied one.
+	 * NOTE(review): ifnum is compared with <= MAX elsewhere too, so
+	 * sdev->netdev[] is presumably sized MAX+1 (1-based slots) - confirm.
+	 */
+	if (WARN_ON(!sdev || ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES || sdev->netdev[ifnum]))
+		return -EINVAL;
+
+	alloc_size = sizeof(struct netdev_vif);
+
+	/* One queue block per potential peer on top of the fixed queues. */
+	txq_count = SLSI_NETIF_Q_PEER_START + (SLSI_NETIF_Q_PER_PEER * (SLSI_ADHOC_PEER_CONNECTIONS_MAX));
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
+	dev = alloc_netdev_mqs(alloc_size, name, NET_NAME_PREDICTABLE, slsi_if_setup, txq_count, 1);
+#else
+	dev = alloc_netdev_mqs(alloc_size, name, slsi_if_setup, txq_count, 1);
+#endif
+	if (!dev) {
+		SLSI_ERR(sdev, "Failed to allocate private data for netdev\n");
+		return -ENOMEM;
+	}
+
+	/* Reserve space in skb for later use */
+	dev->needed_headroom = SLSI_NETIF_SKB_HEADROOM;
+	dev->needed_tailroom = SLSI_NETIF_SKB_TAILROOM;
+
+	/* Resolve "%d" style templates (e.g. "wlan%d") to a concrete name. */
+	ret = dev_alloc_name(dev, dev->name);
+	if (ret < 0)
+		goto exit_with_error;
+
+	ndev_vif = netdev_priv(dev);
+	memset(ndev_vif, 0x00, sizeof(*ndev_vif));
+	SLSI_MUTEX_INIT(ndev_vif->vif_mutex);
+	SLSI_MUTEX_INIT(ndev_vif->scan_mutex);
+	SLSI_MUTEX_INIT(ndev_vif->scan_result_mutex);
+	skb_queue_head_init(&ndev_vif->ba_complete);
+	slsi_sig_send_init(&ndev_vif->sig_wait);
+	ndev_vif->sdev = sdev;
+	ndev_vif->ifnum = ifnum;
+	ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
+#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
+	slsi_spinlock_create(&ndev_vif->ipv6addr_lock);
+#endif
+	slsi_spinlock_create(&ndev_vif->peer_lock);
+	atomic_set(&ndev_vif->ba_flush, 0);
+
+	/* Reserve memory for the peer database - Not required for p2p0/nan interface */
+	if (!(SLSI_IS_VIF_INDEX_P2P(ndev_vif) || SLSI_IS_VIF_INDEX_NAN(ndev_vif))) {
+		int queueset;
+
+		for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
+			ndev_vif->peer_sta_record[queueset] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queueset]), GFP_KERNEL);
+
+			if (!ndev_vif->peer_sta_record[queueset]) {
+				int j;
+
+				SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queueset:%d)\n", queueset);
+
+				/* Free previously allocated peer database memory till current queueset */
+				for (j = 0; j < queueset; j++) {
+					kfree(ndev_vif->peer_sta_record[j]);
+					ndev_vif->peer_sta_record[j] = NULL;
+				}
+
+				ret = -ENOMEM;
+				goto exit_with_error;
+			}
+		}
+	}
+
+	/* The default power mode in host*/
+	if (slsi_is_rf_test_mode_enabled()) {
+		SLSI_NET_ERR(dev, "*#rf# rf test mode set is enabled.\n");
+		ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
+	} else {
+		ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
+	}
+
+	INIT_LIST_HEAD(&ndev_vif->sta.network_map);
+	SLSI_DBG1(sdev, SLSI_NETDEV, "ifnum=%d\n", ndev_vif->ifnum);
+
+	/* For HS2 interface */
+	if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
+		sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
+
+	/* For p2p0 interface */
+	else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
+		ret = slsi_p2p_init(sdev, ndev_vif);
+		if (ret)
+			goto exit_with_error;
+	}
+
+	INIT_DELAYED_WORK(&ndev_vif->scan_timeout_work, slsi_scan_ind_timeout_handle);
+
+	/* Per-vif RX work queues for data and MLME signals. */
+	ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_data, "slsi_wlan_rx_data", slsi_rx_netdev_data_work);
+	if (ret)
+		goto exit_with_error;
+
+	ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_mlme, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work);
+	if (ret) {
+		slsi_skb_work_deinit(&ndev_vif->rx_data);
+		goto exit_with_error;
+	}
+
+	/* Hook the cfg80211 wireless_dev up to the netdev. */
+	wdev = &ndev_vif->wdev;
+
+	dev->ieee80211_ptr = wdev;
+	wdev->wiphy = sdev->wiphy;
+	wdev->netdev = dev;
+	wdev->iftype = NL80211_IFTYPE_STATION;
+	SET_NETDEV_DEV(dev, sdev->dev);
+
+	/* We are not ready to send data yet. */
+	netif_carrier_off(dev);
+
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+	/* The sharing AP interface reuses the P2P slot's MAC address. */
+	if (strcmp(name, CONFIG_SCSC_AP_INTERFACE_NAME) == 0)
+		SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[SLSI_NET_INDEX_P2P]);
+	else
+		SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
+#else
+	SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
+#endif
+	SLSI_DBG1(sdev, SLSI_NETDEV, "Add:%pM\n", dev->dev_addr);
+	/* Publish the netdev; RCU readers may see it from this point on. */
+	rcu_assign_pointer(sdev->netdev[ifnum], dev);
+	ndev_vif->delete_probe_req_ies = false;
+	ndev_vif->probe_req_ies = NULL;
+	ndev_vif->probe_req_ie_len = 0;
+	ndev_vif->drv_in_p2p_procedure = false;
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+	slsi_netif_rps_map_set(dev, SCSC_NETIF_RPS_CPUS_MASK, strlen(SCSC_NETIF_RPS_CPUS_MASK));
+#endif
+	return 0;
+
+exit_with_error:
+	mutex_lock(&sdev->netdev_remove_mutex);
+	free_netdev(dev);
+	mutex_unlock(&sdev->netdev_remove_mutex);
+	return ret;
+}
+
+/* Create the dynamic AP/P2P interface in the SLSI_NET_INDEX_P2PX_SWLAN slot
+ * (used for Wi-Fi sharing / MHS). Returns the interface index on success or
+ * a negative errno.
+ */
+int slsi_netif_dynamic_iface_add(struct slsi_dev *sdev, const char *name)
+{
+	int index = -EINVAL;
+	int err;
+
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+	/* Static-interface builds: only (re)create the slot when it still
+	 * holds the pre-allocated AP netdev; otherwise index stays -EINVAL.
+	 */
+	if (sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN] == sdev->netdev_ap) {
+		rcu_assign_pointer(sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN], NULL);
+		err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
+		index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
+	}
+#else
+	err = slsi_netif_add_locked(sdev, name, SLSI_NET_INDEX_P2PX_SWLAN);
+	index = err ? err : SLSI_NET_INDEX_P2PX_SWLAN;
+#endif
+
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	return index;
+}
+
+/* Create all statically configured interfaces (wlan, p2p and - depending on
+ * build options - the sharing AP and NAN interfaces). On any failure every
+ * interface created so far is removed again. Returns 0 or -EINVAL.
+ */
+int slsi_netif_init(struct slsi_dev *sdev)
+{
+	int i;
+
+	SLSI_DBG3(sdev, SLSI_NETDEV, "\n");
+
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+
+	/* Initialize all other netdev interfaces to NULL */
+	for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
+		RCU_INIT_POINTER(sdev->netdev[i], NULL);
+
+	if (slsi_netif_add_locked(sdev, "wlan%d", SLSI_NET_INDEX_WLAN) != 0) {
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+		return -EINVAL;
+	}
+
+	/* Each subsequent failure unwinds the interfaces added before it;
+	 * slsi_netif_remove_locked() requires the RTNL lock to be held.
+	 */
+	if (slsi_netif_add_locked(sdev, "p2p%d", SLSI_NET_INDEX_P2P) != 0) {
+		rtnl_lock();
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
+		rtnl_unlock();
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+		return -EINVAL;
+	}
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+	if (slsi_netif_add_locked(sdev, CONFIG_SCSC_AP_INTERFACE_NAME, SLSI_NET_INDEX_P2PX_SWLAN) != 0) {
+		rtnl_lock();
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
+		rtnl_unlock();
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+		return -EINVAL;
+	}
+#endif
+#endif
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+	if (slsi_netif_add_locked(sdev, "nan%d", SLSI_NET_INDEX_NAN) != 0) {
+		rtnl_lock();
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2P]);
+#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
+#if defined(CONFIG_SCSC_WLAN_MHS_STATIC_INTERFACE) || (defined(ANDROID_VERSION) && ANDROID_VERSION >= 90000)
+		slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN]);
+#endif
+#endif
+		rtnl_unlock();
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+		return -EINVAL;
+	}
+#endif
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	return 0;
+}
+
+/* Register 'dev' with the kernel. Caller must hold both the RTNL lock and
+ * netdev_add_remove_mutex. Registering an already-registered device is
+ * logged but treated as success. Returns 0 or register_netdevice() error.
+ */
+static int slsi_netif_register_locked(struct slsi_dev *sdev, struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int err;
+
+	WARN_ON(!rtnl_is_locked());
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
+
+	if (atomic_read(&ndev_vif->is_registered)) {
+		SLSI_NET_ERR(dev, "Register:%pM Failed: Already registered\n", dev->dev_addr);
+		return 0;
+	}
+
+	err = register_netdevice(dev);
+	if (!err) {
+		atomic_set(&ndev_vif->is_registered, 1);
+		return 0;
+	}
+
+	SLSI_NET_ERR(dev, "Register:%pM Failed\n", dev->dev_addr);
+	return err;
+}
+
+/* Register variant for callers that already hold the RTNL lock; only the
+ * add/remove mutex is taken here.
+ */
+int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
+{
+	int ret;
+
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	ret = slsi_netif_register_locked(sdev, dev);
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+	return ret;
+}
+
+/* Register variant for callers holding neither lock: takes the RTNL lock
+ * and the add/remove mutex around the locked register.
+ */
+int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev)
+{
+	int ret;
+
+	rtnl_lock();
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	ret = slsi_netif_register_locked(sdev, dev);
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	rtnl_unlock();
+
+	return ret;
+}
+
+/* Tear down 'dev': quiesce TX, unpublish it from sdev->netdev[], release all
+ * per-vif resources and finally unregister (or free) the netdev. Caller must
+ * hold both the RTNL lock and netdev_add_remove_mutex.
+ */
+void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev)
+{
+	int i;
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+	SLSI_NET_DBG1(dev, SLSI_NETDEV, "Unregister:%pM\n", dev->dev_addr);
+
+	WARN_ON(!rtnl_is_locked());
+	WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
+
+	if (atomic_read(&ndev_vif->is_registered)) {
+		/* Stop the stack from submitting more packets first. */
+		netif_tx_disable(dev);
+		netif_carrier_off(dev);
+
+		slsi_stop_net_dev(sdev, dev);
+	}
+
+	/* Unpublish, then wait for all RCU readers before freeing anything. */
+	rcu_assign_pointer(sdev->netdev[ndev_vif->ifnum], NULL);
+	synchronize_rcu();
+
+	/* Free memory of the peer database - Not required for p2p0 interface.
+	 * NOTE(review): add path also skips allocation for NAN; freeing here
+	 * is still safe for NAN since kfree(NULL) is a no-op.
+	 */
+	if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
+		int queueset;
+
+		for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
+			kfree(ndev_vif->peer_sta_record[queueset]);
+			ndev_vif->peer_sta_record[queueset] = NULL;
+		}
+	}
+
+	if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
+		slsi_p2p_deinit(sdev, ndev_vif);
+	} else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+		sdev->wlan_unsync_vif_state = WLAN_UNSYNC_NO_VIF;
+		ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
+	}
+
+	cancel_delayed_work(&ndev_vif->scan_timeout_work);
+	ndev_vif->scan[SLSI_SCAN_HW_ID].requeue_timeout_work = false;
+
+	/* Stop the RX workers before discarding their remaining work. */
+	slsi_skb_work_deinit(&ndev_vif->rx_data);
+	slsi_skb_work_deinit(&ndev_vif->rx_mlme);
+
+	for (i = 0; i < SLSI_SCAN_MAX; i++)
+		slsi_purge_scan_results(ndev_vif, i);
+
+	slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
+	slsi_roam_channel_cache_prune(dev, 0);
+	kfree(ndev_vif->probe_req_ies);
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+	slsi_netif_rps_map_clear(dev);
+#endif
+	if (atomic_read(&ndev_vif->is_registered)) {
+		/* unregister_netdevice() triggers the netdev's free path
+		 * (needs_free_netdev/destructor set in slsi_if_setup()).
+		 */
+		atomic_set(&ndev_vif->is_registered, 0);
+		unregister_netdevice(dev);
+	} else {
+		/* Never registered: free it ourselves. */
+		mutex_lock(&sdev->netdev_remove_mutex);
+		free_netdev(dev);
+		mutex_unlock(&sdev->netdev_remove_mutex);
+	}
+}
+
+/* Remove variant for callers that already hold the RTNL lock; only the
+ * add/remove mutex is taken here.
+ */
+void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
+{
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	slsi_netif_remove_locked(sdev, dev);
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+}
+
+/* Remove variant for callers holding neither lock: takes the RTNL lock and
+ * the add/remove mutex around the locked remove.
+ */
+void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev)
+{
+	rtnl_lock();
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	slsi_netif_remove_locked(sdev, dev);
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	rtnl_unlock();
+}
+
+/* Tear down every interface still present in sdev->netdev[] and clear the
+ * cached AP netdev pointer.
+ */
+void slsi_netif_remove_all(struct slsi_dev *sdev)
+{
+	int ifnum;
+
+	SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
+	rtnl_lock();
+	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+	/* Interface slots are 1-based. */
+	for (ifnum = 1; ifnum <= CONFIG_SCSC_WLAN_MAX_INTERFACES; ifnum++) {
+		if (sdev->netdev[ifnum])
+			slsi_netif_remove_locked(sdev, sdev->netdev[ifnum]);
+	}
+	rcu_assign_pointer(sdev->netdev_ap, NULL);
+	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+	rtnl_unlock();
+}
+
+/* Counterpart of slsi_netif_init(): removing every netdev is the whole of
+ * the deinit for this layer.
+ */
+void slsi_netif_deinit(struct slsi_dev *sdev)
+{
+	SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
+	slsi_netif_remove_all(sdev);
+}
+
+#ifndef CONFIG_ARM
+static int slsi_netif_tcp_ack_suppression_start(struct net_device *dev)
+{
+ int index;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_tcp_ack_s *tcp_ack;
+
+ ndev_vif->last_tcp_ack = NULL;
+ for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
+ tcp_ack = &ndev_vif->ack_suppression[index];
+ tcp_ack->dport = 0;
+ tcp_ack->daddr = 0;
+ tcp_ack->sport = 0;
+ tcp_ack->saddr = 0;
+ tcp_ack->ack_seq = 0;
+ tcp_ack->count = 0;
+ tcp_ack->max = 0;
+ tcp_ack->age = 0;
+ skb_queue_head_init(&tcp_ack->list);
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+ timer_setup(&tcp_ack->timer, slsi_netif_tcp_ack_suppression_timeout, 0);
+#else
+ tcp_ack->timer.function = slsi_netif_tcp_ack_suppression_timeout;
+ tcp_ack->timer.data = (unsigned long)tcp_ack;
+ init_timer(&tcp_ack->timer);
+#endif
+ tcp_ack->state = 1;
+ slsi_spinlock_create(&tcp_ack->lock);
+ }
+
+ memset(&ndev_vif->tcp_ack_stats, 0, sizeof(struct slsi_tcp_ack_stats));
+ return 0;
+}
+
+/* Disable TCP ACK suppression on this vif: stop every flow's flush timer,
+ * mark the record unusable and discard any ACKs still queued in it.
+ * Always returns 0.
+ */
+static int slsi_netif_tcp_ack_suppression_stop(struct net_device *dev)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	int i;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+	for (i = 0; i < TCP_ACK_SUPPRESSION_RECORDS_MAX; i++) {
+		struct slsi_tcp_ack_s *record = &ndev_vif->ack_suppression[i];
+
+		/* Stop the timer first so it cannot run while the record is
+		 * being emptied.
+		 */
+		del_timer_sync(&record->timer);
+		slsi_spinlock_lock(&record->lock);
+		record->state = 0;
+		skb_queue_purge(&record->list);
+		slsi_spinlock_unlock(&record->lock);
+	}
+	ndev_vif->last_tcp_ack = NULL;
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return 0;
+}
+
+/* Flush timer for a TCP ACK suppression record: transmit every ACK still
+ * held in the record's list. Per the TX ownership contract (see
+ * slsi_net_hw_xmit), slsi_tx_data() frees the skb only on success (0);
+ * on ANY error the skb remains ours and must be freed here - we cannot
+ * return NETDEV_TX_BUSY from timer context. The previous code freed the
+ * skb only for -ENOSPC and leaked it for every other error code.
+ */
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+static void slsi_netif_tcp_ack_suppression_timeout(struct timer_list *t)
+#else
+static void slsi_netif_tcp_ack_suppression_timeout(unsigned long data)
+#endif
+{
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+	struct slsi_tcp_ack_s *tcp_ack = from_timer(tcp_ack, t, timer);
+#else
+	struct slsi_tcp_ack_s *tcp_ack = (struct slsi_tcp_ack_s *)data;
+#endif
+	struct sk_buff *skb;
+	struct netdev_vif *ndev_vif;
+	struct slsi_dev *sdev;
+	int r;
+
+	if (!tcp_ack)
+		return;
+
+	/* Record disabled by slsi_netif_tcp_ack_suppression_stop(). */
+	if (!tcp_ack->state)
+		return;
+
+	slsi_spinlock_lock(&tcp_ack->lock);
+	while ((skb = skb_dequeue(&tcp_ack->list)) != NULL) {
+		tcp_ack->count = 0;
+
+		/* Without a netdev we cannot transmit; drop and bail out. */
+		if (!skb->dev) {
+			kfree_skb(skb);
+			slsi_spinlock_unlock(&tcp_ack->lock);
+			return;
+		}
+		ndev_vif = netdev_priv(skb->dev);
+		sdev = ndev_vif->sdev;
+		ndev_vif->tcp_ack_stats.tack_timeout++;
+
+		r = slsi_tx_data(sdev, skb->dev, skb);
+		if (r == 0) {
+			/* Lower layers consumed and freed the skb. */
+			ndev_vif->tcp_ack_stats.tack_sent++;
+			tcp_ack->last_sent = ktime_get();
+		} else {
+			/* Lower layers keep the skb alive on every error
+			 * (queue full or otherwise); free it here to avoid a
+			 * leak since the timer cannot requeue to the stack.
+			 */
+			ndev_vif->tcp_ack_stats.tack_dropped++;
+			slsi_kfree_skb(skb);
+		}
+	}
+	slsi_spinlock_unlock(&tcp_ack->lock);
+}
+
+/* Walk the TCP option list of @skb and return the value of @option:
+ * MSS value, window-scale shift, or 1 if SACK-permitted is present.
+ * Returns 0 when the option is absent or the option list is malformed.
+ */
+static int slsi_netif_tcp_ack_suppression_option(struct sk_buff *skb, u32 option)
+{
+	unsigned char *options;
+	u32 optlen = 0, len = 0;
+
+	/* doff > 5 words means options follow the 20-byte base header. */
+	if (tcp_hdr(skb)->doff > 5)
+		optlen = (tcp_hdr(skb)->doff - 5) * 4;
+
+	options = ((u8 *)tcp_hdr(skb)) + TCP_ACK_SUPPRESSION_OPTIONS_OFFSET;
+
+	while (optlen > 0) {
+		switch (options[0]) {
+		case TCP_ACK_SUPPRESSION_OPTION_EOL:
+			return 0;
+		case TCP_ACK_SUPPRESSION_OPTION_NOP:
+			/* NOP is a single padding byte with no length field. */
+			len = 1;
+			break;
+		case TCP_ACK_SUPPRESSION_OPTION_MSS:
+			if (option == TCP_ACK_SUPPRESSION_OPTION_MSS)
+				return ((options[2] << 8) | options[3]);
+			len = options[1];
+			break;
+		case TCP_ACK_SUPPRESSION_OPTION_WINDOW:
+			if (option == TCP_ACK_SUPPRESSION_OPTION_WINDOW)
+				return options[2];
+			/* Window scale is a 3-byte option (kind, len, shift);
+			 * advance by its length field like every other option.
+			 * The previous hard-coded len = 1 desynchronised the
+			 * parser, re-reading the length and shift bytes as
+			 * option kinds for any option that followed.
+			 */
+			len = options[1];
+			break;
+		case TCP_ACK_SUPPRESSION_OPTION_SACK:
+			if (option == TCP_ACK_SUPPRESSION_OPTION_SACK)
+				return 1;
+			len = options[1];
+			break;
+		default:
+			len = options[1];
+			break;
+		}
+		/* if length field in TCP options is 0, or greater than
+		 * total options length, then options are incorrect; return here
+		 */
+		if ((len == 0) || (len > optlen)) {
+			SLSI_DBG_HEX_NODEV(SLSI_TX, skb->data, skb->len < 128 ? skb->len : 128, "SKB:\n");
+			return 0;
+		}
+		optlen -= len;
+		options += len;
+	}
+	return 0;
+}
+
+/* Handle an outgoing TCP SYN: claim a flow record for the new connection.
+ * Walks the record table, first recovering any record whose flow has been
+ * idle for longer than the unused timeout, and installs the 4-tuple plus
+ * per-flow parameters (MSS, window scaling) into the first free record.
+ * If every record is busy the flow simply goes unsuppressed.
+ */
+static void slsi_netif_tcp_ack_suppression_syn(struct net_device *dev, struct sk_buff *skb)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_tcp_ack_s *tcp_ack;
+	int index;
+
+	SLSI_NET_DBG2(dev, SLSI_TX, "\n");
+	for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
+		tcp_ack = &ndev_vif->ack_suppression[index];
+		slsi_spinlock_lock(&tcp_ack->lock);
+
+		/* Suppression disabled on this vif: nothing to record. */
+		if (!tcp_ack->state) {
+			slsi_spinlock_unlock(&tcp_ack->lock);
+			return;
+		}
+		/* Recover old/hung/unused record. */
+		if (tcp_ack->daddr) {
+			if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= TCP_ACK_SUPPRESSION_RECORD_UNUSED_TIMEOUT * 1000) {
+				SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
+				skb_queue_purge(&tcp_ack->list);
+				tcp_ack->dport = 0;
+				tcp_ack->sport = 0;
+				tcp_ack->daddr = 0;
+				tcp_ack->saddr = 0;
+				tcp_ack->count = 0;
+				tcp_ack->ack_seq = 0;
+				del_timer(&tcp_ack->timer);
+			}
+		}
+
+		/* daddr == 0 marks a free record: claim it for this flow. */
+		if (tcp_ack->daddr == 0) {
+			SLSI_NET_DBG2(dev, SLSI_TX, "add at %d (%pI4.%d > %pI4.%d)\n", index, &ip_hdr(skb)->saddr, ntohs(tcp_hdr(skb)->source), &ip_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest));
+			tcp_ack->daddr = ip_hdr(skb)->daddr;
+			tcp_ack->saddr = ip_hdr(skb)->saddr;
+			tcp_ack->dport = tcp_hdr(skb)->dest;
+			tcp_ack->sport = tcp_hdr(skb)->source;
+			tcp_ack->count = 0;
+			tcp_ack->ack_seq = 0;
+			tcp_ack->slow_start_count = 0;
+			tcp_ack->tcp_slow_start = true;
+			/* In monitor mode max/age are adapted at runtime;
+			 * otherwise fixed module-parameter values apply.
+			 */
+			if (tcp_ack_suppression_monitor) {
+				tcp_ack->max = 0;
+				tcp_ack->age = 0;
+			} else {
+				tcp_ack->max = tcp_ack_suppression_max;
+				tcp_ack->age = tcp_ack_suppression_timeout;
+			}
+			tcp_ack->last_sent = ktime_get();
+
+			if (tcp_ack_suppression_monitor) {
+				tcp_ack->last_sample_time = ktime_get();
+				tcp_ack->last_ack_seq = 0;
+				tcp_ack->last_tcp_rate = 0;
+				tcp_ack->num_bytes = 0;
+				tcp_ack->hysteresis = 0;
+			}
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+			tcp_ack->stream_id = index;
+#endif
+			/* read and validate the window scaling multiplier */
+			tcp_ack->window_multiplier = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_WINDOW);
+			if (tcp_ack->window_multiplier > 14)
+				tcp_ack->window_multiplier = 0;
+			tcp_ack->mss = slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_MSS);
+			SLSI_NET_DBG2(dev, SLSI_TX, "options: mss:%u, window:%u\n", tcp_ack->mss, tcp_ack->window_multiplier);
+			SCSC_HIP4_SAMPLER_TCP_SYN(ndev_vif->sdev->minor_prof, index, tcp_ack->mss);
+			SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, index, be32_to_cpu(tcp_hdr(skb)->seq));
+			slsi_spinlock_unlock(&tcp_ack->lock);
+			return;
+		}
+		slsi_spinlock_unlock(&tcp_ack->lock);
+	}
+}
+
+/* Handle an outgoing TCP FIN: release the flow record that matches this
+ * connection, discarding any ACKs still queued and stopping its timer.
+ * NOTE(review): the match is on destination port + address only, not the
+ * full 4-tuple; presumably one flow per destination is assumed - two local
+ * sockets to the same remote endpoint would share/clear one record. Confirm.
+ */
+static void slsi_netif_tcp_ack_suppression_fin(struct net_device *dev, struct sk_buff *skb)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	struct slsi_tcp_ack_s *tcp_ack;
+	int index;
+
+	SLSI_NET_DBG2(dev, SLSI_TX, "\n");
+	for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
+		tcp_ack = &ndev_vif->ack_suppression[index];
+		slsi_spinlock_lock(&tcp_ack->lock);
+
+		if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
+		    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
+			SLSI_NET_DBG2(dev, SLSI_TX, "delete at %d (%pI4.%d > %pI4.%d)\n", index, &tcp_ack->saddr, ntohs(tcp_ack->sport), &tcp_ack->daddr, ntohs(tcp_ack->dport));
+			/* Drop pending ACKs and return the record to the
+			 * free state (daddr == 0).
+			 */
+			skb_queue_purge(&tcp_ack->list);
+			tcp_ack->dport = 0;
+			tcp_ack->sport = 0;
+			tcp_ack->daddr = 0;
+			tcp_ack->saddr = 0;
+			tcp_ack->count = 0;
+			tcp_ack->ack_seq = 0;
+
+			if (tcp_ack_suppression_monitor) {
+				tcp_ack->last_ack_seq = 0;
+				tcp_ack->last_tcp_rate = 0;
+				tcp_ack->num_bytes = 0;
+				tcp_ack->hysteresis = 0;
+			}
+
+			del_timer(&tcp_ack->timer);
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+			tcp_ack->stream_id = 0;
+#endif
+			SCSC_HIP4_SAMPLER_TCP_FIN(ndev_vif->sdev->minor_prof, index);
+			slsi_spinlock_unlock(&tcp_ack->lock);
+			return;
+		}
+		slsi_spinlock_unlock(&tcp_ack->lock);
+	}
+}
+
/* TCP ACK suppression on the transmit path.
 *
 * Decides, per outgoing skb, whether a pure TCP ACK may be cached
 * (suppressed) instead of transmitted immediately. Returns:
 *   - the input skb unchanged when it must be sent now,
 *   - a previously cached skb when the cache is flushed,
 *   - NULL (0) when the skb was cached for later transmission.
 *
 * Locking: once a matching record is found its spinlock is HELD for
 * the remainder of the function; every exit path below that point
 * must (and does) unlock it.
 */
static struct sk_buff *slsi_netif_tcp_ack_suppression_pkt(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	int index, found;
	struct slsi_tcp_ack_s *tcp_ack;
	int forward_now = 0, flush = 0;
	struct sk_buff *cskb = 0;
	u32 tcp_recv_window_size = 0;

	/* Module-parameter kill switches. */
	if (tcp_ack_suppression_disable)
		return skb;

	if (tcp_ack_suppression_disable_2g && !SLSI_IS_VIF_CHANNEL_5G(ndev_vif))
		return skb;

	/* for AP type (AP or P2P Go) check if the packet is local or intra BSS. If intra BSS then
	 * the IP header and TCP header are not set; so return the SKB
	 */
	if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (compare_ether_addr(eth_hdr(skb)->h_source, dev->dev_addr) != 0))
		return skb;

	/* Return SKB that doesn't match. */
	if (be16_to_cpu(eth_hdr(skb)->h_proto) != ETH_P_IP)
		return skb;
	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return skb;
	if (!skb_transport_header_was_set(skb))
		return skb;
	/* SYN/FIN maintain the connection records, then go out untouched. */
	if (tcp_hdr(skb)->syn) {
		slsi_netif_tcp_ack_suppression_syn(dev, skb);
		return skb;
	}
	if (tcp_hdr(skb)->fin) {
		slsi_netif_tcp_ack_suppression_fin(dev, skb);
		return skb;
	}
	/* Only plain ACKs are candidates; RST/URG must never be delayed. */
	if (!tcp_hdr(skb)->ack)
		return skb;
	if (tcp_hdr(skb)->rst)
		return skb;
	if (tcp_hdr(skb)->urg)
		return skb;

	ndev_vif->tcp_ack_stats.tack_acks++;
	/* If we find a record, leave the spinlock taken until the end of the function. */
	found = 0;
	/* Fast path: most ACKs belong to the same stream as the last one. */
	if (ndev_vif->last_tcp_ack) {
		tcp_ack = ndev_vif->last_tcp_ack;
		slsi_spinlock_lock(&tcp_ack->lock);
		if (!tcp_ack->state) {
			slsi_spinlock_unlock(&tcp_ack->lock);
			ndev_vif->tcp_ack_stats.tack_sent++;
			SLSI_ERR_NODEV("last_tcp_ack record not enabled\n");
			return skb;
		}
		if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
		    (tcp_ack->sport == tcp_hdr(skb)->source) &&
		    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
			found = 1;
			ndev_vif->tcp_ack_stats.tack_lastrecord++;
		} else {
			slsi_spinlock_unlock(&tcp_ack->lock);
		}
	}
	if (found == 0) {
		/* Search for an existing record on this connection. */
		for (index = 0; index < TCP_ACK_SUPPRESSION_RECORDS_MAX; index++) {
			tcp_ack = &ndev_vif->ack_suppression[index];

			slsi_spinlock_lock(&tcp_ack->lock);

			if (!tcp_ack->state) {
				slsi_spinlock_unlock(&tcp_ack->lock);
				ndev_vif->tcp_ack_stats.tack_sent++;
				SLSI_ERR_NODEV("tcp_ack record %d not enabled\n", index);
				return skb;
			}
			if ((tcp_ack->dport == tcp_hdr(skb)->dest) &&
			    (tcp_ack->sport == tcp_hdr(skb)->source) &&
			    (tcp_ack->daddr == ip_hdr(skb)->daddr)) {
				found = 1;
				ndev_vif->tcp_ack_stats.tack_searchrecord++;
				break;
			}
			slsi_spinlock_unlock(&tcp_ack->lock);
		}
		if (found == 0) {
			/* No record found, so We cannot suppress the ack, return. */
			ndev_vif->tcp_ack_stats.tack_norecord++;
			ndev_vif->tcp_ack_stats.tack_sent++;
			return skb;
		}
		/* Cache for the fast path on the next ACK. */
		ndev_vif->last_tcp_ack = tcp_ack;
	}

	/* If it is a DUP Ack, send straight away without flushing the cache. */
	if (be32_to_cpu(tcp_hdr(skb)->ack_seq) < tcp_ack->ack_seq) {
		/* check for wrap-around */
		if (((s32)((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - (u32)tcp_ack->ack_seq)) < 0) {
			ndev_vif->tcp_ack_stats.tack_dacks++;
			ndev_vif->tcp_ack_stats.tack_sent++;
			slsi_spinlock_unlock(&tcp_ack->lock);
			return skb;
		}
	}

	/* Has data, forward straight away. */
	if (be16_to_cpu(ip_hdr(skb)->tot_len) > ((ip_hdr(skb)->ihl * 4) + (tcp_hdr(skb)->doff * 4))) {
		SCSC_HIP4_SAMPLER_TCP_DATA(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, be32_to_cpu(tcp_hdr(skb)->seq));
		SCSC_HIP4_SAMPLER_TCP_CWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, (skb->sk) ? tcp_sk(skb->sk)->snd_cwnd : 0);
		SCSC_HIP4_SAMPLER_TCP_SEND_BUF(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, sysctl_tcp_wmem[2]);
		ndev_vif->tcp_ack_stats.tack_hasdata++;
		forward_now = 1;
		goto _forward_now;
	}

	/* PSH flag set, forward straight away. */
	if (tcp_hdr(skb)->psh) {
		ndev_vif->tcp_ack_stats.tack_psh++;
		forward_now = 1;
		goto _forward_now;
	}

	/* The ECE flag is set for Explicit Congestion Notification supporting connections when the ECT flag
	 * is set in the segment packet. We must forward ECE marked acks immediately for ECN to work.
	 */
	if (tcp_hdr(skb)->ece) {
		ndev_vif->tcp_ack_stats.tack_ece++;
		forward_now = 1;
		goto _forward_now;
	}

	if (tcp_ack_suppression_monitor) {
		/* Measure the throughput of TCP stream by monitoring the bytes Acked by each Ack over a
		 * sampling period. Based on throughput apply different degree of Ack suppression
		 */
		if (tcp_ack->last_ack_seq)
			tcp_ack->num_bytes += ((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq) - tcp_ack->last_ack_seq);

		tcp_ack->last_ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
		/* Re-tune suppression parameters once per sampling interval. */
		if (ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sample_time)) > tcp_ack_suppression_monitor_interval) {
			u16 acks_max;
			u32 tcp_rate = ((tcp_ack->num_bytes * 8) / (tcp_ack_suppression_monitor_interval * 1000));

			SLSI_NET_DBG2(dev, SLSI_TX, "hysteresis:%u total_bytes:%llu rate:%u Mbps\n",
				      tcp_ack->hysteresis, tcp_ack->num_bytes, tcp_rate);

			/* hysterisis - change only if the variation from last value is more than threshold */
			if ((abs(tcp_rate - tcp_ack->last_tcp_rate)) > tcp_ack->hysteresis) {
				if (tcp_rate >= tcp_ack_suppression_rate_very_high) {
					tcp_ack->max = tcp_ack_suppression_rate_very_high_acks;
					tcp_ack->age = tcp_ack_suppression_rate_very_high_timeout;
				} else if (tcp_rate >= tcp_ack_suppression_rate_high) {
					tcp_ack->max = tcp_ack_suppression_rate_high_acks;
					tcp_ack->age = tcp_ack_suppression_rate_high_timeout;
				} else if (tcp_rate >= tcp_ack_suppression_rate_low) {
					tcp_ack->max = tcp_ack_suppression_rate_low_acks;
					tcp_ack->age = tcp_ack_suppression_rate_low_timeout;
				} else {
					tcp_ack->max = 0;
					tcp_ack->age = 0;
				}

				/* Should not be suppressing Acks more than 20% of receiver window size
				 * doing so can lead to increased RTT and low transmission rate at the
				 * TCP sender
				 */
				if (tcp_ack->window_multiplier)
					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (2 << tcp_ack->window_multiplier);
				else
					tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
				SCSC_HIP4_SAMPLER_TCP_RWND(ndev_vif->sdev->minor_prof, tcp_ack->stream_id, tcp_recv_window_size);

				acks_max = (tcp_recv_window_size / 5) / (2 * tcp_ack->mss);
				if (tcp_ack->max > acks_max)
					tcp_ack->max = acks_max;
			}
			tcp_ack->hysteresis = tcp_rate / 5; /* 20% hysteresis */
			tcp_ack->last_tcp_rate = tcp_rate;
			tcp_ack->num_bytes = 0;
			tcp_ack->last_sample_time = ktime_get();
		}
	}

	/* Do not suppress Selective Acks. */
	if (slsi_netif_tcp_ack_suppression_option(skb, TCP_ACK_SUPPRESSION_OPTION_SACK)) {
		ndev_vif->tcp_ack_stats.tack_sacks++;

		/* A TCP selective Ack suggests TCP segment loss. The TCP sender
		 * may reduce congestion window and limit the number of segments
		 * it sends before waiting for Ack.
		 * It is ideal to switch off TCP ack suppression for certain time
		 * (being replicated here by tcp_ack_suppression_slow_start_acks
		 * count) and send as many Acks as possible to allow the cwnd to
		 * grow at the TCP sender
		 */
		tcp_ack->slow_start_count = 0;
		tcp_ack->tcp_slow_start = true;
		forward_now = 1;
		goto _forward_now;
	}

	/* Duplicate of the last cached ACK number: forward immediately. */
	if (be32_to_cpu(tcp_hdr(skb)->ack_seq) == tcp_ack->ack_seq) {
		ndev_vif->tcp_ack_stats.tack_dacks++;
		forward_now = 1;
		goto _forward_now;
	}

	/* When the TCP connection is made, wait until a number of Acks
	 * are sent before applying the suppression rules. It is to
	 * allow the cwnd to grow at a normal rate at the TCP sender
	 */
	if (tcp_ack->tcp_slow_start) {
		tcp_ack->slow_start_count++;
		if (tcp_ack->slow_start_count >= tcp_ack_suppression_slow_start_acks) {
			tcp_ack->slow_start_count = 0;
			tcp_ack->tcp_slow_start = false;
		}
		forward_now = 1;
		goto _forward_now;
	}

	/* do not suppress if so decided by the TCP monitor */
	if (tcp_ack_suppression_monitor && (!tcp_ack->max || !tcp_ack->age)) {
		forward_now = 1;
		goto _forward_now;
	}

	/* do not suppress delayed Acks that acknowledges for more than 2 TCP
	 * maximum size segments
	 */
	if (((u32)be32_to_cpu(tcp_hdr(skb)->ack_seq)) - (tcp_ack->ack_seq) > (2 * tcp_ack->mss)) {
		ndev_vif->tcp_ack_stats.tack_delay_acks++;
		forward_now = 1;
		goto _forward_now;
	}

	/* Do not suppress unless the receive window is large
	 * enough.
	 * With low receive window size the cwnd can't grow much.
	 * So suppressing Acks has a negative impact on sender
	 * rate as it increases the Round trip time measured at
	 * sender
	 */
	if (!tcp_ack_suppression_monitor) {
		if (tcp_ack->window_multiplier)
			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window) * (2 << tcp_ack->window_multiplier);
		else
			tcp_recv_window_size = be16_to_cpu(tcp_hdr(skb)->window);
		if (tcp_recv_window_size < tcp_ack_suppression_rcv_window * 1024) {
			ndev_vif->tcp_ack_stats.tack_low_window++;
			forward_now = 1;
			goto _forward_now;
		}
	}

	/* Cache entry too old (fixed-timeout mode only): flush it. */
	if (!tcp_ack_suppression_monitor && ktime_to_ms(ktime_sub(ktime_get(), tcp_ack->last_sent)) >= tcp_ack->age) {
		ndev_vif->tcp_ack_stats.tack_ktime++;
		forward_now = 1;
		goto _forward_now;
	}

	/* Test for a new cache */
	if (!skb_queue_len(&tcp_ack->list)) {
		/* Empty cache: store this ACK and arm the flush timer.
		 * Returning NULL tells the caller the skb was consumed.
		 */
		skb_queue_tail(&tcp_ack->list, skb);
		tcp_ack->count = 1;
		tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
		if (tcp_ack->age)
			mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
		slsi_spinlock_unlock(&tcp_ack->lock);
		return 0;
	}
_forward_now:
	/* Cumulative ACK supersedes the cached one: drop the old skb. */
	cskb = skb_dequeue(&tcp_ack->list);
	if (cskb) {
		if (tcp_ack_suppression_monitor && tcp_ack->age)
			mod_timer(&tcp_ack->timer, jiffies + msecs_to_jiffies(tcp_ack->age));
		ndev_vif->tcp_ack_stats.tack_suppressed++;
		slsi_kfree_skb(cskb);
	}
	skb_queue_tail(&tcp_ack->list, skb);
	tcp_ack->ack_seq = be32_to_cpu(tcp_hdr(skb)->ack_seq);
	tcp_ack->count++;
	if (forward_now) {
		flush = 1;
	} else {
		/* Reached the per-record suppression budget. */
		if (tcp_ack->count >= tcp_ack->max) {
			flush = 1;
			ndev_vif->tcp_ack_stats.tack_max++;
		}
	}
	if (!flush) {
		slsi_spinlock_unlock(&tcp_ack->lock);
		return 0;
	}
	/* Flush the cache. */
	cskb = skb_dequeue(&tcp_ack->list);
	tcp_ack->count = 0;

	if (tcp_ack->age)
		del_timer(&tcp_ack->timer);

	tcp_ack->last_sent = ktime_get();

	slsi_spinlock_unlock(&tcp_ack->lock);
	ndev_vif->tcp_ack_stats.tack_sent++;
	return cskb;
}
+#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h"
+
+#ifndef __SLSI_NETIF_H__
+#define __SLSI_NETIF_H__
+
+/* net_device queues
+ * ---------------------------------------------
+ * 1 Queue for Security frames (EAPOL, WAPI etc)
+ * 1 Queue for Broadcast/Multicast when in AP mode.
+ * 4 Queues per Peer
+ *
+ * STA/ADHOC
+ * Queues
+ * -------------------------------------------------------
+ * | 0 | 1 | 2 - 5 | 6 | 7 | 8 | 9 |
+ * -------------------------------------------------------
+ * | Eapol | Discard | Not | AC | AC | AC | AC |
+ * | Frames | Queue | Used | 0 | 1 | 2 | 3 |
+ * -------------------------------------------------------
+ *
+ * AP
+ * Queues
+ * --------------------------------------------------------
+ * | Peer 1 ACs (0 - 4) | Peer 2 ACs (0 - 4) | ......
+ * --------------------------------------------------------------------------------------------------------------------
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | ......
+ * --------------------------------------------------------------------------------------------------------------------
+ * | Eapol | Discard |B/M Cast |B/M Cast |B/M Cast |B/M Cast | AC | AC | AC | AC | AC | AC | AC | AC | ......
+ * | Frames | Queue | AC 0 | AC 1 | AC 2 | AC 3 | 0 | 1 | 2 | 3 | 0 | 1 | 2 | 3 | ......
+ * --------------------------------------------------------------------------------------------------------------------
+ */
+
+#ifndef ETH_P_PAE
+#define ETH_P_PAE 0x888e
+#endif
+#ifndef ETH_P_WAI
+#define ETH_P_WAI 0x88b4
+#endif
+
+#define SLSI_NETIF_Q_PRIORITY 0
+#define SLSI_NETIF_Q_DISCARD 1
+#define SLSI_NETIF_Q_MULTICAST_START 2
+#define SLSI_NETIF_Q_PEER_START 6
+
+#define SLSI_NETIF_Q_PER_PEER 4
+
+#define SLSI_NETIF_SKB_HEADROOM (68 + 160) /* sizeof ma_unitdata_req [36] + pad [30] + pad_words [2] */
+#define SLSI_NETIF_SKB_TAILROOM 0
+
+#define SLSI_IS_MULTICAST_QUEUE_MAPPING(queue) (queue >= SLSI_NETIF_Q_MULTICAST_START && queue < (SLSI_NETIF_Q_MULTICAST_START + SLSI_NETIF_Q_PER_PEER) ? 1 : 0)
+
+static inline u16 slsi_netif_get_peer_queue(s16 queueset, s16 ac)
+{
+ WARN_ON(ac > SLSI_NETIF_Q_PER_PEER);
+ return SLSI_NETIF_Q_PEER_START + (queueset * SLSI_NETIF_Q_PER_PEER) + ac;
+}
+
+/* queueset is one less than the assigned aid. */
+static inline unsigned short slsi_netif_get_qs_from_queue(short queue, short ac)
+{
+ return ((queue - ac - SLSI_NETIF_Q_PEER_START) / SLSI_NETIF_Q_PER_PEER);
+}
+
+static inline u16 slsi_netif_get_multicast_queue(s16 ac)
+{
+ WARN_ON(ac > SLSI_NETIF_Q_PER_PEER);
+ return SLSI_NETIF_Q_MULTICAST_START + ac;
+}
+
+#define MAP_QS_TO_AID(qs) (qs + 1)
+#define MAP_AID_TO_QS(aid) (aid - 1)
+
/* WMM access-category traffic queues, in ascending queue-index order.
 * Abbreviations follow 802.11 AC naming (BE/BK/VI/VO).
 */
enum slsi_traffic_q {
	SLSI_TRAFFIC_Q_BE = 0, /* best effort */
	SLSI_TRAFFIC_Q_BK,     /* background */
	SLSI_TRAFFIC_Q_VI,     /* video */
	SLSI_TRAFFIC_Q_VO,     /* voice */
};
+
+enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority);
+int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids);
+
+struct slsi_dev;
+struct slsi_peer;
+
+int slsi_netif_init(struct slsi_dev *sdev);
+/* returns the index or -E<error> code */
+int slsi_netif_dynamic_iface_add(struct slsi_dev *sdev, const char *name);
+int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev);
+void slsi_netif_remove_all(struct slsi_dev *sdev);
+void slsi_netif_deinit(struct slsi_dev *sdev);
+void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection);
+void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev);
+
+#endif /*__SLSI_NETIF_H__*/
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/version.h>
+#include <net/cfg80211.h>
+#include <net/ip.h>
+#include <linux/etherdevice.h>
+#include "dev.h"
+#include "cfg80211_ops.h"
+#include "debug.h"
+#include "mgt.h"
+#include "mlme.h"
+#include "netif.h"
+#include "unifiio.h"
+#include "mib.h"
+#include "nl80211_vendor.h"
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+#include "scsc_wifilogger.h"
+#include "scsc_wifilogger_rings.h"
+#include "scsc_wifilogger_types.h"
+#endif
+#define SLSI_WIFI_TAG_RSSI 21
+#define SLSI_WIFI_TAG_REASON_CODE 14
+#define SLSI_WIFI_TAG_VENDOR_SPECIFIC 0
+#define SLSI_WIFI_TAG_EAPOL_MESSAGE_TYPE 29
+#define SLSI_WIFI_TAG_STATUS 4
+
+#define SLSI_GSCAN_INVALID_RSSI 0x7FFF
+#define SLSI_EPNO_AUTH_FIELD_WEP_OPEN 1
+#define SLSI_EPNO_AUTH_FIELD_WPA_PSK 2
+#define SLSI_EPNO_AUTH_FIELD_WPA_EAP 4
+#define WIFI_EVENT_FW_BTM_FRAME_REQUEST 56 // Request for a BTM frame is received
+#define WIFI_EVENT_FW_BTM_FRAME_RESPONSE 57 // A BTM frame is transmitted.
+#define WIFI_EVENT_FW_NR_FRAME_REQUEST 58
+#define WIFI_EVENT_FW_RM_FRAME_RESPONSE 59
+
+#define SLSI_WIFI_TAG_VD_CHANNEL_UTILISATION 0xf01a
+#define SLSI_WIFI_TAG_VD_ROAMING_REASON 0xf019
+#define SLSI_WIFI_TAG_VD_BTM_REQUEST_MODE 0xf01b
+#define SLSI_WIFI_TAG_VD_BTM_RESPONSE_STATUS 0xf01c
+#define SLSI_WIFI_TAG_VD_RETRY_COUNT 0xf00f
+#define SLSI_WIFI_TAG_VD_EAPOL_KEY_TYPE 0xF008
+#define SLSI_WIFI_EAPOL_KEY_TYPE_GTK 0x0000
+#define SLSI_WIFI_EAPOL_KEY_TYPE_PTK 0x0001
+#define SLSI_WIFI_ROAMING_SEARCH_REASON_RESERVED 0
+#define SLSI_WIFI_ROAMING_SEARCH_REASON_LOW_RSSI 1
+#define SLSI_WIFI_ROAMING_SEARCH_REASON_LINK_LOSS 2
+#define SLSI_WIFI_ROAMING_SEARCH_REASON_BTM_REQ 3
+#define SLSI_WIFI_ROAMING_SEARCH_REASON_CU_TRIGGER 4
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+static int mem_dump_buffer_size;
+static char *mem_dump_buffer;
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
/* Translate a vendor NL80211 event id into a printable name for
 * debug logging. Unknown ids map to "UNKNOWN_EVENT"; never returns
 * NULL. Compiled only under CONFIG_SCSC_WLAN_DEBUG.
 */
char *slsi_print_event_name(int event_id)
{
	switch (event_id) {
	case SLSI_NL80211_SCAN_RESULTS_AVAILABLE_EVENT:
		return "SCAN_RESULTS_AVAILABLE_EVENT";
	case SLSI_NL80211_FULL_SCAN_RESULT_EVENT:
		return "FULL_SCAN_RESULT_EVENT";
	case SLSI_NL80211_SCAN_EVENT:
		return "BUCKET_SCAN_DONE_EVENT";
#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
	case SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH:
		return "KEY_MGMT_ROAM_AUTH";
#endif
	case SLSI_NL80211_VENDOR_HANGED_EVENT:
		return "SLSI_NL80211_VENDOR_HANGED_EVENT";
	case SLSI_NL80211_EPNO_EVENT:
		return "SLSI_NL80211_EPNO_EVENT";
	case SLSI_NL80211_HOTSPOT_MATCH:
		return "SLSI_NL80211_HOTSPOT_MATCH";
	case SLSI_NL80211_RSSI_REPORT_EVENT:
		return "SLSI_NL80211_RSSI_REPORT_EVENT";
#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
	case SLSI_NL80211_LOGGER_RING_EVENT:
		return "SLSI_NL80211_LOGGER_RING_EVENT";
	case SLSI_NL80211_LOGGER_FW_DUMP_EVENT:
		return "SLSI_NL80211_LOGGER_FW_DUMP_EVENT";
#endif
	case SLSI_NL80211_NAN_RESPONSE_EVENT:
		return "SLSI_NL80211_NAN_RESPONSE_EVENT";
	case SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT:
		return "SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT";
	case SLSI_NL80211_NAN_MATCH_EVENT:
		return "SLSI_NL80211_NAN_MATCH_EVENT";
	case SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT:
		return "SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT";
	case SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT:
		return "SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT";
	case SLSI_NL80211_NAN_FOLLOWUP_EVENT:
		return "SLSI_NL80211_NAN_FOLLOWUP_EVENT";
	case SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT:
		return "SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT";
	case SLSI_NL80211_RTT_RESULT_EVENT:
		return "SLSI_NL80211_RTT_RESULT_EVENT";
	case SLSI_NL80211_RTT_COMPLETE_EVENT:
		return "SLSI_NL80211_RTT_COMPLETE_EVENT";
	case SLSI_NL80211_VENDOR_ACS_EVENT:
		return "SLSI_NL80211_VENDOR_ACS_EVENT";
	default:
		return "UNKNOWN_EVENT";
	}
}
+#endif
+
+int slsi_vendor_event(struct slsi_dev *sdev, int event_id, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ SLSI_DBG1_NODEV(SLSI_MLME, "Event: %s(%d), data = %p, len = %d\n",
+ slsi_print_event_name(event_id), event_id, data, len);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, len, event_id, GFP_KERNEL);
+#else
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, len, event_id, GFP_KERNEL);
+#endif
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for vendor event: %d\n", event_id);
+ return -ENOMEM;
+ }
+
+ nla_put_nohdr(skb, len, data);
+
+ cfg80211_vendor_event(skb, GFP_KERNEL);
+
+ return 0;
+}
+
+static int slsi_vendor_cmd_reply(struct wiphy *wiphy, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb\n");
+ return -ENOMEM;
+ }
+
+ nla_put_nohdr(skb, len, data);
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
/* Return the primary WLAN net_device used for all gscan operations. */
static struct net_device *slsi_gscan_get_netdev(struct slsi_dev *sdev)
{
	return slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
}
+
+static struct netdev_vif *slsi_gscan_get_vif(struct slsi_dev *sdev)
+{
+ struct net_device *dev;
+
+ dev = slsi_gscan_get_netdev(sdev);
+ if (!dev) {
+ SLSI_WARN_NODEV("Dev is NULL\n");
+ return NULL;
+ }
+
+ return netdev_priv(dev);
+}
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+static void slsi_gscan_add_dump_params(struct slsi_nl_gscan_param *nl_gscan_param)
+{
+ int i;
+ int j;
+
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "Parameters for SLSI_NL80211_VENDOR_SUBCMD_ADD_GSCAN sub-command:\n");
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "base_period: %d max_ap_per_scan: %d report_threshold_percent: %d report_threshold_num_scans = %d num_buckets: %d\n",
+ nl_gscan_param->base_period, nl_gscan_param->max_ap_per_scan,
+ nl_gscan_param->report_threshold_percent, nl_gscan_param->report_threshold_num_scans,
+ nl_gscan_param->num_buckets);
+
+ for (i = 0; i < nl_gscan_param->num_buckets; i++) {
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "Bucket: %d\n", i);
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "\tbucket_index: %d band: %d period: %d report_events: %d num_channels: %d\n",
+ nl_gscan_param->nl_bucket[i].bucket_index, nl_gscan_param->nl_bucket[i].band,
+ nl_gscan_param->nl_bucket[i].period, nl_gscan_param->nl_bucket[i].report_events,
+ nl_gscan_param->nl_bucket[i].num_channels);
+
+ for (j = 0; j < nl_gscan_param->nl_bucket[i].num_channels; j++)
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "\tchannel_list[%d]: %d\n",
+ j, nl_gscan_param->nl_bucket[i].channels[j].channel);
+ }
+}
+
/* Debug helper: dump one buffered gscan result — the netlink-visible
 * fields, the raw IEs, and (when present) the trailing ANQP blob that
 * is stored immediately after the IEs in ie_data.
 */
void slsi_gscan_scan_res_dump(struct slsi_gscan_result *scan_data)
{
	struct slsi_nl_scan_result_param *nl_scan_res = &scan_data->nl_scan_res;

	SLSI_DBG3_NODEV(SLSI_GSCAN, "TS:%llu SSID:%s BSSID:%pM Chan:%d RSSI:%d Bcn_Int:%d Capab:%#x IE_Len:%d\n",
			nl_scan_res->ts, nl_scan_res->ssid, nl_scan_res->bssid, nl_scan_res->channel,
			nl_scan_res->rssi, nl_scan_res->beacon_period, nl_scan_res->capability, nl_scan_res->ie_length);

	SLSI_DBG_HEX_NODEV(SLSI_GSCAN, &nl_scan_res->ie_data[0], nl_scan_res->ie_length, "IE_Data:\n");
	/* ANQP data, if any, lives right after the IEs in the same buffer. */
	if (scan_data->anqp_length) {
		SLSI_DBG3_NODEV(SLSI_GSCAN, "ANQP_LENGTH:%d\n", scan_data->anqp_length);
		SLSI_DBG_HEX_NODEV(SLSI_GSCAN, nl_scan_res->ie_data + nl_scan_res->ie_length, scan_data->anqp_length, "ANQP_info:\n");
	}
}
+#endif
+
+static int slsi_gscan_get_capabilities(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_nl_gscan_capabilities nl_cap;
+ int ret = 0;
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_GET_GSCAN_CAPABILITIES\n");
+
+ memset(&nl_cap, 0, sizeof(struct slsi_nl_gscan_capabilities));
+
+ ret = slsi_mib_get_gscan_cap(sdev, &nl_cap);
+ if (ret != 0) {
+ SLSI_ERR(sdev, "Failed to read mib\n");
+ return ret;
+ }
+
+ nl_cap.max_scan_cache_size = SLSI_GSCAN_MAX_SCAN_CACHE_SIZE;
+ nl_cap.max_ap_cache_per_scan = SLSI_GSCAN_MAX_AP_CACHE_PER_SCAN;
+ nl_cap.max_scan_reporting_threshold = SLSI_GSCAN_MAX_SCAN_REPORTING_THRESHOLD;
+
+ ret = slsi_vendor_cmd_reply(wiphy, &nl_cap, sizeof(struct slsi_nl_gscan_capabilities));
+ if (ret)
+ SLSI_ERR_NODEV("gscan_get_capabilities vendor cmd reply failed (err = %d)\n", ret);
+
+ return ret;
+}
+
+static u32 slsi_gscan_put_channels(struct ieee80211_supported_band *chan_data, bool no_dfs, bool only_dfs, u32 *buf)
+{
+ u32 chan_count = 0;
+ u32 chan_flags;
+ int i;
+
+ if (chan_data == NULL) {
+ SLSI_DBG3_NODEV(SLSI_GSCAN, "Band not supported\n");
+ return 0;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ chan_flags = (IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_OFDM | IEEE80211_CHAN_RADAR);
+#else
+ chan_flags = (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_NO_OFDM | IEEE80211_CHAN_RADAR);
+#endif
+
+ for (i = 0; i < chan_data->n_channels; i++) {
+ if (chan_data->channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+ if (only_dfs) {
+ if (chan_data->channels[i].flags & chan_flags)
+ buf[chan_count++] = chan_data->channels[i].center_freq;
+ continue;
+ }
+ if (no_dfs && (chan_data->channels[i].flags & chan_flags))
+ continue;
+ buf[chan_count++] = chan_data->channels[i].center_freq;
+ }
+ return chan_count;
+}
+
+static int slsi_gscan_get_valid_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0, type, band;
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ u32 *chan_list;
+ u32 chan_count = 0, mem_len = 0;
+ struct sk_buff *reply;
+
+ type = nla_type(data);
+
+ if (type == GSCAN_ATTRIBUTE_BAND)
+ band = nla_get_u32(data);
+ else
+ return -EINVAL;
+
+ if (band == 0) {
+ SLSI_WARN(sdev, "NO Bands. return 0 channel\n");
+ return ret;
+ }
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ SLSI_DBG3(sdev, SLSI_GSCAN, "band %d\n", band);
+ if (wiphy->bands[NL80211_BAND_2GHZ])
+ mem_len += wiphy->bands[NL80211_BAND_2GHZ]->n_channels * sizeof(u32);
+
+ if (wiphy->bands[NL80211_BAND_5GHZ])
+ mem_len += wiphy->bands[NL80211_BAND_5GHZ]->n_channels * sizeof(u32);
+
+ if (mem_len == 0) {
+ ret = -ENOTSUPP;
+ goto exit;
+ }
+
+ chan_list = kmalloc(mem_len, GFP_KERNEL);
+ if (chan_list == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ mem_len += SLSI_NL_VENDOR_REPLY_OVERHEAD + (SLSI_NL_ATTRIBUTE_U32_LEN * 2);
+ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_len);
+ if (reply == NULL) {
+ ret = -ENOMEM;
+ goto exit_with_chan_list;
+ }
+ switch (band) {
+ case WIFI_BAND_BG:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_2GHZ], false, false, chan_list);
+ break;
+ case WIFI_BAND_A:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_5GHZ], true, false, chan_list);
+ break;
+ case WIFI_BAND_A_DFS:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_5GHZ], false, true, chan_list);
+ break;
+ case WIFI_BAND_A_WITH_DFS:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_5GHZ], false, false, chan_list);
+ break;
+ case WIFI_BAND_ABG:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_2GHZ], true, false, chan_list);
+ chan_count += slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_5GHZ], true, false, chan_list + chan_count);
+ break;
+ case WIFI_BAND_ABG_WITH_DFS:
+ chan_count = slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_2GHZ], false, false, chan_list);
+ chan_count += slsi_gscan_put_channels(wiphy->bands[NL80211_BAND_5GHZ], false, false, chan_list + chan_count);
+ break;
+ default:
+ chan_count = 0;
+ SLSI_WARN(sdev, "Invalid Band %d\n", band);
+ }
+ nla_put_u32(reply, GSCAN_ATTRIBUTE_NUM_CHANNELS, chan_count);
+ nla_put(reply, GSCAN_ATTRIBUTE_CHANNEL_LIST, chan_count * sizeof(u32), chan_list);
+
+ ret = cfg80211_vendor_cmd_reply(reply);
+
+ if (ret)
+ SLSI_ERR(sdev, "FAILED to reply GET_VALID_CHANNELS\n");
+
+exit_with_chan_list:
+ kfree(chan_list);
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return ret;
+}
+
/* Build a slsi_gscan_result from an mlme_scan_ind signal.
 *
 * @skb:         FAPI indication carrying the beacon/probe response
 * @anqp_length: bytes of ANQP data appended after the IEs in the frame
 * @hs2_id:      hotspot 2.0 network id stored alongside the ANQP data
 *
 * Layout of scan_res->nl_scan_res.ie_data after this function:
 *   [IEs (ie_len)] [hs2_id (int)] [anqp_length (u16)] [ANQP (anqp_length)]
 *
 * Returns a kmalloc'd result (caller owns/frees it) or NULL on OOM.
 */
struct slsi_gscan_result *slsi_prepare_scan_result(struct sk_buff *skb, u16 anqp_length, int hs2_id)
{
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	struct slsi_gscan_result *scan_res;
	struct timespec ts;
	const u8 *ssid_ie;
	int mem_reqd;
	int ie_len;
	u8 *ie;

	/* IEs start right after the fixed beacon fields; ANQP bytes at the
	 * end of the frame are not part of the IE run.
	 */
	ie = &mgmt->u.beacon.variable[0];
	ie_len = fapi_get_datalen(skb) - (ie - (u8 *)mgmt) - anqp_length;

	/* Exclude 1 byte for ie_data[1]. sizeof(u16) to include anqp_length, sizeof(int) for hs_id */
	mem_reqd = (sizeof(struct slsi_gscan_result) - 1) + ie_len + anqp_length + sizeof(int) + sizeof(u16);

	/* Allocate memory for scan result */
	scan_res = kmalloc(mem_reqd, GFP_KERNEL);
	if (scan_res == NULL) {
		SLSI_ERR_NODEV("Failed to allocate memory for scan result\n");
		return NULL;
	}

	/* Exclude 1 byte for ie_data[1] */
	scan_res->scan_res_len = (sizeof(struct slsi_nl_scan_result_param) - 1) + ie_len;
	scan_res->anqp_length = 0;

	/* Timestamp the result with boot-time microseconds. */
	get_monotonic_boottime(&ts);
	scan_res->nl_scan_res.ts = (u64)TIMESPEC_TO_US(ts);

	/* Copy the SSID out of its IE; fall back to an empty string when
	 * the IE is missing, empty or over-long.
	 */
	ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, &mgmt->u.beacon.variable[0], ie_len);
	if ((ssid_ie != NULL) && (ssid_ie[1] > 0) && (ssid_ie[1] < IEEE80211_MAX_SSID_LEN)) {
		memcpy(scan_res->nl_scan_res.ssid, &ssid_ie[2], ssid_ie[1]);
		scan_res->nl_scan_res.ssid[ssid_ie[1]] = '\0';
	} else {
		scan_res->nl_scan_res.ssid[0] = '\0';
	}

	SLSI_ETHER_COPY(scan_res->nl_scan_res.bssid, mgmt->bssid);
	/* channel_frequency is reported in half-MHz units; halve it. */
	scan_res->nl_scan_res.channel = fapi_get_u16(skb, u.mlme_scan_ind.channel_frequency) / 2;
	scan_res->nl_scan_res.rssi = fapi_get_s16(skb, u.mlme_scan_ind.rssi);
	scan_res->nl_scan_res.rtt = SLSI_GSCAN_RTT_UNSPECIFIED;
	scan_res->nl_scan_res.rtt_sd = SLSI_GSCAN_RTT_UNSPECIFIED;
	scan_res->nl_scan_res.beacon_period = mgmt->u.beacon.beacon_int;
	scan_res->nl_scan_res.capability = mgmt->u.beacon.capab_info;
	scan_res->nl_scan_res.ie_length = ie_len;
	/* Pack IEs, then hs2_id, then anqp_length, then ANQP bytes. */
	memcpy(scan_res->nl_scan_res.ie_data, ie, ie_len);
	memcpy(scan_res->nl_scan_res.ie_data + ie_len, &hs2_id, sizeof(int));
	memcpy(scan_res->nl_scan_res.ie_data + ie_len + sizeof(int), &anqp_length, sizeof(u16));
	if (anqp_length) {
		memcpy(scan_res->nl_scan_res.ie_data + ie_len + sizeof(u16) + sizeof(int), ie + ie_len, anqp_length);
		scan_res->anqp_length = anqp_length + sizeof(u16) + sizeof(int);
	}

#ifdef CONFIG_SCSC_WLAN_DEBUG
	slsi_gscan_scan_res_dump(scan_res);
#endif

	return scan_res;
}
+
+static void slsi_gscan_hash_add(struct slsi_dev *sdev, struct slsi_gscan_result *scan_res)
+{
+ u8 key = SLSI_GSCAN_GET_HASH_KEY(scan_res->nl_scan_res.bssid[5]);
+ struct netdev_vif *ndev_vif;
+
+ ndev_vif = slsi_gscan_get_vif(sdev);
+ if (!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex))
+ SLSI_WARN_NODEV("ndev_vif->scan_mutex is not locked\n");
+
+ scan_res->hnext = sdev->gscan_hash_table[key];
+ sdev->gscan_hash_table[key] = scan_res;
+
+ /* Update the total buffer consumed and number of scan results */
+ sdev->buffer_consumed += scan_res->scan_res_len;
+ sdev->num_gscan_results++;
+}
+
+static struct slsi_gscan_result *slsi_gscan_hash_get(struct slsi_dev *sdev, u8 *mac)
+{
+ struct slsi_gscan_result *temp;
+ struct netdev_vif *ndev_vif;
+ u8 key = SLSI_GSCAN_GET_HASH_KEY(mac[5]);
+
+ ndev_vif = slsi_gscan_get_vif(sdev);
+
+ if (!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex))
+ SLSI_WARN_NODEV("ndev_vif->scan_mutex is not locked\n");
+
+ temp = sdev->gscan_hash_table[key];
+ while (temp != NULL) {
+ if (memcmp(temp->nl_scan_res.bssid, mac, ETH_ALEN) == 0)
+ return temp;
+ temp = temp->hnext;
+ }
+
+ return NULL;
+}
+
+void slsi_gscan_hash_remove(struct slsi_dev *sdev, u8 *mac)
+{
+ /* Unlink and free the first cached scan result whose BSSID matches
+ * @mac, keeping buffer_consumed and num_gscan_results in step with
+ * the table contents. No-op when the BSS is not cached. Caller must
+ * hold scan_mutex (only warned about, not enforced).
+ */
+ u8 key = SLSI_GSCAN_GET_HASH_KEY(mac[5]);
+ struct slsi_gscan_result **link;
+ struct slsi_gscan_result *found = NULL;
+ struct netdev_vif *ndev_vif = slsi_gscan_get_vif(sdev);
+
+ if (!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex))
+ SLSI_WARN_NODEV("ndev_vif->scan_mutex is not locked\n");
+
+ /* Walk via a pointer-to-link so head and interior removal share code */
+ for (link = &sdev->gscan_hash_table[key]; *link; link = &(*link)->hnext)
+ if (memcmp((*link)->nl_scan_res.bssid, mac, ETH_ALEN) == 0) {
+ found = *link;
+ *link = found->hnext;
+ break;
+ }
+
+ if (found) {
+ /* Update the total buffer consumed and number of scan results */
+ sdev->buffer_consumed -= found->scan_res_len;
+ sdev->num_gscan_results--;
+ kfree(found);
+ }
+
+ if (sdev->num_gscan_results < 0)
+ SLSI_ERR(sdev, "Wrong num_gscan_results: %d\n", sdev->num_gscan_results);
+}
+
+int slsi_check_scan_result(struct slsi_dev *sdev, struct slsi_bucket *bucket, struct slsi_gscan_result *new_scan_res)
+{
+ /* Decide whether a freshly parsed result should be cached.
+ * Returns SLSI_KEEP_SCAN_RESULT or SLSI_DISCARD_SCAN_RESULT; on KEEP
+ * for an already-cached BSS the stale entry is removed so the caller
+ * can insert the replacement.
+ */
+ struct slsi_gscan_result *existing;
+
+ existing = slsi_gscan_hash_get(sdev, new_scan_res->nl_scan_res.bssid);
+ if (!existing) {
+ /* Unknown BSS: accept only while the cache has room */
+ if ((sdev->buffer_consumed + new_scan_res->scan_res_len) >= SLSI_GSCAN_MAX_SCAN_CACHE_SIZE) {
+ SLSI_DBG2(sdev, SLSI_GSCAN,
+ "Scan buffer full, discarding scan result, buffer_consumed = %d, buffer_threshold = %d\n",
+ sdev->buffer_consumed, sdev->buffer_threshold);
+ return SLSI_DISCARD_SCAN_RESULT;
+ }
+ return SLSI_KEEP_SCAN_RESULT;
+ }
+
+ /* Known BSS may be replaced even when the buffer is full; within the
+ * same scan cycle only a stronger RSSI wins.
+ */
+ if ((existing->scan_cycle == bucket->scan_cycle) &&
+ (new_scan_res->nl_scan_res.rssi < existing->nl_scan_res.rssi))
+ return SLSI_DISCARD_SCAN_RESULT;
+
+ /* Remove the existing scan result */
+ slsi_gscan_hash_remove(sdev, existing->nl_scan_res.bssid);
+
+ return SLSI_KEEP_SCAN_RESULT;
+}
+
+void slsi_gscan_handle_scan_result(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 scan_id, bool scan_done)
+{
+ struct slsi_gscan_result *scan_res = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_bucket *bucket;
+ u16 bucket_index;
+ int event_type = WIFI_SCAN_FAILED;
+ u16 anqp_length;
+ int hs2_network_id;
+
+ /* Process one mlme_scan_ind for a gscan bucket: either a scan-cycle
+ * completion (scan_done) that may raise a threshold event towards the
+ * HAL, or a single scan result that is parsed, optionally reported,
+ * and cached. Consumes @skb on every path. Caller must hold
+ * scan_mutex (only warned about, not enforced).
+ */
+ if (!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_mutex))
+ SLSI_WARN_NODEV("ndev_vif->scan_mutex is not locked\n");
+
+ SLSI_NET_DBG_HEX(dev, SLSI_GSCAN, skb->data, skb->len, "mlme_scan_ind skb->len: %d\n", skb->len);
+
+ bucket_index = scan_id - SLSI_GSCAN_SCAN_ID_START;
+ if (bucket_index >= SLSI_GSCAN_MAX_BUCKETS) {
+ SLSI_NET_ERR(dev, "Invalid bucket index: %d (scan_id = %#x)\n", bucket_index, scan_id);
+ goto out;
+ }
+
+ bucket = &sdev->bucket[bucket_index];
+ if (!bucket->used) {
+ SLSI_NET_DBG1(dev, SLSI_GSCAN, "Bucket is not active, index: %d (scan_id = %#x)\n", bucket_index, scan_id);
+ goto out;
+ }
+
+ /* For scan_done indication - no need to store the results */
+ if (scan_done) {
+ bucket->scan_cycle++;
+ bucket->gscan->num_scans++;
+
+ SLSI_NET_DBG3(dev, SLSI_GSCAN, "scan done, scan_cycle = %d, num_scans = %d\n",
+ bucket->scan_cycle, bucket->gscan->num_scans);
+
+ if (bucket->report_events & SLSI_REPORT_EVENTS_EACH_SCAN)
+ event_type = WIFI_SCAN_RESULTS_AVAILABLE;
+ /* report_threshold_num_scans is userspace-supplied and is not
+ * validated elsewhere; guard the modulo so a value of 0 cannot
+ * cause a division by zero in the kernel.
+ */
+ if (bucket->gscan->report_threshold_num_scans &&
+ (bucket->gscan->num_scans % bucket->gscan->report_threshold_num_scans == 0))
+ event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+ if (sdev->buffer_consumed >= sdev->buffer_threshold)
+ event_type = WIFI_SCAN_THRESHOLD_PERCENT;
+
+ if (event_type != WIFI_SCAN_FAILED)
+ slsi_vendor_event(sdev, SLSI_NL80211_SCAN_EVENT, &event_type, sizeof(event_type));
+
+ goto out;
+ }
+
+ anqp_length = fapi_get_u16(skb, u.mlme_scan_ind.anqp_elements_length);
+ /* TODO new FAPI 3.c has mlme_scan_ind.network_block_id, use that when fapi is updated. */
+ hs2_network_id = 1;
+
+ scan_res = slsi_prepare_scan_result(skb, anqp_length, hs2_network_id);
+ if (scan_res == NULL) {
+ SLSI_NET_ERR(dev, "Failed to prepare scan result\n");
+ goto out;
+ }
+
+ /* Check for ePNO networks */
+ if (fapi_get_u16(skb, u.mlme_scan_ind.preferrednetwork_ap)) {
+ if (anqp_length == 0)
+ slsi_vendor_event(sdev, SLSI_NL80211_EPNO_EVENT,
+ &scan_res->nl_scan_res, scan_res->scan_res_len);
+ else
+ slsi_vendor_event(sdev, SLSI_NL80211_HOTSPOT_MATCH,
+ &scan_res->nl_scan_res, scan_res->scan_res_len + scan_res->anqp_length);
+ }
+
+ if (bucket->report_events & SLSI_REPORT_EVENTS_FULL_RESULTS) {
+ struct sk_buff *nlevent;
+
+ SLSI_NET_DBG3(dev, SLSI_GSCAN, "report_events: SLSI_REPORT_EVENTS_FULL_RESULTS\n");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ nlevent = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, scan_res->scan_res_len + 4, SLSI_NL80211_FULL_SCAN_RESULT_EVENT, GFP_KERNEL);
+#else
+ nlevent = cfg80211_vendor_event_alloc(sdev->wiphy, scan_res->scan_res_len + 4, SLSI_NL80211_FULL_SCAN_RESULT_EVENT, GFP_KERNEL);
+#endif
+ if (!nlevent) {
+ SLSI_ERR(sdev, "failed to allocate sbk of size:%d\n", scan_res->scan_res_len + 4);
+ kfree(scan_res);
+ goto out;
+ }
+ if (nla_put_u32(nlevent, GSCAN_ATTRIBUTE_SCAN_BUCKET_BIT, (1 << bucket_index)) ||
+ nla_put(nlevent, GSCAN_ATTRIBUTE_SCAN_RESULTS, scan_res->scan_res_len, &scan_res->nl_scan_res)) {
+ SLSI_ERR(sdev, "failed to put data\n");
+ kfree_skb(nlevent);
+ kfree(scan_res);
+ goto out;
+ }
+ cfg80211_vendor_event(nlevent, GFP_KERNEL);
+ }
+
+ /* Cache the result unless the policy says to discard it */
+ if (slsi_check_scan_result(sdev, bucket, scan_res) == SLSI_DISCARD_SCAN_RESULT) {
+ kfree(scan_res);
+ goto out;
+ }
+ slsi_gscan_hash_add(sdev, scan_res);
+
+out:
+ slsi_kfree_skb(skb);
+}
+
+u8 slsi_gscan_get_scan_policy(enum wifi_band band)
+{
+ /* Map the HAL band selection onto the FAPI scan policy bitmask.
+ * Unspecified or unknown bands scan all radios (ANY_RA).
+ */
+ u8 policy = FAPI_SCANPOLICY_ANY_RA;
+
+ switch (band) {
+ case WIFI_BAND_BG:
+ policy = FAPI_SCANPOLICY_2_4GHZ;
+ break;
+ case WIFI_BAND_A:
+ policy = FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_NON_DFS;
+ break;
+ case WIFI_BAND_A_DFS:
+ policy = FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_DFS;
+ break;
+ case WIFI_BAND_A_WITH_DFS:
+ policy = FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_NON_DFS | FAPI_SCANPOLICY_DFS;
+ break;
+ case WIFI_BAND_ABG:
+ policy = FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_NON_DFS | FAPI_SCANPOLICY_2_4GHZ;
+ break;
+ case WIFI_BAND_ABG_WITH_DFS:
+ policy = FAPI_SCANPOLICY_5GHZ | FAPI_SCANPOLICY_NON_DFS | FAPI_SCANPOLICY_DFS | FAPI_SCANPOLICY_2_4GHZ;
+ break;
+ case WIFI_BAND_UNSPECIFIED:
+ default:
+ /* keep FAPI_SCANPOLICY_ANY_RA */
+ break;
+ }
+
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "Scan Policy: %#x\n", policy);
+
+ return policy;
+}
+
+static int slsi_gscan_add_read_params(struct slsi_nl_gscan_param *nl_gscan_param, const void *data, int len)
+{
+ int j = 0;
+ int type, tmp, tmp1, tmp2, k = 0;
+ const struct nlattr *iter, *iter1, *iter2;
+ struct slsi_nl_bucket_param *nl_bucket;
+
+ /* Copy the netlink-encoded gscan request into @nl_gscan_param.
+ * Top-level attributes carry the global settings; each CH_BUCKET_n
+ * attribute describes one bucket (at most SLSI_GSCAN_MAX_BUCKETS are
+ * read, extras are ignored). Always returns 0; range validation is
+ * done separately by slsi_gscan_add_verify_params().
+ */
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+
+ if (j >= SLSI_GSCAN_MAX_BUCKETS)
+ break;
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BASE_PERIOD:
+ nl_gscan_param->base_period = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN:
+ nl_gscan_param->max_ap_per_scan = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_THRESHOLD:
+ nl_gscan_param->report_threshold_percent = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_THRESHOLD_NUM_SCANS:
+ nl_gscan_param->report_threshold_num_scans = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_NUM_BUCKETS:
+ nl_gscan_param->num_buckets = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_CH_BUCKET_1:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_2:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_3:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_4:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_5:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_6:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_7:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_8:
+ nla_for_each_nested(iter1, iter, tmp1) {
+ type = nla_type(iter1);
+ nl_bucket = nl_gscan_param->nl_bucket;
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BUCKET_ID:
+ nl_bucket[j].bucket_index = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_PERIOD:
+ nl_bucket[j].period = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS:
+ nl_bucket[j].num_channels = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_CHANNELS:
+ nla_for_each_nested(iter2, iter1, tmp2) {
+ /* The attribute count is userspace-controlled;
+ * never write past the fixed channels[] array.
+ */
+ if (k >= (int)(sizeof(nl_bucket[j].channels) / sizeof(nl_bucket[j].channels[0])))
+ break;
+ nl_bucket[j].channels[k].channel = nla_get_u32(iter2);
+ k++;
+ }
+ k = 0;
+ break;
+ case GSCAN_ATTRIBUTE_BUCKETS_BAND:
+ nl_bucket[j].band = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_EVENTS:
+ nl_bucket[j].report_events = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_EXPONENT:
+ nl_bucket[j].exponent = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT:
+ nl_bucket[j].step_count = nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD:
+ nl_bucket[j].max_period = nla_get_u32(iter1);
+ break;
+ default:
+ SLSI_ERR_NODEV("No ATTR_BUKTS_type - %x\n", type);
+ break;
+ }
+ }
+ j++;
+ break;
+ default:
+ SLSI_ERR_NODEV("No GSCAN_ATTR_CH_BUKT_type - %x\n", type);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int slsi_gscan_add_verify_params(struct slsi_nl_gscan_param *nl_gscan_param)
+{
+ /* Range-check the HAL-supplied gscan parameters.
+ * Returns 0 when acceptable, -EINVAL on the first out-of-range field.
+ */
+ int idx;
+
+ if ((nl_gscan_param->max_ap_per_scan < 0) || (nl_gscan_param->max_ap_per_scan > SLSI_GSCAN_MAX_AP_CACHE_PER_SCAN)) {
+ SLSI_ERR_NODEV("Invalid max_ap_per_scan: %d\n", nl_gscan_param->max_ap_per_scan);
+ return -EINVAL;
+ }
+
+ if ((nl_gscan_param->report_threshold_percent < 0) || (nl_gscan_param->report_threshold_percent > SLSI_GSCAN_MAX_SCAN_REPORTING_THRESHOLD)) {
+ SLSI_ERR_NODEV("Invalid report_threshold_percent: %d\n", nl_gscan_param->report_threshold_percent);
+ return -EINVAL;
+ }
+
+ if ((nl_gscan_param->num_buckets <= 0) || (nl_gscan_param->num_buckets > SLSI_GSCAN_MAX_BUCKETS)) {
+ SLSI_ERR_NODEV("Invalid num_buckets: %d\n", nl_gscan_param->num_buckets);
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < nl_gscan_param->num_buckets; idx++) {
+ /* Each bucket needs either an explicit band or a channel list */
+ if ((nl_gscan_param->nl_bucket[idx].band == WIFI_BAND_UNSPECIFIED) && (nl_gscan_param->nl_bucket[idx].num_channels == 0)) {
+ SLSI_ERR_NODEV("No band/channels provided for gscan: band = %d, num_channel = %d\n",
+ nl_gscan_param->nl_bucket[idx].band, nl_gscan_param->nl_bucket[idx].num_channels);
+ return -EINVAL;
+ }
+
+ if (nl_gscan_param->nl_bucket[idx].report_events > 4) {
+ SLSI_ERR_NODEV("Unsupported report event: report_event = %d\n", nl_gscan_param->nl_bucket[idx].report_events);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void slsi_gscan_add_to_list(struct slsi_gscan **sdev_gscan, struct slsi_gscan *gscan)
+{
+ /* Push @gscan onto the head of the device's singly linked list. */
+ struct slsi_gscan *old_head = *sdev_gscan;
+
+ gscan->next = old_head;
+ *sdev_gscan = gscan;
+}
+
+int slsi_gscan_alloc_buckets(struct slsi_dev *sdev, struct slsi_gscan *gscan, int num_buckets)
+{
+ /* Reserve @num_buckets unused device buckets for @gscan.
+ * Availability is verified first so that a failure leaves the device
+ * bucket table untouched. Returns 0 on success, -EINVAL when fewer
+ * than @num_buckets buckets are free.
+ */
+ int idx;
+ int assigned = 0;
+ int available = 0;
+
+ for (idx = 0; idx < SLSI_GSCAN_MAX_BUCKETS; idx++)
+ if (!sdev->bucket[idx].used)
+ available++;
+
+ if (num_buckets > available) {
+ SLSI_ERR_NODEV("Not enough free buckets, num_buckets = %d, free_buckets = %d\n",
+ num_buckets, available);
+ return -EINVAL;
+ }
+
+ /* Allocate free buckets for the current gscan */
+ for (idx = 0; idx < SLSI_GSCAN_MAX_BUCKETS && assigned < num_buckets; idx++) {
+ if (sdev->bucket[idx].used)
+ continue;
+ sdev->bucket[idx].used = true;
+ sdev->bucket[idx].gscan = gscan;
+ gscan->bucket[assigned] = &sdev->bucket[idx];
+ assigned++;
+ }
+
+ return 0;
+}
+
+static void slsi_gscan_free_buckets(struct slsi_gscan *gscan)
+{
+ /* Return every bucket owned by @gscan to the device free pool. */
+ int idx;
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "gscan = %p, num_buckets = %d\n", gscan, gscan->num_buckets);
+
+ for (idx = 0; idx < gscan->num_buckets; idx++) {
+ struct slsi_bucket *bucket = gscan->bucket[idx];
+
+ SLSI_DBG2_NODEV(SLSI_GSCAN, "bucket = %p, used = %d, report_events = %d, scan_id = %#x, gscan = %p\n",
+ bucket, bucket->used, bucket->report_events, bucket->scan_id, bucket->gscan);
+ if (!bucket->used)
+ continue;
+ bucket->used = false;
+ bucket->report_events = 0;
+ bucket->gscan = NULL;
+ }
+}
+
+void slsi_gscan_flush_scan_results(struct slsi_dev *sdev)
+{
+ struct netdev_vif *ndev_vif;
+ struct slsi_gscan_result *temp;
+ int i;
+
+ /* Drop every cached gscan result and bring the cache accounting back
+ * to zero. Takes scan_mutex for the duration; warns if the counters
+ * do not end up at exactly zero (accounting bug elsewhere).
+ */
+ ndev_vif = slsi_gscan_get_vif(sdev);
+ if (!ndev_vif) {
+ SLSI_WARN_NODEV("ndev_vif is NULL\n");
+ return;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ for (i = 0; i < SLSI_GSCAN_HASH_TABLE_SIZE; i++)
+ while (sdev->gscan_hash_table[i]) {
+ temp = sdev->gscan_hash_table[i];
+ sdev->gscan_hash_table[i] = sdev->gscan_hash_table[i]->hnext;
+ sdev->num_gscan_results--;
+ sdev->buffer_consumed -= temp->scan_res_len;
+ kfree(temp);
+ }
+
+ SLSI_DBG2(sdev, SLSI_GSCAN, "num_gscan_results: %d, buffer_consumed = %d\n",
+ sdev->num_gscan_results, sdev->buffer_consumed);
+
+ if (sdev->num_gscan_results != 0)
+ SLSI_WARN_NODEV("sdev->num_gscan_results is not zero\n");
+
+ if (sdev->buffer_consumed != 0)
+ SLSI_WARN_NODEV("sdev->buffer_consumed is not zero\n");
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+}
+
+static int slsi_gscan_add_mlme(struct slsi_dev *sdev, struct slsi_nl_gscan_param *nl_gscan_param, struct slsi_gscan *gscan)
+{
+ struct slsi_gscan_param gscan_param;
+ struct net_device *dev;
+ int ret = 0;
+ int i;
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ u8 mac_addr_mask[ETH_ALEN] = {0xFF};
+#endif
+
+ /* Issue one mlme_add_scan per bucket of @gscan, translating the HAL
+ * report-event flags to FAPI report modes. On failure all scans added
+ * so far are rolled back and the mlme error is returned.
+ */
+ dev = slsi_gscan_get_netdev(sdev);
+
+ if (!dev) {
+ SLSI_WARN_NODEV("dev is NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nl_gscan_param->num_buckets; i++) {
+ u16 report_mode = 0;
+
+ gscan_param.nl_bucket = &nl_gscan_param->nl_bucket[i]; /* current bucket */
+ gscan_param.bucket = gscan->bucket[i];
+
+ if (gscan_param.bucket->report_events) {
+ if (gscan_param.bucket->report_events & SLSI_REPORT_EVENTS_EACH_SCAN)
+ report_mode |= FAPI_REPORTMODE_END_OF_SCAN_CYCLE;
+ if (gscan_param.bucket->report_events & SLSI_REPORT_EVENTS_FULL_RESULTS)
+ report_mode |= FAPI_REPORTMODE_REAL_TIME;
+ if (gscan_param.bucket->report_events & SLSI_REPORT_EVENTS_NO_BATCH)
+ report_mode |= FAPI_REPORTMODE_NO_BATCH;
+ } else {
+ report_mode = FAPI_REPORTMODE_RESERVED;
+ }
+
+ if (report_mode == 0) {
+ SLSI_NET_ERR(dev, "Invalid report event value: %d\n", gscan_param.bucket->report_events);
+ return -EINVAL;
+ }
+
+ /* In case of epno no_batch mode should be set. */
+ if (sdev->epno_active)
+ report_mode |= FAPI_REPORTMODE_NO_BATCH;
+
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+ if (sdev->scan_addr_set == 1) {
+ /* Use a dedicated index here: the original code reused the
+ * outer bucket counter 'i', which terminated the bucket loop
+ * after the first iteration.
+ */
+ int octet;
+
+ memset(mac_addr_mask, 0xFF, ETH_ALEN);
+ /* Randomise only the lower three address octets */
+ for (octet = 3; octet < ETH_ALEN; octet++)
+ mac_addr_mask[octet] = 0x00;
+ ret = slsi_set_mac_randomisation_mask(sdev, mac_addr_mask);
+ if (ret)
+ sdev->scan_addr_set = 0;
+ } else {
+ memset(mac_addr_mask, 0xFF, ETH_ALEN);
+ slsi_set_mac_randomisation_mask(sdev, mac_addr_mask);
+ }
+#endif
+ ret = slsi_mlme_add_scan(sdev,
+ dev,
+ FAPI_SCANTYPE_GSCAN,
+ report_mode,
+ 0, /* n_ssids */
+ NULL, /* ssids */
+ nl_gscan_param->nl_bucket[i].num_channels,
+ NULL, /* ieee80211_channel */
+ &gscan_param,
+ NULL, /* ies */
+ 0, /* ies_len */
+ false /* wait_for_ind */);
+
+ if (ret != 0) {
+ SLSI_NET_ERR(dev, "Failed to add bucket: %d\n", i);
+
+ /* Delete the scan those are already added */
+ for (i = (i - 1); i >= 0; i--)
+ slsi_mlme_del_scan(sdev, dev, gscan->bucket[i]->scan_id, false);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Vendor subcmd handler: create a gscan from HAL-supplied netlink params.
+ * Parses and validates the request, allocates buckets, programs the
+ * firmware via mlme, and links the new gscan into sdev->gscan.
+ * Returns 0 on success or a negative errno.
+ */
+static int slsi_gscan_add(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct slsi_nl_gscan_param *nl_gscan_param = NULL;
+ struct slsi_gscan *gscan;
+ struct netdev_vif *ndev_vif;
+ int buffer_threshold;
+ int i;
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_ADD_GSCAN\n");
+
+ if (!sdev) {
+ SLSI_WARN_NODEV("sdev is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!slsi_dev_gscan_supported())
+ return -ENOTSUPP;
+
+ ndev_vif = slsi_gscan_get_vif(sdev);
+
+ /* scan_mutex is held for the whole add so bucket allocation and the
+ * sdev->gscan list update are atomic w.r.t. other scan operations.
+ */
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ /* Allocate memory for the received scan params */
+ nl_gscan_param = kzalloc(sizeof(*nl_gscan_param), GFP_KERNEL);
+ if (nl_gscan_param == NULL) {
+ SLSI_ERR_NODEV("Failed for allocate memory for gscan params\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ slsi_gscan_add_read_params(nl_gscan_param, data, len);
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ slsi_gscan_add_dump_params(nl_gscan_param);
+#endif
+
+ ret = slsi_gscan_add_verify_params(nl_gscan_param);
+ if (ret) {
+ /* After adding a hotlist a new gscan is added with 0 buckets - return success */
+ if (nl_gscan_param->num_buckets == 0) {
+ kfree(nl_gscan_param);
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return 0;
+ }
+
+ goto exit;
+ }
+
+ /* Allocate Memory for the new gscan */
+ gscan = kzalloc(sizeof(*gscan), GFP_KERNEL);
+ if (gscan == NULL) {
+ SLSI_ERR_NODEV("Failed to allocate memory for gscan\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ gscan->num_buckets = nl_gscan_param->num_buckets;
+ gscan->report_threshold_percent = nl_gscan_param->report_threshold_percent;
+ gscan->report_threshold_num_scans = nl_gscan_param->report_threshold_num_scans;
+ /* Only the first bucket's params are kept on the gscan itself */
+ gscan->nl_bucket = nl_gscan_param->nl_bucket[0];
+
+ /* If multiple gscan is added; consider the lowest report_threshold_percent */
+ buffer_threshold = (SLSI_GSCAN_MAX_SCAN_CACHE_SIZE * nl_gscan_param->report_threshold_percent) / 100;
+ if ((sdev->buffer_threshold == 0) || (buffer_threshold < sdev->buffer_threshold))
+ sdev->buffer_threshold = buffer_threshold;
+
+ ret = slsi_gscan_alloc_buckets(sdev, gscan, nl_gscan_param->num_buckets);
+ if (ret)
+ goto exit_with_gscan_free;
+
+ for (i = 0; i < nl_gscan_param->num_buckets; i++)
+ gscan->bucket[i]->report_events = nl_gscan_param->nl_bucket[i].report_events;
+
+ ret = slsi_gscan_add_mlme(sdev, nl_gscan_param, gscan);
+ if (ret) {
+ /* Free the buckets */
+ slsi_gscan_free_buckets(gscan);
+
+ goto exit_with_gscan_free;
+ }
+
+ slsi_gscan_add_to_list(&sdev->gscan, gscan);
+
+ goto exit;
+
+exit_with_gscan_free:
+ kfree(gscan);
+exit:
+ kfree(nl_gscan_param);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return ret;
+}
+
+static int slsi_gscan_del(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ /* Vendor subcmd handler: tear down every tracked gscan.
+ * Deletes the firmware scans, releases the buckets, frees the list,
+ * then flushes the cached results and resets the buffer threshold.
+ */
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct slsi_gscan *cur;
+ int i;
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_DEL_GSCAN\n");
+
+ dev = slsi_gscan_get_netdev(sdev);
+ if (!dev) {
+ SLSI_WARN_NODEV("dev is NULL\n");
+ return -EINVAL;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ while ((cur = sdev->gscan) != NULL) {
+ SLSI_DBG3(sdev, SLSI_GSCAN, "gscan = %p, num_buckets = %d\n", cur, cur->num_buckets);
+
+ for (i = 0; i < cur->num_buckets; i++)
+ if (cur->bucket[i]->used)
+ slsi_mlme_del_scan(sdev, dev, cur->bucket[i]->scan_id, false);
+ slsi_gscan_free_buckets(cur);
+ sdev->gscan = cur->next;
+ kfree(cur);
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+
+ slsi_gscan_flush_scan_results(sdev);
+
+ sdev->buffer_threshold = 0;
+
+ return 0;
+}
+
+/* Vendor subcmd handler: return up to the requested number of cached scan
+ * results to the HAL. The read is destructive: returned entries are
+ * unlinked from the hash table and freed. Returns 0 when no results are
+ * cached, otherwise the cfg80211 reply status or a negative errno.
+ */
+static int slsi_gscan_get_scan_results(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct sk_buff *skb;
+ struct slsi_gscan_result *scan_res;
+ struct nlattr *scan_hdr;
+ struct netdev_vif *ndev_vif;
+ int num_results = 0;
+ int mem_needed;
+ const struct nlattr *attr;
+ int nl_num_results = 0;
+ int ret = 0;
+ int temp;
+ int type;
+ int i;
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_GET_SCAN_RESULTS\n");
+
+ /* Read the number of scan results need to be given */
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_OF_RESULTS:
+ nl_num_results = nla_get_u32(attr);
+ break;
+ default:
+ SLSI_ERR_NODEV("Unknown attribute: %d\n", type);
+ break;
+ }
+ }
+
+ ndev_vif = slsi_gscan_get_vif(sdev);
+ if (!ndev_vif) {
+ SLSI_WARN_NODEV("ndev_vif is NULL\n");
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+
+ num_results = sdev->num_gscan_results;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "nl_num_results: %d, num_results = %d\n", nl_num_results, sdev->num_gscan_results);
+
+ if (num_results == 0) {
+ SLSI_DBG1(sdev, SLSI_GSCAN, "No scan results available\n");
+ /* Return value should be 0 for this scenario */
+ goto exit;
+ }
+
+ /* Find the number of results to return */
+ if (num_results > nl_num_results)
+ num_results = nl_num_results;
+
+ /* 12 bytes additional for scan_id, flags and num_resuls */
+ mem_needed = num_results * sizeof(struct slsi_nl_scan_result_param) + 12;
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (skb == NULL) {
+ SLSI_ERR_NODEV("skb alloc failed");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
+ if (scan_hdr == NULL) {
+ kfree_skb(skb);
+ SLSI_ERR_NODEV("scan_hdr is NULL.\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* NOTE(review): nla_put* return values are ignored below; if
+ * mem_needed is ever undersized the reply would silently miss
+ * entries that were already removed from the cache — confirm sizing.
+ */
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, 0);
+ nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, 0);
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results);
+
+ /* Destructive read: pop entries from every hash chain, keeping the
+ * cache accounting in step, until num_results have been emitted or
+ * the table is empty.
+ */
+ for (i = 0; i < SLSI_GSCAN_HASH_TABLE_SIZE; i++)
+ while (sdev->gscan_hash_table[i]) {
+ scan_res = sdev->gscan_hash_table[i];
+ sdev->gscan_hash_table[i] = sdev->gscan_hash_table[i]->hnext;
+ sdev->num_gscan_results--;
+ sdev->buffer_consumed -= scan_res->scan_res_len;
+ /* TODO: If IE is included then HAL is not able to parse the results */
+ nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, sizeof(struct slsi_nl_scan_result_param), &scan_res->nl_scan_res);
+ kfree(scan_res);
+ num_results--;
+ if (num_results == 0)
+ goto out;
+ }
+out:
+ nla_nest_end(skb, scan_hdr);
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return ret;
+}
+
+void slsi_rx_rssi_report_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ /* Forward an mlme_rssi_report_ind to the HAL as an
+ * SLSI_NL80211_RSSI_REPORT_EVENT vendor event. Consumes @skb.
+ */
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_rssi_monitor_evt evt;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_ETHER_COPY(evt.bssid, fapi_get_buff(skb, u.mlme_rssi_report_ind.bssid));
+ evt.rssi = fapi_get_s16(skb, u.mlme_rssi_report_ind.rssi);
+ SLSI_DBG3(sdev, SLSI_GSCAN, "RSSI threshold breached, Current RSSI for %pM= %d\n", evt.bssid, evt.rssi);
+
+ slsi_vendor_event(sdev, SLSI_NL80211_RSSI_REPORT_EVENT, &evt, sizeof(evt));
+ slsi_kfree_skb(skb);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
+static int slsi_key_mgmt_set_pmk(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *pmk, int pmklen)
+{
+ /* Vendor subcmd handler: push the PMK to the firmware for offloaded
+ * key management. P2P client interfaces do not need a PMK, so the
+ * request succeeds without action there.
+ */
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int result = 0;
+
+ if (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ SLSI_DBG3(sdev, SLSI_GSCAN, "Not required to set PMK for P2P client\n");
+ return result;
+ }
+ SLSI_DBG3(sdev, SLSI_GSCAN, "SUBCMD_SET_PMK Received\n");
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ result = slsi_mlme_set_pmk(sdev, net_dev, pmk, (u16)pmklen);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ return result;
+}
+#endif
+
+static int slsi_set_bssid_blacklist(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int temp1;
+ int type;
+ const struct nlattr *attr;
+ u32 num_bssids = 0;
+ u32 i = 0;
+ int ret;
+ u8 *bssid = NULL;
+ struct cfg80211_acl_data *acl_data = NULL;
+
+ /* Vendor subcmd handler: parse a blacklist (count + BSSID list) from
+ * the HAL and program it into the firmware as a blacklist-policy ACL.
+ * NUM_BSSID must precede the BSSID attributes; extra BSSIDs beyond
+ * the declared count are rejected.
+ */
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_SET_BSSID_BLACK_LIST\n");
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ if (!net_dev) {
+ SLSI_WARN_NODEV("net_dev is NULL\n");
+ return -EINVAL;
+ }
+
+ ndev_vif = netdev_priv(net_dev);
+ /*This subcmd can be issued in either connected or disconnected state.
+ * Hence using scan_mutex and not vif_mutex
+ */
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ nla_for_each_attr(attr, data, len, temp1) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_BSSID:
+ if (acl_data)
+ break;
+
+ /* NOTE(review): num_bssids is userspace-controlled; on 32-bit
+ * the kmalloc size could in principle wrap — confirm an upper
+ * bound is enforced by the HAL or add one here.
+ */
+ num_bssids = nla_get_u32(attr);
+ acl_data = kmalloc(sizeof(*acl_data) + (sizeof(struct mac_address) * num_bssids), GFP_KERNEL);
+ if (!acl_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ acl_data->n_acl_entries = num_bssids;
+ break;
+
+ case GSCAN_ATTRIBUTE_BLACKLIST_BSSID:
+ if (!acl_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ /* The attribute count is userspace-controlled: never write
+ * past the num_bssids entries the buffer was sized for.
+ */
+ if (i >= num_bssids) {
+ SLSI_ERR_NODEV("Too many BSSID attributes, num_bssids = %u\n", num_bssids);
+ ret = -EINVAL;
+ goto exit;
+ }
+ bssid = (u8 *)nla_data(attr);
+ SLSI_ETHER_COPY(acl_data->mac_addrs[i].addr, bssid);
+ SLSI_DBG3_NODEV(SLSI_GSCAN, "mac_addrs[%d]:%pM)\n", i, acl_data->mac_addrs[i].addr);
+ i++;
+ break;
+ default:
+ SLSI_ERR_NODEV("Unknown attribute: %d\n", type);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ if (acl_data) {
+ acl_data->acl_policy = FAPI_ACLPOLICY_BLACKLIST;
+ ret = slsi_mlme_set_acl(sdev, net_dev, 0, acl_data);
+ if (ret)
+ SLSI_ERR_NODEV("Failed to set bssid blacklist\n");
+ } else {
+ ret = -EINVAL;
+ }
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ kfree(acl_data);
+ return ret;
+}
+
+static int slsi_start_keepalive_offload(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+#ifndef CONFIG_SCSC_WLAN_NAT_KEEPALIVE_DISABLE
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ u16 ip_pkt_len = 0;
+ u8 *ip_pkt = NULL, *src_mac_addr = NULL, *dst_mac_addr = NULL;
+ u32 period = 0;
+ struct slsi_peer *peer;
+ struct sk_buff *skb;
+ struct ethhdr *ehdr;
+ int r = 0;
+ u16 host_tag = 0;
+ u8 index = 0;
+
+ /* Vendor subcmd handler: build an Ethernet/IP keepalive frame from
+ * the HAL attributes and hand it to the firmware for periodic
+ * transmission. Requires a connected STA VIF.
+ */
+ SLSI_DBG3(sdev, SLSI_MLME, "SUBCMD_START_KEEPALIVE_OFFLOAD received\n");
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+ SLSI_WARN_NODEV("ndev_vif is not activated\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ SLSI_WARN_NODEV("ndev_vif->vif_type is not FAPI_VIFTYPE_STATION\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) {
+ SLSI_WARN_NODEV("ndev_vif->sta.vif_status is not SLSI_VIF_STATUS_CONNECTED\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ peer = slsi_get_peer_from_qs(sdev, net_dev, SLSI_STA_PEER_QUEUESET);
+ if (!peer) {
+ SLSI_WARN_NODEV("peer is NULL\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN:
+ ip_pkt_len = nla_get_u16(attr);
+ break;
+
+ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT:
+ ip_pkt = (u8 *)nla_data(attr);
+ break;
+
+ case MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC:
+ period = nla_get_u32(attr);
+ break;
+
+ case MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR:
+ dst_mac_addr = (u8 *)nla_data(attr);
+ break;
+
+ case MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR:
+ src_mac_addr = (u8 *)nla_data(attr);
+ break;
+
+ case MKEEP_ALIVE_ATTRIBUTE_ID:
+ index = nla_get_u8(attr);
+ break;
+
+ default:
+ SLSI_ERR_NODEV("Unknown attribute: %d\n", type);
+ r = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /* index is a 1-based, userspace-supplied ID that is used below as
+ * keepalive_host_tag[index - 1]; reject 0 (would underflow) and
+ * anything past the end of the array (would overflow).
+ */
+ if (index == 0 ||
+ index > (sizeof(ndev_vif->sta.keepalive_host_tag) / sizeof(ndev_vif->sta.keepalive_host_tag[0]))) {
+ SLSI_ERR_NODEV("Invalid keepalive ID: %d\n", index);
+ r = -EINVAL;
+ goto exit;
+ }
+
+ /* Stop any existing request. This may fail if no request exists
+ * so ignore the return value
+ */
+ slsi_mlme_send_frame_mgmt(sdev, net_dev, NULL, 0,
+ FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME,
+ FAPI_MESSAGETYPE_ANY_OTHER,
+ ndev_vif->sta.keepalive_host_tag[index - 1], 0, 0, 0);
+
+ skb = slsi_alloc_skb_headroom(sizeof(struct ethhdr) + ip_pkt_len, GFP_KERNEL);
+ if (!skb) {
+ SLSI_WARN_NODEV("Memory allocation failed for skb\n");
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, sizeof(struct ethhdr));
+
+ /* Ethernet Header */
+ ehdr = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+
+ if (dst_mac_addr)
+ SLSI_ETHER_COPY(ehdr->h_dest, dst_mac_addr);
+ if (src_mac_addr)
+ SLSI_ETHER_COPY(ehdr->h_source, src_mac_addr);
+ ehdr->h_proto = cpu_to_be16(ETH_P_IP);
+ if (ip_pkt)
+ memcpy(skb_put(skb, ip_pkt_len), ip_pkt, ip_pkt_len);
+
+ skb->dev = net_dev;
+ skb->protocol = ETH_P_IP;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Queueset 0 AC 0 */
+ skb->queue_mapping = slsi_netif_get_peer_queue(0, 0);
+
+ /* Enabling the "Don't Fragment" Flag in the IP Header */
+ ip_hdr(skb)->frag_off |= htons(IP_DF);
+
+ /* Calculation of IP header checksum */
+ ip_hdr(skb)->check = 0;
+ ip_send_check(ip_hdr(skb));
+
+ host_tag = slsi_tx_mgmt_host_tag(sdev);
+ r = slsi_mlme_send_frame_data(sdev, net_dev, skb, FAPI_MESSAGETYPE_ANY_OTHER, host_tag,
+ 0, (period * 1000));
+ if (r == 0)
+ ndev_vif->sta.keepalive_host_tag[index - 1] = host_tag;
+ else
+ slsi_kfree_skb(skb);
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+#else
+ SLSI_DBG3_NODEV(SLSI_MLME, "SUBCMD_START_KEEPALIVE_OFFLOAD received\n");
+ SLSI_DBG3_NODEV(SLSI_MLME, "NAT Keep Alive Feature is disabled\n");
+ return -EOPNOTSUPP;
+
+#endif
+}
+
+static int slsi_stop_keepalive_offload(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+#ifndef CONFIG_SCSC_WLAN_NAT_KEEPALIVE_DISABLE
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int r = 0;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ u8 index = 0;
+
+ /* Vendor subcmd handler: cancel the periodic keepalive identified by
+ * MKEEP_ALIVE_ATTRIBUTE_ID. Requires a connected STA VIF.
+ */
+ SLSI_DBG3(sdev, SLSI_MLME, "SUBCMD_STOP_KEEPALIVE_OFFLOAD received\n");
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+ SLSI_WARN(sdev, "VIF is not activated\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ SLSI_WARN(sdev, "Not a STA VIF\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) {
+ SLSI_WARN(sdev, "VIF is not connected\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case MKEEP_ALIVE_ATTRIBUTE_ID:
+ index = nla_get_u8(attr);
+ break;
+
+ default:
+ SLSI_ERR_NODEV("Unknown attribute: %d\n", type);
+ r = -EINVAL;
+ goto exit;
+ }
+ }
+
+ /* index is a 1-based, userspace-supplied ID that is used below as
+ * keepalive_host_tag[index - 1]; reject 0 (would underflow) and
+ * anything past the end of the array (would overflow).
+ */
+ if (index == 0 ||
+ index > (sizeof(ndev_vif->sta.keepalive_host_tag) / sizeof(ndev_vif->sta.keepalive_host_tag[0]))) {
+ SLSI_ERR_NODEV("Invalid keepalive ID: %d\n", index);
+ r = -EINVAL;
+ goto exit;
+ }
+
+ r = slsi_mlme_send_frame_mgmt(sdev, net_dev, NULL, 0, FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME,
+ FAPI_MESSAGETYPE_ANY_OTHER, ndev_vif->sta.keepalive_host_tag[index - 1], 0, 0, 0);
+ ndev_vif->sta.keepalive_host_tag[index - 1] = 0;
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+#else
+ SLSI_DBG3_NODEV(SLSI_MLME, "SUBCMD_STOP_KEEPALIVE_OFFLOAD received\n");
+ SLSI_DBG3_NODEV(SLSI_MLME, "NAT Keep Alive Feature is disabled\n");
+ return -EOPNOTSUPP;
+
+#endif
+}
+
+/* Parse one nested ePNO SSID entry from @outer into @epno_ssid_params.
+ * Returns 0 on success or -EINVAL when the SSID length exceeds 32.
+ * Unknown attributes are logged and skipped.
+ */
+static inline int slsi_epno_ssid_list_get(struct slsi_dev *sdev,
+ struct slsi_epno_ssid_param *epno_ssid_params, const struct nlattr *outer)
+{
+ int type, tmp;
+ u8 epno_auth;
+ u8 len = 0;
+ const struct nlattr *inner;
+
+ nla_for_each_nested(inner, outer, tmp) {
+ type = nla_type(inner);
+ switch (type) {
+ case SLSI_ATTRIBUTE_EPNO_FLAGS:
+ epno_ssid_params->flags |= nla_get_u16(inner);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_AUTH:
+ /* Map the HAL auth bitfield to a single FAPI policy flag;
+ * precedence is open > PSK > EAP when several bits are set.
+ */
+ epno_auth = nla_get_u8(inner);
+ if (epno_auth & SLSI_EPNO_AUTH_FIELD_WEP_OPEN)
+ epno_ssid_params->flags |= FAPI_EPNOPOLICY_AUTH_OPEN;
+ else if (epno_auth & SLSI_EPNO_AUTH_FIELD_WPA_PSK)
+ epno_ssid_params->flags |= FAPI_EPNOPOLICY_AUTH_PSK;
+ else if (epno_auth & SLSI_EPNO_AUTH_FIELD_WPA_EAP)
+ epno_ssid_params->flags |= FAPI_EPNOPOLICY_AUTH_EAPOL;
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SSID_LEN:
+ len = nla_get_u8(inner);
+ if (len <= 32) {
+ epno_ssid_params->ssid_len = len;
+ } else {
+ SLSI_ERR(sdev, "SSID too long %d\n", len);
+ return -EINVAL;
+ }
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SSID:
+ /* NOTE(review): copies 'len' bytes as previously set by an
+ * SSID_LEN attribute; this assumes SSID_LEN always precedes
+ * SSID and that the SSID payload holds at least 'len' bytes —
+ * verify against nla_len(inner) in the caller's contract.
+ */
+ memcpy(epno_ssid_params->ssid, nla_data(inner), len);
+ break;
+ default:
+ SLSI_WARN(sdev, "Ignoring unknown type:%d\n", type);
+ }
+ }
+ return 0;
+}
+
+static int slsi_set_epno_ssid(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int r = 0;
+ int tmp, tmp1, type, num = 0;
+ const struct nlattr *outer, *iter;
+ u8 i = 0;
+ struct slsi_epno_ssid_param *epno_ssid_params;
+ struct slsi_epno_param *epno_params;
+
+ /* Vendor subcmd handler: parse the ePNO network list and scoring
+ * parameters from the HAL and program them via mlme. sdev->epno_active
+ * tracks whether any networks remain configured.
+ */
+ SLSI_DBG3(sdev, SLSI_GSCAN, "SUBCMD_SET_EPNO_LIST Received\n");
+
+ if (!slsi_dev_epno_supported())
+ return -EOPNOTSUPP;
+
+ /* kzalloc so fields for attributes userspace omits are 0 instead of
+ * uninitialised heap data being passed to the firmware.
+ */
+ epno_params = kzalloc((sizeof(*epno_params) + (sizeof(*epno_ssid_params) * SLSI_GSCAN_MAX_EPNO_SSIDS)),
+ GFP_KERNEL);
+ if (!epno_params) {
+ SLSI_ERR(sdev, "Mem alloc fail\n");
+ return -ENOMEM;
+ }
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+ nla_for_each_attr(iter, data, len, tmp1) {
+ type = nla_type(iter);
+ switch (type) {
+ case SLSI_ATTRIBUTE_EPNO_MINIMUM_5G_RSSI:
+ epno_params->min_5g_rssi = nla_get_u16(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_MINIMUM_2G_RSSI:
+ epno_params->min_2g_rssi = nla_get_u16(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_INITIAL_SCORE_MAX:
+ epno_params->initial_score_max = nla_get_u16(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_CUR_CONN_BONUS:
+ epno_params->current_connection_bonus = nla_get_u8(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS:
+ epno_params->same_network_bonus = nla_get_u8(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SECURE_BONUS:
+ epno_params->secure_bonus = nla_get_u8(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_5G_BONUS:
+ epno_params->band_5g_bonus = nla_get_u8(iter);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SSID_LIST:
+ nla_for_each_nested(outer, iter, tmp) {
+ /* The nested entry count is userspace-controlled and
+ * SSID_NUM may arrive after the list: bound the index
+ * here, not only in the post-loop i != num check, to
+ * prevent writing past epno_ssid[].
+ */
+ if (i >= SLSI_GSCAN_MAX_EPNO_SSIDS) {
+ SLSI_ERR(sdev, "Cannot support %d SSIDs. max %d\n", i + 1, SLSI_GSCAN_MAX_EPNO_SSIDS);
+ r = -EINVAL;
+ goto exit;
+ }
+ epno_ssid_params = &epno_params->epno_ssid[i];
+ epno_ssid_params->flags = 0;
+ r = slsi_epno_ssid_list_get(sdev, epno_ssid_params, outer);
+ if (r)
+ goto exit;
+ i++;
+ }
+ break;
+ case SLSI_ATTRIBUTE_EPNO_SSID_NUM:
+ num = nla_get_u8(iter);
+ if (num > SLSI_GSCAN_MAX_EPNO_SSIDS) {
+ SLSI_ERR(sdev, "Cannot support %d SSIDs. max %d\n", num, SLSI_GSCAN_MAX_EPNO_SSIDS);
+ r = -EINVAL;
+ goto exit;
+ }
+ epno_params->num_networks = num;
+ break;
+ default:
+ SLSI_ERR(sdev, "Invalid attribute %d\n", type);
+ r = -EINVAL;
+ goto exit;
+ }
+ }
+
+ if (i != num) {
+ SLSI_ERR(sdev, "num_ssid %d does not match ssids sent %d\n", num, i);
+ r = -EINVAL;
+ goto exit;
+ }
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ r = slsi_mlme_set_pno_list(sdev, num, epno_params, NULL);
+ if (r == 0)
+ sdev->epno_active = (num != 0);
+ else
+ sdev->epno_active = false;
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ kfree(epno_params);
+ return r;
+}
+
+static int slsi_set_hs_params(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int r = 0;
+ int tmp, tmp1, tmp2, type, num = 0;
+ const struct nlattr *outer, *inner, *iter;
+ u8 i = 0;
+ struct slsi_epno_hs2_param *epno_hs2_params_array;
+ struct slsi_epno_hs2_param *epno_hs2_params;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "SUBCMD_SET_HS_LIST Received\n");
+
+ if (!slsi_dev_epno_supported())
+ return -EOPNOTSUPP;
+
+ epno_hs2_params_array = kmalloc(sizeof(*epno_hs2_params_array) * SLSI_GSCAN_MAX_EPNO_HS2_PARAM, GFP_KERNEL);
+ if (!epno_hs2_params_array) {
+ SLSI_ERR(sdev, "Mem alloc fail\n");
+ return -ENOMEM;
+ }
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+ switch (type) {
+ case SLSI_ATTRIBUTE_EPNO_HS_PARAM_LIST:
+ nla_for_each_nested(outer, iter, tmp) {
+ epno_hs2_params = &epno_hs2_params_array[i];
+ i++;
+ nla_for_each_nested(inner, outer, tmp1) {
+ type = nla_type(inner);
+
+ switch (type) {
+ case SLSI_ATTRIBUTE_EPNO_HS_ID:
+ epno_hs2_params->id = (u32)nla_get_u32(inner);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_HS_REALM:
+ memcpy(epno_hs2_params->realm, nla_data(inner), 256);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_HS_CONSORTIUM_IDS:
+ memcpy(epno_hs2_params->roaming_consortium_ids, nla_data(inner), 16 * 8);
+ break;
+ case SLSI_ATTRIBUTE_EPNO_HS_PLMN:
+ memcpy(epno_hs2_params->plmn, nla_data(inner), 3);
+ break;
+ default:
+ SLSI_WARN(sdev, "Ignoring unknown type:%d\n", type);
+ }
+ }
+ }
+ break;
+ case SLSI_ATTRIBUTE_EPNO_HS_NUM:
+ num = nla_get_u8(iter);
+ if (num > SLSI_GSCAN_MAX_EPNO_HS2_PARAM) {
+ SLSI_ERR(sdev, "Cannot support %d SSIDs. max %d\n", num, SLSI_GSCAN_MAX_EPNO_SSIDS);
+ r = -EINVAL;
+ goto exit;
+ }
+ break;
+ default:
+ SLSI_ERR(sdev, "Invalid attribute %d\n", type);
+ r = -EINVAL;
+ goto exit;
+ }
+ }
+ if (i != num) {
+ SLSI_ERR(sdev, "num_ssid %d does not match ssids sent %d\n", num, i);
+ r = -EINVAL;
+ goto exit;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ r = slsi_mlme_set_pno_list(sdev, num, NULL, epno_hs2_params_array);
+ if (r == 0)
+ sdev->epno_active = true;
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ kfree(epno_hs2_params_array);
+ return r;
+}
+
+static int slsi_reset_hs_params(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int r;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "SUBCMD_RESET_HS_LIST Received\n");
+
+ if (!slsi_dev_epno_supported())
+ return -EOPNOTSUPP;
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(net_dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ r = slsi_mlme_set_pno_list(sdev, 0, NULL, NULL);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ sdev->epno_active = false;
+ return r;
+}
+
+static int slsi_set_rssi_monitor(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev;
+ struct netdev_vif *ndev_vif;
+ int r = 0;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ s8 min_rssi = 0, max_rssi = 0;
+ u16 enable = 0;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "Recd RSSI monitor command\n");
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ if (net_dev == NULL) {
+ SLSI_ERR(sdev, "netdev is NULL!!\n");
+ return -ENODEV;
+ }
+
+ ndev_vif = netdev_priv(net_dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (!ndev_vif->activated) {
+ SLSI_ERR(sdev, "Vif not activated\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION) {
+ SLSI_ERR(sdev, "Not a STA vif\n");
+ r = -EINVAL;
+ goto exit;
+ }
+ if (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) {
+ SLSI_ERR(sdev, "STA vif not connected\n");
+ r = -EINVAL;
+ goto exit;
+ }
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_RSSI_MONITOR_ATTRIBUTE_START:
+ enable = (u16)nla_get_u8(attr);
+ break;
+ case SLSI_RSSI_MONITOR_ATTRIBUTE_MIN_RSSI:
+ min_rssi = nla_get_s8(attr);
+ break;
+ case SLSI_RSSI_MONITOR_ATTRIBUTE_MAX_RSSI:
+ max_rssi = nla_get_s8(attr);
+ break;
+ default:
+ r = -EINVAL;
+ goto exit;
+ }
+ }
+ if (min_rssi > max_rssi) {
+ SLSI_ERR(sdev, "Invalid params, min_rssi= %d ,max_rssi = %d\n", min_rssi, max_rssi);
+ r = -EINVAL;
+ goto exit;
+ }
+ r = slsi_mlme_set_rssi_monitor(sdev, net_dev, enable, min_rssi, max_rssi);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return r;
+}
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+void slsi_lls_debug_dump_stats(struct slsi_dev *sdev, struct slsi_lls_radio_stat *radio_stat,
+ struct slsi_lls_iface_stat *iface_stat, u8 *buf, int buf_len, int num_of_radios)
+{
+ int i, j;
+
+ for (j = 0; j < num_of_radios; j++) {
+ SLSI_DBG3(sdev, SLSI_GSCAN, "radio_stat====\n");
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\tradio_id : %d, on_time : %d, tx_time : %d, rx_time : %d,"
+ "on_time_scan : %d, num_channels : %d\n", radio_stat->radio, radio_stat->on_time,
+ radio_stat->tx_time, radio_stat->rx_time, radio_stat->on_time_scan,
+ radio_stat->num_channels);
+
+ radio_stat = (struct slsi_lls_radio_stat *)((u8 *)radio_stat + sizeof(struct slsi_lls_radio_stat) +
+ (sizeof(struct slsi_lls_channel_stat) * radio_stat->num_channels));
+ }
+ SLSI_DBG3(sdev, SLSI_GSCAN, "iface_stat====\n");
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\tiface %p info : (mode : %d, mac_addr : %pM, state : %d, roaming : %d,"
+ " capabilities : %d, ssid : %s, bssid : %pM, ap_country_str : [%d%d%d])\trssi_data : %d\n",
+ iface_stat->iface, iface_stat->info.mode, iface_stat->info.mac_addr, iface_stat->info.state,
+ iface_stat->info.roaming, iface_stat->info.capabilities, iface_stat->info.ssid,
+ iface_stat->info.bssid, iface_stat->info.ap_country_str[0], iface_stat->info.ap_country_str[1],
+ iface_stat->info.ap_country_str[2], iface_stat->rssi_data);
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\tnum_peers %d\n", iface_stat->num_peers);
+ for (i = 0; i < iface_stat->num_peers; i++) {
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\t\tpeer_mac_address %pM\n", iface_stat->peer_info[i].peer_mac_address);
+ }
+
+ SLSI_DBG_HEX(sdev, SLSI_GSCAN, buf, buf_len, "return buffer\n");
+}
+#endif
+
+static int slsi_lls_set_stats(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *net_dev = NULL;
+ struct netdev_vif *ndev_vif = NULL;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ u32 mpdu_size_threshold = 0;
+ u32 aggr_stat_gathering = 0;
+ int r = 0, i;
+
+ if (!slsi_dev_lls_supported())
+ return -EOPNOTSUPP;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_WARN(sdev, "not supported in WlanLite mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case LLS_ATTRIBUTE_SET_MPDU_SIZE_THRESHOLD:
+ mpdu_size_threshold = nla_get_u32(attr);
+ break;
+
+ case LLS_ATTRIBUTE_SET_AGGR_STATISTICS_GATHERING:
+ aggr_stat_gathering = nla_get_u32(attr);
+ break;
+
+ default:
+ SLSI_ERR_NODEV("Unknown attribute: %d\n", type);
+ r = -EINVAL;
+ }
+ }
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ /* start Statistics measurements in Firmware */
+ (void)slsi_mlme_start_link_stats_req(sdev, mpdu_size_threshold, aggr_stat_gathering);
+
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ if (net_dev) {
+ ndev_vif = netdev_priv(net_dev);
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++) {
+ ndev_vif->rx_packets[i] = 0;
+ ndev_vif->tx_packets[i] = 0;
+ ndev_vif->tx_no_ack[i] = 0;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return 0;
+}
+
+static int slsi_lls_clear_stats(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ u32 stats_clear_req_mask = 0;
+ u32 stop_req = 0;
+ int r = 0, i;
+ struct net_device *net_dev = NULL;
+ struct netdev_vif *ndev_vif = NULL;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+
+ switch (type) {
+ case LLS_ATTRIBUTE_CLEAR_STOP_REQUEST_MASK:
+ stats_clear_req_mask = nla_get_u32(attr);
+ SLSI_DBG3(sdev, SLSI_GSCAN, "stats_clear_req_mask:%u\n", stats_clear_req_mask);
+ break;
+
+ case LLS_ATTRIBUTE_CLEAR_STOP_REQUEST:
+ stop_req = nla_get_u32(attr);
+ SLSI_DBG3(sdev, SLSI_GSCAN, "stop_req:%u\n", stop_req);
+ break;
+
+ default:
+ SLSI_ERR(sdev, "Unknown attribute:%d\n", type);
+ r = -EINVAL;
+ }
+ }
+
+ /* stop_req = 0 : clear the stats which are flaged 0
+ * stop_req = 1 : clear the stats which are flaged 1
+ */
+ if (!stop_req)
+ stats_clear_req_mask = ~stats_clear_req_mask;
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ (void)slsi_mlme_stop_link_stats_req(sdev, stats_clear_req_mask);
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ if (net_dev) {
+ ndev_vif = netdev_priv(net_dev);
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++) {
+ ndev_vif->rx_packets[i] = 0;
+ ndev_vif->tx_packets[i] = 0;
+ ndev_vif->tx_no_ack[i] = 0;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return 0;
+}
+
+static u32 slsi_lls_ie_to_cap(const u8 *ies, int ies_len)
+{
+ u32 capabilities = 0;
+ const u8 *ie_data;
+ const u8 *ie;
+ int ie_len;
+
+ if (!ies || ies_len == 0) {
+ SLSI_ERR_NODEV("no ie[&%p %d]\n", ies, ies_len);
+ return 0;
+ }
+ ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ies, ies_len);
+ if (ie) {
+ ie_len = ie[1];
+ ie_data = &ie[2];
+ if ((ie_len >= 4) && (ie_data[3] & SLSI_WLAN_EXT_CAPA3_INTERWORKING_ENABLED))
+ capabilities |= SLSI_LLS_CAPABILITY_INTERWORKING;
+ if ((ie_len >= 7) && (ie_data[6] & 0x01)) /* Bit48: UTF-8 ssid */
+ capabilities |= SLSI_LLS_CAPABILITY_SSID_UTF8;
+ }
+
+ ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, SLSI_WLAN_OUI_TYPE_WFA_HS20_IND, ies, ies_len);
+ if (ie)
+ capabilities |= SLSI_LLS_CAPABILITY_HS20;
+ return capabilities;
+}
+
+static void slsi_lls_iface_sta_stats(struct slsi_dev *sdev, struct netdev_vif *ndev_vif,
+ struct slsi_lls_iface_stat *iface_stat)
+{
+ int i;
+ struct slsi_lls_interface_link_layer_info *lls_info = &iface_stat->info;
+ enum slsi_lls_peer_type peer_type;
+ struct slsi_peer *peer;
+ const u8 *ie_data, *ie;
+ u8 ie_len;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+
+ if (ndev_vif->ifnum == SLSI_NET_INDEX_WLAN) {
+ lls_info->mode = SLSI_LLS_INTERFACE_STA;
+ peer_type = SLSI_LLS_PEER_AP;
+ } else {
+ lls_info->mode = SLSI_LLS_INTERFACE_P2P_CLIENT;
+ peer_type = SLSI_LLS_PEER_P2P_GO;
+ }
+
+ switch (ndev_vif->sta.vif_status) {
+ case SLSI_VIF_STATUS_CONNECTING:
+ lls_info->state = SLSI_LLS_AUTHENTICATING;
+ break;
+ case SLSI_VIF_STATUS_CONNECTED:
+ lls_info->state = SLSI_LLS_ASSOCIATED;
+ break;
+ default:
+ lls_info->state = SLSI_LLS_DISCONNECTED;
+ }
+ lls_info->roaming = ndev_vif->sta.roam_in_progress ?
+ SLSI_LLS_ROAMING_ACTIVE : SLSI_LLS_ROAMING_IDLE;
+
+ iface_stat->info.capabilities = 0;
+ lls_info->ssid[0] = 0;
+ if (ndev_vif->sta.sta_bss) {
+ ie = cfg80211_find_ie(WLAN_EID_SSID, ndev_vif->sta.sta_bss->ies->data,
+ ndev_vif->sta.sta_bss->ies->len);
+ if (ie) {
+ ie_len = ie[1];
+ ie_data = &ie[2];
+ memcpy(lls_info->ssid, ie_data, ie_len);
+ lls_info->ssid[ie_len] = 0;
+ }
+ SLSI_ETHER_COPY(lls_info->bssid, ndev_vif->sta.sta_bss->bssid);
+ ie = cfg80211_find_ie(WLAN_EID_COUNTRY, ndev_vif->sta.sta_bss->ies->data,
+ ndev_vif->sta.sta_bss->ies->len);
+ if (ie) {
+ ie_data = &ie[2];
+ memcpy(lls_info->ap_country_str, ie_data, 3);
+ iface_stat->peer_info[0].capabilities |= SLSI_LLS_CAPABILITY_COUNTRY;
+ }
+ }
+
+ peer = ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET]; /* connected AP */
+ if (peer && peer->valid && peer->assoc_ie && peer->assoc_resp_ie) {
+ iface_stat->info.capabilities |= slsi_lls_ie_to_cap(peer->assoc_ie->data, peer->assoc_ie->len);
+ if (peer->capabilities & WLAN_CAPABILITY_PRIVACY) {
+ iface_stat->peer_info[0].capabilities |= SLSI_LLS_CAPABILITY_PROTECTED;
+ iface_stat->info.capabilities |= SLSI_LLS_CAPABILITY_PROTECTED;
+ }
+ if (peer->qos_enabled) {
+ iface_stat->peer_info[0].capabilities |= SLSI_LLS_CAPABILITY_QOS;
+ iface_stat->info.capabilities |= SLSI_LLS_CAPABILITY_QOS;
+ }
+ iface_stat->peer_info[0].capabilities |= slsi_lls_ie_to_cap(peer->assoc_resp_ie->data, peer->assoc_resp_ie->len);
+
+ SLSI_ETHER_COPY(iface_stat->peer_info[0].peer_mac_address, peer->address);
+ iface_stat->peer_info[0].type = peer_type;
+ iface_stat->num_peers = 1;
+ }
+
+ for (i = MAP_AID_TO_QS(SLSI_TDLS_PEER_INDEX_MIN); i <= MAP_AID_TO_QS(SLSI_TDLS_PEER_INDEX_MAX); i++) {
+ peer = ndev_vif->peer_sta_record[i];
+ if (peer && peer->valid) {
+ SLSI_ETHER_COPY(iface_stat->peer_info[iface_stat->num_peers].peer_mac_address, peer->address);
+ iface_stat->peer_info[iface_stat->num_peers].type = SLSI_LLS_PEER_TDLS;
+ if (peer->qos_enabled)
+ iface_stat->peer_info[iface_stat->num_peers].capabilities |= SLSI_LLS_CAPABILITY_QOS;
+ iface_stat->peer_info[iface_stat->num_peers].num_rate = 0;
+ iface_stat->num_peers++;
+ }
+ }
+}
+
+static void slsi_lls_iface_ap_stats(struct slsi_dev *sdev, struct netdev_vif *ndev_vif, struct slsi_lls_iface_stat *iface_stat)
+{
+ enum slsi_lls_peer_type peer_type = SLSI_LLS_PEER_INVALID;
+ struct slsi_peer *peer;
+ int i;
+ struct net_device *dev;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+
+ /* We are AP/GO, so we advertize our own country. */
+ memcpy(iface_stat->info.ap_country_str, iface_stat->info.country_str, 3);
+
+ if (ndev_vif->ifnum == SLSI_NET_INDEX_WLAN) {
+ iface_stat->info.mode = SLSI_LLS_INTERFACE_SOFTAP;
+ peer_type = SLSI_LLS_PEER_STA;
+ } else if (ndev_vif->ifnum == SLSI_NET_INDEX_P2PX_SWLAN) {
+ dev = sdev->netdev[SLSI_NET_INDEX_P2PX_SWLAN];
+ if (SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif)) {
+ iface_stat->info.mode = SLSI_LLS_INTERFACE_P2P_GO;
+ peer_type = SLSI_LLS_PEER_P2P_CLIENT;
+ }
+ }
+
+ for (i = MAP_AID_TO_QS(SLSI_PEER_INDEX_MIN); i <= MAP_AID_TO_QS(SLSI_PEER_INDEX_MAX); i++) {
+ peer = ndev_vif->peer_sta_record[i];
+ if (peer && peer->valid) {
+ SLSI_ETHER_COPY(iface_stat->peer_info[iface_stat->num_peers].peer_mac_address, peer->address);
+ iface_stat->peer_info[iface_stat->num_peers].type = peer_type;
+ iface_stat->peer_info[iface_stat->num_peers].num_rate = 0;
+ if (peer->qos_enabled)
+ iface_stat->peer_info[iface_stat->num_peers].capabilities = SLSI_LLS_CAPABILITY_QOS;
+ iface_stat->num_peers++;
+ }
+ }
+
+ memcpy(iface_stat->info.ssid, ndev_vif->ap.ssid, ndev_vif->ap.ssid_len);
+ iface_stat->info.ssid[ndev_vif->ap.ssid_len] = 0;
+ if (ndev_vif->ap.privacy)
+ iface_stat->info.capabilities |= SLSI_LLS_CAPABILITY_PROTECTED;
+ if (ndev_vif->ap.qos_enabled)
+ iface_stat->info.capabilities |= SLSI_LLS_CAPABILITY_QOS;
+}
+
+static void slsi_lls_iface_stat_fill(struct slsi_dev *sdev,
+ struct net_device *net_dev,
+ struct slsi_lls_iface_stat *iface_stat)
+{
+ int i;
+ struct netdev_vif *ndev_vif;
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_AC_RETRIES, { SLSI_TRAFFIC_Q_BE + 1, 0 } },
+ { SLSI_PSID_UNIFI_AC_RETRIES, { SLSI_TRAFFIC_Q_BK + 1, 0 } },
+ { SLSI_PSID_UNIFI_AC_RETRIES, { SLSI_TRAFFIC_Q_VI + 1, 0 } },
+ { SLSI_PSID_UNIFI_AC_RETRIES, { SLSI_TRAFFIC_Q_VO + 1, 0 } },
+ { SLSI_PSID_UNIFI_BEACON_RECEIVED, {0, 0} },
+ { SLSI_PSID_UNIFI_PS_LEAKY_AP, {0, 0} },
+ { SLSI_PSID_UNIFI_RSSI, {0, 0} } };
+
+ iface_stat->iface = NULL;
+ iface_stat->info.mode = SLSI_LLS_INTERFACE_UNKNOWN;
+ iface_stat->info.country_str[0] = sdev->device_config.domain_info.regdomain->alpha2[0];
+ iface_stat->info.country_str[1] = sdev->device_config.domain_info.regdomain->alpha2[1];
+ iface_stat->info.country_str[2] = ' '; /* 3rd char of our country code is ASCII<space> */
+
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++)
+ iface_stat->ac[i].ac = SLSI_LLS_AC_MAX;
+
+ if (!net_dev)
+ return;
+
+ ndev_vif = netdev_priv(net_dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (!ndev_vif->activated)
+ goto exit;
+
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ slsi_lls_iface_sta_stats(sdev, ndev_vif, iface_stat);
+ } else if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+ slsi_lls_iface_ap_stats(sdev, ndev_vif, iface_stat);
+ SLSI_ETHER_COPY(iface_stat->info.bssid, net_dev->dev_addr);
+ }
+ SLSI_ETHER_COPY(iface_stat->info.mac_addr, net_dev->dev_addr);
+
+ mibrsp.dataLength = 10 * sizeof(get_values) / sizeof(get_values[0]);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes for interface MIBs\n", mibrsp.dataLength);
+ goto exit;
+ }
+
+ values = slsi_read_mibs(sdev, net_dev, get_values, sizeof(get_values) / sizeof(get_values[0]), &mibrsp);
+ if (!values)
+ goto exit;
+
+ for (i = 0; i < SLSI_LLS_AC_MAX; i++) {
+ if (values[i].type == SLSI_MIB_TYPE_UINT) {
+ iface_stat->ac[i].ac = slsi_fapi_to_android_traffic_q(i);
+ iface_stat->ac[i].retries = values[i].u.uintValue;
+ iface_stat->ac[i].rx_mpdu = ndev_vif->rx_packets[i];
+ iface_stat->ac[i].tx_mpdu = ndev_vif->tx_packets[i];
+ iface_stat->ac[i].mpdu_lost = ndev_vif->tx_no_ack[i];
+ } else {
+ SLSI_WARN(sdev, "LLS: expected datatype 1 but received %d\n", values[i].type);
+ }
+ }
+
+ if (values[4].type == SLSI_MIB_TYPE_UINT)
+ iface_stat->beacon_rx = values[4].u.uintValue;
+
+ if (values[5].type == SLSI_MIB_TYPE_UINT) {
+ iface_stat->leaky_ap_detected = values[5].u.uintValue;
+ iface_stat->leaky_ap_guard_time = 5; /* 5 milli sec. As mentioned in lls document */
+ }
+
+ if (values[6].type == SLSI_MIB_TYPE_INT)
+ iface_stat->rssi_data = values[6].u.intValue;
+
+exit:
+ kfree(values);
+ kfree(mibrsp.data);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+void slsi_check_num_radios(struct slsi_dev *sdev)
+{
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_RADIO_SCAN_TIME, { 1, 0 } } };
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_WARN(sdev, "not supported in WlanLite mode\n");
+ return;
+ }
+
+ /* Expect each mib length in response is <= 15 So assume 15 bytes for each MIB */
+ mibrsp.dataLength = 15 * ARRAY_SIZE(get_values);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+ sdev->lls_num_radio = 0;
+ return;
+ }
+
+ values = slsi_read_mibs(sdev, NULL, get_values, ARRAY_SIZE(get_values), &mibrsp);
+ if (!values) {
+ sdev->lls_num_radio = 0;
+ } else {
+ sdev->lls_num_radio = values[0].type == SLSI_MIB_TYPE_NONE ? 1 : 2;
+ kfree(values);
+ }
+
+ kfree(mibrsp.data);
+}
+
+static void slsi_lls_radio_stat_fill(struct slsi_dev *sdev, struct net_device *dev,
+ struct slsi_lls_radio_stat *radio_stat,
+ int max_chan_count, int radio_index, int twoorfive)
+{
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_data supported_chan_mib = { 0, NULL };
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_RADIO_SCAN_TIME, { radio_index, 0 } },
+ { SLSI_PSID_UNIFI_RADIO_RX_TIME, { radio_index, 0 } },
+ { SLSI_PSID_UNIFI_RADIO_TX_TIME, { radio_index, 0 } },
+ { SLSI_PSID_UNIFI_RADIO_ON_TIME, { radio_index, 0 } },
+ { SLSI_PSID_UNIFI_SUPPORTED_CHANNELS, { 0, 0 } } };
+ u32 *radio_data[] = {&radio_stat->on_time_scan, &radio_stat->rx_time,
+ &radio_stat->tx_time, &radio_stat->on_time};
+ int i, j, chan_count, chan_start, k;
+
+ radio_stat->radio = radio_index;
+
+ /* Expect each mib length in response is <= 15 So assume 15 bytes for each MIB */
+ mibrsp.dataLength = 15 * sizeof(get_values) / sizeof(get_values[0]);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (mibrsp.data == NULL) {
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+ return;
+ }
+ values = slsi_read_mibs(sdev, NULL, get_values, sizeof(get_values) / sizeof(get_values[0]), &mibrsp);
+ if (!values)
+ goto exit_with_mibrsp;
+
+ for (i = 0; i < (sizeof(get_values) / sizeof(get_values[0])) - 1; i++) {
+ if (values[i].type == SLSI_MIB_TYPE_UINT) {
+ *radio_data[i] = values[i].u.uintValue;
+ } else {
+ SLSI_ERR(sdev, "invalid type. iter:%d", i);
+ }
+ }
+ if (values[4].type != SLSI_MIB_TYPE_OCTET) {
+ SLSI_ERR(sdev, "Supported_Chan invalid type.");
+ goto exit_with_values;
+ }
+
+ supported_chan_mib = values[4].u.octetValue;
+ for (j = 0; j < supported_chan_mib.dataLength / 2; j++) {
+ struct slsi_lls_channel_info *radio_chan;
+
+ chan_start = supported_chan_mib.data[j * 2];
+ chan_count = supported_chan_mib.data[j * 2 + 1];
+ if (radio_stat->num_channels + chan_count > max_chan_count)
+ chan_count = max_chan_count - radio_stat->num_channels;
+ if (chan_start == 1 && (twoorfive & BIT(0))) { /* for 2.4GHz */
+ for (k = 0; k < chan_count; k++) {
+ radio_chan = &radio_stat->channels[radio_stat->num_channels + k].channel;
+ if (k + chan_start == 14)
+ radio_chan->center_freq = 2484;
+ else
+ radio_chan->center_freq = 2407 + (chan_start + k) * 5;
+ radio_chan->width = SLSI_LLS_CHAN_WIDTH_20;
+ }
+ radio_stat->num_channels += chan_count;
+ } else if (chan_start != 1 && (twoorfive & BIT(1))) {
+ /* for 5GHz */
+ for (k = 0; k < chan_count; k++) {
+ radio_chan = &radio_stat->channels[radio_stat->num_channels + k].channel;
+ radio_chan->center_freq = 5000 + (chan_start + (k * 4)) * 5;
+ radio_chan->width = SLSI_LLS_CHAN_WIDTH_20;
+ }
+ radio_stat->num_channels += chan_count;
+ }
+ }
+exit_with_values:
+ kfree(values);
+exit_with_mibrsp:
+ kfree(mibrsp.data);
+}
+
+static int slsi_lls_fill(struct slsi_dev *sdev, u8 **src_buf)
+{
+ struct net_device *net_dev = NULL;
+ struct slsi_lls_radio_stat *radio_stat;
+ struct slsi_lls_radio_stat *radio_stat_temp;
+ struct slsi_lls_iface_stat *iface_stat;
+ int buf_len = 0;
+ int max_chan_count = 0;
+ u8 *buf;
+ int num_of_radios_supported;
+ int i = 0;
+ int radio_type[2] = {BIT(0), BIT(1)};
+
+ if (sdev->lls_num_radio == 0) {
+ slsi_check_num_radios(sdev);
+ if (sdev->lls_num_radio == 0)
+ return -EIO;
+ }
+
+ num_of_radios_supported = sdev->lls_num_radio;
+ net_dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+
+ if (sdev->wiphy->bands[NL80211_BAND_2GHZ])
+ max_chan_count = sdev->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+ if (sdev->wiphy->bands[NL80211_BAND_5GHZ])
+ max_chan_count += sdev->wiphy->bands[NL80211_BAND_5GHZ]->n_channels;
+ buf_len = (int)((num_of_radios_supported * sizeof(struct slsi_lls_radio_stat))
+ + sizeof(struct slsi_lls_iface_stat)
+ + sizeof(u8)
+ + (sizeof(struct slsi_lls_peer_info) * SLSI_ADHOC_PEER_CONNECTIONS_MAX)
+ + (sizeof(struct slsi_lls_channel_stat) * max_chan_count));
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ SLSI_ERR(sdev, "No mem. Size:%d\n", buf_len);
+ return -ENOMEM;
+ }
+ buf[0] = num_of_radios_supported;
+ *src_buf = buf;
+ iface_stat = (struct slsi_lls_iface_stat *)(buf + sizeof(u8));
+ slsi_lls_iface_stat_fill(sdev, net_dev, iface_stat);
+
+ radio_stat = (struct slsi_lls_radio_stat *)(buf + sizeof(u8) + sizeof(struct slsi_lls_iface_stat) +
+ (sizeof(struct slsi_lls_peer_info) * iface_stat->num_peers));
+ radio_stat_temp = radio_stat;
+ if (num_of_radios_supported == 1) {
+ radio_type[0] = BIT(0) | BIT(1);
+ slsi_lls_radio_stat_fill(sdev, net_dev, radio_stat, max_chan_count, 0, radio_type[0]);
+ radio_stat = (struct slsi_lls_radio_stat *)((u8 *)radio_stat + sizeof(struct slsi_lls_radio_stat) +
+ (sizeof(struct slsi_lls_channel_stat) * radio_stat->num_channels));
+ } else {
+ for (i = 1; i <= num_of_radios_supported ; i++) {
+ slsi_lls_radio_stat_fill(sdev, net_dev, radio_stat, max_chan_count, i, radio_type[i - 1]);
+ radio_stat = (struct slsi_lls_radio_stat *)((u8 *)radio_stat +
+ sizeof(struct slsi_lls_radio_stat) + (sizeof(struct slsi_lls_channel_stat)
+ * radio_stat->num_channels));
+ }
+ }
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ if (slsi_dev_llslogs_supported())
+ slsi_lls_debug_dump_stats(sdev, radio_stat_temp, iface_stat, buf, buf_len, num_of_radios_supported);
+#endif
+ return buf_len;
+}
+
+static int slsi_lls_get_stats(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret;
+ u8 *buf = NULL;
+ int buf_len;
+
+ if (!slsi_dev_lls_supported())
+ return -EOPNOTSUPP;
+
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_WARN(sdev, "not supported in WlanLite mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!sdev) {
+ SLSI_ERR(sdev, "sdev is Null\n");
+ return -EINVAL;
+ }
+
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ /* In case of lower layer failure do not read LLS MIBs */
+ if (sdev->mlme_blocked)
+ buf_len = -EIO;
+ else
+ buf_len = slsi_lls_fill(sdev, &buf);
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+
+ if (buf_len > 0) {
+ ret = slsi_vendor_cmd_reply(wiphy, buf, buf_len);
+ if (ret)
+ SLSI_ERR_NODEV("vendor cmd reply failed (err:%d)\n", ret);
+ } else {
+ ret = buf_len;
+ }
+ kfree(buf);
+ return ret;
+}
+
+static int slsi_gscan_set_oui(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+
+#ifdef CONFIG_SCSC_WLAN_ENABLE_MAC_RANDOMISATION
+
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = wdev->netdev;
+ struct netdev_vif *ndev_vif;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ u8 scan_oui[6];
+
+ memset(&scan_oui, 0, 6);
+
+ if (!dev) {
+ SLSI_ERR(sdev, "dev is NULL!!\n");
+ return -EINVAL;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ sdev->scan_addr_set = 0;
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_NL_ATTRIBUTE_PNO_RANDOM_MAC_OUI:
+ {
+ memcpy(&scan_oui, nla_data(attr), 3);
+ memcpy(sdev->scan_mac_addr, scan_oui, 6);
+ sdev->scan_addr_set = 1;
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ SLSI_ERR(sdev, "Invalid type : %d\n", type);
+ break;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+#endif
+ return ret;
+}
+
+static int slsi_get_feature_set(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ u32 feature_set = 0;
+ int ret = 0;
+
+ SLSI_DBG3_NODEV(SLSI_GSCAN, "\n");
+
+ feature_set |= SLSI_WIFI_HAL_FEATURE_RSSI_MONITOR;
+ feature_set |= SLSI_WIFI_HAL_FEATURE_CONTROL_ROAMING;
+ feature_set |= SLSI_WIFI_HAL_FEATURE_TDLS | SLSI_WIFI_HAL_FEATURE_TDLS_OFFCHANNEL;
+#ifndef CONFIG_SCSC_WLAN_NAT_KEEPALIVE_DISABLE
+ feature_set |= SLSI_WIFI_HAL_FEATURE_MKEEP_ALIVE;
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+ feature_set |= SLSI_WIFI_HAL_FEATURE_LOGGER;
+#endif
+ if (slsi_dev_gscan_supported())
+ feature_set |= SLSI_WIFI_HAL_FEATURE_GSCAN;
+ if (slsi_dev_lls_supported())
+ feature_set |= SLSI_WIFI_HAL_FEATURE_LINK_LAYER_STATS;
+ if (slsi_dev_epno_supported())
+ feature_set |= SLSI_WIFI_HAL_FEATURE_HAL_EPNO;
+ if (slsi_dev_nan_supported(SDEV_FROM_WIPHY(wiphy)))
+ feature_set |= SLSI_WIFI_HAL_FEATURE_NAN;
+ if (slsi_dev_rtt_supported()) {
+ feature_set |= SLSI_WIFI_HAL_FEATURE_D2D_RTT;
+ feature_set |= SLSI_WIFI_HAL_FEATURE_D2AP_RTT;
+ }
+
+ ret = slsi_vendor_cmd_reply(wiphy, &feature_set, sizeof(feature_set));
+
+ return ret;
+}
+
+static int slsi_set_country_code(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ char country_code[SLSI_COUNTRY_CODE_LEN];
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "Received country code command\n");
+
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_NL_ATTRIBUTE_COUNTRY_CODE:
+ {
+ if (nla_len(attr) < (SLSI_COUNTRY_CODE_LEN - 1)) {
+ ret = -EINVAL;
+ SLSI_ERR(sdev, "Insufficient Country Code Length : %d\n", nla_len(attr));
+ return ret;
+ }
+ memcpy(country_code, nla_data(attr), (SLSI_COUNTRY_CODE_LEN - 1));
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ SLSI_ERR(sdev, "Invalid type : %d\n", type);
+ return ret;
+ }
+ }
+ ret = slsi_set_country_update_regd(sdev, country_code, SLSI_COUNTRY_CODE_LEN);
+ if (ret < 0)
+ SLSI_ERR(sdev, "Set country failed ret:%d\n", ret);
+ return ret;
+}
+
+static int slsi_apf_read_filter(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = 0;
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = wdev->netdev;
+ u8 *host_dst;
+ int datalen;
+
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_APF_READ_FILTER\n");
+ SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+ if (!sdev->device_config.fw_apf_supported) {
+ SLSI_WARN(sdev, "APF not supported by the firmware.\n");
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return -ENOTSUPP;
+ }
+
+ ret = slsi_mlme_read_apf_request(sdev, dev, &host_dst, &datalen);
+ if (!ret)
+ ret = slsi_vendor_cmd_reply(wiphy, host_dst, datalen);
+
+ SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+ return ret;
+}
+
+/* Vendor sub-command SUBCMD_APF_GET_CAPABILITIES: query the firmware's APF
+ * (Android Packet Filter) version and maximum program length and return them
+ * to userspace as nested vendor attributes.
+ *
+ * Fix vs. original: the NL80211_ATTR_VENDOR_DATA nest was opened but never
+ * closed with nla_nest_end(), leaving the nest attribute's length field
+ * unset in the reply.
+ */
+static int slsi_apf_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	int ret = 0;
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	struct sk_buff *nl_skb;
+	struct nlattr *nlattr_start;
+
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_APF_GET_CAPABILITIES\n");
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	if (!sdev->device_config.fw_apf_supported) {
+		SLSI_WARN(sdev, "APF not supported by the firmware.\n");
+		ret = -ENOTSUPP;
+		goto exit;
+	}
+	memset(&sdev->device_config.apf_cap, 0, sizeof(struct slsi_apf_capabilities));
+
+	ret = slsi_mib_get_apf_cap(sdev, dev);
+	if (ret != 0) {
+		SLSI_ERR(sdev, "Failed to read mib\n");
+		goto exit;
+	}
+	SLSI_DBG3(sdev, SLSI_GSCAN, "APF version: %d Max_Length:%d\n", sdev->device_config.apf_cap.version,
+		  sdev->device_config.apf_cap.max_length);
+	nl_skb = cfg80211_vendor_cmd_alloc_reply_skb(sdev->wiphy, NLMSG_DEFAULT_SIZE);
+	if (!nl_skb) {
+		SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	nlattr_start = nla_nest_start(nl_skb, NL80211_ATTR_VENDOR_DATA);
+	if (!nlattr_start) {
+		SLSI_ERR(sdev, "failed to put NL80211_ATTR_VENDOR_DATA\n");
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = nla_put_u16(nl_skb, SLSI_APF_ATTR_VERSION, sdev->device_config.apf_cap.version);
+	ret |= nla_put_u16(nl_skb, SLSI_APF_ATTR_MAX_LEN, sdev->device_config.apf_cap.max_length);
+	if (ret) {
+		SLSI_ERR(sdev, "Error in nla_put*:%x\n", ret);
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		goto exit;
+	}
+	/* Close the vendor-data nest so its length is patched into the message. */
+	nla_nest_end(nl_skb, nlattr_start);
+
+	ret = cfg80211_vendor_cmd_reply(nl_skb);
+	if (ret)
+		SLSI_ERR(sdev, "apf_get_capabilities cfg80211_vendor_cmd_reply failed :%d\n", ret);
+exit:
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+	return ret;
+}
+
+/* Vendor sub-command SUBCMD_APF_SET_FILTER: receive an APF program from
+ * userspace and install it in the firmware.
+ *
+ * Fixes vs. original:
+ *  - 'program' could be used uninitialised (or never allocated) when
+ *    SLSI_APF_ATTR_PROGRAM arrived before/without SLSI_APF_ATTR_PROGRAM_LEN;
+ *  - the kmalloc() result was not checked;
+ *  - the program buffer was never freed, leaking on every invocation;
+ *  - the attribute payload length is now validated before the memcpy().
+ */
+static int slsi_apf_set_filter(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	int ret = 0;
+	int temp;
+	int type;
+	const struct nlattr *attr;
+	u32 program_len = 0;
+	u8 *program = NULL;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "Received apf_set_filter command\n");
+	SLSI_MUTEX_LOCK(sdev->device_config_mutex);
+	if (!sdev->device_config.fw_apf_supported) {
+		SLSI_WARN(sdev, "APF not supported by the firmware.\n");
+		ret = -ENOTSUPP;
+		goto exit;
+	}
+
+	if (!dev) {
+		SLSI_ERR(sdev, "dev is NULL!!\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	nla_for_each_attr(attr, data, len, temp) {
+		type = nla_type(attr);
+		switch (type) {
+		case SLSI_APF_ATTR_PROGRAM_LEN:
+			kfree(program);		/* guard against a repeated length attribute */
+			program_len = nla_get_u32(attr);
+			program = kmalloc(program_len, GFP_KERNEL);
+			if (!program) {
+				ret = -ENOMEM;
+				goto exit;
+			}
+			break;
+		case SLSI_APF_ATTR_PROGRAM:
+			/* The length attribute must precede the program payload,
+			 * and the payload must be at least program_len bytes.
+			 */
+			if (!program || nla_len(attr) < (int)program_len) {
+				SLSI_ERR(sdev, "Invalid APF program/program length\n");
+				ret = -EINVAL;
+				goto exit;
+			}
+			memcpy(program, (u8 *)nla_data(attr), program_len);
+			break;
+		default:
+			SLSI_ERR(sdev, "Invalid type : %d\n", type);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+	if (!program) {
+		SLSI_ERR(sdev, "No APF program received\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = slsi_mlme_install_apf_request(sdev, dev, program, program_len);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "apf_set_filter failed ret:%d\n", ret);
+		ret = -EINVAL;
+	}
+exit:
+	kfree(program);		/* kfree(NULL) is a no-op */
+	SLSI_MUTEX_UNLOCK(sdev->device_config_mutex);
+	return ret;
+}
+
+/* Vendor sub-command SUBCMD_GET_RTT_CAPABILITIES: read the RTT capability
+ * MIB from firmware and reply with the raw slsi_rtt_capabilities struct.
+ */
+static int slsi_rtt_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	struct slsi_rtt_capabilities rtt_cap;
+	int r;
+
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_GET_RTT_CAPABILITIES\n");
+	if (!slsi_dev_rtt_supported()) {
+		SLSI_WARN(sdev, "RTT not supported.\n");
+		return -ENOTSUPP;
+	}
+
+	memset(&rtt_cap, 0, sizeof(rtt_cap));
+	r = slsi_mib_get_rtt_cap(sdev, dev, &rtt_cap);
+	if (r) {
+		SLSI_ERR(sdev, "Failed to read mib\n");
+		return r;
+	}
+
+	r = slsi_vendor_cmd_reply(wiphy, &rtt_cap, sizeof(rtt_cap));
+	if (r)
+		SLSI_ERR_NODEV("rtt_get_capabilities vendor cmd reply failed (err = %d)\n", r);
+	return r;
+}
+
+/* Vendor sub-command SUBCMD_RTT_RANGE_START: parse the per-target RTT
+ * configuration attributes and issue an mlme add-range request.
+ *
+ * Fixes vs. original:
+ *  - memory leak: nl_rtt_params was not freed on the
+ *    !CONFIG_SCSC_WIFI_NAN_ENABLE "NAN not enabled" error path;
+ *  - heap overrun: the TARGET_INFO loop incremented j without bounding it by
+ *    the advertised target count;
+ *  - logging: the inner default case logged the outer attribute type instead
+ *    of the unknown inner attribute type.
+ */
+static int slsi_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	int r, type, j = 0;
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
+	struct netdev_vif *ndev_vif;
+#endif
+	struct net_device *dev = wdev->netdev;
+	struct slsi_rtt_config *nl_rtt_params;
+	const struct nlattr *iter, *outer, *inner;
+	u8 source_addr[ETH_ALEN];
+	int tmp, tmp1, tmp2;
+	u16 rtt_id = 0;
+	u8 num_devices = 0;
+	u16 rtt_peer = SLSI_RTT_PEER_AP;
+	u16 vif_idx = 0;
+	u16 channel_freq = 0;
+
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_RTT_RANGE_START\n");
+	if (!slsi_dev_rtt_supported()) {
+		SLSI_ERR(sdev, "RTT not supported.\n");
+		return WIFI_HAL_ERROR_NOT_SUPPORTED;
+	}
+	/* First pass: pick up the target count and the request id. */
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+		case SLSI_RTT_ATTRIBUTE_TARGET_CNT:
+			num_devices = nla_get_u8(iter);
+			SLSI_DBG1_NODEV(SLSI_GSCAN, "Target cnt %d\n", num_devices);
+			break;
+		case SLSI_RTT_ATTRIBUTE_TARGET_ID:
+			rtt_id = nla_get_u16(iter);
+			SLSI_DBG1_NODEV(SLSI_GSCAN, "Target id %d\n", rtt_id);
+			break;
+		default:
+			SLSI_ERR_NODEV("Unexpected RTT attribute:type - %d\n", type);
+			break;
+		}
+	}
+	if (!num_devices) {
+		SLSI_ERR_NODEV("No device found for rtt configuration!\n");
+		return -EINVAL;
+	}
+	/* Allocate memory for the received config params */
+	nl_rtt_params = kcalloc(num_devices, sizeof(*nl_rtt_params), GFP_KERNEL);
+	if (!nl_rtt_params) {
+		SLSI_ERR_NODEV("Failed for allocate memory for config rtt_param\n");
+		return -ENOMEM;
+	}
+	/* Second pass: fill one slsi_rtt_config entry per nested target record. */
+	nla_for_each_attr(iter, data, len, tmp) {
+		type = nla_type(iter);
+		switch (type) {
+		case SLSI_RTT_ATTRIBUTE_TARGET_INFO:
+			nla_for_each_nested(outer, iter, tmp1) {
+				if (j >= num_devices) {
+					SLSI_ERR_NODEV("More targets than advertised count %d\n", num_devices);
+					break;
+				}
+				nla_for_each_nested(inner, outer, tmp2) {
+					switch (nla_type(inner)) {
+					case SLSI_RTT_ATTRIBUTE_TARGET_MAC:
+						memcpy(nl_rtt_params[j].peer_addr, nla_data(inner), ETH_ALEN);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_TYPE:
+						nl_rtt_params[j].type = nla_get_u16(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_PEER:
+						rtt_peer = nla_get_u16(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_CHAN_FREQ:
+						channel_freq = nla_get_u16(inner);
+						/* FAPI channel frequency is in 500kHz units */
+						nl_rtt_params[j].channel_freq = channel_freq * 2;
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_PERIOD:
+						nl_rtt_params[j].burst_period = nla_get_u8(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_NUM_BURST:
+						nl_rtt_params[j].num_burst = nla_get_u8(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST:
+						nl_rtt_params[j].num_frames_per_burst = nla_get_u8(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR:
+						nl_rtt_params[j].num_retries_per_ftmr = nla_get_u8(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_BURST_DURATION:
+						nl_rtt_params[j].burst_duration = nla_get_u8(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_PREAMBLE:
+						nl_rtt_params[j].preamble = nla_get_u16(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_BW:
+						nl_rtt_params[j].bw = nla_get_u16(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_LCI:
+						nl_rtt_params[j].LCI_request = nla_get_u16(inner);
+						break;
+					case SLSI_RTT_ATTRIBUTE_TARGET_LCR:
+						nl_rtt_params[j].LCR_request = nla_get_u16(inner);
+						break;
+					default:
+						SLSI_ERR_NODEV("Unknown RTT INFO ATTRIBUTE type: %d\n",
+							       nla_type(inner));
+						break;
+					}
+				}
+				j++;
+			}
+			break;
+		default:
+			SLSI_ERR_NODEV("No ATTRIBUTE_Target cnt - %d\n", type);
+			break;
+		}
+	}
+
+	SLSI_ETHER_COPY(source_addr, dev->dev_addr);
+
+	if (rtt_peer == SLSI_RTT_PEER_NAN) {
+#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
+		if (!slsi_dev_nan_supported(sdev)) {
+			SLSI_ERR(sdev, "NAN not supported(mib:%d)\n", sdev->nan_enabled);
+			kfree(nl_rtt_params);
+			return WIFI_HAL_ERROR_NOT_SUPPORTED;
+		}
+		ndev_vif = netdev_priv(dev);
+		if (ndev_vif->activated) {
+			vif_idx = ndev_vif->vif_type;
+		} else {
+			SLSI_ERR(sdev, "NAN vif not activated\n");
+			kfree(nl_rtt_params);
+			return -EINVAL;
+		}
+#else
+		SLSI_ERR(sdev, "NAN not enabled\n");
+		kfree(nl_rtt_params);	/* fix: was leaked on this path */
+		return -ENOTSUPP;
+#endif
+	}
+	r = slsi_mlme_add_range_req(sdev, num_devices, nl_rtt_params, rtt_id, vif_idx, source_addr);
+	if (r) {
+		r = -EINVAL;
+		SLSI_ERR_NODEV("Failed to set rtt config\n");
+	} else {
+		sdev->rtt_vif[rtt_id] = vif_idx;
+		SLSI_DBG1_NODEV(SLSI_GSCAN, "Successfully set rtt config\n");
+	}
+	kfree(nl_rtt_params);
+	return r;
+}
+
+/* Decode a FAPI firmware rate word (@fw_rate) into 802.11 rate information
+ * (preamble code, NSS, bandwidth index, MCS, data rate — apparently in
+ * 100kbps units, matching the legacy table below) and append it to @nl_skb
+ * as either the TX (@tx_rate true) or RX attribute set.
+ *
+ * @res carries the caller's accumulated nla_put error bits and is returned
+ * with any new put errors OR-ed in (0 means all puts so far succeeded).
+ */
+int slsi_tx_rate_calc(struct sk_buff *nl_skb, u16 fw_rate, int res, bool tx_rate)
+{
+	u8 preamble;
+	/* Non-HT rate index -> legacy rate (1..54 Mbps expressed as 10..540). */
+	const u32 fw_rate_idx_to_80211_rate[] = { 0, 10, 20, 55, 60, 90, 110, 120, 180, 240, 360, 480, 540 };
+	u32 data_rate = 0;
+	u32 mcs = 0, nss = 0;
+	u32 chan_bw_idx = 0;
+	int gi_idx;
+
+	/* The selector field (bits 15:14) is reported directly as the preamble code. */
+	preamble = (fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) >> 14;
+	if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_NON_HT_SELECTED) {
+		u16 fw_rate_idx = fw_rate & SLSI_FW_API_RATE_INDEX_FIELD;
+
+		if (fw_rate > 0 && fw_rate_idx < ARRAY_SIZE(fw_rate_idx_to_80211_rate))
+			data_rate = fw_rate_idx_to_80211_rate[fw_rate_idx];
+	} else if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_HT_SELECTED) {
+		/* HT: NSS from bits 7:6, BW index from bits 10:9, MCS from the low bits. */
+		nss = (SLSI_FW_API_RATE_HT_NSS_FIELD & fw_rate) >> 6;
+		chan_bw_idx = (fw_rate & SLSI_FW_API_RATE_BW_FIELD) >> 9;
+		gi_idx = ((fw_rate & SLSI_FW_API_RATE_SGI) == SLSI_FW_API_RATE_SGI) ? 1 : 0;
+		mcs = SLSI_FW_API_RATE_HT_MCS_FIELD & fw_rate;
+		if ((chan_bw_idx < 2) && (mcs <= 7)) {
+			data_rate = (nss + 1) * slsi_rates_table[chan_bw_idx][gi_idx][mcs];
+		} else if (mcs == 32 && chan_bw_idx == 1) {
+			/* MCS32 (40MHz duplicate format): 6.0 Mbps, or 6.7 with short GI. */
+			if (gi_idx == 1)
+				data_rate = (nss + 1) * 67;
+			else
+				data_rate = (nss + 1) * 60;
+		} else {
+			SLSI_WARN_NODEV("FW DATA RATE decode error fw_rate:%x, bw:%x, mcs_idx:%x, nss : %d\n",
+					fw_rate, chan_bw_idx, mcs, nss);
+		}
+	} else if ((fw_rate & SLSI_FW_API_RATE_HT_SELECTOR_FIELD) == SLSI_FW_API_RATE_VHT_SELECTED) {
+		/* report vht rate in legacy units and not as mcs index. reason: upper layers may still be not
+		 * updated with vht msc table.
+		 */
+		chan_bw_idx = (fw_rate & SLSI_FW_API_RATE_BW_FIELD) >> 9;
+		gi_idx = ((fw_rate & SLSI_FW_API_RATE_SGI) == SLSI_FW_API_RATE_SGI) ? 1 : 0;
+		/* Calculate NSS --> bits 6 to 4*/
+		nss = (SLSI_FW_API_RATE_VHT_NSS_FIELD & fw_rate) >> 4;
+		mcs = SLSI_FW_API_RATE_VHT_MCS_FIELD & fw_rate;
+		/* Bandwidth (BW): 0x0= 20 MHz, 0x1= 40 MHz, 0x2= 80 MHz, 0x3= 160/ 80+80 MHz. 0x3 is not supported */
+		if ((chan_bw_idx <= 2) && (mcs <= 9))
+			data_rate = (nss + 1) * slsi_rates_table[chan_bw_idx][gi_idx][mcs];
+		else
+			SLSI_WARN_NODEV("FW DATA RATE decode error fw_rate:%x, bw:%x, mcs_idx:%x,nss : %d\n",
+					fw_rate, chan_bw_idx, mcs, nss);
+		/* NOTE(review): nss is only bumped when > 1 (2 streams report as 3),
+		 * which differs from the HT path above — confirm this is intended.
+		 */
+		if (nss > 1)
+			nss += 1;
+	}
+
+	if (tx_rate) {
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TX_PREAMBLE, preamble);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TX_NSS, nss);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TX_BW, chan_bw_idx);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TX_MCS, mcs);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TX_RATE, data_rate);
+	} else {
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RX_PREAMBLE, preamble);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RX_NSS, nss);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RX_BW, chan_bw_idx);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RX_MCS, mcs);
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RX_RATE, data_rate);
+	}
+	return res;
+}
+
+/* mlme_range_ind handler: walk the firmware's bulk RTT result records and
+ * forward them to userspace as an SLSI_NL80211_RTT_RESULT_EVENT vendor event.
+ *
+ * The bulk data is a sequence of fixed-layout per-peer records, each prefixed
+ * by a 7-byte fapi_ie_generic header and optionally followed by up to two
+ * Measurement Report elements (LCI/LCR).  Consumes @skb.
+ */
+void slsi_rx_range_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+	struct netdev_vif *ndev_vif = netdev_priv(dev);
+	u32 i, tm;
+	u16 rtt_entry_count = fapi_get_u16(skb, u.mlme_range_ind.entries);
+	u16 rtt_id = fapi_get_u16(skb, u.mlme_range_ind.rtt_id);
+	u32 tmac = fapi_get_u32(skb, u.mlme_range_ind.spare_3);	/* firmware timestamp reference */
+	int data_len = fapi_get_datalen(skb);
+	u8 *ip_ptr, *start_ptr;	/* cursor into / start of the bulk record data */
+	u16 tx_data, rx_data;
+	struct sk_buff *nl_skb;
+	int res = 0;		/* OR-accumulated nla_put error bits */
+	struct nlattr *nlattr_nested;
+	struct timespec ts;
+	u64 tkernel;
+	u8 rep_cnt = 0;
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE,
+					     SLSI_NL80211_RTT_RESULT_EVENT, GFP_KERNEL);
+#else
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE, SLSI_NL80211_RTT_RESULT_EVENT,
+					     GFP_KERNEL);
+#endif
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "Event: %s(%d)\n",
+			slsi_print_event_name(SLSI_NL80211_RTT_RESULT_EVENT), SLSI_NL80211_RTT_RESULT_EVENT);
+#endif
+
+	if (!nl_skb) {
+		SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+		goto exit;
+	}
+
+	ip_ptr = fapi_get_data(skb);
+	start_ptr = fapi_get_data(skb);
+	res |= nla_put_u16(nl_skb, SLSI_RTT_ATTRIBUTE_RESULT_CNT, rtt_entry_count);
+	res |= nla_put_u16(nl_skb, SLSI_RTT_ATTRIBUTE_TARGET_ID, rtt_id);
+	res |= nla_put_u8(nl_skb, SLSI_RTT_ATTRIBUTE_RESULTS_PER_TARGET, 1);
+	for (i = 0; i < rtt_entry_count; i++) {
+		nlattr_nested = nla_nest_start(nl_skb, SLSI_RTT_ATTRIBUTE_RESULT);
+		if (!nlattr_nested) {
+			SLSI_ERR(sdev, "Error in nla_nest_start\n");
+			/* Dont use slsi skb wrapper for this free */
+			kfree_skb(nl_skb);
+			goto exit;
+		}
+		ip_ptr += 7;		/*skip first 7 bytes for fapi_ie_generic */
+		res |= nla_put(nl_skb, SLSI_RTT_EVENT_ATTR_ADDR, ETH_ALEN, ip_ptr);
+		ip_ptr += 6;
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_BURST_NUM, *ip_ptr++);
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_MEASUREMENT_NUM, *ip_ptr++);
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_SUCCESS_NUM, *ip_ptr++);
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_NUM_PER_BURST_PEER, *ip_ptr++);
+		/* NOTE(review): *ip_ptr dereferences a single byte although the
+		 * record field (and the attribute) is 16-bit; the second byte is
+		 * skipped by the += 2 — verify against the FAPI record layout and
+		 * endianness.  Same applies to the other u16/u32 puts below.
+		 */
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_STATUS, *ip_ptr);
+		ip_ptr += 2;
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_RETRY_AFTER_DURATION, *ip_ptr++);
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_TYPE, *ip_ptr);
+		ip_ptr += 2;
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_RSSI, *ip_ptr);
+		ip_ptr += 2;
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_RSSI_SPREAD, *ip_ptr);
+		ip_ptr += 2;
+		/* TX then RX rate words, decoded into vendor attributes. */
+		memcpy(&tx_data, ip_ptr, 2);
+		res = slsi_tx_rate_calc(nl_skb, tx_data, res, 1);
+		ip_ptr += 2;
+		memcpy(&rx_data, ip_ptr, 2);
+		res = slsi_tx_rate_calc(nl_skb, rx_data, res, 0);
+		ip_ptr += 2;
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_RTT, *ip_ptr);
+		ip_ptr += 4;
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_RTT_SD, *ip_ptr);
+		ip_ptr += 2;
+		res |= nla_put_u16(nl_skb, SLSI_RTT_EVENT_ATTR_RTT_SPREAD, *ip_ptr);
+		ip_ptr += 2;
+		/* Convert the firmware timestamp to a kernel-relative one:
+		 * boot-time now minus the firmware's age of the measurement.
+		 */
+		get_monotonic_boottime(&ts);
+		tkernel = (u64)TIMESPEC_TO_US(ts);
+		tm = *ip_ptr;	/* NOTE(review): single byte of a 4-byte field — confirm */
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_TIMESTAMP_US, tkernel - (tmac - tm));
+		ip_ptr += 4;
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_DISTANCE_MM, *ip_ptr);
+		ip_ptr += 4;
+		res |= nla_put_u32(nl_skb, SLSI_RTT_EVENT_ATTR_DISTANCE_SD_MM, *ip_ptr);
+		ip_ptr += 4;
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_BURST_DURATION_MSN, *ip_ptr++);
+		res |= nla_put_u8(nl_skb, SLSI_RTT_EVENT_ATTR_NEGOTIATED_BURST_NUM, *ip_ptr++);
+		/* Up to two optional Measurement Report elements may follow:
+		 * sub-type 8 is LCI, sub-type 11 is LCR.
+		 */
+		for (rep_cnt = 0; rep_cnt < 2; rep_cnt++) {
+			if (ip_ptr - start_ptr < data_len && ip_ptr[0] == WLAN_EID_MEASURE_REPORT) {
+				if (ip_ptr[4] == 8) /*LCI Element*/
+					res |= nla_put(nl_skb, SLSI_RTT_EVENT_ATTR_LCI,
+						       ip_ptr[1] + 2, ip_ptr);
+				else if (ip_ptr[4] == 11) /*LCR element */
+					res |= nla_put(nl_skb, SLSI_RTT_EVENT_ATTR_LCR,
+						       ip_ptr[1] + 2, ip_ptr);
+				ip_ptr += ip_ptr[1] + 2;	/* advance past TLV (len byte + header) */
+			}
+		}
+		nla_nest_end(nl_skb, nlattr_nested);
+	}
+	SLSI_DBG_HEX(sdev, SLSI_GSCAN, fapi_get_data(skb), fapi_get_datalen(skb), "range indication skb buffer:\n");
+	if (res) {
+		SLSI_ERR(sdev, "Error in nla_put*:%x\n", res);
+		kfree_skb(nl_skb);
+		goto exit;
+	}
+	cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+exit:
+	slsi_kfree_skb(skb);
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/* mlme_range_done indication: notify userspace that the RTT request
+ * identified by rtt_id has completed.  Consumes @skb.
+ */
+void slsi_rx_range_done_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+	struct netdev_vif *vif = netdev_priv(dev);
+	u16 completed_id = fapi_get_u16(skb, u.mlme_range_ind.rtt_id);
+
+	SLSI_MUTEX_LOCK(vif->vif_mutex);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "Event: %s(%d)\n",
+			slsi_print_event_name(SLSI_NL80211_RTT_COMPLETE_EVENT), SLSI_NL80211_RTT_COMPLETE_EVENT);
+#endif
+	slsi_vendor_event(sdev, SLSI_NL80211_RTT_COMPLETE_EVENT, &completed_id, sizeof(completed_id));
+	slsi_kfree_skb(skb);
+	SLSI_MUTEX_UNLOCK(vif->vif_mutex);
+}
+
+/* Vendor sub-command RTT_SUBCMD_CANCEL_CONFIG: cancel an outstanding RTT
+ * request for the supplied target MAC addresses.
+ *
+ * Fix vs. original: addresses were packed with memcpy(&addr[j], ...) while j
+ * advanced by only 1 per address, so consecutive 6-byte MACs overlapped and
+ * overwrote each other.  They are now stored at stride ETH_ALEN and bounded
+ * by the advertised target count.
+ */
+static int slsi_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	int temp, type, j = 0;
+	int r = 1;
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	u8 *addr;
+	const struct nlattr *iter;
+	u16 num_devices = 0, rtt_id = 0;
+
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "RTT_SUBCMD_CANCEL_CONFIG\n");
+	if (!slsi_dev_rtt_supported()) {
+		SLSI_WARN(sdev, "RTT not supported.\n");
+		return -ENOTSUPP;
+	}
+	nla_for_each_attr(iter, data, len, temp) {
+		type = nla_type(iter);
+		switch (type) {
+		case SLSI_RTT_ATTRIBUTE_TARGET_CNT:
+			num_devices = nla_get_u16(iter);
+			SLSI_DBG1_NODEV(SLSI_GSCAN, "Target cnt %d\n", num_devices);
+			break;
+		case SLSI_RTT_ATTRIBUTE_TARGET_ID:
+			rtt_id = nla_get_u16(iter);
+			SLSI_DBG1_NODEV(SLSI_GSCAN, "Target id %d\n", rtt_id);
+			break;
+		default:
+			SLSI_ERR_NODEV("No ATTRIBUTE_Target cnt - %d\n", type);
+			break;
+		}
+	}
+	/* Allocate memory for the received mac addresses */
+	if (num_devices) {
+		addr = kzalloc(ETH_ALEN * num_devices, GFP_KERNEL);
+		if (!addr) {
+			SLSI_ERR_NODEV("Failed for allocate memory for mac addresses\n");
+			return -ENOMEM;
+		}
+		nla_for_each_attr(iter, data, len, temp) {
+			type = nla_type(iter);
+			if (type == SLSI_RTT_ATTRIBUTE_TARGET_MAC) {
+				if (j < num_devices) {
+					/* stride is ETH_ALEN per address */
+					memcpy(&addr[j * ETH_ALEN], nla_data(iter), ETH_ALEN);
+					j++;
+				}
+			} else {
+				SLSI_ERR_NODEV("No ATTRIBUTE_MAC - %d\n", type);
+			}
+		}
+
+		r = slsi_mlme_del_range_req(sdev, dev, num_devices, addr, rtt_id);
+		kfree(addr);
+	}
+	if (r)
+		SLSI_ERR_NODEV("Failed to cancel rtt config\n");
+	return r;
+}
+
+/* Vendor sub-command: enable or disable ND (IPv6 neighbour discovery)
+ * offload.  Only valid on an activated, connected STA vif.
+ */
+static int slsi_configure_nd_offload(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	struct netdev_vif *ndev_vif;
+	const struct nlattr *attr;
+	u8 offload_enabled = 0;
+	int rem;
+	int attr_type;
+	int ret = 0;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "Received nd_offload command\n");
+
+	if (!dev) {
+		SLSI_ERR(sdev, "dev is NULL!!\n");
+		return -EINVAL;
+	}
+
+	ndev_vif = netdev_priv(dev);
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	/* Reject unless we are a connected station. */
+	if (!ndev_vif->activated || ndev_vif->vif_type != FAPI_VIFTYPE_STATION ||
+	    ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTED) {
+		SLSI_DBG3(sdev, SLSI_GSCAN, "vif error\n");
+		ret = -EPERM;
+		goto exit;
+	}
+
+	nla_for_each_attr(attr, data, len, rem) {
+		attr_type = nla_type(attr);
+		if (attr_type == SLSI_NL_ATTRIBUTE_ND_OFFLOAD_VALUE) {
+			offload_enabled = nla_get_u8(attr);
+		} else {
+			SLSI_ERR(sdev, "Invalid type : %d\n", attr_type);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+	ndev_vif->sta.nd_offload_enabled = offload_enabled;
+	ret = slsi_mlme_set_ipv6_address(sdev, dev);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "Configure nd_offload failed ret:%d nd_offload_enabled: %d\n", ret, offload_enabled);
+		ret = -EINVAL;
+	}
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return ret;
+}
+
+/* Vendor sub-command: report roaming capabilities (blacklist size read from
+ * the UNIFI_ROAM_BLACKLIST_SIZE MIB; whitelist size is always reported as 0).
+ *
+ * Fix vs. original: the NL80211_ATTR_VENDOR_DATA nest was opened but never
+ * closed with nla_nest_end(), leaving the nest attribute's length unset in
+ * the reply.
+ */
+static int slsi_get_roaming_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	struct netdev_vif *ndev_vif;
+	int ret = 0;
+	struct slsi_mib_value *values = NULL;
+	struct slsi_mib_data mibrsp = { 0, NULL };
+	struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_ROAM_BLACKLIST_SIZE, { 0, 0 } } };
+	u32 max_blacklist_size = 0;
+	u32 max_whitelist_size = 0;
+	struct sk_buff *nl_skb;
+	struct nlattr *nlattr_start;
+
+	if (!dev) {
+		SLSI_ERR(sdev, "dev is NULL!!\n");
+		return -EINVAL;
+	}
+
+	ndev_vif = netdev_priv(dev);
+
+	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+	mibrsp.dataLength = 10;
+	mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+	if (!mibrsp.data) {
+		SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+		ret = -ENOMEM;
+		goto exit;
+	}
+	values = slsi_read_mibs(sdev, NULL, get_values, ARRAY_SIZE(get_values), &mibrsp);
+	if (values && (values[0].type == SLSI_MIB_TYPE_UINT || values[0].type == SLSI_MIB_TYPE_INT))
+		max_blacklist_size = values[0].u.uintValue;
+	nl_skb = cfg80211_vendor_cmd_alloc_reply_skb(sdev->wiphy, NLMSG_DEFAULT_SIZE);
+	if (!nl_skb) {
+		SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+		ret = -ENOMEM;
+		goto exit_with_mib_resp;
+	}
+
+	nlattr_start = nla_nest_start(nl_skb, NL80211_ATTR_VENDOR_DATA);
+	if (!nlattr_start) {
+		SLSI_ERR(sdev, "failed to put NL80211_ATTR_VENDOR_DATA\n");
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		ret = -EINVAL;
+		goto exit_with_mib_resp;
+	}
+
+	ret = nla_put_u32(nl_skb, SLSI_NL_ATTR_MAX_BLACKLIST_SIZE, max_blacklist_size);
+	ret |= nla_put_u32(nl_skb, SLSI_NL_ATTR_MAX_WHITELIST_SIZE, max_whitelist_size);
+	if (ret) {
+		SLSI_ERR(sdev, "Error in nla_put*:%x\n", ret);
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		goto exit_with_mib_resp;
+	}
+	/* Close the vendor-data nest so its length is patched into the message. */
+	nla_nest_end(nl_skb, nlattr_start);
+
+	ret = cfg80211_vendor_cmd_reply(nl_skb);
+	if (ret)
+		SLSI_ERR(sdev, "cfg80211_vendor_cmd_reply failed :%d\n", ret);
+exit_with_mib_resp:
+	kfree(mibrsp.data);
+	kfree(values);
+exit:
+	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+	return ret;
+}
+
+/* Vendor sub-command SUBCMD_SET_ROAMING_STATE: write the requested roaming
+ * enable/disable state into the UNIFI_ROAMING_ENABLED MIB.
+ */
+static int slsi_set_roaming_state(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	struct net_device *dev = wdev->netdev;
+	const struct nlattr *attr;
+	int roam_state = 0;
+	int rem = 0;
+	int ret = 0;
+
+	if (!dev) {
+		SLSI_WARN_NODEV("net_dev is NULL\n");
+		return -EINVAL;
+	}
+
+	nla_for_each_attr(attr, data, len, rem) {
+		if (nla_type(attr) == SLSI_NL_ATTR_ROAM_STATE) {
+			roam_state = nla_get_u8(attr);
+		} else {
+			SLSI_ERR_NODEV("Unknown attribute: %d\n", nla_type(attr));
+			return -EINVAL;
+		}
+	}
+
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "SUBCMD_SET_ROAMING_STATE roam_state = %d\n", roam_state);
+	ret = slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAMING_ENABLED, roam_state);
+	if (ret < 0)
+		SLSI_ERR_NODEV("Failed to set roaming state\n");
+
+	return ret;
+}
+
+/* Map a firmware roaming-search reason code to its printable name.
+ * Unknown codes map to "UNKNOWN_REASON".
+ */
+char *slsi_get_roam_reason_str(int roam_reason)
+{
+	if (roam_reason == SLSI_WIFI_ROAMING_SEARCH_REASON_RESERVED)
+		return "WIFI_ROAMING_SEARCH_REASON_RESERVED";
+	if (roam_reason == SLSI_WIFI_ROAMING_SEARCH_REASON_LOW_RSSI)
+		return "WIFI_ROAMING_SEARCH_REASON_LOW_RSSI";
+	if (roam_reason == SLSI_WIFI_ROAMING_SEARCH_REASON_LINK_LOSS)
+		return "WIFI_ROAMING_SEARCH_REASON_LINK_LOSS";
+	if (roam_reason == SLSI_WIFI_ROAMING_SEARCH_REASON_BTM_REQ)
+		return "WIFI_ROAMING_SEARCH_REASON_BTM_REQ";
+	if (roam_reason == SLSI_WIFI_ROAMING_SEARCH_REASON_CU_TRIGGER)
+		return "WIFI_ROAMING_SEARCH_REASON_CU_TRIGGER";
+	return "UNKNOWN_REASON";
+}
+
+/* mlme_event_log_ind handler: parse the firmware's TLV-encoded event log
+ * bulk data and emit a human-readable kernel log line per recognised event
+ * (EAPOL handshake progress, BTM frames, roam search, auth/assoc, RM frames).
+ *
+ * TLV wire format (little-endian): u16 tag, u16 length, then 'length' value
+ * bytes.  Vendor-specific tags nest a further u16 sub-tag inside the value.
+ * Consumes @skb.
+ */
+void slsi_rx_event_log_indication(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+	u16 event_id = 0;
+	u64 timestamp = 0;
+	u8 *tlv_data;
+	u32 roam_reason = 0, chan_utilisation = 0, btm_request_mode = 0, btm_response = 0, eapol_msg_type = 0;
+	u32 deauth_reason = 0, eapol_retry_count = 0, roam_rssi, status_code = 0;
+	u16 vendor_len, tag_id, tag_len, vtag_id, eapol_key_type = 0;
+	u32 tag_value, vtag_value, rssi_bits = 0;
+	int roam_rssi_val = 0;
+	__le16 *le16_ptr = NULL;
+	int tlv_buffer__len = fapi_get_datalen(skb), i = 0;
+
+	SLSI_MUTEX_LOCK(sdev->logger_mutex);
+	event_id = fapi_get_s16(skb, u.mlme_event_log_ind.event);
+	timestamp = fapi_get_u64(skb, u.mlme_event_log_ind.timestamp);
+	tlv_data = fapi_get_data(skb);
+
+	SLSI_DBG3(sdev, SLSI_GSCAN,
+		  "slsi_rx_event_log_indication, event id = %d, len = %d\n", event_id, tlv_buffer__len);
+
+#ifdef CONFIG_SCSC_WIFILOGGER
+	SCSC_WLOG_FW_EVENT(WLOG_NORMAL, event_id, timestamp, fapi_get_data(skb), fapi_get_datalen(skb));
+#endif
+	/* Walk the TLVs; each iteration needs at least a 4-byte tag+len header. */
+	while (i + 4 < tlv_buffer__len) {
+		le16_ptr = (__le16 *)&tlv_data[i];
+		tag_id = le16_to_cpu(*le16_ptr);
+		le16_ptr = (__le16 *)&tlv_data[i + 2];
+		tag_len = le16_to_cpu(*le16_ptr);
+		i += 4;
+		if (i + tag_len > tlv_buffer__len) {
+			/* Malformed/truncated TLV: abandon parsing entirely. */
+			SLSI_INFO(sdev, "Incorrect fapi bulk data\n");
+			slsi_kfree_skb(skb);
+			SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+			return;
+		}
+		tag_value = slsi_convert_tlv_data_to_value(&tlv_data[i], tag_len);
+		switch (tag_id) {
+		case SLSI_WIFI_TAG_RSSI:
+			/* The RSSI arrives as a positive encoding of a negative
+			 * value; recover it by complementing within the value's
+			 * significant bit-width, then negating.
+			 */
+			roam_rssi = tag_value;
+			while (roam_rssi) {
+				rssi_bits++;
+				roam_rssi >>= 1;
+			}
+			roam_rssi_val = ((1 << rssi_bits) - 1) ^ tag_value;
+			roam_rssi_val = -(roam_rssi_val + 1);
+			break;
+		case SLSI_WIFI_TAG_REASON_CODE:
+			deauth_reason = tag_value;
+			break;
+		case SLSI_WIFI_TAG_VENDOR_SPECIFIC:
+			/* Nested vendor TLV: u16 sub-tag followed by its value. */
+			vendor_len = tag_len - 2;
+			le16_ptr = (__le16 *)&tlv_data[i];
+			vtag_id = le16_to_cpu(*le16_ptr);
+			vtag_value = slsi_convert_tlv_data_to_value(&tlv_data[i + 2], vendor_len);
+			switch (vtag_id) {
+			case SLSI_WIFI_TAG_VD_CHANNEL_UTILISATION:
+				chan_utilisation = vtag_value;
+				break;
+			case SLSI_WIFI_TAG_VD_ROAMING_REASON:
+				roam_reason = vtag_value;
+				break;
+			case SLSI_WIFI_TAG_VD_BTM_REQUEST_MODE:
+				btm_request_mode = vtag_value;
+				break;
+			case SLSI_WIFI_TAG_VD_BTM_RESPONSE_STATUS:
+				btm_response = vtag_value;
+				break;
+			case SLSI_WIFI_TAG_VD_RETRY_COUNT:
+				eapol_retry_count = vtag_value;
+				break;
+			case SLSI_WIFI_TAG_VD_EAPOL_KEY_TYPE:
+				eapol_key_type = vtag_value;
+				break;
+			}
+			break;
+		case SLSI_WIFI_TAG_EAPOL_MESSAGE_TYPE:
+			eapol_msg_type = tag_value;
+			break;
+		case SLSI_WIFI_TAG_STATUS:
+			status_code = tag_value;
+			break;
+		}
+		i += tag_len;
+	}
+	/* Emit one log line for the events we know how to describe. */
+	switch (event_id) {
+	case FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START:
+		if (eapol_key_type == SLSI_WIFI_EAPOL_KEY_TYPE_GTK)
+			SLSI_INFO(sdev, "WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, Send GTK, G%d\n", eapol_msg_type);
+		else if (eapol_key_type == SLSI_WIFI_EAPOL_KEY_TYPE_PTK)
+			SLSI_INFO(sdev, "WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, Send 4way-H/S, M%d\n",
+				  eapol_msg_type);
+		break;
+	case FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP:
+		SLSI_INFO(sdev, "WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP,Result Code:%d, Retry Count:%d\n",
+			  status_code, eapol_retry_count);
+		break;
+	case FAPI_EVENT_WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED:
+		if (eapol_key_type == SLSI_WIFI_EAPOL_KEY_TYPE_GTK)
+			SLSI_INFO(sdev, "WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, Received GTK, G%d\n", eapol_msg_type);
+		else if (eapol_key_type == SLSI_WIFI_EAPOL_KEY_TYPE_PTK)
+			SLSI_INFO(sdev, "WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, Received 4way-H/S, M%d\n", eapol_msg_type);
+		break;
+	case WIFI_EVENT_FW_BTM_FRAME_REQUEST:
+		SLSI_INFO(sdev, "WIFI_EVENT_FW_BTM_FRAME_REQUEST,Request Mode:%d\n", btm_request_mode);
+		break;
+	case WIFI_EVENT_FW_BTM_FRAME_RESPONSE:
+		SLSI_INFO(sdev, "WIFI_EVENT_FW_BTM_FRAME_RESPONSE,Status code:%d\n", btm_response);
+		break;
+	case FAPI_EVENT_WIFI_EVENT_ROAM_SEARCH_STARTED:
+		SLSI_INFO(sdev, "WIFI_EVENT_ROAM_SEARCH_STARTED, RSSI:%d, Deauth Reason:0x%04x, Channel Utilisation:%d,"
+			  "Roam Reason: %s\n", roam_rssi_val, deauth_reason, chan_utilisation,
+			  slsi_get_roam_reason_str(roam_reason));
+		break;
+	case FAPI_EVENT_WIFI_EVENT_FW_AUTH_STARTED:
+		SLSI_INFO(sdev, "WIFI_EVENT_FW_AUTH_STARTED\n");
+		break;
+	case FAPI_EVENT_WIFI_EVENT_AUTH_COMPLETE:
+		SLSI_INFO(sdev, "WIFI_EVENT_AUTH_COMPLETE,Status code:%d\n", status_code);
+		break;
+	case FAPI_EVENT_WIFI_EVENT_ROAM_ASSOC_COMPLETE:
+		SLSI_INFO(sdev, "Received Association Response\n");
+		break;
+	case WIFI_EVENT_FW_NR_FRAME_REQUEST:
+		SLSI_INFO(sdev, "Send Radio Measurement Frame (Neighbor Report Req)\n");
+		break;
+	case WIFI_EVENT_FW_RM_FRAME_RESPONSE:
+		SLSI_INFO(sdev, "Received Radio Measurement Frame (Radio Measurement Rep)\n");
+		break;
+	}
+
+	slsi_kfree_skb(skb);
+	SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+}
+
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+/* Wifi-logger callback: push one ring-buffer chunk (status + payload) up to
+ * userspace as an SLSI_NL80211_LOGGER_RING_EVENT vendor event.
+ */
+static void slsi_on_ring_buffer_data(char *ring_name, char *buffer, int buffer_size,
+				     struct scsc_wifi_ring_buffer_status *buffer_status, void *ctx)
+{
+	struct slsi_dev *sdev = ctx;
+	const int event_id = SLSI_NL80211_LOGGER_RING_EVENT;
+	struct sk_buff *nl_skb;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, buffer_size, event_id, GFP_KERNEL);
+#else
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, buffer_size, event_id, GFP_KERNEL);
+#endif
+	if (!nl_skb) {
+		SLSI_ERR_NODEV("Failed to allocate skb for vendor event: %d\n", event_id);
+		return;
+	}
+
+	if (nla_put(nl_skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_STATUS, sizeof(*buffer_status), buffer_status) ||
+	    nla_put(nl_skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_DATA, buffer_size, buffer)) {
+		SLSI_ERR_NODEV("Failed nla_put\n");
+		slsi_kfree_skb(nl_skb);
+		return;
+	}
+	cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+}
+
+/* Wifi-logger alert callback: forward a firmware alert buffer to userspace
+ * as an SLSI_NL80211_LOGGER_FW_DUMP_EVENT vendor event.
+ */
+static void slsi_on_alert(char *buffer, int buffer_size, int err_code, void *ctx)
+{
+	struct slsi_dev *sdev = ctx;
+	const int event_id = SLSI_NL80211_LOGGER_FW_DUMP_EVENT;
+	struct sk_buff *nl_skb;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+	SLSI_MUTEX_LOCK(sdev->logger_mutex);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, buffer_size, event_id, GFP_KERNEL);
+#else
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, buffer_size, event_id, GFP_KERNEL);
+#endif
+	if (!nl_skb) {
+		SLSI_ERR_NODEV("Failed to allocate skb for vendor event: %d\n", event_id);
+		goto unlock;
+	}
+
+	if (nla_put_u32(nl_skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_LEN, buffer_size) ||
+	    nla_put(nl_skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_DATA, buffer_size, buffer)) {
+		SLSI_ERR_NODEV("Failed nla_put\n");
+		slsi_kfree_skb(nl_skb);
+		goto unlock;
+	}
+	cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+unlock:
+	SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+}
+
+/* Firmware memory-dump callback: snapshot the dump into the module-level
+ * mem_dump_buffer (replacing any previous snapshot) for later retrieval.
+ */
+static void slsi_on_firmware_memory_dump(char *buffer, int buffer_size, void *ctx)
+{
+	SLSI_ERR_NODEV("slsi_on_firmware_memory_dump\n");
+	/* Drop the previous snapshot; kmalloc() below resets the pointer
+	 * (to NULL on failure), so no explicit NULL assignment is needed.
+	 */
+	kfree(mem_dump_buffer);
+	mem_dump_buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (!mem_dump_buffer) {
+		SLSI_ERR_NODEV("Failed to allocate memory for mem_dump_buffer\n");
+		return;
+	}
+	mem_dump_buffer_size = buffer_size;
+	memcpy(mem_dump_buffer, buffer, mem_dump_buffer_size);
+}
+
+/* Driver memory-dump callback: snapshot the dump into the module-level
+ * mem_dump_buffer (replacing any previous snapshot) for later retrieval.
+ */
+static void slsi_on_driver_memory_dump(char *buffer, int buffer_size, void *ctx)
+{
+	SLSI_ERR_NODEV("slsi_on_driver_memory_dump\n");
+	kfree(mem_dump_buffer);
+	/* NOTE: size is recorded before the allocation is known to succeed,
+	 * mirroring the original ordering of this handler.
+	 */
+	mem_dump_buffer_size = buffer_size;
+	mem_dump_buffer = kmalloc(mem_dump_buffer_size, GFP_KERNEL);
+	if (!mem_dump_buffer) {
+		SLSI_ERR_NODEV("Failed to allocate memory for mem_dump_buffer\n");
+		return;
+	}
+	memcpy(mem_dump_buffer, buffer, mem_dump_buffer_size);
+}
+
+/* Enable or disable firmware logging by writing the unifiLoggerEnabled MIB.
+ * Compiles to a no-op (returning 0) when MIB writes are disabled.
+ */
+static int slsi_enable_logging(struct slsi_dev *sdev, bool enable)
+{
+#ifdef ENABLE_WIFI_LOGGER_MIB_WRITE
+	struct slsi_mib_data mib_data = { 0, NULL };
+	int status;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "Value of enable is : %d\n", enable);
+	status = slsi_mib_encode_bool(&mib_data, SLSI_PSID_UNIFI_LOGGER_ENABLED, enable, 0);
+	if (status != SLSI_MIB_STATUS_SUCCESS) {
+		SLSI_ERR(sdev, "slsi_enable_logging failed: no mem for MIB\n");
+		return -ENOMEM;
+	}
+
+	status = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
+	kfree(mib_data.data);
+	if (status)
+		SLSI_ERR(sdev, "Err setting unifiLoggerEnabled MIB. error = %d\n", status);
+	return status;
+#else
+	SLSI_DBG3(sdev, SLSI_GSCAN, "UnifiLoggerEnabled MIB write disabled\n");
+	return 0;
+#endif
+}
+
+/* Vendor sub-command: start the enhanced wifi-logger for one ring.
+ * Parses ring name/verbosity/flags/interval/size attributes, registers the
+ * ring-data and alert handlers, enables the logger MIB and starts logging.
+ * On failure each step is unwound in reverse via the goto chain below.
+ *
+ * NOTE(review): on the unwind path 'ret' is overwritten by each cleanup
+ * call, so the value returned to userspace is the last cleanup's status,
+ * not the original failure code — confirm this is intended.
+ */
+static int slsi_start_logging(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+	int ret = 0;
+	int temp = 0;
+	int type = 0;
+	char ring_name[32] = {0};
+	int verbose_level = 0;
+	int ring_flags = 0;
+	int max_interval_sec = 0;
+	int min_data_size = 0;
+	const struct nlattr *attr;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+	SLSI_MUTEX_LOCK(sdev->logger_mutex);
+	nla_for_each_attr(attr, data, len, temp) {
+		type = nla_type(attr);
+		switch (type) {
+		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_NAME:
+			/* ring_name is pre-zeroed, so the bounded copy below is
+			 * always NUL-terminated.
+			 */
+			strncpy(ring_name, nla_data(attr), MIN(sizeof(ring_name) - 1, nla_len(attr)));
+			break;
+		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_VERBOSE_LEVEL:
+			verbose_level = nla_get_u32(attr);
+			break;
+		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_FLAGS:
+			ring_flags = nla_get_u32(attr);
+			break;
+		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_LOG_MAX_INTERVAL:
+			max_interval_sec = nla_get_u32(attr);
+			break;
+		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+			min_data_size = nla_get_u32(attr);
+			break;
+		default:
+			SLSI_ERR(sdev, "Unknown type: %d\n", type);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+	ret = scsc_wifi_set_log_handler(slsi_on_ring_buffer_data, sdev);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "scsc_wifi_set_log_handler failed ret: %d\n", ret);
+		goto exit;
+	}
+	/* Alert-handler failure is deliberately non-fatal: log and continue. */
+	ret = scsc_wifi_set_alert_handler(slsi_on_alert, sdev);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "Warning : scsc_wifi_set_alert_handler failed ret: %d\n", ret);
+	}
+	ret = slsi_enable_logging(sdev, 1);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "slsi_enable_logging for enable = 1, failed ret: %d\n", ret);
+		goto exit_with_reset_alert_handler;
+	}
+	ret = scsc_wifi_start_logging(verbose_level, ring_flags, max_interval_sec, min_data_size, ring_name);
+	if (ret < 0) {
+		SLSI_ERR(sdev, "scsc_wifi_start_logging failed ret: %d\n", ret);
+		goto exit_with_disable_logging;
+	} else {
+		goto exit;
+	}
+exit_with_disable_logging:
+	ret = slsi_enable_logging(sdev, 0);
+	if (ret < 0)
+		SLSI_ERR(sdev, "slsi_enable_logging for enable = 0, failed ret: %d\n", ret);
+exit_with_reset_alert_handler:
+	ret = scsc_wifi_reset_alert_handler();
+	if (ret < 0)
+		SLSI_ERR(sdev, "Warning : scsc_wifi_reset_alert_handler failed ret: %d\n", ret);
+	ret = scsc_wifi_reset_log_handler();
+	if (ret < 0)
+		SLSI_ERR(sdev, "scsc_wifi_reset_log_handler failed ret: %d\n", ret);
+exit:
+	SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+	return ret;
+}
+
+static int slsi_reset_logging(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ ret = slsi_enable_logging(sdev, 0);
+ if (ret < 0)
+ SLSI_ERR(sdev, "slsi_enable_logging for enable = 0, failed ret: %d\n", ret);
+ ret = scsc_wifi_reset_log_handler();
+ if (ret < 0)
+ SLSI_ERR(sdev, "scsc_wifi_reset_log_handler failed ret: %d\n", ret);
+ ret = scsc_wifi_reset_alert_handler();
+ if (ret < 0)
+ SLSI_ERR(sdev, "Warning : scsc_wifi_reset_alert_handler failed ret: %d\n", ret);
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_trigger_fw_mem_dump(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ int length = 100;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+
+ ret = scsc_wifi_get_firmware_memory_dump(slsi_on_firmware_memory_dump, sdev);
+ if (ret) {
+ SLSI_ERR(sdev, "scsc_wifi_get_firmware_memory_dump failed : %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, length);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_LEN, mem_dump_buffer_size)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_fw_mem_dump(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ int buf_len = 0;
+ void __user *user_buf = NULL;
+ const struct nlattr *attr;
+ struct sk_buff *skb;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_LEN:
+ buf_len = nla_get_u32(attr);
+ break;
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_DATA:
+ user_buf = (void __user *)(unsigned long)nla_get_u64(attr);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return -EINVAL;
+ }
+ }
+ if (buf_len > 0 && user_buf) {
+ ret = copy_to_user(user_buf, mem_dump_buffer, buf_len);
+ if (ret) {
+ SLSI_ERR(sdev, "failed to copy memdump into user buffer : %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Indicate the memdump is successfully copied */
+ if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+ }
+
+exit:
+ kfree(mem_dump_buffer);
+ mem_dump_buffer = NULL;
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_trigger_driver_mem_dump(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ int length = 100;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+
+ ret = scsc_wifi_get_driver_memory_dump(slsi_on_driver_memory_dump, sdev);
+ if (ret) {
+ SLSI_ERR(sdev, "scsc_wifi_get_driver_memory_dump failed : %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, length);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_LEN, mem_dump_buffer_size)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_driver_mem_dump(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ int buf_len = 0;
+ void __user *user_buf = NULL;
+ const struct nlattr *attr;
+ struct sk_buff *skb;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_LEN:
+ buf_len = nla_get_u32(attr);
+ break;
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_DATA:
+ user_buf = (void __user *)(unsigned long)nla_get_u64(attr);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return -EINVAL;
+ }
+ }
+ if (buf_len > 0 && user_buf) {
+ ret = copy_to_user(user_buf, mem_dump_buffer, buf_len);
+ if (ret) {
+ SLSI_ERR(sdev, "failed to copy memdump into user buffer : %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Indicate the memdump is successfully copied */
+ if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_DATA, sizeof(ret), &ret)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+ }
+
+exit:
+ kfree(mem_dump_buffer);
+ mem_dump_buffer = NULL;
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_version(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ int buffer_size = 1024;
+ bool log_version = false;
+ char *buffer;
+ const struct nlattr *attr;
+
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer) {
+ SLSI_ERR(sdev, "No mem. Size:%d\n", buffer_size);
+ return -ENOMEM;
+ }
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_VERSION:
+ log_version = true;
+ break;
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_VERSION:
+ log_version = false;
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ if (log_version)
+ ret = scsc_wifi_get_driver_version(buffer, buffer_size);
+ else
+ ret = scsc_wifi_get_firmware_version(buffer, buffer_size);
+
+ if (ret < 0) {
+ SLSI_ERR(sdev, "failed to get the version %d\n", ret);
+ goto exit;
+ }
+
+ ret = slsi_vendor_cmd_reply(wiphy, buffer, strlen(buffer));
+exit:
+ kfree(buffer);
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_ring_buffers_status(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int num_rings = 10;
+ struct sk_buff *skb;
+ struct scsc_wifi_ring_buffer_status status[num_rings];
+
+ SLSI_DBG1(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ memset(status, 0, sizeof(struct scsc_wifi_ring_buffer_status) * num_rings);
+ ret = scsc_wifi_get_ring_buffers_status(&num_rings, status);
+ if (ret < 0) {
+ SLSI_ERR(sdev, "scsc_wifi_get_ring_buffers_status failed ret:%d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 700);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Indicate that the ring count and ring buffers status is successfully copied */
+ if (nla_put_u8(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_NUM, num_rings) ||
+ nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_STATUS, sizeof(status[0]) * num_rings, status)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_ring_data(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ char ring_name[32] = {0};
+ const struct nlattr *attr;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_NAME:
+ strncpy(ring_name, nla_data(attr), MIN(sizeof(ring_name) - 1, nla_len(attr)));
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ goto exit;
+ }
+ }
+
+ ret = scsc_wifi_get_ring_data(ring_name);
+ if (ret < 0)
+ SLSI_ERR(sdev, "trigger_get_data failed ret:%d\n", ret);
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_logger_supported_feature_set(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ u32 supported_features = 0;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ ret = scsc_wifi_get_logger_supported_feature_set(&supported_features);
+ if (ret < 0) {
+ SLSI_ERR(sdev, "scsc_wifi_get_logger_supported_feature_set failed ret:%d\n", ret);
+ goto exit;
+ }
+ ret = slsi_vendor_cmd_reply(wiphy, &supported_features, sizeof(supported_features));
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
/* START_PKT_FATE_MONITORING vendor command: enable TX data confirmations in
 * the firmware (UnifiTxDataConfirm MIB = 1) and start packet-fate monitoring
 * in the logging subsystem.  If monitoring fails to start, the MIB write is
 * rolled back and the monitoring failure is reported to the caller.
 */
static int slsi_start_pkt_fate_monitoring(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	int ret = 0;
#ifdef ENABLE_WIFI_LOGGER_MIB_WRITE
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct slsi_mib_data mib_data = { 0, NULL };

	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
	SLSI_MUTEX_LOCK(sdev->logger_mutex);
	ret = slsi_mib_encode_bool(&mib_data, SLSI_PSID_UNIFI_TX_DATA_CONFIRM, 1, 0);
	if (ret != SLSI_MIB_STATUS_SUCCESS) {
		SLSI_ERR(sdev, "Failed to set UnifiTxDataConfirm MIB : no mem for MIB\n");
		ret = -ENOMEM;
		goto exit;
	}

	ret = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
	if (ret) {
		SLSI_ERR(sdev, "Err setting UnifiTxDataConfirm MIB. error = %d\n", ret);
		goto exit;
	}

	ret = scsc_wifi_start_pkt_fate_monitoring();
	if (ret < 0) {
		/* Remember the monitoring failure: the rollback below reuses
		 * 'ret' and previously masked it with 0 on success. */
		int mon_err = ret;

		SLSI_ERR(sdev, "scsc_wifi_start_pkt_fate_monitoring failed, ret=%d\n", ret);

		/* Resetting the SLSI_PSID_UNIFI_TX_DATA_CONFIRM mib back to 0.
		 * Free the first encode's buffer before re-encoding; the old
		 * code overwrote the pointer and leaked it. */
		kfree(mib_data.data);
		mib_data.dataLength = 0;
		mib_data.data = NULL;
		/* Encode value 0 (disable).  The old code re-encoded 1 here,
		 * which contradicted its own "reset back to 0" comment. */
		ret = slsi_mib_encode_bool(&mib_data, SLSI_PSID_UNIFI_TX_DATA_CONFIRM, 0, 0);
		if (ret != SLSI_MIB_STATUS_SUCCESS) {
			SLSI_ERR(sdev, "Failed to set UnifiTxDataConfirm MIB : no mem for MIB\n");
			ret = -ENOMEM;
			goto exit;
		}

		ret = slsi_mlme_set(sdev, NULL, mib_data.data, mib_data.dataLength);
		if (ret) {
			SLSI_ERR(sdev, "Err setting UnifiTxDataConfirm MIB. error = %d\n", ret);
			goto exit;
		}

		/* Rollback succeeded: report the original monitoring error. */
		ret = mon_err;
	}
exit:
	kfree(mib_data.data);
	SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
	return ret;
#else
	SLSI_ERR_NODEV("slsi_start_pkt_fate_monitoring : UnifiTxDataConfirm MIB write disabled\n");
	return ret;
#endif
}
+
+static int slsi_get_tx_pkt_fates(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ void __user *user_buf = NULL;
+ u32 req_count = 0;
+ size_t provided_count = 0;
+ struct sk_buff *skb;
+ const struct nlattr *attr;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_NUM:
+ req_count = nla_get_u32(attr);
+ break;
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_DATA:
+ user_buf = (void __user *)(unsigned long)nla_get_u64(attr);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ ret = scsc_wifi_get_tx_pkt_fates(user_buf, req_count, &provided_count);
+ if (ret < 0) {
+ SLSI_ERR(sdev, "scsc_wifi_get_tx_pkt_fates failed ret: %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_NUM, sizeof(provided_count), &provided_count)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
+static int slsi_get_rx_pkt_fates(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ int ret = 0;
+ int temp = 0;
+ int type = 0;
+ void __user *user_buf = NULL;
+ u32 req_count = 0;
+ size_t provided_count = 0;
+ struct sk_buff *skb;
+ const struct nlattr *attr;
+
+ SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+ SLSI_MUTEX_LOCK(sdev->logger_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_NUM:
+ req_count = nla_get_u32(attr);
+ break;
+ case SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_DATA:
+ user_buf = (void __user *)(unsigned long)nla_get_u64(attr);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unknown type: %d\n", type);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ ret = scsc_wifi_get_rx_pkt_fates(user_buf, req_count, &provided_count);
+ if (ret < 0) {
+ SLSI_ERR(sdev, "scsc_wifi_get_rx_pkt_fates failed ret: %d\n", ret);
+ goto exit;
+ }
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200);
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_NUM, sizeof(provided_count), &provided_count)) {
+ SLSI_ERR_NODEV("Failed nla_put\n");
+ slsi_kfree_skb(skb);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(skb);
+
+ if (ret)
+ SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
+exit:
+ SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
+ return ret;
+}
+
/* GET_WAKE_REASON_STATS vendor command.
 * Parses the requested table sizes from the netlink attributes, then replies
 * with a slsi_wlan_driver_wake_reason_cnt breakdown.  Nothing in this
 * function populates wake_reason_count beyond the initial memset, so every
 * counter in the reply is currently 0.
 * NOTE(review): presumably a call that fills in the stats belongs between the
 * parse loop and the reply — confirm against driver history; the
 * "if (ret < 0)" check after the loop is dead as written (ret can only be 0
 * there, since error paths in the loop goto exit directly).
 */
static int slsi_get_wake_reason_stats(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct slsi_wlan_driver_wake_reason_cnt wake_reason_count;
	int ret = 0;
	int temp = 0;
	int type = 0;
	const struct nlattr *attr;
	struct sk_buff *skb;

	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
	// Initialising the wake_reason_count structure values to 0.
	memset(&wake_reason_count, 0, sizeof(struct slsi_wlan_driver_wake_reason_cnt));

	SLSI_MUTEX_LOCK(sdev->logger_mutex);
	/* User space tells us how large its per-reason count tables are. */
	nla_for_each_attr(attr, data, len, temp) {
		type = nla_type(attr);
		switch (type) {
		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_CMD_EVENT_WAKE_CNT_SZ:
			wake_reason_count.cmd_event_wake_cnt_sz = nla_get_u32(attr);
			break;
		case SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_DRIVER_FW_LOCAL_WAKE_CNT_SZ:
			wake_reason_count.driver_fw_local_wake_cnt_sz = nla_get_u32(attr);
			break;
		default:
			SLSI_ERR(sdev, "Unknown type: %d\n", type);
			ret = -EINVAL;
			goto exit;
		}
	}

	/* NOTE(review): dead check — ret is always 0 at this point. */
	if (ret < 0) {
		SLSI_ERR(sdev, "Failed to get wake reason stats : %d\n", ret);
		goto exit;
	}

	/* Alloc the SKB for vendor_event */
	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 700);
	if (!skb) {
		SLSI_ERR_NODEV("Failed to allocate skb for Vendor event\n");
		ret = -ENOMEM;
		goto exit;
	}

	if (nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_CMD_EVENT_WAKE,
			wake_reason_count.total_cmd_event_wake)) {
		SLSI_ERR_NODEV("Failed nla_put\n");
		slsi_kfree_skb(skb);
		ret = -EINVAL;
		goto exit;
	}
	/* Zero-length put: the per-reason cmd/event count table is not reported. */
	if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_CMD_EVENT_WAKE_CNT_PTR, 0,
		    wake_reason_count.cmd_event_wake_cnt)) {
		SLSI_ERR_NODEV("Failed nla_put\n");
		slsi_kfree_skb(skb);
		ret = -EINVAL;
		goto exit;
	}
	if (nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_DRIVER_FW_LOCAL_WAKE,
			wake_reason_count.total_driver_fw_local_wake)) {
		SLSI_ERR_NODEV("Failed nla_put\n");
		slsi_kfree_skb(skb);
		ret = -EINVAL;
		goto exit;
	}
	/* Zero-length put: the per-reason driver/fw count table is not reported. */
	if (nla_put(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_DRIVER_FW_LOCAL_WAKE_CNT_PTR, 0,
		    wake_reason_count.driver_fw_local_wake_cnt)) {
		SLSI_ERR_NODEV("Failed nla_put\n");
		slsi_kfree_skb(skb);
		ret = -EINVAL;
		goto exit;
	}
	/* RX wake details and packet-classification counters, one attribute each. */
	if (nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_RX_DATA_WAKE,
			wake_reason_count.total_rx_data_wake) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_UNICAST_CNT,
			wake_reason_count.rx_wake_details.rx_unicast_cnt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_MULTICAST_CNT,
			wake_reason_count.rx_wake_details.rx_multicast_cnt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_BROADCAST_CNT,
			wake_reason_count.rx_wake_details.rx_broadcast_cnt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP_PKT,
			wake_reason_count.rx_wake_pkt_classification_info.icmp_pkt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_PKT,
			wake_reason_count.rx_wake_pkt_classification_info.icmp6_pkt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_RA,
			wake_reason_count.rx_wake_pkt_classification_info.icmp6_ra) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_NA,
			wake_reason_count.rx_wake_pkt_classification_info.icmp6_na) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_NS,
			wake_reason_count.rx_wake_pkt_classification_info.icmp6_ns) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP4_RX_MULTICAST_CNT,
			wake_reason_count.rx_multicast_wake_pkt_info.ipv4_rx_multicast_addr_cnt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_RX_MULTICAST_CNT,
			wake_reason_count.rx_multicast_wake_pkt_info.ipv6_rx_multicast_addr_cnt) ||
	    nla_put_u32(skb, SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_OTHER_RX_MULTICAST_CNT,
			wake_reason_count.rx_multicast_wake_pkt_info.other_rx_multicast_addr_cnt)) {
		SLSI_ERR_NODEV("Failed nla_put\n");
		slsi_kfree_skb(skb);
		ret = -EINVAL;
		goto exit;
	}
	ret = cfg80211_vendor_cmd_reply(skb);

	if (ret)
		SLSI_ERR(sdev, "Vendor Command reply failed ret:%d\n", ret);
exit:
	SLSI_MUTEX_UNLOCK(sdev->logger_mutex);
	return ret;
}
+
+#endif /* CONFIG_SCSC_WLAN_ENHANCED_LOGGING */
+
+static int slsi_acs_validate_width_hw_mode(struct slsi_acs_request *request)
+{
+ if (request->hw_mode != SLSI_ACS_MODE_IEEE80211A && request->hw_mode != SLSI_ACS_MODE_IEEE80211B &&
+ request->hw_mode != SLSI_ACS_MODE_IEEE80211G)
+ return -EINVAL;
+ if (request->ch_width != 20 && request->ch_width != 40 && request->ch_width != 80)
+ return -EINVAL;
+ return 0;
+}
+
+static int slsi_acs_init(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = wdev->netdev;
+ struct netdev_vif *ndev_vif;
+ struct slsi_acs_request *request;
+ int temp;
+ int type;
+ const struct nlattr *attr;
+ int r = 0;
+ u32 *freq_list = NULL;
+ int freq_list_len = 0;
+
+ SLSI_INFO(sdev, "SUBCMD_ACS_INIT Received\n");
+ if (slsi_is_test_mode_enabled()) {
+ SLSI_ERR(sdev, "Not supported in WlanLite mode\n");
+ return -EOPNOTSUPP;
+ }
+ if (wdev->iftype != NL80211_IFTYPE_AP) {
+ SLSI_ERR(sdev, "Invalid iftype: %d\n", wdev->iftype);
+ return -EINVAL;
+ }
+ if (!dev) {
+ SLSI_ERR(sdev, "Dev not found!\n");
+ return -ENODEV;
+ }
+ request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ SLSI_ERR(sdev, "No memory for request!");
+ return -ENOMEM;
+ }
+ ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
+ nla_for_each_attr(attr, data, len, temp) {
+ type = nla_type(attr);
+ switch (type) {
+ case SLSI_ACS_ATTR_HW_MODE:
+ {
+ request->hw_mode = nla_get_u8(attr);
+ SLSI_INFO(sdev, "ACS hw mode: %d\n", request->hw_mode);
+ break;
+ }
+ case SLSI_ACS_ATTR_CHWIDTH:
+ {
+ request->ch_width = nla_get_u16(attr);
+ SLSI_INFO(sdev, "ACS ch_width: %d\n", request->ch_width);
+ break;
+ }
+ case SLSI_ACS_ATTR_FREQ_LIST:
+ {
+ if (freq_list) /* This check is to avoid Prevent Issue */
+ break;
+
+ freq_list = kmalloc(nla_len(attr), GFP_KERNEL);
+ if (!freq_list) {
+ SLSI_ERR(sdev, "No memory for frequency list!");
+ kfree(request);
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return -ENOMEM;
+ }
+ memcpy(freq_list, nla_data(attr), nla_len(attr));
+ freq_list_len = nla_len(attr) / sizeof(u32);
+ SLSI_INFO(sdev, "ACS freq_list_len: %d\n", freq_list_len);
+ break;
+ }
+ default:
+ if (type > SLSI_ACS_ATTR_MAX)
+ SLSI_ERR(sdev, "Invalid type : %d\n", type);
+ break;
+ }
+ }
+
+ r = slsi_acs_validate_width_hw_mode(request);
+ if (r == 0 && freq_list_len) {
+ struct ieee80211_channel *channels[freq_list_len];
+ struct slsi_acs_chan_info ch_info[MAX_CHAN_VALUE_ACS];
+ struct slsi_acs_selected_channels acs_selected_channels;
+ int i = 0, num_channels = 0;
+ int idx;
+ u32 chan_flags = (IEEE80211_CHAN_INDOOR_ONLY | IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_DISABLED |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 10, 13)
+ IEEE80211_CHAN_PASSIVE_SCAN
+#else
+ IEEE80211_CHAN_NO_IR
+#endif
+ );
+
+ memset(channels, 0, sizeof(channels));
+ memset(&ch_info, 0, sizeof(ch_info));
+ for (i = 0; i < freq_list_len; i++) {
+ channels[num_channels] = ieee80211_get_channel(wiphy, freq_list[i]);
+ if (!channels[num_channels]) {
+ SLSI_INFO(sdev, "Ignore invalid freq:%d in freq list\n", freq_list[i]);
+ } else if (channels[num_channels]->flags & chan_flags) {
+ SLSI_INFO(sdev, "Skip invalid channel:%d for ACS\n", channels[num_channels]->hw_value);
+ } else {
+ idx = slsi_find_chan_idx(channels[num_channels]->hw_value, request->hw_mode);
+ ch_info[idx].chan = channels[num_channels]->hw_value;
+ num_channels++;
+ }
+ }
+
+ if (num_channels == 1) {
+ memset(&acs_selected_channels, 0, sizeof(acs_selected_channels));
+ acs_selected_channels.ch_width = 20;
+ acs_selected_channels.hw_mode = request->hw_mode;
+ acs_selected_channels.pri_channel = channels[0]->hw_value;
+ r = slsi_send_acs_event(sdev, acs_selected_channels);
+ sdev->acs_channel_switched = true;
+ kfree(freq_list);
+ kfree(request);
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+ }
+
+ if (request->hw_mode == SLSI_ACS_MODE_IEEE80211A)
+ request->ch_list_len = 25;
+ else
+ request->ch_list_len = 14;
+ memcpy(&request->acs_chan_info[0], &ch_info[0], sizeof(ch_info));
+ ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request = request;
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan = false;
+ r = slsi_mlme_add_scan(sdev,
+ dev,
+ FAPI_SCANTYPE_AP_AUTO_CHANNEL_SELECTION,
+ FAPI_REPORTMODE_REAL_TIME,
+ 0, /* n_ssids */
+ NULL, /* ssids */
+ num_channels,
+ channels,
+ NULL,
+ NULL, /* ie */
+ 0, /* ie_len */
+ ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan);
+ } else {
+ SLSI_ERR(sdev, "Invalid freq_list len:%d or ch_width:%d or hw_mode:%d\n", freq_list_len,
+ request->ch_width, request->hw_mode);
+ r = -EINVAL;
+ kfree(request);
+ }
+ kfree(freq_list);
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
+ return r;
+}
+
/* Vendor event table registered with cfg80211.
 * NOTE(review): cfg80211 refers to vendor events by their index in this
 * array (cfg80211_vendor_event_alloc takes an event index), so entry order
 * is part of the user-space ABI — append only, never reorder.  The
 * deprecated entries below are kept precisely to preserve the indices of
 * everything after them.
 */
static const struct nl80211_vendor_cmd_info slsi_vendor_events[] = {
	/**********Deprecated now due to fapi updates.Do not remove*/
	{ OUI_GOOGLE, SLSI_NL80211_SIGNIFICANT_CHANGE_EVENT },
	{ OUI_GOOGLE, SLSI_NL80211_HOTLIST_AP_FOUND_EVENT },
	/******************************************/
	{ OUI_GOOGLE, SLSI_NL80211_SCAN_RESULTS_AVAILABLE_EVENT },
	{ OUI_GOOGLE, SLSI_NL80211_FULL_SCAN_RESULT_EVENT },
	{ OUI_GOOGLE, SLSI_NL80211_SCAN_EVENT },
	/**********Deprecated now due to fapi updates.Do not remove*/
	{ OUI_GOOGLE, SLSI_NL80211_HOTLIST_AP_LOST_EVENT },
	/******************************************/
#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
	{ OUI_SAMSUNG, SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH },
#endif
	{ OUI_SAMSUNG, SLSI_NL80211_VENDOR_HANGED_EVENT },
	{ OUI_GOOGLE, SLSI_NL80211_EPNO_EVENT },
	{ OUI_GOOGLE, SLSI_NL80211_HOTSPOT_MATCH },
	{ OUI_GOOGLE, SLSI_NL80211_RSSI_REPORT_EVENT},
#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
	{ OUI_GOOGLE, SLSI_NL80211_LOGGER_RING_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_LOGGER_FW_DUMP_EVENT},
#endif
	{ OUI_GOOGLE, SLSI_NL80211_NAN_RESPONSE_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_MATCH_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_FOLLOWUP_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_NAN_DISABLED_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_RTT_RESULT_EVENT},
	{ OUI_GOOGLE, SLSI_NL80211_RTT_COMPLETE_EVENT},
	{ OUI_SAMSUNG, SLSI_NL80211_VENDOR_ACS_EVENT},
	{ OUI_SAMSUNG, SLSI_NL80211_VENDOR_FORWARD_BEACON},
	{ OUI_SAMSUNG, SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT}
};
+
+static const struct wiphy_vendor_command slsi_vendor_cmd[] = {
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_get_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_VALID_CHANNELS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_get_valid_channel
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_ADD_GSCAN
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_add
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_DEL_GSCAN
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_del
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_SCAN_RESULTS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_get_scan_results
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_GSCAN_OUI
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_gscan_set_oui
+ },
+#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
+ {
+ {
+ .vendor_id = OUI_SAMSUNG,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_key_mgmt_set_pmk
+ },
+#endif
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_BSSID_BLACKLIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_bssid_blacklist
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_START_KEEP_ALIVE_OFFLOAD
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_start_keepalive_offload
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_STOP_KEEP_ALIVE_OFFLOAD
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_stop_keepalive_offload
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_EPNO_LIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_epno_ssid
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_HS_LIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_hs_params
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_RESET_HS_LIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_reset_hs_params
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_RSSI_MONITOR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_rssi_monitor
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_SET_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_lls_set_stats
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_GET_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_lls_get_stats
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_CLEAR_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_lls_clear_stats
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_FEATURE_SET
+ },
+ .flags = 0,
+ .doit = slsi_get_feature_set
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_COUNTRY_CODE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_country_code
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_CONFIGURE_ND_OFFLOAD
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_configure_nd_offload
+ },
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_START_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_start_logging
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_RESET_LOGGING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_reset_logging
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_TRIGGER_FW_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_trigger_fw_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_FW_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_fw_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_TRIGGER_DRIVER_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_trigger_driver_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_DRIVER_MEM_DUMP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_driver_mem_dump
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_VERSION
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_version
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_RING_STATUS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_ring_buffers_status
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_RING_DATA
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_ring_data
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_FEATURE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_logger_supported_feature_set
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_START_PKT_FATE_MONITORING
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_start_pkt_fate_monitoring
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_TX_PKT_FATES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_tx_pkt_fates
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_RX_PKT_FATES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_rx_pkt_fates
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_WAKE_REASON_STATS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_wake_reason_stats
+ },
+#endif /* CONFIG_SCSC_WLAN_ENHANCED_LOGGING */
+#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_ENABLE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_enable
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_DISABLE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_disable
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_PUBLISH
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_publish
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_PUBLISHCANCEL
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_publish_cancel
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_SUBSCRIBE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_subscribe
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_SUBSCRIBECANCEL
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_subscribe_cancel
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_TXFOLLOWUP
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_transmit_followup
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_set_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_NAN_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_nan_get_capabilities
+ },
+#endif
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_GET_ROAMING_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_get_roaming_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_SET_ROAMING_STATE
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_set_roaming_state
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_RTT_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_rtt_get_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_RTT_RANGE_START
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_rtt_set_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_RTT_RANGE_CANCEL
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_rtt_cancel_config
+ },
+ {
+ {
+ .vendor_id = OUI_SAMSUNG,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_ACS_INIT
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_acs_init
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_APF_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_apf_get_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_APF_SET_FILTER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_apf_set_filter
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = SLSI_NL80211_VENDOR_SUBCMD_APF_READ_FILTER
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = slsi_apf_read_filter
+ }
+};
+
+/* Detach the vendor command/event tables from the wiphy and flush any
+ * gscan results still cached by the driver.  Counterpart of
+ * slsi_nl80211_vendor_init(); the wiphy object itself remains valid.
+ */
+void slsi_nl80211_vendor_deinit(struct slsi_dev *sdev)
+{
+ SLSI_DBG2(sdev, SLSI_GSCAN, "De-initialise vendor command and events\n");
+
+ sdev->wiphy->n_vendor_commands = 0;
+ sdev->wiphy->vendor_commands = NULL;
+ sdev->wiphy->n_vendor_events = 0;
+ sdev->wiphy->vendor_events = NULL;
+
+ SLSI_DBG2(sdev, SLSI_GSCAN, "Gscan cleanup\n");
+ slsi_gscan_flush_scan_results(sdev);
+}
+
+/* Hook the vendor command/event tables into the wiphy and reset all
+ * driver-side gscan bookkeeping (per-bucket scan ids, results hash
+ * table, hotlist result list).  Called once while setting up the wiphy.
+ */
+void slsi_nl80211_vendor_init(struct slsi_dev *sdev)
+{
+ int idx;
+
+ SLSI_DBG2(sdev, SLSI_GSCAN, "Init vendor command and events\n");
+
+ /* Publish the vendor command/event tables to cfg80211. */
+ sdev->wiphy->vendor_commands = slsi_vendor_cmd;
+ sdev->wiphy->n_vendor_commands = ARRAY_SIZE(slsi_vendor_cmd);
+ sdev->wiphy->vendor_events = slsi_vendor_events;
+ sdev->wiphy->n_vendor_events = ARRAY_SIZE(slsi_vendor_events);
+
+ /* Each bucket owns a fixed scan id offset from SLSI_GSCAN_SCAN_ID_START. */
+ for (idx = 0; idx < SLSI_GSCAN_MAX_BUCKETS; idx++)
+  sdev->bucket[idx].scan_id = SLSI_GSCAN_SCAN_ID_START + idx;
+
+ /* Start with an empty scan-results hash table. */
+ for (idx = 0; idx < SLSI_GSCAN_HASH_TABLE_SIZE; idx++)
+  sdev->gscan_hash_table[idx] = NULL;
+
+ INIT_LIST_HEAD(&sdev->hotlist_results);
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "nl80211_vendor_nan.h"
+
+#ifndef __SLSI_NL80211_VENDOR_H_
+#define __SLSI_NL80211_VENDOR_H_
+
+/* Vendor OUIs used to namespace the vendor commands/events. */
+#define OUI_GOOGLE 0x001A11
+#define OUI_SAMSUNG 0x0000f0
+/* Sub-command id ranges; must stay in sync with enum slsi_hal_vendor_subcmds. */
+#define SLSI_NL80211_GSCAN_SUBCMD_RANGE_START 0x1000
+#define SLSI_NL80211_GSCAN_EVENT_RANGE_START 0x01
+#define SLSI_NL80211_LOGGING_SUBCMD_RANGE_START 0x1400
+#define SLSI_NL80211_NAN_SUBCMD_RANGE_START 0x1500
+#define SLSI_NL80211_RTT_SUBCMD_RANGE_START 0x1100
+#define SLSI_NL80211_APF_SUBCMD_RANGE_START 0x1600
+#define SLSI_GSCAN_SCAN_ID_START 0x410
+#define SLSI_GSCAN_SCAN_ID_END 0x500
+
+#define SLSI_GSCAN_MAX_BUCKETS (8)
+#define SLSI_GSCAN_MAX_CHANNELS (16) /* As per gscan.h */
+#define SLSI_GSCAN_MAX_HOTLIST_APS (64)
+#define SLSI_GSCAN_MAX_BUCKETS_PER_GSCAN (SLSI_GSCAN_MAX_BUCKETS)
+#define SLSI_GSCAN_MAX_SCAN_CACHE_SIZE (12000)
+#define SLSI_GSCAN_MAX_AP_CACHE_PER_SCAN (16)
+#define SLSI_GSCAN_MAX_SCAN_REPORTING_THRESHOLD (100)
+#define SLSI_GSCAN_MAX_SIGNIFICANT_CHANGE_APS (64)
+#define SLSI_GSCAN_MAX_EPNO_SSIDS (32)
+#define SLSI_GSCAN_MAX_EPNO_HS2_PARAM (8) /* Framework is not using this. Tune when needed */
+
+/* Bit flags for slsi_nl_bucket_param.report_events. */
+#define SLSI_REPORT_EVENTS_NONE (0)
+#define SLSI_REPORT_EVENTS_EACH_SCAN (1)
+#define SLSI_REPORT_EVENTS_FULL_RESULTS (2)
+#define SLSI_REPORT_EVENTS_NO_BATCH (4)
+
+/* Netlink encoding overheads for sizing vendor reply skbs. */
+#define SLSI_NL_ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
+#define SLSI_NL_ATTRIBUTE_COUNTRY_CODE (4)
+#define SLSI_NL_VENDOR_ID_OVERHEAD SLSI_NL_ATTRIBUTE_U32_LEN
+#define SLSI_NL_VENDOR_SUBCMD_OVERHEAD SLSI_NL_ATTRIBUTE_U32_LEN
+#define SLSI_NL_VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
+
+#define SLSI_NL_VENDOR_REPLY_OVERHEAD (SLSI_NL_VENDOR_ID_OVERHEAD + \
+ SLSI_NL_VENDOR_SUBCMD_OVERHEAD + \
+ SLSI_NL_VENDOR_DATA_OVERHEAD)
+
+#define SLSI_GSCAN_RTT_UNSPECIFIED (-1)
+#define SLSI_GSCAN_HASH_TABLE_SIZE (32)
+#define SLSI_GSCAN_HASH_KEY_MASK (0x1F)
+/* Argument is parenthesized so that expression arguments (e.g. a + b)
+ * are masked as a whole, not just their last operand (CERT PRE01-C).
+ */
+#define SLSI_GSCAN_GET_HASH_KEY(_key) ((_key) & SLSI_GSCAN_HASH_KEY_MASK)
+
+#define SLSI_KEEP_SCAN_RESULT (0)
+#define SLSI_DISCARD_SCAN_RESULT (1)
+
+#define SLSI_GSCAN_MAX_BSSID_PER_IE (20)
+
+#define SLSI_LLS_CAPABILITY_QOS 0x00000001 /* set for QOS association */
+#define SLSI_LLS_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11 beacon frame control protected bit set)*/
+#define SLSI_LLS_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities element interworking bit is set*/
+#define SLSI_LLS_CAPABILITY_HS20 0x00000008 /* set for HS20 association*/
+#define SLSI_LLS_CAPABILITY_SSID_UTF8 0x00000010 /* set is 802.11 Extended Capabilities element UTF-8 SSID bit is set*/
+#define SLSI_LLS_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present*/
+
+#define TIMESPEC_TO_US(ts) (((u64)(ts).tv_sec * USEC_PER_SEC) + (ts).tv_nsec / NSEC_PER_USEC)
+
+/* Feature enums */
+#define SLSI_WIFI_HAL_FEATURE_INFRA 0x000001 /* Basic infrastructure mode */
+#define SLSI_WIFI_HAL_FEATURE_INFRA_5G 0x000002 /* Support for 5 GHz Band */
+#define SLSI_WIFI_HAL_FEATURE_HOTSPOT 0x000004 /* Support for GAS/ANQP */
+#define SLSI_WIFI_HAL_FEATURE_P2P 0x000008 /* Wifi-Direct */
+#define SLSI_WIFI_HAL_FEATURE_SOFT_AP 0x000010 /* Soft AP */
+#define SLSI_WIFI_HAL_FEATURE_GSCAN 0x000020 /* Google-Scan APIs */
+#define SLSI_WIFI_HAL_FEATURE_NAN 0x000040 /* Neighbor Awareness Networking */
+#define SLSI_WIFI_HAL_FEATURE_D2D_RTT 0x000080 /* Device-to-device RTT */
+#define SLSI_WIFI_HAL_FEATURE_D2AP_RTT 0x000100 /* Device-to-AP RTT */
+#define SLSI_WIFI_HAL_FEATURE_BATCH_SCAN 0x000200 /* Batched Scan (legacy) */
+#define SLSI_WIFI_HAL_FEATURE_PNO 0x000400 /* Preferred network offload */
+#define SLSI_WIFI_HAL_FEATURE_ADDITIONAL_STA 0x000800 /* Support for two STAs */
+#define SLSI_WIFI_HAL_FEATURE_TDLS 0x001000 /* Tunnel directed link setup */
+#define SLSI_WIFI_HAL_FEATURE_TDLS_OFFCHANNEL 0x002000 /* Support for TDLS off channel */
+#define SLSI_WIFI_HAL_FEATURE_EPR 0x004000 /* Enhanced power reporting */
+#define SLSI_WIFI_HAL_FEATURE_AP_STA 0x008000 /* Support for AP STA Concurrency */
+#define SLSI_WIFI_HAL_FEATURE_LINK_LAYER_STATS 0x010000 /* Link layer stats collection */
+#define SLSI_WIFI_HAL_FEATURE_LOGGER 0x020000 /* WiFi Logger */
+#define SLSI_WIFI_HAL_FEATURE_HAL_EPNO 0x040000 /* WiFi PNO enhanced */
+#define SLSI_WIFI_HAL_FEATURE_RSSI_MONITOR 0x080000 /* RSSI Monitor */
+#define SLSI_WIFI_HAL_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */
+#define SLSI_WIFI_HAL_FEATURE_CONTROL_ROAMING 0x800000 /* Enable/Disable firmware roaming macro */
+
+/* Attribute ids for the ND-offload / PNO random-MAC-OUI vendor commands. */
+enum slsi_wifi_attr {
+ SLSI_NL_ATTRIBUTE_ND_OFFLOAD_VALUE = 0,
+ SLSI_NL_ATTRIBUTE_PNO_RANDOM_MAC_OUI
+};
+
+/* Attribute ids for the Android Packet Filter (APF) vendor commands. */
+enum SLSI_APF_ATTRIBUTES {
+ SLSI_APF_ATTR_VERSION = 0,
+ SLSI_APF_ATTR_MAX_LEN,
+ SLSI_APF_ATTR_PROGRAM,
+ SLSI_APF_ATTR_PROGRAM_LEN
+};
+
+/* Attribute ids for the roaming capabilities/state vendor commands. */
+enum SLSI_ROAM_ATTRIBUTES {
+ SLSI_NL_ATTR_MAX_BLACKLIST_SIZE,
+ SLSI_NL_ATTR_MAX_WHITELIST_SIZE,
+ SLSI_NL_ATTR_ROAM_STATE
+};
+
+/* Attribute ids exchanged with the ACS (auto channel selection) command
+ * and its completion event.  Values are userspace ABI - do not renumber.
+ */
+enum slsi_acs_attr_offload {
+ SLSI_ACS_ATTR_CHANNEL_INVALID = 0,
+ SLSI_ACS_ATTR_PRIMARY_CHANNEL,
+ SLSI_ACS_ATTR_SECONDARY_CHANNEL,
+ SLSI_ACS_ATTR_HW_MODE,
+ SLSI_ACS_ATTR_HT_ENABLED,
+ SLSI_ACS_ATTR_HT40_ENABLED,
+ SLSI_ACS_ATTR_VHT_ENABLED,
+ SLSI_ACS_ATTR_CHWIDTH,
+ SLSI_ACS_ATTR_CH_LIST,
+ SLSI_ACS_ATTR_VHT_SEG0_CENTER_CHANNEL,
+ SLSI_ACS_ATTR_VHT_SEG1_CENTER_CHANNEL,
+ SLSI_ACS_ATTR_FREQ_LIST,
+ /* keep last */
+ SLSI_ACS_ATTR_AFTER_LAST,
+ SLSI_ACS_ATTR_MAX =
+ SLSI_ACS_ATTR_AFTER_LAST - 1
+};
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+/* Attributes of the forward-beacon vendor event (WIPS beacon forwarding). */
+enum slsi_wips_attr {
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_SSID = 0,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_BSSID,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_CHANNEL,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_BCN_INTERVAL,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_TIME_STAMP1,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_TIME_STAMP2,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_SYS_TIME,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_MAX
+};
+
+/* Attribute of the forward-beacon-abort vendor event. */
+enum slsi_wips_abort_attr {
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_ABORT = 0,
+ SLSI_WLAN_VENDOR_ATTR_FORWARD_BEACON_ABORT_MAX,
+};
+
+/* Reason codes reported with the forward-beacon-abort event.
+ * NOTE(review): _OFFSET = 0x8007 presumably maps firmware reason codes
+ * into this namespace - confirm against the firmware interface spec.
+ */
+enum slsi_forward_beacon_abort_reason {
+ SLSI_FORWARD_BEACON_ABORT_REASON_UNSPECIFIED = 0,
+ SLSI_FORWARD_BEACON_ABORT_REASON_SCANNING,
+ SLSI_FORWARD_BEACON_ABORT_REASON_ROAMING,
+ SLSI_FORWARD_BEACON_ABORT_REASON_SUSPENDED,
+ SLSI_FORWARD_BEACON_ABORT_REASON_OFFSET = 0x8007,
+};
+#endif
+
+/* Hardware modes reported through SLSI_ACS_ATTR_HW_MODE. */
+enum slsi_acs_hw_mode {
+ SLSI_ACS_MODE_IEEE80211B,
+ SLSI_ACS_MODE_IEEE80211G,
+ SLSI_ACS_MODE_IEEE80211A,
+ SLSI_ACS_MODE_IEEE80211AD,
+ SLSI_ACS_MODE_IEEE80211ANY,
+};
+
+/* Attribute ids for the gscan vendor commands/events.  Gaps between the
+ * groups are reserved; values are userspace ABI - do not renumber.
+ */
+enum GSCAN_ATTRIBUTE {
+ GSCAN_ATTRIBUTE_NUM_BUCKETS = 10,
+ GSCAN_ATTRIBUTE_BASE_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKETS_BAND,
+ GSCAN_ATTRIBUTE_BUCKET_ID,
+ GSCAN_ATTRIBUTE_BUCKET_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
+ GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
+ GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
+ GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
+ GSCAN_ATTRIBUTE_REPORT_THRESHOLD_NUM_SCANS,
+ GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
+
+ GSCAN_ATTRIBUTE_ENABLE_FEATURE = 20,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, /* indicates no more results */
+ GSCAN_ATTRIBUTE_REPORT_EVENTS,
+
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_NUM_OF_RESULTS = 30,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */
+ GSCAN_ATTRIBUTE_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_CHANNEL_LIST,
+ GSCAN_ATTRIBUTE_SCAN_ID,
+ GSCAN_ATTRIBUTE_SCAN_FLAGS,
+ GSCAN_ATTRIBUTE_SCAN_BUCKET_BIT,
+
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = 60,
+ GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
+ GSCAN_ATTRIBUTE_MIN_BREACHING,
+ GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
+
+ GSCAN_ATTRIBUTE_BUCKET_STEP_COUNT = 70,
+ GSCAN_ATTRIBUTE_BUCKET_EXPONENT,
+ GSCAN_ATTRIBUTE_BUCKET_MAX_PERIOD,
+
+ GSCAN_ATTRIBUTE_NUM_BSSID,
+ GSCAN_ATTRIBUTE_BLACKLIST_BSSID,
+
+ GSCAN_ATTRIBUTE_MAX
+};
+
+/* Attribute ids for the enhanced-PNO (ePNO) SSID list command. */
+enum epno_ssid_attribute {
+ SLSI_ATTRIBUTE_EPNO_MINIMUM_5G_RSSI,
+ SLSI_ATTRIBUTE_EPNO_MINIMUM_2G_RSSI,
+ SLSI_ATTRIBUTE_EPNO_INITIAL_SCORE_MAX,
+ SLSI_ATTRIBUTE_EPNO_CUR_CONN_BONUS,
+ SLSI_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS,
+ SLSI_ATTRIBUTE_EPNO_SECURE_BONUS,
+ SLSI_ATTRIBUTE_EPNO_5G_BONUS,
+ SLSI_ATTRIBUTE_EPNO_SSID_NUM,
+ SLSI_ATTRIBUTE_EPNO_SSID_LIST,
+ SLSI_ATTRIBUTE_EPNO_SSID,
+ SLSI_ATTRIBUTE_EPNO_SSID_LEN,
+ SLSI_ATTRIBUTE_EPNO_FLAGS,
+ SLSI_ATTRIBUTE_EPNO_AUTH,
+ SLSI_ATTRIBUTE_EPNO_MAX
+};
+
+/* Attribute ids for the ePNO Hotspot 2.0 (Passpoint) list command. */
+enum epno_hs_attribute {
+ SLSI_ATTRIBUTE_EPNO_HS_PARAM_LIST,
+ SLSI_ATTRIBUTE_EPNO_HS_NUM,
+ SLSI_ATTRIBUTE_EPNO_HS_ID,
+ SLSI_ATTRIBUTE_EPNO_HS_REALM,
+ SLSI_ATTRIBUTE_EPNO_HS_CONSORTIUM_IDS,
+ SLSI_ATTRIBUTE_EPNO_HS_PLMN,
+ SLSI_ATTRIBUTE_EPNO_HS_MAX
+};
+
+/* Nested attribute ids, one per gscan channel bucket (max 8 buckets). */
+enum gscan_bucket_attributes {
+ GSCAN_ATTRIBUTE_CH_BUCKET_1,
+ GSCAN_ATTRIBUTE_CH_BUCKET_2,
+ GSCAN_ATTRIBUTE_CH_BUCKET_3,
+ GSCAN_ATTRIBUTE_CH_BUCKET_4,
+ GSCAN_ATTRIBUTE_CH_BUCKET_5,
+ GSCAN_ATTRIBUTE_CH_BUCKET_6,
+ GSCAN_ATTRIBUTE_CH_BUCKET_7,
+ GSCAN_ATTRIBUTE_CH_BUCKET_8
+};
+
+/* Band selector for gscan buckets; values combine as a bitmask
+ * (BG | A | A_DFS), hence the non-sequential numbering.
+ */
+enum wifi_band {
+ WIFI_BAND_UNSPECIFIED,
+ WIFI_BAND_BG = 1, /* 2.4 GHz */
+ WIFI_BAND_A = 2, /* 5 GHz without DFS */
+ WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */
+ WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */
+ WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */
+ WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */
+};
+
+/* Reason codes delivered with the gscan SCAN_EVENT. */
+enum wifi_scan_event {
+ WIFI_SCAN_RESULTS_AVAILABLE,
+ WIFI_SCAN_THRESHOLD_NUM_SCANS,
+ WIFI_SCAN_THRESHOLD_PERCENT,
+ WIFI_SCAN_FAILED,
+};
+
+/* Attribute ids for the mkeep-alive (keep-alive packet offload) commands. */
+enum wifi_mkeep_alive_attribute {
+ MKEEP_ALIVE_ATTRIBUTE_ID,
+ MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
+ MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN,
+ MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR,
+ MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR,
+ MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC
+};
+
+/* Attribute ids for the RSSI monitor command. */
+enum wifi_rssi_monitor_attr {
+ SLSI_RSSI_MONITOR_ATTRIBUTE_MAX_RSSI,
+ SLSI_RSSI_MONITOR_ATTRIBUTE_MIN_RSSI,
+ SLSI_RSSI_MONITOR_ATTRIBUTE_START
+};
+
+/* Attribute ids for the link-layer statistics (LLS) commands. */
+enum lls_attribute {
+ LLS_ATTRIBUTE_SET_MPDU_SIZE_THRESHOLD = 1,
+ LLS_ATTRIBUTE_SET_AGGR_STATISTICS_GATHERING,
+ LLS_ATTRIBUTE_CLEAR_STOP_REQUEST_MASK,
+ LLS_ATTRIBUTE_CLEAR_STOP_REQUEST,
+ LLS_ATTRIBUTE_MAX
+};
+
+/* Vendor sub-command ids.  The HAL-facing ids live in disjoint ranges:
+ * gscan 0x1000+, RTT 0x1100+, enhanced logging 0x1400+, NAN 0x1500+,
+ * APF 0x1600+.  These values are userspace ABI - do not renumber.
+ */
+enum slsi_hal_vendor_subcmds {
+ SLSI_NL80211_VENDOR_SUBCMD_GET_CAPABILITIES = SLSI_NL80211_GSCAN_SUBCMD_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_VALID_CHANNELS,
+ SLSI_NL80211_VENDOR_SUBCMD_ADD_GSCAN,
+ SLSI_NL80211_VENDOR_SUBCMD_DEL_GSCAN,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_SCAN_RESULTS,
+ /*****Deprecated due to fapi updates.Do not remove.************/
+ SLSI_NL80211_VENDOR_SUBCMD_SET_BSSID_HOTLIST,
+ SLSI_NL80211_VENDOR_SUBCMD_RESET_BSSID_HOTLIST,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_HOTLIST_RESULTS,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_SIGNIFICANT_CHANGE,
+ SLSI_NL80211_VENDOR_SUBCMD_RESET_SIGNIFICANT_CHANGE,
+ /********************************************************/
+ SLSI_NL80211_VENDOR_SUBCMD_SET_GSCAN_OUI,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_NODFS,
+ SLSI_NL80211_VENDOR_SUBCMD_START_KEEP_ALIVE_OFFLOAD,
+ SLSI_NL80211_VENDOR_SUBCMD_STOP_KEEP_ALIVE_OFFLOAD,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_BSSID_BLACKLIST,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_EPNO_LIST,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_HS_LIST,
+ SLSI_NL80211_VENDOR_SUBCMD_RESET_HS_LIST,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_RSSI_MONITOR,
+ SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_SET_STATS,
+ SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_GET_STATS,
+ SLSI_NL80211_VENDOR_SUBCMD_LSTATS_SUBCMD_CLEAR_STATS,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_FEATURE_SET,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_COUNTRY_CODE,
+ SLSI_NL80211_VENDOR_SUBCMD_CONFIGURE_ND_OFFLOAD,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_ROAMING_CAPABILITIES,
+ SLSI_NL80211_VENDOR_SUBCMD_SET_ROAMING_STATE,
+ SLSI_NL80211_VENDOR_SUBCMD_START_LOGGING = SLSI_NL80211_LOGGING_SUBCMD_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_TRIGGER_FW_MEM_DUMP,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_FW_MEM_DUMP,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_VERSION,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_RING_STATUS,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_RING_DATA,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_FEATURE,
+ SLSI_NL80211_VENDOR_SUBCMD_RESET_LOGGING,
+ SLSI_NL80211_VENDOR_SUBCMD_TRIGGER_DRIVER_MEM_DUMP,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_DRIVER_MEM_DUMP,
+ SLSI_NL80211_VENDOR_SUBCMD_START_PKT_FATE_MONITORING,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_TX_PKT_FATES,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_RX_PKT_FATES,
+ SLSI_NL80211_VENDOR_SUBCMD_GET_WAKE_REASON_STATS,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_ENABLE = SLSI_NL80211_NAN_SUBCMD_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_DISABLE,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_PUBLISH,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_PUBLISHCANCEL,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_SUBSCRIBE,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_SUBSCRIBECANCEL,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_TXFOLLOWUP,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_CONFIG,
+ SLSI_NL80211_VENDOR_SUBCMD_NAN_CAPABILITIES,
+ SLSI_NL80211_VENDOR_SUBCMD_RTT_GET_CAPABILITIES = SLSI_NL80211_RTT_SUBCMD_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_RTT_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_RTT_RANGE_CANCEL,
+ SLSI_NL80211_VENDOR_SUBCMD_APF_SET_FILTER = SLSI_NL80211_APF_SUBCMD_RANGE_START,
+ SLSI_NL80211_VENDOR_SUBCMD_APF_GET_CAPABILITIES,
+ SLSI_NL80211_VENDOR_SUBCMD_APF_READ_FILTER
+};
+
+/* Sub-command ids used by the supplicant (OUI_SAMSUNG namespace). */
+enum slsi_supp_vendor_subcmds {
+ SLSI_NL80211_VENDOR_SUBCMD_UNSPEC = 0,
+ SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_SET_KEY,
+ SLSI_NL80211_VENDOR_SUBCMD_ACS_INIT,
+};
+
+/* Vendor event indices.  The position in this enum is the event index
+ * registered with cfg80211 (slsi_vendor_events[]), so the order is ABI:
+ * never reorder or insert entries in the middle.
+ */
+enum slsi_vendor_event_values {
+ /**********Deprecated now due to fapi updates.Do not remove*/
+ SLSI_NL80211_SIGNIFICANT_CHANGE_EVENT,
+ SLSI_NL80211_HOTLIST_AP_FOUND_EVENT,
+ /******************************************/
+ SLSI_NL80211_SCAN_RESULTS_AVAILABLE_EVENT,
+ SLSI_NL80211_FULL_SCAN_RESULT_EVENT,
+ SLSI_NL80211_SCAN_EVENT,
+ /**********Deprecated now due to fapi updates.Do not remove*/
+ SLSI_NL80211_HOTLIST_AP_LOST_EVENT,
+ /******************************************/
+ SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH,
+ SLSI_NL80211_VENDOR_HANGED_EVENT,
+ SLSI_NL80211_EPNO_EVENT,
+ SLSI_NL80211_HOTSPOT_MATCH,
+ SLSI_NL80211_RSSI_REPORT_EVENT,
+ SLSI_NL80211_LOGGER_RING_EVENT,
+ SLSI_NL80211_LOGGER_FW_DUMP_EVENT,
+ SLSI_NL80211_NAN_RESPONSE_EVENT,
+ SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT,
+ SLSI_NL80211_NAN_MATCH_EVENT,
+ SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT,
+ SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT,
+ SLSI_NL80211_NAN_FOLLOWUP_EVENT,
+ SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT,
+ SLSI_NL80211_NAN_DISABLED_EVENT,
+ SLSI_NL80211_RTT_RESULT_EVENT,
+ SLSI_NL80211_RTT_COMPLETE_EVENT,
+ SLSI_NL80211_VENDOR_ACS_EVENT,
+ SLSI_NL80211_VENDOR_FORWARD_BEACON,
+ SLSI_NL80211_VENDOR_FORWARD_BEACON_ABORT
+};
+
+/* Interface mode reported in link-layer statistics. */
+enum slsi_lls_interface_mode {
+ SLSI_LLS_INTERFACE_STA = 0,
+ SLSI_LLS_INTERFACE_SOFTAP = 1,
+ SLSI_LLS_INTERFACE_IBSS = 2,
+ SLSI_LLS_INTERFACE_P2P_CLIENT = 3,
+ SLSI_LLS_INTERFACE_P2P_GO = 4,
+ SLSI_LLS_INTERFACE_NAN = 5,
+ SLSI_LLS_INTERFACE_MESH = 6,
+ SLSI_LLS_INTERFACE_UNKNOWN = -1
+};
+
+/* Connection state reported in link-layer statistics. */
+enum slsi_lls_connection_state {
+ SLSI_LLS_DISCONNECTED = 0,
+ SLSI_LLS_AUTHENTICATING = 1,
+ SLSI_LLS_ASSOCIATING = 2,
+ SLSI_LLS_ASSOCIATED = 3,
+ SLSI_LLS_EAPOL_STARTED = 4, /* if done by firmware/driver*/
+ SLSI_LLS_EAPOL_COMPLETED = 5, /* if done by firmware/driver*/
+};
+
+/* Roaming state reported in link-layer statistics. */
+enum slsi_lls_roam_state {
+ SLSI_LLS_ROAMING_IDLE = 0,
+ SLSI_LLS_ROAMING_ACTIVE = 1,
+};
+
+/* access categories */
+enum slsi_lls_traffic_ac {
+ SLSI_LLS_AC_VO = 0,
+ SLSI_LLS_AC_VI = 1,
+ SLSI_LLS_AC_BE = 2,
+ SLSI_LLS_AC_BK = 3,
+ SLSI_LLS_AC_MAX = 4,
+};
+
+/* channel operating width */
+enum slsi_lls_channel_width {
+ SLSI_LLS_CHAN_WIDTH_20 = 0,
+ SLSI_LLS_CHAN_WIDTH_40 = 1,
+ SLSI_LLS_CHAN_WIDTH_80 = 2,
+ SLSI_LLS_CHAN_WIDTH_160 = 3,
+ SLSI_LLS_CHAN_WIDTH_80P80 = 4,
+ SLSI_LLS_CHAN_WIDTH_5 = 5,
+ SLSI_LLS_CHAN_WIDTH_10 = 6,
+ SLSI_LLS_CHAN_WIDTH_INVALID = -1
+};
+
+/* wifi peer type */
+enum slsi_lls_peer_type {
+ SLSI_LLS_PEER_STA,
+ SLSI_LLS_PEER_AP,
+ SLSI_LLS_PEER_P2P_GO,
+ SLSI_LLS_PEER_P2P_CLIENT,
+ SLSI_LLS_PEER_NAN,
+ SLSI_LLS_PEER_TDLS,
+ SLSI_LLS_PEER_INVALID,
+};
+
+/* slsi_enhanced_logging_attributes */
+enum slsi_enhanced_logging_attributes {
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_VERSION,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_VERSION,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_ID,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_NAME,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_FLAGS,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_VERBOSE_LEVEL,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_LOG_MAX_INTERVAL,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_LOG_MIN_DATA_SIZE,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_LEN,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_FW_DUMP_DATA,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_DATA,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_STATUS,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_RING_NUM,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_LEN,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_DRIVER_DUMP_DATA,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_NUM,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_PKT_FATE_DATA,
+ /* NOTE(review): the wake-stats attributes below restart at 0 and so
+  * alias the numeric values of the attributes above.  This looks
+  * intentional (separate attribute namespace for the wake-reason
+  * subcommand), but the values are netlink ABI - do not renumber.
+  */
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_INVALID = 0,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_CMD_EVENT_WAKE,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_CMD_EVENT_WAKE_CNT_PTR,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_CMD_EVENT_WAKE_CNT_SZ,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_DRIVER_FW_LOCAL_WAKE,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_DRIVER_FW_LOCAL_WAKE_CNT_PTR,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_DRIVER_FW_LOCAL_WAKE_CNT_SZ,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_TOTAL_RX_DATA_WAKE,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_UNICAST_CNT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_MULTICAST_CNT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_RX_BROADCAST_CNT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP_PKT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_PKT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_RA,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_NA,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_NS,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP4_RX_MULTICAST_CNT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_ICMP6_RX_MULTICAST_CNT,
+ SLSI_ENHANCED_LOGGING_ATTRIBUTE_WAKE_STATS_OTHER_RX_MULTICAST_CNT,
+};
+
+/* Attribute ids of the RTT result/complete vendor events. */
+enum slsi_rtt_event_attributes {
+ SLSI_RTT_EVENT_ATTR_ADDR = 0,
+ SLSI_RTT_EVENT_ATTR_BURST_NUM,
+ SLSI_RTT_EVENT_ATTR_MEASUREMENT_NUM,
+ SLSI_RTT_EVENT_ATTR_SUCCESS_NUM,
+ SLSI_RTT_EVENT_ATTR_NUM_PER_BURST_PEER,
+ SLSI_RTT_EVENT_ATTR_STATUS,
+ SLSI_RTT_EVENT_ATTR_RETRY_AFTER_DURATION,
+ SLSI_RTT_EVENT_ATTR_TYPE,
+ SLSI_RTT_EVENT_ATTR_RSSI,
+ SLSI_RTT_EVENT_ATTR_RSSI_SPREAD,
+ SLSI_RTT_EVENT_ATTR_TX_PREAMBLE,
+ SLSI_RTT_EVENT_ATTR_TX_NSS,
+ SLSI_RTT_EVENT_ATTR_TX_BW,
+ SLSI_RTT_EVENT_ATTR_TX_MCS,
+ SLSI_RTT_EVENT_ATTR_TX_RATE,
+ SLSI_RTT_EVENT_ATTR_RX_PREAMBLE,
+ SLSI_RTT_EVENT_ATTR_RX_NSS,
+ SLSI_RTT_EVENT_ATTR_RX_BW,
+ SLSI_RTT_EVENT_ATTR_RX_MCS,
+ SLSI_RTT_EVENT_ATTR_RX_RATE,
+ SLSI_RTT_EVENT_ATTR_RTT,
+ SLSI_RTT_EVENT_ATTR_RTT_SD,
+ SLSI_RTT_EVENT_ATTR_RTT_SPREAD,
+ SLSI_RTT_EVENT_ATTR_DISTANCE_MM,
+ SLSI_RTT_EVENT_ATTR_DISTANCE_SD_MM,
+ SLSI_RTT_EVENT_ATTR_DISTANCE_SPREAD_MM,
+ SLSI_RTT_EVENT_ATTR_TIMESTAMP_US,
+ SLSI_RTT_EVENT_ATTR_BURST_DURATION_MSN,
+ SLSI_RTT_EVENT_ATTR_NEGOTIATED_BURST_NUM,
+ SLSI_RTT_EVENT_ATTR_LCI,
+ SLSI_RTT_EVENT_ATTR_LCR,
+
+};
+
+/* RTT peer type */
+enum slsi_rtt_peer_type {
+ SLSI_RTT_PEER_AP = 0x1,
+ SLSI_RTT_PEER_STA,
+ SLSI_RTT_PEER_P2P_GO,
+ SLSI_RTT_PEER_P2P_CLIENT,
+ SLSI_RTT_PEER_NAN,
+};
+
+/* RTT Measurement Bandwidth */
+enum slsi_wifi_rtt_bw {
+ SLSI_WIFI_RTT_BW_5 = 0x01,
+ SLSI_WIFI_RTT_BW_10 = 0x02,
+ SLSI_WIFI_RTT_BW_20 = 0x04,
+ SLSI_WIFI_RTT_BW_40 = 0x08,
+ SLSI_WIFI_RTT_BW_80 = 0x10,
+ SLSI_WIFI_RTT_BW_160 = 0x20
+};
+
+/* RTT Measurement Preamble */
+enum slsi_wifi_rtt_preamble {
+ SLSI_WIFI_RTT_PREAMBLE_LEGACY = 0x1,
+ SLSI_WIFI_RTT_PREAMBLE_HT = 0x2,
+ SLSI_WIFI_RTT_PREAMBLE_VHT = 0x4
+};
+
+/* RTT Type */
+enum slsi_wifi_rtt_type {
+ SLSI_RTT_TYPE_1_SIDED = 0x1,
+ SLSI_RTT_TYPE_2_SIDED,
+};
+
+/* Attribute ids for the RTT range-start/cancel commands. */
+enum slsi_rtt_attribute {
+ SLSI_RTT_ATTRIBUTE_TARGET_CNT = 0,
+ SLSI_RTT_ATTRIBUTE_TARGET_INFO,
+ SLSI_RTT_ATTRIBUTE_TARGET_MAC,
+ SLSI_RTT_ATTRIBUTE_TARGET_TYPE,
+ SLSI_RTT_ATTRIBUTE_TARGET_PEER,
+ SLSI_RTT_ATTRIBUTE_TARGET_CHAN_FREQ,
+ SLSI_RTT_ATTRIBUTE_TARGET_PERIOD,
+ SLSI_RTT_ATTRIBUTE_TARGET_NUM_BURST,
+ SLSI_RTT_ATTRIBUTE_TARGET_NUM_FTM_BURST,
+ SLSI_RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTM,
+ SLSI_RTT_ATTRIBUTE_TARGET_NUM_RETRY_FTMR,
+ SLSI_RTT_ATTRIBUTE_TARGET_LCI,
+ SLSI_RTT_ATTRIBUTE_TARGET_LCR,
+ SLSI_RTT_ATTRIBUTE_TARGET_BURST_DURATION,
+ SLSI_RTT_ATTRIBUTE_TARGET_PREAMBLE,
+ SLSI_RTT_ATTRIBUTE_TARGET_BW,
+ SLSI_RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
+ SLSI_RTT_ATTRIBUTE_RESULTS_PER_TARGET,
+ SLSI_RTT_ATTRIBUTE_RESULT_CNT,
+ SLSI_RTT_ATTRIBUTE_RESULT,
+ SLSI_RTT_ATTRIBUTE_TARGET_ID
+};
+
+/* Ranging status */
+enum slsi_wifi_rtt_status {
+ SLSI_RTT_STATUS_SUCCESS = 0,
+ SLSI_RTT_STATUS_FAILURE, /* general failure status */
+ SLSI_RTT_STATUS_FAIL_NO_RSP, /* target STA does not respond to request */
+ SLSI_RTT_STATUS_FAIL_REJECTED, /* request rejected. Applies to 2-sided RTT only*/
+ SLSI_RTT_STATUS_FAIL_NOT_SCHEDULED_YET,
+ SLSI_RTT_STATUS_FAIL_TM_TIMEOUT, /* timing measurement times out */
+ SLSI_RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL, /* Target on different channel, cannot range */
+ SLSI_RTT_STATUS_FAIL_NO_CAPABILITY, /* ranging not supported */
+ SLSI_RTT_STATUS_ABORTED, /* request aborted for unknown reason */
+ SLSI_RTT_STATUS_FAIL_INVALID_TS, /* Invalid T1-T4 timestamp */
+ SLSI_RTT_STATUS_FAIL_PROTOCOL, /* 11mc protocol failed */
+ SLSI_RTT_STATUS_FAIL_SCHEDULE, /* request could not be scheduled */
+ SLSI_RTT_STATUS_FAIL_BUSY_TRY_LATER, /* responder cannot collaborate at time of request */
+ SLSI_RTT_STATUS_INVALID_REQ, /* bad request args */
+ SLSI_RTT_STATUS_NO_WIFI, /* WiFi not enabled */
+ SLSI_RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE /* Responder overrides param info, cannot range with new params */
+};
+
+/* Format of information elements found in the beacon */
+struct slsi_wifi_information_element {
+ u8 id; /* element identifier */
+ u8 len; /* number of bytes to follow */
+ u8 data[];
+};
+
+struct slsi_nl_gscan_capabilities {
+ int max_scan_cache_size;
+ int max_scan_buckets;
+ int max_ap_cache_per_scan;
+ int max_rssi_sample_size;
+ int max_scan_reporting_threshold;
+ int max_hotlist_aps;
+ int max_hotlist_ssids;
+ int max_significant_wifi_change_aps;
+ int max_bssid_history_entries;
+ int max_number_epno_networks;
+ int max_number_epno_networks_by_ssid;
+ int max_number_of_white_listed_ssid;
+};
+
+struct slsi_nl_channel_param {
+ int channel;
+ int dwell_time_ms;
+ int passive; /* 0 => active, 1 => passive scan; ignored for DFS */
+};
+
+struct slsi_nl_bucket_param {
+ int bucket_index;
+ enum wifi_band band;
+ int period; /* desired period in millisecond */
+ u8 report_events;
+ int max_period; /* If non-zero: scan period will grow exponentially to a maximum period of max_period */
+ int exponent; /* multiplier: new_period = old_period ^ exponent */
+ int step_count; /* number of scans performed at a given period and until the exponent is applied */
+ int num_channels;
+ struct slsi_nl_channel_param channels[SLSI_GSCAN_MAX_CHANNELS];
+};
+
+struct slsi_nl_gscan_param {
+ int base_period; /* base timer period in ms */
+ int max_ap_per_scan; /* number of APs to store in each scan in the BSSID/RSSI history buffer */
+ int report_threshold_percent; /* when scan_buffer is this much full, wake up application processor */
+ int report_threshold_num_scans; /* wake up application processor after these many scans */
+ int num_buckets;
+ struct slsi_nl_bucket_param nl_bucket[SLSI_GSCAN_MAX_BUCKETS];
+};
+
+struct slsi_nl_scan_result_param {
+ u64 ts; /* time since boot (in microsecond) when the result was retrieved */
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; /* NULL terminated */
+ u8 bssid[6];
+ int channel; /* channel frequency in MHz */
+ int rssi; /* in db */
+ s64 rtt; /* in nanoseconds */
+ s64 rtt_sd; /* standard deviation in rtt */
+ u16 beacon_period; /* period advertised in the beacon */
+ u16 capability; /* capabilities advertised in the beacon */
+ u32 ie_length; /* size of the ie_data blob */
+ u8 ie_data[1]; /* beacon IE */
+};
+
+struct slsi_bucket {
+ bool used; /* to identify if this entry is free */
+ bool for_change_tracking; /* Indicates if this scan_id is used for change_tracking */
+ u8 report_events; /* this is received from HAL/Framework */
+ u16 scan_id; /* SLSI_GSCAN_SCAN_ID_START + <offset in the array> */
+ int scan_cycle; /* To find the current scan cycle */
+ struct slsi_gscan *gscan; /* gscan ref in which this bucket belongs */
+};
+
+struct slsi_gscan {
+ int max_ap_per_scan; /* received from HAL/Framework */
+ int report_threshold_percent; /* received from HAL/Framework */
+ int report_threshold_num_scans; /* received from HAL/Framework */
+ int num_scans;
+ int num_buckets; /* received from HAL/Framework */
+ struct slsi_nl_bucket_param nl_bucket; /* store the first bucket params. used in tracking*/
+ struct slsi_bucket *bucket[SLSI_GSCAN_MAX_BUCKETS_PER_GSCAN];
+ struct slsi_gscan *next;
+};
+
+struct slsi_gscan_param {
+ struct slsi_nl_bucket_param *nl_bucket;
+ struct slsi_bucket *bucket;
+};
+
+struct slsi_gscan_result {
+ struct slsi_gscan_result *hnext;
+ int scan_cycle;
+ int scan_res_len;
+ int anqp_length;
+ struct slsi_nl_scan_result_param nl_scan_res;
+};
+
+struct slsi_epno_ssid_param {
+ u16 flags;
+ u8 ssid_len;
+ u8 ssid[32];
+};
+
+/* Enhanced PNO configuration from the HAL, followed by num_networks
+ * slsi_epno_ssid_param entries in the flexible array.
+ */
+struct slsi_epno_param {
+ u16 min_5g_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */
+ u16 min_2g_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */
+ u16 initial_score_max; /* maximum score that a network can have before bonuses */
+ u8 current_connection_bonus; /* report current connection bonus only, when there is a
+ * network's score this much higher than the current connection
+ */
+ u8 same_network_bonus; /* score bonus for all networks with the same network flag */
+ u8 secure_bonus; /* score bonus for networks that are not open */
+ u8 band_5g_bonus; /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
+ u8 num_networks; /* number of wifi_epno_network objects */
+ struct slsi_epno_ssid_param epno_ssid[]; /* PNO networks */
+};
+
+/* Hotspot 2.0 ePNO network block (realm/roaming-consortium/PLMN match). */
+struct slsi_epno_hs2_param {
+ u32 id; /* identifier of this network block, report this in event */
+ u8 realm[256]; /* null terminated UTF8 encoded realm, 0 if unspecified */
+ s64 roaming_consortium_ids[16]; /* roaming consortium ids to match, 0s if unspecified */
+ u8 plmn[3]; /* mcc/mnc combination as per rules, 0s if unspecified */
+};
+
+/* RSSI monitor event payload: the measured RSSI and the associated BSSID. */
+struct slsi_rssi_monitor_evt {
+ s16 rssi;
+ u8 bssid[ETH_ALEN];
+};
+
+/* channel information */
+struct slsi_lls_channel_info {
+ enum slsi_lls_channel_width width; /* channel width (20, 40, 80, 80+80, 160) */
+ int center_freq; /* primary 20 MHz channel frequency (MHz) */
+ int center_freq0; /* center frequency (MHz) first segment */
+ int center_freq1; /* center frequency (MHz) second segment */
+};
+
+/* channel statistics */
+struct slsi_lls_channel_stat {
+ struct slsi_lls_channel_info channel; /* the channel these counters refer to */
+ u32 on_time; /* msecs the radio is awake (32 bits number accruing over time) */
+ u32 cca_busy_time; /* msecs the CCA register is busy (32 bits number accruing over time) */
+};
+
+/* wifi rate: 32-bit packed descriptor (3+2+3+8+16 bits) plus bitrate */
+struct slsi_lls_rate {
+ u32 preamble :3; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ u32 nss :2; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+ u32 bw :3; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+ u32 rate_mcs_idx :8; /* OFDM/CCK rate code mcs index */
+ u32 reserved :16; /* reserved */
+ u32 bitrate; /* units of 100 Kbps */
+};
+
+/* per rate statistics */
+struct slsi_lls_rate_stat {
+ struct slsi_lls_rate rate; /* rate information */
+ u32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ u32 rx_mpdu; /* number of received data pkts */
+ u32 mpdu_lost; /* number of data packet losses (no ACK) */
+ u32 retries; /* total number of data pkt retries */
+ u32 retries_short; /* number of short data pkt retries */
+ u32 retries_long; /* number of long data pkt retries */
+};
+
+/* radio statistics; trailed by num_channels per-channel entries */
+struct slsi_lls_radio_stat {
+ int radio; /* wifi radio (if multiple radio supported) */
+ u32 on_time; /* msecs the radio is awake (32 bits number accruing over time) */
+ u32 tx_time; /* msecs the radio is transmitting (32 bits number accruing over time) */
+ u32 rx_time; /* msecs the radio is in active receive (32 bits number accruing over time) */
+ u32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits number accruing over time) */
+ u32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits number accruing over time) */
+ u32 on_time_gscan; /* msecs the radio is awake due to gscan (32 bits number accruing over time) */
+ u32 on_time_roam_scan; /* msecs the radio is awake due to roam scan (32 bits number accruing over time) */
+ u32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits number accruing over time) */
+ u32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and GAS exchange (32 bits number accruing over time) */
+ u32 num_channels; /* number of channels */
+ struct slsi_lls_channel_stat channels[]; /* channel statistics */
+};
+
+/* Snapshot of one interface's link-layer state for LLS reporting. */
+struct slsi_lls_interface_link_layer_info {
+ enum slsi_lls_interface_mode mode; /* interface mode */
+ u8 mac_addr[6]; /* interface mac address (self) */
+ enum slsi_lls_connection_state state; /* connection state (valid for STA, CLI only) */
+ enum slsi_lls_roam_state roaming; /* roaming state */
+ u32 capabilities; /* WIFI_CAPABILITY_XXX (self) */
+ u8 ssid[33]; /* null terminated SSID */
+ u8 bssid[6]; /* bssid */
+ u8 ap_country_str[3]; /* country string advertised by AP */
+ u8 country_str[3]; /* country string for this association */
+};
+
+/* per peer statistics; trailed by num_rate per-rate entries */
+struct slsi_lls_peer_info {
+ enum slsi_lls_peer_type type; /* peer type (AP, TDLS, GO etc.) */
+ u8 peer_mac_address[6]; /* mac address */
+ u32 capabilities; /* peer WIFI_CAPABILITY_XXX */
+ u32 num_rate; /* number of rates */
+ struct slsi_lls_rate_stat rate_stats[]; /* per rate statistics, number of entries = num_rate */
+};
+
+/* Per access category statistics (one entry per WMM AC). */
+struct slsi_lls_wmm_ac_stat {
+ enum slsi_lls_traffic_ac ac; /* access category (VI, VO, BE, BK) */
+ u32 tx_mpdu; /* number of successfully transmitted unicast data pkts (ACK rcvd) */
+ u32 rx_mpdu; /* number of received unicast data packets */
+ u32 tx_mcast; /* number of successfully transmitted multicast data packets */
+ u32 rx_mcast; /* number of received multicast data packets */
+ u32 rx_ampdu; /* number of received unicast a-mpdus; support of this counter is optional */
+ u32 tx_ampdu; /* number of transmitted unicast a-mpdus; support of this counter is optional */
+ u32 mpdu_lost; /* number of data pkt losses (no ACK) */
+ u32 retries; /* total number of data pkt retries */
+ u32 retries_short; /* number of short data pkt retries */
+ u32 retries_long; /* number of long data pkt retries */
+ u32 contention_time_min; /* data pkt min contention time (usecs) */
+ u32 contention_time_max; /* data pkt max contention time (usecs) */
+ u32 contention_time_avg; /* data pkt avg contention time (usecs) */
+ u32 contention_num_samples; /* num of data pkts used for contention statistics */
+};
+
+/* Counters of received data packets that woke the host, by cast type. */
+struct slsi_rx_data_cnt_details {
+ int rx_unicast_cnt; /* Total rx unicast packet which woke up host */
+ int rx_multicast_cnt; /* Total rx multicast packet which woke up host */
+ int rx_broadcast_cnt; /* Total rx broadcast packet which woke up host */
+};
+
+/* Classification of rx wake packets by ICMP/ICMPv6 message type. */
+struct slsi_rx_wake_pkt_type_classification {
+ int icmp_pkt; /* wake icmp packet count */
+ int icmp6_pkt; /* wake icmp6 packet count */
+ int icmp6_ra; /* wake icmp6 RA packet count */
+ int icmp6_na; /* wake icmp6 NA packet count */
+ int icmp6_ns; /* wake icmp6 NS packet count */
+};
+
+/* Breakdown of multicast rx wake packets by address family. */
+struct slsi_rx_multicast_cnt {
+ int ipv4_rx_multicast_addr_cnt; /* Rx wake packet was ipv4 multicast */
+ int ipv6_rx_multicast_addr_cnt; /* Rx wake packet was ipv6 multicast */
+ int other_rx_multicast_addr_cnt;/* Rx wake packet was non-ipv4 and non-ipv6 */
+};
+
+/*
+ * Structure holding all the driver/firmware wake count reasons.
+ *
+ * Buffers for the array fields (cmd_event_wake_cnt/driver_fw_local_wake_cnt)
+ * are allocated and freed by the framework. The size of each allocated
+ * array is indicated by the corresponding |_cnt_sz| field. HAL needs to
+ * fill in the corresponding |_cnt_used| field to indicate the number of
+ * elements used in the array.
+ */
+struct slsi_wlan_driver_wake_reason_cnt {
+ int total_cmd_event_wake; /* Total count of cmd event wakes */
+ int *cmd_event_wake_cnt; /* Individual wake count array, each index a reason */
+ int cmd_event_wake_cnt_sz; /* Max number of cmd event wake reasons */
+ int cmd_event_wake_cnt_used; /* Number of cmd event wake reasons specific to the driver */
+
+ int total_driver_fw_local_wake; /* Total count of drive/fw wakes, for local reasons */
+ int *driver_fw_local_wake_cnt; /* Individual wake count array, each index a reason */
+ int driver_fw_local_wake_cnt_sz; /* Max number of local driver/fw wake reasons */
+ int driver_fw_local_wake_cnt_used; /* Number of local driver/fw wake reasons specific to the driver */
+
+ int total_rx_data_wake; /* total data rx packets, that woke up host */
+ struct slsi_rx_data_cnt_details rx_wake_details;
+ struct slsi_rx_wake_pkt_type_classification rx_wake_pkt_classification_info;
+ struct slsi_rx_multicast_cnt rx_multicast_wake_pkt_info;
+};
+
+/* interface statistics; trailed by num_peers per-peer entries */
+struct slsi_lls_iface_stat {
+ void *iface; /* wifi interface */
+ struct slsi_lls_interface_link_layer_info info; /* current state of the interface */
+ u32 beacon_rx; /* access point beacon received count from connected AP */
+ u64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) */
+ u32 leaky_ap_detected; /* indicate that this AP typically leaks packets beyond the driver guard time. */
+ u32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after frame with PM bit set was ACK'ed by AP */
+ u32 leaky_ap_guard_time; /* guard time; units unspecified here — presumably msecs, TODO confirm */
+ u32 mgmt_rx; /* access point mgmt frames received count from connected AP (including Beacon) */
+ u32 mgmt_action_rx; /* action frames received count */
+ u32 mgmt_action_tx; /* action frames transmit count */
+ int rssi_mgmt; /* access Point Beacon and Management frames RSSI (averaged) */
+ int rssi_data; /* access Point Data Frames RSSI (averaged) from connected AP */
+ int rssi_ack; /* access Point ACK RSSI (averaged) from connected AP */
+ struct slsi_lls_wmm_ac_stat ac[SLSI_LLS_AC_MAX]; /* per ac data packet statistics */
+ u32 num_peers; /* number of peers */
+ struct slsi_lls_peer_info peer_info[]; /* per peer statistics */
+};
+
+/* Return codes shared with the Android Wi-Fi HAL (0 and negatives only). */
+enum slsi_wifi_hal_api_return_types {
+ WIFI_HAL_SUCCESS = 0,
+ WIFI_HAL_ERROR_NONE = 0, /* alias of WIFI_HAL_SUCCESS */
+ WIFI_HAL_ERROR_UNKNOWN = -1,
+ WIFI_HAL_ERROR_UNINITIALIZED = -2,
+ WIFI_HAL_ERROR_NOT_SUPPORTED = -3,
+ WIFI_HAL_ERROR_NOT_AVAILABLE = -4,
+ WIFI_HAL_ERROR_INVALID_ARGS = -5,
+ WIFI_HAL_ERROR_INVALID_REQUEST_ID = -6,
+ WIFI_HAL_ERROR_TIMED_OUT = -7,
+ WIFI_HAL_ERROR_TOO_MANY_REQUESTS = -8,
+ WIFI_HAL_ERROR_OUT_OF_MEMORY = -9
+};
+
+/* RTT capabilities reported to the HAL (filled by slsi_mib_get_rtt_cap). */
+struct slsi_rtt_capabilities {
+ u8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */
+ u8 rtt_ftm_supported; /* if ftm rtt data collection is supported */
+ u8 lci_support; /* if initiator supports LCI request. Applies to 2-sided RTT */
+ u8 lcr_support; /* if initiator supports LCR request. Applies to 2-sided RTT */
+ u8 preamble_support; /* bit mask indicates what preamble is supported by initiator */
+ u8 bw_support; /* bit mask indicates what BW is supported by initiator */
+ u8 responder_supported; /* if 11mc responder mode is supported */
+ u8 mc_version; /* draft 11mc spec version supported by chip. For instance,
+ *version 4.0 should be 40 and version 4.3 should be 43 etc.
+ */
+};
+
+/* RTT configuration: parameters for one RTT measurement peer, from the HAL */
+struct slsi_rtt_config {
+ u8 peer_addr[ETH_ALEN]; /* peer device mac address */
+ u16 type; /* 1-sided or 2-sided RTT */
+ u16 channel_freq; /* Required for STA-AP mode, optional for P2P, NBD etc. */
+ u16 channel_info;
+ u8 burst_period; /* Time interval between bursts (units: 100 ms). */
+ /* Applies to 1-sided and 2-sided RTT multi-burst requests.
+ *Range: 0-31, 0: no preference by initiator (2-sided RTT)
+ */
+ u8 num_burst; /* Total number of RTT bursts to be executed. It will be
+ *specified in the same way as the parameter "Number of
+ *Burst Exponent" found in the FTM frame format. It
+ *applies to both: 1-sided RTT and 2-sided RTT. Valid
+ *values are 0 to 15 as defined in 802.11mc std
+ *0 means single shot
+ *The implication of this parameter on the maximum
+ *number of RTT results is the following:
+ *for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst)
+ *for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1)
+ */
+ u8 num_frames_per_burst; /* num of frames per burst.
+ *Minimum value = 1, Maximum value = 31
+ *For 2-sided this equals the number of FTM frames
+ *to be attempted in a single burst. This also
+ *equals the number of FTM frames that the
+ *initiator will request that the responder send
+ *in a single frame.
+ */
+ u8 num_retries_per_ftmr; /* Maximum number of retries that the initiator can
+ *retry an FTMR frame.
+ *Minimum value = 0, Maximum value = 3
+ */
+ u8 burst_duration; /* Applies to 1-sided and 2-sided RTT. Valid values will
+ *be 2-11 and 15 as specified by the 802.11mc std for
+ *the FTM parameter burst duration. In a multi-burst
+ *request, if responder overrides with larger value,
+ *the initiator will return failure. In a single-burst
+ *request if responder overrides with larger value,
+ *the initiator will sent TMR_STOP to terminate RTT
+ *at the end of the burst_duration it requested.
+ */
+ u16 preamble; /* RTT preamble to be used in the RTT frames */
+ u16 bw; /* RTT BW to be used in the RTT frames */
+ u16 LCI_request; /* 1: request LCI, 0: do not request LCI */
+ u16 LCR_request; /* 1: request LCR, 0: do not request LCR */
+};
+
+#define MAX_CHAN_VALUE_ACS 25 /*Max number of supported channel is 25*/
+
+/* Per-channel survey data used for automatic channel selection (ACS). */
+struct slsi_acs_chan_info {
+ u16 chan; /* channel number */
+ u8 num_ap; /* APs seen on this channel */
+ u8 num_bss_load_ap; /* APs advertising a BSS load element */
+ u8 total_chan_utilization;
+ u8 avg_chan_utilization;
+ int rssi_factor;
+ int adj_rssi_factor; /* RSSI contribution from adjacent channels */
+};
+
+/* ACS outcome: chosen primary/secondary channels and VHT segment centres. */
+struct slsi_acs_selected_channels {
+ u8 pri_channel;
+ u8 sec_channel;
+ u8 vht_seg0_center_ch;
+ u8 vht_seg1_center_ch;
+ u16 ch_width; /* selected channel width (MHz) */
+ enum slsi_acs_hw_mode hw_mode;
+};
+
+/* ACS request state: survey info for up to MAX_CHAN_VALUE_ACS channels. */
+struct slsi_acs_request {
+ struct slsi_acs_chan_info acs_chan_info[MAX_CHAN_VALUE_ACS];
+ u8 hw_mode;
+ u16 ch_width;
+ u8 ch_list_len; /* number of valid entries in acs_chan_info */
+};
+
+/* Vendor command (un)registration */
+void slsi_nl80211_vendor_init(struct slsi_dev *sdev);
+void slsi_nl80211_vendor_deinit(struct slsi_dev *sdev);
+/* gscan / scan result handling */
+u8 slsi_gscan_get_scan_policy(enum wifi_band band);
+void slsi_gscan_handle_scan_result(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, u16 scan_id, bool scan_done);
+void slsi_gscan_hash_remove(struct slsi_dev *sdev, u8 *mac);
+void slsi_rx_significant_change_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+int slsi_gscan_alloc_buckets(struct slsi_dev *sdev, struct slsi_gscan *gscan, int num_buckets);
+int slsi_vendor_event(struct slsi_dev *sdev, int event_id, const void *data, int len);
+int slsi_mib_get_gscan_cap(struct slsi_dev *sdev, struct slsi_nl_gscan_capabilities *cap);
+void slsi_rx_rssi_report_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+/* capability queries (APF / RTT) */
+int slsi_mib_get_apf_cap(struct slsi_dev *sdev, struct net_device *dev);
+int slsi_mib_get_rtt_cap(struct slsi_dev *sdev, struct net_device *dev, struct slsi_rtt_capabilities *cap);
+/* RTT ranging */
+int slsi_mlme_add_range_req(struct slsi_dev *sdev, u8 count, struct slsi_rtt_config *nl_rtt_params,
+       u16 rtt_id, u16 vif_idx, u8 *source_addr);
+int slsi_mlme_del_range_req(struct slsi_dev *sdev, struct net_device *dev, u16 count, u8 *addr, u16 rtt_id);
+void slsi_rx_range_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_range_done_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+/* misc helpers */
+int slsi_tx_rate_calc(struct sk_buff *nl_skb, u16 fw_rate, int res, bool tx_rate);
+void slsi_check_num_radios(struct slsi_dev *sdev);
+void slsi_rx_event_log_indication(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+char *slsi_print_event_name(int event_id);
+#endif
+
+
+/* A scan id belongs to gscan when it lies in the reserved gscan range. */
+static inline bool slsi_is_gscan_id(u16 scan_id)
+{
+ return (scan_id >= SLSI_GSCAN_SCAN_ID_START) &&
+        (scan_id <= SLSI_GSCAN_SCAN_ID_END);
+}
+
+/* Map an internal FAPI traffic queue onto the Android LLS access
+ * category; unknown queues map to SLSI_LLS_AC_MAX.
+ */
+static inline enum slsi_lls_traffic_ac slsi_fapi_to_android_traffic_q(enum slsi_traffic_q fapi_q)
+{
+ if (fapi_q == SLSI_TRAFFIC_Q_BE)
+  return SLSI_LLS_AC_BE;
+ if (fapi_q == SLSI_TRAFFIC_Q_BK)
+  return SLSI_LLS_AC_BK;
+ if (fapi_q == SLSI_TRAFFIC_Q_VI)
+  return SLSI_LLS_AC_VI;
+ if (fapi_q == SLSI_TRAFFIC_Q_VO)
+  return SLSI_LLS_AC_VO;
+ return SLSI_LLS_AC_MAX;
+}
+
+#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "cfg80211_ops.h"
+#include "debug.h"
+#include "mgt.h"
+#include "mlme.h"
+
+/* Return the net_device reserved for NAN, or NULL in builds with fewer
+ * than 4 virtual interfaces (NAN needs its own netdev index).
+ */
+struct net_device *slsi_nan_get_netdev(struct slsi_dev *sdev)
+{
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+ return slsi_get_netdev(sdev, SLSI_NET_INDEX_NAN);
+#else
+ return NULL;
+#endif
+}
+
+/* Return the lowest id in [1, max_ids] whose bit is clear in @id_map,
+ * or 0 when every id is already in use.
+ */
+static int slsi_nan_get_new_id(u32 id_map, int max_ids)
+{
+ int id = 1;
+
+ while (id <= max_ids) {
+  if (!(id_map & BIT(id)))
+   return id;
+  id++;
+ }
+ return 0;
+}
+
+/* Allocate the next free NAN publish id (0 if none free). */
+static int slsi_nan_get_new_publish_id(struct netdev_vif *ndev_vif)
+{
+ return slsi_nan_get_new_id(ndev_vif->nan.publish_id_map, SLSI_NAN_MAX_PUBLISH_ID);
+}
+
+/* Allocate the next free NAN subscribe id (0 if none free). */
+static int slsi_nan_get_new_subscribe_id(struct netdev_vif *ndev_vif)
+{
+ return slsi_nan_get_new_id(ndev_vif->nan.subscribe_id_map, SLSI_NAN_MAX_SUBSCRIBE_ID);
+}
+
+/* True when publish id @id is currently allocated in the vif's bitmap. */
+static bool slsi_nan_is_publish_id_active(struct netdev_vif *ndev_vif, u32 id)
+{
+ return ndev_vif->nan.publish_id_map & BIT(id);
+}
+
+/* True when subscribe id @id is currently allocated in the vif's bitmap. */
+static bool slsi_nan_is_subscribe_id_active(struct netdev_vif *ndev_vif, u32 id)
+{
+ return ndev_vif->nan.subscribe_id_map & BIT(id);
+}
+
+/* Stitch all Samsung vendor IEs with the given type/subtype from the IE
+ * blob @buff into @dest_buff: the first match is copied whole, payloads
+ * of later matches are appended (their 7-byte headers are stripped).
+ *
+ * Note: vendor IE length of the stitched info may not be correct. Caller
+ * has to depend on return value for full length of IE.
+ */
+static u32 slsi_nan_stitch_ie(u8 *buff, u32 buff_len, u16 oui_type_subtype, u8 *dest_buff)
+{
+ u8 samsung_oui[] = {0x00, 0x16, 0x32};
+ u8 *pos = buff;
+ u32 dest_buff_len = 0;
+
+ while (buff_len - (pos - buff) > 2 + sizeof(samsung_oui) + 2) {
+  /* check if buffer is at least as long as the IE claims to be */
+  if (pos[1] + 2 + pos - buff > buff_len) {
+   /* truncated IE: jump to the end so the loop terminates */
+   pos = buff + buff_len;
+   continue;
+  }
+  if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+      memcmp(samsung_oui, &pos[2], sizeof(samsung_oui)) == 0 &&
+      /* NOTE(review): compares oui_type_subtype in host byte order
+       * against the on-air bytes — confirm producers use the same order.
+       */
+      memcmp((u8 *)&oui_type_subtype, &pos[5], sizeof(oui_type_subtype)) == 0) {
+   if (!dest_buff_len) {
+    /* first match: copy EID + len + full payload */
+    memcpy(dest_buff, pos, pos[1] + 2);
+    dest_buff_len = pos[1] + 2;
+   } else {
+    /* continuation: append payload only (skip OUI + type/subtype) */
+    memcpy(&dest_buff[dest_buff_len], &pos[7], pos[1] - 5);
+    dest_buff_len += pos[1] - 5;
+   }
+  }
+  pos += pos[1] + 2;
+ }
+
+ return dest_buff_len;
+}
+
+/* Fill @nan_mac_addr with the MAC reserved for the NAN interface; left
+ * all-zero when NAN is unsupported or not built in.
+ */
+void slsi_nan_get_mac(struct slsi_dev *sdev, char *nan_mac_addr)
+{
+ memset(nan_mac_addr, 0, ETH_ALEN);
+#if CONFIG_SCSC_WLAN_MAX_INTERFACES >= 4
+ if (slsi_dev_nan_supported(sdev))
+  ether_addr_copy(nan_mac_addr, sdev->netdev_addresses[SLSI_NET_INDEX_NAN]);
+#endif
+}
+
+static void slsi_vendor_nan_command_reply(struct wiphy *wiphy, u32 status, u32 error, u32 response_type,
+ u16 publish_subscribe_id, struct slsi_hal_nan_capabilities *capabilities)
+{
+ int reply_len;
+ struct sk_buff *reply;
+
+ reply_len = SLSI_NL_VENDOR_REPLY_OVERHEAD + SLSI_NL_ATTRIBUTE_U32_LEN *
+ (3 + sizeof(struct slsi_hal_nan_capabilities));
+ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, reply_len);
+ if (!reply) {
+ SLSI_WARN_NODEV("SKB alloc failed for vendor_cmd reply\n");
+ return;
+ }
+
+ nla_put_u32(reply, NAN_REPLY_ATTR_STATUS_TYPE, status);
+ nla_put_u32(reply, NAN_REPLY_ATTR_VALUE, error);
+ nla_put_u32(reply, NAN_REPLY_ATTR_RESPONSE_TYPE, response_type);
+
+ if (capabilities) {
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_CONCURRENT_CLUSTER,
+ capabilities->max_concurrent_nan_clusters);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_PUBLISHES, capabilities->max_publishes);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_SUBSCRIBES, capabilities->max_subscribes);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_SERVICE_NAME_LEN, capabilities->max_service_name_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_MATCH_FILTER_LEN, capabilities->max_match_filter_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_TOTAL_MATCH_FILTER_LEN,
+ capabilities->max_total_match_filter_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_SERVICE_SPECIFIC_INFO_LEN,
+ capabilities->max_service_specific_info_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_VSA_DATA_LEN, capabilities->max_vsa_data_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_MESH_DATA_LEN, capabilities->max_mesh_data_len);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_NDI_INTERFACES, capabilities->max_ndi_interfaces);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_NDP_SESSIONS, capabilities->max_ndp_sessions);
+ nla_put_u32(reply, NAN_REPLY_ATTR_CAP_MAX_APP_INFO_LEN, capabilities->max_app_info_len);
+ } else if (publish_subscribe_id) {
+ nla_put_u16(reply, NAN_REPLY_ATTR_PUBLISH_SUBSCRIBE_TYPE, publish_subscribe_id);
+ }
+
+ if (cfg80211_vendor_cmd_reply(reply))
+ SLSI_ERR_NODEV("FAILED to reply nan coammnd. response_type:%d\n", response_type);
+}
+
+/* Parse one SDEA-parameter netlink attribute into @sdea_params.
+ * Returns 0 on success, -EINVAL for attributes this parser does not own.
+ */
+static int slsi_nan_get_sdea_params_nl(struct slsi_dev *sdev, struct slsi_nan_sdea_ctrl_params *sdea_params,
+           const struct nlattr *iter, int nl_attr_id)
+{
+ if (nl_attr_id == NAN_REQ_ATTR_SDEA_PARAM_NDP_TYPE)
+  sdea_params->ndp_type = nla_get_u8(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_SDEA_PARAM_SECURITY_CFG)
+  sdea_params->security_cfg = nla_get_u8(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_SDEA_PARAM_RANGING_STATE)
+  sdea_params->ranging_state = nla_get_u8(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_SDEA_PARAM_RANGE_REPORT)
+  sdea_params->range_report = nla_get_u8(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_SDEA_PARAM_QOS_CFG)
+  sdea_params->qos_cfg = nla_get_u8(iter);
+ else
+  return -EINVAL;
+
+ /* every recognised SDEA attribute implies data-path configuration */
+ sdea_params->config_nan_data_path = 1;
+ return 0;
+}
+
+/* Parse one ranging-configuration netlink attribute into @ranging_cfg.
+ * Returns 0 on success, -EINVAL for attributes this parser does not own.
+ */
+static int slsi_nan_get_ranging_cfg_nl(struct slsi_dev *sdev, struct slsi_nan_ranging_cfg *ranging_cfg,
+           const struct nlattr *iter, int nl_attr_id)
+{
+ if (nl_attr_id == NAN_REQ_ATTR_RANGING_CFG_INTERVAL)
+  ranging_cfg->ranging_interval_msec = nla_get_u32(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGING_CFG_INDICATION)
+  ranging_cfg->config_ranging_indications = nla_get_u32(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGING_CFG_INGRESS_MM)
+  ranging_cfg->distance_ingress_mm = nla_get_u32(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGING_CFG_EGRESS_MM)
+  ranging_cfg->distance_egress_mm = nla_get_u32(iter);
+ else
+  return -EINVAL;
+ return 0;
+}
+
+/* Parse one NAN security netlink attribute into @sec_info.
+ *
+ * This function is invoked once per attribute from the callers'
+ * nla_for_each loops, so no local state survives between attributes.
+ * The previous code copied PMK/passphrase data using an *uninitialized*
+ * local length (set only in a different invocation) — undefined
+ * behaviour and a potential out-of-bounds memcpy. Fixed by using the
+ * lengths already stored in @sec_info, matching the SCID handling.
+ *
+ * NOTE(review): this assumes the HAL sends each *_LEN attribute before
+ * the corresponding data attribute — confirm against the caller.
+ *
+ * Returns 0 on success, -EINVAL for an unknown attribute id.
+ */
+static int slsi_nan_get_security_info_nl(struct slsi_dev *sdev, struct slsi_nan_security_info *sec_info,
+         const struct nlattr *iter, int nl_attr_id)
+{
+ switch (nl_attr_id) {
+ case NAN_REQ_ATTR_CIPHER_TYPE:
+  sec_info->cipher_type = nla_get_u32(iter);
+  break;
+ case NAN_REQ_ATTR_SECURITY_KEY_TYPE:
+  sec_info->key_info.key_type = nla_get_u8(iter);
+  break;
+ case NAN_REQ_ATTR_SECURITY_PMK_LEN:
+  sec_info->key_info.body.pmk_info.pmk_len = nla_get_u32(iter);
+  break;
+ case NAN_REQ_ATTR_SECURITY_PMK:
+  memcpy(sec_info->key_info.body.pmk_info.pmk, nla_data(iter),
+         sec_info->key_info.body.pmk_info.pmk_len);
+  break;
+ case NAN_REQ_ATTR_SECURITY_PASSPHRASE_LEN:
+  sec_info->key_info.body.passphrase_info.passphrase_len = nla_get_u32(iter);
+  break;
+ case NAN_REQ_ATTR_SECURITY_PASSPHRASE:
+  memcpy(sec_info->key_info.body.passphrase_info.passphrase, nla_data(iter),
+         sec_info->key_info.body.passphrase_info.passphrase_len);
+  break;
+ case NAN_REQ_ATTR_SCID_LEN:
+  sec_info->scid_len = nla_get_u32(iter);
+  break;
+ case NAN_REQ_ATTR_SCID:
+  memcpy(sec_info->scid, nla_data(iter), sec_info->scid_len);
+  break;
+ default:
+  return -EINVAL;
+ }
+ return 0;
+}
+
+/* Parse one range-response-configuration netlink attribute into @cfg.
+ * Returns 0 on success, -EINVAL for attributes this parser does not own.
+ */
+static int slsi_nan_get_range_resp_cfg_nl(struct slsi_dev *sdev, struct slsi_nan_range_response_cfg *cfg,
+           const struct nlattr *iter, int nl_attr_id)
+{
+ if (nl_attr_id == NAN_REQ_ATTR_RANGE_RESPONSE_CFG_PUBLISH_ID)
+  cfg->publish_id = nla_get_u16(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGE_RESPONSE_CFG_REQUESTOR_ID)
+  cfg->requestor_instance_id = nla_get_u32(iter);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGE_RESPONSE_CFG_PEER_ADDR)
+  memcpy(cfg->peer_addr, nla_data(iter), ETH_ALEN);
+ else if (nl_attr_id == NAN_REQ_ATTR_RANGE_RESPONSE_CFG_RANGING_RESPONSE)
+  cfg->ranging_response = nla_get_u8(iter);
+ else
+  return -EINVAL;
+ return 0;
+}
+
+/* Translate the netlink attributes of a NAN enable request into
+ * @hal_req; each recognised value also raises its config_* flag.
+ *
+ * Fix: the RSSI_PROXIMITY_2G4 case previously overwrote the value it had
+ * just parsed with the constant 1 instead of raising the corresponding
+ * config flag (copy-paste error — every sibling case sets a config_*
+ * flag).
+ *
+ * Returns SLSI_HAL_NAN_STATUS_SUCCESS, or
+ * SLSI_HAL_NAN_STATUS_INVALID_PARAM on an unknown attribute.
+ */
+static int slsi_nan_enable_get_nl_params(struct slsi_dev *sdev, struct slsi_hal_nan_enable_req *hal_req,
+      const void *data, int len)
+{
+ int type, tmp;
+ const struct nlattr *iter;
+
+ memset(hal_req, 0, sizeof(*hal_req));
+ nla_for_each_attr(iter, data, len, tmp) {
+  type = nla_type(iter);
+  switch (type) {
+  case NAN_REQ_ATTR_MASTER_PREF:
+   hal_req->master_pref = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_CLUSTER_LOW:
+   hal_req->cluster_low = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_CLUSTER_HIGH:
+   hal_req->cluster_high = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_SUPPORT_5G_VAL:
+   hal_req->support_5g_val = nla_get_u8(iter);
+   hal_req->config_support_5g = 1;
+   break;
+
+  case NAN_REQ_ATTR_SID_BEACON_VAL:
+   hal_req->sid_beacon_val = nla_get_u8(iter);
+   hal_req->config_sid_beacon = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_CLOSE_2G4_VAL:
+   hal_req->rssi_close_2dot4g_val = nla_get_u8(iter);
+   hal_req->config_2dot4g_rssi_close = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_MIDDLE_2G4_VAL:
+   hal_req->rssi_middle_2dot4g_val = nla_get_u8(iter);
+   hal_req->config_2dot4g_rssi_middle = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_PROXIMITY_2G4_VAL:
+   hal_req->rssi_proximity_2dot4g_val = nla_get_u8(iter);
+   /* Fix: was "hal_req->rssi_proximity_2dot4g_val = 1;", which
+    * discarded the parsed value and never raised the flag.
+    */
+   hal_req->config_2dot4g_rssi_proximity = 1;
+   break;
+
+  case NAN_REQ_ATTR_HOP_COUNT_LIMIT_VAL:
+   hal_req->hop_count_limit_val = nla_get_u8(iter);
+   hal_req->config_hop_count_limit = 1;
+   break;
+
+  case NAN_REQ_ATTR_SUPPORT_2G4_VAL:
+   hal_req->support_2dot4g_val = nla_get_u8(iter);
+   hal_req->config_2dot4g_support = 1;
+   break;
+
+  case NAN_REQ_ATTR_BEACONS_2G4_VAL:
+   hal_req->beacon_2dot4g_val = nla_get_u8(iter);
+   hal_req->config_2dot4g_beacons = 1;
+   break;
+
+  case NAN_REQ_ATTR_SDF_2G4_VAL:
+   hal_req->sdf_2dot4g_val = nla_get_u8(iter);
+   hal_req->config_2dot4g_sdf = 1;
+   break;
+
+  case NAN_REQ_ATTR_BEACON_5G_VAL:
+   hal_req->beacon_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_beacons = 1;
+   break;
+
+  case NAN_REQ_ATTR_SDF_5G_VAL:
+   hal_req->sdf_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_sdf = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_CLOSE_5G_VAL:
+   hal_req->rssi_close_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_rssi_close = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_MIDDLE_5G_VAL:
+   hal_req->rssi_middle_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_rssi_middle = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_CLOSE_PROXIMITY_5G_VAL:
+   hal_req->rssi_close_proximity_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_rssi_close_proximity = 1;
+   break;
+
+  case NAN_REQ_ATTR_RSSI_WINDOW_SIZE_VAL:
+   hal_req->rssi_window_size_val = nla_get_u8(iter);
+   hal_req->config_rssi_window_size = 1;
+   break;
+
+  case NAN_REQ_ATTR_OUI_VAL:
+   hal_req->oui_val = nla_get_u32(iter);
+   hal_req->config_oui = 1;
+   break;
+
+  case NAN_REQ_ATTR_MAC_ADDR_VAL:
+   memcpy(hal_req->intf_addr_val, nla_data(iter), ETH_ALEN);
+   hal_req->config_intf_addr = 1;
+   break;
+
+  case NAN_REQ_ATTR_CLUSTER_VAL:
+   hal_req->config_cluster_attribute_val = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_SOCIAL_CH_SCAN_DWELL_TIME:
+   memcpy(hal_req->scan_params_val.dwell_time, nla_data(iter),
+          sizeof(hal_req->scan_params_val.dwell_time));
+   hal_req->config_scan_params = 1;
+   break;
+
+  case NAN_REQ_ATTR_SOCIAL_CH_SCAN_PERIOD:
+   memcpy(hal_req->scan_params_val.scan_period, nla_data(iter),
+          sizeof(hal_req->scan_params_val.scan_period));
+   hal_req->config_scan_params = 1;
+   break;
+
+  case NAN_REQ_ATTR_RANDOM_FACTOR_FORCE_VAL:
+   hal_req->random_factor_force_val = nla_get_u8(iter);
+   hal_req->config_random_factor_force = 1;
+   break;
+
+  case NAN_REQ_ATTR_HOP_COUNT_FORCE_VAL:
+   hal_req->hop_count_force_val = nla_get_u8(iter);
+   hal_req->config_hop_count_force = 1;
+   break;
+
+  case NAN_REQ_ATTR_CHANNEL_2G4_MHZ_VAL:
+   hal_req->channel_24g_val = nla_get_u32(iter);
+   hal_req->config_24g_channel = 1;
+   break;
+
+  case NAN_REQ_ATTR_CHANNEL_5G_MHZ_VAL:
+   hal_req->channel_5g_val = nla_get_u8(iter);
+   hal_req->config_5g_channel = 1;
+   break;
+
+  case NAN_REQ_ATTR_SUBSCRIBE_SID_BEACON_VAL:
+   hal_req->subscribe_sid_beacon_val = nla_get_u8(iter);
+   hal_req->config_subscribe_sid_beacon = 1;
+   break;
+
+  case NAN_REQ_ATTR_DW_2G4_INTERVAL:
+   hal_req->dw_2dot4g_interval_val = nla_get_u8(iter);
+   /* valid range for 2.4G is 1-5 */
+   /* NOTE(review): the check accepts 1-4, excluding 5 — confirm
+    * against the firmware spec whether "< 5" should be "<= 5".
+    */
+   if (hal_req->dw_2dot4g_interval_val > 0 && hal_req->dw_2dot4g_interval_val < 5)
+    hal_req->config_2dot4g_dw_band = 1;
+   break;
+
+  case NAN_REQ_ATTR_DW_5G_INTERVAL:
+   hal_req->dw_5g_interval_val = nla_get_u8(iter);
+   /* valid range for 5g is 0-5 */
+   /* NOTE(review): the check accepts 0-4, excluding 5 — see above. */
+   if (hal_req->dw_5g_interval_val < 5)
+    hal_req->config_5g_dw_band = 1;
+   break;
+
+  case NAN_REQ_ATTR_DISC_MAC_ADDR_RANDOM_INTERVAL:
+   hal_req->disc_mac_addr_rand_interval_sec = nla_get_u8(iter);
+   break;
+
+  default:
+   SLSI_ERR(sdev, "Unexpected NAN enable attribute TYPE:%d\n", type);
+   return SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+  }
+ }
+ return SLSI_HAL_NAN_STATUS_SUCCESS;
+}
+
+/* Handle the HAL "NAN enable" vendor command: parse the request, create
+ * the NAN vif and enable NAN in firmware. Always sends a vendor reply
+ * carrying @reply_status / @ret. Returns 0 or a negative/HAL error.
+ */
+int slsi_nan_enable(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct slsi_hal_nan_enable_req hal_req;
+ int ret;
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+ u8 nan_vif_mac_address[ETH_ALEN];
+ u8 broadcast_mac[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;
+
+ if (!dev) {
+  SLSI_ERR(sdev, "No NAN interface\n");
+  ret = -ENOTSUPP;
+  reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+  goto exit;
+ }
+
+ if (!slsi_dev_nan_supported(sdev)) {
+  SLSI_ERR(sdev, "NAN not allowed(mib:%d)\n", sdev->nan_enabled);
+  ret = WIFI_HAL_ERROR_NOT_SUPPORTED;
+  reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+  goto exit;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ reply_status = slsi_nan_enable_get_nl_params(sdev, &hal_req, data, len);
+ if (reply_status != SLSI_HAL_NAN_STATUS_SUCCESS) {
+  ret = -EINVAL;
+  goto exit;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (ndev_vif->activated) {
+  ret = -EINVAL;
+  SLSI_DBG1(sdev, SLSI_GSCAN, "Already Enabled. Req Rejected\n");
+  goto exit_with_mutex;
+ }
+ /* NOTE(review): vif_type is set before slsi_mlme_add_vif() and is not
+  * reset on failure below — confirm later paths tolerate this.
+  */
+ ndev_vif->vif_type = FAPI_VIFTYPE_NAN;
+
+ /* prefer the HAL-supplied interface address, else the reserved NAN MAC */
+ if (hal_req.config_intf_addr)
+  ether_addr_copy(nan_vif_mac_address, hal_req.intf_addr_val);
+ else
+  slsi_nan_get_mac(sdev, nan_vif_mac_address);
+
+ ret = slsi_mlme_add_vif(sdev, dev, nan_vif_mac_address, broadcast_mac);
+ if (ret) {
+  reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+  SLSI_ERR(sdev, "failed to set unsync vif. Cannot start NAN\n");
+ } else {
+  ret = slsi_mlme_nan_enable(sdev, dev, &hal_req);
+  if (ret) {
+   /* roll back: delete the vif and clear the id bitmaps */
+   SLSI_ERR(sdev, "failed to enable NAN.\n");
+   reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+   slsi_mlme_del_vif(sdev, dev);
+   ndev_vif->activated = false;
+   ndev_vif->nan.subscribe_id_map = 0;
+   ndev_vif->nan.publish_id_map = 0;
+  } else {
+   slsi_vif_activated(sdev, dev);
+  }
+ }
+
+exit_with_mutex:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_ENABLED, 0, NULL);
+ return ret;
+}
+
+/* Handle the HAL "NAN disable" vendor command: tear down the NAN vif if
+ * it is active, then always reply with success. Returns 0.
+ */
+int slsi_nan_disable(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+
+ if (!dev) {
+  SLSI_WARN(sdev, "No NAN interface!!");
+  goto reply;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+  SLSI_WARN(sdev, "NAN FWif not active!!");
+ } else {
+  slsi_mlme_del_vif(sdev, dev);
+  ndev_vif->activated = false;
+  ndev_vif->nan.subscribe_id_map = 0;
+  ndev_vif->nan.publish_id_map = 0;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+reply:
+ slsi_vendor_nan_command_reply(wiphy, SLSI_HAL_NAN_STATUS_SUCCESS, 0, NAN_RESPONSE_DISABLED, 0, NULL);
+
+ return 0;
+}
+
+/* Translate the netlink attributes of a NAN publish request into
+ * @hal_req; unrecognised attributes are delegated to the SDEA, ranging,
+ * security and range-response sub-parsers in turn.
+ *
+ * NOTE(review): the *_len values below come straight from userspace and
+ * are used unchecked as memcpy lengths — confirm the destination arrays
+ * in struct slsi_hal_nan_publish_req are large enough for the maximum
+ * attribute payload, or add explicit bounds checks.
+ *
+ * Returns SLSI_HAL_NAN_STATUS_SUCCESS, or
+ * SLSI_HAL_NAN_STATUS_INVALID_PARAM on an unknown attribute.
+ */
+static int slsi_nan_publish_get_nl_params(struct slsi_dev *sdev, struct slsi_hal_nan_publish_req *hal_req,
+       const void *data, int len)
+{
+ int type, tmp, r;
+ const struct nlattr *iter;
+
+ memset(hal_req, 0, sizeof(*hal_req));
+ nla_for_each_attr(iter, data, len, tmp) {
+  type = nla_type(iter);
+  switch (type) {
+  case NAN_REQ_ATTR_PUBLISH_ID:
+   hal_req->publish_id = nla_get_u16(iter);
+   break;
+  case NAN_REQ_ATTR_PUBLISH_TTL:
+   hal_req->ttl = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_PERIOD:
+   hal_req->period = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_TYPE:
+   hal_req->publish_type = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_TX_TYPE:
+   hal_req->tx_type = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_COUNT:
+   hal_req->publish_count = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SERVICE_NAME_LEN:
+   hal_req->service_name_len = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SERVICE_NAME:
+   /* NOTE(review): unvalidated length from userspace — see header */
+   memcpy(hal_req->service_name, nla_data(iter), hal_req->service_name_len);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_MATCH_ALGO:
+   hal_req->publish_match_indicator = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SERVICE_INFO_LEN:
+   hal_req->service_specific_info_len = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SERVICE_INFO:
+   memcpy(hal_req->service_specific_info, nla_data(iter), hal_req->service_specific_info_len);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_RX_MATCH_FILTER_LEN:
+   hal_req->rx_match_filter_len = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_RX_MATCH_FILTER:
+   memcpy(hal_req->rx_match_filter, nla_data(iter), hal_req->rx_match_filter_len);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_TX_MATCH_FILTER_LEN:
+   hal_req->tx_match_filter_len = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_TX_MATCH_FILTER:
+   memcpy(hal_req->tx_match_filter, nla_data(iter), hal_req->tx_match_filter_len);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_RSSI_THRESHOLD_FLAG:
+   hal_req->rssi_threshold_flag = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_CONN_MAP:
+   hal_req->connmap = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_RECV_IND_CFG:
+   hal_req->recv_indication_cfg = nla_get_u8(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SDEA_LEN:
+   hal_req->sdea_service_specific_info_len = nla_get_u16(iter);
+   break;
+
+  case NAN_REQ_ATTR_PUBLISH_SDEA:
+   memcpy(hal_req->sdea_service_specific_info, nla_data(iter),
+          hal_req->sdea_service_specific_info_len);
+   break;
+
+  case NAN_REQ_ATTR_RANGING_AUTO_RESPONSE:
+   hal_req->ranging_auto_response = nla_get_u8(iter);
+   break;
+
+  default:
+   /* try each sub-parser until one claims the attribute */
+   r = slsi_nan_get_sdea_params_nl(sdev, &hal_req->sdea_params, iter, type);
+   if (r)
+    r = slsi_nan_get_ranging_cfg_nl(sdev, &hal_req->ranging_cfg, iter, type);
+   if (r)
+    r = slsi_nan_get_security_info_nl(sdev, &hal_req->sec_info, iter, type);
+   if (r)
+    r = slsi_nan_get_range_resp_cfg_nl(sdev, &hal_req->range_response_cfg, iter, type);
+   if (r) {
+    SLSI_ERR(sdev, "Unexpected NAN publish attribute TYPE:%d\n", type);
+    return SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+   }
+  }
+ }
+ return SLSI_HAL_NAN_STATUS_SUCCESS;
+}
+
/* Handle the NAN publish vendor command.
 *
 * Parses the netlink request, allocates/validates a publish id, and issues
 * MLME-NAN-PUBLISH to the firmware.  Always sends a vendor reply carrying
 * @reply_status (a SLSI_HAL_NAN_STATUS_* code) and, on success, the
 * publish id in use.  Returns 0 on success or a negative/HAL error code.
 *
 * Locking: ndev_vif->vif_mutex is held around the id-map checks and the
 * MLME call; hal_req is heap-allocated because the request struct is too
 * large for the kernel stack.
 */
int slsi_nan_publish(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct slsi_hal_nan_publish_req *hal_req;
	struct net_device *dev = slsi_nan_get_netdev(sdev);
	struct netdev_vif *ndev_vif;
	int ret;
	u32 reply_status;
	u32 publish_id = 0;

	if (!dev) {
		SLSI_ERR(sdev, "NAN netif not active!!\n");
		ret = -EINVAL;
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		goto exit;
	}

	hal_req = kmalloc(sizeof(*hal_req), GFP_KERNEL);
	if (!hal_req) {
		SLSI_ERR(sdev, "failed to alloc hal_req\n");
		reply_status = SLSI_HAL_NAN_STATUS_NO_RESOURCE_AVAILABLE;
		ret = -ENOMEM;
		goto exit;
	}

	ndev_vif = netdev_priv(dev);
	reply_status = slsi_nan_publish_get_nl_params(sdev, hal_req, data, len);
	if (reply_status != SLSI_HAL_NAN_STATUS_SUCCESS) {
		/* Parse failure: free here, the exit label does not free. */
		kfree(hal_req);
		ret = -EINVAL;
		goto exit;
	}

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	if (!ndev_vif->activated) {
		SLSI_WARN(sdev, "NAN vif not activated\n");
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
		goto exit_with_lock;
	}

	/* publish_id == 0 means "allocate a new session"; a non-zero id must
	 * refer to an already-active publish session (update case).
	 */
	if (!hal_req->publish_id) {
		hal_req->publish_id = slsi_nan_get_new_publish_id(ndev_vif);
	} else if (!slsi_nan_is_publish_id_active(ndev_vif, hal_req->publish_id)) {
		SLSI_WARN(sdev, "Publish id %d not found. map:%x\n", hal_req->publish_id,
			  ndev_vif->nan.publish_id_map);
		reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
		ret = -EINVAL;
		goto exit_with_lock;
	}

	/* publish_id may still be 0 here if the id map is exhausted. */
	if (hal_req->publish_id) {
		ret = slsi_mlme_nan_publish(sdev, dev, hal_req, hal_req->publish_id);
		if (ret)
			reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
		else
			publish_id = hal_req->publish_id;
	} else {
		reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
		SLSI_WARN(sdev, "Too Many concurrent PUBLISH REQ(map:%x)\n",
			  ndev_vif->nan.publish_id_map);
		ret = -ENOTSUPP;
	}
exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	kfree(hal_req);
exit:
	slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_PUBLISH, publish_id, NULL);
	return ret;
}
+
+int slsi_nan_publish_cancel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+ int type, tmp, ret = 0;
+ u16 publish_id = 0;
+ const struct nlattr *iter;
+ u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;
+
+ if (!dev) {
+ SLSI_ERR(sdev, "NAN netif not active!!");
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case NAN_REQ_ATTR_PUBLISH_ID:
+ publish_id = nla_get_u16(iter);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unexpected NAN publishcancel attribute TYPE:%d\n", type);
+ }
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
+ goto exit_with_lock;
+ }
+ if (!publish_id || !slsi_nan_is_publish_id_active(ndev_vif, publish_id)) {
+ reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
+ SLSI_WARN(sdev, "Publish_id(%d) not active. map:%x\n",
+ publish_id, ndev_vif->nan.publish_id_map);
+ } else {
+ ret = slsi_mlme_nan_publish(sdev, dev, NULL, publish_id);
+ if (ret)
+ reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+ }
+exit_with_lock:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_PUBLISH_CANCEL, publish_id, NULL);
+ return ret;
+}
+
+static int slsi_nan_subscribe_get_nl_params(struct slsi_dev *sdev, struct slsi_hal_nan_subscribe_req *hal_req,
+ const void *data, int len)
+{
+ int type, tmp, r;
+ const struct nlattr *iter;
+
+ memset(hal_req, 0, sizeof(*hal_req));
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case NAN_REQ_ATTR_SUBSCRIBE_ID:
+ hal_req->subscribe_id = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_TTL:
+ hal_req->ttl = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_PERIOD:
+ hal_req->period = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_TYPE:
+ hal_req->subscribe_type = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RESP_FILTER_TYPE:
+ hal_req->service_response_filter = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RESP_INCLUDE:
+ hal_req->service_response_include = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_USE_RESP_FILTER:
+ hal_req->use_service_response_filter = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SSI_REQUIRED:
+ hal_req->ssi_required_for_match_indication = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_MATCH_INDICATOR:
+ hal_req->subscribe_match_indicator = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_COUNT:
+ hal_req->subscribe_count = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SERVICE_NAME_LEN:
+ hal_req->service_name_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SERVICE_NAME:
+ memcpy(hal_req->service_name, nla_data(iter), hal_req->service_name_len);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SERVICE_INFO_LEN:
+ hal_req->service_specific_info_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SERVICE_INFO:
+ memcpy(hal_req->service_specific_info, nla_data(iter), hal_req->service_specific_info_len);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RX_MATCH_FILTER_LEN:
+ hal_req->rx_match_filter_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RX_MATCH_FILTER:
+ memcpy(hal_req->rx_match_filter, nla_data(iter), hal_req->rx_match_filter_len);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_TX_MATCH_FILTER_LEN:
+ hal_req->tx_match_filter_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_TX_MATCH_FILTER:
+ memcpy(hal_req->tx_match_filter, nla_data(iter), hal_req->tx_match_filter_len);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RSSI_THRESHOLD_FLAG:
+ hal_req->rssi_threshold_flag = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_CONN_MAP:
+ hal_req->connmap = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_NUM_INTF_ADDR_PRESENT:
+ hal_req->num_intf_addr_present = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_INTF_ADDR:
+ memcpy(hal_req->intf_addr, nla_data(iter), hal_req->num_intf_addr_present * ETH_ALEN);
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_RECV_IND_CFG:
+ hal_req->recv_indication_cfg = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_PUBLISH_SDEA_LEN:
+ hal_req->sdea_service_specific_info_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_PUBLISH_SDEA:
+ memcpy(hal_req->sdea_service_specific_info, nla_data(iter),
+ hal_req->sdea_service_specific_info_len);
+ break;
+
+ case NAN_REQ_ATTR_RANGING_AUTO_RESPONSE:
+ hal_req->ranging_auto_response = nla_get_u8(iter);
+ break;
+
+ default:
+ r = slsi_nan_get_sdea_params_nl(sdev, &hal_req->sdea_params, iter, type);
+ if (r)
+ r = slsi_nan_get_ranging_cfg_nl(sdev, &hal_req->ranging_cfg, iter, type);
+ if (r)
+ r = slsi_nan_get_security_info_nl(sdev, &hal_req->sec_info, iter, type);
+ if (r)
+ r = slsi_nan_get_range_resp_cfg_nl(sdev, &hal_req->range_response_cfg, iter, type);
+ if (r) {
+ SLSI_ERR(sdev, "Unexpected NAN subscribe attribute TYPE:%d\n", type);
+ return SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+ }
+ }
+ }
+ return SLSI_HAL_NAN_STATUS_SUCCESS;
+}
+
/* Handle the NAN subscribe vendor command.
 *
 * Parses the netlink request, allocates/validates a subscribe id, and
 * issues MLME-NAN-SUBSCRIBE to the firmware.  Always sends a vendor reply
 * carrying @reply_status (a SLSI_HAL_NAN_STATUS_* code) and, on success,
 * the subscribe id in use.  Returns 0 on success or a negative/HAL error.
 *
 * Locking: ndev_vif->vif_mutex is held around the id-map checks and the
 * MLME call; hal_req is heap-allocated because the request struct is too
 * large for the kernel stack.
 */
int slsi_nan_subscribe(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct net_device *dev = slsi_nan_get_netdev(sdev);
	struct netdev_vif *ndev_vif;
	struct slsi_hal_nan_subscribe_req *hal_req;
	int ret;
	u32 reply_status;
	u32 subscribe_id = 0;

	if (!dev) {
		SLSI_ERR(sdev, "NAN netif not active!!\n");
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		ret = -EINVAL;
		goto exit;
	}

	hal_req = kmalloc(sizeof(*hal_req), GFP_KERNEL);
	if (!hal_req) {
		SLSI_ERR(sdev, "Failed to alloc hal_req structure!!!\n");
		reply_status = SLSI_HAL_NAN_STATUS_NO_RESOURCE_AVAILABLE;
		ret = -ENOMEM;
		goto exit;
	}

	ndev_vif = netdev_priv(dev);
	reply_status = slsi_nan_subscribe_get_nl_params(sdev, hal_req, data, len);
	if (reply_status != SLSI_HAL_NAN_STATUS_SUCCESS) {
		/* Parse failure: free here, the exit label does not free. */
		kfree(hal_req);
		ret = -EINVAL;
		goto exit;
	}

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	if (!ndev_vif->activated) {
		SLSI_WARN(sdev, "NAN vif not activated\n");
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
		goto exit_with_lock;
	}

	/* subscribe_id == 0 means "allocate a new session"; a non-zero id
	 * must refer to an already-active subscribe session (update case).
	 */
	if (!hal_req->subscribe_id) {
		hal_req->subscribe_id = slsi_nan_get_new_subscribe_id(ndev_vif);
	} else if (!slsi_nan_is_subscribe_id_active(ndev_vif, hal_req->subscribe_id)) {
		SLSI_WARN(sdev, "Subscribe id %d not found. map:%x\n", hal_req->subscribe_id,
			  ndev_vif->nan.subscribe_id_map);
		reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
		ret = -EINVAL;
		goto exit_with_lock;
	}

	ret = slsi_mlme_nan_subscribe(sdev, dev, hal_req, hal_req->subscribe_id);
	if (ret)
		reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
	else
		subscribe_id = hal_req->subscribe_id;

exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	kfree(hal_req);
exit:
	slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_SUBSCRIBE, subscribe_id, NULL);
	return ret;
}
+
+int slsi_nan_subscribe_cancel(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+ int type, tmp, ret = WIFI_HAL_ERROR_UNKNOWN;
+ u16 subscribe_id = 0;
+ const struct nlattr *iter;
+ u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;
+
+ if (!dev) {
+ SLSI_ERR(sdev, "NAN netif not active!!");
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
+ goto exit;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case NAN_REQ_ATTR_SUBSCRIBE_ID:
+ subscribe_id = nla_get_u16(iter);
+ break;
+ default:
+ SLSI_ERR(sdev, "Unexpected NAN subscribecancel attribute TYPE:%d\n", type);
+ reply_status = SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+ goto exit;
+ }
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (ndev_vif->activated) {
+ if (!subscribe_id || !slsi_nan_is_subscribe_id_active(ndev_vif, subscribe_id)) {
+ SLSI_WARN(sdev, "subscribe_id(%d) not active. map:%x\n",
+ subscribe_id, ndev_vif->nan.subscribe_id_map);
+ reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
+ } else {
+ ret = slsi_mlme_nan_subscribe(sdev, dev, NULL, subscribe_id);
+ if (ret)
+ reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+ }
+ } else {
+ SLSI_ERR(sdev, "vif not activated\n");
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_SUBSCRIBE_CANCEL, subscribe_id, NULL);
+ return ret;
+}
+
+static int slsi_nan_followup_get_nl_params(struct slsi_dev *sdev, struct slsi_hal_nan_transmit_followup_req *hal_req,
+ const void *data, int len)
+{
+ int type, tmp;
+ const struct nlattr *iter;
+
+ memset(hal_req, 0, sizeof(*hal_req));
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case NAN_REQ_ATTR_FOLLOWUP_ID:
+ hal_req->publish_subscribe_id = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_REQUESTOR_ID:
+ hal_req->requestor_instance_id = nla_get_u32(iter);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_ADDR:
+ memcpy(hal_req->addr, nla_data(iter), ETH_ALEN);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_PRIORITY:
+ hal_req->priority = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_TX_WINDOW:
+ hal_req->dw_or_faw = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_SERVICE_NAME_LEN:
+ hal_req->service_specific_info_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_SERVICE_NAME:
+ memcpy(hal_req->service_specific_info, nla_data(iter), hal_req->service_specific_info_len);
+ break;
+
+ case NAN_REQ_ATTR_FOLLOWUP_RECV_IND_CFG:
+ hal_req->recv_indication_cfg = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_PUBLISH_SDEA_LEN:
+ hal_req->sdea_service_specific_info_len = nla_get_u16(iter);
+ break;
+
+ case NAN_REQ_ATTR_PUBLISH_SDEA:
+ memcpy(hal_req->sdea_service_specific_info, nla_data(iter),
+ hal_req->sdea_service_specific_info_len);
+ break;
+
+ default:
+ SLSI_ERR(sdev, "Unexpected NAN followup attribute TYPE:%d\n", type);
+ return SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+ }
+ }
+ return SLSI_HAL_NAN_STATUS_SUCCESS;
+}
+
/* Handle the NAN transmit-followup vendor command.
 *
 * Parses the request, verifies the referenced publish/subscribe session
 * exists, and issues the followup transmit to the firmware.  Always sends
 * a vendor reply carrying the HAL status.  Returns 0 on success or a
 * negative/HAL error code.
 *
 * Locking: ndev_vif->vif_mutex is held around the id-map checks and the
 * MLME call.
 */
int slsi_nan_transmit_followup(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
	struct net_device *dev = slsi_nan_get_netdev(sdev);
	struct netdev_vif *ndev_vif;
	struct slsi_hal_nan_transmit_followup_req hal_req;
	int ret;
	u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;

	if (!dev) {
		SLSI_ERR(sdev, "NAN netif not active!!");
		ret = -EINVAL;
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		goto exit;
	}

	ndev_vif = netdev_priv(dev);
	/* Non-zero reply_status is a SLSI_HAL_NAN_STATUS_* error code
	 * (SLSI_HAL_NAN_STATUS_SUCCESS is 0).
	 */
	reply_status = slsi_nan_followup_get_nl_params(sdev, &hal_req, data, len);
	if (reply_status) {
		ret = -EINVAL;
		goto exit;
	}

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	if (!ndev_vif->activated) {
		SLSI_WARN(sdev, "NAN vif not activated\n");
		reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
		ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
		goto exit_with_lock;
	}

	/* The followup id may name either a publish or a subscribe session;
	 * either one being active is sufficient.
	 */
	if (!hal_req.publish_subscribe_id ||
	    !(slsi_nan_is_subscribe_id_active(ndev_vif, hal_req.publish_subscribe_id) ||
	      slsi_nan_is_publish_id_active(ndev_vif, hal_req.publish_subscribe_id))) {
		SLSI_WARN(sdev, "publish/Subscribe id %d not found. map:%x\n", hal_req.publish_subscribe_id,
			  ndev_vif->nan.subscribe_id_map);
		reply_status = SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
		ret = -EINVAL;
		goto exit_with_lock;
	}

	ret = slsi_mlme_nan_tx_followup(sdev, dev, &hal_req);
	if (ret)
		reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;

exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
exit:
	slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_TRANSMIT_FOLLOWUP, 0, NULL);
	return ret;
}
+
+static int slsi_nan_config_get_nl_params(struct slsi_dev *sdev, struct slsi_hal_nan_config_req *hal_req,
+ const void *data, int len)
+{
+ int type, type1, tmp, tmp1, disc_attr_idx = 0, famchan_idx = 0;
+ const struct nlattr *iter, *iter1;
+ struct slsi_hal_nan_post_discovery_param *disc_attr;
+ struct slsi_hal_nan_further_availability_channel *famchan;
+
+ memset(hal_req, 0, sizeof(*hal_req));
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ switch (type) {
+ case NAN_REQ_ATTR_SID_BEACON_VAL:
+ hal_req->sid_beacon = nla_get_u8(iter);
+ hal_req->config_sid_beacon = 1;
+ break;
+
+ case NAN_REQ_ATTR_RSSI_PROXIMITY_2G4_VAL:
+ hal_req->rssi_proximity = nla_get_u8(iter);
+ hal_req->config_rssi_proximity = 1;
+ break;
+
+ case NAN_REQ_ATTR_MASTER_PREF:
+ hal_req->master_pref = nla_get_u8(iter);
+ hal_req->config_master_pref = 1;
+ break;
+
+ case NAN_REQ_ATTR_RSSI_CLOSE_PROXIMITY_5G_VAL:
+ hal_req->rssi_close_proximity_5g_val = nla_get_u8(iter);
+ hal_req->config_5g_rssi_close_proximity = 1;
+ break;
+
+ case NAN_REQ_ATTR_RSSI_WINDOW_SIZE_VAL:
+ hal_req->rssi_window_size_val = nla_get_u8(iter);
+ hal_req->config_rssi_window_size = 1;
+ break;
+
+ case NAN_REQ_ATTR_CLUSTER_VAL:
+ hal_req->config_cluster_attribute_val = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_SOCIAL_CH_SCAN_DWELL_TIME:
+ memcpy(hal_req->scan_params_val.dwell_time, nla_data(iter),
+ sizeof(hal_req->scan_params_val.dwell_time));
+ hal_req->config_scan_params = 1;
+ break;
+
+ case NAN_REQ_ATTR_SOCIAL_CH_SCAN_PERIOD:
+ memcpy(hal_req->scan_params_val.scan_period, nla_data(iter),
+ sizeof(hal_req->scan_params_val.scan_period));
+ hal_req->config_scan_params = 1;
+ break;
+
+ case NAN_REQ_ATTR_RANDOM_FACTOR_FORCE_VAL:
+ hal_req->random_factor_force_val = nla_get_u8(iter);
+ hal_req->config_random_factor_force = 1;
+ break;
+
+ case NAN_REQ_ATTR_HOP_COUNT_FORCE_VAL:
+ hal_req->hop_count_force_val = nla_get_u8(iter);
+ hal_req->config_hop_count_force = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_PAYLOAD_TX:
+ hal_req->conn_capability_val.payload_transmit_flag = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_WFD:
+ hal_req->conn_capability_val.is_wfd_supported = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_WFDS:
+ hal_req->conn_capability_val.is_wfds_supported = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_TDLS:
+ hal_req->conn_capability_val.is_tdls_supported = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_MESH:
+ hal_req->conn_capability_val.is_mesh_supported = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_IBSS:
+ hal_req->conn_capability_val.is_ibss_supported = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_CONN_CAPABILITY_WLAN_INFRA:
+ hal_req->conn_capability_val.wlan_infra_field = nla_get_u8(iter);
+ hal_req->config_conn_capability = 1;
+ break;
+
+ case NAN_REQ_ATTR_DISCOVERY_ATTR_NUM_ENTRIES:
+ hal_req->num_config_discovery_attr = nla_get_u8(iter);
+ break;
+
+ case NAN_REQ_ATTR_DISCOVERY_ATTR_VAL:
+ if (disc_attr_idx >= hal_req->num_config_discovery_attr) {
+ SLSI_ERR(sdev,
+ "disc attr(%d) > num disc attr(%d)\n",
+ disc_attr_idx + 1, hal_req->num_config_discovery_attr);
+ return -EINVAL;
+ }
+ disc_attr = &hal_req->discovery_attr_val[disc_attr_idx];
+ disc_attr_idx++;
+ nla_for_each_nested(iter1, iter, tmp1) {
+ type1 = nla_type(iter1);
+ switch (type1) {
+ case NAN_REQ_ATTR_CONN_TYPE:
+ disc_attr->type = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_NAN_ROLE:
+ disc_attr->role = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_TRANSMIT_FREQ:
+ disc_attr->transmit_freq = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_AVAILABILITY_DURATION:
+ disc_attr->duration = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_AVAILABILITY_INTERVAL:
+ disc_attr->avail_interval_bitmap = nla_get_u32(iter1);
+ break;
+
+ case NAN_REQ_ATTR_MAC_ADDR_VAL:
+ memcpy(disc_attr->addr, nla_data(iter1), ETH_ALEN);
+ break;
+
+ case NAN_REQ_ATTR_MESH_ID_LEN:
+ disc_attr->mesh_id_len = nla_get_u16(iter1);
+ break;
+
+ case NAN_REQ_ATTR_MESH_ID:
+ memcpy(disc_attr->mesh_id, nla_data(iter1), disc_attr->mesh_id_len);
+ break;
+
+ case NAN_REQ_ATTR_INFRASTRUCTURE_SSID_LEN:
+ disc_attr->infrastructure_ssid_len = nla_get_u16(iter1);
+ break;
+
+ case NAN_REQ_ATTR_INFRASTRUCTURE_SSID:
+ memcpy(disc_attr->infrastructure_ssid_val, nla_data(iter1),
+ disc_attr->infrastructure_ssid_len);
+ break;
+ }
+ }
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_NUM_ENTRIES:
+ hal_req->fam_val.numchans = nla_get_u8(iter);
+ hal_req->config_fam = 1;
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_VAL:
+ hal_req->config_fam = 1;
+ if (famchan_idx >= hal_req->fam_val.numchans) {
+ SLSI_ERR(sdev,
+ "famchan attr(%d) > numchans(%d)\n",
+ famchan_idx + 1, hal_req->fam_val.numchans);
+ return -EINVAL;
+ }
+ famchan = &hal_req->fam_val.famchan[famchan_idx];
+ famchan_idx++;
+ nla_for_each_nested(iter1, iter, tmp1) {
+ type1 = nla_type(iter1);
+ switch (type1) {
+ case NAN_REQ_ATTR_FURTHER_AVAIL_ENTRY_CTRL:
+ famchan->entry_control = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_CHAN_CLASS:
+ famchan->class_val = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_CHAN:
+ famchan->channel = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_CHAN_MAPID:
+ famchan->mapid = nla_get_u8(iter1);
+ break;
+
+ case NAN_REQ_ATTR_FURTHER_AVAIL_INTERVAL_BITMAP:
+ famchan->avail_interval_bitmap = nla_get_u32(iter1);
+ break;
+ }
+ }
+ break;
+
+ case NAN_REQ_ATTR_SUBSCRIBE_SID_BEACON_VAL:
+ hal_req->subscribe_sid_beacon_val = nla_get_u8(iter);
+ hal_req->config_subscribe_sid_beacon = 1;
+ break;
+
+ case NAN_REQ_ATTR_DW_2G4_INTERVAL:
+ hal_req->dw_2dot4g_interval_val = nla_get_u8(iter);
+ /* valid range for 2.4G is 1-5 */
+ if (hal_req->dw_2dot4g_interval_val > 0 && hal_req->dw_2dot4g_interval_val < 6)
+ hal_req->config_2dot4g_dw_band = 1;
+ break;
+
+ case NAN_REQ_ATTR_DW_5G_INTERVAL:
+ hal_req->dw_5g_interval_val = nla_get_u8(iter);
+ /* valid range for 5g is 0-5 */
+ if (hal_req->dw_5g_interval_val < 6)
+ hal_req->config_5g_dw_band = 1;
+ break;
+
+ case NAN_REQ_ATTR_DISC_MAC_ADDR_RANDOM_INTERVAL:
+ hal_req->disc_mac_addr_rand_interval_sec = nla_get_u8(iter);
+ break;
+
+ default:
+ SLSI_ERR(sdev, "Unexpected NAN config attribute TYPE:%d\n", type);
+ return SLSI_HAL_NAN_STATUS_INVALID_PARAM;
+ }
+ }
+ return SLSI_HAL_NAN_STATUS_SUCCESS;
+}
+
+int slsi_nan_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+ struct slsi_hal_nan_config_req hal_req;
+ int ret;
+ u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;
+
+ if (!dev) {
+ SLSI_ERR(sdev, "NAN netif not active!!");
+ ret = -EINVAL;
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ goto exit;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ reply_status = slsi_nan_config_get_nl_params(sdev, &hal_req, data, len);
+ if (reply_status) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+ SLSI_WARN(sdev, "NAN vif not activated\n");
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = WIFI_HAL_ERROR_NOT_AVAILABLE;
+ } else {
+ ret = slsi_mlme_nan_set_config(sdev, dev, &hal_req);
+ if (ret)
+ reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_CONFIG, 0, NULL);
+ return ret;
+}
+
+int slsi_nan_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
+{
+ struct slsi_dev *sdev = SDEV_FROM_WIPHY(wiphy);
+ struct net_device *dev = slsi_nan_get_netdev(sdev);
+ struct netdev_vif *ndev_vif;
+ u32 reply_status = SLSI_HAL_NAN_STATUS_SUCCESS;
+ struct slsi_hal_nan_capabilities nan_capabilities;
+ int ret = 0, i;
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_CLUSTERS, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_PUBLISHES, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_CONCURRENT_SUBSCRIBES, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_SERVICE_NAME_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_MATCH_FILTER_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_TOTAL_MATCH_FILTER_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_SERVICE_SPECIFIC_INFO_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_VSA_DATA_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_MESH_DATA_LENGTH, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_NDI_INTERFACES, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_NDP_SESSIONS, { 0, 0 } },
+ { SLSI_PSID_UNIFI_NAN_MAX_APP_INFO_LENGTH, { 0, 0 } } };
+ u32 *capabilities_mib_val[] = { &nan_capabilities.max_concurrent_nan_clusters,
+ &nan_capabilities.max_publishes,
+ &nan_capabilities.max_subscribes,
+ &nan_capabilities.max_service_name_len,
+ &nan_capabilities.max_match_filter_len,
+ &nan_capabilities.max_total_match_filter_len,
+ &nan_capabilities.max_service_specific_info_len,
+ &nan_capabilities.max_vsa_data_len,
+ &nan_capabilities.max_mesh_data_len,
+ &nan_capabilities.max_ndi_interfaces,
+ &nan_capabilities.max_ndp_sessions,
+ &nan_capabilities.max_app_info_len };
+
+ if (!dev) {
+ SLSI_ERR(sdev, "NAN netif not active!!");
+ reply_status = SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED;
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ /* Expect each mib length in response is 11 */
+ mibrsp.dataLength = 11 * ARRAY_SIZE(get_values);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (!mibrsp.data) {
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+ reply_status = SLSI_HAL_NAN_STATUS_NO_RESOURCE_AVAILABLE;
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ values = slsi_read_mibs(sdev, NULL, get_values, ARRAY_SIZE(get_values), &mibrsp);
+ if (!values) {
+ ret = 0xFFFFFFFF;
+ reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+ goto exit_with_mibrsp;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(get_values); i++) {
+ if (values[i].type == SLSI_MIB_TYPE_UINT) {
+ *capabilities_mib_val[i] = values[i].u.uintValue;
+ SLSI_DBG2(sdev, SLSI_GSCAN, "MIB value = %ud\n", *capabilities_mib_val[i]);
+ } else {
+ SLSI_ERR(sdev, "invalid type(%d). iter:%d\n", values[i].type, i);
+ ret = 0xFFFFFFFF;
+ reply_status = SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE;
+ *capabilities_mib_val[i] = 0;
+ }
+ }
+
+ kfree(values);
+exit_with_mibrsp:
+ kfree(mibrsp.data);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+exit:
+ slsi_vendor_nan_command_reply(wiphy, reply_status, ret, NAN_RESPONSE_GET_CAPABILITIES, 0, &nan_capabilities);
+ return ret;
+}
+
+void slsi_nan_event(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb = NULL;
+ int res = 0;
+ u16 event, identifier, evt_reason;
+ u8 *mac_addr;
+ u16 hal_event;
+ struct netdev_vif *ndev_vif;
+ enum slsi_nan_disc_event_type disc_event_type = 0;
+
+ ndev_vif = netdev_priv(dev);
+ event = fapi_get_u16(skb, u.mlme_nan_event_ind.event);
+ identifier = fapi_get_u16(skb, u.mlme_nan_event_ind.identifier);
+ mac_addr = fapi_get_buff(skb, u.mlme_nan_event_ind.address_or_identifier);
+ evt_reason = fapi_get_u16(skb, u.mlme_nan_event_ind.reason_code);
+
+ switch (event) {
+ case FAPI_EVENT_WIFI_EVENT_NAN_PUBLISH_TERMINATED:
+ hal_event = SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT;
+ break;
+ case FAPI_EVENT_WIFI_EVENT_NAN_MATCH_EXPIRED:
+ hal_event = SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT;
+ break;
+ case FAPI_EVENT_WIFI_EVENT_NAN_SUBSCRIBE_TERMINATED:
+ hal_event = SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT;
+ break;
+ case FAPI_EVENT_WIFI_EVENT_NAN_ADDRESS_CHANGED:
+ disc_event_type = NAN_EVENT_ID_DISC_MAC_ADDR;
+ hal_event = SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT;
+ break;
+ case FAPI_EVENT_WIFI_EVENT_NAN_CLUSTER_STARTED:
+ disc_event_type = NAN_EVENT_ID_STARTED_CLUSTER;
+ hal_event = SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT;
+ break;
+ case FAPI_EVENT_WIFI_EVENT_NAN_CLUSTER_JOINED:
+ disc_event_type = NAN_EVENT_ID_JOINED_CLUSTER;
+ hal_event = SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT;
+ break;
+ default:
+ return;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ SLSI_DBG1_NODEV(SLSI_GSCAN, "Event: %s(%d)\n",
+ slsi_print_event_name(hal_event), hal_event);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE, hal_event, GFP_KERNEL);
+#else
+ nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE, hal_event, GFP_KERNEL);
+#endif
+ if (!nl_skb) {
+ SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+ return;
+ }
+
+ switch (hal_event) {
+ case SLSI_NL80211_NAN_PUBLISH_TERMINATED_EVENT:
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_PUBLISH_ID, identifier);
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_PUBLISH_ID, evt_reason);
+ ndev_vif->nan.publish_id_map &= ~BIT(identifier);
+ break;
+ case SLSI_NL80211_NAN_MATCH_EXPIRED_EVENT:
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_MATCH_PUBLISH_SUBSCRIBE_ID, identifier);
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_MATCH_REQUESTOR_INSTANCE_ID, evt_reason);
+ break;
+ case SLSI_NL80211_NAN_SUBSCRIBE_TERMINATED_EVENT:
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_SUBSCRIBE_ID, identifier);
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_SUBSCRIBE_REASON, evt_reason);
+ ndev_vif->nan.subscribe_id_map &= ~BIT(identifier);
+ break;
+ case SLSI_NL80211_NAN_DISCOVERY_ENGINE_EVENT:
+ res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_DISCOVERY_ENGINE_EVT_TYPE, disc_event_type);
+ res |= nla_put(nl_skb, NAN_EVT_ATTR_DISCOVERY_ENGINE_MAC_ADDR, ETH_ALEN, mac_addr);
+ break;
+ }
+
+ if (res) {
+ SLSI_ERR(sdev, "Error in nla_put*:%x\n", res);
+ /* Dont use slsi skb wrapper for this free */
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+}
+
+/* Handle mlme_nan_followup_ind: parse the stitched NAN follow-up IE out of
+ * the FAPI bulk data and forward it to userspace as an
+ * SLSI_NL80211_NAN_FOLLOWUP_EVENT vendor event.
+ *
+ * @sdev: driver context (logging, wiphy handle)
+ * @dev:  originating net_device (unused here)
+ * @skb:  FAPI signal; bulk data carries the vendor IEs
+ *
+ * Fixes vs previous revision: the TLV-error path leaked stitched_ie_p, and
+ * TLV payloads were memcpy'd into fixed-size arrays without a length check.
+ */
+void slsi_nan_followup_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+	u16 tag_id, tag_len;
+	u8 *stitched_ie_p, *ptr;
+	int stitched_ie_len;
+	struct slsi_hal_nan_followup_ind *hal_evt;
+	struct sk_buff *nl_skb;
+	int res;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+	stitched_ie_len = fapi_get_datalen(skb); /* upper bound of the stitched IE */
+	if (!stitched_ie_len) {
+		SLSI_ERR(sdev, "mlme_nan_followup_ind no mbulk data\n");
+		return;
+	}
+	stitched_ie_p = kmalloc(stitched_ie_len, GFP_KERNEL);
+	if (!stitched_ie_p) {
+		SLSI_ERR(sdev, "No memory for followup_ind fapi_data\n");
+		return;
+	}
+
+	hal_evt = kzalloc(sizeof(*hal_evt), GFP_KERNEL); /* kzalloc replaces kmalloc+memset */
+	if (!hal_evt) {
+		SLSI_ERR(sdev, "No memory for followup_ind\n");
+		kfree(stitched_ie_p);
+		return;
+	}
+	stitched_ie_len = slsi_nan_stitch_ie(fapi_get_data(skb), stitched_ie_len, 0x050b, stitched_ie_p);
+	if (!stitched_ie_len) {
+		SLSI_ERR(sdev, "No followup ind IE\n");
+		goto free_exit;
+	}
+	/* Fixed part: 7 byte IE header + addr(6) + priority(1) + dw_or_faw(1) */
+	if (stitched_ie_len < 7 + ETH_ALEN + 2) {
+		SLSI_ERR(sdev, "followup ind IE too short:%d\n", stitched_ie_len);
+		goto free_exit;
+	}
+	hal_evt->publish_subscribe_id = fapi_get_u16(skb, u.mlme_nan_followup_ind.publish_subscribe_id);
+	hal_evt->requestor_instance_id = fapi_get_u16(skb, u.mlme_nan_followup_ind.peer_id);
+	ptr = stitched_ie_p + 7; /* 7 = ie_id(1), ie_len(1), oui(3) type/subtype(2)*/
+
+	ether_addr_copy(hal_evt->addr, ptr);
+	ptr += ETH_ALEN;
+	ptr += 1; /* skip priority */
+	hal_evt->dw_or_faw = *ptr;
+	ptr += 1;
+	while (stitched_ie_len > (ptr - stitched_ie_p) + 4) {
+		/* NOTE(review): unaligned 16-bit loads; assumes little-endian
+		 * TLVs and a CPU/arch tolerating unaligned access - confirm.
+		 */
+		tag_id = *(u16 *)ptr;
+		ptr += 2;
+		tag_len = *(u16 *)ptr;
+		ptr += 2;
+		if (stitched_ie_p[1] + 2 < (ptr - stitched_ie_p) + tag_len) {
+			SLSI_ERR(sdev, "TLV error\n");
+			goto free_exit; /* previously leaked stitched_ie_p here */
+		}
+		/* Length-check before copying into fixed-size destination */
+		if (tag_id == SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO &&
+		    tag_len <= sizeof(hal_evt->service_specific_info)) {
+			hal_evt->service_specific_info_len = tag_len;
+			memcpy(hal_evt->service_specific_info, ptr, tag_len);
+		} else if (tag_id == SLSI_FAPI_NAN_SDEA &&
+			   tag_len <= sizeof(hal_evt->sdea_service_specific_info)) {
+			hal_evt->sdea_service_specific_info_len = tag_len;
+			memcpy(hal_evt->sdea_service_specific_info, ptr, tag_len);
+		}
+		ptr += tag_len;
+	}
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "Event: %s(%d)\n",
+			slsi_print_event_name(SLSI_NL80211_NAN_FOLLOWUP_EVENT), SLSI_NL80211_NAN_FOLLOWUP_EVENT);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE, SLSI_NL80211_NAN_FOLLOWUP_EVENT,
+					     GFP_KERNEL);
+#else
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE, SLSI_NL80211_NAN_FOLLOWUP_EVENT,
+					     GFP_KERNEL);
+#endif
+
+	if (!nl_skb) {
+		SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+		goto free_exit;
+	}
+
+	/* NOTE(review): be16 attributes are fed cpu_to_le16() values; kept
+	 * byte-for-byte for HAL ABI compatibility - confirm intent.
+	 */
+	res = nla_put_be16(nl_skb, NAN_EVT_ATTR_FOLLOWUP_PUBLISH_SUBSCRIBE_ID,
+			   cpu_to_le16(hal_evt->publish_subscribe_id));
+	res |= nla_put_be16(nl_skb, NAN_EVT_ATTR_FOLLOWUP_REQUESTOR_INSTANCE_ID,
+			    cpu_to_le16(hal_evt->requestor_instance_id));
+	res |= nla_put(nl_skb, NAN_EVT_ATTR_FOLLOWUP_ADDR, ETH_ALEN, hal_evt->addr);
+	res |= nla_put_u8(nl_skb, NAN_EVT_ATTR_FOLLOWUP_DW_OR_FAW, hal_evt->dw_or_faw);
+	res |= nla_put_u16(nl_skb, NAN_EVT_ATTR_FOLLOWUP_SERVICE_SPECIFIC_INFO_LEN, hal_evt->service_specific_info_len);
+	if (hal_evt->service_specific_info_len)
+		res |= nla_put(nl_skb, NAN_EVT_ATTR_FOLLOWUP_SERVICE_SPECIFIC_INFO, hal_evt->service_specific_info_len,
+			       hal_evt->service_specific_info);
+	res |= nla_put_u16(nl_skb, NAN_EVT_ATTR_SDEA_LEN, hal_evt->sdea_service_specific_info_len);
+	if (hal_evt->sdea_service_specific_info_len)
+		res |= nla_put(nl_skb, NAN_EVT_ATTR_SDEA, hal_evt->sdea_service_specific_info_len,
+			       hal_evt->sdea_service_specific_info);
+
+	if (res) {
+		SLSI_ERR(sdev, "Error in nla_put*:%x\n", res);
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		goto free_exit;
+	}
+
+	cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+free_exit:
+	kfree(hal_evt);
+	kfree(stitched_ie_p);
+}
+
+/* Handle mlme_nan_service_ind: parse the stitched NAN match IE plus the
+ * optional security/ranging vendor IEs from the FAPI bulk data and emit an
+ * SLSI_NL80211_NAN_MATCH_EVENT vendor event.
+ *
+ * @sdev: driver context (logging, wiphy handle)
+ * @dev:  originating net_device (unused here)
+ * @skb:  FAPI signal; bulk data carries the vendor IEs
+ *
+ * Fixes vs previous revision: TLV payload lengths are validated before being
+ * copied into the fixed-size arrays of struct slsi_hal_nan_match_ind, and the
+ * two vendor-IE scan loops - which broke unconditionally after the first
+ * cfg80211_find_vendor_ie() hit, leaving their tail statements unreachable -
+ * are reduced to single lookups with identical behaviour.
+ */
+void slsi_nan_service_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+	u16 tag_id, tag_len;
+	u8 *stitched_ie_p, *ptr;
+	const u8 *ie_ptr;
+	int stitched_ie_len, ie_len;
+	struct slsi_hal_nan_match_ind *hal_evt;
+	struct sk_buff *nl_skb;
+	int res;
+
+	SLSI_DBG3(sdev, SLSI_GSCAN, "\n");
+
+	stitched_ie_len = fapi_get_datalen(skb); /* upper bound of the stitched IE */
+	if (!stitched_ie_len) {
+		SLSI_ERR(sdev, "mlme_nan_service_ind no mbulk data\n");
+		return;
+	}
+
+	stitched_ie_p = kmalloc(stitched_ie_len, GFP_KERNEL);
+	if (!stitched_ie_p) {
+		SLSI_ERR(sdev, "No memory for service_ind fapi_data\n");
+		return;
+	}
+
+	hal_evt = kzalloc(sizeof(*hal_evt), GFP_KERNEL); /* kzalloc replaces kmalloc+memset */
+	if (!hal_evt) {
+		SLSI_ERR(sdev, "No memory for service_ind\n");
+		kfree(stitched_ie_p);
+		return;
+	}
+
+	stitched_ie_len = slsi_nan_stitch_ie(fapi_get_data(skb), stitched_ie_len, 0x040b, stitched_ie_p);
+	if (!stitched_ie_len) {
+		SLSI_ERR(sdev, "No match ind IE\n");
+		goto free_exit;
+	}
+	/* Fixed part: 7 byte IE header + addr(6) + match(1) + oor(1) + rssi(1) */
+	if (stitched_ie_len < 7 + ETH_ALEN + 3) {
+		SLSI_ERR(sdev, "match ind IE too short:%d\n", stitched_ie_len);
+		goto free_exit;
+	}
+	hal_evt->publish_subscribe_id = fapi_get_u16(skb, u.mlme_nan_service_ind.publish_subscribe_id);
+	hal_evt->requestor_instance_id = fapi_get_u16(skb, u.mlme_nan_service_ind.peer_id);
+
+	/* 7 = ie_id(1), ie_len(1), oui(3) type/subtype(2)*/
+	ptr = stitched_ie_p + 7;
+	ether_addr_copy(hal_evt->addr, ptr);
+	ptr += ETH_ALEN;
+	hal_evt->match_occurred_flag = *ptr;
+	ptr += 1;
+	hal_evt->out_of_resource_flag = *ptr;
+	ptr += 1;
+	hal_evt->rssi_value = *ptr;
+	ptr += 1 + 8; /* skip 8 bytes related to datapath and ranging*/
+	while (stitched_ie_len > (ptr - stitched_ie_p) + 4) {
+		/* NOTE(review): unaligned 16-bit little-endian loads - confirm
+		 * platform tolerates unaligned access.
+		 */
+		tag_id = *(u16 *)ptr;
+		ptr += 2;
+		tag_len = *(u16 *)ptr;
+		ptr += 2;
+		if (stitched_ie_p[1] + 2 < (ptr - stitched_ie_p) + tag_len) {
+			SLSI_ERR(sdev, "TLV error\n");
+			goto free_exit;
+		}
+		/* Length-check every copy into a fixed-size destination */
+		switch (tag_id) {
+		case SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO:
+			if (tag_len <= sizeof(hal_evt->service_specific_info)) {
+				hal_evt->service_specific_info_len = tag_len;
+				memcpy(hal_evt->service_specific_info, ptr, tag_len);
+			}
+			break;
+
+		case SLSI_FAPI_NAN_SDEA:
+			if (tag_len <= sizeof(hal_evt->sdea_service_specific_info)) {
+				hal_evt->sdea_service_specific_info_len = tag_len;
+				memcpy(hal_evt->sdea_service_specific_info, ptr, tag_len);
+			}
+			break;
+
+		case SLSI_FAPI_NAN_SDF_MATCH_FILTER:
+			if (tag_len <= sizeof(hal_evt->sdf_match_filter)) {
+				hal_evt->sdf_match_filter_len = tag_len;
+				memcpy(hal_evt->sdf_match_filter, ptr, tag_len);
+			}
+			break;
+		}
+		ptr += tag_len;
+	}
+
+#define SLSI_OUI 0x001632
+#define SLSI_OUI_TYPE_NAN_PARAMS 0x0b
+#define SLSI_OUI_TYPE_RTT_PARAMS 0x0a
+	/* Security-params IE: first occurrence only (the original loop broke
+	 * unconditionally after the first find, so one lookup is equivalent).
+	 */
+	ie_ptr = fapi_get_data(skb);
+	ie_len = fapi_get_datalen(skb);
+	ie_ptr = cfg80211_find_vendor_ie(SLSI_OUI, SLSI_OUI_TYPE_NAN_PARAMS, ie_ptr, ie_len);
+	if (ie_ptr && ie_len > 9 && ie_ptr[1] > 5 && ie_ptr[6] == 9)
+		hal_evt->sec_info.cipher_type = ie_ptr[8];
+
+	/* RTT-params IE: likewise first occurrence only */
+	ie_ptr = fapi_get_data(skb);
+	ie_len = fapi_get_datalen(skb);
+	ie_ptr = cfg80211_find_vendor_ie(SLSI_OUI, SLSI_OUI_TYPE_RTT_PARAMS, ie_ptr, ie_len);
+	if (ie_ptr) {
+		if (ie_len >= 0x35 && ie_ptr[1] == 0x33 && ie_ptr[6] == 2)
+			hal_evt->range_measurement_mm = ie_ptr[43];
+		hal_evt->ranging_event_type = 0; /* already zero from kzalloc */
+	}
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+	SLSI_DBG1_NODEV(SLSI_GSCAN, "Event: %s(%d)\n",
+			slsi_print_event_name(SLSI_NL80211_NAN_MATCH_EVENT), SLSI_NL80211_NAN_MATCH_EVENT);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE, SLSI_NL80211_NAN_MATCH_EVENT,
+					     GFP_KERNEL);
+#else
+	nl_skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE, SLSI_NL80211_NAN_MATCH_EVENT, GFP_KERNEL);
+#endif
+	if (!nl_skb) {
+		SLSI_ERR(sdev, "NO MEM for nl_skb!!!\n");
+		goto free_exit;
+	}
+	res = nla_put_u16(nl_skb, NAN_EVT_ATTR_MATCH_PUBLISH_SUBSCRIBE_ID, hal_evt->publish_subscribe_id);
+	res |= nla_put_u32(nl_skb, NAN_EVT_ATTR_MATCH_REQUESTOR_INSTANCE_ID, hal_evt->requestor_instance_id);
+	res |= nla_put(nl_skb, NAN_EVT_ATTR_MATCH_ADDR, ETH_ALEN, hal_evt->addr);
+	res |= nla_put_u16(nl_skb, NAN_EVT_ATTR_MATCH_SERVICE_SPECIFIC_INFO_LEN, hal_evt->service_specific_info_len);
+	if (hal_evt->service_specific_info_len)
+		res |= nla_put(nl_skb, NAN_EVT_ATTR_MATCH_SERVICE_SPECIFIC_INFO, hal_evt->service_specific_info_len,
+			       hal_evt->service_specific_info);
+	res |= nla_put_u16(nl_skb, NAN_EVT_ATTR_MATCH_SDF_MATCH_FILTER_LEN, hal_evt->sdf_match_filter_len);
+	if (hal_evt->sdf_match_filter_len)
+		res |= nla_put(nl_skb, NAN_EVT_ATTR_MATCH_SDF_MATCH_FILTER, hal_evt->sdf_match_filter_len,
+			       hal_evt->sdf_match_filter);
+	res |= nla_put_u16(nl_skb, NAN_EVT_ATTR_SDEA_LEN, hal_evt->sdea_service_specific_info_len);
+	if (hal_evt->sdea_service_specific_info_len)
+		res |= nla_put(nl_skb, NAN_EVT_ATTR_SDEA, hal_evt->sdea_service_specific_info_len,
+			       hal_evt->sdea_service_specific_info);
+
+	res |= nla_put_u8(nl_skb, NAN_EVT_ATTR_MATCH_MATCH_OCCURRED_FLAG, hal_evt->match_occurred_flag);
+	res |= nla_put_u8(nl_skb, NAN_EVT_ATTR_MATCH_OUT_OF_RESOURCE_FLAG, hal_evt->out_of_resource_flag);
+	res |= nla_put_u8(nl_skb, NAN_EVT_ATTR_MATCH_RSSI_VALUE, hal_evt->rssi_value);
+	res |= nla_put_u32(nl_skb, NAN_EVT_ATTR_RANGE_MEASUREMENT_MM, hal_evt->range_measurement_mm);
+	res |= nla_put_u32(nl_skb, NAN_EVT_ATTR_RANGEING_EVENT_TYPE, hal_evt->ranging_event_type);
+	res |= nla_put_u32(nl_skb, NAN_EVT_ATTR_SECURITY_CIPHER_TYPE, hal_evt->sec_info.cipher_type);
+
+	if (res) {
+		SLSI_ERR(sdev, "Error in nla_put*:%x\n", res);
+		/* Dont use slsi skb wrapper for this free */
+		kfree_skb(nl_skb);
+		goto free_exit;
+	}
+
+	cfg80211_vendor_event(nl_skb, GFP_KERNEL);
+free_exit:
+	kfree(hal_evt);
+	kfree(stitched_ie_p);
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#ifndef __SLSI_NL80211_VENDOR_NAN_H_
+#define __SLSI_NL80211_VENDOR_NAN_H_
+
+/* Netlink attributes placed in vendor-command replies (status and NAN
+ * capability values). Values are implicit, 0-based, and part of the
+ * userspace HAL ABI: never reorder or insert entries mid-list.
+ */
+enum SLSI_NAN_REPLY_ATTRIBUTES {
+ NAN_REPLY_ATTR_STATUS_TYPE,
+ NAN_REPLY_ATTR_VALUE,
+ NAN_REPLY_ATTR_RESPONSE_TYPE,
+ NAN_REPLY_ATTR_PUBLISH_SUBSCRIBE_TYPE,
+ NAN_REPLY_ATTR_CAP_MAX_CONCURRENT_CLUSTER,
+ NAN_REPLY_ATTR_CAP_MAX_PUBLISHES,
+ NAN_REPLY_ATTR_CAP_MAX_SUBSCRIBES,
+ NAN_REPLY_ATTR_CAP_MAX_SERVICE_NAME_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_MATCH_FILTER_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_TOTAL_MATCH_FILTER_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_SERVICE_SPECIFIC_INFO_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_VSA_DATA_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_MESH_DATA_LEN,
+ NAN_REPLY_ATTR_CAP_MAX_NDI_INTERFACES,
+ NAN_REPLY_ATTR_CAP_MAX_NDP_SESSIONS,
+ NAN_REPLY_ATTR_CAP_MAX_APP_INFO_LEN
+};
+
+/* Netlink attributes accepted in NAN vendor requests (enable/config,
+ * publish, subscribe, followup, ranging, security). The explicit "= N"
+ * anchors every tenth entry to make the ABI value of each attribute easy
+ * to audit; values are fixed by the userspace HAL - do not renumber.
+ */
+enum SLSI_NAN_REQ_ATTRIBUTES {
+ NAN_REQ_ATTR_MASTER_PREF,
+ NAN_REQ_ATTR_CLUSTER_LOW,
+ NAN_REQ_ATTR_CLUSTER_HIGH,
+ NAN_REQ_ATTR_HOP_COUNT_LIMIT_VAL,
+ NAN_REQ_ATTR_SID_BEACON_VAL,
+ NAN_REQ_ATTR_SUPPORT_2G4_VAL,
+ NAN_REQ_ATTR_SUPPORT_5G_VAL,
+ NAN_REQ_ATTR_RSSI_CLOSE_2G4_VAL,
+ NAN_REQ_ATTR_RSSI_MIDDLE_2G4_VAL,
+ NAN_REQ_ATTR_RSSI_PROXIMITY_2G4_VAL,
+ NAN_REQ_ATTR_BEACONS_2G4_VAL = 10,
+ NAN_REQ_ATTR_SDF_2G4_VAL,
+ NAN_REQ_ATTR_CHANNEL_2G4_MHZ_VAL,
+ NAN_REQ_ATTR_RSSI_PROXIMITY_VAL,
+ NAN_REQ_ATTR_RSSI_CLOSE_5G_VAL,
+ NAN_REQ_ATTR_RSSI_CLOSE_PROXIMITY_5G_VAL,
+ NAN_REQ_ATTR_RSSI_MIDDLE_5G_VAL,
+ NAN_REQ_ATTR_RSSI_PROXIMITY_5G_VAL,
+ NAN_REQ_ATTR_BEACON_5G_VAL,
+ NAN_REQ_ATTR_SDF_5G_VAL,
+ NAN_REQ_ATTR_CHANNEL_5G_MHZ_VAL = 20,
+ NAN_REQ_ATTR_RSSI_WINDOW_SIZE_VAL,
+ NAN_REQ_ATTR_OUI_VAL,
+ NAN_REQ_ATTR_MAC_ADDR_VAL,
+ NAN_REQ_ATTR_CLUSTER_VAL,
+ NAN_REQ_ATTR_SOCIAL_CH_SCAN_DWELL_TIME,
+ NAN_REQ_ATTR_SOCIAL_CH_SCAN_PERIOD,
+ NAN_REQ_ATTR_RANDOM_FACTOR_FORCE_VAL,
+ NAN_REQ_ATTR_HOP_COUNT_FORCE_VAL,
+ NAN_REQ_ATTR_CONN_CAPABILITY_PAYLOAD_TX,
+ NAN_REQ_ATTR_CONN_CAPABILITY_IBSS = 30,
+ NAN_REQ_ATTR_CONN_CAPABILITY_WFD,
+ NAN_REQ_ATTR_CONN_CAPABILITY_WFDS,
+ NAN_REQ_ATTR_CONN_CAPABILITY_TDLS,
+ NAN_REQ_ATTR_CONN_CAPABILITY_MESH,
+ NAN_REQ_ATTR_CONN_CAPABILITY_WLAN_INFRA,
+ NAN_REQ_ATTR_DISCOVERY_ATTR_NUM_ENTRIES,
+ NAN_REQ_ATTR_DISCOVERY_ATTR_VAL,
+ NAN_REQ_ATTR_CONN_TYPE,
+ NAN_REQ_ATTR_NAN_ROLE,
+ NAN_REQ_ATTR_TRANSMIT_FREQ = 40,
+ NAN_REQ_ATTR_AVAILABILITY_DURATION,
+ NAN_REQ_ATTR_AVAILABILITY_INTERVAL,
+ NAN_REQ_ATTR_MESH_ID_LEN,
+ NAN_REQ_ATTR_MESH_ID,
+ NAN_REQ_ATTR_INFRASTRUCTURE_SSID_LEN,
+ NAN_REQ_ATTR_INFRASTRUCTURE_SSID,
+ NAN_REQ_ATTR_FURTHER_AVAIL_NUM_ENTRIES,
+ NAN_REQ_ATTR_FURTHER_AVAIL_VAL,
+ NAN_REQ_ATTR_FURTHER_AVAIL_ENTRY_CTRL,
+ NAN_REQ_ATTR_FURTHER_AVAIL_CHAN_CLASS = 50,
+ NAN_REQ_ATTR_FURTHER_AVAIL_CHAN,
+ NAN_REQ_ATTR_FURTHER_AVAIL_CHAN_MAPID,
+ NAN_REQ_ATTR_FURTHER_AVAIL_INTERVAL_BITMAP,
+ NAN_REQ_ATTR_PUBLISH_ID,
+ NAN_REQ_ATTR_PUBLISH_TTL,
+ NAN_REQ_ATTR_PUBLISH_PERIOD,
+ NAN_REQ_ATTR_PUBLISH_TYPE,
+ NAN_REQ_ATTR_PUBLISH_TX_TYPE,
+ NAN_REQ_ATTR_PUBLISH_COUNT,
+ NAN_REQ_ATTR_PUBLISH_SERVICE_NAME_LEN = 60,
+ NAN_REQ_ATTR_PUBLISH_SERVICE_NAME,
+ NAN_REQ_ATTR_PUBLISH_MATCH_ALGO,
+ NAN_REQ_ATTR_PUBLISH_SERVICE_INFO_LEN,
+ NAN_REQ_ATTR_PUBLISH_SERVICE_INFO,
+ NAN_REQ_ATTR_PUBLISH_RX_MATCH_FILTER_LEN,
+ NAN_REQ_ATTR_PUBLISH_RX_MATCH_FILTER,
+ NAN_REQ_ATTR_PUBLISH_TX_MATCH_FILTER_LEN,
+ NAN_REQ_ATTR_PUBLISH_TX_MATCH_FILTER,
+ NAN_REQ_ATTR_PUBLISH_RSSI_THRESHOLD_FLAG,
+ NAN_REQ_ATTR_PUBLISH_CONN_MAP = 70,
+ NAN_REQ_ATTR_PUBLISH_RECV_IND_CFG,
+ NAN_REQ_ATTR_SUBSCRIBE_ID,
+ NAN_REQ_ATTR_SUBSCRIBE_TTL,
+ NAN_REQ_ATTR_SUBSCRIBE_PERIOD,
+ NAN_REQ_ATTR_SUBSCRIBE_TYPE,
+ NAN_REQ_ATTR_SUBSCRIBE_RESP_FILTER_TYPE,
+ NAN_REQ_ATTR_SUBSCRIBE_RESP_INCLUDE,
+ NAN_REQ_ATTR_SUBSCRIBE_USE_RESP_FILTER,
+ NAN_REQ_ATTR_SUBSCRIBE_SSI_REQUIRED,
+ NAN_REQ_ATTR_SUBSCRIBE_MATCH_INDICATOR = 80,
+ NAN_REQ_ATTR_SUBSCRIBE_COUNT,
+ NAN_REQ_ATTR_SUBSCRIBE_SERVICE_NAME_LEN,
+ NAN_REQ_ATTR_SUBSCRIBE_SERVICE_NAME,
+ NAN_REQ_ATTR_SUBSCRIBE_SERVICE_INFO_LEN,
+ NAN_REQ_ATTR_SUBSCRIBE_SERVICE_INFO,
+ NAN_REQ_ATTR_SUBSCRIBE_RX_MATCH_FILTER_LEN,
+ NAN_REQ_ATTR_SUBSCRIBE_RX_MATCH_FILTER,
+ NAN_REQ_ATTR_SUBSCRIBE_TX_MATCH_FILTER_LEN,
+ NAN_REQ_ATTR_SUBSCRIBE_TX_MATCH_FILTER,
+ NAN_REQ_ATTR_SUBSCRIBE_RSSI_THRESHOLD_FLAG = 90,
+ NAN_REQ_ATTR_SUBSCRIBE_CONN_MAP,
+ NAN_REQ_ATTR_SUBSCRIBE_NUM_INTF_ADDR_PRESENT,
+ NAN_REQ_ATTR_SUBSCRIBE_INTF_ADDR,
+ NAN_REQ_ATTR_SUBSCRIBE_RECV_IND_CFG,
+ NAN_REQ_ATTR_FOLLOWUP_ID,
+ NAN_REQ_ATTR_FOLLOWUP_REQUESTOR_ID,
+ NAN_REQ_ATTR_FOLLOWUP_ADDR,
+ NAN_REQ_ATTR_FOLLOWUP_PRIORITY,
+ NAN_REQ_ATTR_FOLLOWUP_SERVICE_NAME_LEN,
+ NAN_REQ_ATTR_FOLLOWUP_SERVICE_NAME = 100,
+ NAN_REQ_ATTR_FOLLOWUP_TX_WINDOW,
+ NAN_REQ_ATTR_FOLLOWUP_RECV_IND_CFG,
+ NAN_REQ_ATTR_SUBSCRIBE_SID_BEACON_VAL,
+ NAN_REQ_ATTR_DW_2G4_INTERVAL,
+ NAN_REQ_ATTR_DW_5G_INTERVAL,
+ NAN_REQ_ATTR_DISC_MAC_ADDR_RANDOM_INTERVAL,
+ NAN_REQ_ATTR_PUBLISH_SDEA_LEN,
+ NAN_REQ_ATTR_PUBLISH_SDEA,
+ NAN_REQ_ATTR_RANGING_AUTO_RESPONSE,
+ NAN_REQ_ATTR_SDEA_PARAM_NDP_TYPE = 110,
+ NAN_REQ_ATTR_SDEA_PARAM_SECURITY_CFG,
+ NAN_REQ_ATTR_SDEA_PARAM_RANGING_STATE,
+ NAN_REQ_ATTR_SDEA_PARAM_RANGE_REPORT,
+ NAN_REQ_ATTR_SDEA_PARAM_QOS_CFG,
+ NAN_REQ_ATTR_RANGING_CFG_INTERVAL,
+ NAN_REQ_ATTR_RANGING_CFG_INDICATION,
+ NAN_REQ_ATTR_RANGING_CFG_INGRESS_MM,
+ NAN_REQ_ATTR_RANGING_CFG_EGRESS_MM,
+ NAN_REQ_ATTR_CIPHER_TYPE,
+ NAN_REQ_ATTR_SCID_LEN = 120,
+ NAN_REQ_ATTR_SCID,
+ NAN_REQ_ATTR_SECURITY_KEY_TYPE,
+ NAN_REQ_ATTR_SECURITY_PMK_LEN,
+ NAN_REQ_ATTR_SECURITY_PMK,
+ NAN_REQ_ATTR_SECURITY_PASSPHRASE_LEN,
+ NAN_REQ_ATTR_SECURITY_PASSPHRASE,
+ NAN_REQ_ATTR_RANGE_RESPONSE_CFG_PUBLISH_ID,
+ NAN_REQ_ATTR_RANGE_RESPONSE_CFG_REQUESTOR_ID,
+ NAN_REQ_ATTR_RANGE_RESPONSE_CFG_PEER_ADDR,
+ NAN_REQ_ATTR_RANGE_RESPONSE_CFG_RANGING_RESPONSE
+};
+
+/* Attributes for the get-capabilities / publish / subscribe response
+ * payloads. Implicit 0-based values; part of the userspace HAL ABI.
+ */
+enum SLSI_NAN_RESP_ATTRIBUTES {
+ NAN_RESP_ATTR_MAX_CONCURRENT_NAN_CLUSTERS,
+ NAN_RESP_ATTR_MAX_PUBLISHES,
+ NAN_RESP_ATTR_MAX_SUBSCRIBES,
+ NAN_RESP_ATTR_MAX_SERVICE_NAME_LEN,
+ NAN_RESP_ATTR_MAX_MATCH_FILTER_LEN,
+ NAN_RESP_ATTR_MAX_TOTAL_MATCH_FILTER_LEN,
+ NAN_RESP_ATTR_MAX_SERVICE_SPECIFIC_INFO_LEN,
+ NAN_RESP_ATTR_MAX_VSA_DATA_LEN,
+ NAN_RESP_ATTR_MAX_MESH_DATA_LEN,
+ NAN_RESP_ATTR_MAX_NDI_INTERFACES,
+ NAN_RESP_ATTR_MAX_NDP_SESSIONS,
+ NAN_RESP_ATTR_MAX_APP_INFO_LEN,
+ NAN_RESP_ATTR_SUBSCRIBE_ID,
+ NAN_RESP_ATTR_PUBLISH_ID
+};
+
+/* Attributes carried in NAN vendor events (match, followup, publish/
+ * subscribe terminated, discovery-engine events). Explicit "= N" anchors
+ * pin the ABI values; do not renumber.
+ */
+enum SLSI_NAN_EVT_ATTRIBUTES {
+ NAN_EVT_ATTR_MATCH_PUBLISH_SUBSCRIBE_ID,
+ NAN_EVT_ATTR_MATCH_REQUESTOR_INSTANCE_ID,
+ NAN_EVT_ATTR_MATCH_ADDR,
+ NAN_EVT_ATTR_MATCH_SERVICE_SPECIFIC_INFO_LEN,
+ NAN_EVT_ATTR_MATCH_SERVICE_SPECIFIC_INFO,
+ NAN_EVT_ATTR_MATCH_SDF_MATCH_FILTER_LEN,
+ NAN_EVT_ATTR_MATCH_SDF_MATCH_FILTER,
+ NAN_EVT_ATTR_MATCH_MATCH_OCCURRED_FLAG,
+ NAN_EVT_ATTR_MATCH_OUT_OF_RESOURCE_FLAG,
+ NAN_EVT_ATTR_MATCH_RSSI_VALUE,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_IS_WFD_SUPPORTED = 10,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_IS_WFDS_SUPPORTED,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_IS_TDLS_SUPPORTED,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_IS_IBSS_SUPPORTED,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_IS_MESH_SUPPORTED,
+ NAN_EVT_ATTR_MATCH_CONN_CAPABILITY_WLAN_INFRA_FIELD,
+ NAN_EVT_ATTR_MATCH_NUM_RX_DISCOVERY_ATTR,
+ NAN_EVT_ATTR_MATCH_RX_DISCOVERY_ATTR,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_TYPE,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_ROLE,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_DURATION = 20,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_AVAIL_INTERVAL_BITMAP,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_MAPID,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_ADDR,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_MESH_ID_LEN,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_MESH_ID,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_INFRASTRUCTURE_SSID_LEN,
+ NAN_EVT_ATTR_MATCH_DISC_ATTR_INFRASTRUCTURE_SSID_VAL,
+ NAN_EVT_ATTR_MATCH_NUM_CHANS,
+ NAN_EVT_ATTR_MATCH_FAMCHAN,
+ NAN_EVT_ATTR_MATCH_FAM_ENTRY_CONTROL = 30,
+ NAN_EVT_ATTR_MATCH_FAM_CLASS_VAL,
+ NAN_EVT_ATTR_MATCH_FAM_CHANNEL,
+ NAN_EVT_ATTR_MATCH_FAM_MAPID,
+ NAN_EVT_ATTR_MATCH_FAM_AVAIL_INTERVAL_BITMAP,
+ NAN_EVT_ATTR_MATCH_CLUSTER_ATTRIBUTE_LEN,
+ NAN_EVT_ATTR_MATCH_CLUSTER_ATTRIBUTE,
+ NAN_EVT_ATTR_PUBLISH_ID,
+ NAN_EVT_ATTR_PUBLISH_REASON,
+ NAN_EVT_ATTR_SUBSCRIBE_ID,
+ NAN_EVT_ATTR_SUBSCRIBE_REASON = 40,
+ NAN_EVT_ATTR_DISABLED_REASON,
+ NAN_EVT_ATTR_FOLLOWUP_PUBLISH_SUBSCRIBE_ID,
+ NAN_EVT_ATTR_FOLLOWUP_REQUESTOR_INSTANCE_ID,
+ NAN_EVT_ATTR_FOLLOWUP_ADDR,
+ NAN_EVT_ATTR_FOLLOWUP_DW_OR_FAW,
+ NAN_EVT_ATTR_FOLLOWUP_SERVICE_SPECIFIC_INFO_LEN,
+ NAN_EVT_ATTR_FOLLOWUP_SERVICE_SPECIFIC_INFO,
+ NAN_EVT_ATTR_DISCOVERY_ENGINE_EVT_TYPE,
+ NAN_EVT_ATTR_DISCOVERY_ENGINE_MAC_ADDR,
+ NAN_EVT_ATTR_DISCOVERY_ENGINE_CLUSTER = 50,
+ NAN_EVT_ATTR_SDEA,
+ NAN_EVT_ATTR_SDEA_LEN,
+ NAN_EVT_ATTR_SCID,
+ NAN_EVT_ATTR_SCID_LEN,
+ NAN_EVT_ATTR_SDEA_PARAM_CONFIG_NAN_DATA_PATH,
+ NAN_EVT_ATTR_SDEA_PARAM_NDP_TYPE,
+ NAN_EVT_ATTR_SDEA_PARAM_SECURITY_CONFIG,
+ NAN_EVT_ATTR_SDEA_PARAM_RANGE_STATE,
+ NAN_EVT_ATTR_SDEA_PARAM_RANGE_REPORT,
+ NAN_EVT_ATTR_SDEA_PARAM_QOS_CFG = 60,
+ NAN_EVT_ATTR_RANGE_MEASUREMENT_MM,
+ /* "RANGEING" spelling is a known typo, frozen by the ABI - do not rename */
+ NAN_EVT_ATTR_RANGEING_EVENT_TYPE,
+ NAN_EVT_ATTR_SECURITY_CIPHER_TYPE
+};
+
+/* FAPI TLV tag ids used inside stitched NAN vendor IEs (0x0003-0x0027)
+ * followed by HAL buffer-size limits mirrored from the userspace NAN HAL.
+ * The upper-case "0X" prefix matches the firmware interface document.
+ */
+#define SLSI_FAPI_NAN_CONFIG_PARAM_SID_BEACON 0X0003
+#define SLSI_FAPI_NAN_CONFIG_PARAM_2_4_RSSI_CLOSE 0X0004
+#define SLSI_FAPI_NAN_CONFIG_PARAM_2_4_RSSI_MIDDLE 0X0005
+#define SLSI_FAPI_NAN_CONFIG_PARAM_2_4_RSSI_PROXIMITY 0X0006
+#define SLSI_FAPI_NAN_CONFIG_PARAM_BAND_USAGE 0X0007
+#define SLSI_FAPI_NAN_CONFIG_PARAM_5_RSSI_CLOSE 0X0008
+#define SLSI_FAPI_NAN_CONFIG_PARAM_5_RSSI_MIDDLE 0X0009
+#define SLSI_FAPI_NAN_CONFIG_PARAM_5_RSSI_PROXIMITY 0X000A
+#define SLSI_FAPI_NAN_CONFIG_PARAM_HOP_COUNT_LIMIT 0X000B
+#define SLSI_FAPI_NAN_CONFIG_PARAM_RSSI_WINDOW_SIZE 0X000C
+#define SLSI_FAPI_NAN_CONFIG_PARAM_SCAN_PARAMETER_2_4 0X000D
+#define SLSI_FAPI_NAN_CONFIG_PARAM_SCAN_PARAMETER_5 0X000E
+#define SLSI_FAPI_NAN_CONFIG_PARAM_MASTER_PREFERENCE 0X000F
+#define SLSI_FAPI_NAN_CONFIG_PARAM_CONNECTION_CAPAB 0X0010
+#define SLSI_FAPI_NAN_CONFIG_PARAM_POST_DISCOVER_PARAM 0X0011
+#define SLSI_FAPI_NAN_CONFIG_PARAM_FURTHER_AVAIL_CHANNEL_MAP 0X0012
+#define SLSI_FAPI_NAN_CONFIG_PARAM_ADDR_RANDOM_INTERVAL 0X0013
+#define SLSI_FAPI_NAN_SERVICE_NAME 0X0020
+#define SLSI_FAPI_NAN_SERVICE_SPECIFIC_INFO 0X0021
+#define SLSI_FAPI_NAN_RX_MATCH_FILTER 0X0022
+#define SLSI_FAPI_NAN_TX_MATCH_FILTER 0X0023
+#define SLSI_FAPI_NAN_SDF_MATCH_FILTER 0X0024
+#define SLSI_FAPI_NAN_CLUSTER_ATTRIBUTE 0X0025
+#define SLSI_FAPI_NAN_INTERFACE_ADDRESS_SET 0X0026
+#define SLSI_FAPI_NAN_SDEA 0X0027
+
+/* Maximum sizes of the fixed arrays in the slsi_hal_nan_* structs below */
+#define SLSI_HAL_NAN_MAX_SOCIAL_CHANNELS 3
+#define SLSI_HAL_NAN_MAX_SERVICE_NAME_LEN 255
+#define SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN 1024
+#define SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN 255
+#define SLSI_HAL_NAN_MAX_SUBSCRIBE_MAX_ADDRESS 42
+#define SLSI_HAL_NAN_MAX_POSTDISCOVERY_LEN 5
+#define SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN 1024
+
+/* Status codes reported to the userspace NAN HAL; values mirror the HAL
+ * header and must stay numerically identical to it.
+ */
+enum slsi_wifi_hal_nan_status_type {
+ /* NAN Protocol Response Codes */
+ SLSI_HAL_NAN_STATUS_SUCCESS = 0,
+ /* NAN Discovery Engine/Host driver failures */
+ SLSI_HAL_NAN_STATUS_INTERNAL_FAILURE = 1,
+ /* NAN OTA failures */
+ SLSI_HAL_NAN_STATUS_PROTOCOL_FAILURE = 2,
+ /* if the publish/subscribe id is invalid */
+ SLSI_HAL_NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID = 3,
+ /* If we run out of resources allocated */
+ SLSI_HAL_NAN_STATUS_NO_RESOURCE_AVAILABLE = 4,
+ /* if invalid params are passed */
+ SLSI_HAL_NAN_STATUS_INVALID_PARAM = 5,
+ /* if the requestor instance id is invalid */
+ SLSI_HAL_NAN_STATUS_INVALID_REQUESTOR_INSTANCE_ID = 6,
+ /* if the ndp id is invalid */
+ SLSI_HAL_NAN_STATUS_INVALID_NDP_ID = 7,
+ /* if NAN is enabled when wifi is turned off */
+ SLSI_HAL_NAN_STATUS_NAN_NOT_ALLOWED = 8,
+ /* if over the air ack is not received */
+ SLSI_HAL_NAN_STATUS_NO_OTA_ACK = 9,
+ /* If NAN is already enabled and we are try to re-enable the same */
+ SLSI_HAL_NAN_STATUS_ALREADY_ENABLED = 10,
+ /* If followup message internal queue is full */
+ SLSI_HAL_NAN_STATUS_FOLLOWUP_QUEUE_FULL = 11,
+ /* Unsupported concurrency session enabled, NAN disabled notified */
+ SLSI_HAL_NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED = 12
+};
+
+/* Legacy NAN status/termination-reason codes. Ranges: 0-24 protocol,
+ * 4096-4121 configuration errors, 8192-8201 termination reasons; the
+ * gaps in between are reserved. Values are wire/ABI constants.
+ */
+enum slsi_nan_status_type {
+ /* NAN Protocol Response Codes */
+ NAN_STATUS_SUCCESS = 0,
+ NAN_STATUS_TIMEOUT = 1,
+ NAN_STATUS_DE_FAILURE = 2,
+ NAN_STATUS_INVALID_MSG_VERSION = 3,
+ NAN_STATUS_INVALID_MSG_LEN = 4,
+ NAN_STATUS_INVALID_MSG_ID = 5,
+ NAN_STATUS_INVALID_HANDLE = 6,
+ NAN_STATUS_NO_SPACE_AVAILABLE = 7,
+ NAN_STATUS_INVALID_PUBLISH_TYPE = 8,
+ NAN_STATUS_INVALID_TX_TYPE = 9,
+ NAN_STATUS_INVALID_MATCH_ALGORITHM = 10,
+ NAN_STATUS_DISABLE_IN_PROGRESS = 11,
+ NAN_STATUS_INVALID_TLV_LEN = 12,
+ NAN_STATUS_INVALID_TLV_TYPE = 13,
+ NAN_STATUS_MISSING_TLV_TYPE = 14,
+ NAN_STATUS_INVALID_TOTAL_TLVS_LEN = 15,
+ NAN_STATUS_INVALID_MATCH_HANDLE = 16,
+ NAN_STATUS_INVALID_TLV_VALUE = 17,
+ NAN_STATUS_INVALID_TX_PRIORITY = 18,
+ NAN_STATUS_INVALID_CONNECTION_MAP = 19,
+ NAN_STATUS_INVALID_TCA_ID = 20,
+ NAN_STATUS_INVALID_STATS_ID = 21,
+ NAN_STATUS_NAN_NOT_ALLOWED = 22,
+ NAN_STATUS_NO_OTA_ACK = 23,
+ NAN_STATUS_TX_FAIL = 24,
+ /* 25-4095 Reserved */
+ /* NAN Configuration Response codes */
+ NAN_STATUS_INVALID_RSSI_CLOSE_VALUE = 4096,
+ NAN_STATUS_INVALID_RSSI_MIDDLE_VALUE = 4097,
+ NAN_STATUS_INVALID_HOP_COUNT_LIMIT = 4098,
+ NAN_STATUS_INVALID_MASTER_PREFERENCE_VALUE = 4099,
+ NAN_STATUS_INVALID_LOW_CLUSTER_ID_VALUE = 4100,
+ NAN_STATUS_INVALID_HIGH_CLUSTER_ID_VALUE = 4101,
+ NAN_STATUS_INVALID_BACKGROUND_SCAN_PERIOD = 4102,
+ NAN_STATUS_INVALID_RSSI_PROXIMITY_VALUE = 4103,
+ NAN_STATUS_INVALID_SCAN_CHANNEL = 4104,
+ NAN_STATUS_INVALID_POST_NAN_CONNECTIVITY_CAPABILITIES_BITMAP = 4105,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_NUMCHAN_VALUE = 4106,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_DURATION_VALUE = 4107,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_CLASS_VALUE = 4108,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_CHANNEL_VALUE = 4109,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_AVAILABILITY_INTERVAL_BITMAP_VALUE = 4110,
+ NAN_STATUS_INVALID_FURTHER_AVAILABILITY_MAP_MAP_ID = 4111,
+ NAN_STATUS_INVALID_POST_NAN_DISCOVERY_CONN_TYPE_VALUE = 4112,
+ NAN_STATUS_INVALID_POST_NAN_DISCOVERY_DEVICE_ROLE_VALUE = 4113,
+ NAN_STATUS_INVALID_POST_NAN_DISCOVERY_DURATION_VALUE = 4114,
+ NAN_STATUS_INVALID_POST_NAN_DISCOVERY_BITMAP_VALUE = 4115,
+ /* "FUTHER" spelling is a known typo, frozen by the ABI - do not rename */
+ NAN_STATUS_MISSING_FUTHER_AVAILABILITY_MAP = 4116,
+ NAN_STATUS_INVALID_BAND_CONFIG_FLAGS = 4117,
+ NAN_STATUS_INVALID_RANDOM_FACTOR_UPDATE_TIME_VALUE = 4118,
+ NAN_STATUS_INVALID_ONGOING_SCAN_PERIOD = 4119,
+ NAN_STATUS_INVALID_DW_INTERVAL_VALUE = 4120,
+ NAN_STATUS_INVALID_DB_INTERVAL_VALUE = 4121,
+ /* 4122-8191 RESERVED */
+ NAN_TERMINATED_REASON_INVALID = 8192,
+ NAN_TERMINATED_REASON_TIMEOUT = 8193,
+ NAN_TERMINATED_REASON_USER_REQUEST = 8194,
+ NAN_TERMINATED_REASON_FAILURE = 8195,
+ NAN_TERMINATED_REASON_COUNT_REACHED = 8196,
+ NAN_TERMINATED_REASON_DE_SHUTDOWN = 8197,
+ NAN_TERMINATED_REASON_DISABLE_IN_PROGRESS = 8198,
+ NAN_TERMINATED_REASON_POST_DISC_ATTR_EXPIRED = 8199,
+ NAN_TERMINATED_REASON_POST_DISC_LEN_EXCEEDED = 8200,
+ NAN_TERMINATED_REASON_FURTHER_AVAIL_MAP_EMPTY = 8201
+};
+
+/* Identifies which request a vendor-command response answers; values are
+ * explicit because they form part of the HAL ABI.
+ */
+enum slsi_nan_response_type {
+ NAN_RESPONSE_ENABLED = 0,
+ NAN_RESPONSE_DISABLED = 1,
+ NAN_RESPONSE_PUBLISH = 2,
+ NAN_RESPONSE_PUBLISH_CANCEL = 3,
+ NAN_RESPONSE_TRANSMIT_FOLLOWUP = 4,
+ NAN_RESPONSE_SUBSCRIBE = 5,
+ NAN_RESPONSE_SUBSCRIBE_CANCEL = 6,
+ NAN_RESPONSE_STATS = 7,
+ NAN_RESPONSE_CONFIG = 8,
+ NAN_RESPONSE_TCA = 9,
+ NAN_RESPONSE_ERROR = 10,
+ NAN_RESPONSE_BEACON_SDF_PAYLOAD = 11,
+ NAN_RESPONSE_GET_CAPABILITIES = 12
+};
+
+/* Discovery-engine event subtype carried in
+ * NAN_EVT_ATTR_DISCOVERY_ENGINE_EVT_TYPE.
+ */
+enum slsi_nan_disc_event_type {
+ NAN_EVENT_ID_DISC_MAC_ADDR = 0,
+ NAN_EVENT_ID_STARTED_CLUSTER,
+ NAN_EVENT_ID_JOINED_CLUSTER
+};
+
+/* Per-social-channel dwell time (ms units per HAL) and scan period for the
+ * three NAN social channels.
+ */
+struct slsi_hal_nan_social_channel_scan_params {
+ u8 dwell_time[SLSI_HAL_NAN_MAX_SOCIAL_CHANNELS];
+ u16 scan_period[SLSI_HAL_NAN_MAX_SOCIAL_CHANNELS];
+};
+
+/* Post-NAN connectivity capabilities advertised by a peer; each flag is a
+ * 0/1 boolean byte as defined by the HAL.
+ */
+struct slsi_hal_nan_connectivity_capability {
+ u8 payload_transmit_flag;
+ u8 is_wfd_supported;
+ u8 is_wfds_supported;
+ u8 is_tdls_supported;
+ u8 is_ibss_supported;
+ u8 is_mesh_supported;
+ u8 wlan_infra_field;
+};
+
+/* Post-NAN discovery attribute to be published after discovery completes. */
+struct slsi_hal_nan_post_discovery_param {
+ u8 type; /* NanConnectionType */
+ u8 role; /* NanDeviceRole */
+ u8 transmit_freq;
+ u8 duration; /* NanAvailDuration */
+ u32 avail_interval_bitmap;
+ u8 addr[ETH_ALEN];
+ u16 mesh_id_len;
+ u8 mesh_id[32];
+ u16 infrastructure_ssid_len;
+ u8 infrastructure_ssid_val[32];
+};
+
+/* Single entry of a Further Availability Map (channel + availability). */
+struct slsi_hal_nan_further_availability_channel {
+ /* struct slsi_hal_nan_further_availability_channel*/
+ u8 entry_control;
+ u8 class_val;
+ u8 channel;
+ u8 mapid;
+ u32 avail_interval_bitmap;
+};
+
+/* Further Availability Map: numchans valid entries in famchan[]. */
+struct slsi_hal_nan_further_availability_map {
+ u8 numchans;
+ struct slsi_hal_nan_further_availability_channel famchan[32];
+};
+
+/* Post-discovery attribute received from a peer (mirror of the transmit
+ * form in slsi_hal_nan_post_discovery_param).
+ */
+struct slsi_hal_nan_receive_post_discovery {
+ u8 type;
+ u8 role;
+ u8 duration;
+ u32 avail_interval_bitmap;
+ u8 mapid;
+ u8 addr[ETH_ALEN];
+ u16 mesh_id_len;
+ u8 mesh_id[32];
+ u16 infrastructure_ssid_len;
+ u8 infrastructure_ssid_val[32];
+};
+
+/* SDEA (Service Discovery Extended Attribute) control flags; each field is
+ * a small enum/boolean as defined by the NAN HAL.
+ */
+struct slsi_nan_sdea_ctrl_params {
+ u8 config_nan_data_path;
+ u8 ndp_type;
+ u8 security_cfg;
+ u8 ranging_state;
+ u8 range_report;
+ u8 qos_cfg;
+};
+
+/* Ranging configuration: interval, which indications to raise, and the
+ * ingress/egress distance thresholds in millimetres.
+ */
+struct slsi_nan_ranging_cfg {
+ u32 ranging_interval_msec;
+ u32 config_ranging_indications;
+ u32 distance_ingress_mm;
+ u32 distance_egress_mm;
+};
+
+/* Response to a peer-initiated ranging request for a given publish id. */
+struct slsi_nan_range_response_cfg {
+ u16 publish_id;
+ u32 requestor_instance_id;
+ u8 peer_addr[ETH_ALEN];
+ u8 ranging_response;
+};
+
+/* NAN security material limits and containers. key_type in
+ * slsi_nan_security_key_info selects which union member of body is valid.
+ */
+#define SLSI_NAN_PMK_INFO_LEN 32
+#define SLSI_NAN_SECURITY_MAX_PASSPHRASE_LEN 63
+#define SLSI_NAN_MAX_SCID_BUF_LEN 1024
+struct slsi_nan_security_pmk {
+ u32 pmk_len;
+ u8 pmk[SLSI_NAN_PMK_INFO_LEN];
+};
+
+struct slsi_nan_security_passphrase {
+ u32 passphrase_len;
+ u8 passphrase[SLSI_NAN_SECURITY_MAX_PASSPHRASE_LEN];
+};
+
+struct slsi_nan_security_key_info {
+ u8 key_type;
+ union {
+ struct slsi_nan_security_pmk pmk_info;
+ struct slsi_nan_security_passphrase passphrase_info;
+ } body;
+};
+
+/* Cipher suite, security context id (SCID) and key material for a NAN
+ * service session.
+ */
+struct slsi_nan_security_info {
+ u32 cipher_type;
+ u32 scid_len;
+ u8 scid[SLSI_NAN_MAX_SCID_BUF_LEN];
+ struct slsi_nan_security_key_info key_info;
+};
+
+/* Parameters of a NAN enable request. The config_* bytes are presence
+ * flags: when non-zero, the corresponding *_val field carries a value to
+ * apply; when zero the firmware default is used.
+ */
+struct slsi_hal_nan_enable_req {
+ /* Mandatory parameters below */
+ u8 master_pref;
+ u16 cluster_low;
+ u16 cluster_high;
+
+ u8 config_support_5g;
+ u8 support_5g_val;
+ u8 config_sid_beacon;
+ u8 sid_beacon_val;
+ u8 config_2dot4g_rssi_close;
+ u8 rssi_close_2dot4g_val;
+
+ u8 config_2dot4g_rssi_middle;
+ u8 rssi_middle_2dot4g_val;
+
+ u8 config_2dot4g_rssi_proximity;
+ u8 rssi_proximity_2dot4g_val;
+
+ u8 config_hop_count_limit;
+ u8 hop_count_limit_val;
+
+ u8 config_2dot4g_support;
+ u8 support_2dot4g_val;
+
+ u8 config_2dot4g_beacons;
+ u8 beacon_2dot4g_val;
+ u8 config_2dot4g_sdf;
+ u8 sdf_2dot4g_val;
+ u8 config_5g_beacons;
+ u8 beacon_5g_val;
+ u8 config_5g_sdf;
+ u8 sdf_5g_val;
+ u8 config_5g_rssi_close;
+ u8 rssi_close_5g_val;
+ u8 config_5g_rssi_middle;
+ u8 rssi_middle_5g_val;
+ u8 config_5g_rssi_close_proximity;
+ u8 rssi_close_proximity_5g_val;
+ u8 config_rssi_window_size;
+ u8 rssi_window_size_val;
+ /* The 24 bit Organizationally Unique ID + the 8 bit Network Id. */
+ u8 config_oui;
+ u32 oui_val;
+ u8 config_intf_addr;
+ u8 intf_addr_val[ETH_ALEN];
+
+ u8 config_cluster_attribute_val;
+ u8 config_scan_params;
+ struct slsi_hal_nan_social_channel_scan_params scan_params_val;
+ u8 config_random_factor_force;
+ u8 random_factor_force_val;
+ u8 config_hop_count_force;
+ u8 hop_count_force_val;
+
+ /* channel frequency in MHz to enable Nan on */
+ u8 config_24g_channel;
+ u32 channel_24g_val;
+
+ u8 config_5g_channel;
+ int channel_5g_val;
+ u8 config_subscribe_sid_beacon;
+ u32 subscribe_sid_beacon_val;
+
+ /*NanConfigDW config_dw*/
+ u8 config_2dot4g_dw_band;
+ u32 dw_2dot4g_interval_val;
+ u8 config_5g_dw_band;
+ u32 dw_5g_interval_val;
+ u32 disc_mac_addr_rand_interval_sec;
+};
+
+/* Parameters of a NAN publish (service advertisement) request. Variable
+ * payloads are stored inline with an accompanying *_len field; lengths are
+ * bounded by the SLSI_HAL_NAN_MAX_* limits above.
+ */
+struct slsi_hal_nan_publish_req {
+ /* id 0 means new publish, any other id is existing publish */
+ u16 publish_id;
+ /* how many seconds to run for. 0 means forever until canceled */
+ u16 ttl;
+ /* periodicity of OTA unsolicited publish.
+ * Specified in increments of 500 ms
+ */
+ u16 period;
+ u8 publish_type;/* 0= unsolicited, solicited = 1, 2= both */
+ u8 tx_type; /* 0 = broadcast, 1= unicast if solicited publish */
+ /* number of OTA Publish, 0 means forever until canceled */
+ u8 publish_count;
+ u16 service_name_len;
+ u8 service_name[SLSI_HAL_NAN_MAX_SERVICE_NAME_LEN];
+ u8 publish_match_indicator;
+
+ u16 service_specific_info_len;
+ u8 service_specific_info[SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN];
+
+ u16 rx_match_filter_len;
+ u8 rx_match_filter[SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN];
+
+ u16 tx_match_filter_len;
+ u8 tx_match_filter[SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN];
+
+ u8 rssi_threshold_flag;
+
+ /* 8-bit bitmap which allows the Host to associate this publish
+ * with a particular Post-NAN Connectivity attribute
+ * which has been sent down in a NanConfigureRequest/NanEnableRequest
+ * message. If the DE fails to find a configured Post-NAN
+ * connectivity attributes referenced by the bitmap,
+ * the DE will return an error code to the Host.
+ * If the Publish is configured to use a Post-NAN Connectivity
+ * attribute and the Host does not refresh the Post-NAN Connectivity
+ * attribute the Publish will be canceled and the Host will be sent
+ * a PublishTerminatedIndication message.
+ */
+ u8 connmap;
+ /* Set/Enable corresponding bits to disable any
+ * indications that follow a publish.
+ * BIT0 - Disable publish termination indication.
+ * BIT1 - Disable match expired indication.
+ * BIT2 - Disable followUp indication received (OTA).
+ */
+ u8 recv_indication_cfg;
+
+ u8 service_responder_policy;
+ struct slsi_nan_security_info sec_info;
+ struct slsi_nan_sdea_ctrl_params sdea_params;
+ struct slsi_nan_ranging_cfg ranging_cfg;
+ u8 ranging_auto_response;
+ struct slsi_nan_range_response_cfg range_response_cfg;
+
+ u16 sdea_service_specific_info_len;
+ u8 sdea_service_specific_info[SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN];
+};
+
+/* Parameters of a NAN subscribe (service search) request. Mirrors the
+ * publish request layout; variable payloads are inline with *_len fields
+ * bounded by the SLSI_HAL_NAN_MAX_* limits above.
+ */
+struct slsi_hal_nan_subscribe_req {
+ /* id 0 means new subscribe, non zero is existing subscribe */
+ u16 subscribe_id;
+ /* how many seconds to run for. 0 means forever until canceled */
+ u16 ttl;
+ /* periodicity of OTA Active Subscribe. Units in increments
+ * of 500 ms , 0 = attempt every DW
+ */
+ u16 period;
+
+ /* Flag which specifies how the Subscribe request shall be processed. */
+ u8 subscribe_type; /* 0 - PASSIVE , 1- ACTIVE */
+
+ /* Flag which specifies on Active Subscribes how the Service Response
+ * Filter attribute is populated.
+ */
+ u8 service_response_filter; /* 0 - Bloom Filter, 1 - MAC Addr */
+
+ /* Flag which specifies how the Service Response Filter Include
+ * bit is populated.
+ * 0=Do not respond if in the Address Set, 1= Respond
+ */
+ u8 service_response_include;
+
+ /* Flag which specifies if the Service Response Filter
+ * should be used when creating Subscribes.
+ * 0=Do not send the Service Response Filter,1= send
+ */
+ u8 use_service_response_filter;
+
+ /* Flag which specifies if the Service Specific Info is needed in
+ * the Publish message before creating the MatchIndication
+ */
+ u8 ssi_required_for_match_indication; /* 0=Not needed, 1= Required */
+
+ /* Field which specifies how matching indication to host is controlled.
+ * 0 - Match and Indicate Once
+ * 1 - Match and Indicate continuous
+ * 2 - Match and Indicate never. This means don't
+ * indicate match to host.
+ * 3 - Reserved
+ */
+ u8 subscribe_match_indicator;
+
+ /* The number of Subscribe Matches which should occur
+ * before the Subscribe request is automatically terminated.
+ */
+ /* If this value is 0 this field is not used by DE.*/
+ u8 subscribe_count;
+
+ /* length of service name */
+ /* UTF-8 encoded string identifying the service */
+ u16 service_name_len;
+ u8 service_name[SLSI_HAL_NAN_MAX_SERVICE_NAME_LEN];
+
+ /* Sequence of values which further specify the published service
+ * beyond the service name
+ */
+ u16 service_specific_info_len;
+ u8 service_specific_info[SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN];
+
+ /* Ordered sequence of <length, value> pairs used to filter out
+ * received publish discovery messages.
+ * This can be sent both for a Passive or an Active Subscribe
+ */
+ u16 rx_match_filter_len;
+ u8 rx_match_filter[SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN];
+
+ /* Ordered sequence of <length, value> pairs included in the
+ * Discovery Frame when an Active Subscribe is used.
+ */
+ u16 tx_match_filter_len;
+ u8 tx_match_filter[SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN];
+ u8 rssi_threshold_flag;
+
+ u8 connmap;
+ /* NAN Interface Address, conforming to the format as described in
+ * 8.2.4.3.2 of IEEE Std. 802.11-2012.
+ */
+ u8 num_intf_addr_present;
+ u8 intf_addr[SLSI_HAL_NAN_MAX_SUBSCRIBE_MAX_ADDRESS][ETH_ALEN];
+ /* Set/Enable corresponding bits to disable
+ * indications that follow a subscribe.
+ * BIT0 - Disable subscribe termination indication.
+ * BIT1 - Disable match expired indication.
+ * BIT2 - Disable followUp indication received (OTA).
+ */
+ u8 recv_indication_cfg;
+
+ struct slsi_nan_security_info sec_info;
+ struct slsi_nan_sdea_ctrl_params sdea_params;
+ struct slsi_nan_ranging_cfg ranging_cfg;
+ u8 ranging_auto_response;
+ struct slsi_nan_range_response_cfg range_response_cfg;
+
+ u16 sdea_service_specific_info_len;
+ u8 sdea_service_specific_info[SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN];
+};
+
/*
 * HAL request to transmit a NAN follow-up frame to a peer discovered via an
 * earlier publish/subscribe match. Mirrors the Wi-Fi HAL
 * NanTransmitFollowupRequest.
 */
struct slsi_hal_nan_transmit_followup_req {
	/* Publish or Subscribe Id of an earlier Publish/Subscribe */
	u16 publish_subscribe_id;

	/* This Id is the Requestor Instance that is passed as
	 * part of earlier MatchInd/FollowupInd message.
	 */
	u32 requestor_instance_id;
	u8 addr[ETH_ALEN]; /* Unicast address */
	u8 priority; /* priority of the request 2=high */
	u8 dw_or_faw; /* 0= send in a DW, 1=send in FAW */

	/* Sequence of values which further specify the published service beyond
	 * the service name.
	 */
	u16 service_specific_info_len;
	u8 service_specific_info[SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN];
	/* Set/Enable corresponding bits to disable
	 * responses after followUp.
	 * BIT0 - Disable followUp response from FW.
	 */
	u8 recv_indication_cfg;

	/* Extended (SDEA) service specific info blob */
	u16 sdea_service_specific_info_len;
	u8 sdea_service_specific_info[SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN];
};
+
/*
 * HAL request to change NAN run-time configuration. Each config_* flag
 * gates whether the corresponding *_val field is applied (the usual HAL
 * "valid" pattern); values with no flag are applied unconditionally.
 */
struct slsi_hal_nan_config_req {
	u8 config_sid_beacon;
	u8 sid_beacon;
	u8 config_rssi_proximity;
	u8 rssi_proximity;
	u8 config_master_pref;
	u8 master_pref;
	/* 1 byte value which defines the RSSI filter threshold.
	 * Any Service Descriptors received above this value
	 * that are configured for RSSI filtering will be dropped.
	 * The rssi values should be specified without sign.
	 * For eg: -70dBm should be specified as 70.
	 */
	u8 config_5g_rssi_close_proximity;
	u8 rssi_close_proximity_5g_val;
	u8 config_rssi_window_size;
	u16 rssi_window_size_val;
	/* If set to 1, the Discovery Engine will enclose the Cluster
	 * Attribute only sent in Beacons in a Vendor Specific Attribute
	 * and transmit in a Service Descriptor Frame.
	 */
	u8 config_cluster_attribute_val;
	u8 config_scan_params;
	struct slsi_hal_nan_social_channel_scan_params scan_params_val;
	/* 1 byte quantity which forces the Random Factor to a particular
	 * value for all transmitted Sync/Discovery beacons
	 */
	u8 config_random_factor_force;
	u8 random_factor_force_val;
	/* 1 byte quantity which forces the HC for all transmitted Sync and
	 * Discovery Beacon NO matter the real HC being received over the
	 * air.
	 */
	u8 config_hop_count_force;
	u8 hop_count_force_val;
	/* NAN Post Connectivity Capability */
	u8 config_conn_capability;
	struct slsi_hal_nan_connectivity_capability conn_capability_val;
	/* NAN Post Discover Capability */
	u8 num_config_discovery_attr;
	struct slsi_hal_nan_post_discovery_param discovery_attr_val[SLSI_HAL_NAN_MAX_POSTDISCOVERY_LEN];
	/* NAN Further availability Map */
	u8 config_fam;
	struct slsi_hal_nan_further_availability_map fam_val;

	/* 5 GHz operating channel (units not visible here — TODO confirm) */
	int channel_5g_val;
	u8 config_subscribe_sid_beacon;
	u32 subscribe_sid_beacon_val;

	/*NanConfigDW config_dw*/
	u8 config_2dot4g_dw_band;
	u32 dw_2dot4g_interval_val;
	u8 config_5g_dw_band;
	u32 dw_5g_interval_val;
	/* Interval (seconds) for discovery MAC address randomisation */
	u32 disc_mac_addr_rand_interval_sec;

};
+
/*
 * NAN capabilities reported back to the HAL (see
 * slsi_nan_get_capabilities()): maximum supported counts and buffer
 * lengths for the discovery engine.
 */
struct slsi_hal_nan_capabilities {
	u32 max_concurrent_nan_clusters;
	u32 max_publishes;
	u32 max_subscribes;
	u32 max_service_name_len;
	u32 max_match_filter_len;
	u32 max_total_match_filter_len;
	u32 max_service_specific_info_len;
	u32 max_vsa_data_len;
	u32 max_mesh_data_len;
	u32 max_ndi_interfaces;
	u32 max_ndp_sessions;
	u32 max_app_info_len;
};
+
/*
 * Indication delivered to the HAL when a follow-up frame is received from a
 * peer (see slsi_nan_followup_ind()).
 */
struct slsi_hal_nan_followup_ind {
	u16 publish_subscribe_id;
	u32 requestor_instance_id;
	u8 addr[ETH_ALEN];          /* peer NAN interface address */
	u8 dw_or_faw;               /* 0 = received in a DW, 1 = in a FAW */
	u16 service_specific_info_len;
	u8 service_specific_info[SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN];
	u16 sdea_service_specific_info_len;
	u8 sdea_service_specific_info[SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN];
};
+
/*
 * Indication delivered to the HAL when a publish/subscribe match occurs
 * (see slsi_nan_service_ind()): identity of the matched peer plus any
 * received service info, filters, capabilities and ranging results.
 */
struct slsi_hal_nan_match_ind {
	u16 publish_subscribe_id;
	u32 requestor_instance_id;
	u8 addr[ETH_ALEN];          /* peer NAN interface address */
	u16 service_specific_info_len;
	u8 service_specific_info[SLSI_HAL_NAN_MAX_SERVICE_SPECIFIC_INFO_LEN];
	u16 sdf_match_filter_len;
	u8 sdf_match_filter[SLSI_HAL_NAN_MAX_MATCH_FILTER_LEN];
	u8 match_occurred_flag;
	u8 out_of_resource_flag;
	u8 rssi_value;
	u8 is_conn_capability_valid;   /* gates conn_capability below */
	struct slsi_hal_nan_connectivity_capability conn_capability;
	u8 num_rx_discovery_attr;
	struct slsi_hal_nan_receive_post_discovery discovery_attr[SLSI_HAL_NAN_MAX_POSTDISCOVERY_LEN];
	u8 num_chans;
	struct slsi_hal_nan_further_availability_channel famchan[32];
	u8 cluster_attribute_len;
	u8 cluster_attribute[32];
	struct slsi_nan_security_info sec_info;
	struct slsi_nan_sdea_ctrl_params peer_sdea_params;
	u32 range_measurement_mm;      /* measured distance in millimetres */
	u32 ranging_event_type;
	u16 sdea_service_specific_info_len;
	u8 sdea_service_specific_info[SLSI_HAL_NAN_MAX_SDEA_SERVICE_SPECIFIC_INFO_LEN];
};
+
/* Firmware-indication dispatchers: consume the MLME skb for the event. */
void slsi_nan_event(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
void slsi_nan_followup_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
void slsi_nan_service_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
/* Helpers: NAN MAC address and the NAN net_device owned by this sdev. */
void slsi_nan_get_mac(struct slsi_dev *sdev, char *nan_mac_addr);
struct net_device *slsi_nan_get_netdev(struct slsi_dev *sdev);
/* nl80211 vendor-command entry points; data/len is the netlink payload. */
int slsi_nan_enable(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_disable(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_publish(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_publish_cancel(struct wiphy *wiphy, struct wireless_dev *wdev,
			    const void *data, int len);
int slsi_nan_subscribe(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_subscribe_cancel(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_transmit_followup(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
int slsi_nan_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len);
#endif /* closes the include guard opened earlier in this header (not visible here) */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/**/
+
+#ifndef _PORTING_IMX__H__
+#define _PORTING_IMX__H__
+#endif
+/**
+ * ether_addr_copy - Copy an Ethernet address
+ * @dst: Pointer to a six-byte array Ethernet address destination
+ * @src: Pointer to a six-byte array Ethernet address source
+ *
+ * Please note: dst & src must both be aligned to u16.
+ */
+static inline void ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ *(u32 *)dst = *(const u32 *)src;
+ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] = b[0];
+ a[1] = b[1];
+ a[2] = b[2];
+#endif
+}
+
+static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
+}
+
+
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h"
+#include "procfs.h"
+#include "debug.h"
+#include "mlme.h"
+#include "mgt.h"
+#include "mib.h"
+#include "cac.h"
+#include "hip.h"
+#include "netif.h"
+#include "ioctl.h"
+#include "nl80211_vendor.h"
+
+#include "mib.h"
+
/*
 * Generic open handler shared by the wlan procfs entries: stash the
 * per-entry data pointer (a struct slsi_dev * — see the read handlers,
 * which cast file->private_data to that type) for later use.
 */
int slsi_procfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = SLSI_PDE_DATA(inode);
	return 0;
}
+
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+static int slsi_printf_mutex_stats(char *buf, const size_t bufsz, const char *printf_padding, struct slsi_mutex *mutex_p)
+{
+ int pos = 0;
+ const char *filename;
+ bool is_locked;
+
+ if (mutex_p->valid) {
+ is_locked = SLSI_MUTEX_IS_LOCKED(*mutex_p);
+ pos += scnprintf(buf, bufsz, "INFO: lock:%d\n", is_locked);
+ if (is_locked) {
+ filename = strrchr(mutex_p->file_name_before, '/');
+ if (filename)
+ filename++;
+ else
+ filename = mutex_p->file_name_before;
+ pos += scnprintf(buf + pos, bufsz - pos, "\t%sTryingToAcquire:%s:%d\n", printf_padding,
+ filename, mutex_p->line_no_before);
+ filename = strrchr(mutex_p->file_name_after, '/');
+ if (filename)
+ filename++;
+ else
+ filename = mutex_p->file_name_after;
+ pos += scnprintf(buf + pos, bufsz - pos, "\t%sAcquired:%s:%d:%s\n", printf_padding,
+ filename, mutex_p->line_no_after, mutex_p->function);
+ pos += scnprintf(buf + pos, bufsz - pos, "\t%sProcessName:%s\n", printf_padding, mutex_p->owner->comm);
+ }
+ } else {
+ pos += scnprintf(buf, bufsz, "NoInit\n");
+ }
+ return pos;
+}
+
/*
 * procfs read handler: dump the state of every instrumented driver mutex
 * (device-level ones, then the per-vif mutexes of each available netdev).
 * NOTE(review): slsi_get_netdev_locked() is called here without visibly
 * taking netdev_add_remove_mutex — confirm the caller context makes this
 * safe. The stack buffer is sized empirically; scnprintf bounds all writes
 * so overflow cannot occur, but output may truncate.
 */
static ssize_t slsi_procfs_mutex_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[76 + (200 * CONFIG_SCSC_WLAN_MAX_INTERFACES)];
	int pos = 0;
	int i;
	const size_t bufsz = sizeof(buf);
	struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
	struct net_device *dev;
	struct netdev_vif *ndev_vif;

	SLSI_UNUSED_PARAMETER(file);

	/* Device-wide mutexes first. */
	pos += scnprintf(buf, bufsz, "sdev\n");
	pos += scnprintf(buf + pos, bufsz - pos, "\tnetdev_add_remove_mutex ");
	pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t", &sdev->netdev_add_remove_mutex);
	pos += scnprintf(buf + pos, bufsz - pos, "\tstart_stop_mutex ");
	pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t", &sdev->start_stop_mutex);
	pos += scnprintf(buf + pos, bufsz - pos, "\tdevice_config_mutex ");
	pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t", &sdev->device_config_mutex);
	pos += scnprintf(buf + pos, bufsz - pos, "\tsig_wait.mutex ");
	pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t", &sdev->sig_wait.mutex);
#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
	pos += scnprintf(buf + pos, bufsz - pos, "\tlogger_mutex ");
	pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t", &sdev->logger_mutex);
#endif

	/* Per-interface mutexes (interface indices start at 1). */
	for (i = 1; i < CONFIG_SCSC_WLAN_MAX_INTERFACES + 1; i++) {
		pos += scnprintf(buf + pos, bufsz - pos, "netdevvif %d\n", i);
		dev = slsi_get_netdev_locked(sdev, i);
		if (!dev)
			continue;
		ndev_vif = netdev_priv(dev);
		if (ndev_vif->is_available) {
			pos += scnprintf(buf + pos, bufsz - pos, "\tvif_mutex ");
			pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t\t", &ndev_vif->vif_mutex);
			pos += scnprintf(buf + pos, bufsz - pos, "\tsig_wait.mutex ");
			pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t\t", &ndev_vif->sig_wait.mutex);
			pos += scnprintf(buf + pos, bufsz - pos, "\tscan_mutex ");
			pos += slsi_printf_mutex_stats(buf + pos, bufsz - pos, "\t\t", &ndev_vif->scan_mutex);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos, "\tvif UNAVAILABLE\n");
		}
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+#endif
+
+static ssize_t slsi_procfs_throughput_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[5 * 25];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct slsi_mib_data mibrsp = { 0, NULL };
+ struct slsi_mib_value *values = NULL;
+ struct slsi_mib_get_entry get_values[] = {{ SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 3, 0 } },
+ { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 4, 0 } },
+ { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 5, 0 } },
+ { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 25, 0 } },
+ { SLSI_PSID_UNIFI_THROUGHPUT_DEBUG, { 30, 0 } } };
+
+ SLSI_UNUSED_PARAMETER(file);
+
+ dev = slsi_get_netdev(sdev, 1);
+ ndev_vif = netdev_priv(dev);
+
+ if (ndev_vif->activated) {
+ mibrsp.dataLength = 15 * ARRAY_SIZE(get_values);
+ mibrsp.data = kmalloc(mibrsp.dataLength, GFP_KERNEL);
+ if (!mibrsp.data)
+ SLSI_ERR(sdev, "Cannot kmalloc %d bytes\n", mibrsp.dataLength);
+ values = slsi_read_mibs(sdev, dev, get_values, ARRAY_SIZE(get_values), &mibrsp);
+ if (!values) {
+ kfree(mibrsp.data);
+ return -EINVAL;
+ }
+ if (values[0].type != SLSI_MIB_TYPE_UINT)
+ SLSI_ERR(sdev, "invalid type. iter:%d", 0); /*bad_fcs_count*/
+ if (values[1].type != SLSI_MIB_TYPE_UINT)
+ SLSI_ERR(sdev, "invalid type. iter:%d", 1); /*missed_ba_count*/
+ if (values[2].type != SLSI_MIB_TYPE_UINT)
+ SLSI_ERR(sdev, "invalid type. iter:%d", 2); /*missed_ack_count*/
+ if (values[3].type != SLSI_MIB_TYPE_UINT)
+ SLSI_ERR(sdev, "invalid type. iter:%d", 3); /*mac_bad_sig_count*/
+ if (values[4].type != SLSI_MIB_TYPE_UINT)
+ SLSI_ERR(sdev, "invalid type. iter:%d", 4); /*rx_error_count*/
+
+ pos += scnprintf(buf, bufsz, "RX FCS: %d\n", values[0].u.uintValue);
+ pos += scnprintf(buf + pos, bufsz - pos, "RX bad SIG: %d\n", values[3].u.uintValue);
+ pos += scnprintf(buf + pos, bufsz - pos, "RX dot11 error: %d\n", values[4].u.uintValue);
+ pos += scnprintf(buf + pos, bufsz - pos, "TX MPDU no ACK: %d\n", values[2].u.uintValue);
+ pos += scnprintf(buf + pos, bufsz - pos, "TX A-MPDU no ACK: %d\n", values[1].u.uintValue);
+
+ kfree(values);
+ kfree(mibrsp.data);
+ } else {
+ pos += scnprintf(buf, bufsz, "RX FCS: %d\n", 0);
+ pos += scnprintf(buf + pos, bufsz - pos, "RX bad SIG: %d\n", 0);
+ pos += scnprintf(buf + pos, bufsz - pos, "RX dot11 error: %d\n", 0);
+ pos += scnprintf(buf + pos, bufsz - pos, "TX MPDU no ACK: %d\n", 0);
+ pos += scnprintf(buf + pos, bufsz - pos, "TX A-MPDU no ACK: %d\n", 0);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t slsi_procfs_sta_bss_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[100];
+ int pos;
+ const size_t bufsz = sizeof(buf);
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct cfg80211_bss *sta_bss;
+ int channel = 0, center_freq = 0;
+ u8 no_mac[] = {0, 0, 0, 0, 0, 0};
+ u8 *mac_ptr;
+ u8 ssid[33];
+ s32 signal = 0;
+
+ SLSI_UNUSED_PARAMETER(file);
+
+ mac_ptr = no_mac;
+ ssid[0] = 0;
+
+ dev = slsi_get_netdev(sdev, 1);
+ if (!dev)
+ goto exit;
+
+ ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ sta_bss = ndev_vif->sta.sta_bss;
+ if (sta_bss && ndev_vif->vif_type == FAPI_VIFTYPE_STATION &&
+ ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED) {
+ const u8 *ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, sta_bss->ies->data, sta_bss->ies->len);
+
+ if (ssid_ie) {
+ memcpy(ssid, &ssid_ie[2], ssid_ie[1]);
+ ssid[ssid_ie[1]] = 0;
+ }
+
+ if (sta_bss->channel) {
+ channel = sta_bss->channel->hw_value;
+ center_freq = sta_bss->channel->center_freq;
+ }
+ mac_ptr = sta_bss->bssid;
+ signal = sta_bss->signal;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+exit:
+ pos = scnprintf(buf, bufsz, "%pM,%s,%d,%d,%d", mac_ptr, ssid, channel, center_freq, signal);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t slsi_procfs_big_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[100];
+ int pos;
+ const size_t bufsz = sizeof(buf);
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+
+ SLSI_UNUSED_PARAMETER(file);
+ dev = slsi_get_netdev(sdev, 1);
+ if (!dev)
+ goto exit;
+
+ ndev_vif = netdev_priv(dev);
+
+exit:
+ pos = slsi_get_sta_info(dev, buf, bufsz);
+ if (pos >= 0)
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return 0;
+}
+
/*
 * seq_file show handler: print driver state, driver/chip FAPI SAP
 * versions, the negotiated HIP4 config version (when HIP is started)
 * and MIB hash/platform information.
 */
static int slsi_procfs_status_show(struct seq_file *m, void *v)
{
	struct slsi_dev *sdev = (struct slsi_dev *)m->private;
	const char *state;
	u32 conf_hip4_ver = 0;
	int i;

	SLSI_UNUSED_PARAMETER(v);

	/* Map the device-state enum to a readable string. */
	switch (sdev->device_state) {
	case SLSI_DEVICE_STATE_ATTACHING:
		state = "Attaching";
		break;
	case SLSI_DEVICE_STATE_STOPPED:
		state = "Stopped";
		break;
	case SLSI_DEVICE_STATE_STARTING:
		state = "Starting";
		break;
	case SLSI_DEVICE_STATE_STARTED:
		state = "Started";
		break;
	case SLSI_DEVICE_STATE_STOPPING:
		state = "Stopping";
		break;
	default:
		state = "UNKNOWN";
		break;
	}

	/* Driver-side (compiled-in) FAPI versions. */
	seq_printf(m, "Driver FAPI Version: MA SAP : %d.%d.%d\n", FAPI_MAJOR_VERSION(FAPI_DATA_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_DATA_SAP_VERSION), FAPI_DATA_SAP_ENG_VERSION);
	seq_printf(m, "Driver FAPI Version: MLME SAP : %d.%d.%d\n", FAPI_MAJOR_VERSION(FAPI_CONTROL_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_CONTROL_SAP_VERSION), FAPI_CONTROL_SAP_ENG_VERSION);
	seq_printf(m, "Driver FAPI Version: DEBUG SAP : %d.%d.%d\n", FAPI_MAJOR_VERSION(FAPI_DEBUG_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_DEBUG_SAP_VERSION), FAPI_DEBUG_SAP_ENG_VERSION);
	seq_printf(m, "Driver FAPI Version: TEST SAP : %d.%d.%d\n", FAPI_MAJOR_VERSION(FAPI_TEST_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_TEST_SAP_VERSION), FAPI_TEST_SAP_ENG_VERSION);

	/* Chip-side FAPI versions are only readable while HIP is started. */
	if (atomic_read(&sdev->hip.hip_state) == SLSI_HIP_STATE_STARTED) {
		conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
		seq_printf(m, "HIP4 Version : %d\n", conf_hip4_ver);
		if (conf_hip4_ver == 4) {
			seq_printf(m, "Chip FAPI Version (v4): MA SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_ma_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_ma_ver)));
			seq_printf(m, "Chip FAPI Version (v4): MLME SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_mlme_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_mlme_ver)));
			seq_printf(m, "Chip FAPI Version (v4): DEBUG SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_debug_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_debug_ver)));
			seq_printf(m, "Chip FAPI Version (v4): TEST SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_test_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_test_ver)));
		} else if (conf_hip4_ver == 5) {
			seq_printf(m, "Chip FAPI Version (v5): MA SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_ma_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_ma_ver)));
			seq_printf(m, "Chip FAPI Version (v5): MLME SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_mlme_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_mlme_ver)));
			seq_printf(m, "Chip FAPI Version (v5): DEBUG SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_debug_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_debug_ver)));
			seq_printf(m, "Chip FAPI Version (v5): TEST SAP : %d.%d\n",
				   FAPI_MAJOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_test_ver)),
				   FAPI_MINOR_VERSION(scsc_wifi_get_hip_config_version_5_u16(&sdev->hip4_inst.hip_control->config_v5, sap_test_ver)));
		}
	}

#ifdef CONFIG_SCSC_WLAN_DEBUG
	seq_puts(m, "Driver Debug : Enabled\n");
#else
	seq_puts(m, "Driver Debug : Disabled\n");
#endif
	seq_printf(m, "Driver State : %s\n", state);

	/* Hashes/platform strings for each MIB file plus the local MIB. */
	seq_printf(m, "HW Version [MIB] : 0x%.4X (%u)\n", sdev->chip_info_mib.chip_version, sdev->chip_info_mib.chip_version);
	seq_printf(m, "Platform Build [MIB] : 0x%.4X (%u)\n", sdev->plat_info_mib.plat_build, sdev->plat_info_mib.plat_build);
	for (i = 0; i < SLSI_WLAN_MAX_MIB_FILE; i++) {
		seq_printf(m, "Hash [MIB%2d] : 0x%.4X (%u)\n", i, sdev->mib[i].mib_hash, sdev->mib[i].mib_hash);
		seq_printf(m, "Platform: [MIB%2d] : %s\n", i, sdev->mib[i].platform);
	}
	seq_printf(m, "Hash [local_MIB] : 0x%.4X (%u)\n", sdev->local_mib.mib_hash, sdev->local_mib.mib_hash);
	seq_printf(m, "Platform: [local_MIB] : %s\n", sdev->local_mib.platform);

	return 0;
}
+
/*
 * seq_file show handler: print the compiled-in FAPI versions followed by
 * the y/n state of every build-time CONFIG_* option the driver cares
 * about. Purely compile-time information; no device state is touched.
 */
static int slsi_procfs_build_show(struct seq_file *m, void *v)
{
	SLSI_UNUSED_PARAMETER(v);
	seq_printf(m, "FAPI_DATA_SAP_VERSION : %d.%d.%d\n",
		   FAPI_MAJOR_VERSION(FAPI_DATA_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_DATA_SAP_VERSION),
		   FAPI_DATA_SAP_ENG_VERSION);
	seq_printf(m, "FAPI_CONTROL_SAP_VERSION : %d.%d.%d\n",
		   FAPI_MAJOR_VERSION(FAPI_CONTROL_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_CONTROL_SAP_VERSION),
		   FAPI_CONTROL_SAP_ENG_VERSION);
	seq_printf(m, "FAPI_DEBUG_SAP_VERSION : %d.%d.%d\n",
		   FAPI_MAJOR_VERSION(FAPI_DEBUG_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_DEBUG_SAP_VERSION),
		   FAPI_DEBUG_SAP_ENG_VERSION);
	seq_printf(m, "FAPI_TEST_SAP_VERSION : %d.%d.%d\n",
		   FAPI_MAJOR_VERSION(FAPI_TEST_SAP_VERSION),
		   FAPI_MINOR_VERSION(FAPI_TEST_SAP_VERSION),
		   FAPI_TEST_SAP_ENG_VERSION);
	seq_printf(m, "CONFIG_SCSC_WLAN_MAX_INTERFACES : %d\n", CONFIG_SCSC_WLAN_MAX_INTERFACES);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
	seq_puts(m, "CONFIG_SCSC_WLAN_RX_NAPI : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_RX_NAPI : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
	seq_puts(m, "CONFIG_SCSC_WLAN_RX_NAPI_GRO : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_RX_NAPI_GRO : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_ANDROID
	seq_puts(m, "CONFIG_SCSC_WLAN_ANDROID : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_ANDROID : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
	seq_puts(m, "CONFIG_SCSC_WLAN_GSCAN_ENABLE : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_GSCAN_ENABLE : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
	seq_puts(m, "CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES
	seq_puts(m, "CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_PRIORITISE_IMP_FRAMES : n\n");
#endif
	seq_puts(m, "-------------------------------------------------\n");
#ifdef CONFIG_SCSC_WLAN_DEBUG
	seq_puts(m, "CONFIG_SCSC_WLAN_DEBUG : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_DEBUG : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_SKB_TRACKING
	seq_puts(m, "CONFIG_SCSC_WLAN_SKB_TRACKING : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_SKB_TRACKING : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
	seq_puts(m, "CONFIG_SCSC_WLAN_MUTEX_DEBUG : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_MUTEX_DEBUG : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
	seq_puts(m, "CONFIG_SCSC_WLAN_ENHANCED_LOGGING : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_ENHANCED_LOGGING : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_WIFI_SHARING
	seq_puts(m, "CONFIG_SCSC_WLAN_WIFI_SHARING : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_WIFI_SHARING : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_SINGLE_ANTENNA
	seq_puts(m, "CONFIG_SCSC_WLAN_SINGLE_ANTENNA : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_SINGLE_ANTENNA : n\n");
#endif
#ifdef CONFIG_SCSC_AP_INTERFACE_NAME
	seq_printf(m, "CONFIG_SCSC_AP_INTERFACE_NAME : %s\n", CONFIG_SCSC_AP_INTERFACE_NAME);
#endif
#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
	seq_puts(m, "CONFIG_SCSC_WIFI_NAN_ENABLE : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WIFI_NAN_ENABLE : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA
	seq_puts(m, "CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_SET_PREFERRED_ANTENNA : n\n");
#endif
#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
	seq_puts(m, "CONFIG_SLSI_WLAN_STA_FWD_BEACON : y\n");
#else
	seq_puts(m, "CONFIG_SLSI_WLAN_STA_FWD_BEACON : n\n");
#endif
#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
	seq_puts(m, "CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT : y\n");
#else
	seq_puts(m, "CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT : n\n");
#endif

	return 0;
}
+
+static const char *slsi_procfs_vif_type_to_str(u16 type)
+{
+ switch (type) {
+ case FAPI_VIFTYPE_STATION:
+ return "STATION";
+ case FAPI_VIFTYPE_AP:
+ return "AP";
+ case FAPI_VIFTYPE_UNSYNCHRONISED:
+ return "UNSYNCH";
+ default:
+ return "?";
+ }
+}
+
/*
 * seq_file show handler: list every activated vif (MAC, type) and its
 * valid peer records. Holds netdev_add_remove_mutex across the scan and
 * takes each vif_mutex while inspecting that vif's state.
 */
static int slsi_procfs_vifs_show(struct seq_file *m, void *v)
{
	struct slsi_dev *sdev = (struct slsi_dev *)m->private;
	u16 vif;
	u16 peer_index;

	SLSI_UNUSED_PARAMETER(v);

	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
	/* Interface indices are 1-based. */
	for (vif = 1; vif <= CONFIG_SCSC_WLAN_MAX_INTERFACES; vif++) {
		struct net_device *dev = slsi_get_netdev_locked(sdev, vif);
		struct netdev_vif *ndev_vif;

		if (!dev)
			continue;

		ndev_vif = netdev_priv(dev);
		SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

		if (!ndev_vif->activated) {
			SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
			continue;
		}
		seq_printf(m, "vif:%d %pM %s\n", vif, dev->dev_addr, slsi_procfs_vif_type_to_str(ndev_vif->vif_type));
		for (peer_index = 0; peer_index < SLSI_ADHOC_PEER_CONNECTIONS_MAX; peer_index++) {
			struct slsi_peer *peer = ndev_vif->peer_sta_record[peer_index];

			if (peer && peer->valid)
				seq_printf(m, "vif:%d %pM peer[%d] %pM\n", vif, dev->dev_addr, peer_index, peer->address);
		}
		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	}
	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);

	return 0;
}
+
+static ssize_t slsi_procfs_read_int(struct file *file, char __user *user_buf, size_t count, loff_t *ppos, int value, const char *extra)
+{
+ char buf[128];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ SLSI_UNUSED_PARAMETER(file);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", value);
+ if (extra)
+ pos += scnprintf(buf + pos, bufsz - pos, "%s", extra);
+ SLSI_INFO((struct slsi_dev *)file->private_data, "%s", buf);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t slsi_procfs_uapsd_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ struct net_device *dev = NULL;
+ struct netdev_vif *ndev_vif = NULL;
+ int qos_info = 0;
+ int offset = 0;
+ char *read_string;
+
+ dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+
+ if (!dev) {
+ SLSI_ERR(sdev, "Dev not found\n");
+ return -EINVAL;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ if (!count)
+ return -EINVAL;
+
+ read_string = kmalloc(count + 1, GFP_KERNEL);
+ memset(read_string, 0, (count + 1));
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ offset = slsi_str_to_int(read_string, &qos_info);
+ if (!offset) {
+ SLSI_ERR(sdev, "qos info : failed to read a numeric value");
+ kfree(read_string);
+ return -EINVAL;
+ }
+
+ /*Store the qos info and use it to set MIB during connection*/
+ sdev->device_config.qos_info = qos_info;
+ SLSI_DBG1(sdev, SLSI_MLME, "set qos_info:%d\n", sdev->device_config.qos_info);
+
+
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_ap_cert_disable_ht_vht_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ int offset = 0;
+ int width = 0;
+ char *read_string;
+
+ if (!count)
+ return -EINVAL;
+
+ read_string = kmalloc(count + 1, GFP_KERNEL);
+ memset(read_string, 0, (count + 1));
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ offset = slsi_str_to_int(read_string, &width);
+ if (!offset) {
+ SLSI_ERR(sdev, "Failed to read a numeric value");
+ kfree(read_string);
+ return -EINVAL;
+ }
+
+ /* Disable default upgrade of corresponding width during AP start */
+ if (width == 80)
+ sdev->allow_switch_80_mhz = false;
+ else if (width == 40)
+ sdev->allow_switch_40_mhz = false;
+
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_p2p_certif_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ char *read_string;
+ int cert_info = 0;
+ int offset = 0;
+
+ read_string = kmalloc(count + 1, GFP_KERNEL);
+ memset(read_string, 0, (count + 1));
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ offset = slsi_str_to_int(read_string, &cert_info);
+ if (!offset) {
+ SLSI_ERR(sdev, "qos info : failed to read a numeric value");
+ kfree(read_string);
+ return -EINVAL;
+ }
+ sdev->p2p_certif = cert_info;
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_p2p_certif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ char buf[128];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ SLSI_UNUSED_PARAMETER(file);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", sdev->p2p_certif);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
/* seq_file show handler: print the device hardware MAC address. */
static int slsi_procfs_mac_addr_show(struct seq_file *m, void *v)
{
	struct slsi_dev *sdev = (struct slsi_dev *)m->private;

	SLSI_UNUSED_PARAMETER(v);

	seq_printf(m, "%pM", sdev->hw_addr);
	return 0;
}
+
+static ssize_t slsi_procfs_create_tspec_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ static const char *extra_info = "";
+
+ return slsi_procfs_read_int(file, user_buf, count, ppos, sdev->current_tspec_id, extra_info);
+}
+
+static ssize_t slsi_procfs_create_tspec_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sfdev = (struct slsi_dev *)file->private_data;
+ char *read_string = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!read_string) {
+ SLSI_ERR(sfdev, "Malloc for read_string failed\n");
+ return -ENOMEM;
+ }
+
+ if (!count) {
+ kfree(read_string);
+ return 0;
+ }
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ sfdev->current_tspec_id = cac_ctrl_create_tspec(sfdev, read_string);
+ if (sfdev->current_tspec_id < 0) {
+ SLSI_ERR(sfdev, "create tspec: No parameters or not valid parameters\n");
+ kfree(read_string);
+ return -EINVAL;
+ }
+ kfree(read_string);
+
+ return count;
+}
+
+static ssize_t slsi_procfs_confg_tspec_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ static const char *extra_info = "Not implemented yet";
+ int value = 10;
+
+ return slsi_procfs_read_int(file, user_buf, count, ppos, value, extra_info);
+}
+
+static ssize_t slsi_procfs_confg_tspec_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sfdev = (struct slsi_dev *)file->private_data;
+ char *read_string = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!read_string) {
+ SLSI_ERR(sfdev, "Malloc for read_string failed\n");
+ return -ENOMEM;
+ }
+
+ if (!count) {
+ kfree(read_string);
+ return 0;
+ }
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ /* to do: call to config_tspec() to configure a tspec field */
+ if (cac_ctrl_config_tspec(sfdev, read_string) < 0) {
+ SLSI_ERR(sfdev, "config tspec error\n");
+ kfree(read_string);
+ return -EINVAL;
+ }
+
+ kfree(read_string);
+
+ return count;
+}
+
+static ssize_t slsi_procfs_send_addts_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ static const char *extra_info = "";
+
+ return slsi_procfs_read_int(file, user_buf, count, ppos, sdev->tspec_error_code, extra_info);
+}
+
+static ssize_t slsi_procfs_send_addts_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sfdev = (struct slsi_dev *)file->private_data;
+ char *read_string = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!read_string) {
+ SLSI_ERR(sfdev, "Malloc for read_string failed\n");
+ return -ENOMEM;
+ }
+
+ sfdev->tspec_error_code = -1;
+ if (!count) {
+ kfree(read_string);
+ return 0;
+ }
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ /* to do: call to config_tspec() to configure a tspec field */
+ if (cac_ctrl_send_addts(sfdev, read_string) < 0) {
+ SLSI_ERR(sfdev, "send addts error\n");
+ kfree(read_string);
+ return -EINVAL;
+ }
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_send_delts_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = file->private_data;
+ static const char *extra_info = "";
+
+ return slsi_procfs_read_int(file, user_buf, count, ppos, sdev->tspec_error_code, extra_info);
+}
+
+static ssize_t slsi_procfs_send_delts_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sfdev = (struct slsi_dev *)file->private_data;
+ char *read_string = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!read_string) {
+ SLSI_ERR(sfdev, "Malloc for read_string failed\n");
+ return -ENOMEM;
+ }
+
+ sfdev->tspec_error_code = -1;
+
+ if (!count) {
+ kfree(read_string);
+ return 0;
+ }
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ /* to do: call to config_tspec() to configure a tspec field */
+ if (cac_ctrl_send_delts(sfdev, read_string) < 0) {
+ SLSI_ERR(sfdev, "send delts error\n");
+ kfree(read_string);
+ return -EINVAL;
+ }
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_del_tspec_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ static const char *extra_info = "Not implemented yet";
+ int value = 10;
+
+ return slsi_procfs_read_int(file, user_buf, count, ppos, value, extra_info);
+}
+
+static ssize_t slsi_procfs_del_tspec_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sfdev = (struct slsi_dev *)file->private_data;
+ char *read_string = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!read_string) {
+ SLSI_ERR(sfdev, "Malloc for read_string failed\n");
+ return -ENOMEM;
+ }
+
+ if (!count) {
+ kfree(read_string);
+ return 0;
+ }
+
+ simple_write_to_buffer(read_string, count, ppos, user_buf, count);
+ read_string[count] = '\0';
+
+ /* to do: call to config_tspec() to configure a tspec field */
+ if (cac_ctrl_delete_tspec(sfdev, read_string) < 0) {
+ SLSI_ERR(sfdev, "config tspec error\n");
+ kfree(read_string);
+ return -EINVAL;
+ }
+ kfree(read_string);
+ return count;
+}
+
+static ssize_t slsi_procfs_tput_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ int i;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
+ dev = sdev->netdev[i];
+ if (dev) {
+ ndev_vif = netdev_priv(dev);
+ pos += scnprintf(buf + pos, bufsz - pos, "%s:\t", dev->name);
+ if (ndev_vif->throughput_tx_bps < 1000)
+ pos += scnprintf(buf + pos, bufsz - pos, "TX:%u bps\t", ndev_vif->throughput_tx_bps);
+ else if ((ndev_vif->throughput_tx_bps >= 1000) && (ndev_vif->throughput_tx_bps < (1000 * 1000)))
+ pos += scnprintf(buf + pos, bufsz - pos, "TX:%u Kbps\t", (ndev_vif->throughput_tx_bps / 1000));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "TX:%u Mbps\t", (ndev_vif->throughput_tx_bps / (1000 * 1000)));
+
+ if (ndev_vif->throughput_rx_bps < 1000)
+ pos += scnprintf(buf + pos, bufsz - pos, "RX:%u bps\n", ndev_vif->throughput_rx_bps);
+ else if ((ndev_vif->throughput_rx_bps >= 1000) && (ndev_vif->throughput_rx_bps < (1000 * 1000)))
+ pos += scnprintf(buf + pos, bufsz - pos, "RX:%u Kbps\n", (ndev_vif->throughput_rx_bps / 1000));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "RX:%u Mbps\n", (ndev_vif->throughput_rx_bps / (1000 * 1000)));
+ }
+ }
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t slsi_procfs_tput_write(struct file *file, const char __user *user_buf, size_t len, loff_t *ppos)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+ char read_buf[2];
+
+ if (!sdev) {
+ SLSI_ERR(sdev, "invalid sdev\n");
+ return -ENOMEM;
+ }
+
+ if (!len || (len > sizeof(read_buf))) {
+ SLSI_ERR(sdev, "invalid len\n");
+ return -EINVAL;
+ }
+
+ simple_write_to_buffer(read_buf, len, ppos, user_buf, len);
+
+ switch (read_buf[0]) {
+ case '1':
+ if (!slsi_traffic_mon_is_running(sdev)) {
+ SLSI_DBG1(sdev, SLSI_HIP, "start Traffic monitor\n");
+ slsi_traffic_mon_client_register(sdev, sdev, 0, 0, 0, NULL);
+ }
+ break;
+ case '0':
+ SLSI_DBG1(sdev, SLSI_HIP, "stop Traffic monitor\n");
+ slsi_traffic_mon_client_unregister(sdev, sdev);
+ break;
+ default:
+ SLSI_DBG1(sdev, SLSI_HIP, "invalid value %c\n", read_buf[0]);
+ return -EINVAL;
+ }
+ return len;
+}
+
/* Count of procfs nodes currently held open; exposed via "fd_opened". */
static atomic_t fd_opened_count;

/* Increment the open-node counter (paired with slsi_procfs_dec_node()). */
void slsi_procfs_inc_node(void)
{
	atomic_inc(&fd_opened_count);
}
+
+void slsi_procfs_dec_node(void)
+{
+ if (0 == atomic_read(&fd_opened_count)) {
+ WARN_ON(1);
+ return;
+ }
+ atomic_dec(&fd_opened_count);
+}
+
+static ssize_t slsi_procfs_fd_opened_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[128];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ SLSI_UNUSED_PARAMETER(file);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", atomic_read(&fd_opened_count));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static int slsi_procfs_fcq_show(struct seq_file *m, void *v)
+{
+ struct slsi_dev *sdev = (struct slsi_dev *)m->private;
+ int ac;
+ s32 vif, i;
+
+ SLSI_UNUSED_PARAMETER(v);
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ for (vif = 1; vif <= CONFIG_SCSC_WLAN_MAX_INTERFACES; vif++) {
+ struct net_device *dev = slsi_get_netdev_locked(sdev, vif);
+ struct netdev_vif *ndev_vif;
+
+ if (!dev)
+ continue;
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* Unicast */
+ for (i = 0; i < SLSI_ADHOC_PEER_CONNECTIONS_MAX; i++) {
+ struct slsi_peer *peer = ndev_vif->peer_sta_record[i];
+ int smod = 0, scod = 0, qmod = 0, qcod = 0;
+ struct scsc_wifi_fcq_q_stat queue_stat;
+ u32 peer_ps_state_transitions = 0;
+ enum scsc_wifi_fcq_8021x_state cp_state;
+
+ if (!peer || !peer->valid)
+ continue;
+
+ if (scsc_wifi_fcq_stat_queueset(&peer->data_qs, &queue_stat, &smod, &scod, &cp_state, &peer_ps_state_transitions) != 0)
+ continue;
+
+ seq_printf(m, "|%-12s|%-6d|%-6s|\n%d). peer:%pM, qs:%2d, smod:%u, scod:%u, netq stops :%u, netq resumes :%u, PS transitions :%u Controlled port :%s\n",
+ netdev_name(dev),
+ vif,
+ "UCAST",
+ i + 1,
+ peer->address,
+ peer->queueset,
+ smod,
+ scod,
+ queue_stat.netq_stops,
+ queue_stat.netq_resumes,
+ peer_ps_state_transitions,
+ cp_state == SCSC_WIFI_FCQ_8021x_STATE_BLOCKED ? "Blocked" : "Opened");
+
+ seq_printf(m, " |%-12s|%-17s|%4s|%8s|%8s|%8s|%8s|%10s|%8s|\n",
+ "netdev",
+ "peer",
+ "AC index", "qcod", "qmod",
+ "nq_state", "nq_stop", "nq_resume",
+ "tq_state");
+
+ for (ac = 0; ac < SLSI_NETIF_Q_PER_PEER; ac++) {
+ if (scsc_wifi_fcq_stat_queue(&peer->data_qs.ac_q[ac].head,
+ &queue_stat,
+ &qmod, &qcod) == 0)
+ seq_printf(m, " |%-12s|%pM|%4d|%8u|%8u|%8u|%8u\n",
+ netdev_name(dev),
+ peer->address,
+ ac,
+ qcod,
+ qmod,
+ queue_stat.netq_stops,
+ queue_stat.netq_resumes);
+ else
+ break;
+ }
+ }
+
+ /* Groupcast */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+ int smod = 0, scod = 0, qmod = 0, qcod = 0;
+ struct scsc_wifi_fcq_q_stat queue_stat;
+ u32 peer_ps_state_transitions = 0;
+ enum scsc_wifi_fcq_8021x_state cp_state;
+
+ if (scsc_wifi_fcq_stat_queueset(&ndev_vif->ap.group_data_qs, &queue_stat, &smod, &scod, &cp_state, &peer_ps_state_transitions) != 0)
+ continue;
+
+ seq_printf(m, "|%-12s|%-6d|%-6s|\n%d). smod:%u, scod:%u, netq stops :%u, netq resumes :%u, PS transitions :%u Controlled port :%s\n",
+ netdev_name(dev),
+ vif,
+ "MCAST",
+ i + 1,
+ smod,
+ scod,
+ queue_stat.netq_stops,
+ queue_stat.netq_resumes,
+ peer_ps_state_transitions,
+ cp_state == SCSC_WIFI_FCQ_8021x_STATE_BLOCKED ? "Blocked" : "Opened");
+
+ seq_printf(m, " |%-12s|%4s|%8s|%8s|%8s|%8s|%10s|%8s|\n",
+ "netdev",
+ "AC index", "qcod", "qmod",
+ "nq_state", "nq_stop", "nq_resume",
+ "tq_state");
+
+ for (ac = 0; ac < SLSI_NETIF_Q_PER_PEER; ac++) {
+ if (scsc_wifi_fcq_stat_queue(&ndev_vif->ap.group_data_qs.ac_q[ac].head,
+ &queue_stat,
+ &qmod, &qcod) == 0)
+ seq_printf(m, " |%-12s|%4d|%8u|%8u|%8u|%8u\n",
+ netdev_name(dev),
+ ac,
+ qcod,
+ qmod,
+ queue_stat.netq_stops,
+ queue_stat.netq_resumes);
+ else
+ break;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ }
+
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+ return 0;
+}
+
/* seq_file show handler: dump (and then reset) the TCP ACK suppression
 * counters for every registered net_device.
 *
 * Note: reading this file is destructive - the stats are zeroed after
 * being printed, so consecutive reads report deltas.
 */
static int slsi_procfs_tcp_ack_suppression_show(struct seq_file *m, void *v)
{
	struct slsi_dev *sdev = (struct slsi_dev *)m->private;
	struct net_device *dev;
	struct netdev_vif *ndev_vif;
	int i;

	SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
	for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
		dev = sdev->netdev[i];
		if (dev) {
			ndev_vif = netdev_priv(dev);

			seq_printf(m, "%s: tack_acks=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_acks);
			seq_printf(m, "%s: tack_suppressed=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_suppressed);
			seq_printf(m, "%s: tack_sent=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_sent);
			seq_printf(m, "%s: tack_max=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_max);
			seq_printf(m, "%s: tack_timeout=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_timeout);
			/* NOTE(review): "tack_aged" is printed from the tack_ktime
			 * counter - looks like a label/field mismatch; confirm
			 * against the struct slsi_tcp_ack_stats definition.
			 */
			seq_printf(m, "%s: tack_aged=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_ktime);
			seq_printf(m, "%s: tack_dacks=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_dacks);
			seq_printf(m, "%s: tack_sacks=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_sacks);
			seq_printf(m, "%s: tack_delay_acks=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_delay_acks);
			seq_printf(m, "%s: tack_low_window=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_low_window);
			seq_printf(m, "%s: tack_ece=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_ece);
			seq_printf(m, "%s: tack_nocache=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_nocache);
			seq_printf(m, "%s: tack_norecord=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_norecord);
			seq_printf(m, "%s: tack_lastrecord=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_lastrecord);
			seq_printf(m, "%s: tack_searchrecord=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_searchrecord);
			seq_printf(m, "%s: tack_hasdata=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_hasdata);
			seq_printf(m, "%s: tack_psh=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_psh);
			seq_printf(m, "%s: tack_dropped=%u\n", dev->name, ndev_vif->tcp_ack_stats.tack_dropped);

			/* reset stats after it is read */
			memset(&ndev_vif->tcp_ack_stats, 0, sizeof(struct slsi_tcp_ack_stats));
		}
	}
	SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
	return 0;
}
+
+static ssize_t slsi_procfs_nan_mac_addr_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[20];
+ char nan_mac[ETH_ALEN];
+ int pos = 0;
+#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
+ struct slsi_dev *sdev = (struct slsi_dev *)file->private_data;
+
+ slsi_nan_get_mac(sdev, nan_mac);
+#else
+
+ SLSI_UNUSED_PARAMETER(file);
+ memset(nan_mac, 0, ETH_ALEN);
+#endif
+ pos = scnprintf(buf, sizeof(buf), "%pM", nan_mac);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+SLSI_PROCFS_SEQ_FILE_OPS(vifs);
+SLSI_PROCFS_SEQ_FILE_OPS(mac_addr);
+SLSI_PROCFS_WRITE_FILE_OPS(uapsd);
+SLSI_PROCFS_WRITE_FILE_OPS(ap_cert_disable_ht_vht);
+SLSI_PROCFS_RW_FILE_OPS(p2p_certif);
+SLSI_PROCFS_RW_FILE_OPS(create_tspec);
+SLSI_PROCFS_RW_FILE_OPS(confg_tspec);
+SLSI_PROCFS_RW_FILE_OPS(send_addts);
+SLSI_PROCFS_RW_FILE_OPS(send_delts);
+SLSI_PROCFS_RW_FILE_OPS(del_tspec);
+SLSI_PROCFS_RW_FILE_OPS(tput);
+SLSI_PROCFS_READ_FILE_OPS(fd_opened);
+SLSI_PROCFS_SEQ_FILE_OPS(build);
+SLSI_PROCFS_SEQ_FILE_OPS(status);
+SLSI_PROCFS_SEQ_FILE_OPS(fcq);
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+SLSI_PROCFS_READ_FILE_OPS(mutex_stats);
+#endif
+SLSI_PROCFS_READ_FILE_OPS(sta_bss);
+SLSI_PROCFS_READ_FILE_OPS(big_data);
+SLSI_PROCFS_READ_FILE_OPS(throughput_stats);
+SLSI_PROCFS_SEQ_FILE_OPS(tcp_ack_suppression);
+SLSI_PROCFS_READ_FILE_OPS(nan_mac_addr);
+
+
+int slsi_create_proc_dir(struct slsi_dev *sdev)
+{
+ char dir[16];
+ struct proc_dir_entry *parent;
+
+ (void)snprintf(dir, sizeof(dir), "driver/unifi%d", sdev->procfs_instance);
+ parent = proc_mkdir(dir, NULL);
+ if (parent) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 4, 0))
+ parent->data = sdev;
+#endif
+ sdev->procfs_dir = parent;
+
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, build, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, status, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, fcq, parent, S_IRUSR | S_IRGRP | S_IROTH);
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, vifs, parent, S_IRUSR | S_IRGRP);
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, mac_addr, parent, S_IRUSR | S_IRGRP | S_IROTH); /*Add S_IROTH permission so that android settings can access it*/
+ SLSI_PROCFS_ADD_FILE(sdev, uapsd, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, ap_cert_disable_ht_vht, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, p2p_certif, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, create_tspec, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, confg_tspec, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, send_addts, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, send_delts, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, del_tspec, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, tput, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, fd_opened, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
+ SLSI_PROCFS_ADD_FILE(sdev, mutex_stats, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+#endif
+ SLSI_PROCFS_ADD_FILE(sdev, sta_bss, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, big_data, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, throughput_stats, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ SLSI_PROCFS_SEQ_ADD_FILE(sdev, tcp_ack_suppression, sdev->procfs_dir, S_IRUSR | S_IRGRP);
+ SLSI_PROCFS_ADD_FILE(sdev, nan_mac_addr, parent, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+ return 0;
+ }
+
+err:
+ SLSI_DBG1(sdev, SLSI_HIP, "Failure in creation of proc directories\n");
+ return -EINVAL;
+}
+
/* Remove every procfs entry created for this instance and then the
 * "driver/unifi<N>" directory itself.  Safe to call when the directory
 * was never created (procfs_dir is NULL).
 */
void slsi_remove_proc_dir(struct slsi_dev *sdev)
{
	if (sdev->procfs_dir) {
		char dir[32];

		SLSI_PROCFS_REMOVE_FILE(build, sdev->procfs_dir);
		/* NOTE(review): "release" and "version" are not created by the
		 * visible slsi_create_proc_dir(); if nothing else adds them,
		 * these two removals target non-existent entries - confirm.
		 */
		SLSI_PROCFS_REMOVE_FILE(release, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(version, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(status, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(vifs, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(mac_addr, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(fcq, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(uapsd, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(ap_cert_disable_ht_vht, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(p2p_certif, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(create_tspec, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(confg_tspec, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(send_addts, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(send_delts, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(del_tspec, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(tput, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(fd_opened, sdev->procfs_dir);
#ifdef CONFIG_SCSC_WLAN_MUTEX_DEBUG
		SLSI_PROCFS_REMOVE_FILE(mutex_stats, sdev->procfs_dir);
#endif
		SLSI_PROCFS_REMOVE_FILE(sta_bss, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(big_data, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(throughput_stats, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(tcp_ack_suppression, sdev->procfs_dir);
		SLSI_PROCFS_REMOVE_FILE(nan_mac_addr, sdev->procfs_dir);

		(void)snprintf(dir, sizeof(dir), "driver/unifi%d", sdev->procfs_instance);
		remove_proc_entry(dir, NULL);
		sdev->procfs_dir = NULL;
	}
}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef __SLSI_PROCFS_H__
+#define __SLSI_PROCFS_H__
+
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+
+struct slsi_dev;
+struct slsi_vif;
+
#ifdef CONFIG_SCSC_WLAN_ANDROID
/* On Android the proc entries are chowned to the wifi user/group so the
 * HAL can access them without root privileges.
 */
# ifndef AID_WIFI
# define AID_WIFI 1010
# endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
/* >= 3.10: ownership is set through proc_set_user() with kuid_t/kgid_t. */
#define SLSI_PROCFS_SET_UID_GID(_entry) \
	do { \
		kuid_t proc_kuid = KUIDT_INIT(AID_WIFI); \
		kgid_t proc_kgid = KGIDT_INIT(AID_WIFI); \
		proc_set_user(_entry, proc_kuid, proc_kgid); \
	} while (0)
#else
/* Older kernels expose uid/gid directly on proc_dir_entry. */
#define SLSI_PROCFS_SET_UID_GID(entry) \
	do { \
		(entry)->uid = AID_WIFI; \
		(entry)->gid = AID_WIFI; \
	} while (0)
#endif
#else
/* Non-Android builds leave the default root ownership. */
#define SLSI_PROCFS_SET_UID_GID(entry)
#endif

/* Fetch the private data pointer stashed at entry-creation time; the
 * accessor changed in kernel 3.10.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define SLSI_PDE_DATA(inode) PDE_DATA(inode)
#else
#define SLSI_PDE_DATA(inode) (PDE(inode)->data)
#endif

/* procfs operations */
int slsi_create_proc_dir(struct slsi_dev *sdev);
void slsi_remove_proc_dir(struct slsi_dev *sdev);

int slsi_procfs_open_file_generic(struct inode *inode, struct file *file);

/* Declare a seq_file-backed proc entry: expects a matching
 * slsi_procfs_<name>_show() and generates the open hook plus fops.
 */
#define SLSI_PROCFS_SEQ_FILE_OPS(name)                                            \
	static int slsi_procfs_ ## name ## _show(struct seq_file *m, void *v);    \
	static int slsi_procfs_ ## name ## _open(struct inode *inode, struct file *file) \
	{                                                                         \
		return single_open(file, slsi_procfs_ ## name ## _show, SLSI_PDE_DATA(inode)); \
	}                                                                         \
	static const struct file_operations slsi_procfs_ ## name ## _fops = {     \
		.open = slsi_procfs_ ## name ## _open,                            \
		.read = seq_read,                                                 \
		.llseek = seq_lseek,                                              \
		.release = single_release,                                        \
	}

/* Create a seq_file proc entry under @parent; jumps to the caller's "err"
 * label on failure (the caller must define it).
 */
#define SLSI_PROCFS_SEQ_ADD_FILE(_sdev, name, parent, mode)                       \
	do {                                                                      \
		struct proc_dir_entry *entry;                                     \
		entry = proc_create_data(# name, mode, parent, &slsi_procfs_ ## name ## _fops, _sdev); \
		if (!entry) {                                                     \
			goto err;                                                 \
		}                                                                 \
		SLSI_PROCFS_SET_UID_GID(entry);                                   \
	} while (0)

/* Declare fops for a read-only raw proc entry backed by
 * slsi_procfs_<name>_read().
 */
#define SLSI_PROCFS_READ_FILE_OPS(name)                                           \
	static ssize_t slsi_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
	static const struct file_operations slsi_procfs_ ## name ## _fops = {     \
		.read = slsi_procfs_ ## name ## _read,                            \
		.open = slsi_procfs_open_file_generic,                            \
		.llseek = generic_file_llseek                                     \
	}

/* Declare fops for a write-only raw proc entry backed by
 * slsi_procfs_<name>_write().
 */
#define SLSI_PROCFS_WRITE_FILE_OPS(name)                                          \
	static ssize_t slsi_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
	static const struct file_operations slsi_procfs_ ## name ## _fops = {     \
		.write = slsi_procfs_ ## name ## _write,                          \
		.open = slsi_procfs_open_file_generic,                            \
		.llseek = generic_file_llseek                                     \
	}

/* Declare fops for a read/write raw proc entry backed by both
 * slsi_procfs_<name>_read() and slsi_procfs_<name>_write().
 */
#define SLSI_PROCFS_RW_FILE_OPS(name)                                             \
	static ssize_t slsi_procfs_ ## name ## _write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); \
	static ssize_t slsi_procfs_ ## name ## _read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); \
	static const struct file_operations slsi_procfs_ ## name ## _fops = {     \
		.read = slsi_procfs_ ## name ## _read,                            \
		.write = slsi_procfs_ ## name ## _write,                          \
		.open = slsi_procfs_open_file_generic,                            \
		.llseek = generic_file_llseek                                     \
	}

/* Create a raw proc entry under @parent.  NOTE(review): the >= 3.10 variant
 * silently ignores creation failure (and passes a possibly-NULL entry to
 * SLSI_PROCFS_SET_UID_GID), while the legacy variant jumps to "err".
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
#define SLSI_PROCFS_ADD_FILE(_sdev, name, parent, mode)                           \
	do {                                                                      \
		struct proc_dir_entry *entry = proc_create_data(# name, mode, parent, &slsi_procfs_ ## name ## _fops, _sdev); \
		SLSI_PROCFS_SET_UID_GID(entry);                                   \
	} while (0)
#else
#define SLSI_PROCFS_ADD_FILE(_sdev, name, parent, mode)                           \
	do {                                                                      \
		struct proc_dir_entry *entry;                                     \
		entry = create_proc_entry(# name, mode, parent);                  \
		if (entry) {                                                      \
			entry->proc_fops = &slsi_procfs_ ## name ## _fops;        \
			entry->data = _sdev;                                      \
			SLSI_PROCFS_SET_UID_GID(entry);                           \
		} else {                                                          \
			goto err;                                                 \
		}                                                                 \
	} while (0)
#endif
/* Remove a proc entry previously added under @parent. */
#define SLSI_PROCFS_REMOVE_FILE(name, parent) remove_proc_entry(# name, parent)

void slsi_procfs_inc_node(void);
void slsi_procfs_dec_node(void);

#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/time.h>
+
+#include "debug.h"
+#include "dev.h"
+#include "mgt.h"
+#include "mlme.h"
+#include "src_sink.h"
+#include "const.h"
+#include "ba.h"
+#include "mib.h"
+#include "cac.h"
+#include "nl80211_vendor.h"
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+#include <scsc/scsc_log_collector.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#include "porting_imx.h"
+#endif
+struct ieee80211_channel *slsi_find_scan_channel(struct slsi_dev *sdev, struct ieee80211_mgmt *mgmt, size_t mgmt_len, u16 freq)
+{
+ int ielen = mgmt_len - (mgmt->u.beacon.variable - (u8 *)mgmt);
+ const u8 *scan_ds = cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable, ielen);
+ const u8 *scan_ht = cfg80211_find_ie(WLAN_EID_HT_OPERATION, mgmt->u.beacon.variable, ielen);
+ u8 chan = 0;
+
+ /* Use the DS or HT channel where possible as the Offchannel results mean the RX freq is not reliable */
+ if (scan_ds)
+ chan = scan_ds[2];
+ else if (scan_ht)
+ chan = scan_ht[2];
+
+ if (chan) {
+ enum nl80211_band band = NL80211_BAND_2GHZ;
+
+ if (chan > 14)
+ band = NL80211_BAND_5GHZ;
+ freq = (u16)ieee80211_channel_to_frequency(chan, band);
+ }
+ if (!freq)
+ return NULL;
+
+ return ieee80211_get_channel(sdev->wiphy, freq);
+}
+
+static struct ieee80211_mgmt *slsi_rx_scan_update_ssid(struct slsi_dev *sdev, struct net_device *dev,
+ struct ieee80211_mgmt *mgmt, size_t mgmt_len, size_t *new_len,
+ u16 freq)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u8 *new_mgmt;
+ int offset;
+ const u8 *mgmt_pos;
+ const u8 *ssid;
+ int i;
+ int band;
+
+ if (!SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
+ return NULL;
+
+ /* update beacon, not probe response as probe response will always have actual ssid.*/
+ if (!ieee80211_is_beacon(mgmt->frame_control))
+ return NULL;
+
+ ssid = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable,
+ mgmt_len - (mgmt->u.beacon.variable - (u8 *)mgmt));
+ if (!ssid) {
+ SLSI_WARN(sdev, "beacon with NO SSID IE\n");
+ return NULL;
+ }
+ /* update beacon only if hidden ssid. So, Skip if not hidden ssid*/
+ if ((ssid[1] > 0) && (ssid[2] != '\0'))
+ return NULL;
+
+ band = (freq / 1000) == 2 ? SLSI_FREQ_BAND_2GHZ : SLSI_FREQ_BAND_5GHZ;
+
+ /* check we have a known ssid for a bss */
+ for (i = 0; i < SLSI_SCAN_SSID_MAP_MAX; i++) {
+ if (SLSI_ETHER_EQUAL(sdev->ssid_map[i].bssid, mgmt->bssid) && (sdev->ssid_map[i].band == band)) {
+ new_mgmt = kmalloc(mgmt_len + 34, GFP_KERNEL);
+ if (!new_mgmt) {
+ SLSI_ERR_NODEV("malloc failed(len:%ld)\n", mgmt_len + 34);
+ return NULL;
+ }
+
+ /* copy frame till ssid element */
+ memcpy(new_mgmt, mgmt, ssid - (u8 *)mgmt);
+ offset = ssid - (u8 *)mgmt;
+ /* copy bss ssid into new frame */
+ new_mgmt[offset++] = WLAN_EID_SSID;
+ new_mgmt[offset++] = sdev->ssid_map[i].ssid_len;
+ memcpy(new_mgmt + offset, sdev->ssid_map[i].ssid, sdev->ssid_map[i].ssid_len);
+ offset += sdev->ssid_map[i].ssid_len;
+ /* copy rest of the frame following ssid */
+ mgmt_pos = ssid + ssid[1] + 2;
+ memcpy(new_mgmt + offset, mgmt_pos, mgmt_len - (mgmt_pos - (u8 *)mgmt));
+ offset += mgmt_len - (mgmt_pos - (u8 *)mgmt);
+ *new_len = offset;
+
+ return (struct ieee80211_mgmt *)new_mgmt;
+ }
+ }
+ return NULL;
+}
+
/* Forward one scan result (beacon/probe response) to cfg80211.
 *
 * Rewrites the frame timestamp with device uptime in microseconds, maps the
 * firmware channel to an ieee80211_channel, optionally substitutes a hidden
 * SSID (see slsi_rx_scan_update_ssid), then informs cfg80211.  Consumes
 * @skb in all cases.
 */
void slsi_rx_scan_pass_to_cfg80211(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	size_t mgmt_len = fapi_get_mgmtlen(skb);
	/* RSSI is reported in dB; cfg80211 expects mBm (dBm * 100). */
	s32 signal = fapi_get_s16(skb, u.mlme_scan_ind.rssi) * 100;
	u16 freq = SLSI_FREQ_FW_TO_HOST(fapi_get_u16(skb, u.mlme_scan_ind.channel_frequency));
	struct ieee80211_channel *channel = slsi_find_scan_channel(sdev, mgmt, mgmt_len, freq);
	struct timespec uptime;

	/* NOTE(review): get_monotonic_boottime() is deprecated in newer
	 * kernels (ktime_get_boottime_ts64) - fine for this kernel baseline.
	 */
	get_monotonic_boottime(&uptime);
	SLSI_UNUSED_PARAMETER(dev);

	/* update timestamp with device uptime in micro sec */
	mgmt->u.beacon.timestamp = (uptime.tv_sec * 1000000) + (uptime.tv_nsec / 1000);

	if (channel) {
		struct cfg80211_bss *bss;
		struct ieee80211_mgmt *mgmt_new;
		size_t mgmt_new_len = 0;

		/* Substitute the real SSID for hidden-SSID beacons when known. */
		mgmt_new = slsi_rx_scan_update_ssid(sdev, dev, mgmt, mgmt_len, &mgmt_new_len, freq);
		if (mgmt_new)
			bss = cfg80211_inform_bss_frame(sdev->wiphy, channel, mgmt_new, mgmt_new_len, signal, GFP_KERNEL);
		else
			bss = cfg80211_inform_bss_frame(sdev->wiphy, channel, mgmt, mgmt_len, signal, GFP_KERNEL);

		/* bss may be NULL on failure; presumably the put handles that - confirm. */
		slsi_cfg80211_put_bss(sdev->wiphy, bss);
		kfree(mgmt_new);
	} else {
		SLSI_NET_DBG1(dev, SLSI_MLME, "No Channel info found for freq:%d\n", freq);
	}

	slsi_kfree_skb(skb);
}
+
/* Insert one scan result skb into the per-scan list, which is kept sorted
 * by RSSI in descending order and holds at most one entry per (bssid, band)
 * with up to one stored beacon and one probe response each.
 *
 * Takes ownership of @skb: it is either stored in a list node or freed.
 * Returns 0 on success, -1 when a new node cannot be allocated.
 */
static int slsi_add_to_scan_list(struct slsi_dev *sdev, struct netdev_vif *ndev_vif,
				 struct sk_buff *skb, const u8 *scan_ssid, u16 scan_id)
{
	struct slsi_scan_result *head;
	struct slsi_scan_result *scan_result, *current_result, *prev = NULL;
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	bool found = 0, skb_stored = 0;
	int current_rssi, current_band;

	SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
	head = ndev_vif->scan[scan_id].scan_results;
	scan_result = head;
	current_rssi = fapi_get_s16(skb, u.mlme_scan_ind.rssi);
	/* Frequency in firmware units; /2000 == 2 selects the 2.4 GHz band. */
	current_band = (fapi_get_s16(skb,
				     u.mlme_scan_ind.channel_frequency) /
			2000) == 2 ? SLSI_FREQ_BAND_2GHZ : SLSI_FREQ_BAND_5GHZ;

	/* Pass 1: look for an existing entry for this (bssid, band). */
	while (scan_result) {
		if (SLSI_ETHER_EQUAL(scan_result->bssid, mgmt->bssid) && (scan_result->band == current_band)) {
			/*entry exists for bssid*/
			/* Fill whichever frame slot (beacon/probe resp) is empty. */
			if (!scan_result->probe_resp && ieee80211_is_probe_resp(mgmt->frame_control)) {
				scan_result->probe_resp = skb;
				skb_stored = 1;
			} else if (!scan_result->beacon && ieee80211_is_beacon(mgmt->frame_control)) {
				scan_result->beacon = skb;
				skb_stored = 1;
				/* Zero-length or NUL first byte => hidden SSID beacon. */
				if (!scan_ssid || !scan_ssid[1] || scan_ssid[2] == '\0')
					scan_result->hidden = 1;
			}

			/* Use the best RSSI value from all beacons/probe resp for a bssid. If no improvment
			 * in RSSI and beacon and probe response exist, ignore this result
			 */
			if (current_rssi < scan_result->rssi) {
				if (!skb_stored)
					slsi_kfree_skb(skb);
				SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
				return 0;
			}

			scan_result->rssi = current_rssi;
			/* Both slots already occupied: replace the matching frame. */
			if (!skb_stored) {
				if (ieee80211_is_beacon(mgmt->frame_control)) {
					slsi_kfree_skb(scan_result->beacon);
					scan_result->beacon = skb;
				} else {
					slsi_kfree_skb(scan_result->probe_resp);
					scan_result->probe_resp = skb;
				}
			}

			/*No change in position if rssi is still less than prev node*/
			if (!prev || (prev->rssi > current_rssi)) {
				SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
				return 0;
			}

			/*remove and re-insert*/
			found = 1;
			prev->next = scan_result->next;
			scan_result->next = NULL;
			current_result = scan_result;

			break;
		}

		prev = scan_result;
		scan_result = scan_result->next;
	}

	if (!found) {
		/*add_new node*/
		current_result = kzalloc(sizeof(*current_result), GFP_KERNEL);
		if (!current_result) {
			SLSI_ERR(sdev, "Failed to allocate node for scan result\n");
			SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
			return -1;
		}
		SLSI_ETHER_COPY(current_result->bssid, mgmt->bssid);

		current_result->rssi = current_rssi;
		current_result->band = current_band;
		if (ieee80211_is_beacon(mgmt->frame_control)) {
			current_result->beacon = skb;
			/* Zero-length or NUL first byte => hidden SSID beacon. */
			if (!scan_ssid || !scan_ssid[1] || scan_ssid[2] == '\0')
				current_result->hidden = 1;
		} else {
			current_result->probe_resp = skb;
		}
		current_result->next = NULL;

		if (!head) { /*first node*/
			ndev_vif->scan[scan_id].scan_results = current_result;
			SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
			return 0;
		}
	}

	/* Pass 2: (re-)insert current_result keeping descending-RSSI order. */
	scan_result = head;
	prev = NULL;
	/* insert based on rssi in descending order*/
	while (scan_result) {
		if (current_result->rssi > scan_result->rssi) {
			current_result->next = scan_result;
			if (prev)
				prev->next = current_result;
			else
				ndev_vif->scan[scan_id].scan_results = current_result;
			break;
		}
		prev = scan_result;
		scan_result = scan_result->next;
	}
	if (!scan_result) {
		/*insert at the end*/
		prev->next = current_result;
		current_result->next = NULL;
	}

	SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
	return 0;
}
+
+static int slsi_add_to_p2p_scan_list(struct slsi_dev *sdev, struct netdev_vif *ndev_vif,
+ struct sk_buff *skb, u16 scan_id)
+{
+ struct slsi_scan_result *current_result;
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+ struct slsi_scan *scan;
+
+ /*add_new node*/
+ current_result = kzalloc(sizeof(*current_result), GFP_KERNEL);
+ if (!current_result) {
+ SLSI_ERR(sdev, "Failed to allocate node for scan result\n");
+ return -1;
+ }
+ SLSI_ETHER_COPY(current_result->bssid, mgmt->bssid);
+
+ SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
+ scan = &ndev_vif->scan[scan_id];
+ if (ieee80211_is_beacon(mgmt->frame_control))
+ current_result->beacon = skb;
+ else
+ current_result->probe_resp = skb;
+
+ if (!scan->scan_results) {
+ scan->scan_results = current_result;
+ current_result->next = NULL;
+ } else {
+ current_result->next = scan->scan_results;
+ scan->scan_results = current_result;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
+
+ return 0;
+}
+
/* Handle an MLME scan indication (a single scan result frame) from firmware.
 *
 * Takes ownership of @skb on the paths that store or free it: GSCAN results
 * are handed to the GSCAN handler, connect/roam scan data is stashed on
 * ndev_vif->sta.mlme_scan_ind_skb, and normal results are appended to the
 * per-scan cached result list.
 * NOTE(review): if no scan context matches (final else-if fails), the skb is
 * neither stored nor freed here -- verify ownership with the rx dispatcher.
 */
void slsi_rx_scan_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	u16 scan_id = fapi_get_u16(skb, u.mlme_scan_ind.scan_id);
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	size_t mgmt_len = fapi_get_mgmtlen(skb);
	/* ieee80211_mgmt has the same layout for beacon and probe_resp, so the
	 * probe_resp variable-IE offset is used for both frame types.
	 */
	size_t ie_len = mgmt_len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
	const u8 *scan_ssid = NULL;

#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
	if (slsi_is_gscan_id(scan_id)) {
		SLSI_NET_DBG3(dev, SLSI_GSCAN, "scan_id:%#x bssid:%pM\n", scan_id, fapi_get_mgmt(skb)->bssid);
		SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
		slsi_gscan_handle_scan_result(sdev, dev, skb, scan_id, false);
		SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
		return;
	}
#endif

	scan_ssid = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.probe_resp.variable, ie_len);

	if (sdev->p2p_certif && (ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) && (scan_id == (ndev_vif->ifnum << 8 | SLSI_SCAN_HW_ID))) {
		/* When supplicant receives a peer GO probe response with selected registrar set and group capability as 0,
		 * which is invalid, it is unable to store persistent network block. Hence such probe response is getting ignored here.
		 * This is mainly for an inter-op with Realtek P2P GO in P2P certification
		 */
		if (scan_ssid && scan_ssid[1] > 7) {
			const u8 *p2p_ie = NULL;

			p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, mgmt->u.probe_resp.variable, ie_len);
#define P2P_GROUP_CAPAB_PERSISTENT_GROUP BIT(1)
			/* NOTE(review): p2p_ie[10] is presumably the group
			 * capability byte of the P2P capability attribute;
			 * confirm attribute layout and that the IE is long
			 * enough before indexing.
			 */
			if (p2p_ie && !(p2p_ie[10] & P2P_GROUP_CAPAB_PERSISTENT_GROUP)) {
				SLSI_NET_INFO(dev, "Ignoring a peer GO probe response with group_capab as 0\n");
				slsi_kfree_skb(skb);
				return;
			}
		}
	}

	/* Low byte selects the local scan slot (e.g. SLSI_SCAN_HW_ID). */
	scan_id = (scan_id & 0xFF);

	if (WARN_ON(scan_id >= SLSI_SCAN_MAX)) {
		slsi_kfree_skb(skb);
		return;
	}

	/* Blocking scans already taken scan mutex.
	 * So scan mutex only incase of non blocking scans.
	 */
	if (!ndev_vif->scan[scan_id].is_blocking_scan)
		SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);

	if (fapi_get_vif(skb) != 0 && fapi_get_u16(skb, u.mlme_scan_ind.scan_id) == 0) {
		/* Connect/Roaming scan data : Save for processing later */
		SLSI_NET_DBG1(dev, SLSI_MLME, "Connect/Roaming scan indication received, bssid:%pM\n", fapi_get_mgmt(skb)->bssid);
		/* Free any previously stashed result before replacing it. */
		slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
		ndev_vif->sta.mlme_scan_ind_skb = skb;
	} else if (ndev_vif->scan[scan_id].scan_req || ndev_vif->scan[scan_id].sched_req ||
		   ndev_vif->scan[scan_id].acs_request ||
		   ndev_vif->scan[SLSI_SCAN_HW_ID].is_blocking_scan) {
		slsi_roam_channel_cache_add(sdev, dev, skb);
		if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
			slsi_add_to_scan_list(sdev, ndev_vif, skb, scan_ssid, scan_id);
		else
			slsi_add_to_p2p_scan_list(sdev, ndev_vif, skb, scan_id);
	}

	if (!ndev_vif->scan[scan_id].is_blocking_scan)
		SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
}
+
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
/* Handle a WIPS beacon-reporting abort event from firmware.
 *
 * Clears the local WIPS-running flag and forwards the (offset-adjusted)
 * abort reason to userspace as a vendor event.
 * NOTE(review): @skb is not freed here -- presumably the rx dispatch path
 * owns and frees it; confirm against the caller.
 */
void slsi_rx_beacon_reporting_event_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	/* Firmware reports the reason with a fixed offset applied; remove it. */
	u16 reason_code = fapi_get_u16(skb, u.mlme_beacon_reporting_event_ind.abort_reason) -
			  SLSI_FORWARD_BEACON_ABORT_REASON_OFFSET;
	int ret = 0;

	if (!ndev_vif->is_wips_running) {
		SLSI_ERR(sdev, "WIPS is not running. Ignore beacon_reporting_event_ind(%u)\n", reason_code);
		return;
	}

	ndev_vif->is_wips_running = false;

	/* Known reasons are logged at info level, anything else as an error;
	 * the reason is forwarded to userspace either way.
	 */
	if (reason_code >= SLSI_FORWARD_BEACON_ABORT_REASON_UNSPECIFIED &&
	    reason_code <= SLSI_FORWARD_BEACON_ABORT_REASON_SUSPENDED) {
		SLSI_INFO(sdev, "received abort_event from FW with reason(%u)\n", reason_code);
	} else {
		SLSI_ERR(sdev, "received abort_event unsupporting reason(%u)\n", reason_code);
	}

	ret = slsi_send_forward_beacon_abort_vendor_event(sdev, reason_code);
	if (ret)
		SLSI_ERR(sdev, "Failed to send forward_beacon_abort_event(err=%d)\n", ret);
}
+
+void slsi_handle_wips_beacon(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb,
+ struct ieee80211_mgmt *mgmt, int mgmt_len)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ size_t ie_len = mgmt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *ssid_ie = NULL;
+ const u8 *scan_ssid = NULL;
+ const u8 *scan_bssid = NULL;
+ u16 beacon_int = 0;
+ u64 timestamp = 0;
+ int ssid_len = 0;
+ struct timespec sys_time;
+ int ret = 0;
+
+ u8 channel = (u8)(ndev_vif->chan->hw_value);
+
+ get_monotonic_boottime(&sys_time);
+ scan_bssid = fapi_get_mgmt(skb)->bssid;
+
+ ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable, ie_len);
+ ssid_len = ssid_ie[1];
+ scan_ssid = &ssid_ie[2];
+ beacon_int = mgmt->u.beacon.beacon_int;
+ timestamp = mgmt->u.beacon.timestamp;
+
+ SLSI_NET_DBG2(dev, SLSI_RX,
+ "forward_beacon from bssid:%pM beacon_int:%u timestamp:%llu system_time:%llu\n",
+ fapi_get_mgmt(skb)->bssid, beacon_int, timestamp,
+ (u64)TIMESPEC_TO_US(sys_time));
+
+ ret = slsi_send_forward_beacon_vendor_event(sdev, scan_ssid, ssid_len, scan_bssid,
+ channel, beacon_int, timestamp,
+ (u64)TIMESPEC_TO_US(sys_time));
+ if (ret)
+ SLSI_ERR(sdev, "Failed to forward beacon_event\n");
+}
+#endif
+
/* Maintain the hidden-SSID map (sdev->ssid_map) from the latest scan.
 *
 * The map remembers the real SSID of hidden APs (learned from probe
 * responses, or from the currently connected BSS) keyed by BSSID + band.
 * Pass 1 ages out entries whose AP disappeared or is no longer hidden;
 * pass 2 refreshes existing entries and adds new ones for hidden APs
 * whose SSID is known.
 *
 * Caller must hold vif_mutex and scan_result_mutex (asserted below).
 */
static void slsi_scan_update_ssid_map(struct slsi_dev *sdev, struct net_device *dev, u16 scan_id)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct ieee80211_mgmt *mgmt;
	const u8 *ssid_ie = NULL, *connected_ssid = NULL;
	int i, found = 0, is_connected = 0;
	struct slsi_scan_result *scan_result = NULL;
	int band;

	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
	WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->scan_result_mutex));

	if (ndev_vif->activated && ndev_vif->vif_type == FAPI_VIFTYPE_STATION && ndev_vif->sta.sta_bss) {
		/* center_freq is in MHz; /1000 == 2 distinguishes 2.4 GHz from 5 GHz.
		 * Note: band (and connected_ssid) are only read below when
		 * is_connected is set, so they stay unused otherwise.
		 */
		band = (ndev_vif->sta.sta_bss->channel->center_freq /
			1000) == 2 ? SLSI_FREQ_BAND_2GHZ : SLSI_FREQ_BAND_5GHZ;
		is_connected = 1;
		connected_ssid = cfg80211_find_ie(WLAN_EID_SSID, ndev_vif->sta.sta_bss->ies->data, ndev_vif->sta.sta_bss->ies->len);
	}

	/* sanitize map: [remove any old entries] */
	for (i = 0; i < SLSI_SCAN_SSID_MAP_MAX; i++) {
		found = 0;
		if (!sdev->ssid_map[i].ssid_len)
			continue;

		/* We are connected to this hidden AP. So no need to check if this AP is present in scan results */
		if (is_connected && SLSI_ETHER_EQUAL(ndev_vif->sta.sta_bss->bssid, sdev->ssid_map[i].bssid) &&
		    (sdev->ssid_map[i].band == band))
			continue;

		/* If this entry AP is found to be non-hidden, remove entry. */
		scan_result = ndev_vif->scan[scan_id].scan_results;
		while (scan_result) {
			if (SLSI_ETHER_EQUAL(sdev->ssid_map[i].bssid, scan_result->bssid) &&
			    (sdev->ssid_map[i].band == scan_result->band)) {
				/* AP is no more hidden. OR AP is hidden but did not
				 * receive probe resp. Go for expiry.
				 */
				if (!scan_result->hidden || (scan_result->hidden && !scan_result->probe_resp))
					sdev->ssid_map[i].age = SLSI_SCAN_SSID_MAP_EXPIRY_AGE;
				else
					found = 1;
				break;
			}
			scan_result = scan_result->next;
		}

		/* Entry not confirmed this scan: age it, and drop it once it
		 * passes the expiry age.
		 */
		if (!found) {
			sdev->ssid_map[i].age++;
			if (sdev->ssid_map[i].age > SLSI_SCAN_SSID_MAP_EXPIRY_AGE) {
				sdev->ssid_map[i].ssid_len = 0;
				sdev->ssid_map[i].age = 0;
			}
		}
	}

	scan_result = ndev_vif->scan[scan_id].scan_results;
	/* update/add hidden bss with known ssid */
	while (scan_result) {
		ssid_ie = NULL;

		if (scan_result->hidden) {
			if (is_connected && SLSI_ETHER_EQUAL(ndev_vif->sta.sta_bss->bssid, scan_result->bssid) &&
			    (scan_result->band == band)) {
				/* SSID of the hidden AP we are connected to is known. */
				ssid_ie = connected_ssid;
			} else if (scan_result->probe_resp) {
				/* A probe response reveals the real SSID of a hidden AP. */
				mgmt = fapi_get_mgmt(scan_result->probe_resp);
				ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, mgmt->u.beacon.variable, fapi_get_mgmtlen(scan_result->probe_resp) - (mgmt->u.beacon.variable - (u8 *)mgmt));
			}
		}

		if (!ssid_ie) {
			scan_result = scan_result->next;
			continue;
		}

		found = 0;
		/* if this bss is in map, update map */
		for (i = 0; i < SLSI_SCAN_SSID_MAP_MAX; i++) {
			if (!sdev->ssid_map[i].ssid_len)
				continue;
			if (SLSI_ETHER_EQUAL(scan_result->bssid, sdev->ssid_map[i].bssid) &&
			    (scan_result->band == sdev->ssid_map[i].band)) {
				sdev->ssid_map[i].ssid_len = ssid_ie[1];
				memcpy(sdev->ssid_map[i].ssid, &ssid_ie[2], ssid_ie[1]);
				found = 1;
				break;
			}
		}
		if (!found) {
			/* add a new entry in map */
			for (i = 0; i < SLSI_SCAN_SSID_MAP_MAX; i++) {
				if (sdev->ssid_map[i].ssid_len)
					continue;
				SLSI_ETHER_COPY(sdev->ssid_map[i].bssid, scan_result->bssid);
				sdev->ssid_map[i].age = 0;
				sdev->ssid_map[i].ssid_len = ssid_ie[1];
				sdev->ssid_map[i].band = scan_result->band;
				memcpy(sdev->ssid_map[i].ssid, &ssid_ie[2], ssid_ie[1]);
				break;
			}
		}
		scan_result = scan_result->next;
	}
}
+
/* Complete the scan identified by @scan_id on @dev.
 *
 * Flushes the cached results to cfg80211 (capped for the WLAN vif),
 * updates the hidden-SSID map, prunes the roam channel cache, and issues
 * the appropriate cfg80211 completion (scan_done for HW scans, sched scan
 * results notification for scheduled scans).
 */
void slsi_scan_complete(struct slsi_dev *sdev, struct net_device *dev, u16 scan_id, bool aborted)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct sk_buff *scan;
	int count = 0;
	/* result_count stays NULL for non-WLAN vifs; it is only dereferenced
	 * below under the same SLSI_IS_VIF_INDEX_WLAN() guard that sets it.
	 */
	int *result_count = NULL, max_count = 0;
	struct cfg80211_scan_info info = {.aborted = aborted};
	int scan_results_count = 0;
	int more_than_max_count = 0;

	if (WARN_ON(scan_id >= SLSI_SCAN_MAX))
		return;

	if (scan_id == SLSI_SCAN_HW_ID && !ndev_vif->scan[scan_id].scan_req)
		return;

	if (WARN_ON(scan_id == SLSI_SCAN_SCHED_ID && !ndev_vif->scan[scan_id].sched_req))
		return;

	SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
	if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
		slsi_scan_update_ssid_map(sdev, dev, scan_id);
		result_count = &count;
		max_count = slsi_dev_get_scan_result_count();
	}
	scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[scan_id], result_count);
	while (scan) {
		scan_results_count++;
		/* skb freed inside slsi_rx_scan_pass_to_cfg80211 */
		slsi_rx_scan_pass_to_cfg80211(sdev, dev, scan);

		/* Drop any remaining cached results once the cap is reached. */
		if ((SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) && (*result_count >= max_count)) {
			more_than_max_count = 1;
			slsi_purge_scan_results_locked(ndev_vif, scan_id);
			break;
		}
		scan = slsi_dequeue_cached_scan_result(&ndev_vif->scan[scan_id], result_count);
	}
	SLSI_INFO(sdev, "Scan count:%d APs\n", scan_results_count);
	SLSI_NET_DBG3(dev, SLSI_MLME, "interface:%d, scan_id:%d,%s\n", ndev_vif->ifnum, scan_id,
		      more_than_max_count ? "Scan results overflow" : "");
	slsi_roam_channel_cache_prune(dev, SLSI_ROAMING_CHANNEL_CACHE_TIMEOUT);

	if (scan_id == SLSI_SCAN_HW_ID) {
		if (SLSI_IS_VIF_INDEX_P2P(ndev_vif) && (!SLSI_IS_P2P_GROUP_STATE(sdev))) {
			/* Check for unsync vif as it could be present during the cycle of social channel scan and listen */
			if (ndev_vif->activated)
				SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
			else
				SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_NO_VIF);
		}
		/* cfg80211_scan_done() signature changed in kernel 4.9. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
		cfg80211_scan_done(ndev_vif->scan[scan_id].scan_req, &info);
#else
		cfg80211_scan_done(ndev_vif->scan[scan_id].scan_req, aborted);
#endif

		ndev_vif->scan[scan_id].scan_req = NULL;
		ndev_vif->scan[scan_id].requeue_timeout_work = false;
	}

	/* Scheduled scan: only notify cfg80211 when something was found. */
	if (scan_id == SLSI_SCAN_SCHED_ID && scan_results_count > 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
		cfg80211_sched_scan_results(sdev->wiphy, ndev_vif->scan[scan_id].sched_req->reqid);
#else
		cfg80211_sched_scan_results(sdev->wiphy);
#endif
	SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
}
+
/* Select the best 2.4 GHz channel for ACS from the aggregated scan data.
 *
 * For each candidate channel an adjacent-RSSI factor (sum over +/-2
 * neighbouring channels) and, where QBSS load IEs were seen, an average
 * channel utilisation are computed. If every AP reported BSS load, the
 * least-utilised channel wins; otherwise the channel with the lowest
 * adjacent RSSI does. For 40 MHz the same comparison runs over channel
 * pairs (i, i+4), falling back to 20 MHz if no pair is found.
 *
 * Returns 0; results are written into @acs_selected_channels.
 */
int slsi_set_2g_auto_channel(struct slsi_dev *sdev, struct netdev_vif *ndev_vif,
			     struct slsi_acs_selected_channels *acs_selected_channels,
			     struct slsi_acs_chan_info *ch_info)
{
	int i = 0, j = 0, avg_load, total_num_ap, total_rssi, adjacent_rssi;
	bool all_bss_load = true, none_bss_load = true;
	int min_avg_chan_utilization = INT_MAX, min_adjacent_rssi = INT_MAX;
	int ch_idx_min_load = 0, ch_idx_min_rssi = 0;
	int min_avg_chan_utilization_20 = INT_MAX, min_adjacent_rssi_20 = INT_MAX;
	int ch_idx_min_load_20 = 0, ch_idx_min_rssi_20 = 0;
	int ret = 0;
	int ch_list_len = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->ch_list_len;

	acs_selected_channels->ch_width = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->ch_width;
	acs_selected_channels->hw_mode = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode;

	SLSI_DBG3(sdev, SLSI_MLME, "ch_lis_len:%d\n", ch_list_len);
	for (i = 0; i < ch_list_len; i++) {
		if (!ch_info[i].chan)
			continue;
		adjacent_rssi = 0; /* Assuming ch_list is in sorted order. */
		for (j = -2; j <= 2; j++)
			if (i + j >= 0 && i + j < ch_list_len)
				adjacent_rssi += ch_info[i + j].rssi_factor;
		ch_info[i].adj_rssi_factor = adjacent_rssi;
		if (ch_info[i].num_bss_load_ap != 0) {
			ch_info[i].avg_chan_utilization = ch_info[i].total_chan_utilization /
							  ch_info[i].num_bss_load_ap;
			/* Track least-utilised 20 MHz channel; ties broken by AP count. */
			if (ch_info[i].avg_chan_utilization < min_avg_chan_utilization_20) {
				min_avg_chan_utilization_20 = ch_info[i].avg_chan_utilization;
				ch_idx_min_load_20 = i;
			} else if (ch_info[i].avg_chan_utilization == min_avg_chan_utilization_20 &&
				   ch_info[i].num_ap < ch_info[ch_idx_min_load_20].num_ap) {
				ch_idx_min_load_20 = i;
			}
			none_bss_load = false;
		} else {
			/* NOTE(review): unlike the 5 GHz path, no default
			 * avg_chan_utilization is assigned here, so the 40 MHz
			 * pairing below may read a zero/unset value for such
			 * channels -- confirm ch_info is zero-initialised.
			 */
			SLSI_DBG3(sdev, SLSI_MLME, "BSS load IE not found\n");
			all_bss_load = false;
		}
		/* Track channel with the lowest adjacent RSSI; ties broken by AP count. */
		if (adjacent_rssi < min_adjacent_rssi_20) {
			min_adjacent_rssi_20 = adjacent_rssi;
			ch_idx_min_rssi_20 = i;
		} else if (adjacent_rssi == min_adjacent_rssi_20 &&
			   ch_info[i].num_ap < ch_info[ch_idx_min_rssi_20].num_ap) {
			ch_idx_min_rssi_20 = i;
		}
		SLSI_DBG3(sdev, SLSI_MLME, "min rssi:%d min_rssi_idx:%d\n", min_adjacent_rssi_20, ch_idx_min_rssi_20);
		SLSI_DBG3(sdev, SLSI_MLME, "num_ap:%d,chan:%d,total_util:%d,avg_util:%d,rssi_fac:%d,adj_rssi_fac:%d,"
			  "bss_ap:%d\n", ch_info[i].num_ap, ch_info[i].chan, ch_info[i].total_chan_utilization,
			  ch_info[i].avg_chan_utilization, ch_info[i].rssi_factor, ch_info[i].adj_rssi_factor,
			  ch_info[i].num_bss_load_ap);
	}

	if (acs_selected_channels->ch_width == 40) {
		/* Evaluate 40 MHz pairs (primary i, secondary i+4). */
		for (i = 0; i < ch_list_len; i++) {
			if (i + 4 >= ch_list_len || !ch_info[i + 4].chan || !ch_info[i].chan)
				continue;
			avg_load = ch_info[i].avg_chan_utilization + ch_info[i + 4].avg_chan_utilization;
			total_num_ap = ch_info[i].num_ap + ch_info[i + 4].num_ap;
			total_rssi = ch_info[i].adj_rssi_factor + ch_info[i + 4].adj_rssi_factor;

			if (avg_load < min_avg_chan_utilization) {
				min_avg_chan_utilization = avg_load;
				ch_idx_min_load = i;
			} else if (avg_load == min_avg_chan_utilization &&
				   total_num_ap < ch_info[ch_idx_min_load].num_ap +
				   ch_info[ch_idx_min_load + 4].num_ap) {
				ch_idx_min_load = i;
			}
			if (total_rssi < min_adjacent_rssi) {
				min_adjacent_rssi = total_rssi;
				ch_idx_min_rssi = i;
			} else if (total_rssi == min_adjacent_rssi &&
				   total_num_ap < ch_info[ch_idx_min_rssi].num_ap +
				   ch_info[ch_idx_min_rssi + 4].num_ap) {
				ch_idx_min_rssi = i;
			}
		}
		/* Prefer utilisation data when every AP provided it. */
		if (all_bss_load) {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_load].chan;
			acs_selected_channels->sec_channel = ch_info[ch_idx_min_load].chan + 4;
		} else {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_rssi].chan;
			acs_selected_channels->sec_channel = ch_info[ch_idx_min_rssi].chan + 4;
		}

		/* No usable 40 MHz pair: fall back to 20 MHz selection. */
		if (!acs_selected_channels->pri_channel)
			acs_selected_channels->ch_width = 20;
	}

	if (acs_selected_channels->ch_width == 20) {
		if (all_bss_load)
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_load_20].chan;
		else
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_rssi_20].chan;
	}
	return ret;
}
+
+int slsi_is_40mhz_5gchan(u8 pri_channel, u8 sec_channel)
+{
+ int slsi_40mhz_chan[12] = {38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ if (pri_channel == slsi_40mhz_chan[i] - 2 && sec_channel == slsi_40mhz_chan[i] + 2)
+ return 1;
+ else if (pri_channel < slsi_40mhz_chan[i])
+ return 0;
+ }
+ return 0;
+}
+
+int slsi_is_80mhz_5gchan(u8 pri_channel, u8 last_channel)
+{
+ int slsi_80mhz_chan[6] = {42, 58, 106, 122, 138, 155};
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pri_channel == slsi_80mhz_chan[i] - 6 && last_channel == slsi_80mhz_chan[i] + 6)
+ return 1;
+ else if (pri_channel < slsi_80mhz_chan[i])
+ return 0;
+ }
+ return 0;
+}
+
/* Select the best 5 GHz channel for ACS from the aggregated scan data.
 *
 * Channels with QBSS load IEs get a real average utilisation; those
 * without are assigned a neutral default of 128. Selection then prefers
 * the least-utilised valid 80 MHz block, falling back to 40 MHz pairs and
 * finally a single 20 MHz channel; when utilisation data is absent or too
 * high, the block with the fewest APs is chosen instead.
 *
 * Returns 0; results are written into @acs_selected_channels.
 */
int slsi_set_5g_auto_channel(struct slsi_dev *sdev, struct netdev_vif *ndev_vif,
			     struct slsi_acs_selected_channels *acs_selected_channels,
			     struct slsi_acs_chan_info *ch_info)
{
	int i = 0, avg_load, total_num_ap;
	bool all_bss_load = true, none_bss_load = true;
	int min_num_ap = INT_MAX, min_avg_chan_utilization = INT_MAX;
	int ch_idx_min_load = 0, ch_idx_min_ap = 0;
	int min_avg_chan_utilization_20 = INT_MAX, min_num_ap_20 = INT_MAX;
	int ch_idx_min_load_20 = 0, ch_idx_min_ap_20 = 0;
	int ret = 0;
	int ch_list_len = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->ch_list_len;

	acs_selected_channels->ch_width = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->ch_width;
	acs_selected_channels->hw_mode = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode;

	SLSI_DBG3(sdev, SLSI_MLME, "ch_lis_len:%d\n", ch_list_len);
	for (i = 0; i < ch_list_len; i++) {
		if (!ch_info[i].chan)
			continue;
		if (ch_info[i].num_bss_load_ap != 0) {
			ch_info[i].avg_chan_utilization = ch_info[i].total_chan_utilization /
							  ch_info[i].num_bss_load_ap;
			/* Track least-utilised 20 MHz channel; ties broken by AP count. */
			if (ch_info[i].avg_chan_utilization < min_avg_chan_utilization_20) {
				min_avg_chan_utilization_20 = ch_info[i].avg_chan_utilization;
				ch_idx_min_load_20 = i;
			} else if (ch_info[i].avg_chan_utilization == min_avg_chan_utilization_20 &&
				   ch_info[i].num_ap < ch_info[ch_idx_min_load_20].num_ap) {
				ch_idx_min_load_20 = i;
			}
			none_bss_load = false;
		} else {
			/* No BSS load IE on this channel: remember the channel
			 * with the fewest APs and assign a neutral mid-scale
			 * utilisation (255 scale => 128) for the bonded sums.
			 */
			if (ch_info[i].num_ap < min_num_ap_20) {
				min_num_ap_20 = ch_info[i].num_ap;
				ch_idx_min_ap_20 = i;
			}
			SLSI_DBG3(sdev, SLSI_MLME, "BSS load IE not found\n");
			ch_info[i].avg_chan_utilization = 128;
			all_bss_load = false;
		}
		SLSI_DBG3(sdev, SLSI_MLME, "num_ap:%d chan:%d, total_chan_util:%d, avg_chan_util:%d, bss_load_ap:%d\n",
			  ch_info[i].num_ap, ch_info[i].chan, ch_info[i].total_chan_utilization,
			  ch_info[i].avg_chan_utilization, ch_info[i].num_bss_load_ap);
	}

	if (acs_selected_channels->ch_width == 80) {
		/* Evaluate valid 80 MHz blocks of four consecutive entries. */
		for (i = 0; i < ch_list_len; i++) {
			if (i + 3 >= ch_list_len)
				continue;
			if (!ch_info[i].chan || !ch_info[i + 1].chan || !ch_info[i + 2].chan || !ch_info[i + 3].chan)
				continue;
			if (slsi_is_80mhz_5gchan(ch_info[i].chan, ch_info[i + 3].chan)) {
				avg_load = ch_info[i].avg_chan_utilization + ch_info[i + 1].avg_chan_utilization +
					   ch_info[i + 2].avg_chan_utilization + ch_info[i + 3].avg_chan_utilization;
				total_num_ap = ch_info[i].num_ap + ch_info[i + 1].num_ap + ch_info[i + 2].num_ap +
					       ch_info[i + 3].num_ap;
				if (avg_load < min_avg_chan_utilization) {
					min_avg_chan_utilization = avg_load;
					ch_idx_min_load = i;
				} else if (avg_load == min_avg_chan_utilization && total_num_ap <
					   (ch_info[ch_idx_min_load].num_ap + ch_info[ch_idx_min_load + 1].num_ap +
					    ch_info[ch_idx_min_load + 2].num_ap +
					    ch_info[ch_idx_min_load + 3].num_ap)) {
					ch_idx_min_load = i;
				}
				if (total_num_ap < min_num_ap) {
					min_num_ap = total_num_ap;
					ch_idx_min_ap = i;
				}
			}
		}
		/* 512 = 4 channels x 128 neutral utilisation threshold. */
		if (all_bss_load || min_avg_chan_utilization <= 512) {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_load].chan;
			acs_selected_channels->vht_seg0_center_ch = ch_info[ch_idx_min_load].chan + 6;
		} else if (none_bss_load || min_avg_chan_utilization > 512) {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_ap].chan;
			acs_selected_channels->vht_seg0_center_ch = ch_info[ch_idx_min_ap].chan + 6;
		}

		/* No usable 80 MHz block: fall back to 40 MHz. */
		if (!acs_selected_channels->pri_channel)
			acs_selected_channels->ch_width = 40;
	}

	if (acs_selected_channels->ch_width == 40) {
		/* Evaluate valid 40 MHz pairs of two consecutive entries. */
		for (i = 0; i < ch_list_len; i++) {
			if (!ch_info[i].chan || i + 1 >= ch_list_len || !ch_info[i + 1].chan)
				continue;
			if (slsi_is_40mhz_5gchan(ch_info[i].chan, ch_info[i + 1].chan)) {
				avg_load = ch_info[i].avg_chan_utilization + ch_info[i + 1].avg_chan_utilization;
				total_num_ap = ch_info[i].num_ap + ch_info[i + 1].num_ap;
				if (avg_load < min_avg_chan_utilization) {
					min_avg_chan_utilization = avg_load;
					ch_idx_min_load = i;
				} else if (avg_load == min_avg_chan_utilization && total_num_ap <
					   ch_info[ch_idx_min_load].num_ap + ch_info[ch_idx_min_load + 1].num_ap) {
					ch_idx_min_load = i;
				}
				if (total_num_ap < min_num_ap) {
					min_num_ap = total_num_ap;
					ch_idx_min_ap = i;
				}
			}
		}
		/* 256 = 2 channels x 128 neutral utilisation threshold. */
		if (all_bss_load || min_avg_chan_utilization <= 256) {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_load].chan;
			acs_selected_channels->sec_channel = ch_info[ch_idx_min_load + 1].chan;
		} else if (none_bss_load || min_avg_chan_utilization > 256) {
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_ap].chan;
			acs_selected_channels->sec_channel = ch_info[ch_idx_min_ap + 1].chan;
		}

		/* No usable 40 MHz pair: fall back to 20 MHz. */
		if (!acs_selected_channels->pri_channel)
			acs_selected_channels->ch_width = 20;
	}

	if (acs_selected_channels->ch_width == 20) {
		if (all_bss_load || min_avg_chan_utilization_20 < 128)
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_load_20].chan;
		else if (none_bss_load || min_avg_chan_utilization_20 >= 128)
			acs_selected_channels->pri_channel = ch_info[ch_idx_min_ap_20].chan;
	}
	return ret;
}
+
+int slsi_acs_get_rssi_factor(struct slsi_dev *sdev, int rssi, int ch_util)
+{
+ int frac_pow_val[10] = {10, 12, 15, 19, 25, 31, 39, 50, 63, 79};
+ int res = 1;
+ int i;
+
+ if (rssi < 0)
+ rssi = 0 - rssi;
+ else
+ return INT_MAX;
+ for (i = 0; i < rssi / 10; i++)
+ res *= 10;
+ res = (10000000 * ch_util / res) / frac_pow_val[rssi % 10];
+
+ SLSI_DBG3(sdev, SLSI_MLME, "ch_util:%d\n", ch_util);
+ return res;
+}
+
+struct slsi_acs_chan_info *slsi_acs_scan_results(struct slsi_dev *sdev, struct netdev_vif *ndev_vif, u16 scan_id)
+{
+ struct sk_buff *scan_res;
+ struct sk_buff *unique_scan;
+ struct sk_buff_head unique_scan_results;
+ struct slsi_acs_chan_info *ch_info = ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->acs_chan_info;
+
+ SLSI_DBG3(sdev, SLSI_MLME, "Received acs_results\n");
+ skb_queue_head_init(&unique_scan_results);
+ SLSI_MUTEX_LOCK(ndev_vif->scan_result_mutex);
+ scan_res = slsi_dequeue_cached_scan_result(&ndev_vif->scan[SLSI_SCAN_HW_ID], NULL);
+
+ while (scan_res) {
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(scan_res);
+ size_t mgmt_len = fapi_get_mgmtlen(scan_res);
+ struct ieee80211_channel *scan_channel;
+ int idx = 0;
+ const u8 *ie_data;
+ const u8 *ie;
+ int ie_len;
+ int ch_util = 128;
+ /* ieee80211_mgmt structure is similar for Probe Response and Beacons */
+ size_t ies_len = mgmt_len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ /* make sure this BSSID has not already been used */
+ skb_queue_walk(&unique_scan_results, unique_scan) {
+ struct ieee80211_mgmt *unique_mgmt = fapi_get_mgmt(unique_scan);
+
+ if (compare_ether_addr(mgmt->bssid, unique_mgmt->bssid) == 0)
+ goto next_scan;
+ }
+ slsi_skb_queue_head(&unique_scan_results, scan_res);
+ scan_channel = slsi_find_scan_channel(sdev, mgmt, mgmt_len,
+ fapi_get_u16(scan_res, u.mlme_scan_ind.channel_frequency) / 2);
+ if (!scan_channel)
+ goto next_scan;
+ SLSI_DBG3(sdev, SLSI_MLME, "scan result (scan_id:%d, %pM, channel:%d, rssi:%d, ie_len = %zu)\n",
+ fapi_get_u16(scan_res, u.mlme_scan_ind.scan_id),
+ fapi_get_mgmt(scan_res)->bssid, scan_channel->hw_value,
+ fapi_get_s16(scan_res, u.mlme_scan_ind.rssi),
+ ies_len);
+
+ idx = slsi_find_chan_idx(scan_channel->hw_value, ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode);
+ SLSI_DBG3(sdev, SLSI_MLME, "chan_idx:%d chan_value: %d\n", idx, ch_info[idx].chan);
+
+ if ((idx < 0) || (idx > 24)) {
+ SLSI_DBG3(sdev, SLSI_MLME, "idx is not in range idx=%d\n", idx);
+ goto next_scan;
+ }
+ if (ch_info[idx].chan) {
+ ch_info[idx].num_ap += 1;
+ ie = cfg80211_find_ie(WLAN_EID_QBSS_LOAD, mgmt->u.beacon.variable, ies_len);
+ if (ie) {
+ ie_len = ie[1];
+ ie_data = &ie[2];
+ if (ie_len >= 3) {
+ ch_util = ie_data[2];
+ ch_info[idx].num_bss_load_ap += 1;
+ ch_info[idx].total_chan_utilization += ch_util;
+ }
+ }
+ if (idx == scan_channel->hw_value - 1) { /*if 2.4GHZ channel */
+ int res = 0;
+
+ res = slsi_acs_get_rssi_factor(sdev, fapi_get_s16(scan_res, u.mlme_scan_ind.rssi),
+ ch_util);
+ ch_info[idx].rssi_factor += res;
+ SLSI_DBG3(sdev, SLSI_MLME, "ch_info[idx].rssi_factor:%d\n", ch_info[idx].rssi_factor);
+ }
+ } else {
+ goto next_scan;
+ }
+next_scan:
+ scan_res = slsi_dequeue_cached_scan_result(&ndev_vif->scan[scan_id], NULL);
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->scan_result_mutex);
+ slsi_skb_queue_purge(&unique_scan_results);
+ return ch_info;
+}
+
+void slsi_acs_scan_complete(struct slsi_dev *sdev, struct netdev_vif *ndev_vif, u16 scan_id)
+{
+ struct slsi_acs_selected_channels acs_selected_channels;
+ struct slsi_acs_chan_info *ch_info;
+ int r = 0;
+
+ memset(&acs_selected_channels, 0, sizeof(acs_selected_channels));
+ ch_info = slsi_acs_scan_results(sdev, ndev_vif, scan_id);
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode == SLSI_ACS_MODE_IEEE80211A)
+ r = slsi_set_5g_auto_channel(sdev, ndev_vif, &acs_selected_channels, ch_info);
+ else if (ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode == SLSI_ACS_MODE_IEEE80211B ||
+ ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request->hw_mode == SLSI_ACS_MODE_IEEE80211G)
+ r = slsi_set_2g_auto_channel(sdev, ndev_vif, &acs_selected_channels, ch_info);
+ else
+ r = -EINVAL;
+ if (!r) {
+ r = slsi_send_acs_event(sdev, acs_selected_channels);
+ if (r != 0)
+ SLSI_ERR(sdev, "Could not send ACS vendor event up\n");
+ } else {
+ SLSI_ERR(sdev, "set_auto_channel failed: %d\n", r);
+ }
+ sdev->acs_channel_switched = true;
+ kfree(ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request);
+ if (ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request)
+ ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request = NULL;
+}
+
/* Handle mlme_scan_done: cancel the scan timeout and complete either the
 * pending ACS request or the regular scan for this scan slot. GSCAN ids
 * are routed to the GSCAN handler instead. Consumes @skb.
 */
void slsi_rx_scan_done_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	u16 scan_id = fapi_get_u16(skb, u.mlme_scan_done_ind.scan_id);
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	SLSI_MUTEX_LOCK(ndev_vif->scan_mutex);
	SLSI_NET_DBG3(dev, SLSI_GSCAN, "slsi_rx_scan_done_ind Received scan_id:%#x\n", scan_id);

#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
	if (slsi_is_gscan_id(scan_id)) {
		SLSI_NET_DBG3(dev, SLSI_GSCAN, "scan_id:%#x\n", scan_id);

		/* skb ownership passes to the GSCAN handler here. */
		slsi_gscan_handle_scan_result(sdev, dev, skb, scan_id, true);

		SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
		SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
		return;
	}
#endif
	/* Low byte selects the local scan slot. */
	scan_id = (scan_id & 0xFF);

	/* Stop the watchdog that would otherwise force-complete the scan. */
	if (scan_id == SLSI_SCAN_HW_ID && (ndev_vif->scan[SLSI_SCAN_HW_ID].scan_req ||
					   ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request))
		cancel_delayed_work(&ndev_vif->scan_timeout_work);
	if (ndev_vif->scan[SLSI_SCAN_HW_ID].acs_request)
		slsi_acs_scan_complete(sdev, ndev_vif, scan_id);
	else
		slsi_scan_complete(sdev, dev, scan_id, false);

	SLSI_MUTEX_UNLOCK(ndev_vif->scan_mutex);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
/* Handle an MLME channel-switched indication: rebuild a cfg80211 chandef
 * from the firmware's packed channel information and notify cfg80211.
 * Consumes @skb.
 */
void slsi_rx_channel_switched_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	u16 freq = 0;
	int width;
	int primary_chan_pos;
	u16 temp_chan_info;
	struct cfg80211_chan_def chandef;
	u16 cf1 = 0;
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	temp_chan_info = fapi_get_u16(skb, u.mlme_channel_switched_ind.channel_information);
	cf1 = fapi_get_u16(skb, u.mlme_channel_switched_ind.channel_frequency);
	/* Firmware reports the centre frequency in units of 500 kHz. */
	cf1 = cf1 / 2;

	/* channel_information packs: high byte = primary channel position
	 * within the bonded channel, low byte = bandwidth in MHz.
	 */
	primary_chan_pos = (temp_chan_info >> 8);
	width = (temp_chan_info & 0x00FF);

	/* If width is 80MHz/40MHz then do frequency calculation, else store as it is */
	if (width == 40)
		freq = cf1 + (primary_chan_pos * 20) - 10;
	else if (width == 80)
		freq = cf1 + (primary_chan_pos * 20) - 30;
	else
		freq = cf1;

	/* Map MHz width to the NL80211 enum.
	 * NOTE(review): any other width value falls through unmapped --
	 * verify firmware can only report 20/40/80/160 here.
	 */
	if (width == 20)
		width = NL80211_CHAN_WIDTH_20;
	else if (width == 40)
		width = NL80211_CHAN_WIDTH_40;
	else if (width == 80)
		width = NL80211_CHAN_WIDTH_80;
	else if (width == 160)
		width = NL80211_CHAN_WIDTH_160;

	/* NOTE(review): chandef is stack-allocated and only these four fields
	 * are set; confirm cfg80211_ch_switch_notify() does not read others
	 * on the targeted kernel versions.
	 */
	chandef.chan = ieee80211_get_channel(sdev->wiphy, freq);
	chandef.width = width;
	chandef.center_freq1 = cf1;
	chandef.center_freq2 = 0;

	ndev_vif->ap.channel_freq = freq; /* updated for GETSTAINFO */
	ndev_vif->chan = chandef.chan;

	cfg80211_ch_switch_notify(dev, &chandef);
	slsi_kfree_skb(skb);
}
+
/* Process an MA-BlockAck indication for a known peer.
 *
 * If the vif is AP mode and the peer is still completing connection, the
 * frame is queued on peer->buffered_frames (skb ownership moves to the
 * queue) for processing after mlme_connected_ind; otherwise it is handled
 * immediately and freed here. Caller must hold vif_mutex.
 */
void __slsi_rx_blockack_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer;

	SLSI_NET_DBG1(dev, SLSI_MLME, "ma_blockack_ind(vif:%d, peer_qsta_address:%pM, parameter_set:%d, sequence_number:%d, reason_code:%d, direction:%d)\n",
		      fapi_get_vif(skb),
		      fapi_get_buff(skb, u.ma_blockack_ind.peer_qsta_address),
		      fapi_get_u16(skb, u.ma_blockack_ind.blockack_parameter_set),
		      fapi_get_u16(skb, u.ma_blockack_ind.sequence_number),
		      fapi_get_u16(skb, u.ma_blockack_ind.reason_code),
		      fapi_get_u16(skb, u.ma_blockack_ind.direction));

	peer = slsi_get_peer_from_mac(sdev, dev, fapi_get_buff(skb, u.ma_blockack_ind.peer_qsta_address));
	WARN_ON(!peer);

	if (peer) {
		/* Buffering of frames before the mlme_connected_ind */
		if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && (peer->connected_state == SLSI_STA_CONN_STATE_CONNECTING)) {
			SLSI_DBG3(sdev, SLSI_MLME, "Buffering MA-BlockAck.Indication\n");
			/* skb ownership transfers to the queue; do not free. */
			slsi_skb_queue_tail(&peer->buffered_frames, skb);
			return;
		}
		slsi_handle_blockack(
			dev,
			peer,
			fapi_get_vif(skb),
			fapi_get_buff(skb, u.ma_blockack_ind.peer_qsta_address),
			fapi_get_u16(skb, u.ma_blockack_ind.blockack_parameter_set),
			fapi_get_u16(skb, u.ma_blockack_ind.sequence_number),
			fapi_get_u16(skb, u.ma_blockack_ind.reason_code),
			fapi_get_u16(skb, u.ma_blockack_ind.direction)
		);
	}

	slsi_kfree_skb(skb);
}
+
/* Locked wrapper: take the vif mutex and process the BlockAck indication
 * via __slsi_rx_blockack_ind() (which consumes @skb).
 */
void slsi_rx_blockack_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
	__slsi_rx_blockack_ind(sdev, dev, skb);
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
}
+
+static bool get_wmm_ie_from_resp_ie(struct slsi_dev *sdev, struct net_device *dev, u8 *resp_ie, size_t resp_ie_len, const u8 **wmm_elem, size_t *wmm_elem_len)
+{
+ struct ieee80211_vendor_ie *ie;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ if (!resp_ie) {
+ SLSI_NET_ERR(dev, "Received invalid pointer to the ie's of the association response\n");
+ return false;
+ }
+
+ *wmm_elem = resp_ie;
+ while (*wmm_elem && (*wmm_elem - resp_ie < resp_ie_len)) {
+ /* parse response ie elements and return the wmm ie */
+ *wmm_elem = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, *wmm_elem,
+ resp_ie_len - (*wmm_elem - resp_ie));
+ /* re-assoc-res can contain wmm parameter IE and wmm TSPEC IE.
+ * we want wmm parameter Element)
+ */
+ if (*wmm_elem && (*wmm_elem)[1] > 6 && (*wmm_elem)[6] == WMM_OUI_SUBTYPE_PARAMETER_ELEMENT)
+ break;
+ if (*wmm_elem)
+ *wmm_elem += (*wmm_elem)[1];
+ }
+
+ if (!(*wmm_elem)) {
+ SLSI_NET_DBG2(dev, SLSI_MLME, "No WMM IE\n");
+ return false;
+ }
+ ie = (struct ieee80211_vendor_ie *)*wmm_elem;
+ *wmm_elem_len = ie->len + 2;
+
+ SLSI_NET_DBG3(dev, SLSI_MLME, "WMM IE received and parsed successfully\n");
+ return true;
+}
+
+static bool sta_wmm_update_uapsd(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer, u8 *assoc_req_ie, size_t assoc_req_ie_len)
+{
+ const u8 *wmm_information_ie;
+
+ if (!assoc_req_ie) {
+ SLSI_NET_ERR(dev, "null reference to IE\n");
+ return false;
+ }
+
+ wmm_information_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, assoc_req_ie, assoc_req_ie_len);
+ if (!wmm_information_ie) {
+ SLSI_NET_DBG1(dev, SLSI_MLME, "no WMM IE\n");
+ return false;
+ }
+
+ peer->uapsd = wmm_information_ie[8];
+ SLSI_NET_DBG1(dev, SLSI_MLME, "peer->uapsd = 0x%x\n", peer->uapsd);
+ return true;
+}
+
/* Parse the WMM Parameter element from the association response and update
 * per-AC state: copies each AC parameter record into ndev_vif->sta.wmm_ac[]
 * and sets peer->wmm_acm bits for ACs with admission control mandatory.
 *
 * Returns true on success, false if no valid WMM parameter element exists.
 * NOTE(review): the loop writes one wmm_ac[] entry per 4-byte AC record in
 * the IE without an explicit bound on the destination array -- presumably
 * a well-formed element carries exactly 4 records; confirm the array size
 * and consider clamping for malformed frames.
 */
static bool sta_wmm_update_wmm_ac_ies(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer,
				      u8 *assoc_rsp_ie, size_t assoc_rsp_ie_len)
{
	size_t left;
	const u8 *pos;
	const u8 *wmm_elem = NULL;
	size_t wmm_elem_len = 0;
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_wmm_ac *wmm_ac = &ndev_vif->sta.wmm_ac[0];

	if (!get_wmm_ie_from_resp_ie(sdev, dev, assoc_rsp_ie, assoc_rsp_ie_len, &wmm_elem, &wmm_elem_len)) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "No WMM IE received\n");
		return false;
	}

	/* Element must cover the fixed header and be WMM version 1. */
	if (wmm_elem_len < 10 || wmm_elem[7] /* version */ != 1) {
		SLSI_NET_WARN(dev, "Invalid WMM IE: wmm_elem_len=%lu, wmm_elem[7]=%d\n", (unsigned long int)wmm_elem_len, (int)wmm_elem[7]);
		return false;
	}

	/* AC parameter records start after the 10-byte fixed part. */
	pos = wmm_elem + 10;
	left = wmm_elem_len - 10;

	for (; left >= 4; left -= 4, pos += 4) {
		/* ACI (access category index) and ACM (admission control
		 * mandatory) live in the first byte of each record.
		 */
		int aci = (pos[0] >> 5) & 0x03;
		int acm = (pos[0] >> 4) & 0x01;

		memcpy(wmm_ac, pos, sizeof(struct slsi_wmm_ac));

		/* Map each AC to its pair of UP (user priority) bits. */
		switch (aci) {
		case 1: /* AC_BK */
			if (acm)
				peer->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
			break;
		case 2: /* AC_VI */
			if (acm)
				peer->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
			break;
		case 3: /* AC_VO */
			if (acm)
				peer->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
			break;
		case 0: /* AC_BE */
		default:
			if (acm)
				peer->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
			break;
		}
		wmm_ac++;
	}

	SLSI_NET_DBG3(dev, SLSI_MLME, "WMM ies have been updated successfully\n");
	return true;
}
+
+#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
/* Netlink attributes carried in the
 * SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH vendor event
 * (see slsi_send_roam_vendor_event()).
 */
enum slsi_wlan_vendor_attr_roam_auth {
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_INVALID = 0,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_KEY_REPLAY_CTR,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KCK,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_PTK_KEK,
	SLSI_WLAN_VENDOR_ATTR_ROAM_BEACON_IE,
	/* keep last */
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST,
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_MAX =
	SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_AFTER_LAST - 1
};
+
+int slsi_send_roam_vendor_event(struct slsi_dev *sdev, const u8 *bssid,
+ const u8 *req_ie, u32 req_ie_len, const u8 *resp_ie, u32 resp_ie_len,
+ const u8 *beacon_ie, u32 beacon_ie_len, bool authorized)
+{
+ bool is_secured_bss;
+ struct sk_buff *skb = NULL;
+ u8 err = 0;
+
+ is_secured_bss = cfg80211_find_ie(WLAN_EID_RSN, req_ie, req_ie_len) ||
+ cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, req_ie, req_ie_len);
+
+ SLSI_DBG2(sdev, SLSI_MLME, "authorized:%d, is_secured_bss:%d\n", authorized, is_secured_bss);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, NULL, NLMSG_DEFAULT_SIZE,
+ SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH, GFP_KERNEL);
+#else
+ skb = cfg80211_vendor_event_alloc(sdev->wiphy, NLMSG_DEFAULT_SIZE,
+ SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH, GFP_KERNEL);
+#endif
+ if (!skb) {
+ SLSI_ERR_NODEV("Failed to allocate skb for VENDOR Roam event\n");
+ return -ENOMEM;
+ }
+
+ err |= nla_put(skb, SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_BSSID, ETH_ALEN, bssid) ? BIT(1) : 0;
+ err |= nla_put(skb, SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_AUTHORIZED, 1, &authorized) ? BIT(2) : 0;
+ err |= (req_ie && nla_put(skb, SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_REQ_IE, req_ie_len, req_ie)) ? BIT(3) : 0;
+ err |= (resp_ie && nla_put(skb, SLSI_WLAN_VENDOR_ATTR_ROAM_AUTH_RESP_IE, resp_ie_len, resp_ie)) ? BIT(4) : 0;
+ err |= (beacon_ie && nla_put(skb, SLSI_WLAN_VENDOR_ATTR_ROAM_BEACON_IE, beacon_ie_len, beacon_ie)) ? BIT(5) : 0;
+ if (err) {
+ SLSI_ERR_NODEV("Failed nla_put ,req_ie_len=%d,resp_ie_len=%d,beacon_ie_len=%d,condition_failed=%d\n",
+ req_ie_len, resp_ie_len, beacon_ie_len, err);
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+ SLSI_DBG3_NODEV(SLSI_MLME, "Event: KEY_MGMT_ROAM_AUTH(%d)\n", SLSI_NL80211_VENDOR_SUBCMD_KEY_MGMT_ROAM_AUTH);
+ cfg80211_vendor_event(skb, GFP_KERNEL);
+ return 0;
+}
+#endif /* offload */
+
/* Handle MLME-ROAMED.indication: the firmware has completed a roam to a new
 * AP. Re-binds the STA peer record to the new BSSID, refreshes the cfg80211
 * BSS entry, re-parses the (re)association IEs (UAPSD / WMM AC bitmasks),
 * reports the roam via cfg80211_roamed(), and either completes the procedure
 * immediately (no temporal keys needed) or defers MLME-ROAMED.response until
 * the key handshake finishes.
 *
 * The skb may be consumed by slsi_peer_update_assoc_rsp(); it is set to NULL
 * in that case so the final slsi_kfree_skb() is a no-op.
 */
void slsi_rx_roamed_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	struct slsi_peer *peer;
	u16 temporal_keys_required = fapi_get_u16(skb, u.mlme_roamed_ind.temporal_keys_required);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	enum ieee80211_privacy bss_privacy;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
	struct cfg80211_roam_info roam_info = {};
#endif

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_roamed_ind(vif:%d) Roaming to %pM\n",
		      fapi_get_vif(skb),
		      mgmt->bssid);

	peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
	if (WARN_ON(!peer))
		goto exit;

	if (WARN_ON(!ndev_vif->sta.sta_bss))
		goto exit;

	/* All block-ack agreements belong to the old AP */
	slsi_rx_ba_stop_all(dev, peer);

	/* Re-bind the STA peer record to the new AP's address */
	SLSI_ETHER_COPY(peer->address, mgmt->bssid);

	if (ndev_vif->sta.mlme_scan_ind_skb) {
		/* saved skb [mlme_scan_ind] freed inside slsi_rx_scan_pass_to_cfg80211 */
		slsi_rx_scan_pass_to_cfg80211(sdev, dev, ndev_vif->sta.mlme_scan_ind_skb);
		ndev_vif->sta.mlme_scan_ind_skb = NULL;
	}

	/* Capture the old BSS's privacy before dropping our reference, so the
	 * lookup for the new BSS uses the matching privacy filter.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	if (ndev_vif->sta.sta_bss->capability & WLAN_CAPABILITY_PRIVACY)
		bss_privacy = IEEE80211_PRIVACY_ON;
	else
		bss_privacy = IEEE80211_PRIVACY_OFF;
#endif

	slsi_cfg80211_put_bss(sdev->wiphy, ndev_vif->sta.sta_bss);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	ndev_vif->sta.sta_bss = cfg80211_get_bss(sdev->wiphy, NULL, peer->address, NULL, 0,
						 IEEE80211_BSS_TYPE_ANY, bss_privacy);
#else
	ndev_vif->sta.sta_bss = cfg80211_get_bss(sdev->wiphy, NULL, peer->address, NULL, 0, 0, 0);
#endif

	/* Unrecoverable: cfg80211 does not know the new BSS, or the firmware
	 * skipped procedure-started-ind. Tear the connection down.
	 */
	if (!ndev_vif->sta.sta_bss || !ndev_vif->sta.roam_mlme_procedure_started_ind) {
		if (!ndev_vif->sta.sta_bss)
			SLSI_INFO(sdev, "BSS not updated in cfg80211\n");
		if (!ndev_vif->sta.roam_mlme_procedure_started_ind)
			SLSI_INFO(sdev, "procedure-started-ind not received before roamed-ind\n");
		netif_carrier_off(dev);
		slsi_mlme_disconnect(sdev, dev, peer->address, 0, true);
		slsi_handle_disconnect(sdev, dev, peer->address, 0);
	} else {
		u8 *assoc_ie = NULL;
		int assoc_ie_len = 0;
		u8 *assoc_rsp_ie = NULL;
		int assoc_rsp_ie_len = 0;

		slsi_peer_reset_stats(sdev, dev, peer);
		slsi_peer_update_assoc_req(sdev, dev, peer, ndev_vif->sta.roam_mlme_procedure_started_ind);
		slsi_peer_update_assoc_rsp(sdev, dev, peer, skb);

		/* skb is consumed by slsi_peer_update_assoc_rsp. So do not access this anymore. */
		skb = NULL;

		if (peer->assoc_ie) {
			assoc_ie = peer->assoc_ie->data;
			assoc_ie_len = peer->assoc_ie->len;
		}

		if (peer->assoc_resp_ie) {
			assoc_rsp_ie = peer->assoc_resp_ie->data;
			assoc_rsp_ie_len = peer->assoc_resp_ie->len;
		}

		/* this is the right place to initialize the bitmasks for
		 * acm bit and tspec establishment
		 */
		peer->wmm_acm = 0;
		peer->tspec_established = 0;
		peer->uapsd = 0;

		/* update the uapsd bitmask according to the bit values
		 * in wmm information element of association request
		 */
		if (!sta_wmm_update_uapsd(sdev, dev, peer, assoc_ie, assoc_ie_len))
			SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM uapsd\n");

		/* update the acm bitmask according to the acm bit values that
		 * are included in wmm ie element of association response
		 */
		if (!sta_wmm_update_wmm_ac_ies(sdev, dev, peer, assoc_rsp_ie, assoc_rsp_ie_len))
			SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM AC ies\n");

		ndev_vif->sta.roam_mlme_procedure_started_ind = NULL;

		if (temporal_keys_required) {
			peer->pairwise_key_set = 0;
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DOING_KEY_CONFIG);
		}

		WARN_ON(assoc_ie_len && !assoc_ie);
		WARN_ON(assoc_rsp_ie_len && !assoc_rsp_ie);

		SLSI_NET_DBG3(dev, SLSI_MLME, "cfg80211_roamed()\n");

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
		/* cfg80211 does not require bss pointer in roam_info.
		 * If bss pointer is given in roam_info, cfg80211 bss
		 * data base goes bad and results in random panic.
		 */
		roam_info.channel = ndev_vif->sta.sta_bss->channel;
		roam_info.bssid = peer->address;
		roam_info.req_ie = assoc_ie;
		roam_info.req_ie_len = assoc_ie_len;
		roam_info.resp_ie = assoc_rsp_ie;
		roam_info.resp_ie_len = assoc_rsp_ie_len;
		cfg80211_roamed(dev, &roam_info, GFP_KERNEL);
#else
		cfg80211_roamed(dev,
				ndev_vif->sta.sta_bss->channel,
				peer->address,
				assoc_ie,
				assoc_ie_len,
				assoc_rsp_ie,
				assoc_rsp_ie_len,
				GFP_KERNEL);
#endif
#ifdef CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
		if (slsi_send_roam_vendor_event(sdev, peer->address, assoc_ie, assoc_ie_len,
						assoc_rsp_ie, assoc_rsp_ie_len,
						ndev_vif->sta.sta_bss->ies->data, ndev_vif->sta.sta_bss->ies->len,
						!temporal_keys_required) != 0) {
			SLSI_NET_ERR(dev, "Could not send Roam vendor event up");
		}
#endif
		SLSI_NET_DBG3(dev, SLSI_MLME, "cfg80211_roamed() Done\n");

		ndev_vif->sta.roam_in_progress = false;
		ndev_vif->chan = ndev_vif->sta.sta_bss->channel;
#if !defined SLSI_TEST_DEV && defined CONFIG_ANDROID
		/* Hold a wakelock so DHCP renewal can complete after the roam */
		SLSI_NET_DBG1(dev, SLSI_MLME, "Taking a wakelock for DHCP to finish after roaming\n");
		wake_lock_timeout(&sdev->wlan_wl_roam, msecs_to_jiffies(10 * 1000));
#ifdef CONFIG_SCSC_WIFILOGGER
		SCSC_WLOG_WAKELOCK(WLOG_NORMAL, WL_TAKEN, "wlan_wl_roam", WL_REASON_ROAM);
#endif
#endif

		if (!temporal_keys_required) {
			/* Open network: complete the roam procedure now */
			slsi_mlme_roamed_resp(sdev, dev);
			cac_update_roam_traffic_params(sdev, dev);
		} else {
			/* Secured: MLME-ROAMED.response is sent after key config */
			ndev_vif->sta.resp_id = MLME_ROAMED_RES;
		}
	}

exit:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
+void slsi_rx_roam_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ enum ieee80211_statuscode status = WLAN_STATUS_SUCCESS;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_roam_ind(vif:%d, aid:0, result:0x%04x )\n",
+ fapi_get_vif(skb),
+ fapi_get_u16(skb, u.mlme_roam_ind.result_code));
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (!ndev_vif->activated) {
+ SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
+ goto exit_with_lock;
+ }
+
+ if (WARN(ndev_vif->vif_type != FAPI_VIFTYPE_STATION, "Not a Station VIF\n"))
+ goto exit_with_lock;
+
+ if (fapi_get_u16(skb, u.mlme_roam_ind.result_code) != FAPI_RESULTCODE_HOST_REQUEST_SUCCESS)
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+
+exit_with_lock:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
/* Handle MLME-TDLS-PEER.indication with tdls_event == DISCOVERED: a TDLS
 * discovery frame was received. The management frame carried in the signal
 * is forwarded to cfg80211 and the indication is acknowledged with an
 * MLME-TDLS-PEER.response. Consumes skb.
 */
static void slsi_tdls_event_discovered(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	u16 tdls_event = fapi_get_u16(skb, u.mlme_tdls_peer_ind.tdls_event);
	u16 peer_index = fapi_get_u16(skb, u.mlme_tdls_peer_ind.peer_index);
	struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
	int len = fapi_get_mgmtlen(skb);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_INFO(sdev, "\n");

	if (len != 0) {
		/* cfg80211_rx_mgmt() takes a wdev instead of a net_device
		 * from kernel 3.10.9 onwards.
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
		cfg80211_rx_mgmt(&ndev_vif->wdev, ndev_vif->chan->center_freq, 0, (const u8 *)mgmt, len, GFP_ATOMIC);
#else
		cfg80211_rx_mgmt(dev, ndev_vif->chan->center_freq, 0, (const u8 *)mgmt, len, GFP_ATOMIC);
#endif
		/* Handling MLME-TDLS-PEER.response */
		slsi_mlme_tdls_peer_resp(sdev, dev, peer_index, tdls_event);
	}
	/* NOTE(review): when len == 0 no MLME-TDLS-PEER.response is sent -
	 * confirm the firmware does not expect one in that case.
	 */

	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);

	slsi_kfree_skb(skb);
}
+
/* Handle MLME-TDLS-PEER.indication with tdls_event == CONNECTED: create a
 * peer record for the new TDLS link, enable QoS on it, open its controlled
 * port, move its queued traffic from the AP queueset to the TDLS queueset
 * and acknowledge with MLME-TDLS-PEER.response. Consumes skb.
 */
static void slsi_tdls_event_connected(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct slsi_peer *peer = NULL;
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	u16 peer_index = fapi_get_u16(skb, u.mlme_tdls_peer_ind.peer_index);
	u16 tdls_event = fapi_get_u16(skb, u.mlme_tdls_peer_ind.tdls_event);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	ndev_vif->sta.tdls_enabled = true;

	SLSI_INFO(sdev, "(vif:%d, peer_index:%d mac[%pM])\n",
		  fapi_get_vif(skb), peer_index, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));

	if (!ndev_vif->activated) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
		goto exit_with_lock;
	}

	if (WARN(ndev_vif->vif_type != FAPI_VIFTYPE_STATION, "STA VIF"))
		goto exit_with_lock;

	/* Check for MAX client */
	if ((ndev_vif->sta.tdls_peer_sta_records) + 1 > SLSI_TDLS_PEER_CONNECTIONS_MAX) {
		SLSI_NET_ERR(dev, "MAX TDLS peer limit reached. Ignore ind for peer_index:%d\n", peer_index);
		goto exit_with_lock;
	}

	/* Firmware-supplied index must fall inside the TDLS peer range */
	if (peer_index < SLSI_TDLS_PEER_INDEX_MIN || peer_index > SLSI_TDLS_PEER_INDEX_MAX) {
		SLSI_NET_ERR(dev, "Received incorrect peer_index: %d\n", peer_index);
		goto exit_with_lock;
	}

	peer = slsi_peer_add(sdev, dev, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address), peer_index);

	if (!peer) {
		SLSI_NET_ERR(dev, "Peer NOT Created\n");
		goto exit_with_lock;
	}

	/* QoS is mandatory for TDLS - enable QoS for TDLS peer by default */
	peer->qos_enabled = true;

	slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);

	/* Move TDLS packets from STA_Q to TDLS_Q */
	slsi_tdls_move_packets(sdev, dev, ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET], peer, true);

	/* Handling MLME-TDLS-PEER.response */
	slsi_mlme_tdls_peer_resp(sdev, dev, peer_index, tdls_event);

exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
+static void slsi_tdls_event_disconnected(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct slsi_peer *peer = NULL;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 pid = fapi_get_u16(skb, u.mlme_tdls_peer_ind.peer_index);
+ u16 tdls_event = fapi_get_u16(skb, u.mlme_tdls_peer_ind.tdls_event);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if (WARN_ON(!dev))
+ goto exit;
+
+ SLSI_INFO(sdev, "(vif:%d, MAC:%pM)\n", ndev_vif->ifnum,
+ fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));
+
+ if (!ndev_vif->activated) {
+ SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
+ goto exit;
+ }
+
+ peer = slsi_get_peer_from_mac(sdev, dev, fapi_get_buff(skb, u.mlme_tdls_peer_ind.peer_sta_address));
+
+ if (!peer || (peer->aid == 0)) {
+ WARN_ON(!peer || (peer->aid == 0));
+ SLSI_NET_DBG1(dev, SLSI_MLME, "peer NOT found by MAC address\n");
+ goto exit;
+ }
+
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DISCONNECTED);
+
+ /* Move TDLS packets from TDLS_Q to STA_Q */
+ slsi_tdls_move_packets(sdev, dev, ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET], peer, false);
+
+ slsi_peer_remove(sdev, dev, peer);
+
+ slsi_mlme_tdls_peer_resp(sdev, dev, pid, tdls_event);
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+
+ slsi_kfree_skb(skb);
+}
+
+/* Handling for MLME-TDLS-PEER.indication
+ */
+void slsi_tdls_peer_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ u16 tdls_event = fapi_get_u16(skb, u.mlme_tdls_peer_ind.tdls_event);
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_tdls_peer_ind tdls_event: %d\n", tdls_event);
+
+ switch (tdls_event) {
+ case FAPI_TDLSEVENT_CONNECTED:
+ slsi_tdls_event_connected(sdev, dev, skb);
+ break;
+ case FAPI_TDLSEVENT_DISCONNECTED:
+ slsi_tdls_event_disconnected(sdev, dev, skb);
+ break;
+ case FAPI_TDLSEVENT_DISCOVERED:
+ slsi_tdls_event_discovered(sdev, dev, skb);
+ break;
+ default:
+ WARN_ON((tdls_event == 0) || (tdls_event > 4));
+ slsi_kfree_skb(skb);
+ break;
+ }
+}
+
+/* Retrieve any buffered frame before connected_ind and pass them up. */
+void slsi_rx_buffered_frames(struct slsi_dev *sdev, struct net_device *dev, struct slsi_peer *peer)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *buff_frame = NULL;
+
+ WARN_ON(!SLSI_MUTEX_IS_LOCKED(ndev_vif->vif_mutex));
+ if (WARN(!peer, "Peer is NULL"))
+ return;
+ WARN(peer->connected_state == SLSI_STA_CONN_STATE_CONNECTING, "Wrong state");
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "Processing buffered RX frames received before mlme_connected_ind for (vif:%d, aid:%d)\n",
+ ndev_vif->ifnum, peer->aid);
+ buff_frame = slsi_skb_dequeue(&peer->buffered_frames);
+ while (buff_frame) {
+ slsi_debug_frame(sdev, dev, buff_frame, "RX_BUFFERED");
+ switch (fapi_get_sigid(buff_frame)) {
+ case MA_BLOCKACK_IND:
+ SLSI_NET_DBG2(dev, SLSI_MLME, "Transferring buffered MA_BLOCKACK_IND frame");
+ __slsi_rx_blockack_ind(sdev, dev, buff_frame);
+ break;
+ default:
+ SLSI_NET_WARN(dev, "Unexpected Data: 0x%.4x\n", fapi_get_sigid(buff_frame));
+ slsi_kfree_skb(buff_frame);
+ break;
+ }
+ buff_frame = slsi_skb_dequeue(&peer->buffered_frames);
+ }
+}
+
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+void slsi_rx_synchronised_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ const u8 *connecting_ssid = NULL;
+ struct cfg80211_external_auth_params auth_request;
+ int r;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ SLSI_NET_DBG1(dev, SLSI_MLME, "Received slsi_rx_synchronised_ind\n");
+ if (ndev_vif->sta.sta_bss->ies->len)
+ connecting_ssid = cfg80211_find_ie(WLAN_EID_SSID, ndev_vif->sta.sta_bss->ies->data,
+ ndev_vif->sta.sta_bss->ies->len);
+
+ auth_request.action = NL80211_EXTERNAL_AUTH_START;
+ memcpy(auth_request.bssid, ndev_vif->sta.sta_bss->bssid, ETH_ALEN);
+ if (connecting_ssid && (connecting_ssid[1] > 0)) {
+ memcpy(auth_request.ssid.ssid, &connecting_ssid[2], connecting_ssid[1]);
+ auth_request.ssid.ssid_len = connecting_ssid[1];
+ }
+ auth_request.key_mgmt_suite = ndev_vif->sta.crypto.akm_suites[0];
+ r = cfg80211_external_auth_request(dev, &auth_request, GFP_KERNEL);
+ if (r)
+ SLSI_NET_DBG1(dev, SLSI_MLME, "cfg80211_external_auth_request failed");
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+#endif
+
/* Handle MLME-CONNECTED.indication for AP mode: a station has associated.
 * Looks up the peer created at association start, notifies cfg80211 of the
 * new station and either opens its controlled port immediately (open AP) or
 * waits for the key handshake (privacy enabled). Buffered frames received
 * before this indication are then flushed. Consumes skb.
 */
void slsi_rx_connected_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer = NULL;
	u16 aid = fapi_get_u16(skb, u.mlme_connected_ind.peer_index);

	/* For AP mode, peer_index value is equivalent to aid(association_index) value */

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_connected_ind(vif:%d, peer_index:%d)\n",
		      fapi_get_vif(skb),
		      aid);
	SLSI_INFO(sdev, "Received Association Response\n");

	if (!ndev_vif->activated) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
		goto exit_with_lock;
	}

	/* STA-mode association completion is reported via mlme_connect_ind /
	 * mlme_roamed_ind, never here.
	 */
	if (WARN(ndev_vif->vif_type == FAPI_VIFTYPE_STATION, "STA VIF and Not Roaming"))
		goto exit_with_lock;

	switch (ndev_vif->vif_type) {
	case FAPI_VIFTYPE_AP:
	{
		if (aid < SLSI_PEER_INDEX_MIN || aid > SLSI_PEER_INDEX_MAX) {
			SLSI_NET_ERR(dev, "Received incorrect peer_index: %d\n", aid);
			goto exit_with_lock;
		}

		/* Queueset index is aid - 1 */
		peer = slsi_get_peer_from_qs(sdev, dev, aid - 1);
		if (!peer) {
			SLSI_NET_ERR(dev, "Peer (aid:%d) Not Found - Disconnect peer\n", aid);
			goto exit_with_lock;
		}

		cfg80211_new_sta(dev, peer->address, &peer->sinfo, GFP_KERNEL);

		if (ndev_vif->ap.privacy) {
			/* Port stays blocked until the key handshake completes */
			peer->connected_state = SLSI_STA_CONN_STATE_DOING_KEY_CONFIG;
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DOING_KEY_CONFIG);
		} else {
			peer->connected_state = SLSI_STA_CONN_STATE_CONNECTED;
			slsi_mlme_connected_resp(sdev, dev, aid);
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
		}
		slsi_rx_buffered_frames(sdev, dev, peer);
		break;
	}

	default:
		SLSI_NET_WARN(dev, "mlme_connected_ind(vif:%d, unexpected vif type:%d)\n", fapi_get_vif(skb), ndev_vif->vif_type);
		break;
	}
exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
/* Handle MLME-REASSOCIATE.indication for STA mode: the firmware completed a
 * reassociation with the current AP. Refreshes the peer's assoc IEs and the
 * WMM/UAPSD bitmasks, reports the result via cfg80211_connect_result() and
 * either completes the procedure (open/WEP AP) or defers the response until
 * key config (secured AP). On failure the VIF is torn down.
 *
 * The skb may be consumed by slsi_peer_update_assoc_rsp(); it is set to NULL
 * in that case so the final slsi_kfree_skb() is a no-op.
 */
void slsi_rx_reassoc_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	enum ieee80211_statuscode status = WLAN_STATUS_SUCCESS;
	struct slsi_peer *peer = NULL;
	u8 *assoc_ie = NULL;
	int assoc_ie_len = 0;
	u8 *reassoc_rsp_ie = NULL;
	int reassoc_rsp_ie_len = 0;

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_reassoc_ind(vif:%d, result:0x%04x)\n",
		      fapi_get_vif(skb),
		      fapi_get_u16(skb, u.mlme_reassociate_ind.result_code));

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	if (!ndev_vif->activated) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
		goto exit_with_lock;
	}

	if (WARN(ndev_vif->vif_type != FAPI_VIFTYPE_STATION, "Not a Station VIF\n"))
		goto exit_with_lock;

	peer = slsi_get_peer_from_qs(sdev, dev, 0);
	if (WARN_ON(!peer)) {
		SLSI_NET_ERR(dev, "PEER Not found\n");
		goto exit_with_lock;
	}

	if (fapi_get_u16(skb, u.mlme_reassociate_ind.result_code) != FAPI_RESULTCODE_SUCCESS) {
		status = WLAN_STATUS_UNSPECIFIED_FAILURE;
		/* Existing block-ack agreements are invalid after a failed reassoc */
		slsi_rx_ba_stop_all(dev, peer);
	} else {
		/* New association: pairwise keys must be re-established */
		peer->pairwise_key_set = 0;

		if (peer->assoc_ie) {
			assoc_ie = peer->assoc_ie->data;
			assoc_ie_len = peer->assoc_ie->len;
			WARN_ON(assoc_ie_len && !assoc_ie);
		}

		slsi_peer_reset_stats(sdev, dev, peer);

		peer->sinfo.assoc_req_ies = assoc_ie;
		peer->sinfo.assoc_req_ies_len = assoc_ie_len;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
		peer->sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
#endif
		slsi_peer_update_assoc_rsp(sdev, dev, peer, skb);
		/* skb is consumed by slsi_peer_update_assoc_rsp. So do not access this anymore. */
		skb = NULL;
		if (peer->assoc_resp_ie) {
			reassoc_rsp_ie = peer->assoc_resp_ie->data;
			reassoc_rsp_ie_len = peer->assoc_resp_ie->len;
			WARN_ON(reassoc_rsp_ie_len && !reassoc_rsp_ie);
		}

		/* update the uapsd bitmask according to the bit values
		 * in wmm information element of association request
		 */
		if (!sta_wmm_update_uapsd(sdev, dev, peer, assoc_ie, assoc_ie_len))
			SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM uapsd\n");

		/* update the acm bitmask according to the acm bit values that
		 * are included in wmm ie elements of re-association response
		 */
		if (!sta_wmm_update_wmm_ac_ies(sdev, dev, peer, reassoc_rsp_ie, reassoc_rsp_ie_len))
			SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM AC ies\n");
	}

	/* cfg80211_connect_result will take a copy of any ASSOC or (RE)ASSOC RSP IEs passed to it */
	cfg80211_connect_result(dev,
				peer->address,
				assoc_ie, assoc_ie_len,
				reassoc_rsp_ie, reassoc_rsp_ie_len,
				status,
				GFP_KERNEL);

	if (status == WLAN_STATUS_SUCCESS) {
		ndev_vif->sta.vif_status = SLSI_VIF_STATUS_CONNECTED;

		/* For Open & WEP AP,send reassoc response.
		 * For secured AP, all this would be done after handshake
		 */
		if ((peer->capabilities & WLAN_CAPABILITY_PRIVACY) &&
		    (cfg80211_find_ie(WLAN_EID_RSN, assoc_ie, assoc_ie_len) ||
		     cfg80211_find_ie(SLSI_WLAN_EID_WAPI, assoc_ie, assoc_ie_len) ||
		     cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, assoc_ie, assoc_ie_len))) {
			/*secured AP*/
			ndev_vif->sta.resp_id = MLME_REASSOCIATE_RES;
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DOING_KEY_CONFIG);
			peer->connected_state = SLSI_STA_CONN_STATE_DOING_KEY_CONFIG;
		} else {
			/*Open/WEP AP*/
			slsi_mlme_reassociate_resp(sdev, dev);
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
			peer->connected_state = SLSI_STA_CONN_STATE_CONNECTED;
		}
	} else {
		/* Reassociation failed: drop the link and deactivate the VIF */
		netif_carrier_off(dev);
		slsi_mlme_del_vif(sdev, dev);
		slsi_vif_deactivated(sdev, dev);
	}

exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
/* Handle MLME-CONNECT.indication for STA mode: the firmware reports the
 * outcome of a connection attempt. Decodes the firmware result code, on
 * success caches the assoc IEs and WMM/UAPSD state and resolves the cfg80211
 * BSS, then reports the result via cfg80211_connect_result(). Open/WEP
 * connections are completed immediately; secured ones wait for key config.
 * If the driver must fail a connection the firmware thought succeeded, the
 * firmware link is torn down to keep both in sync.
 *
 * The skb may be consumed by slsi_peer_update_assoc_rsp(); it is set to NULL
 * in that case so the final slsi_kfree_skb() is a no-op.
 */
void slsi_rx_connect_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	enum ieee80211_statuscode status = WLAN_STATUS_SUCCESS;
	struct slsi_peer *peer = NULL;
	u8 *assoc_ie = NULL;
	int assoc_ie_len = 0;
	u8 *assoc_rsp_ie = NULL;
	int assoc_rsp_ie_len = 0;
	u8 bssid[ETH_ALEN];
	u16 fw_result_code;

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	fw_result_code = fapi_get_u16(skb, u.mlme_connect_ind.result_code);

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_connect_ind(vif:%d, result:0x%04x)\n",
		      fapi_get_vif(skb), fw_result_code);

	if (!ndev_vif->activated) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
		goto exit_with_lock;
	}

	if (WARN(ndev_vif->vif_type != FAPI_VIFTYPE_STATION, "Not a Station VIF\n"))
		goto exit_with_lock;

	if (ndev_vif->sta.vif_status != SLSI_VIF_STATUS_CONNECTING) {
		SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not connecting\n");
		goto exit_with_lock;
	}

	peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
	if (peer) {
		SLSI_ETHER_COPY(bssid, peer->address);
	} else {
		SLSI_NET_ERR(dev, "!!NO peer record for AP\n");
		eth_zero_addr(bssid);
	}
	sdev->assoc_result_code = fw_result_code;
	if (fw_result_code != FAPI_RESULTCODE_SUCCESS) {
		/* Decode the firmware result code ranges: 0x81xx carries an
		 * 802.11 auth status code, 0x82xx an assoc status code.
		 */
		if (fw_result_code == FAPI_RESULTCODE_AUTH_NO_ACK) {
			SLSI_INFO(sdev, "Connect failed,Result code:AUTH_NO_ACK\n");
		} else if (fw_result_code == FAPI_RESULTCODE_ASSOC_NO_ACK) {
			SLSI_INFO(sdev, "Connect failed,Result code:ASSOC_NO_ACK\n");
		} else if (fw_result_code >= 0x8100 && fw_result_code <= 0x81FF) {
			fw_result_code = fw_result_code & 0x00FF;
			SLSI_INFO(sdev, "Connect failed(Auth failure), Result code:0x%04x\n", fw_result_code);
		} else if (fw_result_code >= 0x8200 && fw_result_code <= 0x82FF) {
			fw_result_code = fw_result_code & 0x00FF;
			SLSI_INFO(sdev, "Connect failed(Assoc Failure), Result code:0x%04x\n", fw_result_code);
		} else {
			SLSI_INFO(sdev, "Connect failed,Result code:0x%04x\n", fw_result_code);
		}
#ifdef CONFIG_SCSC_LOG_COLLECTION
		/* Trigger log collection if fw result code is not success */
		scsc_log_collector_schedule_collection(SCSC_LOG_HOST_WLAN, SCSC_LOG_HOST_WLAN_REASON_CONNECT_ERR);
#endif
		status = fw_result_code;
#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
		/* WPA3/SAE (wpa_versions == 3): tell the supplicant to abort
		 * the external authentication it may have in flight.
		 */
		if (ndev_vif->sta.crypto.wpa_versions == 3) {
			const u8 *connecting_ssid = NULL;
			int r;
			struct cfg80211_external_auth_params auth_request;

			if (ndev_vif->sta.sta_bss->ies->len)
				connecting_ssid = cfg80211_find_ie(WLAN_EID_SSID, ndev_vif->sta.sta_bss->ies->data,
								   ndev_vif->sta.sta_bss->ies->len);

			auth_request.action = NL80211_EXTERNAL_AUTH_ABORT;
			memcpy(auth_request.bssid, ndev_vif->sta.sta_bss->bssid, ETH_ALEN);
			if (connecting_ssid && (connecting_ssid[1] > 0)) {
				memcpy(auth_request.ssid.ssid, &connecting_ssid[2], connecting_ssid[1]);
				auth_request.ssid.ssid_len = connecting_ssid[1];
			}
			auth_request.key_mgmt_suite = ndev_vif->sta.crypto.akm_suites[0];
			r = cfg80211_external_auth_request(dev, &auth_request, GFP_KERNEL);
			if (r)
				SLSI_NET_DBG1(dev, SLSI_MLME, "cfg80211_external_auth_request Abort failed");
		}
#endif
	} else {
		SLSI_INFO(sdev, "Received Association Response\n");
		if (!peer || !peer->assoc_ie) {
			if (peer)
				WARN(!peer->assoc_ie, "proc-started-ind not received before connect-ind");
			status = WLAN_STATUS_UNSPECIFIED_FAILURE;
		} else {
			if (peer->assoc_ie) {
				assoc_ie = peer->assoc_ie->data;
				assoc_ie_len = peer->assoc_ie->len;
			}

			slsi_peer_update_assoc_rsp(sdev, dev, peer, skb);
			/* skb is consumed by slsi_peer_update_assoc_rsp. So do not access this anymore. */
			skb = NULL;

			if (peer->assoc_resp_ie) {
				assoc_rsp_ie = peer->assoc_resp_ie->data;
				assoc_rsp_ie_len = peer->assoc_resp_ie->len;
			}

			/* this is the right place to initialize the bitmasks for
			 * acm bit and tspec establishment
			 */
			peer->wmm_acm = 0;
			peer->tspec_established = 0;
			peer->uapsd = 0;

			/* update the uapsd bitmask according to the bit values
			 * in wmm information element of association request
			 */
			if (!sta_wmm_update_uapsd(sdev, dev, peer, assoc_ie, assoc_ie_len))
				SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM uapsd\n");

			/* update the wmm ac bitmasks according to the bit values that
			 * are included in wmm ie elements of association response
			 */
			if (!sta_wmm_update_wmm_ac_ies(sdev, dev, peer, assoc_rsp_ie, assoc_rsp_ie_len))
				SLSI_NET_DBG1(dev, SLSI_MLME, "Fail to update WMM AC ies\n");

			WARN_ON(!assoc_rsp_ie_len && !assoc_rsp_ie);
		}

		WARN(!ndev_vif->sta.mlme_scan_ind_skb, "mlme_scan.ind not received before connect-ind");

		if (ndev_vif->sta.mlme_scan_ind_skb) {
			SLSI_NET_DBG1(dev, SLSI_MLME, "Sending scan indication to cfg80211, bssid: %pM\n", fapi_get_mgmt(ndev_vif->sta.mlme_scan_ind_skb)->bssid);

			/* saved skb [mlme_scan_ind] freed inside slsi_rx_scan_pass_to_cfg80211 */
			slsi_rx_scan_pass_to_cfg80211(sdev, dev, ndev_vif->sta.mlme_scan_ind_skb);
			ndev_vif->sta.mlme_scan_ind_skb = NULL;
		}

		/* cfg80211 must know the BSS we connected to; fail the
		 * connection if no entry can be resolved.
		 */
		if (!ndev_vif->sta.sta_bss) {
			if (peer)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
				ndev_vif->sta.sta_bss = cfg80211_get_bss(sdev->wiphy, NULL, peer->address, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
#else
				ndev_vif->sta.sta_bss = cfg80211_get_bss(sdev->wiphy, NULL, peer->address, NULL, 0, 0, 0);
#endif
			if (!ndev_vif->sta.sta_bss) {
				SLSI_NET_ERR(dev, "sta_bss is not available, terminating the connection (peer: %p)\n", peer);
				status = WLAN_STATUS_UNSPECIFIED_FAILURE;
			}
		}
	}

	/* cfg80211_connect_result will take a copy of any ASSOC or ASSOC RSP IEs passed to it */
	cfg80211_connect_result(dev,
				bssid,
				assoc_ie, assoc_ie_len,
				assoc_rsp_ie, assoc_rsp_ie_len,
				status,
				GFP_KERNEL);

	if (status == WLAN_STATUS_SUCCESS) {
		ndev_vif->sta.vif_status = SLSI_VIF_STATUS_CONNECTED;

		/* For Open & WEP AP,set the power mode (static IP scenario) ,send connect response and install the packet filters .
		 * For secured AP, all this would be done after handshake
		 */
		if ((peer->capabilities & WLAN_CAPABILITY_PRIVACY) &&
		    (cfg80211_find_ie(WLAN_EID_RSN, assoc_ie, assoc_ie_len) ||
		     cfg80211_find_ie(SLSI_WLAN_EID_WAPI, assoc_ie, assoc_ie_len) ||
		     cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, assoc_ie, assoc_ie_len))) {
			/*secured AP*/
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_DOING_KEY_CONFIG);
			ndev_vif->sta.resp_id = MLME_CONNECT_RES;
		} else {
			/*Open/WEP AP*/
			slsi_mlme_connect_resp(sdev, dev);
			slsi_set_packet_filters(sdev, dev);

			if (ndev_vif->ipaddress)
				slsi_mlme_powermgt(sdev, dev, ndev_vif->set_power_mode);
			slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
		}

		/* For P2PCLI, set the Connection Timeout (beacon miss) mib to 10 seconds
		 * This MIB set failure does not cause any fatal isuue. It just varies the
		 * detection time of GO's absence from 10 sec to FW default. So Do not disconnect
		 */
		if (ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT)
			SLSI_P2P_STATE_CHANGE(sdev, P2P_GROUP_FORMED_CLI);

		/*Update the firmware with cached channels*/
#ifdef CONFIG_SCSC_WLAN_WES_NCHO
		if (!sdev->device_config.roam_scan_mode && ndev_vif->vif_type == FAPI_VIFTYPE_STATION && ndev_vif->activated && ndev_vif->iftype != NL80211_IFTYPE_P2P_CLIENT) {
#else
		if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION && ndev_vif->activated && ndev_vif->iftype != NL80211_IFTYPE_P2P_CLIENT) {
#endif
			const u8 *ssid = cfg80211_find_ie(WLAN_EID_SSID, assoc_ie, assoc_ie_len);
			u8 channels[SLSI_ROAMING_CHANNELS_MAX];
			u32 channels_count = slsi_roaming_scan_configure_channels(sdev, dev, ssid, channels);

			if (channels_count)
				if (slsi_mlme_set_cached_channels(sdev, dev, channels_count, channels) != 0)
					SLSI_NET_ERR(dev, "MLME-SET-CACHED-CHANNELS.req failed\n");
		}
	} else {
		/* Firmware reported connection success, but driver reported failure to cfg80211:
		 * send mlme-disconnect.req to firmware
		 */
		if ((fw_result_code == FAPI_RESULTCODE_SUCCESS) && peer) {
			slsi_mlme_disconnect(sdev, dev, peer->address, FAPI_REASONCODE_UNSPECIFIED_REASON, true);
			slsi_handle_disconnect(sdev, dev, peer->address, FAPI_REASONCODE_UNSPECIFIED_REASON);
		} else {
			slsi_handle_disconnect(sdev, dev, NULL, FAPI_REASONCODE_UNSPECIFIED_REASON);
		}
	}

exit_with_lock:
	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
/* Handle MLME-DISCONNECT.indication: the firmware dropped the link to the
 * given peer. Schedules log collection (or dumps the mx140 log) and runs the
 * common disconnect teardown. The reason code is hard-coded to 0 here - this
 * indication does not carry one. Consumes skb.
 */
void slsi_rx_disconnect_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);

	SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);

	SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_disconnect_ind(vif:%d, MAC:%pM)\n",
		      fapi_get_vif(skb),
		      fapi_get_buff(skb, u.mlme_disconnect_ind.peer_sta_address));

#ifdef CONFIG_SCSC_LOG_COLLECTION
	scsc_log_collector_schedule_collection(SCSC_LOG_HOST_WLAN, SCSC_LOG_HOST_WLAN_REASON_DISCONNECT_IND);
#else
	mx140_log_dump();
#endif

	SLSI_INFO(sdev, "Received DEAUTH, reason = 0\n");
	slsi_handle_disconnect(sdev,
			       dev,
			       fapi_get_buff(skb, u.mlme_disconnect_ind.peer_sta_address),
			       0);

	SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
	slsi_kfree_skb(skb);
}
+
+/* Handle mlme_disconnected_ind from the firmware.
+ * Decodes the firmware reason-code encoding for logging:
+ *   0x0000-0x00FF : deauth, reason in the low byte
+ *   0x8100-0x81FF : deauth, peer reason in the low byte
+ *   0x8200-0x82FF : disassoc, peer reason in the low byte
+ *   anything else : treated as a local disconnect
+ * For an AP vif, a HOTSPOT_MAX_CLIENT_REACHED reason is translated into an
+ * NL80211_CONN_FAIL_MAX_CLIENTS event instead of a disconnect; otherwise the
+ * common disconnect handling runs. The skb is always consumed.
+ */
+void slsi_rx_disconnected_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 reason;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ reason = fapi_get_u16(skb, u.mlme_disconnected_ind.reason_code);
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_disconnected_ind(vif:%d, reason:%d, MAC:%pM)\n",
+        fapi_get_vif(skb),
+        fapi_get_u16(skb, u.mlme_disconnected_ind.reason_code),
+        fapi_get_buff(skb, u.mlme_disconnected_ind.peer_sta_address));
+
+#ifdef CONFIG_SCSC_LOG_COLLECTION
+ scsc_log_collector_schedule_collection(SCSC_LOG_HOST_WLAN, SCSC_LOG_HOST_WLAN_REASON_DISCONNECTED_IND);
+#else
+ mx140_log_dump();
+#endif
+ /* reason is u16, so "reason >= 0" was a tautology; only the upper bound
+  * is meaningful for the plain-deauth range.
+  */
+ if (reason <= 0xFF) {
+  SLSI_INFO(sdev, "Received DEAUTH, reason = %d\n", reason);
+ } else if (reason >= 0x8100 && reason <= 0x81FF) {
+  reason = reason & 0x00FF;
+  SLSI_INFO(sdev, "Received DEAUTH, reason = %d\n", reason);
+ } else if (reason >= 0x8200 && reason <= 0x82FF) {
+  reason = reason & 0x00FF;
+  SLSI_INFO(sdev, "Received DISASSOC, reason = %d\n", reason);
+ } else {
+  SLSI_INFO(sdev, "Received DEAUTH, reason = Local Disconnect <%d>\n", reason);
+ }
+
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
+  if (fapi_get_u16(skb, u.mlme_disconnected_ind.reason_code) ==
+      FAPI_REASONCODE_HOTSPOT_MAX_CLIENT_REACHED) {
+   SLSI_NET_DBG1(dev, SLSI_MLME,
+          "Sending max hotspot client reached notification to user space\n");
+   cfg80211_conn_failed(dev, fapi_get_buff(skb, u.mlme_disconnected_ind.peer_sta_address),
+          NL80211_CONN_FAIL_MAX_CLIENTS, GFP_KERNEL);
+   goto exit;
+  }
+ }
+
+ slsi_handle_disconnect(sdev,
+          dev,
+          fapi_get_buff(skb, u.mlme_disconnected_ind.peer_sta_address),
+          fapi_get_u16(skb, u.mlme_disconnected_ind.reason_code));
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Handle Procedure Started (Type = Device Discovered) indication for P2P */
+static void slsi_rx_p2p_device_discovered_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ int mgmt_len;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Freq = %d\n", ndev_vif->chan->center_freq);
+
+ /* Only Probe Request is expected as of now */
+ mgmt_len = fapi_get_mgmtlen(skb);
+ if (mgmt_len) {
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+
+ if (ieee80211_is_mgmt(mgmt->frame_control)) {
+ if (ieee80211_is_probe_req(mgmt->frame_control)) {
+ SLSI_NET_DBG3(dev, SLSI_CFG80211, "Received Probe Request\n");
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ cfg80211_rx_mgmt(&ndev_vif->wdev, ndev_vif->chan->center_freq, 0, (const u8 *)mgmt, mgmt_len, GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(dev, ndev_vif->chan->center_freq, 0, (const u8 *)mgmt, mgmt_len, GFP_ATOMIC);
+#endif
+ } else
+ SLSI_NET_ERR(dev, "Ignore Indication - Not Probe Request frame\n");
+ } else {
+ SLSI_NET_ERR(dev, "Ignore Indication - Not Management frame\n");
+ }
+ }
+}
+
+/* Handle mlme_procedure_started_ind.
+ * Dispatches on procedure_type:
+ *  - CONNECTION_STARTED: on an AP vif, creates the peer record from the
+ *    association request (enforcing the max-client and peer-index limits);
+ *    on a STA vif, stores the association request against the existing peer.
+ *  - DEVICE_DISCOVERED: P2P only; may forward a probe request to supplicant.
+ *  - ROAMING_STARTED: caches the ind skb for the roam completion path.
+ * The skb is consumed unless stored/handed off (skb is set to NULL in those
+ * paths so the common free at the end is skipped).
+ * NOTE(review): "Send Association Request" is logged for every procedure
+ * type, not only connection-started - confirm whether that is intended.
+ */
+void slsi_rx_procedure_started_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = NULL;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_procedure_started_ind(vif:%d, type:%d, peer_index:%d)\n",
+        fapi_get_vif(skb),
+        fapi_get_u16(skb, u.mlme_procedure_started_ind.procedure_type),
+        fapi_get_u16(skb, u.mlme_procedure_started_ind.peer_index));
+ SLSI_INFO(sdev, "Send Association Request\n");
+
+ if (!ndev_vif->activated) {
+  SLSI_NET_DBG1(dev, SLSI_MLME, "VIF not activated\n");
+  goto exit_with_lock;
+ }
+
+ switch (fapi_get_u16(skb, u.mlme_procedure_started_ind.procedure_type)) {
+ case FAPI_PROCEDURETYPE_CONNECTION_STARTED:
+  switch (ndev_vif->vif_type) {
+  case FAPI_VIFTYPE_AP:
+  {
+   u16 peer_index = fapi_get_u16(skb, u.mlme_procedure_started_ind.peer_index);
+
+   /* Check for MAX client */
+   if ((ndev_vif->peer_sta_records + 1) > SLSI_AP_PEER_CONNECTIONS_MAX) {
+    SLSI_NET_ERR(dev, "MAX Station limit reached. Ignore ind for peer_index:%d\n", peer_index);
+    goto exit_with_lock;
+   }
+
+   if (peer_index < SLSI_PEER_INDEX_MIN || peer_index > SLSI_PEER_INDEX_MAX) {
+    SLSI_NET_ERR(dev, "Received incorrect peer_index: %d\n", peer_index);
+    goto exit_with_lock;
+   }
+
+   /* Create the peer record keyed by the assoc request's source address */
+   peer = slsi_peer_add(sdev, dev, (fapi_get_mgmt(skb))->sa, peer_index);
+   if (!peer) {
+    SLSI_NET_ERR(dev, "Peer NOT Created\n");
+    goto exit_with_lock;
+   }
+   slsi_peer_update_assoc_req(sdev, dev, peer, skb);
+   /* skb is consumed by slsi_peer_update_assoc_req. So do not access this anymore. */
+   skb = NULL;
+   peer->connected_state = SLSI_STA_CONN_STATE_CONNECTING;
+
+   /* Flag WPS peers so the port-control logic can treat them specially */
+   if ((ndev_vif->iftype == NL80211_IFTYPE_P2P_GO) &&
+       (peer->assoc_ie) &&
+       (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPS, peer->assoc_ie->data, peer->assoc_ie->len))) {
+    SLSI_NET_DBG2(dev, SLSI_MLME, "WPS IE is present. Setting peer->is_wps to TRUE\n");
+    peer->is_wps = true;
+   }
+
+   /* Take a wakelock to avoid platform suspend before EAPOL exchanges (to avoid connection delay) */
+   slsi_wakelock_timeout(&sdev->wlan_wl_mlme, SLSI_WAKELOCK_TIME_MSEC_EAPOL);
+   break;
+  }
+  case FAPI_VIFTYPE_STATION:
+  {
+   peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+   if (WARN_ON(!peer)) {
+    SLSI_NET_ERR(dev, "Peer NOT FOUND\n");
+    goto exit_with_lock;
+   }
+   slsi_peer_update_assoc_req(sdev, dev, peer, skb);
+   /* skb is consumed by slsi_peer_update_assoc_req. So do not access this anymore. */
+   skb = NULL;
+   break;
+  }
+  default:
+   SLSI_NET_ERR(dev, "Incorrect vif type for proceduretype_connection_started\n");
+   break;
+  }
+  break;
+ case FAPI_PROCEDURETYPE_DEVICE_DISCOVERED:
+  /* Expected only in P2P Device and P2P GO role */
+  if (WARN_ON(!SLSI_IS_VIF_INDEX_P2P(ndev_vif) && (ndev_vif->iftype != NL80211_IFTYPE_P2P_GO)))
+   goto exit_with_lock;
+
+  /* Send probe request to supplicant only if in listening state. Issues were seen earlier if
+   * Probe request was sent to supplicant while waiting for GO Neg Req from peer.
+   * Send Probe request to supplicant if received in GO mode
+   */
+  if ((sdev->p2p_state == P2P_LISTENING) || (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO))
+   slsi_rx_p2p_device_discovered_ind(sdev, dev, skb);
+  break;
+ case FAPI_PROCEDURETYPE_ROAMING_STARTED:
+ {
+  SLSI_NET_DBG1(dev, SLSI_MLME, "Roaming Procedure Starting with %pM\n", (fapi_get_mgmt(skb))->bssid);
+  if (WARN_ON(ndev_vif->vif_type != FAPI_VIFTYPE_STATION))
+   goto exit_with_lock;
+  if (WARN_ON(!ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET] || !ndev_vif->peer_sta_record[SLSI_STA_PEER_QUEUESET]->valid))
+   goto exit_with_lock;
+  /* Drop any stale cached ind before storing the new one */
+  slsi_kfree_skb(ndev_vif->sta.roam_mlme_procedure_started_ind);
+  ndev_vif->sta.roam_mlme_procedure_started_ind = skb;
+  /* skb is consumed here. So remove reference to this.*/
+  skb = NULL;
+  break;
+ }
+ default:
+  SLSI_NET_DBG1(dev, SLSI_MLME, "Unknown Procedure: %d\n", fapi_get_u16(skb, u.mlme_procedure_started_ind.procedure_type));
+  goto exit_with_lock;
+ }
+
+exit_with_lock:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Handle mlme_frame_transmission_ind (tx status for an MLME-sent frame).
+ * Two main responsibilities:
+ *  1. If the host_tag matches a pending management-frame tx, report tx
+ *     status to cfg80211 (with P2P state bookkeeping on failure) and clear
+ *     the mgmt_tx_data.
+ *  2. If the host_tag matches a pending M4/EAP frame on a STA vif, complete
+ *     the deferred connect/roam/reassociate response, or disconnect when an
+ *     EAP frame hit its retry limit.
+ * The skb is always consumed.
+ */
+void slsi_rx_frame_transmission_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer;
+ u16 host_tag = fapi_get_u16(skb, u.mlme_frame_transmission_ind.host_tag);
+ u16 tx_status = fapi_get_u16(skb, u.mlme_frame_transmission_ind.transmission_status);
+ bool ack = true;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_frame_transmission_ind(vif:%d, host_tag:%d, transmission_status:%d)\n", fapi_get_vif(skb),
+        host_tag,
+        tx_status);
+
+ if (ndev_vif->mgmt_tx_data.host_tag == host_tag) {
+  struct netdev_vif *ndev_vif_to_cfg = ndev_vif;
+
+  /* If frame tx failed allow del_vif work to take care of vif deletion.
+   * This work would be queued as part of frame_tx with the wait duration
+   */
+  if (tx_status != FAPI_TRANSMISSIONSTATUS_SUCCESSFUL) {
+   ack = false;
+   if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+    if (sdev->wlan_unsync_vif_state == WLAN_UNSYNC_VIF_TX)
+     sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_ACTIVE; /*We wouldn't delete VIF*/
+   } else {
+    if (sdev->p2p_group_exp_frame != SLSI_P2P_PA_INVALID)
+     slsi_clear_offchannel_data(sdev, false);
+    else if (ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID)
+     (void)slsi_mlme_reset_dwell_time(sdev, dev);
+    ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+   }
+  }
+
+  /* Change state if frame tx was in Listen as peer response is not expected */
+  if (SLSI_IS_VIF_INDEX_P2P(ndev_vif) && (ndev_vif->mgmt_tx_data.exp_frame == SLSI_P2P_PA_INVALID)) {
+   if (delayed_work_pending(&ndev_vif->unsync.roc_expiry_work))
+    SLSI_P2P_STATE_CHANGE(sdev, P2P_LISTENING);
+   else
+    SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
+  } else if (SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif)) {
+   const struct ieee80211_mgmt *mgmt = (const struct ieee80211_mgmt *)ndev_vif->mgmt_tx_data.buf;
+
+   /* If frame transmission was initiated on P2P device vif by supplicant, then use the net_dev of that vif (i.e. p2p0) */
+   if ((mgmt) && (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)) {
+    struct net_device *ndev = slsi_get_netdev(sdev, SLSI_NET_INDEX_P2P);
+
+    SLSI_NET_DBG2(dev, SLSI_MLME, "Frame Tx was requested with device address - Change ndev_vif for tx_status\n");
+
+    ndev_vif_to_cfg = netdev_priv(ndev);
+    if (!ndev_vif_to_cfg) {
+     SLSI_NET_ERR(dev, "Getting P2P Index netdev failed\n");
+     ndev_vif_to_cfg = ndev_vif;
+    }
+   }
+  }
+  /* In WES mode the supplicant is not told about the tx status */
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+  if (!sdev->device_config.wes_mode) {
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+  cfg80211_mgmt_tx_status(&ndev_vif_to_cfg->wdev, ndev_vif->mgmt_tx_data.cookie, ndev_vif->mgmt_tx_data.buf, ndev_vif->mgmt_tx_data.buf_len, ack, GFP_KERNEL);
+#else
+  cfg80211_mgmt_tx_status(ndev_vif_to_cfg->wdev.netdev, ndev_vif->mgmt_tx_data.cookie, ndev_vif->mgmt_tx_data.buf, ndev_vif->mgmt_tx_data.buf_len, ack, GFP_KERNEL);
+#endif
+
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+  }
+#endif
+  (void)slsi_set_mgmt_tx_data(ndev_vif, 0, 0, NULL, 0);
+ }
+
+ if ((tx_status == FAPI_TRANSMISSIONSTATUS_SUCCESSFUL) || (tx_status == FAPI_TRANSMISSIONSTATUS_RETRY_LIMIT)) {
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+  /* Track tx completion of ARP probes fired by the enhanced ARP detector */
+  if (ndev_vif->enhanced_arp_detect_enabled && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION)) {
+   int i = 0;
+
+   for (i = 0; i < SLSI_MAX_ARP_SEND_FRAME; i++) {
+    if (ndev_vif->enhanced_arp_host_tag[i] == host_tag) {
+     ndev_vif->enhanced_arp_host_tag[i] = 0;
+     ndev_vif->enhanced_arp_stats.arp_req_rx_count_by_lower_mac++;
+     if (tx_status == FAPI_TRANSMISSIONSTATUS_SUCCESSFUL)
+      ndev_vif->enhanced_arp_stats.arp_req_count_tx_success++;
+     break;
+    }
+   }
+  }
+#endif
+  /* M4 was sent: now send the deferred response to the firmware */
+  if ((ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+      (ndev_vif->sta.m4_host_tag == host_tag)) {
+   switch (ndev_vif->sta.resp_id) {
+   case MLME_ROAMED_RES:
+    slsi_mlme_roamed_resp(sdev, dev);
+    peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+    if (WARN_ON(!peer))
+     break;
+    slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+    cac_update_roam_traffic_params(sdev, dev);
+    break;
+   case MLME_CONNECT_RES:
+    slsi_mlme_connect_resp(sdev, dev);
+    slsi_set_packet_filters(sdev, dev);
+    peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+    if (WARN_ON(!peer))
+     break;
+    slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+    break;
+   case MLME_REASSOCIATE_RES:
+    slsi_mlme_reassociate_resp(sdev, dev);
+    break;
+   default:
+    break;
+   }
+   ndev_vif->sta.m4_host_tag = 0;
+   ndev_vif->sta.resp_id = 0;
+  }
+  if (tx_status == FAPI_TRANSMISSIONSTATUS_RETRY_LIMIT) {
+   /* EAP frame could not be delivered: drop the link */
+   if ((ndev_vif->iftype == NL80211_IFTYPE_STATION) &&
+       (ndev_vif->sta.eap_hosttag == host_tag)) {
+    if (ndev_vif->sta.sta_bss) {
+     SLSI_NET_WARN(dev, "Disconnect as EAP frame transmission failed\n");
+     slsi_mlme_disconnect(sdev, dev, ndev_vif->sta.sta_bss->bssid, FAPI_REASONCODE_UNSPECIFIED_REASON, false);
+    } else {
+     SLSI_NET_WARN(dev, "EAP frame transmission failed, sta_bss not available\n");
+    }
+   }
+   ndev_vif->stats.tx_errors++;
+  }
+ } else {
+  ndev_vif->stats.tx_errors++;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Handle mlme_received_frame_ind: a frame delivered over MLME rather than
+ * the data path. Two descriptor types are handled:
+ *  - IEEE802_11_FRAME: auth frames, (optionally) WIPS beacons, and action
+ *    frames. P2P public-action frames additionally drive the P2P state
+ *    machine before being forwarded to cfg80211.
+ *  - IEEE802_3_FRAME: EAPOL/EAP/DHCP-class data frames; these are logged for
+ *    diagnostics and injected into the network stack via netif_rx_ni() (the
+ *    skb ownership passes to the stack in that path).
+ * In all other paths the skb is freed here.
+ */
+void slsi_rx_received_frame_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u16 data_unit_descriptor = fapi_get_u16(skb, u.mlme_received_frame_ind.data_unit_descriptor);
+ u16 frequency = SLSI_FREQ_FW_TO_HOST(fapi_get_u16(skb, u.mlme_received_frame_ind.channel_frequency));
+ u8 *eapol = NULL;
+ u8 *eap = NULL;
+ u16 protocol = 0;
+ u32 dhcp_message_type = SLSI_DHCP_MESSAGE_TYPE_INVALID;
+ u16 eap_length = 0;
+
+ SLSI_NET_DBG2(dev, SLSI_MLME, "mlme_received_frame_ind(vif:%d, data descriptor:%d, freq:%d)\n",
+        fapi_get_vif(skb),
+        data_unit_descriptor,
+        frequency);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (data_unit_descriptor == FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME) {
+  struct ieee80211_mgmt *mgmt;
+  int mgmt_len;
+
+  mgmt_len = fapi_get_mgmtlen(skb);
+  if (!mgmt_len)
+   goto exit;
+  mgmt = fapi_get_mgmt(skb);
+  /* Auth frames go straight up to cfg80211 */
+  if (ieee80211_is_auth(mgmt->frame_control)) {
+   cfg80211_rx_mgmt(&ndev_vif->wdev, frequency, 0, (const u8 *)mgmt, mgmt_len, GFP_ATOMIC);
+   goto exit;
+  }
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+  if (ndev_vif->is_wips_running && ieee80211_is_beacon(mgmt->frame_control) &&
+      SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+   slsi_handle_wips_beacon(sdev, dev, skb, mgmt, mgmt_len);
+   goto exit;
+  }
+#endif
+  /* Beyond this point only action frames are expected */
+  if (WARN_ON(!(ieee80211_is_action(mgmt->frame_control))))
+   goto exit;
+  if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+   if (slsi_is_wes_action_frame(mgmt)) {
+    SLSI_NET_DBG1(dev, SLSI_CFG80211, "Received NCHO WES VS action frame\n");
+    if (!sdev->device_config.wes_mode)
+     goto exit;
+   } else {
+#endif
+   if (mgmt->u.action.category == WLAN_CATEGORY_WMM) {
+    cac_rx_wmm_action(sdev, dev, mgmt, mgmt_len);
+   } else {
+    slsi_wlan_dump_public_action_subtype(sdev, mgmt, false);
+    if (sdev->wlan_unsync_vif_state == WLAN_UNSYNC_VIF_TX)
+     sdev->wlan_unsync_vif_state = WLAN_UNSYNC_VIF_ACTIVE;
+   }
+#ifdef CONFIG_SCSC_WLAN_WES_NCHO
+   }
+#endif
+  } else {
+   int subtype = slsi_p2p_get_public_action_subtype(mgmt);
+
+   SLSI_NET_DBG2(dev, SLSI_CFG80211, "Received action frame (%s)\n", slsi_p2p_pa_subtype_text(subtype));
+
+   /* Frame expected by an ongoing P2P procedure on the unsync vif */
+   if (SLSI_IS_P2P_UNSYNC_VIF(ndev_vif) && (ndev_vif->mgmt_tx_data.exp_frame != SLSI_P2P_PA_INVALID) && (subtype == ndev_vif->mgmt_tx_data.exp_frame)) {
+    if (sdev->p2p_state == P2P_LISTENING)
+     SLSI_NET_WARN(dev, "Driver in incorrect P2P state (P2P_LISTENING)");
+
+    cancel_delayed_work(&ndev_vif->unsync.del_vif_work);
+    /* Sending down the Unset channel is delayed when listen work expires in
+     * middle of P2P procedure. For example,When Listen Work expires after
+     * sending provision discovery req,unset channel is not sent to FW.
+     * After Receiving the PROV_DISC_RESP, if listen work is not present
+     * Unset channel to be sent down.Similarly During P2P Negotiation procedure,
+     * Unset channel is not sent to FW.Once Negotiation is complete,
+     * if listen work is not present Unset channel to be sent down.
+     */
+    if ((subtype == SLSI_P2P_PA_GO_NEG_CFM) || (subtype == SLSI_P2P_PA_PROV_DISC_RSP)) {
+     ndev_vif->drv_in_p2p_procedure = false;
+     if (!delayed_work_pending(&ndev_vif->unsync.roc_expiry_work)) {
+      slsi_mlme_unset_channel_req(ndev_vif->sdev, ndev_vif->wdev.netdev);
+      ndev_vif->driver_channel = 0;
+     }
+    }
+
+    ndev_vif->mgmt_tx_data.exp_frame = SLSI_P2P_PA_INVALID;
+    (void)slsi_mlme_reset_dwell_time(sdev, dev);
+    if (delayed_work_pending(&ndev_vif->unsync.roc_expiry_work)) {
+     SLSI_P2P_STATE_CHANGE(sdev, P2P_LISTENING);
+    } else {
+     queue_delayed_work(sdev->device_wq, &ndev_vif->unsync.del_vif_work,
+          msecs_to_jiffies(SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC));
+     SLSI_P2P_STATE_CHANGE(sdev, P2P_IDLE_VIF_ACTIVE);
+    }
+   } else if ((sdev->p2p_group_exp_frame != SLSI_P2P_PA_INVALID) && (sdev->p2p_group_exp_frame == subtype)) {
+    SLSI_NET_DBG2(dev, SLSI_MLME, "Expected action frame (%s) received on Group VIF\n", slsi_p2p_pa_subtype_text(subtype));
+    slsi_clear_offchannel_data(sdev,
+          (!SLSI_IS_VIF_INDEX_P2P_GROUP(sdev,
+              ndev_vif)) ? true : false);
+   }
+  }
+
+  /* cfg80211_rx_mgmt took a wdev (instead of netdev) from 3.10.9 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+  cfg80211_rx_mgmt(&ndev_vif->wdev, frequency, 0, (const u8 *)mgmt, mgmt_len, GFP_ATOMIC);
+#else
+  cfg80211_rx_mgmt(dev, frequency, 0, (const u8 *)mgmt, mgmt_len, GFP_ATOMIC);
+#endif
+ } else if (data_unit_descriptor == FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME) {
+  struct slsi_peer *peer = NULL;
+  struct ethhdr *ehdr = (struct ethhdr *)fapi_get_data(skb);
+
+  peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_source);
+  if (!peer) {
+   SLSI_DBG1(sdev, SLSI_RX, "drop packet as No peer found\n");
+   goto exit;
+  }
+
+  /* strip signal and any signal/bulk roundings/offsets */
+  skb_pull(skb, fapi_get_siglen(skb));
+
+  skb->dev = dev;
+  skb->ip_summed = CHECKSUM_NONE;
+
+  ndev_vif->stats.rx_packets++;
+  ndev_vif->stats.rx_bytes += skb->len;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 10, 0))
+  dev->last_rx = jiffies;
+#endif
+  /* Storing Data for Logging Information */
+  /* NOTE(review): the 99 and 9 byte thresholds below look like minimum
+   * EAPOL-key / EAP frame sizes - confirm against the frame layouts.
+   */
+  if ((skb->len - sizeof(struct ethhdr)) >= 99)
+   eapol = skb->data + sizeof(struct ethhdr);
+
+  if ((skb->len - sizeof(struct ethhdr)) >= 9) {
+   eap_length = (skb->len - sizeof(struct ethhdr)) - 4;
+   eap = skb->data + sizeof(struct ethhdr);
+  }
+  if (skb->len >= 285 && slsi_is_dhcp_packet(skb->data) != SLSI_TX_IS_NOT_DHCP)
+   dhcp_message_type = skb->data[284];
+
+  skb->protocol = eth_type_trans(skb, dev);
+  protocol = ntohs(skb->protocol);
+  if (protocol == ETH_P_PAE) {
+   /* Classify the EAPOL-key message (M1..M4) from key-info bits */
+   if (eapol && eapol[SLSI_EAPOL_IEEE8021X_TYPE_POS] == SLSI_IEEE8021X_TYPE_EAPOL_KEY) {
+    if ((eapol[SLSI_EAPOL_TYPE_POS] == SLSI_EAPOL_TYPE_RSN_KEY ||
+         eapol[SLSI_EAPOL_TYPE_POS] == SLSI_EAPOL_TYPE_WPA_KEY) &&
+        (eapol[SLSI_EAPOL_KEY_INFO_LOWER_BYTE_POS] &
+         SLSI_EAPOL_KEY_INFO_KEY_TYPE_BIT_IN_LOWER_BYTE) &&
+        (eapol[SLSI_EAPOL_KEY_INFO_HIGHER_BYTE_POS] &
+         SLSI_EAPOL_KEY_INFO_MIC_BIT_IN_HIGHER_BYTE) &&
+        (eapol[SLSI_EAPOL_KEY_DATA_LENGTH_HIGHER_BYTE_POS] == 0) &&
+        (eapol[SLSI_EAPOL_KEY_DATA_LENGTH_LOWER_BYTE_POS] == 0)) {
+     SLSI_INFO(sdev, "Received 4way-H/S, M4\n");
+    } else if (!(eapol[SLSI_EAPOL_KEY_INFO_HIGHER_BYTE_POS] &
+         SLSI_EAPOL_KEY_INFO_MIC_BIT_IN_HIGHER_BYTE)) {
+     SLSI_INFO(sdev, "Received 4way-H/S, M1\n");
+    } else if (eapol[SLSI_EAPOL_KEY_INFO_HIGHER_BYTE_POS] &
+        SLSI_EAPOL_KEY_INFO_SECURE_BIT_IN_HIGHER_BYTE) {
+     SLSI_INFO(sdev, "Received 4way-H/S, M3\n");
+    } else {
+     SLSI_INFO(sdev, "Received 4way-H/S, M2\n");
+    }
+   } else if (eap && eap[SLSI_EAPOL_IEEE8021X_TYPE_POS] == SLSI_IEEE8021X_TYPE_EAP_PACKET) {
+    if (eap[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_REQUEST)
+     SLSI_INFO(sdev, "Received EAP-Request (%d)\n", eap_length);
+    else if (eap[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_RESPONSE)
+     SLSI_INFO(sdev, "Received EAP-Response (%d)\n", eap_length);
+    else if (eap[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_SUCCESS)
+     SLSI_INFO(sdev, "Received EAP-Success (%d)\n", eap_length);
+    else if (eap[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_FAILURE)
+     SLSI_INFO(sdev, "Received EAP-Failure (%d)\n", eap_length);
+   }
+  } else if (protocol == ETH_P_IP) {
+   if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_DISCOVER)
+    SLSI_INFO(sdev, "Received DHCP [DISCOVER]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_OFFER)
+    SLSI_INFO(sdev, "Received DHCP [OFFER]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_REQUEST)
+    SLSI_INFO(sdev, "Received DHCP [REQUEST]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_DECLINE)
+    SLSI_INFO(sdev, "Received DHCP [DECLINE]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_ACK)
+    SLSI_INFO(sdev, "Received DHCP [ACK]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_NAK)
+    SLSI_INFO(sdev, "Received DHCP [NAK]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_RELEASE)
+    SLSI_INFO(sdev, "Received DHCP [RELEASE]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_INFORM)
+    SLSI_INFO(sdev, "Received DHCP [INFORM]\n");
+   else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_FORCERENEW)
+    SLSI_INFO(sdev, "Received DHCP [FORCERENEW]\n");
+   else
+    SLSI_INFO(sdev, "Received DHCP [INVALID]\n");
+  }
+  slsi_dbg_untrack_skb(skb);
+  SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+  SLSI_DBG2(sdev, SLSI_MLME, "pass %u bytes up (proto:%d)\n", skb->len, ntohs(skb->protocol));
+  /* skb ownership passes to the network stack here - do not free below */
+  netif_rx_ni(skb);
+  slsi_wakelock_timeout(&sdev->wlan_wl_mlme, SLSI_WAKELOCK_TIME_MSEC_EAPOL);
+  return;
+ }
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Handle mlme_mic_failure_ind (TKIP Michael MIC failure).
+ * Validates that the reported key type is group or pairwise and forwards the
+ * failure to cfg80211 so the supplicant can run TKIP countermeasures.
+ * The skb is always consumed.
+ */
+void slsi_rx_mic_failure_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ u8 *mac_addr;
+ u16 key_type, key_id;
+ enum nl80211_key_type nl_key_type;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* Fix: read the peer address from the mic_failure signal's own layout,
+  * not via the unrelated mlme_disconnected_ind union member.
+  */
+ mac_addr = fapi_get_buff(skb, u.mlme_mic_failure_ind.peer_sta_address);
+ key_type = fapi_get_u16(skb, u.mlme_mic_failure_ind.key_type);
+ key_id = fapi_get_u16(skb, u.mlme_mic_failure_ind.key_id);
+
+ SLSI_NET_DBG1(dev, SLSI_MLME, "mlme_mic_failure_ind(vif:%d, MAC:%pM, key_type:%d, key_id:%d)\n",
+        fapi_get_vif(skb), mac_addr, key_type, key_id);
+
+ if (WARN_ON((key_type != FAPI_KEYTYPE_GROUP) && (key_type != FAPI_KEYTYPE_PAIRWISE)))
+  goto exit;
+
+ nl_key_type = (key_type == FAPI_KEYTYPE_GROUP) ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(dev, mac_addr, nl_key_type, key_id, NULL, GFP_KERNEL);
+
+exit:
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/**
+ * Handler for mlme_listen_end_ind.
+ * The listen_end_ind would be received when the total Listen Offloading time is over.
+ * Indicate completion of Listen Offloading to supplicant by sending Cancel-ROC event
+ * with cookie 0xffff. Queue delayed work for unsync vif deletion.
+ */
+void slsi_rx_listen_end_ind(struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG2(dev, SLSI_CFG80211, "Inform completion of P2P Listen Offloading\n");
+
+ /* Report ROC expiry with the reserved cookie 0xffff used for Listen
+  * Offloading (API took a wdev instead of a netdev from 3.10.9).
+  */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ cfg80211_remain_on_channel_expired(&ndev_vif->wdev, 0xffff, ndev_vif->chan, GFP_KERNEL);
+#else
+ cfg80211_remain_on_channel_expired(ndev_vif->wdev.netdev, 0xffff, ndev_vif->chan, ndev_vif->channel_type, GFP_KERNEL);
+#endif
+
+ ndev_vif->unsync.listen_offload = false;
+
+ /* Unsync vif is no longer needed; schedule its deletion */
+ slsi_p2p_queue_unsync_vif_del_work(ndev_vif, SLSI_P2P_UNSYNC_VIF_EXTRA_MSEC);
+
+ SLSI_P2P_STATE_CHANGE(ndev_vif->sdev, P2P_IDLE_VIF_ACTIVE);
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Return 1 when a received indication id satisfies the indication a blocking
+ * caller is waiting for. An exact id match always qualifies; in addition, a
+ * waiter for MLME_DISCONNECT_IND is also satisfied by MLME_DISCONNECTED_IND.
+ */
+static int slsi_rx_wait_ind_match(u16 recv_id, u16 wait_id)
+{
+ int disconnect_alias = (wait_id == MLME_DISCONNECT_IND) &&
+          (recv_id == MLME_DISCONNECTED_IND);
+
+ return (recv_id == wait_id || disconnect_alias) ? 1 : 0;
+}
+
+/* Route a received signal to a thread blocked waiting for it.
+ * Confirms (cfm) must always have a waiter: if the id/pid match the pending
+ * wait (per-vif when the vif has a netdev, otherwise the global sig_wait),
+ * the skb is handed to the waiter and its completion is signalled. An
+ * unexpected MLME_SEND_FRAME_CFM is silently dropped (data frames sent over
+ * MLME are not waited on). Indications (ind) are matched the same way via
+ * slsi_rx_wait_ind_match.
+ * Returns 0 when the skb was consumed (delivered or dropped), -EINVAL when
+ * the caller still owns the skb and must process it through the normal path.
+ */
+int slsi_rx_blocking_signals(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ u16 pid, id;
+ struct slsi_sig_send *sig_wait;
+ u16 vif = fapi_get_vif(skb);
+
+ sig_wait = &sdev->sig_wait;
+ id = fapi_get_sigid(skb);
+ pid = fapi_get_u16(skb, receiver_pid);
+
+ /* ALL mlme cfm signals MUST have blocking call waiting for it (Per Vif or Global) */
+ if (fapi_is_cfm(skb)) {
+  struct net_device *dev;
+  struct netdev_vif *ndev_vif;
+
+  /* RCU protects the netdev lookup; the spinlock protects sig_wait */
+  rcu_read_lock();
+  dev = slsi_get_netdev_rcu(sdev, vif);
+  if (dev) {
+   ndev_vif = netdev_priv(dev);
+   sig_wait = &ndev_vif->sig_wait;
+  }
+  spin_lock_bh(&sig_wait->send_signal_lock);
+  if (id == sig_wait->cfm_id && pid == sig_wait->process_id) {
+   /* A previous undelivered cfm here is a protocol error */
+   if (WARN_ON(sig_wait->cfm))
+    slsi_kfree_skb(sig_wait->cfm);
+   sig_wait->cfm = skb;
+   spin_unlock_bh(&sig_wait->send_signal_lock);
+   complete(&sig_wait->completion);
+   rcu_read_unlock();
+   return 0;
+  }
+  /**
+   * Important data frames such as EAPOL, ARP, DHCP are send
+   * over MLME. For these frames driver does not block on confirms.
+   * So there can be unexpected confirms here for such data frames.
+   * These confirms are treated as normal and is silently dropped
+   * here
+   */
+  if (id == MLME_SEND_FRAME_CFM) {
+   spin_unlock_bh(&sig_wait->send_signal_lock);
+   rcu_read_unlock();
+   slsi_kfree_skb(skb);
+   return 0;
+  }
+
+  SLSI_DBG1(sdev, SLSI_MLME, "Unexpected cfm(0x%.4x, pid:0x%.4x, vif:%d)\n", id, pid, vif);
+  spin_unlock_bh(&sig_wait->send_signal_lock);
+  rcu_read_unlock();
+  return -EINVAL;
+ }
+ /* Some mlme ind signals have a blocking call waiting (Per Vif or Global) */
+ if (fapi_is_ind(skb)) {
+  struct net_device *dev;
+  struct netdev_vif *ndev_vif;
+
+  rcu_read_lock();
+  dev = slsi_get_netdev_rcu(sdev, vif);
+  if (dev) {
+   ndev_vif = netdev_priv(dev);
+   sig_wait = &ndev_vif->sig_wait;
+  }
+  spin_lock_bh(&sig_wait->send_signal_lock);
+  if (slsi_rx_wait_ind_match(id, sig_wait->ind_id) && pid == sig_wait->process_id) {
+   if (WARN_ON(sig_wait->ind))
+    slsi_kfree_skb(sig_wait->ind);
+   sig_wait->ind = skb;
+   spin_unlock_bh(&sig_wait->send_signal_lock);
+   complete(&sig_wait->completion);
+   rcu_read_unlock();
+   return 0;
+  }
+  spin_unlock_bh(&sig_wait->send_signal_lock);
+  rcu_read_unlock();
+ }
+ return -EINVAL;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SAP_H__
+#define __SAP_H__
+
+/* Number of SAPs */
+#define SAP_TOTAL 4
+
+/* SAP class identifiers (used as sap_api.sap_class) */
+#define SAP_MLME 0
+#define SAP_MA  1
+#define SAP_DBG  2
+#define SAP_TST  3
+
+/* Max number of versions supported */
+#define SAP_MAX_VER 2
+
+/* Versions are encoded as major in the high byte, minor in the low byte */
+#define SAP_MAJOR(version)    ((version & 0xff00) >> 8)
+#define SAP_MINOR(version)    (version & 0xff)
+
+struct slsi_dev;
+struct sk_buff;
+
+/* Per-SAP registration record handed to the HIP layer.
+ * All callbacks receive the owning slsi_dev.
+ */
+struct sap_api {
+ u8  sap_class;                 /* one of SAP_MLME/SAP_MA/SAP_DBG/SAP_TST */
+ u16 sap_versions[SAP_MAX_VER]; /* versions this driver build supports */
+ int (*sap_version_supported)(u16 version); /* 0 if version acceptable */
+ int (*sap_handler)(struct slsi_dev *sdev, struct sk_buff *skb); /* rx entry */
+ int (*sap_txdone)(struct slsi_dev *sdev, u16 colour); /* tx-complete hook */
+ int (*sap_notifier)(struct slsi_dev *sdev, unsigned long event); /* lifecycle events */
+};
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/types.h>
+#include "debug.h"
+#include "dev.h"
+#include "sap.h"
+#include "sap_dbg.h"
+#include "hip.h"
+
+#define SUPPORTED_OLD_VERSION 0
+
+static int sap_dbg_version_supported(u16 version);
+static int sap_dbg_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb);
+
+/* DBG SAP registration record; handed to the HIP layer by sap_dbg_init().
+ * This SAP has no txdone/notifier callbacks.
+ */
+static struct sap_api sap_dbg = {
+ .sap_class = SAP_DBG,
+ .sap_version_supported = sap_dbg_version_supported,
+ .sap_handler = sap_dbg_rx_handler,
+ .sap_versions = { FAPI_DEBUG_SAP_VERSION, SUPPORTED_OLD_VERSION },
+};
+
+/* Accept a firmware-reported DBG SAP version when its major number matches
+ * one of the majors this driver was built against; minor differences are
+ * tolerated. Returns 0 on match, -EINVAL otherwise.
+ */
+static int sap_dbg_version_supported(u16 version)
+{
+ unsigned int major = SAP_MAJOR(version);
+ unsigned int minor = SAP_MINOR(version);
+ const u16 *ver = sap_dbg.sap_versions;
+ const u16 *end = ver + SAP_MAX_VER;
+
+ SLSI_INFO_NODEV("Reported version: %d.%d\n", major, minor);
+
+ for (; ver < end; ver++) {
+  if (SAP_MAJOR(*ver) == major)
+   return 0;
+ }
+
+ SLSI_ERR_NODEV("Version %d.%d Not supported\n", major, minor);
+
+ return -EINVAL;
+}
+
+/* Decode and log a firmware debug indication.
+ * DEBUG_FAULT_IND logs a firmware fault record; DEBUG_WORD12IND logs the
+ * 12-word debug payload and bumps sdev->debug_inds. The skb is always
+ * consumed. dev is unused (debug inds are not vif-specific here).
+ */
+static void slsi_rx_debug(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ u16 id = fapi_get_u16(skb, id);
+
+ SLSI_UNUSED_PARAMETER(dev);
+
+ switch (id) {
+ case DEBUG_FAULT_IND:
+  /* cpu field: 0x8000 = MAC core, 0x4000 = PHY core */
+  SLSI_WARN(sdev, "WF_FW_INFO: |cpu %s|id 0x%04X|arg 0x%08X|count %d|timestamp %10u|\n",
+     ((fapi_get_u16(skb, u.debug_fault_ind.cpu) == 0x8000) ? "MAC" :
+      (fapi_get_u16(skb, u.debug_fault_ind.cpu) == 0x4000) ? "PHY" : "???"),
+     fapi_get_u16(skb, u.debug_fault_ind.faultid),
+     fapi_get_u32(skb, u.debug_fault_ind.arg),
+     fapi_get_u16(skb, u.debug_fault_ind.count),
+     fapi_get_u32(skb, u.debug_fault_ind.timestamp));
+  break;
+ case DEBUG_WORD12IND:
+  atomic_inc(&sdev->debug_inds);
+  SLSI_DBG4(sdev, SLSI_FW_TEST, "FW DEBUG(id:%d, subid:%d, vif:%d, time:%u) %04X %04X %04X %04X %04X %04X %04X %04X %04X %04X %04X %04X\n",
+     fapi_get_u16(skb, u.debug_word12_ind.module_id),
+     fapi_get_u16(skb, u.debug_word12_ind.module_sub_id),
+     fapi_get_vif(skb),
+     fapi_get_u32(skb, u.debug_word12_ind.timestamp),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[0]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[1]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[2]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[3]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[4]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[5]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[6]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[7]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[8]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[9]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[10]),
+     fapi_get_u16(skb, u.debug_word12_ind.debug_words[11]));
+  break;
+ default:
+  SLSI_DBG1(sdev, SLSI_MLME, "Unhandled Debug Ind: 0x%.4x\n", id);
+  break;
+ }
+ slsi_kfree_skb(skb);
+}
+
+/* Dispatch a DBG SAP indication to the proper handler.
+ * Fault/word12/generic inds go to slsi_rx_debug (not vif-specific); packet
+ * sink/gen reports are routed to their vif's netdev (dropped when the vif has
+ * no netdev). Every path consumes the skb. Always returns 0.
+ */
+static int slsi_rx_dbg_sap(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ u16 id = fapi_get_u16(skb, id);
+ u16 vif = fapi_get_vif(skb);
+ struct net_device *dev;
+
+ switch (id) {
+ case DEBUG_FAULT_IND:
+ case DEBUG_WORD12IND:
+ case DEBUG_GENERIC_IND:
+  slsi_rx_debug(sdev, NULL, skb);
+  break;
+ case DEBUG_PKT_SINK_REPORT_IND:
+ {
+  /* RCU guards the vif -> netdev lookup */
+  rcu_read_lock();
+  dev = slsi_get_netdev_rcu(sdev, vif);
+  if (!dev) {
+   rcu_read_unlock();
+   slsi_kfree_skb(skb);
+   break;
+  }
+  slsi_rx_sink_report(sdev, dev, skb);
+  rcu_read_unlock();
+  break;
+ }
+ case DEBUG_PKT_GEN_REPORT_IND:
+ {
+  rcu_read_lock();
+  dev = slsi_get_netdev_rcu(sdev, vif);
+  if (!dev) {
+   rcu_read_unlock();
+   slsi_kfree_skb(skb);
+   break;
+  }
+  slsi_rx_gen_report(sdev, dev, skb);
+  rcu_read_unlock();
+  break;
+ }
+ default:
+  slsi_kfree_skb(skb);
+  SLSI_ERR(sdev, "Unhandled Ind: 0x%.4x\n", id);
+  break;
+ }
+
+ return 0;
+}
+
+/* Workqueue entry for the DBG SAP rx path: drains the queued skbs one by one
+ * through slsi_rx_dbg_sap, holding the wlan wakelock for the duration so the
+ * platform does not suspend mid-drain.
+ */
+void slsi_rx_dbg_sap_work(struct work_struct *work)
+{
+ struct slsi_skb_work *w = container_of(work, struct slsi_skb_work, work);
+ struct slsi_dev *sdev = w->sdev;
+ struct sk_buff *skb = slsi_skb_work_dequeue(w);
+
+ slsi_wakelock(&sdev->wlan_wl);
+ while (skb) {
+  slsi_debug_frame(sdev, NULL, skb, "RX");
+  slsi_rx_dbg_sap(sdev, skb);
+  skb = slsi_skb_work_dequeue(w);
+ }
+ slsi_wakeunlock(&sdev->wlan_wl);
+}
+
+/* DBG SAP rx entry point (sap_api.sap_handler).
+ * First offers the signal to any blocked waiter; otherwise defers it to the
+ * rx_dbg_sap workqueue. Always returns 0 (skb ownership is handed off either
+ * way).
+ */
+static int sap_dbg_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ /* DEBUG SAP has a generic confirm. Theoretically, that
+  * can mean upper layer code can block on the confirm.
+  */
+ if (slsi_rx_blocking_signals(sdev, skb) == 0)
+  return 0;
+
+ slsi_skb_work_enqueue(&sdev->rx_dbg_sap, skb);
+ return 0;
+}
+
+/* Register the DBG SAP with the HIP layer. Always returns 0. */
+int sap_dbg_init(void)
+{
+ SLSI_INFO_NODEV("Registering SAP\n");
+
+ slsi_hip_sap_register(&sap_dbg);
+
+ return 0;
+}
+
+/* Unregister the DBG SAP from the HIP layer. Always returns 0. */
+int sap_dbg_deinit(void)
+{
+ SLSI_INFO_NODEV("Unregistering SAP\n");
+ slsi_hip_sap_unregister(&sap_dbg);
+ return 0;
+}
--- /dev/null
+
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SAP_DBG_H__
+#define __SAP_DBG_H__
+
+/* Register/unregister the DBG SAP with the HIP layer; both return 0. */
+int sap_dbg_init(void);
+int sap_dbg_deinit(void);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/types.h>
+#include "debug.h"
+#include "dev.h"
+#include "sap.h"
+#include "sap_ma.h"
+#include "hip.h"
+#include "ba.h"
+#include "mgt.h"
+#include "nl80211_vendor.h"
+#include "hip4_sampler.h"
+#include "traffic_monitor.h"
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+
+#define SUPPORTED_OLD_VERSION 0
+
+static int sap_ma_version_supported(u16 version);
+static int sap_ma_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb);
+static int sap_ma_txdone(struct slsi_dev *sdev, u16 colour);
+static int sap_ma_notifier(struct slsi_dev *sdev, unsigned long event);
+
+/* MA (data) SAP registration record; unlike the DBG SAP this one also
+ * provides txdone and lifecycle-notifier callbacks.
+ */
+static struct sap_api sap_ma = {
+ .sap_class = SAP_MA,
+ .sap_version_supported = sap_ma_version_supported,
+ .sap_handler = sap_ma_rx_handler,
+ .sap_versions = { FAPI_DATA_SAP_VERSION, SUPPORTED_OLD_VERSION },
+ .sap_txdone = sap_ma_txdone,
+ .sap_notifier = sap_ma_notifier,
+};
+
+/* React to WiFi service lifecycle events on the data SAP.
+ * On SCSC_WIFI_STOP every netdev in the wlan..p2px range has its tx
+ * queues stopped; the other events need no data-plane action here.
+ * Returns 0, or -EIO for an out-of-range event code.
+ */
+static int sap_ma_notifier(struct slsi_dev *sdev, unsigned long event)
+{
+    uint vif_idx;
+
+    SLSI_INFO_NODEV("Notifier event received: %lu\n", event);
+    if (event >= SCSC_MAX_NOTIFIER)
+        return -EIO;
+
+    switch (event) {
+    case SCSC_WIFI_STOP:
+        SLSI_INFO_NODEV("Stop netdev queues\n");
+        rcu_read_lock();
+        for (vif_idx = SLSI_NET_INDEX_WLAN; vif_idx <= SLSI_NET_INDEX_P2PX_SWLAN; vif_idx++) {
+            struct net_device *ndev = slsi_get_netdev_rcu(sdev, vif_idx);
+
+            if (ndev && !netif_queue_stopped(ndev))
+                netif_tx_stop_all_queues(ndev);
+        }
+        rcu_read_unlock();
+        break;
+    case SCSC_WIFI_FAILURE_RESET:
+        SLSI_DBG1_NODEV(SLSI_NETDEV, "Netdevs queues will not be restarted - recovery will take care of it\n");
+        break;
+    case SCSC_WIFI_SUSPEND:
+    case SCSC_WIFI_RESUME:
+        /* No data-plane action required. */
+        break;
+    default:
+        SLSI_INFO_NODEV("Unknown event code %lu\n", event);
+        break;
+    }
+
+    return 0;
+}
+
+/* Accept a reported SAP version when its major number matches one of
+ * the versions advertised in sap_ma.sap_versions.
+ * Returns 0 if supported, -EINVAL otherwise.
+ */
+static int sap_ma_version_supported(u16 version)
+{
+    unsigned int major = SAP_MAJOR(version);
+    unsigned int minor = SAP_MINOR(version);
+    u8 idx;
+
+    SLSI_INFO_NODEV("Reported version: %d.%d\n", major, minor);
+
+    for (idx = 0; idx < SAP_MAX_VER; idx++) {
+        if (SAP_MAJOR(sap_ma.sap_versions[idx]) == major)
+            return 0;
+    }
+
+    SLSI_ERR_NODEV("Version %d.%d Not supported\n", major, minor);
+    return -EINVAL;
+}
+
+/* Split an A-MSDU aggregate into individual MSDU skbs.
+ *
+ * Each subframe is copied out of @skb, its LLC/SNAP header is collapsed
+ * into a plain Ethernet header, and the result is appended to
+ * @msdu_list. Subframes whose destination MAC is all zeros are
+ * filtered out.
+ *
+ * The aggregate @skb is always consumed (freed) by this function.
+ * Returns 0 on success; -EINVAL for a malformed length field or
+ * -ENOMEM on allocation failure, in which case @msdu_list is purged.
+ */
+static int slsi_rx_amsdu_deaggregate(struct net_device *dev, struct sk_buff *skb, struct sk_buff_head *msdu_list)
+{
+ unsigned int msdu_len;
+ unsigned int subframe_len;
+ int padding;
+ struct sk_buff *subframe = NULL;
+ bool last_sub_frame = false;
+ const unsigned char mac_0[ETH_ALEN] = { 0 };
+ bool skip_frame = false;
+ struct ethhdr *mh;
+
+ SLSI_NET_DBG4(dev, SLSI_RX, "A-MSDU received (len:%d)\n", skb->len);
+
+ while (!last_sub_frame) {
+ /* Big-endian MSDU length field sits right after DA and SA */
+ msdu_len = (skb->data[ETH_ALEN * 2] << 8) | skb->data[(ETH_ALEN * 2) + 1];
+
+ /* check if the length of sub-frame is valid */
+ /* NOTE(review): this bounds msdu_len alone, not the full
+ * subframe (msdu_len + 14-byte header); presumably firmware
+ * guarantees the header fits - TODO confirm.
+ */
+ if (msdu_len > skb->len) {
+ SLSI_NET_ERR(dev, "invalid MSDU length %d, SKB length = %d\n", msdu_len, skb->len);
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* Subframe = DA + SA + 2-byte length field + MSDU payload */
+ subframe_len = msdu_len + (2 * ETH_ALEN) + 2;
+
+ /* For the last subframe skb length and subframe length will be same */
+ if (skb->len == subframe_len) {
+ subframe = slsi_skb_copy(skb, GFP_ATOMIC);
+
+ if (!subframe) {
+ SLSI_NET_ERR(dev, "failed to alloc the SKB for A-MSDU subframe\n");
+ __skb_queue_purge(msdu_list);
+ slsi_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* There is no padding for last subframe */
+ padding = 0;
+ last_sub_frame = true;
+ } else {
+ /* Copy the skb for the subframe */
+ subframe = slsi_skb_copy(skb, GFP_ATOMIC);
+
+ if (!subframe) {
+ SLSI_NET_ERR(dev, "failed to alloc the SKB for A-MSDU subframe\n");
+ __skb_queue_purge(msdu_list);
+ slsi_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ /* Subframes are 4-byte aligned within the aggregate */
+ padding = (4 - (subframe_len % 4)) & 0x3;
+ }
+
+ /* Remove the other subframes by adjusting the tail pointer of the copied skb */
+ skb_trim(subframe, subframe_len);
+
+ /* Overwrite LLC+SNAP header with src and dest addr */
+ SLSI_ETHER_COPY(&subframe->data[14], &subframe->data[6]);
+ SLSI_ETHER_COPY(&subframe->data[8], &subframe->data[0]);
+
+ /* Remove 8 bytes of LLC+SNAP header */
+ skb_pull(subframe, LLC_SNAP_HDR_LEN);
+
+ SLSI_NET_DBG_HEX(dev, SLSI_RX, subframe->data,
+ subframe->len < 64 ? subframe->len : 64, "Subframe before giving to OS:\n");
+
+ /* Before preparing the skb, filter out if the Destination Address of the Ethernet frame
+ * or A-MSDU subframe is set to an invalid value, i.e. all zeros
+ */
+ skb_set_mac_header(subframe, 0);
+ mh = eth_hdr(subframe);
+ if (SLSI_ETHER_EQUAL(mh->h_dest, mac_0)) {
+ SLSI_NET_DBG3(dev, SLSI_RX, "msdu subframe filtered out: MAC destination address %pM\n", mh->h_dest);
+ skip_frame = true;
+ }
+
+ /* If this is not the last subframe then move to the next subframe */
+ if (!last_sub_frame)
+ skb_pull(skb, (subframe_len + padding));
+
+ /* If this frame has been filtered out, free the clone and continue */
+ if (skip_frame) {
+ skip_frame = false;
+ /* subframe is always a private copy (never the aggregate
+ * itself), so freeing it here leaves skb intact; the
+ * aggregate is released after the loop.
+ */
+ slsi_kfree_skb(subframe);
+ continue;
+ }
+ __skb_queue_tail(msdu_list, subframe);
+ }
+ /* Aggregate fully consumed; the copies live on in msdu_list */
+ slsi_kfree_skb(skb);
+ return 0;
+}
+
+/* True when the ma_unitdata.ind carries an A-MSDU aggregate. */
+static inline bool slsi_rx_is_amsdu(struct sk_buff *skb)
+{
+    u16 descriptor = fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor);
+
+    return descriptor == FAPI_DATAUNITDESCRIPTOR_AMSDU;
+}
+
+/* Deliver a ma_unitdata.ind payload to the Linux network stack.
+ *
+ * Deaggregates A-MSDUs, drops our own multicast frames relayed back by
+ * the AP (STA mode), performs intra-BSS forwarding (AP mode), updates
+ * per-vif/per-peer counters and finally passes each MSDU up through
+ * NAPI or netif_rx_ni(). Consumes @skb on every path.
+ *
+ * @from_ba_timer: true when called from the BA reorder timer, in which
+ * case the non-GRO receive path is used (GRO must run in NAPI context).
+ */
+void slsi_rx_data_deliver_skb(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb, bool from_ba_timer)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff_head msdu_list;
+ struct slsi_peer *peer = NULL;
+ struct ethhdr *eth_hdr;
+ bool is_amsdu = slsi_rx_is_amsdu(skb);
+ u8 trafic_q = slsi_frame_priority_to_ac_queue(fapi_get_u16(skb, u.ma_unitdata_ind.priority));
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+#endif
+
+ __skb_queue_head_init(&msdu_list);
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Check if the payload is in the SMAPPER entry */
+ if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
+ /* Retrieve the associated smapper skb */
+ skb = slsi_hip_get_skb_from_smapper(sdev, skb);
+ if (!skb) {
+ SLSI_NET_DBG2(dev, SLSI_RX, "SKB from SMAPPER is NULL\n");
+ return;
+ }
+ } else {
+ /* strip signal and any signal/bulk roundings/offsets */
+ skb_pull(skb, fapi_get_siglen(skb));
+ }
+#else
+ skb_pull(skb, fapi_get_siglen(skb));
+#endif
+
+ /* Frames are only accepted from known peers (looked up by source MAC) */
+ eth_hdr = (struct ethhdr *)skb->data;
+ peer = slsi_get_peer_from_mac(sdev, dev, eth_hdr->h_source);
+ if (!peer) {
+ SLSI_NET_WARN(dev, "Packet dropped (no peer records)\n");
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ /* A-MSDU deaggregation */
+ if (is_amsdu) {
+ /* On failure the deaggregator has already freed skb and purged msdu_list */
+ if (slsi_rx_amsdu_deaggregate(dev, skb, &msdu_list)) {
+ ndev_vif->stats.rx_dropped++;
+ if (peer)
+ peer->sinfo.rx_dropped_misc++;
+ return;
+ }
+ } else {
+ __skb_queue_tail(&msdu_list, skb);
+ }
+ /* WARNING: skb may be NULL here and should not be used after this */
+ while (!skb_queue_empty(&msdu_list)) {
+ struct sk_buff *rx_skb;
+
+ rx_skb = __skb_dequeue(&msdu_list);
+
+ /* In STA mode, the AP relays back our multicast traffic.
+ * Receiving these frames and passing it up confuses some
+ * protocols and applications, notably IPv6 Duplicate
+ * Address Detection.
+ *
+ * So these frames are dropped instead of passing it further.
+ * No need to update the drop statistics as these frames are
+ * locally generated and should not be accounted in reception.
+ */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) {
+ struct ethhdr *ehdr = (struct ethhdr *)(rx_skb->data);
+
+ if (is_multicast_ether_addr(ehdr->h_dest) &&
+ !compare_ether_addr(ehdr->h_source, dev->dev_addr)) {
+ SLSI_NET_DBG2(dev, SLSI_RX, "drop locally generated multicast frame relayed back by AP\n");
+ slsi_kfree_skb(rx_skb);
+ continue;
+ }
+ }
+
+ /* Intra BSS */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && ndev_vif->peer_sta_records) {
+ /* note: this declaration shadows the outer peer for the
+ * intra-BSS destination lookup only
+ */
+ struct slsi_peer *peer = NULL;
+ struct ethhdr *ehdr = (struct ethhdr *)(rx_skb->data);
+
+ if (is_multicast_ether_addr(ehdr->h_dest)) {
+ /* Multicast: echo a copy back into the BSS and still pass the original up */
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ struct sk_buff *rebroadcast_skb = slsi_skb_copy(rx_skb, GFP_ATOMIC);
+#else
+ struct sk_buff *rebroadcast_skb = slsi_skb_copy(rx_skb, GFP_KERNEL);
+#endif
+ if (!rebroadcast_skb) {
+ SLSI_WARN(sdev, "Intra BSS: failed to alloc new SKB for broadcast\n");
+ } else {
+ SLSI_DBG3(sdev, SLSI_RX, "Intra BSS: multicast %pM\n", ehdr->h_dest);
+ rebroadcast_skb->dev = dev;
+ rebroadcast_skb->protocol = cpu_to_be16(ETH_P_802_3);
+ slsi_dbg_untrack_skb(rebroadcast_skb);
+ skb_reset_network_header(rebroadcast_skb);
+ skb_reset_mac_header(rebroadcast_skb);
+ dev_queue_xmit(rebroadcast_skb);
+ }
+ } else {
+ /* Unicast to an authorized peer in the same BSS: forward instead of passing up */
+ peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
+ if (peer && peer->authorized) {
+ SLSI_DBG3(sdev, SLSI_RX, "Intra BSS: unicast %pM\n", ehdr->h_dest);
+ rx_skb->dev = dev;
+ rx_skb->protocol = cpu_to_be16(ETH_P_802_3);
+ slsi_dbg_untrack_skb(rx_skb);
+ skb_reset_network_header(rx_skb);
+ skb_reset_mac_header(rx_skb);
+ dev_queue_xmit(rx_skb);
+ continue;
+ }
+ }
+ }
+
+ /* peer (outer) is non-NULL here - checked right after lookup above */
+ if (peer) {
+ peer->sinfo.rx_bytes += rx_skb->len;
+ }
+ ndev_vif->stats.rx_packets++;
+ ndev_vif->stats.rx_bytes += rx_skb->len;
+ ndev_vif->rx_packets[trafic_q]++;
+
+#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
+ /* Duplicate-address detection: a non-gratuitous ARP claiming our own
+ * IP from a different MAC marks a conflict
+ */
+ if (!ndev_vif->enhanced_arp_stats.is_duplicate_addr_detected) {
+ u8 *frame = rx_skb->data + 12; /* frame points to packet type */
+ u16 packet_type = frame[0] << 8 | frame[1];
+
+ if (packet_type == ETH_P_ARP) {
+ frame = frame + 2; /* ARP packet */
+ /*match source IP address in ARP with the DUT Ip address*/
+ if ((frame[SLSI_ARP_SRC_IP_ADDR_OFFSET] == (ndev_vif->ipaddress & 255)) &&
+ (frame[SLSI_ARP_SRC_IP_ADDR_OFFSET + 1] == ((ndev_vif->ipaddress >> 8U) & 255)) &&
+ (frame[SLSI_ARP_SRC_IP_ADDR_OFFSET + 2] == ((ndev_vif->ipaddress >> 16U) & 255)) &&
+ (frame[SLSI_ARP_SRC_IP_ADDR_OFFSET + 3] == ((ndev_vif->ipaddress >> 24U) & 255)) &&
+ !SLSI_IS_GRATUITOUS_ARP(frame) &&
+ !SLSI_ETHER_EQUAL(sdev->hw_addr, frame + 8)) /*if src MAC = DUT MAC */
+ ndev_vif->enhanced_arp_stats.is_duplicate_addr_detected = 1;
+ }
+ }
+
+ /* Count ARP replies from the tracked target (gateway) address */
+ if (ndev_vif->enhanced_arp_detect_enabled && (ndev_vif->vif_type == FAPI_VIFTYPE_STATION)) {
+ u8 *frame = rx_skb->data + 12; /* frame points to packet type */
+ u16 packet_type = frame[0] << 8 | frame[1];
+ u16 arp_opcode;
+
+ if (packet_type == ETH_P_ARP) {
+ frame = frame + 2; /* ARP packet */
+ arp_opcode = frame[SLSI_ARP_OPCODE_OFFSET] << 8 | frame[SLSI_ARP_OPCODE_OFFSET + 1];
+ /* check if sender ip = gateway ip and it is an ARP response */
+ if ((arp_opcode == SLSI_ARP_REPLY_OPCODE) &&
+ !SLSI_IS_GRATUITOUS_ARP(frame) &&
+ !memcmp(&frame[SLSI_ARP_SRC_IP_ADDR_OFFSET], &ndev_vif->target_ip_addr, 4)) {
+ ndev_vif->enhanced_arp_stats.arp_rsp_count_to_netdev++;
+ ndev_vif->enhanced_arp_stats.arp_rsp_rx_count_by_upper_mac++;
+ }
+ }
+ }
+#endif
+
+ /* Hand the frame to the stack */
+ rx_skb->dev = dev;
+ rx_skb->ip_summed = CHECKSUM_NONE;
+ rx_skb->protocol = eth_type_trans(rx_skb, dev);
+
+ SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, rx_skb->data, true);
+ slsi_traffic_mon_event_rx(sdev, dev, rx_skb);
+ slsi_dbg_untrack_skb(rx_skb);
+
+ SLSI_DBG4(sdev, SLSI_RX, "pass %u bytes to local stack\n", rx_skb->len);
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
+ if (conf_hip4_ver == 4) {
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
+ /* GRO only from NAPI context; BA timer path must use plain receive */
+ if (!from_ba_timer)
+ napi_gro_receive(&sdev->hip4_inst.hip_priv->napi, rx_skb);
+ else
+ netif_receive_skb(rx_skb);
+#else
+ netif_receive_skb(rx_skb);
+#endif
+ } else {
+ netif_rx_ni(rx_skb);
+ }
+#else
+ netif_rx_ni(rx_skb);
+#endif
+ slsi_wakelock_timeout(&sdev->wlan_wl_ma, SLSI_RX_WAKELOCK_TIME);
+ }
+}
+
+/* Validate and route one ma_unitdata.ind.
+ *
+ * Monitor-mode VIFs (debug builds) get the raw frame passed straight
+ * up. Otherwise the source peer is looked up, frames arriving before
+ * key negotiation completes (AP mode) are dropped, TDLS edge cases are
+ * resolved, and frames belonging to a negotiated block-ack session are
+ * handed to the BA reorderer; everything else goes directly to
+ * slsi_rx_data_deliver_skb(). Consumes @skb on every path.
+ */
+static void slsi_rx_data_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer = NULL;
+ struct ethhdr *eth_hdr;
+ u16 seq_num;
+
+ /* Only 802.3 frames, 802.11 frames and A-MSDUs are expected here */
+ if (!((fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor) == FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME) ||
+ (fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor) == FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME) ||
+ (fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor) == FAPI_DATAUNITDESCRIPTOR_AMSDU))) {
+ WARN_ON(1);
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+ /* pass the data up "As is" if the VIF type is Monitor */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_MONITOR) {
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Check if the payload is in the SMAPPER entry */
+ if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
+ /* Retrieve the associated smapper skb */
+ skb = slsi_hip_get_skb_from_smapper(sdev, skb);
+ if (!skb) {
+ SLSI_NET_DBG2(dev, SLSI_RX, "SKB from SMAPPER is NULL\n");
+ return;
+ }
+ } else {
+ /* strip signal and any signal/bulk roundings/offsets */
+ skb_pull(skb, fapi_get_siglen(skb));
+ }
+#else
+ skb_pull(skb, fapi_get_siglen(skb));
+#endif
+ skb_reset_mac_header(skb);
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ netif_rx_ni(skb);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Check if the payload is in the SMAPPER entry */
+ if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
+ eth_hdr = (struct ethhdr *)slsi_hip_get_skb_data_from_smapper(sdev, skb);
+ if (!(eth_hdr)) {
+ SLSI_NET_DBG2(dev, SLSI_RX, "SKB from SMAPPER is NULL\n");
+ slsi_kfree_skb(skb);
+ return;
+ }
+ } else {
+ eth_hdr = (struct ethhdr *)fapi_get_data(skb);
+ }
+#else
+ eth_hdr = (struct ethhdr *)fapi_get_data(skb);
+#endif
+ seq_num = fapi_get_u16(skb, u.ma_unitdata_ind.sequence_number);
+ SLSI_NET_DBG4(dev, SLSI_RX, "ma_unitdata_ind(vif:%d, dest:%pM, src:%pM, datatype:%d, priority:%d, s:%d)\n",
+ fapi_get_vif(skb),
+ eth_hdr->h_dest,
+ eth_hdr->h_source,
+ fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor),
+ fapi_get_u16(skb, u.ma_unitdata_ind.priority),
+ (seq_num & SLSI_RX_SEQ_NUM_MASK));
+
+ peer = slsi_get_peer_from_mac(sdev, dev, eth_hdr->h_source);
+ if (!peer) {
+ SLSI_NET_WARN(dev, "Packet dropped (no peer records)\n");
+ /* Race in Data plane (Shows up in fw test mode) */
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ /* discard data frames if received before key negotiations are completed */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_AP && peer->connected_state != SLSI_STA_CONN_STATE_CONNECTED) {
+ SLSI_NET_WARN(dev, "Packet dropped (peer connection not complete (state:%u))\n", peer->connected_state);
+ slsi_kfree_skb(skb);
+ return;
+ }
+
+ /* When TDLS connection has just been closed a few last frame may still arrive from the closed connection.
+ * This frames must not be injected in to the block session with the AP as the sequence numbers are different
+ * that will confuse the BA process. Therefore we have to skip BA for those frames.
+ */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION && peer->aid < SLSI_TDLS_PEER_INDEX_MIN && (seq_num & SLSI_RX_VIA_TDLS_LINK)) {
+ if (printk_ratelimit())
+ SLSI_NET_WARN(dev, "Packet received from TDLS but no TDLS exists (seq: %x) Skip BA\n", seq_num);
+
+ /* Skip BA reorder and pass the frames Up */
+ slsi_rx_data_deliver_skb(sdev, dev, skb, false);
+ return;
+ }
+
+ /* TDLS is enabled for the PEER but still packet is received through the AP. Process this packet with the AP PEER */
+ if (ndev_vif->vif_type == FAPI_VIFTYPE_STATION && peer->aid >= SLSI_TDLS_PEER_INDEX_MIN && (!(seq_num & SLSI_RX_VIA_TDLS_LINK))) {
+ SLSI_NET_DBG2(dev, SLSI_TDLS, "Packet received from TDLS peer through the AP(seq: %x)\n", seq_num);
+ peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+ if (!peer) {
+ SLSI_NET_WARN(dev, "Packet dropped (AP peer not found)\n");
+ slsi_kfree_skb(skb);
+ return;
+ }
+ }
+
+ /* If frame belongs to a negotiated BA, BA will consume the frame */
+ if (slsi_ba_check(peer, fapi_get_u16(skb, u.ma_unitdata_ind.priority)))
+ if (!slsi_ba_process_frame(dev, peer, skb, (seq_num & SLSI_RX_SEQ_NUM_MASK),
+ fapi_get_u16(skb, u.ma_unitdata_ind.priority)))
+ return;
+
+ /* Pass to next receive process */
+ slsi_rx_data_deliver_skb(sdev, dev, skb, false);
+}
+
+/* Handle a ma_unitdata.cfm: log it, count NO-ACK (retry-limit)
+ * transmissions per traffic queue, and release the signal.
+ * Always returns 0; the skb is consumed.
+ */
+static int slsi_rx_data_cfm(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+    struct netdev_vif *ndev_vif = netdev_priv(dev);
+    u16 host_tag = fapi_get_u16(skb, u.ma_unitdata_cfm.host_tag);
+    u16 tx_status = fapi_get_u16(skb, u.ma_unitdata_cfm.transmission_status);
+
+    SLSI_NET_DBG1(dev, SLSI_TX, "ma_unitdata_cfm(vif:%d, host_tag:0x%x, status:%d)\n",
+                  fapi_get_vif(skb), host_tag, tx_status);
+#ifdef CONFIG_SCSC_WLAN_DEBUG
+    if (tx_status == FAPI_TRANSMISSIONSTATUS_TX_LIFETIME && printk_ratelimit())
+        SLSI_NET_WARN(dev, "ma_unitdata_cfm: tx_lifetime(vif:%d, host_tag:0x%x)\n", fapi_get_vif(skb), host_tag);
+#endif
+    if (tx_status == FAPI_TRANSMISSIONSTATUS_RETRY_LIMIT)
+        ndev_vif->tx_no_ack[SLSI_HOST_TAG_TRAFFIC_QUEUE(host_tag)]++;
+
+    slsi_kfree_skb(skb);
+    return 0;
+}
+
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+/* NAPI-context dispatcher for data-class signals (IND/CFM).
+ * Looks up the owning netdev by vif and dispatches the signal inline
+ * (no work queue). After an IND, flushes any frames the BA reorderer
+ * has released. Returns 0 when the skb is consumed, -EINVAL when the
+ * netdev no longer exists.
+ *
+ * NOTE(review): dev is dereferenced after rcu_read_unlock();
+ * presumably safe because netdevs outlive active RX processing here -
+ * confirm against the driver's netdev teardown ordering.
+ */
+static int slsi_rx_napi_process(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ u16 vif;
+
+ vif = fapi_get_vif(skb);
+
+ rcu_read_lock();
+ dev = slsi_get_netdev_rcu(sdev, vif);
+ if (!dev) {
+ SLSI_ERR(sdev, "netdev(%d) No longer exists\n", vif);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ ndev_vif = netdev_priv(dev);
+
+ switch (fapi_get_u16(skb, id)) {
+ case MA_UNITDATA_IND:
+ slsi_rx_data_ind(sdev, dev, skb);
+
+ /* SKBs in a BA session are not passed yet */
+ if (atomic_read(&ndev_vif->ba_flush)) {
+ atomic_set(&ndev_vif->ba_flush, 0);
+ slsi_ba_process_complete(dev, false);
+ }
+ break;
+ case MA_UNITDATA_CFM:
+ (void)slsi_rx_data_cfm(sdev, dev, skb);
+ break;
+ default:
+ SLSI_DBG1(sdev, SLSI_RX, "Unexpected Data: 0x%.4x\n", fapi_get_sigid(skb));
+ slsi_kfree_skb(skb);
+ break;
+ }
+ return 0;
+}
+#endif
+/* Work-queue handler that drains a netdev's rx_data queue.
+ *
+ * Holds the wlan wakelock for the duration. Each iteration re-takes
+ * the vif mutex: if the VIF has been deactivated the remaining queue
+ * is purged and the loop exits; otherwise pending BA-released frames
+ * are flushed, then the next signal is dequeued and dispatched.
+ */
+void slsi_rx_netdev_data_work(struct work_struct *work)
+{
+ struct slsi_skb_work *w = container_of(work, struct slsi_skb_work, work);
+ struct slsi_dev *sdev = w->sdev;
+ struct net_device *dev = w->dev;
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ if (WARN_ON(!dev))
+ return;
+
+ slsi_wakelock(&sdev->wlan_wl);
+
+ while (1) {
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ if (!ndev_vif->activated) {
+ /* VIF torn down: discard everything still queued */
+ slsi_skb_queue_purge(&w->queue);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ break;
+ }
+
+ /* Flush frames the BA reorderer released since the last pass */
+ if (atomic_read(&ndev_vif->ba_flush)) {
+ atomic_set(&ndev_vif->ba_flush, 0);
+ slsi_ba_process_complete(dev, false);
+ }
+
+ skb = slsi_skb_work_dequeue(w);
+ if (!skb) {
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ break;
+ }
+
+ switch (fapi_get_u16(skb, id)) {
+ case MA_UNITDATA_IND:
+#ifdef CONFIG_SCSC_SMAPPER
+ if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
+ u8 *frame = (u8 *)slsi_hip_get_skb_data_from_smapper(sdev, skb);
+
+ if (frame)
+ SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, frame, false);
+ } else {
+ SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, skb->data + fapi_get_siglen(skb), false);
+ }
+#else
+ SCSC_HIP4_SAMPLER_TCP_DECODE(sdev, dev, skb->data + fapi_get_siglen(skb), false);
+#endif
+ slsi_rx_data_ind(sdev, dev, skb);
+ break;
+ case MA_UNITDATA_CFM:
+ (void)slsi_rx_data_cfm(sdev, dev, skb);
+ break;
+ default:
+ SLSI_DBG1(sdev, SLSI_RX, "Unexpected Data: 0x%.4x\n", fapi_get_sigid(skb));
+ slsi_kfree_skb(skb);
+ break;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ }
+ slsi_wakeunlock(&sdev->wlan_wl);
+}
+
+/* Queue a data-plane signal onto the owning netdev's rx_data work
+ * queue. Returns 0 on success, -EINVAL when the vif has no netdev
+ * (in which case the caller still owns the skb).
+ */
+static int slsi_rx_queue_data(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+    struct netdev_vif *ndev_vif;
+    struct net_device *dev;
+    int vif = fapi_get_vif(skb);
+
+    rcu_read_lock();
+    dev = slsi_get_netdev_rcu(sdev, vif);
+    if (!dev) {
+        SLSI_ERR(sdev, "netdev(%d) No longer exists\n", vif);
+        rcu_read_unlock();
+        return -EINVAL;
+    }
+
+    ndev_vif = netdev_priv(dev);
+    slsi_skb_work_enqueue(&ndev_vif->rx_data, skb);
+    rcu_read_unlock();
+    return 0;
+}
+
+/* HIP entry point for data-class (MA) signals.
+ * For SMAPPER-carried INDs the smapper entry is consumed first; then
+ * IND/CFM are dispatched to the NAPI path (HIP config v4) or to the
+ * per-netdev work queue. MA_BLOCKACK_IND is queued to the MLME work
+ * queue. Returns 0 when consumed, negative error otherwise.
+ */
+static int sap_ma_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ u32 conf_hip4_ver = 0;
+#endif
+#ifdef CONFIG_SCSC_SMAPPER
+ u16 sig_len;
+ u32 err;
+#endif
+
+ switch (fapi_get_sigid(skb)) {
+ case MA_UNITDATA_IND:
+#ifdef CONFIG_SCSC_SMAPPER
+ /* Check SMAPPER to nullify entry*/
+ if (fapi_get_u16(skb, u.ma_unitdata_ind.bulk_data_descriptor) == FAPI_BULKDATADESCRIPTOR_SMAPPER) {
+ sig_len = fapi_get_siglen(skb);
+ skb_pull(skb, sig_len);
+ err = slsi_hip_consume_smapper_entry(sdev, skb);
+ skb_push(skb, sig_len);
+ if (err)
+ return err;
+ }
+#endif
+ /* fall through - IND and CFM share the dispatch path below */
+ case MA_UNITDATA_CFM:
+#ifdef CONFIG_SCSC_WLAN_RX_NAPI
+ conf_hip4_ver = scsc_wifi_get_hip_config_version(&sdev->hip4_inst.hip_control->init);
+ if (conf_hip4_ver == 4)
+ return slsi_rx_napi_process(sdev, skb);
+ else
+ return slsi_rx_queue_data(sdev, skb);
+#else
+ return slsi_rx_queue_data(sdev, skb);
+#endif
+ case MA_BLOCKACK_IND:
+ /* It is anomolous to handle the MA_BLOCKACK_IND in the
+ * mlme wq.
+ */
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, fapi_get_vif(skb));
+ default:
+ break;
+ }
+
+ SLSI_ERR_NODEV("Shouldn't be getting here!\n");
+ return -EINVAL;
+}
+
+/* Adjust the scod value and flow control appropriately. */
+/* Decode a tx-done "colour" (vif / peer_index / ac packed into a u16)
+ * and credit the matching flow-control queue. peer_index 0 addresses
+ * the AP multicast queue-set. Returns the fcq result or -EINVAL for a
+ * missing netdev / out-of-range peer_index.
+ */
+static int sap_ma_txdone(struct slsi_dev *sdev, u16 colour)
+{
+ struct net_device *dev;
+ struct slsi_peer *peer;
+ u16 vif, peer_index, ac;
+
+ /* Extract information from the coloured mbulk */
+ /* colour is defined as: */
+ /* u16 register bits:
+ * 0 - do not use
+ * [2:1] - vif
+ * [7:3] - peer_index
+ * [10:8] - ac queue
+ */
+ /* NOTE(review): the ac mask 0x300 only covers bits [9:8], not the
+ * [10:8] documented above; with four AC queues two bits suffice,
+ * so this is presumably intentional - confirm against the colour
+ * encoding on the TX side.
+ */
+ vif = (colour & 0x6) >> 1;
+ peer_index = (colour & 0xf8) >> 3;
+ ac = (colour & 0x300) >> 8;
+
+ rcu_read_lock();
+ dev = slsi_get_netdev_rcu(sdev, vif);
+
+ if (!dev) {
+ SLSI_ERR(sdev, "netdev(%d) No longer exists\n", vif);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ if (peer_index <= SLSI_PEER_INDEX_MAX) {
+ /* peer_index = 0 for Multicast queues */
+ if (peer_index == 0) {
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ return scsc_wifi_fcq_receive_data(dev, &ndev_vif->ap.group_data_qs, ac, sdev, vif, peer_index);
+ }
+ peer = slsi_get_peer_from_qs(sdev, dev, MAP_AID_TO_QS(peer_index));
+ if (peer)
+ return scsc_wifi_fcq_receive_data(dev, &peer->data_qs, ac, sdev, vif, peer_index);
+
+ SLSI_DBG3(sdev, SLSI_RX, "peer record NOT found for peer_index=%d\n", peer_index);
+ /* We need to handle this case as special. Peer disappeared bug hip4
+ * is sending back the colours to free.
+ */
+ return scsc_wifi_fcq_receive_data_no_peer(dev, ac, sdev, vif, peer_index);
+ }
+ SLSI_ERR(sdev, "illegal peer_index vif=%d peer_index=%d\n", vif, peer_index);
+ return -EINVAL;
+}
+
+/* Register the MA (data) SAP with the HIP core. Always succeeds. */
+int sap_ma_init(void)
+{
+    SLSI_INFO_NODEV("Registering SAP\n");
+    slsi_hip_sap_register(&sap_ma);
+    return 0;
+}
+
+/* Unregister the MA (data) SAP from the HIP core. Always succeeds. */
+int sap_ma_deinit(void)
+{
+    SLSI_INFO_NODEV("Unregistering SAP\n");
+    slsi_hip_sap_unregister(&sap_ma);
+    return 0;
+}
--- /dev/null
+
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SAP_MA_H__
+#define __SAP_MA_H__
+
+int sap_ma_init(void);
+int sap_ma_deinit(void);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/types.h>
+#include "debug.h"
+#include "dev.h"
+#include "sap.h"
+#include "sap_mlme.h"
+#include "hip.h"
+#include "mgt.h"
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+#include "nl80211_vendor.h"
+#include "mlme.h"
+
+#define SUPPORTED_OLD_VERSION 0
+
+static int sap_mlme_version_supported(u16 version);
+static int sap_mlme_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb);
+
+static int sap_mlme_notifier(struct slsi_dev *sdev, unsigned long event);
+
+/* MLME (control plane) SAP descriptor registered with the HIP layer:
+ * routes MLME signals and lifecycle notifications to this module.
+ * FAPI_CONTROL_SAP_VERSION is current; SUPPORTED_OLD_VERSION (0) is
+ * the oldest accepted. No sap_txdone: MLME has no tx-done colours.
+ */
+static struct sap_api sap_mlme = {
+ .sap_class = SAP_MLME,
+ .sap_version_supported = sap_mlme_version_supported,
+ .sap_handler = sap_mlme_rx_handler,
+ .sap_versions = { FAPI_CONTROL_SAP_VERSION, SUPPORTED_OLD_VERSION },
+ .sap_notifier = sap_mlme_notifier,
+};
+
+/* React to WiFi service lifecycle events on the MLME SAP.
+ * SCSC_WIFI_STOP blocks further MLME traffic, wakes blocked waiters
+ * and force-cleans every VIF and its scan data. SCSC_WIFI_RESUME
+ * aborts a running WIPS/forward-beacon session (when configured).
+ * Returns 0, or -EIO for an out-of-range event code.
+ */
+static int sap_mlme_notifier(struct slsi_dev *sdev, unsigned long event)
+{
+ int i;
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ struct net_device *dev;
+#endif
+ struct netdev_vif *ndev_vif;
+
+ SLSI_INFO_NODEV("Notifier event received: %lu\n", event);
+ if (event >= SCSC_MAX_NOTIFIER)
+ return -EIO;
+
+ switch (event) {
+ case SCSC_WIFI_STOP:
+ /* Stop sending signals down*/
+ sdev->mlme_blocked = true;
+ SLSI_INFO_NODEV("MLME BLOCKED\n");
+
+ /* cleanup all the VIFs and scan data */
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ /* Release any thread blocked waiting for an MLME confirm */
+ complete_all(&sdev->sig_wait.completion);
+ /* NOTE(review): inclusive upper bound assumes sdev->netdev[]
+ * holds CONFIG_SCSC_WLAN_MAX_INTERFACES + 1 entries - TODO
+ * confirm against the array declaration in dev.h.
+ */
+ for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
+ if (sdev->netdev[i]) {
+ ndev_vif = netdev_priv(sdev->netdev[i]);
+ slsi_scan_cleanup(sdev, sdev->netdev[i]);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+ slsi_vif_cleanup(sdev, sdev->netdev[i], 0);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ }
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ SLSI_INFO_NODEV("Force cleaned all VIFs\n");
+ break;
+
+ case SCSC_WIFI_FAILURE_RESET:
+ break;
+
+ case SCSC_WIFI_SUSPEND:
+ break;
+
+ case SCSC_WIFI_RESUME:
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ /* Abort forward-beacon reporting if it was running while suspended */
+ dev = slsi_get_netdev(sdev, SLSI_NET_INDEX_WLAN);
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ if ((ndev_vif->is_wips_running) && (ndev_vif->activated) &&
+ (ndev_vif->vif_type == FAPI_VIFTYPE_STATION) &&
+ (ndev_vif->sta.vif_status == SLSI_VIF_STATUS_CONNECTED)) {
+ ndev_vif->is_wips_running = false;
+
+ slsi_send_forward_beacon_abort_vendor_event(sdev, SLSI_FORWARD_BEACON_ABORT_REASON_SUSPENDED);
+ SLSI_INFO_NODEV("FORWARD_BEACON: SUSPEND_RESUMED!! send abort event\n");
+ }
+
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+#endif
+ break;
+
+ default:
+ SLSI_INFO_NODEV("Unknown event code %lu\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+/* Accept a reported SAP version when its major number matches one of
+ * the versions advertised in sap_mlme.sap_versions.
+ * Returns 0 if supported, -EINVAL otherwise.
+ */
+static int sap_mlme_version_supported(u16 version)
+{
+    unsigned int major = SAP_MAJOR(version);
+    unsigned int minor = SAP_MINOR(version);
+    u8 idx;
+
+    SLSI_INFO_NODEV("Reported version: %d.%d\n", major, minor);
+
+    for (idx = 0; idx < SAP_MAX_VER; idx++) {
+        if (SAP_MAJOR(sap_mlme.sap_versions[idx]) == major)
+            return 0;
+    }
+
+    SLSI_ERR_NODEV("Version %d.%d Not supported\n", major, minor);
+    return -EINVAL;
+}
+
+/* Dispatch one MLME indication (already on the netdev MLME work queue)
+ * to its handler. Always returns 0.
+ */
+static int slsi_rx_netdev_mlme(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ u16 id = fapi_get_u16(skb, id);
+
+ /* The skb is consumed by the functions called.
+ */
+ /* NOTE(review): the NAN and SAE cases below additionally free the
+ * skb after the handler returns - presumably those handlers do not
+ * consume it; confirm against their definitions.
+ */
+ switch (id) {
+ case MLME_SCAN_IND:
+ slsi_rx_scan_ind(sdev, dev, skb);
+ break;
+ case MLME_SCAN_DONE_IND:
+ slsi_rx_scan_done_ind(sdev, dev, skb);
+ break;
+ case MLME_CONNECT_IND:
+ slsi_rx_connect_ind(sdev, dev, skb);
+ break;
+ case MLME_CONNECTED_IND:
+ slsi_rx_connected_ind(sdev, dev, skb);
+ break;
+ case MLME_RECEIVED_FRAME_IND:
+ slsi_rx_received_frame_ind(sdev, dev, skb);
+ break;
+ case MLME_DISCONNECT_IND:
+ slsi_rx_disconnect_ind(sdev, dev, skb);
+ break;
+ case MLME_DISCONNECTED_IND:
+ slsi_rx_disconnected_ind(sdev, dev, skb);
+ break;
+ case MLME_PROCEDURE_STARTED_IND:
+ slsi_rx_procedure_started_ind(sdev, dev, skb);
+ break;
+ case MLME_FRAME_TRANSMISSION_IND:
+ slsi_rx_frame_transmission_ind(sdev, dev, skb);
+ break;
+ case MA_BLOCKACK_IND:
+ slsi_rx_blockack_ind(sdev, dev, skb);
+ break;
+ case MLME_ROAMED_IND:
+ slsi_rx_roamed_ind(sdev, dev, skb);
+ break;
+ case MLME_ROAM_IND:
+ slsi_rx_roam_ind(sdev, dev, skb);
+ break;
+ case MLME_MIC_FAILURE_IND:
+ slsi_rx_mic_failure_ind(sdev, dev, skb);
+ break;
+ case MLME_REASSOCIATE_IND:
+ slsi_rx_reassoc_ind(sdev, dev, skb);
+ break;
+ case MLME_TDLS_PEER_IND:
+ slsi_tdls_peer_ind(sdev, dev, skb);
+ break;
+ case MLME_LISTEN_END_IND:
+ slsi_rx_listen_end_ind(dev, skb);
+ break;
+ case MLME_CHANNEL_SWITCHED_IND:
+ slsi_rx_channel_switched_ind(sdev, dev, skb);
+ break;
+ case MLME_AC_PRIORITY_UPDATE_IND:
+ SLSI_DBG1(sdev, SLSI_MLME, "Unexpected MLME_AC_PRIORITY_UPDATE_IND\n");
+ slsi_kfree_skb(skb);
+ break;
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ case MLME_RSSI_REPORT_IND:
+ slsi_rx_rssi_report_ind(sdev, dev, skb);
+ break;
+ case MLME_RANGE_IND:
+ slsi_rx_range_ind(sdev, dev, skb);
+ break;
+ case MLME_RANGE_DONE_IND:
+ slsi_rx_range_done_ind(sdev, dev, skb);
+ break;
+ case MLME_EVENT_LOG_IND:
+ slsi_rx_event_log_indication(sdev, dev, skb);
+ break;
+#endif
+#ifdef CONFIG_SCSC_WIFI_NAN_ENABLE
+ case MLME_NAN_EVENT_IND:
+ slsi_nan_event(sdev, dev, skb);
+ slsi_kfree_skb(skb);
+ break;
+ case MLME_NAN_FOLLOWUP_IND:
+ slsi_nan_followup_ind(sdev, dev, skb);
+ slsi_kfree_skb(skb);
+ break;
+ case MLME_NAN_SERVICE_IND:
+ slsi_nan_service_ind(sdev, dev, skb);
+ slsi_kfree_skb(skb);
+ break;
+#endif
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+ case MLME_SYNCHRONISED_IND:
+ slsi_rx_synchronised_ind(sdev, dev, skb);
+ slsi_kfree_skb(skb);
+ break;
+#endif
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+ case MLME_BEACON_REPORTING_EVENT_IND:
+ slsi_rx_beacon_reporting_event_ind(sdev, dev, skb);
+ break;
+#endif
+ default:
+ slsi_kfree_skb(skb);
+ SLSI_NET_ERR(dev, "Unhandled Ind: 0x%.4x\n", id);
+ break;
+ }
+ return 0;
+}
+
+/* Work-queue handler that drains a netdev's rx_mlme queue, holding the
+ * wlan wakelock while signals are pending. Each dequeued signal is
+ * traced and dispatched through slsi_rx_netdev_mlme(), which consumes
+ * it.
+ *
+ * Fix: validate dev BEFORE dequeuing. The original dequeued one skb
+ * first and then returned on WARN_ON(!dev), leaking that skb.
+ */
+void slsi_rx_netdev_mlme_work(struct work_struct *work)
+{
+    struct slsi_skb_work *w = container_of(work, struct slsi_skb_work, work);
+    struct slsi_dev *sdev = w->sdev;
+    struct net_device *dev = w->dev;
+    struct sk_buff *skb;
+
+    if (WARN_ON(!dev))
+        return;
+
+    slsi_wakelock(&sdev->wlan_wl);
+    while ((skb = slsi_skb_work_dequeue(w)) != NULL) {
+        slsi_debug_frame(sdev, dev, skb, "RX");
+        slsi_rx_netdev_mlme(sdev, dev, skb);
+    }
+    slsi_wakeunlock(&sdev->wlan_wl);
+}
+
+/* Hand an MLME signal to the netdev's rx_mlme work queue.
+ * Returns 0 when the skb has been consumed (queued, or dropped in fw
+ * test mode); -ENODEV when the vif has no netdev, in which case the
+ * caller must free the skb.
+ */
+int slsi_rx_enqueue_netdev_mlme(struct slsi_dev *sdev, struct sk_buff *skb, u16 vif)
+{
+    struct netdev_vif *ndev_vif;
+    struct net_device *dev;
+    int ret = 0;
+
+    rcu_read_lock();
+    dev = slsi_get_netdev_rcu(sdev, vif);
+    if (WARN_ON(!dev)) {
+        /* Calling function should free the skb */
+        ret = -ENODEV;
+    } else {
+        ndev_vif = netdev_priv(dev);
+        if (unlikely(ndev_vif->is_fw_test))
+            slsi_kfree_skb(skb);
+        else
+            slsi_skb_work_enqueue(&ndev_vif->rx_mlme, skb);
+    }
+    rcu_read_unlock();
+    return ret;
+}
+
+/* Queue a received (action) frame indication onto the correct netdev's
+ * MLME work queue. For P2P GO/client vifs, frames addressed to the P2P
+ * device address are re-routed to the P2P device netdev. Returns 0
+ * when the skb is consumed, -ENODEV when no netdev exists.
+ */
+static int slsi_rx_action_enqueue_netdev_mlme(struct slsi_dev *sdev, struct sk_buff *skb, u16 vif)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+
+ rcu_read_lock();
+ dev = slsi_get_netdev_rcu(sdev, vif);
+ if (WARN_ON(!dev)) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+
+ ndev_vif = netdev_priv(dev);
+
+ if (unlikely(ndev_vif->is_fw_test)) {
+ /* NOTE(review): this early return reports the skb as consumed
+ * but does not free it, unlike the equivalent fw_test paths
+ * elsewhere in this file - possible leak; confirm.
+ */
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ndev_vif->iftype == NL80211_IFTYPE_P2P_GO || ndev_vif->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ struct ieee80211_mgmt *mgmt = fapi_get_mgmt(skb);
+ /* Check the DA of received action frame with the GO interface address */
+ if (memcmp(mgmt->da, dev->dev_addr, ETH_ALEN) != 0) {
+ /* If not equal, compare DA of received action frame with the P2P DEV address*/
+ struct net_device *p2pdev = slsi_get_netdev_rcu(sdev, SLSI_NET_INDEX_P2P);
+
+ if (WARN_ON(!p2pdev)) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ if (memcmp(mgmt->da, p2pdev->dev_addr, ETH_ALEN) == 0) {
+ /* If destination address is equal to P2P DEV ADDR, then action frame is received on
+ * GO interface. Hence indicate action frames on P2P DEV
+ */
+ ndev_vif = netdev_priv(p2pdev);
+
+ if (unlikely(ndev_vif->is_fw_test)) {
+ slsi_kfree_skb(skb);
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ }
+ }
+
+ /* ndev_vif may now point at the P2P device vif rather than the caller's */
+ slsi_skb_work_enqueue(&ndev_vif->rx_mlme, skb);
+
+ rcu_read_unlock();
+ return 0;
+}
+
+/* HIP entry point for control-class (MLME) signals.
+ *
+ * A thread blocked on a matching confirm may consume the signal first.
+ * Indications are then demultiplexed onto the owning netdev's MLME
+ * work queue; scan signals carrying no vif are routed by the high byte
+ * of their scan_id (gscan ids go to the wlan netdev). Returns 0 when
+ * the skb is consumed.
+ *
+ * NOTE(review): the err paths return -EINVAL without freeing the skb -
+ * presumably the HIP caller frees on a non-zero return; confirm.
+ */
+static int sap_mlme_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ u16 scan_id;
+ u16 vif = fapi_get_vif(skb);
+
+ if (slsi_rx_blocking_signals(sdev, skb) == 0)
+ return 0;
+
+ if (fapi_is_ind(skb)) {
+#ifdef CONFIG_SCSC_WIFILOGGER
+ SCSC_WLOG_PKTFATE_LOG_RX_CTRL_FRAME(fapi_get_data(skb), fapi_get_datalen(skb));
+#endif
+
+ switch (fapi_get_sigid(skb)) {
+ case MLME_SCAN_DONE_IND:
+ scan_id = fapi_get_u16(skb, u.mlme_scan_done_ind.scan_id);
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ if (slsi_is_gscan_id(scan_id))
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, SLSI_NET_INDEX_WLAN);
+#endif
+ /* high byte of scan_id encodes the owning netdev index */
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, (scan_id >> 8));
+ case MLME_SCAN_IND:
+ if (vif)
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, vif);
+ scan_id = fapi_get_u16(skb, u.mlme_scan_ind.scan_id);
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ if (slsi_is_gscan_id(scan_id))
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, SLSI_NET_INDEX_WLAN);
+#endif
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, (scan_id >> 8));
+ case MLME_RECEIVED_FRAME_IND:
+ if (vif == 0) {
+ SLSI_WARN(sdev, "Received MLME_RECEIVED_FRAME_IND on VIF 0\n");
+ goto err;
+ }
+ return slsi_rx_action_enqueue_netdev_mlme(sdev, skb, vif);
+#ifdef CONFIG_SCSC_WLAN_GSCAN_ENABLE
+ case MLME_NAN_EVENT_IND:
+ case MLME_NAN_FOLLOWUP_IND:
+ case MLME_NAN_SERVICE_IND:
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, vif);
+ case MLME_RANGE_IND:
+ case MLME_RANGE_DONE_IND:
+ if (vif == 0)
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, SLSI_NET_INDEX_WLAN);
+ else
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, vif);
+#endif
+#ifdef CONFIG_SCSC_WLAN_ENHANCED_LOGGING
+ case MLME_EVENT_LOG_IND:
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, SLSI_NET_INDEX_WLAN);
+#endif
+ case MLME_ROAMED_IND:
+ if (vif == 0) {
+ SLSI_WARN(sdev, "Received MLME_ROAMED_IND on VIF 0, return error\n");
+ goto err;
+ } else {
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+
+ rcu_read_lock();
+ dev = slsi_get_netdev_rcu(sdev, vif);
+ if (WARN_ON(!dev)) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
+ ndev_vif = netdev_priv(dev);
+ if (atomic_read(&ndev_vif->sta.drop_roamed_ind)) {
+ /* If roam cfm is not received for the req, ignore this roamed indication. */
+ slsi_kfree_skb(skb);
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, vif);
+ }
+ default:
+ if (vif == 0) {
+ SLSI_WARN(sdev, "Received signal 0x%04x on VIF 0, return error\n", fapi_get_sigid(skb));
+ goto err;
+ } else {
+ return slsi_rx_enqueue_netdev_mlme(sdev, skb, vif);
+ }
+ }
+ }
+ /* Requests must never travel firmware -> host */
+ if (WARN_ON(fapi_is_req(skb)))
+ goto err;
+
+ /* Unconsumed confirms are tolerated only in test mode */
+ if (slsi_is_test_mode_enabled()) {
+ slsi_kfree_skb(skb);
+ return 0;
+ }
+
+ WARN_ON(1);
+
+err:
+ return -EINVAL;
+}
+
+/* Register the MLME SAP with the HIP layer.  Always returns 0. */
+int sap_mlme_init(void)
+{
+	SLSI_INFO_NODEV("Registering SAP\n");
+	slsi_hip_sap_register(&sap_mlme);
+	return 0;
+}
+
+/* Unregister the MLME SAP from the HIP layer.  Always returns 0. */
+int sap_mlme_deinit(void)
+{
+	SLSI_INFO_NODEV("Unregistering SAP\n");
+	slsi_hip_sap_unregister(&sap_mlme);
+	return 0;
+}
--- /dev/null
+
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SAP_MLME_H__
+#define __SAP_MLME_H__
+
+/* SAP registration/unregistration entry points (sap_mlme.c). */
+int sap_mlme_init(void);
+int sap_mlme_deinit(void);
+
+/* MLME signal handlers in rx.c */
+void slsi_rx_scan_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+#ifdef CONFIG_SLSI_WLAN_STA_FWD_BEACON
+void slsi_rx_beacon_reporting_event_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+#endif
+void slsi_rx_scan_done_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_channel_switched_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+#ifdef CONFIG_SCSC_WLAN_SAE_CONFIG
+void slsi_rx_synchronised_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+#endif
+void slsi_rx_connect_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_connected_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_received_frame_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_disconnect_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_disconnected_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_procedure_started_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_frame_transmission_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_roamed_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_roam_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_mic_failure_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_reassoc_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_tdls_peer_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+/* NOTE(review): unlike its siblings this handler takes no sdev — confirm intentional. */
+void slsi_rx_listen_end_ind(struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_blockack_ind(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/types.h>
+#include "dev.h"
+#include "sap.h"
+#include "sap_test.h"
+#include "hip.h"
+
+#include "debug.h"
+
+/* Oldest SAP version this handler still accepts alongside the current one. */
+#define SUPPORTED_OLD_VERSION 0
+
+static int sap_test_version_supported(u16 version);
+static int sap_test_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb);
+
+/* TEST SAP descriptor registered with the HIP layer. */
+static struct sap_api sap_test = {
+	.sap_class = SAP_TST,
+	.sap_version_supported = sap_test_version_supported,
+	.sap_handler = sap_test_rx_handler,
+	.sap_versions = { FAPI_TEST_SAP_VERSION, SUPPORTED_OLD_VERSION },
+};
+
+/* Accept a firmware-reported SAP version if its major number matches any
+ * version this SAP declares.  Returns 0 on match, -EINVAL otherwise.
+ */
+static int sap_test_version_supported(u16 version)
+{
+	unsigned int major = SAP_MAJOR(version);
+	unsigned int minor = SAP_MINOR(version);
+	u8 idx;
+
+	SLSI_INFO_NODEV("Reported version: %d.%d\n", major, minor);
+
+	/* Only the major number participates in the compatibility check. */
+	for (idx = 0; idx < SAP_MAX_VER; idx++) {
+		if (SAP_MAJOR(sap_test.sap_versions[idx]) == major)
+			return 0;
+	}
+
+	SLSI_ERR_NODEV("Version %d.%d Not supported\n", major, minor);
+
+	return -EINVAL;
+}
+
+/* TEST SAP receive path: consume every signal, since the SAP is a stub. */
+static int sap_test_rx_handler(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+	/* A blocked waiter may consume the signal outright. */
+	if (!slsi_rx_blocking_signals(sdev, skb))
+		return 0;
+
+	SLSI_INFO_NODEV("TEST SAP not implemented\n");
+	/* Not implemented: silently drop the skb and report success. */
+	slsi_kfree_skb(skb);
+	return 0;
+}
+
+/* Register the TEST SAP with the HIP layer.  Always returns 0. */
+int sap_test_init(void)
+{
+	SLSI_INFO_NODEV("Registering SAP\n");
+	slsi_hip_sap_register(&sap_test);
+	return 0;
+}
+
+/* Unregister the TEST SAP from the HIP layer.  Always returns 0. */
+int sap_test_deinit(void)
+{
+	SLSI_INFO_NODEV("Unregistering SAP\n");
+	slsi_hip_sap_unregister(&sap_test);
+	return 0;
+}
--- /dev/null
+
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SAP_TEST_H__
+#define __SAP_TEST_H__
+
+/* TEST SAP registration/unregistration entry points (sap_test.c). */
+int sap_test_init(void);
+int sap_test_deinit(void);
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_WIFI_CM_IF_H__
+#define __SCSC_WIFI_CM_IF_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+
+#include <scsc/scsc_mx.h>
+
+struct slsi_dev;
+
+/**
+ * CM interface States (wlan service life-cycle):
+ * STOPPED  : (default) state, service not running
+ * PROBING  : probe in progress
+ * PROBED   : probe completed
+ * STARTING : service start in progress
+ * STARTED  : service running
+ * STOPPING : service stop in progress
+ * REMOVING : removal in progress
+ * REMOVED  : removal completed
+ * BLOCKED  : interface unusable
+ *
+ * NOTE(review): the per-state descriptions are inferred from the state
+ * names — confirm against the transitions in cm_if.c.
+ */
+enum scsc_wifi_cm_if_state {
+	SCSC_WIFI_CM_IF_STATE_STOPPED,
+	SCSC_WIFI_CM_IF_STATE_PROBING,
+	SCSC_WIFI_CM_IF_STATE_PROBED,
+	SCSC_WIFI_CM_IF_STATE_STARTING,
+	SCSC_WIFI_CM_IF_STATE_STARTED,
+	SCSC_WIFI_CM_IF_STATE_STOPPING,
+	SCSC_WIFI_CM_IF_STATE_REMOVING,
+	SCSC_WIFI_CM_IF_STATE_REMOVED,
+	SCSC_WIFI_CM_IF_STATE_BLOCKED
+};
+
+/**
+ * Notification Events
+ * SCSC_WIFI_STOP : Wifi service should freeze
+ * SCSC_WIFI_FAILURE_RESET : Failure has been handled
+ * SCSC_WIFI_SUSPEND: Host going in to suspend mode
+ * SCSC_WIFI_RESUME: Host resuming
+ */
+enum scsc_wifi_cm_if_notifier {
+	SCSC_WIFI_STOP,
+	SCSC_WIFI_FAILURE_RESET,
+	SCSC_WIFI_SUSPEND,
+	SCSC_WIFI_RESUME,
+	SCSC_MAX_NOTIFIER
+};
+
+/* Per-device CM interface context embedded in the driver. */
+struct scsc_wifi_cm_if {
+	struct slsi_dev *sdev;
+	/* a std mutex */
+	struct mutex cm_if_mutex;
+
+	/* Reference count of the interface. */
+	struct kref kref;
+
+	/* refer to enum scsc_wifi_cm_if_state */
+	atomic_t cm_if_state;
+};
+
+/*********************************** API ************************************/
+
+/**
+ * Driver's interface to cm_if
+ */
+struct slsi_dev *slsi_dev_attach(struct device *dev, struct scsc_mx *core, struct scsc_service_client *mx_wlan_client);
+void slsi_dev_detach(struct slsi_dev *sdev);
+
+/**
+ * cm_if's interface to driver
+ */
+int slsi_sm_service_driver_register(void);
+void slsi_sm_service_driver_unregister(void);
+void slsi_sm_service_failed(struct slsi_dev *sdev, const char *reason);
+int slsi_sm_wlan_service_open(struct slsi_dev *sdev);
+int slsi_sm_wlan_service_start(struct slsi_dev *sdev);
+void slsi_sm_wlan_service_stop(struct slsi_dev *sdev);
+void slsi_sm_wlan_service_close(struct slsi_dev *sdev);
+int slsi_wlan_service_notifier_register(struct notifier_block *nb);
+int slsi_wlan_service_notifier_unregister(struct notifier_block *nb);
+
+#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include "scsc_wifi_fcq.h"
+#include "debug.h"
+#include "dev.h"
+#include "hip4_sampler.h"
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+#include "porting_imx.h"
+#endif
+
+/* Queues hierarchy and control domains
+ *
+ * wlan p2p p2pX
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * --------------------------------------
+ * |
+ * \ Global domain
+ * |
+ * |
+ * ----------------------------------------
+ * | | ... |
+ * | | ... | Smod Domain (vid, peer_id)
+ * \ \ ... \
+ * | | ... |
+ * | | ... |
+ * ------------------------------------------------------
+ * | | | | | | | | ... | | | |
+ * \ \ \ \ \ \ \ \ ... \ \ \ \ Qmod Domain
+ * | | | | | | | | ... | | | |
+ * -----------------------------------------------------
+ * vi vo bk be vi vo bk be vi vo bk be
+ */
+
+/* Tunable flow-control moduli (writable via module parameters). */
+uint scsc_wifi_fcq_smod = 400;
+module_param(scsc_wifi_fcq_smod, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_smod, "Initial value of unicast smod - peer normal (default = 400)");
+
+uint scsc_wifi_fcq_mcast_smod = 100;
+module_param(scsc_wifi_fcq_mcast_smod, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_mcast_smod, "Initial value of multicast smod - peer normal (default = 100)");
+
+uint scsc_wifi_fcq_smod_power = 4;
+module_param(scsc_wifi_fcq_smod_power, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_smod_power, "Initial powersave SMOD value - peer powersave (default = 4)");
+
+uint scsc_wifi_fcq_mcast_smod_power = 4;
+module_param(scsc_wifi_fcq_mcast_smod_power, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_mcast_smod_power, "Initial value of powersave multicast smod - peer normal (default = 4)");
+
+uint scsc_wifi_fcq_qmod = 400;
+module_param(scsc_wifi_fcq_qmod, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_qmod, "Initial value of unicast qmod - peer normal (default = 400)");
+
+uint scsc_wifi_fcq_mcast_qmod = 100;
+module_param(scsc_wifi_fcq_mcast_qmod, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_mcast_qmod, "Initial value of multicast qmod - peer normal (default = 100)");
+
+uint scsc_wifi_fcq_minimum_smod = 50;
+module_param(scsc_wifi_fcq_minimum_smod, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_minimum_smod, "Initial value of minimum smod - peer normal (default = 50)");
+
+uint scsc_wifi_fcq_distribution_delay_ms;
+module_param(scsc_wifi_fcq_distribution_delay_ms, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(scsc_wifi_fcq_distribution_delay_ms, "Distribution time in ms (default = 0)");
+
+/* Hysteresis applied before resuming a stopped domain. */
+#define SCSC_WIFI_FCQ_SMOD_RESUME_HYSTERESIS 10
+#define SCSC_WIFI_FCQ_QMOD_RESUME_HYSTERESIS 10
+#define SCSC_WIFI_FCQ_GMOD_RESUME_HYSTERESIS 30
+
+/* Protection guard before reaching Stop queues. */
+#define STOP_GUARD_GMOD 10
+#define STOP_GUARD_SMOD 1
+#define STOP_GUARD_QMOD 2
+
+/* CLOSE_IN_OVERSHOOT could close the overshooted queues quickly, however could lead to ENOSPC on */
+/* a multithreaded environment */
+/* #define CLOSE_IN_OVERSHOOT 1 */
+
+/* To optimally start/stop global queues */
+struct peers_cache {
+	struct list_head list;
+	struct net_device *dev;
+	int vif;
+	int peer_index;
+	struct scsc_wifi_fcq_data_qset *qs;
+	bool is_unicast;
+};
+
+/* Cache of every registered peer, walked when (un)stopping queues. */
+LIST_HEAD(peers_cache_list);
+
+/* AC qmod mapping */
+/* 0 - indicates not active */
+/* > 0 - indicates active and the qmod value */
+/* Row index is (ac_inuse >> 1); each row shares smod per-mille across the
+ * four ACs (row values sum to 1000, applied as smod * val / 1000).
+ */
+static u32 ac_q_layout[8][4] = {
+	{    0,   0,   0, 1000},
+	{    0,   0, 500,  500},
+	{    0, 500,   0,  500},
+	{    0, 333, 333,  333},
+	{  500,   0,   0,  500},
+	{  333,   0, 333,  333},
+	{  333, 333,   0,  333},
+	{  250, 250, 250,  250},
+};
+
+/* Setting ENABLE_QCOD will include a second layer of flow control calculation for a specific queue.
+ * #define ENABLE_QCOD 1
+ */
+#define ENABLE_QCOD 1
+#ifdef CONFIG_SCSC_DEBUG
+/* Global debug counters */
+#define DOMAINS 3
+#define DOMAIN_G 0
+#define DOMAIN_S 1
+#define DOMAIN_Q 2
+
+#define DIREC 2
+#define DIREC_TX 0
+#define DIREC_RX 1
+
+#define AC_Q 4
+
+/* Per direction/domain/AC event counters, reported in debug traces. */
+static int td[DIREC][DOMAINS][AC_Q];
+
+/* Increment the debug counter for one (direction, domain, AC) tuple. */
+static inline void fcq_update_counters(int direction, int domain, int ac)
+{
+	td[direction][domain][ac] = td[direction][domain][ac] + 1;
+}
+#endif
+
+/* Setting ENABLE_CTRL_FCQ will include flow control on control queues.
+ * #define ENABLE_CTRL_FCQ 1
+ */
+
+/* POC */
+/* Number of unicast peers currently registered (used to share out smod). */
+static int total;
+
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+/* Need to track the number of peer in smod */
+static int total_in_sleep;
+#endif
+
+/* True while the global (gmod) domain still accepts frames. */
+static inline bool is_gmod_active(struct slsi_dev *sdev)
+{
+	return atomic_read(&sdev->hip4_inst.hip_priv->gactive);
+}
+
+/* True while the peer's qset (smod) domain still accepts frames. */
+static inline bool is_smod_active(struct scsc_wifi_fcq_data_qset *qs)
+{
+	return atomic_read(&qs->active);
+}
+
+/* True while the per-AC queue (qmod) domain still accepts frames. */
+static inline bool is_qmod_active(struct scsc_wifi_fcq_data_q *queue)
+{
+	return atomic_read(&queue->head.active);
+}
+
+/* True while the whole device is in the explicit pause state. */
+static inline bool is_in_pause(struct slsi_dev *sdev)
+{
+	return atomic_read(&sdev->in_pause_state);
+}
+
+/* Should be called from locked context */
+/* Stop every netif subqueue of every cached peer, regardless of state. */
+static inline void fcq_stop_all_queues(struct slsi_dev *sdev)
+{
+	int i;
+	struct peers_cache *pc_node, *next;
+
+	list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+		/* Stop queues all queues */
+		for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+			SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "fcq_stop_all_queues vif %d peer_index %d ac %d\n", pc_node->vif, pc_node->peer_index, i);
+			netif_stop_subqueue(pc_node->dev, pc_node->qs->ac_q[i].head.netif_queue_id);
+		}
+	}
+}
+
+/* Should be called from locked context */
+/* Wake only the subqueues whose gmod, smod and qmod domains are all active
+ * and the device is not paused; everything else stays stopped.
+ */
+static inline void fcq_wake_all_queues(struct slsi_dev *sdev)
+{
+	int i;
+	struct peers_cache *pc_node, *next;
+
+	list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+		/* Wake queues that reported to be active, leave stopped the others. Do not wake queues in pause state */
+		for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+			if (is_gmod_active(sdev) && is_smod_active(pc_node->qs) && is_qmod_active(&pc_node->qs->ac_q[i]) && !is_in_pause(sdev)) {
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "fcq_wake_all_queues vif %d peer_index %d ac %d\n", pc_node->vif, pc_node->peer_index, i);
+				netif_wake_subqueue(pc_node->dev, pc_node->qs->ac_q[i].head.netif_queue_id);
+			} else {
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "fcq_wake_all_queues vif %d peer_index %d ac %d not woken up!\n", pc_node->vif, pc_node->peer_index, i);
+			}
+		}
+	}
+}
+
+/* Enter pause state and stop all peer subqueues. */
+void scsc_wifi_fcq_pause_queues(struct slsi_dev *sdev)
+{
+	SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Pause queues\n");
+	atomic_set(&sdev->in_pause_state, 1);
+	fcq_stop_all_queues(sdev);
+}
+
+/* Leave pause state and wake the subqueues whose domains are active. */
+void scsc_wifi_fcq_unpause_queues(struct slsi_dev *sdev)
+{
+	SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Unpause queues\n");
+	atomic_set(&sdev->in_pause_state, 0);
+	fcq_wake_all_queues(sdev);
+}
+
+#ifdef ENABLE_QCOD
+/* Detects AC queues that have stopped and redistributes the qmod
+ * returns true if the redistribute succeed (qmod increases)
+ * returns false if qmod was not redistributed
+ */
+static bool fcq_redistribute_qmod_before_stopping(struct scsc_wifi_fcq_data_qset *qs)
+{
+	struct scsc_wifi_fcq_q_header *queue;
+	int i, j;
+	u32 *qmod_table;
+	u32 val;
+
+	/* Only BE, so skip as nothing could be done */
+	if (qs->ac_inuse == 1)
+		return false;
+
+	/* Go through the list of possible candidates (BE at index 0 is never reclaimed) */
+	for (i = 1; i < SLSI_NETIF_Q_PER_PEER; i++) {
+		queue = &qs->ac_q[i].head;
+		/* Candidate: queue marked distributable and empty for longer
+		 * than scsc_wifi_fcq_distribution_delay_ms.
+		 */
+		if (queue->can_be_distributed &&
+		    (ktime_compare(ktime_get(), ktime_add_ms(queue->empty_t, scsc_wifi_fcq_distribution_delay_ms)) > 0)) {
+			/* This queue could be redistributed */
+			qs->ac_inuse &= ~(1 << i);
+			/* To prevent further reallocation */
+			queue->can_be_distributed = false;
+			/* Reapply the per-mille split over the remaining active ACs. */
+			qmod_table = &ac_q_layout[qs->ac_inuse >> 1][0];
+			for (j = 0; j < SLSI_NETIF_Q_PER_PEER; j++) {
+				queue = &qs->ac_q[j].head;
+				val = (atomic_read(&qs->smod) * qmod_table[SLSI_NETIF_Q_PER_PEER - 1 - j]) / 1000;
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Detected non-active ac queue (%d). Redistribute qmod[%d] %d qcod %d\n", i, j, val, atomic_read(&queue->qcod));
+				atomic_set(&queue->qmod, val);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Recompute each AC queue's qmod from the current smod using the
+ * ac_q_layout per-mille table selected by the qset's active-AC bitmap.
+ */
+static void fcq_redistribute_qmod(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 peer_index, u8 vif)
+{
+	int i;
+	struct scsc_wifi_fcq_q_header *queue_redis;
+	u32 *qmod_table;
+	u32 val;
+
+	qmod_table = &ac_q_layout[qs->ac_inuse >> 1][0];
+	for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+		queue_redis = &qs->ac_q[i].head;
+		/* Table is indexed in reverse AC order relative to ac_q[]. */
+		val = (atomic_read(&qs->smod) * qmod_table[SLSI_NETIF_Q_PER_PEER - 1 - i]) / 1000;
+		atomic_set(&queue_redis->qmod, val);
+#ifdef CLOSE_IN_OVERSHOOT
+		if (val > 0) {
+			/* Stop queue that are overshooted because the new smod/qmod */
+			if (atomic_read(&queue_redis->active) && (atomic_read(&queue_redis->qcod) >= (atomic_read(&queue_redis->qmod) - STOP_GUARD_QMOD))) {
+				netif_stop_subqueue(dev, queue_redis->netif_queue_id);
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Closing overshoot queue for vif: %d peer: %d ac: %d qcod (%d) qmod (%d)\n", vif, peer_index, i, atomic_read(&queue_redis->qcod), atomic_read(&queue_redis->qmod));
+				atomic_set(&queue_redis->active, 0);
+			}
+		}
+#endif
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Redistribute new value %d: qmod[%d] %d qcod %d active %d\n", val, i, atomic_read(&queue_redis->qmod), atomic_read(&queue_redis->qcod), atomic_read(&queue_redis->active));
+	}
+}
+#endif
+
+/* Share the nominal smod equally between the unicast peers (floored at
+ * scsc_wifi_fcq_minimum_smod), then rebuild every peer's qmod split.
+ * NOTE(review): total_to_distribute == 0 would divide by zero — presumably
+ * callers only invoke this with at least one peer registered; confirm.
+ */
+static void fcq_redistribute_smod(struct net_device *dev, struct slsi_dev *sdev, int total_to_distribute)
+{
+#ifdef CLOSE_IN_OVERSHOOT
+	int i;
+#endif
+	u32 new_smod;
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+	int get_total = 0;
+#endif
+	struct peers_cache *pc_node, *next;
+	struct scsc_wifi_fcq_data_qset *qs_redis;
+
+	/* Redistribute smod - qmod */
+	/* Go through the list of nodes and redistribute smod and qmod accordingly */
+	/* First, get the nominal smod and divide it by total of peers */
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+	/* Sleeping peers relinquish their share to the awake ones. */
+	get_total = total_to_distribute - total_in_sleep;
+	if (get_total > 0)
+		new_smod = scsc_wifi_fcq_smod / get_total;
+	/* Use the nominal total in case of failure */
+	else
+		new_smod = scsc_wifi_fcq_smod / total;
+#else
+	new_smod = scsc_wifi_fcq_smod / total_to_distribute;
+#endif
+	/* Saturate if number is lower than certian low level */
+	if (new_smod < scsc_wifi_fcq_minimum_smod)
+		new_smod = scsc_wifi_fcq_minimum_smod;
+	list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+		if (pc_node->is_unicast) {
+			qs_redis = pc_node->qs;
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+			if (qs_redis->in_sleep)
+				atomic_set(&qs_redis->smod, 0);
+			else
+				atomic_set(&qs_redis->smod, new_smod);
+#else
+			atomic_set(&qs_redis->smod, new_smod);
+#endif
+#ifdef CLOSE_IN_OVERSHOOT
+			/* Stop queues to avoid overshooting if scod > smod */
+			if (is_smod_active(qs_redis) && atomic_read(&qs_redis->scod) >= (atomic_read(&qs_redis->smod) - STOP_GUARD_SMOD)) {
+				/* Disable the qs that is in overshoot */
+				for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++)
+					netif_stop_subqueue(dev, qs_redis->ac_q[i].head.netif_queue_id);
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Closing overshoot qs for vif %d peer_index %d\n", pc_node->vif, pc_node->peer_index);
+				atomic_set(&qs_redis->active, 0);
+			}
+#endif
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+			SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Redistributed smod = %d for vif %d peer_index %d total %d total_in_sleep %d\n", new_smod, pc_node->vif, pc_node->peer_index, total_to_distribute, total_in_sleep);
+#else
+			SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Redistributed smod = %d for vif %d peer_index %d total %d\n", new_smod, pc_node->vif, pc_node->peer_index, total_to_distribute);
+#endif
+			/* Redistribute the qmod */
+			fcq_redistribute_qmod(pc_node->dev, pc_node->qs, sdev, pc_node->peer_index, pc_node->vif);
+		}
+	}
+}
+
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+/* Try to reclaim smod from a peer that has been idle for >5s before
+ * stopping queues.  Returns true (1) if a peer was put to sleep and smod
+ * redistributed, false (0) otherwise.
+ */
+static int fcq_redistribute_smod_before_stopping(struct net_device *dev, struct slsi_dev *sdev, int total_to_distribute)
+{
+	struct peers_cache *pc_node, *next;
+
+	/* only one peer, skip */
+	if (total_to_distribute == 1)
+		return false;
+
+	/* Search for nodes that were empty and are candidates to be redistributed */
+	list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+		if (pc_node->is_unicast) {
+			if (pc_node->qs->can_be_distributed &&
+			    (ktime_compare(ktime_get(), ktime_add_ms(pc_node->qs->empty_t, 5000)) > 0)) {
+				pc_node->qs->in_sleep = true;
+				total_in_sleep += 1;
+				SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Smod qs empty. Can be redistributed for vif %d peer_index %d qs->can_be_distributed %d\n", pc_node->vif, pc_node->peer_index, pc_node->qs->can_be_distributed);
+				fcq_redistribute_smod(dev, sdev, total_to_distribute);
+				pc_node->qs->can_be_distributed = false;
+				return true;
+			}
+		}
+	}
+	return false;
+}
+#endif
+
+/* Account one outgoing frame against the global (gmod) domain.
+ *
+ * Returns 0 if the frame may proceed (gcod already incremented) or
+ * -ENOSPC when the global domain is saturated.  Serialised by gbot_lock.
+ */
+static int fcq_transmit_gmod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
+{
+	int gcod;
+	int gmod;
+
+	spin_lock(&sdev->hip4_inst.hip_priv->gbot_lock);
+
+	/* Check first the global domain */
+	if (sdev->hip4_inst.hip_priv->saturated) {
+#ifdef CONFIG_SCSC_DEBUG
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx Global domain. No space. active: %d vif: %d peer: %d ac: %d gcod (%d) gmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+				atomic_read(&sdev->hip4_inst.hip_priv->gactive), vif, peer_index, priority, atomic_read(&sdev->hip4_inst.hip_priv->gcod), atomic_read(&sdev->hip4_inst.hip_priv->gmod),
+				td[DIREC_TX][DOMAIN_G][0], td[DIREC_RX][DOMAIN_G][0], td[DIREC_TX][DOMAIN_G][2], td[DIREC_RX][DOMAIN_G][2], td[DIREC_TX][DOMAIN_G][3], td[DIREC_RX][DOMAIN_G][3]);
+		/* NOTE(review): this stop is only compiled under CONFIG_SCSC_DEBUG;
+		 * confirm whether production builds should also stop queues here
+		 * (queues were already stopped when saturation was first entered
+		 * below, so this may be intentional belt-and-braces).
+		 */
+		fcq_stop_all_queues(sdev);
+#endif
+		spin_unlock(&sdev->hip4_inst.hip_priv->gbot_lock);
+		return -ENOSPC;
+	}
+
+	/* Inactive domain: let 'guard' more frames through before declaring
+	 * full saturation (guard only decrements while gactive is clear).
+	 */
+	if (!atomic_read(&sdev->hip4_inst.hip_priv->gactive) && sdev->hip4_inst.hip_priv->guard-- == 0) {
+#ifdef CONFIG_SCSC_DEBUG
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx Global domain. Saturating Gmod. active: %d vif: %d peer: %d ac: %d gcod (%d) gmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+				atomic_read(&sdev->hip4_inst.hip_priv->gactive), vif, peer_index, priority, atomic_read(&sdev->hip4_inst.hip_priv->gcod), atomic_read(&sdev->hip4_inst.hip_priv->gmod),
+				td[DIREC_TX][DOMAIN_G][0], td[DIREC_RX][DOMAIN_G][0], td[DIREC_TX][DOMAIN_G][2], td[DIREC_RX][DOMAIN_G][2], td[DIREC_TX][DOMAIN_G][3], td[DIREC_RX][DOMAIN_G][3]);
+#endif
+		sdev->hip4_inst.hip_priv->saturated = true;
+	}
+
+	gmod = atomic_read(&sdev->hip4_inst.hip_priv->gmod);
+	gcod = atomic_inc_return(&sdev->hip4_inst.hip_priv->gcod);
+#ifdef CONFIG_SCSC_DEBUG
+	fcq_update_counters(DIREC_TX, DOMAIN_G, priority);
+	SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "tx: active: %d vif: %d peer: %d ac: %d gcod (%d) gmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+			atomic_read(&sdev->hip4_inst.hip_priv->gactive), vif, peer_index, priority, gcod, gmod,
+			td[DIREC_TX][DOMAIN_G][0], td[DIREC_RX][DOMAIN_G][0], td[DIREC_TX][DOMAIN_G][2], td[DIREC_RX][DOMAIN_G][2], td[DIREC_TX][DOMAIN_G][3], td[DIREC_RX][DOMAIN_G][3]);
+#endif
+	/* Approaching gmod: stop all queues and arm the guard window. */
+	if (gcod >= (atomic_read(&sdev->hip4_inst.hip_priv->gmod) - STOP_GUARD_GMOD)) {
+		fcq_stop_all_queues(sdev);
+		if (atomic_read(&sdev->hip4_inst.hip_priv->gactive)) {
+			sdev->hip4_inst.hip_priv->guard = STOP_GUARD_GMOD;
+			/* if GUARD is zero, saturate inmmediatelly */
+			if (sdev->hip4_inst.hip_priv->guard == 0)
+				sdev->hip4_inst.hip_priv->saturated = true;
+		}
+		atomic_set(&sdev->hip4_inst.hip_priv->gactive, 0);
+		SCSC_HIP4_SAMPLER_BOT_STOP_Q(sdev->minor_prof, peer_index << 2 | vif);
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Global Queues Stopped. gcod (%d) >= gmod (%d) gactive(%d)\n", gcod, gmod, atomic_read(&sdev->hip4_inst.hip_priv->gactive));
+	}
+	spin_unlock(&sdev->hip4_inst.hip_priv->gbot_lock);
+
+	return 0;
+}
+
+/* This function should be called in spinlock(qs) */
+/* Account one outgoing frame against the peer's qset (smod) domain.
+ *
+ * Returns 0 if the frame may proceed (scod already incremented) or
+ * -ENOSPC when the qset is saturated.
+ */
+static int fcq_transmit_smod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
+{
+	int scod;
+
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+	if (qs->in_sleep) {
+		/* Queue was put in sleep and now has become active, need to redistribute smod */
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Detected activity in sleep Qs active %d vif: %d peer: %d ac: %d\n",
+				atomic_read(&qs->active), vif, peer_index, priority);
+		/* Before redistributing need to update the redistribution parameters */
+		qs->in_sleep = false;
+		if (total_in_sleep)
+			total_in_sleep -= 1;
+		fcq_redistribute_smod(dev, sdev, total);
+	}
+	/* If we transmit we consider the queue -not- empty */
+	qs->can_be_distributed = false;
+#endif
+	/* Check smod domain */
+	if (qs->saturated) {
+		int i;
+
+#ifdef CONFIG_SCSC_DEBUG
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx Smod domain. No space. active %d vif: %d peer: %d ac: %d scod (%d) smod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+				atomic_read(&qs->active), vif, peer_index, priority, atomic_read(&qs->scod), atomic_read(&qs->smod),
+				td[DIREC_TX][DOMAIN_S][0], td[DIREC_RX][DOMAIN_S][0], td[DIREC_TX][DOMAIN_S][2], td[DIREC_RX][DOMAIN_S][2], td[DIREC_TX][DOMAIN_S][3], td[DIREC_RX][DOMAIN_S][3]);
+#endif
+
+		/* Close subqueues again */
+		for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++)
+			netif_stop_subqueue(dev, qs->ac_q[i].head.netif_queue_id);
+
+		return -ENOSPC;
+	}
+	/* Pass the frame until reaching the actual saturation */
+	if (!atomic_read(&qs->active) && (qs->guard-- == 0)) {
+#ifdef CONFIG_SCSC_DEBUG
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx Smod domain. Going into Saturation. active %d vif: %d peer: %d ac: %d scod (%d) smod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+				atomic_read(&qs->active), vif, peer_index, priority, atomic_read(&qs->scod), atomic_read(&qs->smod),
+				td[DIREC_TX][DOMAIN_S][0], td[DIREC_RX][DOMAIN_S][0], td[DIREC_TX][DOMAIN_S][2], td[DIREC_RX][DOMAIN_S][2], td[DIREC_TX][DOMAIN_S][3], td[DIREC_RX][DOMAIN_S][3]);
+#endif
+		qs->saturated = true;
+	}
+	scod = atomic_inc_return(&qs->scod);
+	SCSC_HIP4_SAMPLER_BOT_TX(sdev->minor_prof, scod, atomic_read(&qs->smod), priority << 6 | (peer_index & 0xf) << 2 | vif);
+#ifdef CONFIG_SCSC_DEBUG
+	fcq_update_counters(DIREC_TX, DOMAIN_S, priority);
+	SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "tx: active: %d vif: %d peer: %d ac: %d scod (%d) smod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+			atomic_read(&qs->active), vif, peer_index, priority, atomic_read(&qs->scod), atomic_read(&qs->smod),
+			td[DIREC_TX][DOMAIN_S][0], td[DIREC_RX][DOMAIN_S][0], td[DIREC_TX][DOMAIN_S][2], td[DIREC_RX][DOMAIN_S][2], td[DIREC_TX][DOMAIN_S][3], td[DIREC_RX][DOMAIN_S][3]);
+#endif
+	/* Approaching smod: stop this peer's subqueues and arm the guard. */
+	if (scod >= (atomic_read(&qs->smod) - STOP_GUARD_SMOD)) {
+		int i;
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+		/* Before closing check whether we could get slots from non used queues */
+		if (fcq_redistribute_smod_before_stopping(dev, sdev, total)) {
+			SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Skipped Stop vif: %d peer: %d. scod (%d) >= smod (%d)\n", vif, peer_index, atomic_read(&qs->scod), atomic_read(&qs->smod));
+			return 0;
+		}
+#endif
+		for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++)
+			netif_stop_subqueue(dev, qs->ac_q[i].head.netif_queue_id);
+
+		if (atomic_read(&qs->active)) {
+			qs->guard = STOP_GUARD_SMOD;
+			/* if GUARD is zero, saturate inmmediatelly */
+			if (qs->guard == 0)
+				qs->saturated = true;
+		}
+		atomic_set(&qs->active, 0);
+		qs->stats.netq_stops++;
+		SCSC_HIP4_SAMPLER_BOT_STOP_Q(sdev->minor_prof, peer_index << 2 | vif);
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Smod Queues Stopped vif: %d peer: %d. scod (%d) >= smod (%d)\n", vif, peer_index, atomic_read(&qs->scod), atomic_read(&qs->smod));
+	}
+	return 0;
+}
+
+#if defined(ENABLE_CTRL_FCQ) || defined(ENABLE_QCOD)
+/* This function should be called in spinlock(qs) */
+/* Account one outgoing frame against the per-AC queue (qmod) domain.
+ *
+ * Activates the AC on first use (triggering a qmod redistribution) and
+ * returns 0 if the frame may proceed, -ENOSPC when the AC queue is
+ * saturated.
+ */
+static int fcq_transmit_qmod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u16 priority, u8 peer_index, u8 vif)
+{
+	struct scsc_wifi_fcq_q_header *queue;
+	int qcod;
+
+	queue = &qs->ac_q[priority].head;
+
+	/* First frame on this AC: mark it in use and reshare the qmod. */
+	if (!(qs->ac_inuse & (1 << priority))) {
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "New AC detected: %d\n", priority);
+		qs->ac_inuse |= (1 << priority);
+		fcq_redistribute_qmod(dev, qs, sdev, peer_index, vif);
+		queue->can_be_distributed = false;
+	}
+
+	if (queue->saturated) {
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx No space in ac: %d\n", priority);
+		/* Stop subqueue */
+		netif_stop_subqueue(dev, queue->netif_queue_id);
+		return -ENOSPC;
+	}
+
+	/* Pass the frame until reaching the actual saturation */
+	if (!atomic_read(&queue->active) && (queue->guard-- == 0)) {
+		SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx No space in ac: %d Saturated\n", priority);
+		queue->saturated = true;
+	}
+
+	qcod = atomic_inc_return(&queue->qcod);
+	SCSC_HIP4_SAMPLER_BOT_QMOD_TX(sdev->minor_prof, qcod, atomic_read(&queue->qmod), priority << 6 | (peer_index & 0xf) << 2 | vif);
+#ifdef CONFIG_SCSC_DEBUG
+	fcq_update_counters(DIREC_TX, DOMAIN_Q, priority);
+	SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "tx: active: %d vif: %d peer: %d ac: %d qcod (%d) qmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+			atomic_read(&queue->active), vif, peer_index, priority, atomic_read(&queue->qcod), atomic_read(&queue->qmod),
+			td[DIREC_TX][DOMAIN_Q][0], td[DIREC_RX][DOMAIN_Q][0], td[DIREC_TX][DOMAIN_Q][2], td[DIREC_RX][DOMAIN_Q][2], td[DIREC_TX][DOMAIN_Q][3], td[DIREC_RX][DOMAIN_Q][3]);
+#endif
+	/* Approaching qmod: try to reclaim capacity before stopping the AC. */
+	if (atomic_read(&queue->active) && qcod >= (atomic_read(&queue->qmod) - STOP_GUARD_QMOD)) {
+		/* Before closing check whether we could get slots from non used queues */
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+		if (fcq_redistribute_smod_before_stopping(dev, sdev, total)) {
+			SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Skipped Stop vif: %d peer: %d. scod (%d) >= smod (%d)\n", vif, peer_index, atomic_read(&qs->scod), atomic_read(&qs->smod));
+			goto skip_stop;
+		}
+#endif
+		if (fcq_redistribute_qmod_before_stopping(qs))
+			goto skip_stop;
+		SCSC_HIP4_SAMPLER_BOT_QMOD_STOP(sdev->minor_prof, priority << 6 | (peer_index & 0xf) << 2 | vif);
+		SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Stop subqueue vif: %d peer: %d ac: %d qcod (%d) qmod (%d)\n", vif, peer_index, priority, atomic_read(&queue->qcod), atomic_read(&queue->qmod));
+		if (atomic_read(&queue->active)) {
+			queue->guard = STOP_GUARD_QMOD;
+			/* if GUARD is zero, saturate inmmediatelly */
+			if (queue->guard == 0)
+				queue->saturated = true;
+		}
+		atomic_set(&queue->active, 0);
+		netif_stop_subqueue(dev, queue->netif_queue_id);
+		queue->stats.netq_stops++;
+	}
+skip_stop:
+	return 0;
+}
+#endif
+
+/* Flow-control hook for control queues.  With ENABLE_CTRL_FCQ undefined
+ * (the default) this is a no-op that always returns 0.
+ * NOTE(review): the guarded call below passes only two arguments, but
+ * fcq_transmit_qmod_domain now takes six — this dead code would not
+ * compile if ENABLE_CTRL_FCQ were ever defined; update it before enabling.
+ */
+int scsc_wifi_fcq_transmit_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue)
+{
+	int rc = 0;
+
+#ifdef ENABLE_CTRL_FCQ
+	if (WARN_ON(!dev))
+		return -EINVAL;
+
+	if (WARN_ON(!queue))
+		return -EINVAL;
+
+	rc = fcq_transmit_qmod_domain(dev, &queue->head);
+#endif
+	return rc;
+}
+
+/*
+ * scsc_wifi_fcq_transmit_data: per-packet TX flow-control accounting across
+ * three closure domains: global (gcod/gmod), per-peer set (scod/smod) and,
+ * with ENABLE_QCOD, per-AC queue (qcod/qmod).
+ *
+ * @dev:        transmitting netdev
+ * @qs:         data queue set of the destination peer
+ * @priority:   AC index into qs->ac_q[] (bounds-checked below)
+ * @sdev:       device instance holding the global counters
+ * @vif:        VIF id, used for peer-record matching and logging
+ * @peer_index: peer id, used for peer-record matching and logging
+ *
+ * Returns 0 if the packet may be queued, -EINVAL for bad arguments or a
+ * stale/unknown peer record, -EPERM while the 802.1x controlled port is
+ * blocked, or a negative value propagated from a per-domain check.
+ * All accounting is done under qs->cp_lock (BH-disabled).
+ */
+int scsc_wifi_fcq_transmit_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
+{
+ int rc;
+ struct peers_cache *pc_node, *next;
+
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ if (WARN_ON(priority >= ARRAY_SIZE(qs->ac_q)))
+ return -EINVAL;
+
+ spin_lock_bh(&qs->cp_lock);
+ /* Check caller matches an existing peer record */
+ list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+ if (pc_node->qs == qs && pc_node->peer_index == peer_index &&
+ pc_node->vif == vif && pc_node->dev == dev) {
+ goto found;
+ }
+ }
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Packet dropped. Detected incorrect peer record\n");
+ spin_unlock_bh(&qs->cp_lock);
+ return -EINVAL;
+found:
+ /* Controlled port is not yet open; so can't send data frame */
+ if (qs->controlled_port_state == SCSC_WIFI_FCQ_8021x_STATE_BLOCKED) {
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "8021x_STATE_BLOCKED\n");
+ spin_unlock_bh(&qs->cp_lock);
+ return -EPERM;
+ }
+ /* Domains are charged in order G -> S -> Q; a failure in a later
+ * domain rolls back the counters already charged in earlier ones.
+ */
+ rc = fcq_transmit_gmod_domain(dev, qs, priority, sdev, vif, peer_index);
+ if (rc) {
+ spin_unlock_bh(&qs->cp_lock);
+ return rc;
+ }
+
+ rc = fcq_transmit_smod_domain(dev, qs, priority, sdev, vif, peer_index);
+ if (rc) {
+ /* Queue is full and was not active, so decrement gcod since
+ * this packet won't be transmitted, but the overall gcod
+ * resource is still available. This situation should never
+ * happen if flow control works as expected.
+ */
+ atomic_dec(&sdev->hip4_inst.hip_priv->gcod);
+ spin_unlock_bh(&qs->cp_lock);
+ return rc;
+ }
+
+#ifdef ENABLE_QCOD
+ rc = fcq_transmit_qmod_domain(dev, qs, sdev, priority, peer_index, vif);
+ if (rc) {
+ /* Queue is full and was not active, so decrement scod since
+ * this packet won't be transmitted, but the overall scod
+ * resource is still available. This situation should never
+ * happen if flow control works as expected.
+ */
+ atomic_dec(&qs->scod);
+ atomic_dec(&sdev->hip4_inst.hip_priv->gcod);
+ spin_unlock_bh(&qs->cp_lock);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx scsc_wifi_fcq_transmit_data: Flow control not respected. Packet will be dropped.\n");
+ return rc;
+ }
+#endif
+
+ spin_unlock_bh(&qs->cp_lock);
+ return 0;
+}
+
+/*
+ * fcq_receive_gmod_domain: credit back one packet in the global (G) domain
+ * on receive-completion and restart all netif queues when the global count
+ * drops far enough below gmod (hysteresis).
+ *
+ * @qs may be NULL (used by the no-peer completion path); it is referenced
+ * only in the debug print. A negative gcod is clamped to 0 and logged.
+ * Always returns 0.
+ */
+static int fcq_receive_gmod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u16 priority, u8 peer_index, u8 vif)
+{
+ int gcod;
+ int gmod;
+ int gactive;
+
+ spin_lock(&sdev->hip4_inst.hip_priv->gbot_lock);
+ /* Decrease first the global domain */
+ gmod = atomic_read(&sdev->hip4_inst.hip_priv->gmod);
+ gcod = atomic_dec_return(&sdev->hip4_inst.hip_priv->gcod);
+ gactive = atomic_read(&sdev->hip4_inst.hip_priv->gactive);
+ if (unlikely(gcod < 0)) {
+ atomic_set(&sdev->hip4_inst.hip_priv->gcod, 0);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx scsc_wifi_fcq_receive: gcod is negative. Has been fixed\n");
+ }
+
+#ifdef CONFIG_SCSC_DEBUG
+ fcq_update_counters(DIREC_RX, DOMAIN_G, priority);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "rx: active: %d vif: %d peer: %d ac: %d gcod (%d) gmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d %s\n",
+ gactive, vif, peer_index, priority, gcod, gmod,
+ td[DIREC_TX][DOMAIN_G][0], td[DIREC_RX][DOMAIN_G][0], td[DIREC_TX][DOMAIN_G][2], td[DIREC_RX][DOMAIN_G][2], td[DIREC_TX][DOMAIN_G][3], td[DIREC_RX][DOMAIN_G][3], qs ? "" : "NO PEER");
+#endif
+
+ /* NOTE(review): '/' binds tighter than '+', so the hysteresis is scaled
+ * by the peer count before being added to gcod -- presumably intentional
+ * (per-peer share of the resume hysteresis); confirm total != 0 on all
+ * paths reaching here.
+ */
+ if (!is_gmod_active(sdev) && (gcod + SCSC_WIFI_FCQ_GMOD_RESUME_HYSTERESIS / total < gmod)) {
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Global Queues Started. gcod (%d) < gmod (%d)\n", gcod, gmod);
+ sdev->hip4_inst.hip_priv->saturated = false;
+ atomic_set(&sdev->hip4_inst.hip_priv->gactive, 1);
+ fcq_wake_all_queues(sdev);
+ }
+ spin_unlock(&sdev->hip4_inst.hip_priv->gbot_lock);
+
+ return 0;
+}
+
+/*
+ * fcq_receive_smod_domain: credit back one packet in the per-peer set (S)
+ * domain and, when scod falls far enough below smod (hysteresis), resume
+ * the peer's AC subqueues that are not held closed by the G/Q domains or
+ * by the pause state.
+ *
+ * @qs must be non-NULL here (the no-peer path only touches the G domain).
+ * Always returns 0.
+ */
+static int fcq_receive_smod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u16 priority, u8 peer_index, u8 vif)
+{
+ int scod;
+
+ scod = atomic_dec_return(&qs->scod);
+ if (unlikely(scod < 0)) {
+ atomic_set(&qs->scod, 0);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx scsc_wifi_fcq_receive: scod is negative. Has been fixed\n");
+ }
+
+#ifdef CONFIG_SCSC_DEBUG
+ fcq_update_counters(DIREC_RX, DOMAIN_S, priority);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "rx: active: %d vif: %d peer: %d ac: %d scod (%d) smod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+ atomic_read(&qs->active), vif, peer_index, priority, atomic_read(&qs->scod), atomic_read(&qs->smod),
+ td[DIREC_TX][DOMAIN_S][0], td[DIREC_RX][DOMAIN_S][0], td[DIREC_TX][DOMAIN_S][2], td[DIREC_RX][DOMAIN_S][2], td[DIREC_TX][DOMAIN_S][3], td[DIREC_RX][DOMAIN_S][3]);
+#endif
+ /* Only support a maximum of 16 peers!!!!!!*/
+ SCSC_HIP4_SAMPLER_BOT_RX(sdev->minor_prof, scod, atomic_read(&qs->smod), priority << 6 | (peer_index & 0xf) << 2 | vif);
+ /* NOTE(review): as in the G domain, the hysteresis is divided by the
+ * global peer count 'total' before the comparison.
+ */
+ if (!is_smod_active(qs) && (scod + SCSC_WIFI_FCQ_SMOD_RESUME_HYSTERESIS / total < atomic_read(&qs->smod))) {
+ int i;
+ /* Resume all queues for this peer that were active . Do not wake queues in pause state or closed in upper domains */
+ for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
+ if (is_gmod_active(sdev) && is_qmod_active(&qs->ac_q[i]) && !is_in_pause(sdev))
+ netif_wake_subqueue(dev, qs->ac_q[i].head.netif_queue_id);
+ else
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "smod wake vif %d peer_index %d ac %d not woken up!\n", vif, peer_index, i);
+ }
+
+ SCSC_HIP4_SAMPLER_BOT_START_Q(sdev->minor_prof, peer_index << 2 | vif);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Smod Queues Started vif: %d peer: %d. scod (%d) >= smod (%d)\n", vif, peer_index, atomic_read(&qs->scod), atomic_read(&qs->smod));
+ /* Regardless the queue were not woken up, set the qs as active */
+ qs->saturated = false;
+ atomic_set(&qs->active, 1);
+ qs->stats.netq_resumes++;
+ }
+
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+ /* Record when the set drains completely so idle capacity can later be
+ * redistributed to busy peers.
+ */
+ if (scod == 0) {
+ /* Get the empty time */
+ qs->empty_t = ktime_get();
+ qs->can_be_distributed = true;
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Qs empty vif: %d peer: %d\n", vif, peer_index);
+ } else {
+ qs->can_be_distributed = false;
+ }
+#endif
+ return 0;
+}
+
+#ifdef ENABLE_QCOD
+/*
+ * fcq_receive_qmod_domain: credit back one packet in the per-AC queue (Q)
+ * domain and resume the single AC subqueue when qcod falls far enough
+ * below qmod (hysteresis), unless a higher domain or pause state keeps it
+ * closed.
+ *
+ * Always returns 0. A negative qcod is clamped to 0 and logged.
+ */
+static int fcq_receive_qmod_domain(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u16 priority, u8 peer_index, u8 vif)
+{
+ struct scsc_wifi_fcq_q_header *queue;
+ int qcod;
+
+ queue = &qs->ac_q[priority].head;
+
+ qcod = atomic_dec_return(&queue->qcod);
+ if (unlikely(qcod < 0)) {
+ atomic_set(&queue->qcod, 0);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "xxxxxxxxxxxxxxxxxxxxxxx fcq_receive_qmod_domain: qcod is negative. Has been fixed\n");
+ }
+
+#ifdef CONFIG_SCSC_DEBUG
+ fcq_update_counters(DIREC_RX, DOMAIN_Q, priority);
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "rx: active: %d vif: %d peer: %d ac: %d qcod (%d) qmod (%d) betx:%d berx:%d vitx:%d virx:%d votx:%d vorx:%d\n",
+ atomic_read(&queue->active), vif, peer_index, priority, atomic_read(&queue->qcod), atomic_read(&queue->qmod),
+ td[DIREC_TX][DOMAIN_Q][0], td[DIREC_RX][DOMAIN_Q][0], td[DIREC_TX][DOMAIN_Q][2], td[DIREC_RX][DOMAIN_Q][2], td[DIREC_TX][DOMAIN_Q][3], td[DIREC_RX][DOMAIN_Q][3]);
+#endif
+ SCSC_HIP4_SAMPLER_BOT_QMOD_RX(sdev->minor_prof, qcod, atomic_read(&queue->qmod), priority << 6 | (peer_index & 0xf) << 2 | vif);
+
+ if (!is_qmod_active(&qs->ac_q[priority]) && ((qcod + SCSC_WIFI_FCQ_QMOD_RESUME_HYSTERESIS / total) < atomic_read(&queue->qmod))) {
+ /* Do not wake queues in pause state or closed by other domain */
+ if (is_gmod_active(sdev) && is_smod_active(qs) && !is_in_pause(sdev))
+ netif_wake_subqueue(dev, queue->netif_queue_id);
+ /* Only support a maximum of 16 peers!!!!!!*/
+ SCSC_HIP4_SAMPLER_BOT_QMOD_START(sdev->minor_prof, priority << 6 | (peer_index & 0xf) << 2 | vif);
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Start subqueue vif: %d peer: %d ac: %d qcod (%d) qmod (%d)\n", vif, peer_index, priority, atomic_read(&queue->qcod), atomic_read(&queue->qmod));
+ queue->stats.netq_resumes++;
+ /* Regardless the queue was not woken up, set the queue as active */
+ queue->saturated = false;
+ atomic_set(&queue->active, 1);
+ }
+
+ /* Ignore priority BE as it it always active */
+ if (qcod == 0 && priority != SLSI_TRAFFIC_Q_BE) {
+ /* Get the stop time */
+ queue->empty_t = ktime_get();
+ queue->can_be_distributed = true;
+ } else {
+ queue->can_be_distributed = false;
+ }
+ return 0;
+}
+#endif
+
+/*
+ * scsc_wifi_fcq_receive_ctrl: receive-completion hook for a control queue.
+ * Validates its arguments; the actual accounting is currently disabled
+ * (see the commented call under ENABLE_CTRL_FCQ). Returns 0 on success,
+ * -EINVAL on a NULL argument.
+ */
+int scsc_wifi_fcq_receive_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue)
+{
+ if (WARN_ON(!dev) || WARN_ON(!queue))
+ return -EINVAL;
+
+#ifdef ENABLE_CTRL_FCQ
+ /* return fcq_receive(dev, &queue->head, NULL, 0, 0, 0); */
+#endif
+ return 0;
+}
+
+/* This function is to collect missing returning mbulks from a peer that has dissapeared so the qset is missing */
+/* This function is to collect missing returning mbulks from a peer that has dissapeared so the qset is missing */
+/*
+ * Only the global (G) domain counter is credited back -- there is no qset
+ * to credit. Under CONFIG_SCSC_DEBUG the S and Q debug counters are still
+ * updated so the per-domain tallies stay balanced. Always returns 0.
+ */
+int scsc_wifi_fcq_receive_data_no_peer(struct net_device *dev, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
+{
+ fcq_receive_gmod_domain(dev, NULL, sdev, priority, peer_index, vif);
+#ifdef CONFIG_SCSC_DEBUG
+ /* Update also S and Q domain */
+ fcq_update_counters(DIREC_RX, DOMAIN_S, priority);
+ fcq_update_counters(DIREC_RX, DOMAIN_Q, priority);
+#endif
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_receive_data: per-packet RX-completion accounting,
+ * crediting the G, S and (with ENABLE_QCOD) Q domains in that order and
+ * restarting any netif queues that can be resumed.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, or the first failing
+ * domain's error (later domains are then skipped).
+ */
+int scsc_wifi_fcq_receive_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
+{
+ int rc = 0;
+
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ if (WARN_ON(priority >= ARRAY_SIZE(qs->ac_q)))
+ return -EINVAL;
+
+ /* The read/modify/write of the scod here needs synchronisation. */
+ spin_lock_bh(&qs->cp_lock);
+
+ rc = fcq_receive_gmod_domain(dev, qs, sdev, priority, peer_index, vif);
+ if (rc)
+ goto end;
+
+ rc = fcq_receive_smod_domain(dev, qs, sdev, priority, peer_index, vif);
+ if (rc)
+ goto end;
+
+#ifdef ENABLE_QCOD
+ rc = fcq_receive_qmod_domain(dev, qs, sdev, priority, peer_index, vif);
+#endif
+end:
+ spin_unlock_bh(&qs->cp_lock);
+ return rc;
+}
+
+/*
+ * scsc_wifi_fcq_update_smod: resize a queue set's smod limit when the peer
+ * transitions between power-save and active, picking the unicast or
+ * multicast limit according to @type. Unknown states are only logged.
+ * Returns 0 on success, -EINVAL if @qs is NULL.
+ */
+int scsc_wifi_fcq_update_smod(struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_ps_state peer_ps_state,
+ enum scsc_wifi_fcq_queue_set_type type)
+{
+ bool unicast = (type == SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST);
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ switch (peer_ps_state) {
+ case SCSC_WIFI_FCQ_PS_STATE_POWERSAVE:
+ atomic_set(&qs->smod, unicast ? scsc_wifi_fcq_smod_power : scsc_wifi_fcq_mcast_smod_power);
+ qs->peer_ps_state = peer_ps_state;
+ qs->peer_ps_state_transitions++;
+ break;
+ case SCSC_WIFI_FCQ_PS_STATE_ACTIVE:
+ atomic_set(&qs->smod, unicast ? scsc_wifi_fcq_smod : scsc_wifi_fcq_mcast_smod);
+ qs->peer_ps_state = peer_ps_state;
+ qs->peer_ps_state_transitions++;
+ break;
+ default:
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Unknown sta_state %d\n",
+ peer_ps_state);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_8021x_port_state: set the 802.1x controlled-port state for
+ * a queue set. While BLOCKED, scsc_wifi_fcq_transmit_data() rejects data
+ * frames with -EPERM.
+ *
+ * Returns 0 on success, -EINVAL on a NULL argument.
+ * Fix: the !dev check used to return -EINTR while the !qs check returned
+ * -EINVAL; both now return -EINVAL for consistency with the rest of the
+ * tx/rx API in this file.
+ */
+int scsc_wifi_fcq_8021x_port_state(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_8021x_state state)
+{
+ if (WARN_ON(!dev))
+ return -EINVAL;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ spin_lock_bh(&qs->cp_lock);
+ qs->controlled_port_state = state;
+ spin_unlock_bh(&qs->cp_lock);
+ SLSI_NET_DBG1(dev, SLSI_WIFI_FCQ, "802.1x: Queue set 0x%p is %s\n", qs,
+ state == SCSC_WIFI_FCQ_8021x_STATE_OPEN ? "Open" : "Blocked");
+ return 0;
+}
+
+/**
+ * Statistics
+ */
+/*
+ * scsc_wifi_fcq_stat_queue: snapshot a queue's stop/resume statistics and
+ * its current qmod/qcod counters into the caller-supplied output params.
+ *
+ * Returns 0 on success, -EINTR on any NULL argument (matching
+ * scsc_wifi_fcq_stat_queueset()).
+ * Fix: the argument validation checked WARN_ON(!qmod) twice and never
+ * validated qcod, so a NULL qcod pointer would be dereferenced below.
+ */
+int scsc_wifi_fcq_stat_queue(struct scsc_wifi_fcq_q_header *queue,
+ struct scsc_wifi_fcq_q_stat *queue_stat,
+ int *qmod, int *qcod)
+{
+ if (WARN_ON(!queue) || WARN_ON(!queue_stat) || WARN_ON(!qmod) || WARN_ON(!qcod))
+ return -EINTR;
+
+ memcpy(queue_stat, &queue->stats, sizeof(struct scsc_wifi_fcq_q_stat));
+ *qmod = atomic_read(&queue->qmod);
+ *qcod = atomic_read(&queue->qcod);
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_stat_queueset: snapshot a queue set's statistics, its
+ * smod/scod counters, the 802.1x controlled-port state and the number of
+ * peer power-save transitions into the caller-supplied output params.
+ *
+ * Returns 0 on success, -EINTR on any NULL argument.
+ */
+int scsc_wifi_fcq_stat_queueset(struct scsc_wifi_fcq_data_qset *queue_set,
+ struct scsc_wifi_fcq_q_stat *queue_stat,
+ int *smod, int *scod, enum scsc_wifi_fcq_8021x_state *cp_state,
+ u32 *peer_ps_state_transitions)
+{
+ bool bad_args = WARN_ON(!queue_set) || WARN_ON(!queue_stat) ||
+ WARN_ON(!smod) || WARN_ON(!scod) ||
+ WARN_ON(!cp_state) || WARN_ON(!peer_ps_state_transitions);
+
+ if (bad_args)
+ return -EINTR;
+
+ *smod = atomic_read(&queue_set->smod);
+ *scod = atomic_read(&queue_set->scod);
+ *cp_state = queue_set->controlled_port_state;
+ *peer_ps_state_transitions = queue_set->peer_ps_state_transitions;
+ memcpy(queue_stat, &queue_set->stats, sizeof(queue_set->stats));
+ return 0;
+}
+
+/**
+ * Queue and Queue Set init/deinit
+ */
+/*
+ * scsc_wifi_fcq_ctrl_q_init: initialise a control queue: qmod from the
+ * module default, zero outstanding count and stats, netif queue id 0,
+ * and mark it active.
+ *
+ * Returns 0 on success, -EINVAL if @queue is NULL or the default qmod
+ * does not fit in 24 bits.
+ */
+int scsc_wifi_fcq_ctrl_q_init(struct scsc_wifi_fcq_ctrl_q *queue)
+{
+ struct scsc_wifi_fcq_q_header *hdr;
+
+ if (WARN_ON(!queue))
+ return -EINVAL;
+
+ /* Ensure that default qmod doesn't exceed 24 bit */
+ if (WARN_ON(scsc_wifi_fcq_qmod >= 0x1000000))
+ return -EINVAL;
+
+ hdr = &queue->head;
+ atomic_set(&hdr->qmod, scsc_wifi_fcq_qmod);
+ atomic_set(&hdr->qcod, 0);
+ hdr->netif_queue_id = 0;
+ hdr->stats.netq_stops = 0;
+ hdr->stats.netq_resumes = 0;
+ atomic_set(&hdr->active, 1);
+
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_ctrl_q_deinit: tear-down check for a control queue -- logs
+ * if packets are still outstanding (qcod != 0). No resources are freed.
+ */
+void scsc_wifi_fcq_ctrl_q_deinit(struct scsc_wifi_fcq_ctrl_q *queue)
+{
+ int pending;
+
+ WARN_ON(!queue);
+
+ pending = atomic_read(&queue->head.qcod);
+ if (!pending)
+ return;
+
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Ctrl queue (0x%p) deinit: qcod is %d, netif queue %d\n",
+ queue, pending, queue->head.netif_queue_id);
+}
+
+/*
+ * fcq_data_q_init: initialise one per-AC data queue inside a queue set:
+ * pick the unicast or multicast qmod default, reset qcod and stats, map
+ * the queue to its netif subqueue id, wake that subqueue (unless the
+ * device is paused) and mark the queue active.
+ *
+ * Returns 0 on success, -EINVAL on NULL arguments or defaults that do not
+ * fit in 24 bits.
+ */
+static int fcq_data_q_init(struct net_device *dev, struct slsi_dev *sdev, enum scsc_wifi_fcq_queue_set_type type, struct scsc_wifi_fcq_data_q *queue,
+ struct scsc_wifi_fcq_data_qset *qs, u8 qs_num, s16 ac)
+{
+ if (WARN_ON(!queue))
+ return -EINVAL;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ /* Ensure that default qmods don't exceed 24 bit */
+ if (WARN_ON(scsc_wifi_fcq_qmod >= 0x1000000) || WARN_ON(scsc_wifi_fcq_mcast_qmod >= 0x1000000))
+ return -EINVAL;
+
+ atomic_set(&queue->head.qmod, type == SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST ? scsc_wifi_fcq_qmod : scsc_wifi_fcq_mcast_qmod);
+
+ atomic_set(&queue->head.qcod, 0);
+ queue->qs = qs;
+ queue->head.netif_queue_id = type == SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST ?
+ slsi_netif_get_peer_queue(qs_num, ac) : slsi_netif_get_multicast_queue(ac);
+
+ queue->head.stats.netq_stops = 0;
+ queue->head.stats.netq_resumes = 0;
+
+ /* TODO: This could generate some ENOSPC if queues are full */
+ if (!atomic_read(&sdev->in_pause_state))
+ netif_wake_subqueue(dev, queue->head.netif_queue_id);
+
+ queue->head.saturated = false;
+ atomic_set(&queue->head.active, 1);
+
+ return 0;
+}
+
+/*
+ * fcq_data_q_deinit: tear-down check for one per-AC data queue -- logs if
+ * packets are still outstanding (qcod != 0). No resources are freed.
+ */
+static void fcq_data_q_deinit(struct scsc_wifi_fcq_data_q *queue)
+{
+ int pending;
+
+ WARN_ON(!queue);
+
+ pending = atomic_read(&queue->head.qcod);
+ if (!pending)
+ return;
+
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Data queue (0x%p) deinit: qcod is %d, netif queue %d\n",
+ queue, pending, queue->head.netif_queue_id);
+}
+
+/*
+ * fcq_qset_init: zero a data queue set, choose its smod default (unicast
+ * vs multicast), initialise each per-AC queue and hand the whole qmod
+ * budget to BE (ac_inuse = 1). Defaults: peer active, port BLOCKED.
+ *
+ * Fix: the per-iteration "qs->ac_inuse &= ~(1 << i)" clearing was removed;
+ * it was a no-op because the memset() below already zeroes ac_inuse (and
+ * it is overwritten with 1 right after the loop anyway).
+ */
+static void fcq_qset_init(struct net_device *dev, struct slsi_dev *sdev, enum scsc_wifi_fcq_queue_set_type type, struct scsc_wifi_fcq_data_qset *qs, u8 qs_num)
+{
+ int i;
+
+ memset(qs, 0, sizeof(struct scsc_wifi_fcq_data_qset));
+ spin_lock_init(&qs->cp_lock);
+ atomic_set(&qs->smod, type == SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST ? scsc_wifi_fcq_smod : scsc_wifi_fcq_mcast_smod);
+ atomic_set(&qs->scod, 0);
+
+ spin_lock_bh(&qs->cp_lock);
+ qs->peer_ps_state = SCSC_WIFI_FCQ_PS_STATE_ACTIVE;
+ qs->saturated = false;
+ atomic_set(&qs->active, 1);
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+ qs->in_sleep = false;
+ qs->can_be_distributed = false;
+#endif
+ qs->controlled_port_state = SCSC_WIFI_FCQ_8021x_STATE_BLOCKED;
+
+ /* Queues init */
+ for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++)
+ fcq_data_q_init(dev, sdev, type, &qs->ac_q[i], qs, qs_num, i);
+ /* Give all qmod to BE */
+ qs->ac_inuse = 1;
+ spin_unlock_bh(&qs->cp_lock);
+}
+
+/*
+ * scsc_wifi_fcq_unicast_qset_init: initialise a unicast queue set for a
+ * peer, cache the peer in peers_cache_list (consulted on every transmit),
+ * reset the global counters if this is the first peer, and redistribute
+ * smod across the new peer count.
+ *
+ * Updates the file-global 'total' peer counter; callers are expected to
+ * serialise add/remove of peers.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM if the peer
+ * cache entry cannot be allocated (the qset itself is left initialised).
+ */
+int scsc_wifi_fcq_unicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u8 qs_num, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer)
+{
+ struct peers_cache *pc_new_node;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ /* Ensure that default smod doesn't exceed 24 bit */
+ if (WARN_ON(scsc_wifi_fcq_smod >= 0x1000000))
+ return -EINVAL;
+
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Init unicast queue set 0x%p vif %d peer_index %d\n", qs, vif, peer->aid);
+ fcq_qset_init(dev, sdev, SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST, qs, qs_num);
+ SCSC_HIP4_SAMPLER_BOT_ADD(sdev->minor_prof, peer->address[4], peer->address[5], peer->aid << 2 | vif);
+ SCSC_HIP4_SAMPLER_BOT_TX(sdev->minor_prof, atomic_read(&qs->scod), atomic_read(&qs->smod), peer->aid << 2 | vif);
+ /* Cache the added peer to optimize the Global start/stop process */
+ pc_new_node = kzalloc(sizeof(*pc_new_node), GFP_ATOMIC);
+ if (!pc_new_node)
+ return -ENOMEM;
+ pc_new_node->dev = dev;
+ pc_new_node->qs = qs;
+ pc_new_node->peer_index = peer->aid;
+ pc_new_node->vif = vif;
+ pc_new_node->is_unicast = true;
+
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Add new peer qs %p vif %d peer->aid %d\n", qs, vif, peer->aid);
+ list_add_tail(&pc_new_node->list, &peers_cache_list);
+
+ if (total == 0) {
+ /* No peers. Reset gcod. */
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+ total_in_sleep = 0;
+#endif
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "First peer. Reset gcod.\n");
+ atomic_set(&sdev->hip4_inst.hip_priv->gcod, 0);
+ atomic_set(&sdev->hip4_inst.hip_priv->gactive, 1);
+ sdev->hip4_inst.hip_priv->saturated = false;
+ }
+
+ total++;
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Add New peer. Total %d\n", total);
+ fcq_redistribute_smod(dev, sdev, total);
+
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_multicast_qset_init: initialise the multicast queue set
+ * for a VIF and cache it in peers_cache_list with peer_index 0 and
+ * is_unicast false. Unlike the unicast variant, this does not touch the
+ * global 'total' peer counter or reset gcod; it only redistributes qmod
+ * within the new set.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM if the cache
+ * entry cannot be allocated.
+ */
+int scsc_wifi_fcq_multicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif)
+{
+ struct peers_cache *pc_node;
+
+ if (WARN_ON(!qs))
+ return -EINVAL;
+
+ /* Ensure that default smod doesn't exceed 24 bit */
+ if (WARN_ON(scsc_wifi_fcq_mcast_smod >= 0x1000000))
+ return -EINVAL;
+
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Init multicast queue set 0x%p\n", qs);
+ fcq_qset_init(dev, sdev, SCSC_WIFI_FCQ_QUEUE_SET_TYPE_MULTICAST, qs, 0);
+ SCSC_HIP4_SAMPLER_BOT_ADD(sdev->minor_prof, 0, 0, vif);
+ SCSC_HIP4_SAMPLER_BOT_TX(sdev->minor_prof, atomic_read(&qs->scod), atomic_read(&qs->smod), vif);
+
+ /* Cache the added peer to optimize the Global start/stop process */
+ pc_node = kzalloc(sizeof(*pc_node), GFP_ATOMIC);
+ if (!pc_node)
+ return -ENOMEM;
+ pc_node->dev = dev;
+ pc_node->qs = qs;
+ pc_node->peer_index = 0;
+ pc_node->vif = vif;
+ pc_node->is_unicast = false;
+
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Add Multicast Qset %p vif %d peer->aid 0\n", qs, vif);
+ list_add_tail(&pc_node->list, &peers_cache_list);
+
+ fcq_redistribute_qmod(pc_node->dev, pc_node->qs, sdev, pc_node->peer_index, pc_node->vif);
+
+ return 0;
+}
+
+/*
+ * scsc_wifi_fcq_qset_deinit: tear down a queue set (unicast or multicast):
+ * log any outstanding counters, drop the set's entries from
+ * peers_cache_list, adjust the global 'total' peer count (unicast only --
+ * @peer is NULL for multicast sets) and redistribute smod over the
+ * remaining peers.
+ *
+ * NOTE(review): 'aid'/'addr_4'/'addr_5' only exist under
+ * CONFIG_SCSC_WLAN_HIP4_PROFILING, yet they appear in the
+ * SCSC_HIP4_SAMPLER_* invocations below -- presumably those macros expand
+ * to nothing when profiling is off; verify.
+ */
+void scsc_wifi_fcq_qset_deinit(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer)
+{
+ struct peers_cache *pc_node, *next;
+ int i, scod;
+#ifdef CONFIG_SCSC_WLAN_HIP4_PROFILING
+ int aid = 0;
+ u8 addr_4 = 0;
+ u8 addr_5 = 0;
+
+ if (peer) {
+ aid = peer->aid;
+ addr_4 = peer->address[4];
+ addr_5 = peer->address[5];
+ }
+#endif
+
+ WARN_ON(!qs);
+
+ if (!qs)
+ return;
+
+ scod = atomic_read(&qs->scod);
+ if (scod != 0)
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Data set (0x%p) deinit: scod is %d\n", qs, scod);
+
+ for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++)
+ fcq_data_q_deinit(&qs->ac_q[i]);
+
+ qs->ac_inuse = 0;
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+ /* Remove from total sleep if was in sleep */
+ if (qs->in_sleep && total_in_sleep)
+ total_in_sleep -= 1;
+#endif
+ SCSC_HIP4_SAMPLER_BOT_RX(sdev->minor_prof, 0, 0, (aid & 0xf) << 2 | vif);
+ SCSC_HIP4_SAMPLER_BOT_REMOVE(sdev->minor_prof, addr_4, addr_5, aid << 2 | vif);
+
+ if (peer)
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Delete qs %p vif %d peer->aid %d\n", qs, vif, peer->aid);
+ else
+ SLSI_DBG1_NODEV(SLSI_WIFI_FCQ, "Delete qs %p vif %d Multicast\n", qs, vif);
+
+ /* Drop every cache entry that points at this qset */
+ list_for_each_entry_safe(pc_node, next, &peers_cache_list, list) {
+ if (pc_node->qs == qs) {
+ list_del(&pc_node->list);
+ kfree(pc_node);
+ }
+ }
+ /* Only count unicast qs */
+ if (total > 0 && peer)
+ total--;
+
+ SLSI_DBG4_NODEV(SLSI_WIFI_FCQ, "Del peer. Total %d\n", total);
+
+ /* Nothing left to redistribute over */
+ if (total == 0)
+ return;
+
+ fcq_redistribute_smod(dev, sdev, total);
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include "netif.h"
+
+#ifndef __SCSC_WIFI_FCQ_H
+#define __SCSC_WIFI_FCQ_H
+
+/* 802.1x controlled-port state: data frames are rejected until OPEN. */
+enum scsc_wifi_fcq_8021x_state {
+ SCSC_WIFI_FCQ_8021x_STATE_BLOCKED = 0,
+ SCSC_WIFI_FCQ_8021x_STATE_OPEN = 1
+};
+
+/* Peer power-save state as tracked by the flow-control layer. */
+enum scsc_wifi_fcq_ps_state {
+ SCSC_WIFI_FCQ_PS_STATE_POWERSAVE = 1,
+ SCSC_WIFI_FCQ_PS_STATE_ACTIVE = 2
+};
+
+/* Counters for netif subqueue stop/resume events. */
+struct scsc_wifi_fcq_q_stat {
+ u32 netq_stops;
+ u32 netq_resumes;
+};
+
+/*
+ * Common bookkeeping for one flow-controlled queue: qmod is the admission
+ * limit, qcod the current outstanding count.
+ */
+struct scsc_wifi_fcq_q_header {
+ atomic_t qmod; /* admission limit for this queue */
+ atomic_t qcod; /* packets currently outstanding */
+ u16 netif_queue_id; /* netif subqueue this maps to */
+ atomic_t active; /* non-zero while the subqueue is running */
+ struct scsc_wifi_fcq_q_stat stats;
+ ktime_t empty_t; /* when the queue last drained empty */
+ bool can_be_distributed; /* idle capacity may be redistributed */
+ bool saturated;
+ int guard; /* headroom before declaring saturation */
+};
+
+/* Per-AC data queue; back-pointer to its owning queue set. */
+struct scsc_wifi_fcq_data_q {
+ struct scsc_wifi_fcq_q_header head;
+ struct scsc_wifi_fcq_data_qset *qs;
+};
+
+/* Control (non-data) queue: header only, no owning set. */
+struct scsc_wifi_fcq_ctrl_q {
+ struct scsc_wifi_fcq_q_header head;
+};
+
+/* Queue-set flavour: selects unicast vs multicast smod/qmod defaults. */
+enum scsc_wifi_fcq_queue_set_type {
+ SCSC_WIFI_FCQ_QUEUE_SET_TYPE_UNICAST = 0,
+ SCSC_WIFI_FCQ_QUEUE_SET_TYPE_MULTICAST = 1
+};
+
+/* Enables dynamic redistribution of smod capacity from idle peers. */
+#define EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION 1
+
+/*
+ * Per-peer queue set: one per-AC data queue each, plus the set-level
+ * scod/smod accounting and 802.1x controlled-port state. All counter
+ * read-modify-write sequences take cp_lock.
+ */
+struct scsc_wifi_fcq_data_qset {
+ atomic_t active;
+ enum scsc_wifi_fcq_8021x_state controlled_port_state;
+ /* a std spinlock */
+ spinlock_t cp_lock;
+
+ struct scsc_wifi_fcq_data_q ac_q[SLSI_NETIF_Q_PER_PEER];
+ /* Control AC usage (BE,BK,VI,VO) bitmap*/
+ u8 ac_inuse;
+ atomic_t smod; /* set-level admission limit */
+ atomic_t scod; /* set-level outstanding count */
+
+ struct scsc_wifi_fcq_q_stat stats; /* Stats for smod */
+ enum scsc_wifi_fcq_ps_state peer_ps_state;
+ u32 peer_ps_state_transitions;
+
+#ifdef EXPERIMENTAL_DYNAMIC_SMOD_ADAPTATION
+ ktime_t empty_t; /* when the set last drained empty */
+ bool can_be_distributed;
+ bool in_sleep;
+#endif
+ bool saturated;
+ int guard;
+};
+
+/* Queue and queue set management */
+int scsc_wifi_fcq_ctrl_q_init(struct scsc_wifi_fcq_ctrl_q *queue);
+void scsc_wifi_fcq_ctrl_q_deinit(struct scsc_wifi_fcq_ctrl_q *queue);
+
+int scsc_wifi_fcq_unicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u8 qs_num, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer);
+int scsc_wifi_fcq_multicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif);
+void scsc_wifi_fcq_qset_deinit(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer);
+
+/* Transmit/receive bookkeeping and smod power save changes / 802.1x handling */
+int scsc_wifi_fcq_transmit_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index);
+int scsc_wifi_fcq_receive_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index);
+int scsc_wifi_fcq_receive_data_no_peer(struct net_device *dev, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index);
+
+void scsc_wifi_fcq_pause_queues(struct slsi_dev *sdev);
+void scsc_wifi_fcq_unpause_queues(struct slsi_dev *sdev);
+
+int scsc_wifi_fcq_transmit_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue);
+int scsc_wifi_fcq_receive_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue);
+
+int scsc_wifi_fcq_update_smod(struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_ps_state peer_ps_state,
+ enum scsc_wifi_fcq_queue_set_type type);
+int scsc_wifi_fcq_8021x_port_state(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_8021x_state state);
+
+/* Statistics */
+int scsc_wifi_fcq_stat_queue(struct scsc_wifi_fcq_q_header *queue,
+ struct scsc_wifi_fcq_q_stat *queue_stat,
+ int *qmod, int *qcod);
+
+int scsc_wifi_fcq_stat_queueset(struct scsc_wifi_fcq_data_qset *queue_set,
+ struct scsc_wifi_fcq_q_stat *queue_stat,
+ int *smod, int *scod, enum scsc_wifi_fcq_8021x_state *cp_state,
+ u32 *peer_ps_state_transitions);
+
+#endif /* #ifndef __SCSC_WIFI_FCQ_H */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h" /* sdev access */
+#include "src_sink.h"
+#include "debug.h"
+#include "fapi.h"
+#include "mlme.h"
+#include "mgt.h"
+
+/*
+ * slsi_src_sink_fake_sta_start: bring up a fake STA on @dev for source/sink
+ * testing: force STATION iftype, add+activate a VIF (broadcast device
+ * address), add a fake peer, open its controlled port and set the carrier.
+ *
+ * On each failure the steps already taken are unwound in reverse order
+ * before returning -EFAULT. Returns 0 on success. Runs under vif_mutex.
+ */
+static int slsi_src_sink_fake_sta_start(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer;
+ u8 device_address[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ u8 fake_peer_mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ ndev_vif->iftype = NL80211_IFTYPE_STATION;
+ dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ ndev_vif->vif_type = FAPI_VIFTYPE_STATION;
+
+ if (WARN(slsi_mlme_add_vif(sdev, dev, dev->dev_addr, device_address) != 0, "add VIF failed")) {
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EFAULT;
+ }
+
+ if (WARN(slsi_vif_activated(sdev, dev) != 0, "activate VIF failed")) {
+ slsi_mlme_del_vif(sdev, dev);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EFAULT;
+ }
+
+ peer = slsi_peer_add(sdev, dev, fake_peer_mac, SLSI_STA_PEER_QUEUESET + 1);
+ if (WARN(!peer, "add fake peer failed")) {
+ slsi_vif_deactivated(sdev, dev);
+ slsi_mlme_del_vif(sdev, dev);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EFAULT;
+ }
+ peer->qos_enabled = true;
+ /* Open the data path for the fake peer and report link up */
+ slsi_ps_port_control(sdev, dev, peer, SLSI_STA_CONN_STATE_CONNECTED);
+ netif_carrier_on(dev);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return 0;
+}
+
+/*
+ * slsi_src_sink_fake_sta_stop: tear down the fake STA created by
+ * slsi_src_sink_fake_sta_start(): drop carrier, remove the fake peer (if
+ * still present), deactivate and delete the VIF. No-op (with a WARN) if
+ * the VIF is not activated.
+ *
+ * Fix: the peer lookup used to run before taking vif_mutex, so it could
+ * race with concurrent peer removal; it is now performed under the lock
+ * (and only once the VIF is known to be activated).
+ */
+static void slsi_src_sink_fake_sta_stop(struct slsi_dev *sdev, struct net_device *dev)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_peer *peer;
+
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ SLSI_NET_DBG1(dev, SLSI_SRC_SINK, "station stop(vif:%d)\n", ndev_vif->ifnum);
+
+ if (WARN(!ndev_vif->activated, "not activated")) {
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return;
+ }
+
+ peer = slsi_get_peer_from_qs(sdev, dev, SLSI_STA_PEER_QUEUESET);
+
+ netif_carrier_off(dev);
+ if (peer)
+ slsi_peer_remove(sdev, dev, peer);
+ slsi_vif_deactivated(sdev, dev);
+ slsi_mlme_del_vif(sdev, dev);
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+}
+
+/*
+ * slsi_src_sink_loopback_start: start loopback mode on the WLAN and P2P
+ * netdevs: allocate the p2p0 peer database (normally absent) and bring up
+ * a fake STA on each interface.
+ *
+ * Returns 0 on success, -EFAULT on any failure.
+ * NOTE(review): a failure on the second (P2P) iteration leaves the first
+ * interface's fake STA up and any p2p0 peer records allocated -- callers
+ * presumably recover via slsi_src_sink_loopback_stop(); verify.
+ */
+static int slsi_src_sink_loopback_start(struct slsi_dev *sdev)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ u8 i, queue_idx;
+
+ for (i = SLSI_NET_INDEX_WLAN; i <= SLSI_NET_INDEX_P2P; i++) {
+ dev = slsi_get_netdev_locked(sdev, i);
+ if (WARN(!dev, "no netdev (index:%d)", i))
+ return -EFAULT;
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* for p2p0 interface the peer database is not created during init;
+ * peer database is needed for dataplane to be functional.
+ * so create the peer records for p2p0 now and undo it during loopback stop
+ */
+ if (i == SLSI_NET_INDEX_P2P) {
+ for (queue_idx = 0; queue_idx < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queue_idx++) {
+ ndev_vif->peer_sta_record[queue_idx] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queue_idx]), GFP_KERNEL);
+ if (!ndev_vif->peer_sta_record[queue_idx]) {
+ int j;
+
+ SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queue_idx:%d)\n", queue_idx);
+
+ /* free previously allocated peer database memory till current queue_idx */
+ for (j = 0; j < queue_idx; j++) {
+ kfree(ndev_vif->peer_sta_record[j]);
+ ndev_vif->peer_sta_record[j] = NULL;
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ return -EFAULT;
+ }
+ }
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ if (WARN(slsi_src_sink_fake_sta_start(sdev, dev) != 0, "fake STA setup failed (vif:%d)\n", i))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * slsi_src_sink_loopback_stop: undo slsi_src_sink_loopback_start(): tear
+ * down the fake STA on the WLAN and P2P netdevs and free the p2p0 peer
+ * records that loopback start allocated. Aborts (with a WARN) if a netdev
+ * is missing.
+ */
+static void slsi_src_sink_loopback_stop(struct slsi_dev *sdev)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ u8 i, queue_idx;
+
+ for (i = SLSI_NET_INDEX_WLAN; i <= SLSI_NET_INDEX_P2P; i++) {
+ dev = slsi_get_netdev_locked(sdev, i);
+ if (WARN(!dev, "no netdev (index:%d)", i))
+ return;
+
+ slsi_src_sink_fake_sta_stop(sdev, dev);
+
+ ndev_vif = netdev_priv(dev);
+ SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
+
+ /* undo peer database creation for p2p0 interface on loopback stop */
+ if (i == SLSI_NET_INDEX_P2P) {
+ for (queue_idx = 0; queue_idx < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queue_idx++) {
+ kfree(ndev_vif->peer_sta_record[queue_idx]);
+ ndev_vif->peer_sta_record[queue_idx] = NULL;
+ }
+ }
+ SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
+ }
+}
+
+long slsi_src_sink_cdev_ioctl_cfg(struct slsi_dev *sdev, unsigned long arg)
+{
+ long r = 0;
+ struct unifiio_src_sink_arg_t src_sink_arg;
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct sk_buff *req = NULL;
+ struct sk_buff *ind = NULL;
+
+ memset((void *)&src_sink_arg, 0, sizeof(struct unifiio_src_sink_arg_t));
+ if (copy_from_user((void *)(&src_sink_arg), (void *)arg, sizeof(struct unifiio_src_sink_arg_t)))
+ return -EFAULT;
+
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, "Source/Sink\n");
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, "====================================================\n");
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, " #action : [0x%04X]\n", src_sink_arg.common.action);
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, " #direction : [0x%04X]\n", src_sink_arg.common.direction);
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, " #vif : [0x%04X]\n", src_sink_arg.common.vif);
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, " #endpoint : [0x%04X]\n", src_sink_arg.common.endpoint);
+ SLSI_DBG2(sdev, SLSI_SRC_SINK, "====================================================\n");
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ dev = slsi_get_netdev_locked(sdev, src_sink_arg.common.vif);
+ if (WARN_ON(!dev)) {
+ SLSI_ERR(sdev, "netdev for input vif:%d is NULL\n", src_sink_arg.common.vif);
+ r = -ENODEV;
+ goto out_locked;
+ }
+ ndev_vif = netdev_priv(dev);
+
+ switch (src_sink_arg.common.action) {
+ case SRC_SINK_ACTION_SINK_START:
+ if (WARN(slsi_src_sink_fake_sta_start(sdev, dev) != 0, "fake STA setup failed\n")) {
+ r = -EFAULT;
+ goto out_locked;
+ }
+ req = fapi_alloc(debug_pkt_sink_start_req, DEBUG_PKT_SINK_START_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.end_point, src_sink_arg.common.endpoint);
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.direction, src_sink_arg.common.direction);
+ fapi_set_u32(req, u.debug_pkt_sink_start_req.interval, src_sink_arg.u.config.interval);
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.packets_per_interval, src_sink_arg.u.config.pkts_per_int);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_sink_start_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ break;
+ case SRC_SINK_ACTION_SINK_STOP:
+ req = fapi_alloc(debug_pkt_sink_stop_req, DEBUG_PKT_SINK_STOP_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_sink_stop_req.direction, src_sink_arg.common.direction);
+ fapi_set_u16(req, u.debug_pkt_sink_stop_req.end_point, src_sink_arg.common.endpoint);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_sink_stop_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ slsi_src_sink_fake_sta_stop(sdev, dev);
+ break;
+ case SRC_SINK_ACTION_GEN_START:
+ if (WARN(slsi_src_sink_fake_sta_start(sdev, dev) != 0, "fake STA setup failed\n")) {
+ r = -EFAULT;
+ goto out_locked;
+ }
+ req = fapi_alloc(debug_pkt_gen_start_req, DEBUG_PKT_GEN_START_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.direction, src_sink_arg.common.direction);
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.end_point, src_sink_arg.common.endpoint);
+ fapi_set_u32(req, u.debug_pkt_gen_start_req.interval, src_sink_arg.u.config.interval);
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.packets_per_interval, src_sink_arg.u.config.pkts_per_int);
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.size, src_sink_arg.u.config.u.gen.size);
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.use_streaming, src_sink_arg.u.config.u.gen.use_streaming);
+ fapi_set_u32(req, u.debug_pkt_gen_start_req.ipv4destination_address, src_sink_arg.u.config.u.gen.ipv4_dest);
+ fapi_set_u16(req, u.debug_pkt_gen_start_req.packets_per_interrupt, src_sink_arg.u.config.u.gen.pkts_per_intr);
+
+ SLSI_DBG3(sdev, SLSI_SRC_SINK,
+ "int:%u, pkts_per_int:%u, vif:%u, size:%u, use_streaming:%u, ipv4_dest:0x%04X, pkts_per_intr:%u\n",
+ src_sink_arg.u.config.interval,
+ src_sink_arg.u.config.pkts_per_int,
+ src_sink_arg.common.vif,
+ src_sink_arg.u.config.u.gen.size,
+ src_sink_arg.u.config.u.gen.use_streaming,
+ src_sink_arg.u.config.u.gen.ipv4_dest,
+ src_sink_arg.u.config.u.gen.pkts_per_intr);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_gen_start_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ break;
+ case SRC_SINK_ACTION_GEN_STOP:
+ req = fapi_alloc(debug_pkt_gen_stop_req, DEBUG_PKT_GEN_STOP_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_gen_stop_req.direction, src_sink_arg.common.direction);
+ fapi_set_u16(req, u.debug_pkt_gen_stop_req.end_point, src_sink_arg.common.endpoint);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_gen_stop_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ slsi_src_sink_fake_sta_stop(sdev, dev);
+ break;
+ case SRC_SINK_ACTION_LOOPBACK_START:
+ if (WARN(slsi_src_sink_loopback_start(sdev) != 0, "loopback setup failed\n")) {
+ r = -EFAULT;
+ goto out_locked;
+ }
+ req = fapi_alloc(debug_pkt_sink_start_req, DEBUG_PKT_SINK_START_REQ, 1, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.end_point, SRC_SINK_ENDPOINT_HOSTIO);
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.direction, SRC_SINK_DIRECTION_RX);
+ fapi_set_u32(req, u.debug_pkt_sink_start_req.interval, src_sink_arg.u.config.interval);
+ fapi_set_u16(req, u.debug_pkt_sink_start_req.packets_per_interval, src_sink_arg.u.config.pkts_per_int);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_loopback_start_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ break;
+ case SRC_SINK_ACTION_LOOPBACK_STOP:
+ req = fapi_alloc(debug_pkt_sink_stop_req, DEBUG_PKT_SINK_STOP_REQ, 1, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_sink_stop_req.direction, SRC_SINK_DIRECTION_RX);
+ fapi_set_u16(req, u.debug_pkt_sink_stop_req.end_point, SRC_SINK_ENDPOINT_HOSTIO);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_loopback_stop_req->\n");
+ r = slsi_mlme_req(sdev, dev, req);
+ slsi_src_sink_loopback_stop(sdev);
+ break;
+ case SRC_SINK_ACTION_SINK_REPORT:
+ req = fapi_alloc(debug_pkt_sink_report_req, DEBUG_PKT_SINK_REPORT_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_sink_report_req.end_point, src_sink_arg.common.endpoint);
+ fapi_set_u16(req, u.debug_pkt_sink_report_req.direction, src_sink_arg.common.direction);
+ fapi_set_u32(req, u.debug_pkt_sink_report_req.report_interval, src_sink_arg.u.report.interval);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_sink_report_req->\n");
+
+ ind = slsi_mlme_req_ind(sdev, dev, req, DEBUG_PKT_SINK_REPORT_IND);
+ if (!ind) {
+ SLSI_ERR(sdev, "slsi_mlme_req_ind [SINK_REPORT] failed\n");
+ r = -EIO;
+ break;
+ }
+
+ src_sink_arg.u.report.duration = fapi_get_u32(ind, u.debug_pkt_sink_report_ind.duration);
+ src_sink_arg.u.report.count = fapi_get_u32(ind, u.debug_pkt_sink_report_ind.received_packets);
+ src_sink_arg.u.report.octet = fapi_get_u32(ind, u.debug_pkt_sink_report_ind.received_octets);
+ src_sink_arg.u.report.kbps = fapi_get_u32(ind, u.debug_pkt_sink_report_ind.kbps);
+ src_sink_arg.u.report.idle_ratio = fapi_get_u16(ind, u.debug_pkt_sink_report_ind.idle_ratio);
+ src_sink_arg.u.report.interrupt_latency = fapi_get_u16(ind, u.debug_pkt_sink_report_ind.int_latency);
+ src_sink_arg.u.report.free_kbytes = fapi_get_u16(ind, u.debug_pkt_sink_report_ind.free_kbytes);
+ src_sink_arg.u.report.timestamp = jiffies_to_msecs(jiffies);
+
+ memset(&ndev_vif->src_sink_params.sink_report, 0, sizeof(struct unifiio_src_sink_report));
+
+ /* copy the report to userspace */
+ if (copy_to_user((void *)arg, (void *)(&src_sink_arg), sizeof(struct unifiio_src_sink_arg_t)))
+ r = -EFAULT;
+ slsi_kfree_skb(ind);
+ break;
+ case SRC_SINK_ACTION_GEN_REPORT:
+ req = fapi_alloc(debug_pkt_gen_report_req, DEBUG_PKT_GEN_REPORT_REQ, src_sink_arg.common.vif, 0);
+ if (WARN_ON(!req)) {
+ r = -ENODEV;
+ goto out_locked;
+ }
+ fapi_set_u16(req, u.debug_pkt_gen_report_req.end_point, src_sink_arg.common.endpoint);
+ fapi_set_u16(req, u.debug_pkt_gen_report_req.direction, src_sink_arg.common.direction);
+ fapi_set_u32(req, u.debug_pkt_gen_report_req.report_interval, src_sink_arg.u.report.interval);
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "debug_pkt_gen_report_req->\n");
+
+ ind = slsi_mlme_req_ind(sdev, dev, req, DEBUG_PKT_GEN_REPORT_IND);
+ if (!ind) {
+ SLSI_ERR(sdev, "slsi_mlme_req_ind [GEN_REPORT] failed\n");
+ r = -EIO;
+ break;
+ }
+
+ src_sink_arg.u.report.duration = fapi_get_u32(ind, u.debug_pkt_gen_report_ind.duration);
+ src_sink_arg.u.report.count = fapi_get_u32(ind, u.debug_pkt_gen_report_ind.received_packets);
+ src_sink_arg.u.report.failed_count = fapi_get_u32(ind, u.debug_pkt_gen_report_ind.failed_count);
+ src_sink_arg.u.report.octet = fapi_get_u32(ind, u.debug_pkt_gen_report_ind.received_octets);
+ src_sink_arg.u.report.kbps = fapi_get_u32(ind, u.debug_pkt_gen_report_ind.kbps);
+ src_sink_arg.u.report.idle_ratio = fapi_get_u16(ind, u.debug_pkt_gen_report_ind.idle_ratio);
+ src_sink_arg.u.report.interrupt_latency = fapi_get_u16(ind, u.debug_pkt_gen_report_ind.int_latency);
+ src_sink_arg.u.report.free_kbytes = fapi_get_u16(ind, u.debug_pkt_gen_report_ind.free_kbytes);
+
+ src_sink_arg.u.report.timestamp = jiffies_to_msecs(jiffies);
+
+ memset(&ndev_vif->src_sink_params.gen_report, 0, sizeof(struct unifiio_src_sink_report));
+
+ /* copy the report to userspace */
+ if (copy_to_user((void *)arg, (void *)(&src_sink_arg), sizeof(struct unifiio_src_sink_arg_t)))
+ r = -EFAULT;
+ slsi_kfree_skb(ind);
+ break;
+ case SRC_SINK_ACTION_SINK_REPORT_CACHED:
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "cached sink_report\n");
+ memcpy(&src_sink_arg.u.report, &ndev_vif->src_sink_params.sink_report, sizeof(struct unifiio_src_sink_report));
+ memset(&ndev_vif->src_sink_params.sink_report, 0, sizeof(struct unifiio_src_sink_report));
+
+ if (copy_to_user((void *)arg, (void *)(&src_sink_arg), sizeof(struct unifiio_src_sink_arg_t)))
+ r = -EFAULT;
+
+ break;
+ case SRC_SINK_ACTION_GEN_REPORT_CACHED:
+ SLSI_DBG1(sdev, SLSI_SRC_SINK, "cached gen_report\n");
+ memcpy(&src_sink_arg.u.report, &ndev_vif->src_sink_params.gen_report, sizeof(struct unifiio_src_sink_report));
+ memset(&ndev_vif->src_sink_params.gen_report, 0, sizeof(struct unifiio_src_sink_report));
+
+ if (copy_to_user((void *)arg, (void *)(&src_sink_arg), sizeof(struct unifiio_src_sink_arg_t)))
+ r = -EFAULT;
+ break;
+ default:
+ SLSI_ERR(sdev, "Invalid input for action: 0x%04X\n", src_sink_arg.common.action);
+ r = -EINVAL;
+ break;
+ }
+
+out_locked:
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ if (r)
+ SLSI_ERR(sdev, "slsi_src_sink_cdev_ioctl_cfg(vif:%d) failed with %ld\n", src_sink_arg.common.vif, r);
+ return r;
+}
+
+/* Cache the firmware's packet-sink statistics from a DEBUG_PKT_SINK_REPORT_IND
+ * signal into the per-netdev sink_report, stamped with the current time in ms.
+ * Consumes (frees) the skb.
+ */
+void slsi_rx_sink_report(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *vif_priv = netdev_priv(dev);
+
+ SLSI_DBG3(sdev, SLSI_SRC_SINK, "RX debug_pkt_sink_report_ind\n");
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ /* Compound-literal assignment zero-fills any field not listed below. */
+ vif_priv->src_sink_params.sink_report = (struct unifiio_src_sink_report) {
+ .duration = fapi_get_u32(skb, u.debug_pkt_sink_report_ind.duration),
+ .count = fapi_get_u32(skb, u.debug_pkt_sink_report_ind.received_packets),
+ .octet = fapi_get_u32(skb, u.debug_pkt_sink_report_ind.received_octets),
+ .kbps = fapi_get_u32(skb, u.debug_pkt_sink_report_ind.kbps),
+ .idle_ratio = fapi_get_u16(skb, u.debug_pkt_sink_report_ind.idle_ratio),
+ .interrupt_latency = fapi_get_u16(skb, u.debug_pkt_sink_report_ind.int_latency),
+ .free_kbytes = fapi_get_u16(skb, u.debug_pkt_sink_report_ind.free_kbytes),
+ .timestamp = jiffies_to_msecs(jiffies),
+ };
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ slsi_kfree_skb(skb);
+}
+
+/* Cache the firmware's packet-generator statistics from a
+ * DEBUG_PKT_GEN_REPORT_IND signal into the per-netdev gen_report, stamped
+ * with the current time in ms. Consumes (frees) the skb.
+ */
+void slsi_rx_gen_report(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *vif_priv = netdev_priv(dev);
+
+ SLSI_DBG3(sdev, SLSI_SRC_SINK, "RX debug_pkt_gen_report_ind\n");
+
+ SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+ /* Compound-literal assignment zero-fills any field not listed below. */
+ vif_priv->src_sink_params.gen_report = (struct unifiio_src_sink_report) {
+ .duration = fapi_get_u32(skb, u.debug_pkt_gen_report_ind.duration),
+ .count = fapi_get_u32(skb, u.debug_pkt_gen_report_ind.received_packets),
+ .failed_count = fapi_get_u32(skb, u.debug_pkt_gen_report_ind.failed_count),
+ .octet = fapi_get_u32(skb, u.debug_pkt_gen_report_ind.received_octets),
+ .kbps = fapi_get_u32(skb, u.debug_pkt_gen_report_ind.kbps),
+ .idle_ratio = fapi_get_u16(skb, u.debug_pkt_gen_report_ind.idle_ratio),
+ .interrupt_latency = fapi_get_u16(skb, u.debug_pkt_gen_report_ind.int_latency),
+ .free_kbytes = fapi_get_u16(skb, u.debug_pkt_gen_report_ind.free_kbytes),
+ .timestamp = jiffies_to_msecs(jiffies),
+ };
+ SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+ slsi_kfree_skb(skb);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_SRC_SINK_H__
+#define __SLSI_SRC_SINK_H__
+
+#include "unifiio.h"
+
+/* Cached copies of the most recent sink/generator throughput reports
+ * delivered by the firmware (filled in by slsi_rx_sink_report() /
+ * slsi_rx_gen_report(), consumed by the *_REPORT_CACHED ioctl actions).
+ */
+struct slsi_src_sink_params {
+ struct unifiio_src_sink_report sink_report;
+ struct unifiio_src_sink_report gen_report;
+};
+
+/* Handle the src/sink configuration ioctl; arg is a userspace pointer to a
+ * struct unifiio_src_sink_arg_t. Returns 0 on success, negative errno on
+ * failure.
+ */
+long slsi_src_sink_cdev_ioctl_cfg(struct slsi_dev *sdev, unsigned long arg);
+
+/* RX handlers for DEBUG_PKT_{SINK,GEN}_REPORT_IND signals; both free the skb. */
+void slsi_rx_sink_report(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+void slsi_rx_gen_report(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+
+#endif /* __SLSI_SRC_SINK_H__ */
--- /dev/null
+# ----------------------------------------------------------------------------
+# FILE: Makefile
+#
+# PURPOSE:
+# Build instructions for SCSC Wlan driver.
+#
+#
+# Copyright (C) 2016 by Samsung Electronics Co., Ltd
+# ----------------------------------------------------------------------------
+
+CONFIG_SCSC_WLAN_MODULE=scsc_wlan_unittest
+
+# ----------------------------------------------------------------------------
+# Common Driver Files
+# ----------------------------------------------------------------------------
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../mib.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../mib_text_convert.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../debug.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../debug_frame.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../procfs.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../mgt.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../udi.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../log_clients.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../src_sink.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../fw_test.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../cac.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../ioctl.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../wakelock.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../traffic_monitor.o
+# enable GSCAN
+ifeq ($(CONFIG_SCSC_WLAN_GSCAN_ENABLE),y)
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../nl80211_vendor.o
+ccflags-y += -DCONFIG_SCSC_WLAN_GSCAN_ENABLE
+endif
+
+# enable roam offload
+ifeq ($(CONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD),y)
+ccflags-y += -DCONFIG_SCSC_WLAN_KEY_MGMT_OFFLOAD
+endif
+
+# ----------------------------------------------------------------------------
+# Building for cfg80211
+# ----------------------------------------------------------------------------
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../cfg80211_ops.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../mlme.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../netif.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../rx.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../tx.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../ba.o
+
+# ----------------------------------------------------------------------------
+# Building for SAP
+# ----------------------------------------------------------------------------
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../sap_ma.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../sap_mlme.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../sap_dbg.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../sap_test.o
+
+# ----------------------------------------------------------------------------
+# unittest Driver
+# ----------------------------------------------------------------------------
+$(CONFIG_SCSC_WLAN_MODULE)-y += ../dev.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += dev_test.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += hip_test.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += hydra_test.o
+$(CONFIG_SCSC_WLAN_MODULE)-y += udi_test.o
+
+# Suppress -Wempty-body when not built with Debug enabled.
+ifneq ($(CONFIG_SCSC_WLAN_DEBUG),y)
+ccflags-y += -Wno-empty-body
+else
+ccflags-y += -g -ggdb
+endif
+
+ccflags-y += -Wno-unused-parameter
+ccflags-y += -Wno-sign-compare
+
+# ----------------------------------------------------------------------------
+# wlan configuration
+# ----------------------------------------------------------------------------
+ccflags-$(CONFIG_SCSC_WLAN_DEBUG) += -DCONFIG_SCSC_WLAN_DEBUG
+ccflags-$(CONFIG_SCSC_WLAN_SKB_TRACKING) += -DCONFIG_SCSC_WLAN_SKB_TRACKING
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_SDIO_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_SDIO_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_DATA_PLANE_PROFILE_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_DATA_PLANE_PROFILE_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_OFFLINE_TX_TRACE) += -DCONFIG_SCSC_WLAN_OFFLINE_TX_TRACE
+ccflags-$(CONFIG_SCSC_WLAN_HIP_CLEAR_TH_SLOT_SUPPORT) += -DCONFIG_SCSC_WLAN_HIP_CLEAR_TH_SLOT_SUPPORT
+ccflags-$(CONFIG_SCSC_WLAN_HIP_DEFER_SLOT_SUPPORT) += -DCONFIG_SCSC_WLAN_HIP_DEFER_SLOT_SUPPORT
+ccflags-$(CONFIG_SCSC_WLAN_RX_NAPI) += -DCONFIG_SCSC_WLAN_RX_NAPI
+ccflags-$(CONFIG_SCSC_WLAN_RX_NAPI_GRO) += -DCONFIG_SCSC_WLAN_RX_NAPI_GRO
+ccflags-$(CONFIG_SCSC_WLAN_WES_NCHO) += -DCONFIG_SCSC_WLAN_WES_NCHO
+ccflags-$(CONFIG_SCSC_WLAN_MUTEX_DEBUG) += -DCONFIG_SCSC_WLAN_MUTEX_DEBUG
+
+ccflags-y += -DCONFIG_SCSC_WLAN_MAX_INTERFACES=$(CONFIG_SCSC_WLAN_MAX_INTERFACES)
+
+# Android specific build options
+ccflags-$(CONFIG_SCSC_WLAN_ANDROID) += -DCONFIG_SCSC_WLAN_ANDROID
+
+ccflags-y += -I$(src) -I$(src)/..
+ccflags-y += -I$(src)/../../../../../include -I$(src)/../../../../../include/scsc
+
+ccflags-y += -DSLSI_TEST_DEV
+ccflags-y += $(CONFIG_SCSC_WLAN_EXTRA)
+
+# ----------------------------------------------------------------------------
+# module
+# ----------------------------------------------------------------------------
+obj-$(CONFIG_SCSC_WLAN_UNITTEST) := $(CONFIG_SCSC_WLAN_MODULE).o
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd
+ *
+ ****************************************************************************/
+
+#include <net/ip.h>
+
+#include "debug.h"
+#include "utils.h"
+#include "udi.h"
+#include "unittest.h"
+#include "mgt.h"
+#include "scsc/scsc_mx.h"
+
+#define SLSI_TESTDRV_NAME "s5n2560_test"
+
+static int radios = 11;
+module_param(radios, int, 0444);
+MODULE_PARM_DESC(radios, "Number of simulated radios");
+
+/* spinlock for retaining the (struct slsi_dev) information */
+static struct slsi_spinlock slsi_test_devices_lock;
+static struct slsi_test_dev *slsi_test_devices[SLSI_UDI_MINOR_NODES];
+
+static struct class *test_dev_class;
+/* Major number of device created by system. */
+static dev_t major_number;
+
+static struct device_driver slsi_test_driver = {
+ .name = SLSI_TESTDRV_NAME
+};
+
+static void slsi_test_dev_attach_work(struct work_struct *work);
+static void slsi_test_dev_detach_work(struct work_struct *work);
+
+/* Tear down every simulated radio: workqueues, UDI nodes and device nodes,
+ * then the shared class and the chrdev region. Used both on module unload
+ * and as the error-path cleanup of slsi_sdio_func_drv_register().
+ */
+static void slsi_test_dev_free(void)
+{
+ int i;
+
+ for (i = 0; i < SLSI_UDI_MINOR_NODES; i++) {
+ struct slsi_test_dev *uftestdev;
+
+ /* Claim the slot under the lock so no one else can reach it. */
+ slsi_spinlock_lock(&slsi_test_devices_lock);
+ uftestdev = slsi_test_devices[i];
+ slsi_test_devices[i] = NULL;
+ slsi_spinlock_unlock(&slsi_test_devices_lock);
+ if (uftestdev != NULL) {
+ SLSI_INFO_NODEV("Free Test Device: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ uftestdev->hw_addr[0],
+ uftestdev->hw_addr[1],
+ uftestdev->hw_addr[2],
+ uftestdev->hw_addr[3],
+ uftestdev->hw_addr[4],
+ uftestdev->hw_addr[5]);
+
+ if (WARN_ON(uftestdev->attached)) {
+ /* Still attached: stop the BH and drain any queued
+ * attach/detach work before destroying the queue. */
+ slsi_test_bh_deinit(uftestdev);
+ flush_workqueue(uftestdev->attach_detach_work_queue);
+ }
+ destroy_workqueue(uftestdev->attach_detach_work_queue);
+
+ slsi_test_udi_node_deinit(uftestdev);
+
+ /* NOTE(review): uftestdev->dev->devt is read after
+ * device_unregister() may have dropped the last reference,
+ * and uftestdev itself is never kfree'd here - confirm
+ * lifetime/ownership. */
+ device_unregister(uftestdev->dev);
+ device_destroy(test_dev_class, uftestdev->dev->devt);
+ }
+ }
+
+ slsi_test_udi_deinit();
+
+ if (test_dev_class != NULL)
+ class_destroy(test_dev_class);
+
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+}
+
+/* Create and register the simulated radio devices.
+ *
+ * Allocates a chrdev region and a device class, then for each requested
+ * radio creates a struct slsi_test_dev with its ordered attach/detach
+ * workqueue, device node, UDI node and a fixed 00:12:FB:00:00:<i> MAC
+ * address (00:12:FB is a Samsung Electronics OUI).
+ *
+ * Returns 0 on success or a negative errno; on failure everything created
+ * so far is released via slsi_test_dev_free().
+ */
+int slsi_sdio_func_drv_register(void)
+{
+ int i = 0, err = 0, ret = 0;
+ struct slsi_test_dev *uftestdev;
+ dev_t devno;
+
+ SLSI_INFO_NODEV("Loading SLSI " SLSI_TESTDRV_NAME " Test Driver for mac80211\n");
+
+ if (radios > SLSI_UDI_MINOR_NODES) {
+ SLSI_ERR_NODEV("Loading failed, configure SLSI_UDI_MINOR_NODES to match no. of simulated radios\n");
+ return -ENOMEM;
+ }
+
+ slsi_spinlock_create(&slsi_test_devices_lock);
+ memset(slsi_test_devices, 0x00, sizeof(slsi_test_devices));
+
+ /* Allocate two device numbers for each device. */
+ ret = alloc_chrdev_region(&major_number, 0, SLSI_UDI_MINOR_NODES, SLSI_TESTDRV_NAME);
+ if (ret) {
+ /* Fix: nothing was allocated, so do not unregister the region. */
+ SLSI_ERR_NODEV("Failed to add alloc dev numbers: %d\n", ret);
+ major_number = 0;
+ return -ENOMEM;
+ }
+
+ test_dev_class = class_create(THIS_MODULE, SLSI_TESTDRV_NAME);
+ if (IS_ERR(test_dev_class)) {
+ /* Fix: release the chrdev region claimed above. */
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+ major_number = 0;
+ test_dev_class = NULL;
+ return -EAGAIN;
+ }
+
+ slsi_test_udi_init();
+
+ for (i = 0; i < radios; i++) {
+ /* Fix: zeroing allocation with NULL check instead of an
+ * unchecked kmalloc() followed by memset(). */
+ uftestdev = kzalloc(sizeof(*uftestdev), GFP_KERNEL);
+ if (!uftestdev) {
+ err = -ENOMEM;
+ goto failed_free_all;
+ }
+ uftestdev->attach_detach_work_queue = alloc_ordered_workqueue("Test Work", 0);
+ if (!uftestdev->attach_detach_work_queue) {
+ kfree(uftestdev);
+ err = -ENOMEM;
+ goto failed_free_all;
+ }
+ INIT_WORK(&uftestdev->attach_work, slsi_test_dev_attach_work);
+ INIT_WORK(&uftestdev->detach_work, slsi_test_dev_detach_work);
+
+ devno = MKDEV(MAJOR(major_number), i);
+
+ uftestdev->dev = device_create(test_dev_class, NULL, devno, uftestdev, SLSI_TESTDRV_NAME "_dev%d", i);
+ if (IS_ERR(uftestdev->dev)) {
+ SLSI_ERR_NODEV("device_create FAILED, returned - (%ld)\n", PTR_ERR(uftestdev->dev));
+ /* Fix: release the not-yet-registered device's resources;
+ * slsi_test_dev_free() only knows devices in the array. */
+ destroy_workqueue(uftestdev->attach_detach_work_queue);
+ kfree(uftestdev);
+ err = -ENOMEM;
+ goto failed_free_all;
+ }
+
+ uftestdev->dev->driver = &slsi_test_driver;
+
+ mutex_init(&uftestdev->attach_detach_mutex);
+ slsi_test_bh_init(uftestdev);
+ spin_lock_init(&uftestdev->route_spinlock);
+
+ if (slsi_test_udi_node_init(uftestdev, uftestdev->dev) != 0) {
+ SLSI_ERR_NODEV("udi <node> init FAILED\n");
+ err = -EPERM;
+ goto failed_dev_unregister;
+ }
+
+ /* Fixed MAC address instead of slsi_get_hw_mac_address():
+ * 00:12:FB:00:00:<i> where the last byte is the radio index
+ * (kzalloc already zeroed hw_addr). */
+ uftestdev->hw_addr[1] = 0x12;
+ uftestdev->hw_addr[2] = 0xFB;
+ uftestdev->hw_addr[ETH_ALEN - 1] += (i & 0xff);
+
+ SLSI_INFO_NODEV("Create Test Device: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ uftestdev->hw_addr[0],
+ uftestdev->hw_addr[1],
+ uftestdev->hw_addr[2],
+ uftestdev->hw_addr[3],
+ uftestdev->hw_addr[4],
+ uftestdev->hw_addr[5]);
+ slsi_test_devices[uftestdev->device_minor_number] = uftestdev;
+ }
+
+ return 0;
+
+failed_dev_unregister:
+ device_unregister(uftestdev->dev);
+ device_destroy(test_dev_class, uftestdev->dev->devt);
+ /* Fix: the failing device is not in slsi_test_devices[] yet, so free
+ * its workqueue and memory here. */
+ destroy_workqueue(uftestdev->attach_detach_work_queue);
+ kfree(uftestdev);
+failed_free_all:
+ slsi_test_dev_free();
+
+ /* Fix: propagate the real error instead of always returning -EPERM. */
+ return err ? err : -EPERM;
+}
+
+/* Module-unload counterpart of slsi_sdio_func_drv_register(): releases all
+ * simulated radio devices via slsi_test_dev_free().
+ */
+void slsi_sdio_func_drv_unregister(void)
+{
+ SLSI_INFO_NODEV("Unloading UF6K Test Driver for mac80211\n");
+ slsi_test_dev_free();
+}
+
+/* Attach the slsi core to a simulated device and start its bottom half.
+ * Serialised against detach by attach_detach_mutex; a second attach while
+ * already attached is logged and ignored.
+ */
+void slsi_test_dev_attach(struct slsi_test_dev *uftestdev)
+{
+ struct slsi_dev *sdev;
+ struct scsc_service_client service_client;
+
+ mutex_lock(&uftestdev->attach_detach_mutex);
+ SLSI_INFO_NODEV("UnitTest UDI Attached : %02X:%02X:%02X:%02X:%02X:%02X\n",
+ uftestdev->hw_addr[0],
+ uftestdev->hw_addr[1],
+ uftestdev->hw_addr[2],
+ uftestdev->hw_addr[3],
+ uftestdev->hw_addr[4],
+ uftestdev->hw_addr[5]);
+
+ if (uftestdev->attached) {
+ SLSI_ERR_NODEV("attached == true\n");
+ goto exit;
+ }
+
+ /* Mark attached before the core attach; rolled back below on failure. */
+ uftestdev->attached = true;
+ sdev = slsi_dev_attach(uftestdev->dev, (struct scsc_mx *)uftestdev, &service_client);
+
+ /* Publish the sdev pointer under the device-table lock so readers of
+ * slsi_test_devices[] see a consistent state. */
+ slsi_spinlock_lock(&slsi_test_devices_lock);
+ uftestdev->sdev = sdev;
+ if (!sdev) {
+ SLSI_ERR_NODEV("slsi_dev_attach() Failed\n");
+ uftestdev->attached = false;
+ } else {
+ slsi_test_bh_start(uftestdev);
+ }
+
+ slsi_spinlock_unlock(&slsi_test_devices_lock);
+
+exit:
+ mutex_unlock(&uftestdev->attach_detach_mutex);
+}
+
+/* Detach the slsi core from a simulated device: stop its bottom half, detach
+ * the core and clear the published sdev pointer. Serialised against attach
+ * by attach_detach_mutex; a detach while not attached is logged and ignored.
+ */
+void slsi_test_dev_detach(struct slsi_test_dev *uftestdev)
+{
+ mutex_lock(&uftestdev->attach_detach_mutex);
+ SLSI_INFO(uftestdev->sdev, "UnitTest UDI Detached : %02X:%02X:%02X:%02X:%02X:%02X\n",
+ uftestdev->hw_addr[0],
+ uftestdev->hw_addr[1],
+ uftestdev->hw_addr[2],
+ uftestdev->hw_addr[3],
+ uftestdev->hw_addr[4],
+ uftestdev->hw_addr[5]);
+ if (!uftestdev->attached) {
+ SLSI_ERR(uftestdev->sdev, "attached != true\n");
+ goto exit;
+ }
+
+ uftestdev->attached = false;
+ if (uftestdev->sdev) {
+ struct slsi_dev *sdev = uftestdev->sdev;
+
+ /* Stop the BH before detaching so no work runs on a dead sdev. */
+ slsi_test_bh_stop(uftestdev);
+
+ slsi_dev_detach(sdev);
+ slsi_spinlock_lock(&slsi_test_devices_lock);
+ uftestdev->sdev = NULL;
+ slsi_spinlock_unlock(&slsi_test_devices_lock);
+ }
+
+exit:
+ mutex_unlock(&uftestdev->attach_detach_mutex);
+}
+
+/* Derive per-interface MAC addresses (WLAN, P2P, P2PX/soft-AP) from the
+ * hardware address and program the WLAN and P2P netdevs with them.
+ */
+void slsi_init_netdev_mac_addr(struct slsi_dev *sdev)
+{
+ u8 *wlan_addr = sdev->netdev_addresses[SLSI_NET_INDEX_WLAN];
+ u8 *p2p_addr = sdev->netdev_addresses[SLSI_NET_INDEX_P2P];
+ u8 *p2px_addr = sdev->netdev_addresses[SLSI_NET_INDEX_P2PX_SWLAN];
+
+ /* Get mac address from file system. */
+ slsi_get_hw_mac_address(sdev, sdev->hw_addr);
+
+ SLSI_ETHER_COPY(wlan_addr, sdev->hw_addr);
+
+ SLSI_ETHER_COPY(p2p_addr, sdev->hw_addr);
+ p2p_addr[0] |= 0x02; /* Set the local bit */
+
+ SLSI_ETHER_COPY(p2px_addr, sdev->hw_addr);
+ p2px_addr[0] |= 0x02; /* Set the local bit */
+ p2px_addr[4] ^= 0x80; /* EXOR 5th byte with 0x80 */
+
+ SLSI_ETHER_COPY(sdev->netdev[SLSI_NET_INDEX_WLAN]->dev_addr, wlan_addr);
+ SLSI_ETHER_COPY(sdev->netdev[SLSI_NET_INDEX_P2P]->dev_addr, p2p_addr);
+}
+
+/* Rewrite the third byte of the IPv4 addresses carried in an ARP or IPv4
+ * frame to the route's subnet byte, so traffic forwarded between two
+ * simulated radios lands in the destination device's /24 network. For IPv4
+ * the header checksum is recomputed after the rewrite.
+ *
+ * Returns true when the frame was ARP or IPv4 and was remapped, false for
+ * any other ethertype (frame left untouched).
+ */
+bool slsi_test_process_signal_ip_remap(struct slsi_test_dev *uftestdev, struct sk_buff *skb, struct slsi_test_data_route *route)
+{
+ int proto = ntohs(skb->protocol);
+ /* Skip the Ethernet header to reach the network-layer payload. */
+ u8 *frame = fapi_get_data(skb) + ETH_HLEN;
+
+ switch (proto) {
+ case ETH_P_ARP:
+ {
+ /* ARP over Ethernet/IPv4: fixed field offsets in the ARP body. */
+ u8 *sha = &frame[8]; /* sender hardware address */
+ u8 *spa = &frame[14]; /* sender protocol address */
+ u8 *tha = &frame[18]; /* target hardware address */
+ u8 *tpa = &frame[24]; /* target protocol address */
+
+ SLSI_UNUSED_PARAMETER(sha);
+ SLSI_UNUSED_PARAMETER(spa);
+ SLSI_UNUSED_PARAMETER(tha);
+ SLSI_UNUSED_PARAMETER(tpa);
+
+ SLSI_DBG4(uftestdev->sdev, SLSI_TEST, "ARP: sha:%pM, spa:%d.%d.%d.%d, tha:%pM, tpa:%d.%d.%d.%d\n",
+ sha, spa[0], spa[1], spa[2], spa[3],
+ tha, tpa[0], tpa[1], tpa[2], tpa[3]);
+ spa[2] = route->ipsubnet;
+ tpa[2] = route->ipsubnet;
+ SLSI_DBG4(uftestdev->sdev, SLSI_TEST, "ARP: sha:%pM, spa:%d.%d.%d.%d, tha:%pM, tpa:%d.%d.%d.%d\n",
+ sha, spa[0], spa[1], spa[2], spa[3],
+ tha, tpa[0], tpa[1], tpa[2], tpa[3]);
+ return true;
+ }
+ case ETH_P_IP:
+ {
+ struct iphdr *iph = (struct iphdr *)frame;
+ u8 *src = (u8 *)&iph->saddr;
+ u8 *dst = (u8 *)&iph->daddr;
+
+ SLSI_UNUSED_PARAMETER(src);
+ SLSI_UNUSED_PARAMETER(dst);
+
+ SLSI_DBG4(uftestdev->sdev, SLSI_TEST, "PING: src:%d.%d.%d.%d, dst:%d.%d.%d.%d, check:0x%.4X\n",
+ src[0], src[1], src[2], src[3],
+ dst[0], dst[1], dst[2], dst[3],
+ iph->check);
+ src[2] = route->ipsubnet;
+ dst[2] = route->ipsubnet;
+
+ /* Recompute the IPv4 header checksum; ip_send_check() zeroes
+ * iph->check itself, so no manual reset is needed. */
+ ip_send_check(iph);
+
+ SLSI_DBG4(uftestdev->sdev, SLSI_TEST, "PING: src:%d.%d.%d.%d, dst:%d.%d.%d.%d, check:0x%.4X\n",
+ src[0], src[1], src[2], src[3],
+ dst[0], dst[1], dst[2], dst[3],
+ iph->check);
+
+ return true;
+ }
+ default:
+ SLSI_DBG4(uftestdev->sdev, SLSI_TEST, "Proto:0x%.4X\n", proto);
+ break;
+ }
+ return false;
+}
+
+/* Look up the configured route entry for the given peer MAC address.
+ * Caller must hold route_spinlock; returns NULL when no entry matches.
+ */
+static struct slsi_test_data_route *slsi_test_process_signal_get_route(struct slsi_test_dev *uftestdev, const u8 *mac)
+{
+ int idx;
+
+ if (WARN_ON(!spin_is_locked(&uftestdev->route_spinlock)))
+ return NULL;
+
+ for (idx = 0; idx < SLSI_AP_PEER_CONNECTIONS_MAX; idx++) {
+ struct slsi_test_data_route *entry = &uftestdev->route[idx];
+
+ if (entry->configured && ether_addr_equal(entry->mac, mac))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Find the first unconfigured route slot. Caller must hold route_spinlock;
+ * returns NULL when the route table is full.
+ */
+static struct slsi_test_data_route *slsi_test_process_signal_get_free_route(struct slsi_test_dev *uftestdev)
+{
+ int idx;
+
+ if (WARN_ON(!spin_is_locked(&uftestdev->route_spinlock)))
+ return NULL;
+
+ for (idx = 0; idx < SLSI_AP_PEER_CONNECTIONS_MAX; idx++) {
+ struct slsi_test_data_route *entry = &uftestdev->route[idx];
+
+ if (!entry->configured)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* DEBUG_GENERIC_REQ (debug_words[1] == 2): install a data route to a peer
+ * test device.
+ *
+ * debug_words[2..4] carry the peer MAC, two bytes per word (high byte
+ * first); debug_words[5] is the VIF stamped on forwarded frames and
+ * debug_words[6] the peer's subnet byte used for IP remapping.
+ */
+void slsi_test_process_signal_set_route(struct slsi_test_dev *uftestdev, struct sk_buff *skb)
+{
+ struct slsi_test_data_route *route;
+ u8 mac[ETH_ALEN];
+ u16 dest_device_minor_number = 0xFFFF;
+ int i;
+
+ mac[0] = fapi_get_buff(skb, u.debug_generic_req.debug_words[2]) >> 8;
+ mac[1] = fapi_get_buff(skb, u.debug_generic_req.debug_words[2]) & 0xFF;
+ mac[2] = fapi_get_buff(skb, u.debug_generic_req.debug_words[3]) >> 8;
+ mac[3] = fapi_get_buff(skb, u.debug_generic_req.debug_words[3]) & 0xFF;
+ mac[4] = fapi_get_buff(skb, u.debug_generic_req.debug_words[4]) >> 8;
+ mac[5] = fapi_get_buff(skb, u.debug_generic_req.debug_words[4]) & 0xFF;
+
+ /* Resolve the peer MAC to the minor number of the owning test device. */
+ slsi_spinlock_lock(&slsi_test_devices_lock);
+ for (i = 0; i < SLSI_UDI_MINOR_NODES; i++) {
+ struct slsi_test_dev *destdev = slsi_test_devices[i];
+
+ if (destdev != NULL && ether_addr_equal(destdev->hw_addr, mac))
+ dest_device_minor_number = destdev->device_minor_number;
+ }
+ slsi_spinlock_unlock(&slsi_test_devices_lock);
+
+ if (dest_device_minor_number == 0xFFFF) {
+ SLSI_ERR(uftestdev->sdev, "Setting Route for %pM FAILED. No match found\n", mac);
+ return;
+ }
+ spin_lock(&uftestdev->route_spinlock);
+ /* Reuse an existing entry for this MAC, else claim a free slot. */
+ route = slsi_test_process_signal_get_route(uftestdev, mac);
+ if (!route)
+ route = slsi_test_process_signal_get_free_route(uftestdev);
+
+ if (route) {
+ SLSI_DBG1(uftestdev->sdev, SLSI_TEST, "Setting Route for %pM -> %pM\n", uftestdev->hw_addr, mac);
+ route->configured = true;
+ route->test_device_minor_number = dest_device_minor_number;
+ SLSI_ETHER_COPY(route->mac, mac);
+ route->vif = fapi_get_u16(skb, u.debug_generic_req.debug_words[5]);
+ route->ipsubnet = fapi_get_u16(skb, u.debug_generic_req.debug_words[6]) & 0xFF;
+ route->sequence_number = 1;
+ } else {
+ SLSI_ERR(uftestdev->sdev, "Setting Route for %pM FAILED. No Free Route Entry\n", mac);
+ }
+
+ spin_unlock(&uftestdev->route_spinlock);
+}
+
+/* DEBUG_GENERIC_REQ (debug_words[1] == 3): tear down the route to the peer
+ * whose MAC is carried in debug_words[2..4] (two bytes per word, high byte
+ * first). Missing routes are logged, not treated as an error.
+ */
+void slsi_test_process_signal_clear_route(struct slsi_test_dev *uftestdev, struct sk_buff *skb)
+{
+ struct slsi_test_data_route *route;
+ u8 mac[ETH_ALEN];
+
+ mac[0] = fapi_get_buff(skb, u.debug_generic_req.debug_words[2]) >> 8;
+ mac[1] = fapi_get_buff(skb, u.debug_generic_req.debug_words[2]) & 0xFF;
+ mac[2] = fapi_get_buff(skb, u.debug_generic_req.debug_words[3]) >> 8;
+ mac[3] = fapi_get_buff(skb, u.debug_generic_req.debug_words[3]) & 0xFF;
+ mac[4] = fapi_get_buff(skb, u.debug_generic_req.debug_words[4]) >> 8;
+ mac[5] = fapi_get_buff(skb, u.debug_generic_req.debug_words[4]) & 0xFF;
+
+ spin_lock(&uftestdev->route_spinlock);
+ SLSI_DBG1(uftestdev->sdev, SLSI_TEST, "Clearing Route for %pM\n", mac);
+ route = slsi_test_process_signal_get_route(uftestdev, mac);
+ if (route)
+ route->configured = false;
+ else
+ SLSI_ERR(uftestdev->sdev, "Clearing Route for %pM FAILED. No Route Entry Found\n", mac);
+ spin_unlock(&uftestdev->route_spinlock);
+}
+
+/* Intercept signals sent towards the (absent) firmware.
+ *
+ * DEBUG_GENERIC_REQ with debug_words[0] == 0x1357 drives the simulator
+ * (0 = detach, 1 = attach, 2 = set route, 3 = clear route); such signals
+ * are always consumed. MA_UNITDATA_REQ frames with a configured route are
+ * rewritten into MA_UNITDATA_IND and injected into the peer device's HIP.
+ *
+ * Returns true when the skb was consumed, false when the caller should
+ * continue processing it.
+ */
+bool slsi_test_process_signal(struct slsi_test_dev *uftestdev, struct sk_buff *skb)
+{
+ if (fapi_get_sigid(skb) == DEBUG_GENERIC_REQ) {
+ SLSI_DBG1(uftestdev->sdev, SLSI_TEST, "fapi_get_u16(skb, u.debug_generic_req.debug_words[0]) = %d\n", fapi_get_u16(skb, u.debug_generic_req.debug_words[0]));
+ SLSI_DBG1(uftestdev->sdev, SLSI_TEST, "fapi_get_u16(skb, u.debug_generic_req.debug_words[1]) = %d\n", fapi_get_u16(skb, u.debug_generic_req.debug_words[1]));
+ if (fapi_get_u16(skb, u.debug_generic_req.debug_words[0]) == 0x1357) {
+ if (fapi_get_u16(skb, u.debug_generic_req.debug_words[1]) == 0)
+ queue_work(uftestdev->attach_detach_work_queue, &uftestdev->detach_work);
+ else if (fapi_get_u16(skb, u.debug_generic_req.debug_words[1]) == 1)
+ queue_work(uftestdev->attach_detach_work_queue, &uftestdev->attach_work);
+ else if (fapi_get_u16(skb, u.debug_generic_req.debug_words[1]) == 2)
+ slsi_test_process_signal_set_route(uftestdev, skb);
+ else if (fapi_get_u16(skb, u.debug_generic_req.debug_words[1]) == 3)
+ slsi_test_process_signal_clear_route(uftestdev, skb);
+ }
+ slsi_kfree_skb(skb);
+ return true;
+ }
+
+ /* Automatically route data frames to the peer test device, bypassing
+ * the (absent) firmware and air interface. */
+ if (fapi_get_sigid(skb) == MA_UNITDATA_REQ) {
+ struct slsi_test_data_route *route;
+ struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+
+ spin_lock(&uftestdev->route_spinlock);
+ route = slsi_test_process_signal_get_route(uftestdev, ehdr->h_dest);
+ if (route && slsi_test_process_signal_ip_remap(uftestdev, skb, route)) {
+ struct slsi_skb_cb *cb;
+ /* NOTE(review): 'req' is never read after this copy -
+ * confirm whether it can be removed. */
+ struct fapi_signal req = *((struct fapi_signal *)skb->data);
+ struct fapi_signal *ind;
+
+ /* Convert the MA_UNITDATA_REQ to a MA_UNITDATA_IND */
+ WARN_ON(!skb_pull(skb, fapi_sig_size(ma_unitdata_req)));
+ ind = (struct fapi_signal *)skb_push(skb, fapi_sig_size(ma_unitdata_ind));
+ if (WARN_ON(!ind)) {
+ slsi_kfree_skb(skb);
+ spin_unlock(&uftestdev->route_spinlock);
+ return true;
+ }
+
+ ind->id = cpu_to_le16(MA_UNITDATA_IND);
+ ind->receiver_pid = 0;
+ ind->sender_pid = 0;
+ fapi_set_u16(skb, u.ma_unitdata_ind.vif, cpu_to_le16(route->vif));
+ fapi_set_u16(skb, u.ma_unitdata_ind.sequence_number, route->sequence_number++);
+
+ cb = slsi_skb_cb_init(skb);
+ cb->sig_length = fapi_get_expected_size(skb);
+ cb->data_length = skb->len;
+
+ /* Deliver to the peer's HIP if it is still present and
+ * attached; otherwise drop the frame and kill the route. */
+ slsi_spinlock_lock(&slsi_test_devices_lock);
+ if (slsi_test_devices[route->test_device_minor_number] &&
+ slsi_test_devices[route->test_device_minor_number]->sdev) {
+ if (slsi_hip_rx(slsi_test_devices[route->test_device_minor_number]->sdev, skb) != 0)
+ slsi_kfree_skb(skb);
+ } else {
+ route->configured = false;
+ slsi_kfree_skb(skb);
+ }
+ slsi_spinlock_unlock(&slsi_test_devices_lock);
+ spin_unlock(&uftestdev->route_spinlock);
+ return true;
+ }
+ spin_unlock(&uftestdev->route_spinlock);
+ }
+
+ return false;
+}
+
+/* Work item (queued by slsi_test_process_signal(), debug_words[1] == 1):
+ * attach the simulated device and re-register its UDI node.
+ */
+static void slsi_test_dev_attach_work(struct work_struct *work)
+{
+ struct slsi_test_dev *uftestdev = container_of(work, struct slsi_test_dev, attach_work);
+
+ SLSI_INFO_NODEV("UnitTest TEST Attach\n");
+ slsi_test_dev_attach(uftestdev);
+ slsi_test_udi_node_reregister(uftestdev);
+}
+
+/* Work item (queued by slsi_test_process_signal(), debug_words[1] == 0):
+ * detach the simulated device.
+ */
+static void slsi_test_dev_detach_work(struct work_struct *work)
+{
+ struct slsi_test_dev *uftestdev = container_of(work, struct slsi_test_dev, detach_work);
+
+ SLSI_INFO(uftestdev->sdev, "UnitTest TEST Detach\n");
+ slsi_test_dev_detach(uftestdev);
+}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include <linux/kthread.h>
+
+#include "unittest.h"
+#include "hip.h"
+#include "sap.h"
+#include "debug.h"
+#include "procfs.h"
+#include "hip4.h"
+
/* Block size used when emulating the SDIO transport in unit tests. */
#define SLSI_TEST_DEV_SDIO_BLOCK_SIZE 500
/* NOTE(review): this object is also tentatively re-declared below among
 * the "UT dummies"; C merges both into this single initialised
 * definition, but the duplicate should be removed.
 */
bool hip4_sampler_sample_start_stop_q = true;
/* Fake HIP control block handed to the driver instead of real shared memory. */
struct hip4_hip_control test_hip_control;

/* SAP implementations container. Local and static to hip */
static struct hip_sap {
	struct sap_api *sap[SAP_TOTAL];
} hip_sap_cont;
+
+/* Register SAP with HIP layer */
+int slsi_hip_sap_register(struct sap_api *sap_api)
+{
+ u8 class = sap_api->sap_class;
+
+ if (class >= SAP_TOTAL)
+ return -ENODEV;
+
+ hip_sap_cont.sap[class] = sap_api;
+
+ return 0;
+}
+
+/* UNregister SAP with HIP layer */
+int slsi_hip_sap_unregister(struct sap_api *sap_api)
+{
+ u8 class = sap_api->sap_class;
+
+ if (class >= SAP_TOTAL)
+ return -ENODEV;
+
+ hip_sap_cont.sap[class] = NULL;
+
+ return 0;
+}
+
+int slsi_hip_sap_setup(struct slsi_dev *sdev)
+{
+ /* Execute callbacks to intorm Supported version */
+ u16 version = 0;
+
+ if (hip_sap_cont.sap[SAP_MLME]->sap_version_supported) {
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_mlme_ver);
+ if (hip_sap_cont.sap[SAP_MLME]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_MA]->sap_version_supported) {
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_ma_ver);
+ if (hip_sap_cont.sap[SAP_MA]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_DBG]->sap_version_supported) {
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_debug_ver);
+ if (hip_sap_cont.sap[SAP_DBG]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ if (hip_sap_cont.sap[SAP_TST]->sap_version_supported) {
+ version = scsc_wifi_get_hip_config_version_4_u16(&sdev->hip4_inst.hip_control->config_v4, sap_test_ver);
+ if (hip_sap_cont.sap[SAP_TST]->sap_version_supported(version))
+ return -ENODEV;
+ } else {
+ return -ENODEV;
+ }
+
+ /* Success */
+ return 0;
+}
+
+/* SAP rx proxy */
+int slsi_hip_rx(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ u16 pid;
+
+ /* Udi test : If pid in UDI range then pass to UDI and ignore */
+ slsi_log_clients_log_signal_fast(sdev, &sdev->log_clients, skb, SLSI_LOG_DIRECTION_TO_HOST);
+ pid = fapi_get_u16(skb, receiver_pid);
+ if (pid >= SLSI_TX_PROCESS_ID_UDI_MIN && pid <= SLSI_TX_PROCESS_ID_UDI_MAX) {
+ slsi_kfree_skb(skb);
+ return 0;
+ }
+
+ if (fapi_is_ma(skb))
+ return hip_sap_cont.sap[SAP_MA]->sap_handler(sdev, skb);
+
+ if (fapi_is_mlme(skb))
+ return hip_sap_cont.sap[SAP_MLME]->sap_handler(sdev, skb);
+
+ if (fapi_is_debug(skb))
+ return hip_sap_cont.sap[SAP_DBG]->sap_handler(sdev, skb);
+
+ if (fapi_is_test(skb))
+ return hip_sap_cont.sap[SAP_TST]->sap_handler(sdev, skb);
+
+ return -EIO;
+}
+
/* value used at all levels in the driver */
/* Initialise the stub HIP instance for unit tests: zero the state,
 * create the mutex, and point the HIP4 control block at the local
 * test_hip_control fake instead of real shared memory. Always returns 0.
 */
int slsi_hip_init(struct slsi_dev *sdev, struct device *dev)
{
	SLSI_UNUSED_PARAMETER(dev);

	memset(&sdev->hip, 0, sizeof(sdev->hip));

	sdev->hip.sdev = sdev;
	mutex_init(&sdev->hip.hip_mutex);

	sdev->hip4_inst.hip_control = &test_hip_control;
	return 0;
}
+
/* Tear down what slsi_hip_init() created. */
void slsi_hip_deinit(struct slsi_dev *sdev)
{
	mutex_destroy(&sdev->hip.hip_mutex);
}

/* Stub: nothing to stop in the unit-test build. */
int slsi_hip_stop(struct slsi_dev *sdev)
{
	return 0;
}
+
/* Stub: report the control queue as always fully available. */
int hip4_free_ctrl_slots_count(struct slsi_hip4 *hip)
{
	return HIP4_CTL_SLOTS;
}

/* Stub transmit: log the outgoing signal for any UDI log clients, then
 * drop it instead of handing it to hardware. Always reports success.
 */
int scsc_wifi_transmit_frame(struct slsi_hip4 *hip, bool ctrl_packet, struct sk_buff *skb)
{
	struct slsi_dev *sdev = container_of(hip, struct slsi_dev, hip4_inst);

	slsi_log_clients_log_signal_fast(sdev, &sdev->log_clients, skb, SLSI_LOG_DIRECTION_FROM_HOST);

	slsi_kfree_skb(skb);

	return 0;
}

/* Stub bottom-half worker: no deferred work in the unit-test build. */
void slsi_test_bh_work_f(struct work_struct *work)
{
}
+
/* ALL Dummies to get UT build through goes below */
/* Sampler control flags referenced by the production HIP4 code; the
 * unit-test build only needs them to link.
 */
bool hip4_sampler_sample_q;
bool hip4_sampler_sample_qref;
bool hip4_sampler_sample_int;
bool hip4_sampler_sample_fapi;
bool hip4_sampler_sample_through;
/* NOTE(review): tentative re-declaration of the object initialised to
 * true near the top of this file; C merges the two into one definition,
 * but the duplicate should be removed.
 */
bool hip4_sampler_sample_start_stop_q;
bool hip4_sampler_sample_mbulk;
bool hip4_sampler_sample_qfull;
bool hip4_sampler_sample_mfull;
bool hip4_sampler_vif;
bool hip4_sampler_bot;
bool hip4_sampler_pkt_tx;

/* Stub: sampler records are discarded in the unit-test build. */
void hip4_sampler_update_record(u32 minor, u8 param1, u8 param2, u8 param3, u8 param4)
{
}

/* Stub: no sampler instance is created in the unit-test build. */
void hip4_sampler_create(struct slsi_dev *sdev, struct scsc_mx *mx)
{
}

void hip4_sampler_destroy(struct slsi_dev *sdev, struct scsc_mx *mx)
{
}

/* Stub: registration always succeeds. */
int hip4_sampler_register_hip(struct scsc_mx *mx)
{
	return 0;
}
+
/* ---------------------------------------------------------------------
 * Flow-control queue (FCQ) dummies: the unit-test build performs no
 * real queue accounting, so every init/deinit is a no-op and every
 * transmit/receive/stat call reports success (0).
 * ------------------------------------------------------------------ */
int scsc_wifi_fcq_ctrl_q_init(struct scsc_wifi_fcq_ctrl_q *queue)
{
	return 0;
}

void scsc_wifi_fcq_ctrl_q_deinit(struct scsc_wifi_fcq_ctrl_q *queue)
{
}

int scsc_wifi_fcq_unicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u8 qs_num, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer)
{
	return 0;
}

int scsc_wifi_fcq_multicast_qset_init(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif)
{
	return 0;
}

void scsc_wifi_fcq_qset_deinit(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, struct slsi_dev *sdev, u8 vif, struct slsi_peer *peer)
{
}

int scsc_wifi_fcq_transmit_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
{
	return 0;
}

int scsc_wifi_fcq_receive_data(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
{
	return 0;
}

int scsc_wifi_fcq_receive_data_no_peer(struct net_device *dev, u16 priority, struct slsi_dev *sdev, u8 vif, u8 peer_index)
{
	return 0;
}

/* Stub: netdev queues are never paused in the unit-test build. */
void scsc_wifi_fcq_pause_queues(struct slsi_dev *sdev)
{
}

void scsc_wifi_fcq_unpause_queues(struct slsi_dev *sdev)
{
}

int scsc_wifi_fcq_transmit_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue)
{
	return 0;
}

int scsc_wifi_fcq_receive_ctrl(struct net_device *dev, struct scsc_wifi_fcq_ctrl_q *queue)
{
	return 0;
}

int scsc_wifi_fcq_update_smod(struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_ps_state peer_ps_state,
			      enum scsc_wifi_fcq_queue_set_type type)
{
	return 0;
}

int scsc_wifi_fcq_8021x_port_state(struct net_device *dev, struct scsc_wifi_fcq_data_qset *qs, enum scsc_wifi_fcq_8021x_state state)
{
	return 0;
}

/* Stub stats: leave all output parameters untouched and report success. */
int scsc_wifi_fcq_stat_queue(struct scsc_wifi_fcq_q_header *queue,
			     struct scsc_wifi_fcq_q_stat *queue_stat,
			     int *qmod, int *qcod)
{
	return 0;
}

int scsc_wifi_fcq_stat_queueset(struct scsc_wifi_fcq_data_qset *queue_set,
				struct scsc_wifi_fcq_q_stat *queue_stat,
				int *smod, int *scod, enum scsc_wifi_fcq_8021x_state *cp_state,
				u32 *peer_ps_state_transitions)
{
	return 0;
}
+
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include "dev.h"
+#include "unittest.h"
+#include "debug.h"
+#include "hip_bh.h"
+
+int slsi_sm_service_driver_register(void)
+{
+ int csr_result;
+
+ csr_result = slsi_sdio_func_drv_register();
+ if (csr_result != 0) {
+ SLSI_ERR_NODEV("Failed to register the pretend SDIO function driver: csrResult=%d\n", csr_result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/* Unwind the pretend SDIO function driver registration. */
void slsi_sm_service_driver_unregister(void)
{
	slsi_sdio_func_drv_unregister();
}

/* Stub: service failures are ignored in the unit-test build. */
void slsi_sm_service_failed(struct slsi_dev *sdev, const char *reason)
{
}

/* Stub: production (non-test) mode is always reported. */
bool slsi_is_test_mode_enabled(void)
{
	return false;
}

/* Stub: RF test mode is never enabled in the unit-test build. */
bool slsi_is_rf_test_mode_enabled(void)
{
	return false;
}

/* Stub: RF test mode check always succeeds. */
int slsi_check_rf_test_mode(void)
{
	return 0;
}

/* Stubs: the WLAN service lifecycle is a no-op without real firmware. */
int slsi_sm_wlan_service_start(struct slsi_dev *sdev)
{
	return 0;
}

void slsi_sm_wlan_service_stop(struct slsi_dev *sdev)
{
}

int slsi_sm_wlan_service_open(struct slsi_dev *sdev)
{
	return 0;
}

/* Stub: no configuration files are loaded; *conf is left untouched. */
int mx140_file_request_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_rel_path, const char *filename)
{
	return 0;
}

void mx140_file_release_conf(struct scsc_mx *mx, const struct firmware *conf)
{
}

void slsi_sm_wlan_service_close(struct slsi_dev *sdev)
{
}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/sysfs.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+
+#include "dev.h"
+
+#include "hip.h"
+#include "log_clients.h"
+#include "debug.h"
+#include "unittest.h"
+#include "udi.h"
+
+#include "unifiio.h"
+
/* Base names for the UDI char device nodes and their sysfs class. */
#define UDI_CHAR_DEVICE_NAME "s5e7570unittesthip"
#define UDI_CLASS_NAME "s5e7570unittesthip"

/**
 * Control character device for debug
 * ==================================
 */
#define NUM_CHAR_CLIENTS 1 /* Number of client programmes on one node. */

#define MAX_MINOR (SLSI_UDI_MINOR_NODES - 1) /* Maximum node number. */
static dev_t major_number; /* Major number of device created by system. */
static struct class *class; /* Device class. */
+
struct slsi_test_cdev_client;

/* One char device instance per minor: the cdev itself plus the client
 * slots (NUM_CHAR_CLIENTS userspace opens) attached to it.
 */
struct slsi_cdev {
	int minor;
	struct cdev cdev;
	struct slsi_test_cdev_client *client[NUM_CHAR_CLIENTS];

	struct slsi_test_dev *uftestdev; /* backing unit-test device */
	struct device *parent;           /* parent for device_create() */
};

/* Per-open state: the signal log queue a reader drains plus filtering. */
struct slsi_test_cdev_client {
	struct slsi_cdev *ufcdev;
	int log_enabled;                 /* non-zero while registered as a log client */

	/* Flags set for special filtering of ma_packet data */
	u16 ma_packet_filter_config;

	struct sk_buff_head log_list;    /* queued udi_msg_t-prefixed signals */
	wait_queue_head_t log_wq;        /* readers block here until log_list fills */
};

/**
 * One minor node per phy. In normal driver mode, this may be one.
 * In unit test mode, this may be several.
 */
static struct slsi_cdev *uf_cdevs[SLSI_UDI_MINOR_NODES];

static int udi_log_event(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
static int send_signal_to_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
static int send_signal_to_inverse_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
+
+static int slsi_test_cdev_open(struct inode *inode, struct file *file)
+{
+ struct slsi_cdev *uf_cdev;
+ struct slsi_test_cdev_client *client;
+ int indx;
+ int minor;
+
+ minor = iminor(inode);
+ if (minor > MAX_MINOR) {
+ SLSI_ERR_NODEV("minor %d exceeds range\n", minor);
+ return -EINVAL;
+ }
+
+ uf_cdev = uf_cdevs[minor];
+ if (!uf_cdev) {
+ SLSI_ERR_NODEV("no cdev instance for minor %d\n", minor);
+ return -EINVAL;
+ }
+
+ if (!uf_cdev->uftestdev) {
+ SLSI_ERR_NODEV("uftestdev not set\n");
+ return -EINVAL;
+ }
+
+ for (indx = 0; indx < NUM_CHAR_CLIENTS; indx++)
+ if (uf_cdev->client[indx] == NULL)
+ break;
+ if (indx >= NUM_CHAR_CLIENTS) {
+ SLSI_ERR_NODEV("already opened\n");
+ return -ENOTSUPP;
+ }
+
+ client = kmalloc(sizeof(*client), GFP_KERNEL);
+ if (client == NULL)
+ return -ENOMEM;
+ memset(client, 0, sizeof(struct slsi_test_cdev_client));
+
+ /* init other resource */
+ skb_queue_head_init(&client->log_list);
+ init_waitqueue_head(&client->log_wq);
+
+ client->ufcdev = uf_cdev;
+ uf_cdev->client[indx] = client;
+ file->private_data = client;
+
+ slsi_test_dev_attach(client->ufcdev->uftestdev);
+
+ return 0;
+}
+
/* Release handler: unhook this client from its cdev slot, wake any
 * blocked reader, unregister the log client, detach the test device,
 * drain the log queue and free the per-client state.
 */
static int slsi_test_cdev_release(struct inode *inode, struct file *filp)
{
	struct slsi_test_cdev_client *client = (void *)filp->private_data;
	struct slsi_cdev *uf_cdev;
	int indx;
	int minor;

	minor = iminor(inode);
	if (minor > MAX_MINOR) {
		SLSI_ERR_NODEV("minor %d exceeds range\n", minor);
		return -EINVAL;
	}

	uf_cdev = uf_cdevs[minor];
	if (!uf_cdev) {
		SLSI_ERR_NODEV("no cdev instance for minor %d\n", minor);
		return -EINVAL;
	}

	if (client == NULL)
		return -EINVAL;

	if (!client->ufcdev) {
		SLSI_ERR_NODEV("ufcdev not set\n");
		return -EINVAL;
	}

	if (!client->ufcdev->uftestdev) {
		SLSI_ERR_NODEV("uftestdev not set\n");
		return -EINVAL;
	}

	/* Locate this client's slot on the cdev. */
	for (indx = 0; indx < NUM_CHAR_CLIENTS; indx++)
		if (uf_cdev->client[indx] == client)
			break;
	if (indx >= NUM_CHAR_CLIENTS) {
		SLSI_ERR_NODEV("client not found in list\n");
		return -EINVAL;
	}

	/* Kick any reader still blocked in slsi_test_cdev_read(). */
	if (waitqueue_active(&client->log_wq))
		wake_up_interruptible(&client->log_wq);

	if (client->log_enabled && client->ufcdev->uftestdev->sdev)
		slsi_log_client_unregister(client->ufcdev->uftestdev->sdev, client);

	slsi_test_dev_detach(client->ufcdev->uftestdev);

	slsi_skb_queue_purge(&client->log_list);

	/* free other resource */
	kfree(client);

	uf_cdev->client[indx] = NULL;

	return 0;
}
+
+static ssize_t slsi_test_cdev_read(struct file *filp, char *p, size_t len, loff_t *poff)
+{
+ struct slsi_test_cdev_client *client = (void *)filp->private_data;
+ int msglen;
+ struct sk_buff *skb;
+
+ SLSI_UNUSED_PARAMETER(poff);
+
+ if (client == NULL)
+ return -EINVAL;
+
+ if (!skb_queue_len(&client->log_list)) {
+ if (filp->f_flags & O_NONBLOCK)
+ return 0;
+
+ /* wait until getting a signal */
+ if (wait_event_interruptible(client->log_wq, skb_queue_len(&client->log_list)))
+ return -ERESTARTSYS;
+ }
+
+ skb = slsi_skb_dequeue(&client->log_list);
+
+ msglen = skb->len;
+ if (msglen > (s32)len) {
+ SLSI_WARN_NODEV("truncated read to %d actual msg len is %lu\n", msglen, (unsigned long int)len);
+ msglen = len;
+ }
+
+ SLSI_DBG_HEX_NODEV(SLSI_TEST, skb->data, skb->len, "cdev read skb:%p skb->data:%p\n", skb, skb->data);
+ if (copy_to_user(p, skb->data, msglen)) {
+ SLSI_ERR_NODEV("Failed to copy UDI log to user\n");
+ slsi_kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ slsi_kfree_skb(skb);
+ return msglen;
+}
+
+static ssize_t slsi_test_cdev_write(struct file *filp, const char *p, size_t len, loff_t *poff)
+{
+ struct slsi_test_cdev_client *client;
+ struct slsi_test_dev *uftestdev;
+ struct sk_buff *skb;
+ struct slsi_skb_cb *cb;
+ u8 *data;
+
+ SLSI_UNUSED_PARAMETER(poff);
+
+ client = (void *)filp->private_data;
+ if (client == NULL) {
+ SLSI_ERR_NODEV("filep private data not set\n");
+ return -EINVAL;
+ }
+
+ if (!client->ufcdev) {
+ SLSI_ERR_NODEV("ufcdev not set\n");
+ return -EINVAL;
+ }
+
+ uftestdev = client->ufcdev->uftestdev;
+ if (!uftestdev) {
+ SLSI_ERR_NODEV("uftestdev not set\n");
+ return -EINVAL;
+ }
+
+ skb = slsi_alloc_skb(len, GFP_KERNEL);
+ data = skb_put(skb, len);
+ if (copy_from_user(data, p, len)) {
+ SLSI_ERR_NODEV("copy from user failed\n");
+ slsi_kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ if (skb->len < sizeof(struct fapi_signal_header)) {
+ SLSI_ERR_NODEV("Data(%d) too short for a signal\n", skb->len);
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ SLSI_DBG_HEX_NODEV(SLSI_TEST, skb->data, skb->len, "cdev write skb:%p skb->data:%p\n", skb, skb->data);
+
+ /* Intercept some requests */
+ if (slsi_test_process_signal(uftestdev, skb))
+ return len;
+
+ {
+ struct slsi_dev *sdev;
+
+ sdev = uftestdev->sdev;
+ if (!sdev) {
+ SLSI_ERR_NODEV("sdev not set\n");
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ cb = slsi_skb_cb_init(skb);
+ cb->sig_length = fapi_get_expected_size(skb);
+ cb->data_length = skb->len;
+
+ if (WARN_ON(slsi_hip_rx(sdev, skb))) {
+ slsi_kfree_skb(skb);
+ return -EINVAL;
+ }
+ }
+ return len;
+}
+
+static long slsi_test_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct slsi_test_cdev_client *client = (void *)filp->private_data;
+ struct slsi_test_dev *uftestdev;
+ struct slsi_dev *sdev;
+ long r = 0;
+ int int_param;
+
+ if (client == NULL || client->ufcdev == NULL)
+ return -EINVAL;
+
+ uftestdev = client->ufcdev->uftestdev;
+ if (!uftestdev) {
+ SLSI_ERR_NODEV("uftestdev not set\n");
+ return -EINVAL;
+ }
+
+ sdev = uftestdev->sdev;
+ if (!sdev) {
+ SLSI_ERR_NODEV("sdev not set\n");
+ return -EINVAL;
+ }
+
+ FUNC_ENTER_NODEV();
+
+ slsi_wakelock(&sdev->wlan_wl);
+
+ switch (cmd) {
+ case UNIFI_GET_UDI_ENABLE:
+ int_param = client->log_enabled;
+ put_user(int_param, (int *)arg);
+ break;
+
+ case UNIFI_SET_UDI_ENABLE:
+ if (get_user(int_param, (int *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+
+ if (int_param) {
+ slsi_log_client_register(sdev, client, udi_log_event, NULL, 0, 0);
+ client->log_enabled = 1;
+ } else {
+ slsi_log_client_unregister(sdev, client);
+ client->log_enabled = 0;
+ }
+
+ break;
+ case UNIFI_SET_UDI_LOG_MASK:
+ {
+ struct unifiio_filter_t filter;
+ int i;
+
+ /* to minimise load on data path, list is converted here to array indexed by signal number */
+ if (copy_from_user(&filter, (void *)arg, sizeof(filter))) {
+ SLSI_ERR(sdev, "UNIFI_SET_UDI_LOG_MASK: Failed to copy from userspace\n");
+ r = -EFAULT;
+ break;
+ }
+ if (filter.signal_ids_n) {
+ char *signal_filter_index;
+ int max;
+ int min;
+ int size = filter.signal_ids_n * sizeof(filter.signal_ids[0]);
+ u16 *signal_ids = kmalloc(size, GFP_KERNEL);
+
+ if (!signal_ids) {
+ r = -ENOMEM;
+ break;
+ }
+
+ max = signal_ids[0];
+ min = signal_ids[0];
+
+ if (copy_from_user(signal_ids, filter.signal_ids, size)) {
+ SLSI_ERR(sdev, "UNIFI_SET_UDI_LOG_MASK: Failed to copy filter from userspace\n");
+ kfree(signal_ids);
+ r = -EFAULT;
+ break;
+ }
+ /* find maximum and minimum signal id in filter */
+ for (i = 0; i < filter.signal_ids_n; i++) {
+ if (signal_ids[i] & UDI_MA_UNITDATA_FILTER_ALLOW_MASK) {
+ client->ma_packet_filter_config |= signal_ids[i];
+ continue;
+ }
+ if (signal_ids[i] > max)
+ max = signal_ids[i];
+ else if (signal_ids[i] < min)
+ min = signal_ids[i];
+ }
+ /* and create array only big enough to index the range of signal id specified */
+ signal_filter_index = kmalloc(max - min + 1, GFP_KERNEL);
+ if (signal_filter_index) {
+ memset(signal_filter_index, 0, max - min + 1);
+ for (i = 0; i < filter.signal_ids_n; i++) {
+ if (signal_ids[i] & UDI_MA_UNITDATA_FILTER_ALLOW_MASK)
+ continue;
+ signal_filter_index[signal_ids[i] - min] = 1;
+ }
+ slsi_log_client_unregister(sdev, client);
+ slsi_log_client_register(sdev, client,
+ filter.log_listed_flag ? send_signal_to_inverse_log_filter :
+ send_signal_to_log_filter, signal_filter_index, min, max);
+ } else {
+ r = -ENOMEM;
+ }
+ kfree(signal_ids);
+ }
+ break;
+ }
+ default:
+ SLSI_WARN(sdev, "Operation (%d) not supported\n", cmd);
+ r = -EINVAL;
+ }
+
+ slsi_wakeunlock(&sdev->wlan_wl);
+ return r;
+}
+
+static unsigned int slsi_test_cdev_poll(struct file *filp, poll_table *wait)
+{
+ struct slsi_test_cdev_client *client = (void *)filp->private_data;
+ unsigned int mask = 0;
+ int ready;
+
+ ready = skb_queue_len(&client->log_list);
+ poll_wait(filp, &client->log_wq, wait);
+ if (ready)
+ mask |= POLLIN | POLLRDNORM; /* readable */
+
+ return mask;
+}
+
+/* we know for sure that there is a filter present in log_client->signal_filter if this function is called.
+ * we know this because it is called only through a function pointer that is assigned
+ * only when a filter is also set up in the log_client
+ */
+static int send_signal_to_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+ int ret = 0;
+ u16 signal_id = fapi_get_u16(skb, id);
+
+ if (signal_id > log_client->max_signal_id || signal_id < log_client->min_signal_id || !log_client->signal_filter[signal_id - log_client->min_signal_id])
+ ret = udi_log_event(log_client, skb, dir);
+
+ return ret;
+}
+
+static int send_signal_to_inverse_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+ int ret = 0;
+ u16 signal_id = fapi_get_u16(skb, id);
+
+ if (signal_id <= log_client->max_signal_id && signal_id >= log_client->min_signal_id && log_client->signal_filter[signal_id - log_client->min_signal_id])
+ ret = udi_log_event(log_client, skb, dir);
+ return ret;
+}
+
+static int udi_log_event(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+ struct slsi_test_cdev_client *client = log_client->log_client_ctx;
+ struct udi_msg_t msg;
+ struct udi_msg_t *msg_skb;
+
+ if (WARN_ON(client == NULL))
+ return -EINVAL;
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+ if (WARN_ON(skb->len == 0))
+ return -EINVAL;
+
+ skb = slsi_skb_copy_expand(skb, sizeof(msg), 0, GFP_ATOMIC);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
+ /* Intercept some requests */
+ if (slsi_test_process_signal(client->ufcdev->uftestdev, skb))
+ return -ECANCELED;
+
+ if (WARN_ON(skb_headroom(skb) < sizeof(msg)))
+ return -ENOMEM;
+
+ msg.length = sizeof(msg) + skb->len;
+ msg.timestamp = jiffies_to_msecs(jiffies);
+ msg.direction = dir;
+ msg.signal_length = fapi_get_siglen(skb);
+
+ msg_skb = (struct udi_msg_t *)skb_push(skb, sizeof(msg));
+ *msg_skb = msg;
+
+ slsi_skb_queue_tail(&client->log_list, skb);
+
+ /* Wake any waiting user process */
+ wake_up_interruptible(&client->log_wq);
+
+ return 0;
+}
+
/* NOTE(review): this macro is defined twice in this file (here and
 * again just below the fops table). The second definition is
 * token-identical, which C permits as a benign redefinition, but one
 * copy should be removed.
 */
#define UF_DEVICE_CREATE(_class, _parent, _devno, _priv, _fmt, _args) \
	device_create(_class, _parent, _devno, _priv, _fmt, _args)

/* File operations for the unit-test UDI character device. */
static const struct file_operations slsi_test_cdev_fops = {
	.owner = THIS_MODULE,
	.open = slsi_test_cdev_open,
	.release = slsi_test_cdev_release,
	.read = slsi_test_cdev_read,
	.write = slsi_test_cdev_write,
	.unlocked_ioctl = slsi_test_cdev_ioctl,
	.compat_ioctl = slsi_test_cdev_ioctl,
	.poll = slsi_test_cdev_poll,
};

/* NOTE(review): duplicate of the identical macro above. */
#define UF_DEVICE_CREATE(_class, _parent, _devno, _priv, _fmt, _args) \
	device_create(_class, _parent, _devno, _priv, _fmt, _args)
+
+static int slsi_get_minor(void)
+{
+ int minor;
+
+ for (minor = 0; minor < SLSI_UDI_MINOR_NODES; minor++)
+ if (uf_cdevs[minor] == 0)
+ return minor;
+ return -1;
+}
+
+static int slsi_test_cdev_create(struct slsi_test_dev *uftestdev, struct device *parent)
+{
+ dev_t devno;
+ int ret;
+ struct slsi_cdev *pdev;
+ int minor;
+
+ FUNC_ENTER_NODEV();
+ minor = slsi_get_minor();
+ if (minor < 0) {
+ SLSI_ERR_NODEV("no minor numbers available\n");
+ return -ENOMEM;
+ }
+
+ pdev = kmalloc(sizeof(*pdev), GFP_KERNEL);
+ if (pdev == NULL)
+ return -ENOMEM;
+ memset(pdev, 0, sizeof(*pdev));
+
+ cdev_init(&pdev->cdev, &slsi_test_cdev_fops);
+ pdev->cdev.owner = THIS_MODULE;
+ pdev->minor = minor;
+ devno = MKDEV(MAJOR(major_number), minor);
+ ret = cdev_add(&pdev->cdev, devno, 1);
+ if (ret) {
+ SLSI_ERR_NODEV("cdev_add failed with %d for minor %d\n", ret, minor);
+ kfree(pdev);
+ return ret;
+ }
+
+ pdev->uftestdev = uftestdev;
+ pdev->parent = parent;
+ if (!UF_DEVICE_CREATE(class, pdev->parent, devno, pdev, UDI_CHAR_DEVICE_NAME "%d", minor)) {
+ cdev_del(&pdev->cdev);
+ kfree(pdev);
+ return -EINVAL;
+ }
+ uftestdev->uf_cdev = (void *)pdev;
+ uftestdev->device_minor_number = minor;
+ uf_cdevs[minor] = pdev;
+ return 0;
+}
+
+static void slsi_test_cdev_destroy(struct slsi_test_dev *uftestdev)
+{
+ struct slsi_cdev *pdev = (struct slsi_cdev *)uftestdev->uf_cdev;
+
+ FUNC_ENTER_NODEV();
+ if (!pdev)
+ return;
+ device_destroy(class, pdev->cdev.dev);
+ cdev_del(&pdev->cdev);
+ uftestdev->uf_cdev = 0;
+ uf_cdevs[pdev->minor] = 0;
+ kfree(pdev);
+}
+
+static int udi_initialised;
+
+int slsi_test_udi_init(void)
+{
+ int ret;
+
+ memset(uf_cdevs, 0, sizeof(uf_cdevs));
+
+ /* Allocate two device numbers for each device. */
+ ret = alloc_chrdev_region(&major_number, 0, SLSI_UDI_MINOR_NODES, UDI_CLASS_NAME);
+ if (ret) {
+ SLSI_ERR_NODEV("Failed to add alloc dev numbers: %d\n", ret);
+ return ret;
+ }
+
+ /* Create a UniFi class */
+ class = class_create(THIS_MODULE, UDI_CLASS_NAME);
+ if (IS_ERR(class)) {
+ SLSI_ERR_NODEV("Failed to create UniFi class\n");
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+ major_number = 0;
+ return -EINVAL;
+ }
+
+ udi_initialised = 1;
+
+ return 0;
+}
+
+int slsi_test_udi_deinit(void)
+{
+ if (!udi_initialised)
+ return -1;
+ class_destroy(class);
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+ udi_initialised = 0;
+ return 0;
+}
+
+int slsi_test_udi_node_init(struct slsi_test_dev *uftestdev, struct device *parent)
+{
+ FUNC_ENTER_NODEV();
+ if (!udi_initialised)
+ return -1;
+ return slsi_test_cdev_create(uftestdev, parent);
+}
+
+int slsi_test_udi_node_reregister(struct slsi_test_dev *uftestdev)
+{
+ struct slsi_cdev *pdev = uftestdev->uf_cdev;
+ int indx;
+
+ if (uftestdev->sdev)
+ for (indx = 0; indx < NUM_CHAR_CLIENTS; indx++)
+ if (pdev->client[indx] != NULL && pdev->client[indx]->log_enabled)
+ slsi_log_client_register(uftestdev->sdev, pdev->client[indx], udi_log_event, NULL, 0, 0);
+
+ return 0;
+}
+
+int slsi_test_udi_node_deinit(struct slsi_test_dev *uftestdev)
+{
+ FUNC_ENTER_NODEV();
+ if (!udi_initialised)
+ return -1;
+ slsi_test_cdev_destroy(uftestdev);
+ return 0;
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_UNITTEST_H__
+#define __SLSI_UNITTEST_H__
+
+#include "dev.h"
+
struct slsi_test_dev;

/* Bottom-half emulation: an ordered workqueue plus a spinlock-guarded
 * "available" gate that lets start/stop race safely with run requests.
 */
struct slsi_test_bh_work {
	bool available;                     /* true while run requests may queue work */
	struct slsi_test_dev *uftestdev;    /* owning test device */
	struct workqueue_struct *workqueue; /* ordered queue executing @work */
	struct work_struct work;
	struct slsi_spinlock spinlock;      /* guards available and workqueue */
};

/* One emulated data route between unit-test devices. */
struct slsi_test_data_route {
	bool configured;
	u16 test_device_minor_number; /* index into slsi_test_devices[] */
	u8 mac[ETH_ALEN];
	u16 vif;
	u8 ipsubnet;
	u16 sequence_number;          /* incremented per routed MA_UNITDATA */
};
+
/* State for one emulated WLAN device in the unit-test harness. */
struct slsi_test_dev {
	/* This is used for:
	 * 1) The uf6kunittesthip<n> chardevice number
	 * 2) The uf6kunittest<n> chardevice number
	 * 3) The /procf/devices/unifi<n> number
	 */
	int device_minor_number;

	void *uf_cdev;         /* struct slsi_cdev * for the UDI node */
	struct device *dev;
	struct slsi_dev *sdev; /* NULL while detached */

	struct workqueue_struct *attach_detach_work_queue;
	/* a std mutex */
	struct mutex attach_detach_mutex;
	struct work_struct attach_work;
	struct work_struct detach_work;
	bool attached;

	u8 hw_addr[ETH_ALEN];  /* emulated MAC address */
	struct slsi_test_bh_work bh_work;

	/* a std spinlock */
	spinlock_t route_spinlock; /* guards route[] */
	struct slsi_test_data_route route[SLSI_AP_PEER_CONNECTIONS_MAX];
};
+
/* Attach/detach the emulated device. */
void slsi_test_dev_attach(struct slsi_test_dev *uftestdev);
void slsi_test_dev_detach(struct slsi_test_dev *uftestdev);
/* Returns true when the signal was intercepted (and consumed) by the test engine. */
bool slsi_test_process_signal(struct slsi_test_dev *uftestdev, struct sk_buff *skb);

/* Per-device UDI char node lifecycle. */
int slsi_test_udi_node_init(struct slsi_test_dev *uftestdev, struct device *parent);
int slsi_test_udi_node_reregister(struct slsi_test_dev *uftestdev);
int slsi_test_udi_node_deinit(struct slsi_test_dev *uftestdev);

/* UDI subsystem lifecycle. */
int slsi_test_udi_init(void);
int slsi_test_udi_deinit(void);

/* Work function executed by the bottom-half workqueue below. */
void slsi_test_bh_work_f(struct work_struct *work);
+static inline int slsi_test_bh_init(struct slsi_test_dev *uftestdev)
+{
+ uftestdev->bh_work.available = false;
+ uftestdev->bh_work.uftestdev = uftestdev;
+ slsi_spinlock_create(&uftestdev->bh_work.spinlock);
+ INIT_WORK(&uftestdev->bh_work.work, slsi_test_bh_work_f);
+ uftestdev->bh_work.workqueue = alloc_ordered_workqueue("slsi_wlan_unittest_bh", 0);
+ if (!uftestdev->bh_work.workqueue)
+ return -ENOMEM;
+ uftestdev->bh_work.available = true;
+ return 0;
+}
+
+static inline void slsi_test_bh_start(struct slsi_test_dev *uftestdev)
+{
+ slsi_spinlock_lock(&uftestdev->bh_work.spinlock);
+ uftestdev->bh_work.available = true;
+ slsi_spinlock_unlock(&uftestdev->bh_work.spinlock);
+}
+
+static inline void slsi_test_bh_run(struct slsi_test_dev *uftestdev)
+{
+ slsi_spinlock_lock(&uftestdev->bh_work.spinlock);
+ if (!uftestdev->bh_work.available)
+ goto exit;
+ queue_work(uftestdev->bh_work.workqueue, &uftestdev->bh_work.work);
+exit:
+ slsi_spinlock_unlock(&uftestdev->bh_work.spinlock);
+}
+
+static inline void slsi_test_bh_stop(struct slsi_test_dev *uftestdev)
+{
+ struct workqueue_struct *workqueue = NULL;
+
+ slsi_spinlock_lock(&uftestdev->bh_work.spinlock);
+ uftestdev->bh_work.available = false;
+ workqueue = uftestdev->bh_work.workqueue;
+ uftestdev->bh_work.workqueue = NULL;
+ slsi_spinlock_unlock(&uftestdev->bh_work.spinlock);
+
+ if (workqueue)
+ flush_workqueue(workqueue);
+}
+
+static inline void slsi_test_bh_deinit(struct slsi_test_dev *uftestdev)
+{
+ struct workqueue_struct *workqueue = NULL;
+
+ slsi_spinlock_lock(&uftestdev->bh_work.spinlock);
+ WARN_ON(uftestdev->bh_work.available);
+ uftestdev->bh_work.available = false;
+ workqueue = uftestdev->bh_work.workqueue;
+ uftestdev->bh_work.workqueue = NULL;
+ slsi_spinlock_unlock(&uftestdev->bh_work.spinlock);
+ if (workqueue) {
+ flush_workqueue(workqueue);
+ destroy_workqueue(workqueue);
+ }
+}
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+#include <linux/ktime.h>
+#include "dev.h"
+#include "debug.h"
+#include "traffic_monitor.h"
+
/* Per-client registration with the traffic monitor. */
struct slsi_traffic_mon_client_entry {
	struct list_head q;  /* link in sdev->traffic_mon_clients.client_list */
	void *client_ctx;    /* opaque context passed back in the callback */
	u32 throughput;      /* last combined tx+rx throughput reported to this client */
	u32 state;           /* current LOW/MID/HIGH event state */
	u32 hysteresis;      /* consecutive samples beyond a threshold before switching state */
	u32 mode;            /* TRAFFIC_MON_CLIENT_MODE_PERIODIC or _EVENTS */
	u32 mid_tput;        /* MID-state threshold; 0 disables */
	u32 high_tput;       /* HIGH-state threshold; 0 disables */
	void (*traffic_mon_client_cb)(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx);
};
+
/* Walk the registered clients and deliver the current tx/rx throughput.
 * PERIODIC clients get every sample with STATE_NONE. EVENTS clients get
 * a callback only on a LOW/MID/HIGH state transition, and a transition
 * requires the threshold to be exceeded for more than the relevant
 * hysteresis count of consecutive samples (the counter resets on every
 * transition). Caller is expected to hold traffic_mon_clients.lock.
 */
static inline void traffic_mon_invoke_client_callback(struct slsi_dev *sdev, u32 tput_tx, u32 tput_rx)
{
	struct list_head *pos, *n;
	struct slsi_traffic_mon_client_entry *traffic_client;

	list_for_each_safe(pos, n, &sdev->traffic_mon_clients.client_list) {
		traffic_client = list_entry(pos, struct slsi_traffic_mon_client_entry, q);

		if (traffic_client->mode == TRAFFIC_MON_CLIENT_MODE_PERIODIC) {
			if (traffic_client->traffic_mon_client_cb)
				traffic_client->traffic_mon_client_cb(traffic_client->client_ctx, TRAFFIC_MON_CLIENT_STATE_NONE, tput_tx, tput_rx);
		} else if (traffic_client->mode == TRAFFIC_MON_CLIENT_MODE_EVENTS) {
			/* Combined tx+rx is compared against the client's thresholds. */
			if ((traffic_client->high_tput) && ((tput_tx + tput_rx) > traffic_client->high_tput)) {
				if (traffic_client->state != TRAFFIC_MON_CLIENT_STATE_HIGH &&
				    (traffic_client->hysteresis++ > SLSI_TRAFFIC_MON_HYSTERESIS_HIGH)) {
					SLSI_DBG1(sdev, SLSI_HIP, "notify traffic event (tput:%u, state:%u --> HIGH)\n", (tput_tx + tput_rx), traffic_client->state);
					traffic_client->hysteresis = 0;
					traffic_client->state = TRAFFIC_MON_CLIENT_STATE_HIGH;

					if (traffic_client->traffic_mon_client_cb)
						traffic_client->traffic_mon_client_cb(traffic_client->client_ctx, TRAFFIC_MON_CLIENT_STATE_HIGH, tput_tx, tput_rx);
				}
			} else if ((traffic_client->mid_tput) && ((tput_tx + tput_rx) > traffic_client->mid_tput)) {
				if (traffic_client->state != TRAFFIC_MON_CLIENT_STATE_MID) {
					/* Entering MID from LOW uses the (longer) HIGH hysteresis,
					 * dropping to MID from HIGH uses the LOW hysteresis.
					 */
					if ((traffic_client->state == TRAFFIC_MON_CLIENT_STATE_LOW && (traffic_client->hysteresis++ > SLSI_TRAFFIC_MON_HYSTERESIS_HIGH)) ||
					    (traffic_client->state == TRAFFIC_MON_CLIENT_STATE_HIGH && (traffic_client->hysteresis++ > SLSI_TRAFFIC_MON_HYSTERESIS_LOW))) {
						SLSI_DBG1(sdev, SLSI_HIP, "notify traffic event (tput:%u, state:%u --> MID)\n", (tput_tx + tput_rx), traffic_client->state);
						traffic_client->hysteresis = 0;
						traffic_client->state = TRAFFIC_MON_CLIENT_STATE_MID;
						if (traffic_client->traffic_mon_client_cb)
							traffic_client->traffic_mon_client_cb(traffic_client->client_ctx, TRAFFIC_MON_CLIENT_STATE_MID, tput_tx, tput_rx);
					}
				}
			} else if (traffic_client->state != TRAFFIC_MON_CLIENT_STATE_LOW &&
				   (traffic_client->hysteresis++ > SLSI_TRAFFIC_MON_HYSTERESIS_LOW)) {
				SLSI_DBG1(sdev, SLSI_HIP, "notify traffic event (tput:%u, state:%u --> LOW\n", (tput_tx + tput_rx), traffic_client->state);
				traffic_client->hysteresis = 0;
				traffic_client->state = TRAFFIC_MON_CLIENT_STATE_LOW;
				if (traffic_client->traffic_mon_client_cb)
					traffic_client->traffic_mon_client_cb(traffic_client->client_ctx, TRAFFIC_MON_CLIENT_STATE_LOW, tput_tx, tput_rx);
			}
		}
		/* Remember the last sample delivered/considered for this client. */
		traffic_client->throughput = (tput_tx + tput_rx);
	}
}
+
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+static void traffic_mon_timer(struct timer_list *t)
+#else
+static void traffic_mon_timer(unsigned long data)
+#endif
+{
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+ struct slsi_traffic_mon_clients *clients = from_timer(clients, t, timer);
+ struct slsi_dev *sdev = container_of(clients, typeof(*sdev), traffic_mon_clients);
+#else
+ struct slsi_dev *sdev = (struct slsi_dev *)data;
+#endif
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ bool stop_monitor;
+ u8 i;
+ u32 tput_rx = 0;
+ u32 tput_tx = 0;
+
+ if (!sdev) {
+ SLSI_ERR_NODEV("invalid sdev\n");
+ return;
+ }
+
+ spin_lock_bh(&sdev->traffic_mon_clients.lock);
+
+ for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
+ dev = sdev->netdev[i];
+ if (dev) {
+ ndev_vif = netdev_priv(dev);
+
+ if (ndev_vif) {
+ u32 time_in_ms = 0;
+
+ /* the Timer is jiffies based so resolution is not High and it may
+ * be off by a few ms. So to accurately measure the throughput find
+ * the time diff between last timer and this one
+ */
+ time_in_ms = ktime_to_ms(ktime_sub(ktime_get(), ndev_vif->last_timer_time));
+
+ /* the Timer may be any value but it still needs to calculate the
+ * throughput over a period of 1 second
+ */
+ ndev_vif->num_bytes_rx_per_sec += ndev_vif->num_bytes_rx_per_timer;
+ ndev_vif->num_bytes_tx_per_sec += ndev_vif->num_bytes_tx_per_timer;
+ ndev_vif->report_time += time_in_ms;
+ if (ndev_vif->report_time >= 1000) {
+ ndev_vif->throughput_rx_bps = (ndev_vif->num_bytes_rx_per_sec * 8 / ndev_vif->report_time) * 1000;
+ ndev_vif->throughput_tx_bps = (ndev_vif->num_bytes_tx_per_sec * 8 / ndev_vif->report_time) * 1000;
+ ndev_vif->num_bytes_rx_per_sec = 0;
+ ndev_vif->num_bytes_tx_per_sec = 0;
+ ndev_vif->report_time = 0;
+ }
+
+ /* throughput per timer interval is measured but extrapolated to 1 sec */
+ ndev_vif->throughput_tx = (ndev_vif->num_bytes_tx_per_timer * 8 / time_in_ms) * 1000;
+ ndev_vif->throughput_rx = (ndev_vif->num_bytes_rx_per_timer * 8 / time_in_ms) * 1000;
+
+ ndev_vif->num_bytes_tx_per_timer = 0;
+ ndev_vif->num_bytes_rx_per_timer = 0;
+ ndev_vif->last_timer_time = ktime_get();
+ tput_tx += ndev_vif->throughput_tx;
+ tput_rx += ndev_vif->throughput_rx;
+ }
+ }
+ }
+
+ traffic_mon_invoke_client_callback(sdev, tput_tx, tput_rx);
+ stop_monitor = list_empty(&sdev->traffic_mon_clients.client_list);
+
+ spin_unlock_bh(&sdev->traffic_mon_clients.lock);
+ if (!stop_monitor)
+ mod_timer(&sdev->traffic_mon_clients.timer, jiffies + msecs_to_jiffies(SLSI_TRAFFIC_MON_TIMER_PERIOD));
+}
+
+inline void slsi_traffic_mon_event_rx(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+
+ /* Apply a correction to length to exclude IP and transport header.
+ * Can either peek into packets to derive the exact payload size
+ * or apply a rough correction to roughly calculate the throughput.
+ * rough correction is applied with a number inbetween IP header (20 bytes) +
+ * UDP header (8 bytes) or TCP header (can be 20 bytes to 60 bytes) i.e. 40
+ */
+ if (skb->len >= 40)
+ ndev_vif->num_bytes_rx_per_timer += (skb->len - 40);
+ else
+ ndev_vif->num_bytes_rx_per_timer += skb->len;
+}
+
+inline void slsi_traffic_mon_event_tx(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
+{
+ struct netdev_vif *ndev_vif = netdev_priv(dev);
+ struct slsi_skb_cb *cb = slsi_skb_cb_get(skb);
+
+ if ((skb->len - cb->sig_length) >= 40)
+ ndev_vif->num_bytes_tx_per_timer += ((skb->len - 40) - cb->sig_length);
+ else
+ ndev_vif->num_bytes_tx_per_timer += (skb->len - cb->sig_length);
+}
+
+u8 slsi_traffic_mon_is_running(struct slsi_dev *sdev)
+{
+ u8 is_running = 0;
+
+ spin_lock_bh(&sdev->traffic_mon_clients.lock);
+ if (!list_empty(&sdev->traffic_mon_clients.client_list))
+ is_running = 1;
+ spin_unlock_bh(&sdev->traffic_mon_clients.lock);
+ return is_running;
+}
+
/* Register a traffic monitor client.
 *
 * Allocates a client entry (GFP_ATOMIC: the allocation happens under the
 * clients spinlock), appends it to the client list, and — if this is the
 * first client — resets all per-interface counters and arms the sampling
 * timer.
 *
 * client_ctx: opaque client identity, passed back in the callback; must be
 *             non-NULL.
 * mode:       periodic or event-based reporting (TRAFFIC_MON_CLIENT_MODE_*).
 * mid_tput/high_tput: throughput thresholds for the MID/HIGH states.
 * traffic_mon_client_cb: invoked from timer (softirq) context.
 *
 * Returns 0 on success, -EINVAL for a NULL client_ctx, -ENOMEM on
 * allocation failure.
 */
int slsi_traffic_mon_client_register(
	struct slsi_dev *sdev,
	void *client_ctx,
	u32 mode,
	u32 mid_tput,
	u32 high_tput,
	void (*traffic_mon_client_cb)(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx))
{
	struct slsi_traffic_mon_client_entry *traffic_mon_client;
	bool start_monitor;
	struct net_device *dev;
	struct netdev_vif *ndev_vif;
	u8 i;

	if (!client_ctx) {
		SLSI_ERR(sdev, "A client context must be provided\n");
		return -EINVAL;
	}

	spin_lock_bh(&sdev->traffic_mon_clients.lock);
	SLSI_DBG1(sdev, SLSI_HIP, "client:%p, mode:%u, mid_tput:%u, high_tput:%u\n", client_ctx, mode, mid_tput, high_tput);
	/* An empty list means the timer is not running and must be started */
	start_monitor = list_empty(&sdev->traffic_mon_clients.client_list);
	traffic_mon_client = kmalloc(sizeof(*traffic_mon_client), GFP_ATOMIC);
	if (!traffic_mon_client) {
		SLSI_ERR(sdev, "could not allocate memory for Monitor client\n");
		spin_unlock_bh(&sdev->traffic_mon_clients.lock);
		return -ENOMEM;
	}

	/* New clients always start in the LOW state with no hysteresis credit */
	traffic_mon_client->client_ctx = client_ctx;
	traffic_mon_client->state = TRAFFIC_MON_CLIENT_STATE_LOW;
	traffic_mon_client->hysteresis = 0;
	traffic_mon_client->mode = mode;
	traffic_mon_client->mid_tput = mid_tput;
	traffic_mon_client->high_tput = high_tput;
	traffic_mon_client->traffic_mon_client_cb = traffic_mon_client_cb;

	/* Add to tail of monitor clients queue */
	list_add_tail(&traffic_mon_client->q, &sdev->traffic_mon_clients.client_list);

	if (start_monitor) {
		/* reset counters before starting Timer */
		for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
			dev = sdev->netdev[i];
			if (dev) {
				ndev_vif = netdev_priv(dev);

				if (ndev_vif) {
					ndev_vif->throughput_tx = 0;
					ndev_vif->throughput_rx = 0;
					ndev_vif->num_bytes_tx_per_timer = 0;
					ndev_vif->num_bytes_rx_per_timer = 0;
					ndev_vif->last_timer_time = ktime_get();
					ndev_vif->num_bytes_rx_per_sec = 0;
					ndev_vif->num_bytes_tx_per_sec = 0;
					ndev_vif->throughput_rx_bps = 0;
					ndev_vif->throughput_tx_bps = 0;
					ndev_vif->report_time = 0;
				}
			}
		}
		mod_timer(&sdev->traffic_mon_clients.timer, jiffies + msecs_to_jiffies(SLSI_TRAFFIC_MON_TIMER_PERIOD));
	}

	spin_unlock_bh(&sdev->traffic_mon_clients.lock);
	return 0;
}
+
/* Unregister a traffic monitor client.
 *
 * Removes (and frees) every client entry whose client_ctx matches. If the
 * list becomes empty, all per-interface counters are reset and the sampling
 * timer is cancelled with del_timer_sync() — the lock is dropped around the
 * sync cancel because the timer handler takes the same lock.
 *
 * NOTE(review): between the unlock and del_timer_sync() a concurrent
 * slsi_traffic_mon_client_register() could re-arm the timer, which would
 * then be cancelled here and never run for the new client — confirm whether
 * callers serialize register/unregister externally.
 */
void slsi_traffic_mon_client_unregister(struct slsi_dev *sdev, void *client_ctx)
{
	struct list_head *pos, *n;
	struct slsi_traffic_mon_client_entry *traffic_mon_client;
	struct net_device *dev;
	struct netdev_vif *ndev_vif;
	u8 i;

	spin_lock_bh(&sdev->traffic_mon_clients.lock);
	SLSI_DBG1(sdev, SLSI_HIP, "client: %p\n", client_ctx);
	list_for_each_safe(pos, n, &sdev->traffic_mon_clients.client_list) {
		traffic_mon_client = list_entry(pos, struct slsi_traffic_mon_client_entry, q);
		if (traffic_mon_client->client_ctx == client_ctx) {
			SLSI_DBG1(sdev, SLSI_HIP, "delete: %p\n", traffic_mon_client->client_ctx);
			list_del(pos);
			kfree(traffic_mon_client);
		}
	}

	if (list_empty(&sdev->traffic_mon_clients.client_list)) {
		/* reset counters */
		for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++) {
			dev = sdev->netdev[i];
			if (dev) {
				ndev_vif = netdev_priv(dev);

				if (ndev_vif) {
					ndev_vif->throughput_tx = 0;
					ndev_vif->throughput_rx = 0;
					ndev_vif->num_bytes_tx_per_timer = 0;
					ndev_vif->num_bytes_rx_per_timer = 0;
					ndev_vif->num_bytes_rx_per_sec = 0;
					ndev_vif->num_bytes_tx_per_sec = 0;
					ndev_vif->throughput_rx_bps = 0;
					ndev_vif->throughput_tx_bps = 0;
					ndev_vif->report_time = 0;
				}
			}
		}
		/* del_timer_sync() may sleep-wait for the handler, which takes
		 * this lock, so the lock must be released around it.
		 */
		spin_unlock_bh(&sdev->traffic_mon_clients.lock);
		del_timer_sync(&sdev->traffic_mon_clients.timer);
		spin_lock_bh(&sdev->traffic_mon_clients.lock);
	}
	spin_unlock_bh(&sdev->traffic_mon_clients.lock);
}
+
+void slsi_traffic_mon_clients_init(struct slsi_dev *sdev)
+{
+ if (!sdev) {
+ SLSI_ERR_NODEV("invalid sdev\n");
+ return;
+ }
+#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
+ timer_setup(&sdev->traffic_mon_clients.timer, traffic_mon_timer, 0);
+#else
+ setup_timer(&sdev->traffic_mon_clients.timer, traffic_mon_timer, (unsigned long)sdev);
+#endif
+ INIT_LIST_HEAD(&sdev->traffic_mon_clients.client_list);
+ spin_lock_init(&sdev->traffic_mon_clients.lock);
+}
+
/* Tear down the traffic monitor: free every registered client and cancel
 * the sampling timer. del_timer_sync() is called after dropping the lock
 * because the timer handler acquires the same lock.
 */
void slsi_traffic_mon_clients_deinit(struct slsi_dev *sdev)
{
	struct list_head *pos, *n;
	struct slsi_traffic_mon_client_entry *traffic_mon_client;

	if (!sdev) {
		SLSI_ERR_NODEV("invalid sdev\n");
		return;
	}

	spin_lock_bh(&sdev->traffic_mon_clients.lock);
	list_for_each_safe(pos, n, &sdev->traffic_mon_clients.client_list) {
		traffic_mon_client = list_entry(pos, struct slsi_traffic_mon_client_entry, q);
		SLSI_DBG1(sdev, SLSI_HIP, "delete: %p\n", traffic_mon_client->client_ctx);
		list_del(pos);
		kfree(traffic_mon_client);
	}
	spin_unlock_bh(&sdev->traffic_mon_clients.lock);
	del_timer_sync(&sdev->traffic_mon_clients.timer);
}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __TRAFFIC_MONITOR_H__
+#define __TRAFFIC_MONITOR_H__
+
#define SLSI_TRAFFIC_MON_TIMER_PERIOD 100 /* in ms */
#define SLSI_TRAFFIC_MON_HYSTERESIS_HIGH 2 /* in number of timer period */
#define SLSI_TRAFFIC_MON_HYSTERESIS_LOW 10 /* in number of timer period */

/* Client reporting mode: periodic callbacks every timer period, or
 * callbacks only on LOW/MID/HIGH state transitions.
 */
enum {
	TRAFFIC_MON_CLIENT_MODE_NONE,
	TRAFFIC_MON_CLIENT_MODE_PERIODIC,
	TRAFFIC_MON_CLIENT_MODE_EVENTS
};

/* Throughput state reported to clients, derived from the per-client
 * mid_tput/high_tput thresholds with hysteresis.
 */
enum {
	TRAFFIC_MON_CLIENT_STATE_NONE,
	TRAFFIC_MON_CLIENT_STATE_LOW,
	TRAFFIC_MON_CLIENT_STATE_MID,
	TRAFFIC_MON_CLIENT_STATE_HIGH
};

/* Shared traffic monitor state embedded in struct slsi_dev: the registered
 * client list, the sampling timer, and the lock protecting both.
 */
struct slsi_traffic_mon_clients {
	/* client list lock */
	spinlock_t lock;

	struct timer_list timer;
	struct list_head client_list;
};
+
/* Account a received/transmitted frame's payload bytes towards the
 * per-interface counters sampled by the traffic monitor timer.
 */
void slsi_traffic_mon_event_rx(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
void slsi_traffic_mon_event_tx(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb);
+
+/* Is traffic monitor running?
+ *
+ * A caller can seek whether the traffic monitor is running or not.
+ * If the traffic monitor is running, then the throughput for Rx and
+ * Tx is stored in netdev_vif structure and caller can access them.
+ *
+ * Returns: 1 - traffic monitor is running
+ * 0 - traffic monitor is NOT running
+ */
+u8 slsi_traffic_mon_is_running(struct slsi_dev *sdev);
+
+/* register a client to traffic monitor
+ *
+ * A client can register to traffic monitor to either get notification
+ * per timer period or to get notification when the throughput changes
+ * state from one state of LOW/MID/HIGH to another.
+ *
+ * client_ctx: client context that is passed back in callback
+ * Also, is an unique ID for the client.
+ * mode: can be periodic or event based
+ * mid_tput: Mid throughput level
+ * high_tput: High throughput level
+ * traffic_mon_client_cb: function to callback each period or on events
+ * This function is called back in Timer interrupt
+ * context.
+ */
+int slsi_traffic_mon_client_register(
+ struct slsi_dev *sdev,
+ void *client_ctx,
+ u32 mode,
+ u32 mid_tput,
+ u32 high_tput,
+ /* WARNING: THIS IS CALLED BACK IN TIMER INTERRUPT CONTEXT! */
+ void (*traffic_mon_client_cb)(void *client_ctx, u32 state, u32 tput_tx, u32 tput_rx));
+void slsi_traffic_mon_client_unregister(struct slsi_dev *sdev, void *client_ctx);
+
+void slsi_traffic_mon_clients_init(struct slsi_dev *sdev);
+void slsi_traffic_mon_clients_deinit(struct slsi_dev *sdev);
+
+#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#include "dev.h"
+#include "debug.h"
+#include "mgt.h"
+#include "mlme.h"
+#include "netif.h"
+#include "log_clients.h"
+#include "hip4_sampler.h"
+#include "traffic_monitor.h"
+
/* When enabled (default), Tx data frames are converted to A-MSDU subframe
 * format before being handed to the firmware (see slsi_tx_data()).
 */
static bool msdu_enable = true;
module_param(msdu_enable, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msdu_enable, "MSDU frame format, Y: enable (default), N: disable");
+
+#ifdef CONFIG_ANDROID
+#include "scsc_wifilogger_rings.h"
+#endif
+
+/**
+ * Needed to get HIP4_DAT)SLOTS...should be part
+ * of initialization and callbacks registering
+ */
+#include "hip4.h"
+
+#include <linux/spinlock.h>
+
+int slsi_get_dwell_time_for_wps(struct slsi_dev *sdev, struct netdev_vif *ndev_vif, u8 *eapol, u16 eap_length)
+{
+ /* Note that Message should not be M8.This check is to identify only WSC_START message or M1-M7 */
+ /*Return 100ms If opcode type WSC msg and Msg Type M1-M7 or if opcode is WSC start.*/
+ if (eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_REQUEST ||
+ eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_RESPONSE) {
+ if (eapol[SLSI_EAP_TYPE_POS] == SLSI_EAP_TYPE_EXPANDED && eap_length >= SLSI_EAP_OPCODE_POS - 3 &&
+ ((eapol[SLSI_EAP_OPCODE_POS] == SLSI_EAP_OPCODE_WSC_MSG && eap_length >= SLSI_EAP_MSGTYPE_POS - 3 &&
+ eapol[SLSI_EAP_MSGTYPE_POS] != SLSI_EAP_MSGTYPE_M8) ||
+ eapol[SLSI_EAP_OPCODE_POS] == SLSI_EAP_OPCODE_WSC_START))
+ return SLSI_EAP_WPS_DWELL_TIME;
+ /* This is to check if a frame is EAP request identity and on P2P vif.If yes then set dwell time to 100ms */
+ if (SLSI_IS_VIF_INDEX_P2P_GROUP(sdev, ndev_vif) &&
+ eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_REQUEST &&
+ eapol[SLSI_EAP_TYPE_POS] == SLSI_EAP_TYPE_IDENTITY)
+ return SLSI_EAP_WPS_DWELL_TIME;
+ }
+ return 0;
+}
+
/* Transmit an EAPOL/WAI control-plane frame via the MLME data path.
 *
 * Classifies the frame (EAPOL-Key M1-M3 vs M4, generic EAP, WAI) to pick
 * the FAPI message type and firmware dwell time, then sends it with
 * slsi_mlme_send_frame_data(). The peer lock is held across the send so
 * the peer record cannot vanish underneath us.
 *
 * Returns 0 on success, -EINVAL if no peer record exists for the
 * destination, -EOPNOTSUPP for unsupported protocols, or the error from
 * slsi_mlme_send_frame_data().
 */
static int slsi_tx_eapol(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer;
	u8 *eapol = NULL;
	u16 msg_type = 0;
	u16 proto = ntohs(skb->protocol);
	int ret = 0;
	u32 dwell_time = sdev->fw_dwell_time;
	u64 tx_bytes_tmp = 0;
	u16 eap_length = 0;

	slsi_spinlock_lock(&ndev_vif->peer_lock);
	peer = slsi_get_peer_from_mac(sdev, dev, eth_hdr(skb)->h_dest);
	if (!peer) {
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		SLSI_NET_WARN(dev, "no peer record for %pM, drop EAP frame\n", eth_hdr(skb)->h_dest);
		return -EINVAL;
	}

	switch (proto) {
	case ETH_P_PAE:
		/* Detect if this is an EAPOL key frame. If so detect if
		 * it is an EAPOL-Key M4 packet
		 * In M4 packet,
		 * - MIC bit set in key info
		 * - Key type bit set in key info (pairwise=1, Group=0)
		 * - Key Data Length would be 0
		 */
		/* 99 bytes is the minimum size of a 4-way-handshake key frame;
		 * smaller frames leave eapol NULL and fall to the EAP branch.
		 */
		if ((skb->len - sizeof(struct ethhdr)) >= 99)
			eapol = skb->data + sizeof(struct ethhdr);
		if (eapol && eapol[SLSI_EAPOL_IEEE8021X_TYPE_POS] == SLSI_IEEE8021X_TYPE_EAPOL_KEY) {
			msg_type = FAPI_MESSAGETYPE_EAPOL_KEY_M123;

			if ((eapol[SLSI_EAPOL_TYPE_POS] == SLSI_EAPOL_TYPE_RSN_KEY || eapol[SLSI_EAPOL_TYPE_POS] == SLSI_EAPOL_TYPE_WPA_KEY) &&
			    (eapol[SLSI_EAPOL_KEY_INFO_LOWER_BYTE_POS] & SLSI_EAPOL_KEY_INFO_KEY_TYPE_BIT_IN_LOWER_BYTE) &&
			    (eapol[SLSI_EAPOL_KEY_INFO_HIGHER_BYTE_POS] & SLSI_EAPOL_KEY_INFO_MIC_BIT_IN_HIGHER_BYTE) &&
			    (eapol[SLSI_EAPOL_KEY_DATA_LENGTH_HIGHER_BYTE_POS] == 0) &&
			    (eapol[SLSI_EAPOL_KEY_DATA_LENGTH_LOWER_BYTE_POS] == 0)) {
				/* M4 terminates the handshake: no extra dwell time needed */
				msg_type = FAPI_MESSAGETYPE_EAPOL_KEY_M4;
				dwell_time = 0;
			}
		} else {
			msg_type = FAPI_MESSAGETYPE_EAP_MESSAGE;
			if ((skb->len - sizeof(struct ethhdr)) >= 9)
				eapol = skb->data + sizeof(struct ethhdr);

			dwell_time = 0;
			if (eapol && eapol[SLSI_EAPOL_IEEE8021X_TYPE_POS] == SLSI_IEEE8021X_TYPE_EAP_PACKET) {
				/* EAP length excludes the 4-byte 802.1X header */
				eap_length = (skb->len - sizeof(struct ethhdr)) - 4;
				if (eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_REQUEST)
					SLSI_INFO(sdev, "Send EAP-Request (%d)\n", eap_length);
				else if (eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_RESPONSE)
					SLSI_INFO(sdev, "Send EAP-Response (%d)\n", eap_length);
				else if (eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_SUCCESS)
					SLSI_INFO(sdev, "Send EAP-Success (%d)\n", eap_length);
				else if (eapol[SLSI_EAP_CODE_POS] == SLSI_EAP_PACKET_FAILURE)
					SLSI_INFO(sdev, "Send EAP-Failure (%d)\n", eap_length);
				/* Need to set dwell time for wps exchange and EAP identity frame for P2P */
				dwell_time = slsi_get_dwell_time_for_wps(sdev, ndev_vif, eapol, eap_length);
			}
		}
		break;
	case ETH_P_WAI:
		SLSI_NET_DBG1(dev, SLSI_MLME, "WAI protocol frame\n");
		msg_type = FAPI_MESSAGETYPE_WAI_MESSAGE;
		if ((skb->data[17]) != 9) /*subtype 9 refers to unicast negotiation response*/
			dwell_time = 0;
		break;
	default:
		SLSI_NET_WARN(dev, "protocol NOT supported\n");
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		return -EOPNOTSUPP;
	}

	/* EAPOL/WAI frames are send via the MLME */
	tx_bytes_tmp = skb->len; /*len copy to avoid null pointer of skb*/
	ret = slsi_mlme_send_frame_data(sdev, dev, skb, msg_type, 0, dwell_time, 0);
	if (!ret)
		peer->sinfo.tx_bytes += tx_bytes_tmp; //skb->len;

	slsi_spinlock_unlock(&ndev_vif->peer_lock);
	return ret;
}
+
+uint slsi_sg_host_align_mask; /* TODO -- this needs to be resolved! */
+
+/**
+ * This function deals with TX of data frames.
+ * On success, skbs are properly FREED; on error skb is NO MORE freed.
+ *
+ * NOTE THAT currently ONLY the following set of err-codes will trigger
+ * a REQUEUE and RETRY by upper layers in Kernel NetStack:
+ *
+ * -ENOSPC
+ */
int slsi_tx_data(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct slsi_skb_cb *cb;
	struct netdev_vif *ndev_vif = netdev_priv(dev);
	struct slsi_peer *peer;
	struct sk_buff *original_skb = NULL;	/* non-NULL iff skb was copied for headroom */
	u16 len = skb->len;
	int ret = 0;
	enum slsi_traffic_q tq;
	u32 dwell_time = 0;
	u8 *frame;
	u16 arp_opcode;
	u32 dhcp_message_type = SLSI_DHCP_MESSAGE_TYPE_INVALID;

	if (slsi_is_test_mode_enabled()) {
		/* This signals is in XML file because parts of the Firmware need the symbols defined by them
		 * but this is actually not handled in wlanlite firmware.
		 */
		SLSI_NET_WARN(dev, "WlanLite: NOT supported\n");
		return -EOPNOTSUPP;
	}

	if (!ndev_vif->activated) {
		SLSI_NET_WARN(dev, "vif NOT activated\n");
		return -EINVAL;
	}

	if ((ndev_vif->vif_type == FAPI_VIFTYPE_AP) && !ndev_vif->peer_sta_records) {
		SLSI_NET_DBG3(dev, SLSI_TX, "AP with no STAs associated, drop Tx frame\n");
		return -EINVAL;
	}

	/* check if it is an high important frame? At the moment EAPOL, DHCP
	 * and ARP are treated as high important frame and are sent over
	 * MLME for applying special rules in transmission.
	 */
	if (skb->queue_mapping == SLSI_NETIF_Q_PRIORITY) {
		int proto = be16_to_cpu(eth_hdr(skb)->h_proto);

		switch (proto) {
		default:
			/* Only EAP packets and IP frames with DHCP are stored in SLSI_NETIF_Q_PRIORITY */
			SLSI_NET_ERR(dev, "Bad h_proto=0x%x in SLSI_NETIF_Q_PRIORITY\n", proto);
			return -EINVAL;
		case ETH_P_PAE:
		case ETH_P_WAI:
			SLSI_NET_DBG2(dev, SLSI_MLME, "transmit EAP packet from SLSI_NETIF_Q_PRIORITY\n");
			return slsi_tx_eapol(sdev, dev, skb);
		case ETH_P_ARP:
			SLSI_NET_DBG2(dev, SLSI_MLME, "transmit ARP frame from SLSI_NETIF_Q_PRIORITY\n");
			frame = skb->data + sizeof(struct ethhdr);
			arp_opcode = frame[SLSI_ARP_OPCODE_OFFSET] << 8 | frame[SLSI_ARP_OPCODE_OFFSET + 1];
			/* Real (non-gratuitous) ARP requests get the firmware dwell time */
			if ((arp_opcode == SLSI_ARP_REQUEST_OPCODE) &&
			    !SLSI_IS_GRATUITOUS_ARP(frame)) {
#ifdef CONFIG_SCSC_WLAN_STA_ENHANCED_ARP_DETECT
				if (ndev_vif->enhanced_arp_detect_enabled &&
				    !memcmp(&frame[SLSI_ARP_DEST_IP_ADDR_OFFSET], &ndev_vif->target_ip_addr, 4)) {
					ndev_vif->enhanced_arp_stats.arp_req_count_from_netdev++;
				}
#endif
				dwell_time = sdev->fw_dwell_time;
			}
			return slsi_mlme_send_frame_data(sdev, dev, skb, FAPI_MESSAGETYPE_ARP, 0, dwell_time, 0);
		case ETH_P_IP:
			/* 285 bytes is the minimum frame length that can hold the DHCP
			 * message-type option byte read at offset 284 below.
			 */
			if (skb->len >= 285 && slsi_is_dhcp_packet(skb->data) != SLSI_TX_IS_NOT_DHCP) {
				if (skb->data[42] == 1) /*opcode 1 refers to DHCP discover/request*/
					dwell_time = sdev->fw_dwell_time;
				dhcp_message_type = skb->data[284];
				if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_DISCOVER)
					SLSI_INFO(sdev, "Send DHCP [DISCOVER]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_OFFER)
					SLSI_INFO(sdev, "Send DHCP [OFFER]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_REQUEST)
					SLSI_INFO(sdev, "Send DHCP [REQUEST]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_DECLINE)
					SLSI_INFO(sdev, "Send DHCP [DECLINE]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_ACK)
					SLSI_INFO(sdev, "Send DHCP [ACK]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_NAK)
					SLSI_INFO(sdev, "Send DHCP [NAK]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_RELEASE)
					SLSI_INFO(sdev, "Send DHCP [RELEASE]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_INFORM)
					SLSI_INFO(sdev, "Send DHCP [INFORM]\n");
				else if (dhcp_message_type == SLSI_DHCP_MESSAGE_TYPE_FORCERENEW)
					SLSI_INFO(sdev, "Send DHCP [FORCERENEW]\n");
				else
					SLSI_INFO(sdev, "Send DHCP [INVALID]\n");
				return slsi_mlme_send_frame_data(sdev, dev, skb, FAPI_MESSAGETYPE_DHCP, 0, dwell_time,
								 0);
			}
			/* IP frame can have only DHCP packet in SLSI_NETIF_Q_PRIORITY */
			SLSI_NET_ERR(dev, "Bad IP frame in SLSI_NETIF_Q_PRIORITY\n");
			return -EINVAL;
		}
	}

	/* Ensure there is room for the FAPI signal header (+160 bytes slack);
	 * otherwise copy the skb into a bigger buffer and keep the original
	 * so the caller's ownership contract (error => skb NOT freed) holds.
	 */
	if (skb_headroom(skb) < (fapi_sig_size(ma_unitdata_req) + 160)) {
		struct sk_buff *skb2 = NULL;

		skb2 = slsi_skb_realloc_headroom(skb, fapi_sig_size(ma_unitdata_req) + 160);
		if (!skb2) {
			SLSI_NET_WARN(dev, "failed to alloc SKB headroom, drop Tx frame\n");
			return -EINVAL;
		}
		/* Keep track of this copy...*/
		original_skb = skb;
		skb = skb2;
	}

	/* Align mac_header with skb->data */
	if (skb_headroom(skb) != skb->mac_header)
		skb_pull(skb, skb->mac_header - skb_headroom(skb));

	if (msdu_enable)
		ethr_ii_to_subframe_msdu(skb);

	len = skb->len;

	/* Build the MA_UNITDATA_REQ FAPI signal in front of the payload */
	(void)skb_push(skb, fapi_sig_size(ma_unitdata_req));
	tq = slsi_frame_priority_to_ac_queue(skb->priority);
	fapi_set_u16(skb, id, MA_UNITDATA_REQ);
	fapi_set_u16(skb, receiver_pid, 0);
	fapi_set_u16(skb, sender_pid, SLSI_TX_PROCESS_ID_MIN);
	fapi_set_u32(skb, fw_reference, 0);
	fapi_set_u16(skb, u.ma_unitdata_req.vif, ndev_vif->ifnum);
	fapi_set_u16(skb, u.ma_unitdata_req.host_tag, slsi_tx_host_tag(sdev, tq));
	fapi_set_u16(skb, u.ma_unitdata_req.peer_index, MAP_QS_TO_AID(slsi_netif_get_qs_from_queue
								     (skb->queue_mapping, tq)));

	SCSC_HIP4_SAMPLER_PKT_TX(sdev->minor_prof, fapi_get_u16(skb, u.ma_unitdata_req.host_tag));

	/* by default the priority is set to contention. It is overridden and set appropriate
	 * priority if peer supports QoS. The broadcast/multicast frames are sent in non-QoS.
	 */
	fapi_set_u16(skb, u.ma_unitdata_req.priority, FAPI_PRIORITY_CONTENTION);

	if (msdu_enable)
		fapi_set_u16(skb, u.ma_unitdata_req.data_unit_descriptor, FAPI_DATAUNITDESCRIPTOR_AMSDU_SUBFRAME);
	else
		fapi_set_u16(skb, u.ma_unitdata_req.data_unit_descriptor, FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME);

	SLSI_NET_DBG_HEX(dev, SLSI_TX, skb->data, skb->len < 128 ? skb->len : 128, "\n");

	cb = slsi_skb_cb_init(skb);
	cb->sig_length = fapi_sig_size(ma_unitdata_req);
	cb->data_length = skb->len;
	/* colour is defined as: */
	/* u16 register bits:
	 * 0 - do not use
	 * [2:1] - vif
	 * [7:3] - peer_index
	 * [10:8] - ac queue
	 */
	cb->colour = (slsi_frame_priority_to_ac_queue(skb->priority) << 8) |
		(fapi_get_u16(skb, u.ma_unitdata_req.peer_index) << 3) | ndev_vif->ifnum << 1;

#ifdef CONFIG_SCSC_WIFILOGGER
	/* Log only the linear skb chunk ... unitdata anyway will be truncated to 100.*/
	SCSC_WLOG_PKTFATE_LOG_TX_DATA_FRAME(fapi_get_u16(skb, u.ma_unitdata_req.host_tag),
					    skb->data, skb_headlen(skb));
#endif

	/* ACCESS POINT MODE */
	if (ndev_vif->vif_type == FAPI_VIFTYPE_AP) {
		struct ethhdr *ehdr = eth_hdr(skb);

		/* Multicast/broadcast frames go to the group queue, not a peer queue */
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			ret = scsc_wifi_fcq_transmit_data(dev,
							  &ndev_vif->ap.group_data_qs,
							  slsi_frame_priority_to_ac_queue(skb->priority),
							  sdev,
							  (cb->colour & 0x6) >> 1,
							  (cb->colour & 0xf8) >> 3);
			if (ret < 0) {
				SLSI_NET_WARN(dev, "no fcq for groupcast, drop Tx frame\n");
				/* Free the local copy here ..if any */
				if (original_skb)
					slsi_kfree_skb(skb);
				return ret;
			}
			ret = scsc_wifi_transmit_frame(&sdev->hip4_inst, false, skb);
			if (ret == NETDEV_TX_OK) {
				/**
				 * Frees the original since the copy has already
				 * been freed downstream
				 */
				if (original_skb)
					slsi_kfree_skb(original_skb);
				return ret;
			} else if (ret < 0) {
				/* scsc_wifi_transmit_frame failed, decrement BoT counters */
				scsc_wifi_fcq_receive_data(dev,
							   &ndev_vif->ap.group_data_qs,
							   slsi_frame_priority_to_ac_queue(skb->priority),
							   sdev,
							   (cb->colour & 0x6) >> 1,
							   (cb->colour & 0xf8) >> 3);
				if (original_skb)
					slsi_kfree_skb(skb);
				return ret;
			}
			if (original_skb)
				slsi_kfree_skb(skb);
			return -EIO;
		}
	}
	slsi_spinlock_lock(&ndev_vif->peer_lock);

	peer = slsi_get_peer_from_mac(sdev, dev, eth_hdr(skb)->h_dest);
	if (!peer) {
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		SLSI_NET_WARN(dev, "no peer record for %pM, drop Tx frame\n", eth_hdr(skb)->h_dest);
		if (original_skb)
			slsi_kfree_skb(skb);
		return -EINVAL;
	}
	/**
	 * skb->priority will contain the priority obtained from the IP Diff/Serv field.
	 * The skb->priority field is defined in terms of the FAPI_PRIORITY_* definitions.
	 * For QoS enabled associations, this is the tid and is the value required in
	 * the ma_unitdata_req.priority field. For non-QoS assocations, the ma_unitdata_req.
	 * priority field requires FAPI_PRIORITY_CONTENTION.
	 */
	if (peer->qos_enabled)
		fapi_set_u16(skb, u.ma_unitdata_req.priority, skb->priority);

	slsi_debug_frame(sdev, dev, skb, "TX");

	ret = scsc_wifi_fcq_transmit_data(dev, &peer->data_qs,
					  slsi_frame_priority_to_ac_queue(skb->priority),
					  sdev,
					  (cb->colour & 0x6) >> 1,
					  (cb->colour & 0xf8) >> 3);
	if (ret < 0) {
		SLSI_NET_WARN(dev, "no fcq for %pM, drop Tx frame\n", eth_hdr(skb)->h_dest);
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		if (original_skb)
			slsi_kfree_skb(skb);
		return ret;
	}

	/* SKB is owned by scsc_wifi_transmit_frame() unless the transmission is
	 * unsuccessful.
	 */
	slsi_traffic_mon_event_tx(sdev, dev, skb);
	ret = scsc_wifi_transmit_frame(&sdev->hip4_inst, false, skb);
	if (ret != NETDEV_TX_OK) {
		/* scsc_wifi_transmit_frame failed, decrement BoT counters */
		scsc_wifi_fcq_receive_data(dev, &peer->data_qs, slsi_frame_priority_to_ac_queue(skb->priority),
					   sdev,
					   (cb->colour & 0x6) >> 1,
					   (cb->colour & 0xf8) >> 3);

		/* -ENOSPC propagates so the caller can requeue and retry */
		if (ret == -ENOSPC) {
			slsi_spinlock_unlock(&ndev_vif->peer_lock);
			if (original_skb)
				slsi_kfree_skb(skb);
			return ret;
		}
		slsi_spinlock_unlock(&ndev_vif->peer_lock);
		if (original_skb)
			slsi_kfree_skb(skb);
		return -EIO;
	}
	/* Frame has been successfully sent, and freed by lower layers */
	slsi_spinlock_unlock(&ndev_vif->peer_lock);
	/* What about the original if we passed in a copy ? */
	if (original_skb)
		slsi_kfree_skb(original_skb);
	peer->sinfo.tx_bytes += len;
	return ret;
}
+
+int slsi_tx_data_lower(struct slsi_dev *sdev, struct sk_buff *skb)
+{
+ struct net_device *dev;
+ struct netdev_vif *ndev_vif;
+ struct slsi_peer *peer;
+ u16 vif;
+ u8 *dest;
+ int ret;
+ struct slsi_skb_cb *cb = slsi_skb_cb_get(skb);
+
+ vif = fapi_get_vif(skb);
+
+ switch (fapi_get_u16(skb, u.ma_unitdata_req.data_unit_descriptor)) {
+ case FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME:
+ if (ntohs(eth_hdr(skb)->h_proto) == ETH_P_PAE || ntohs(eth_hdr(skb)->h_proto) == ETH_P_WAI)
+ return slsi_tx_control(sdev, NULL, skb);
+ dest = eth_hdr(skb)->h_dest;
+ break;
+
+ case FAPI_DATAUNITDESCRIPTOR_AMSDU:
+ /* The AMSDU frame type is an AMSDU payload ready to be prepended by
+ * an 802.11 frame header by the firmware. The AMSDU subframe header
+ * is identical to an Ethernet header in terms of addressing, so it
+ * is safe to access the destination address through the ethernet
+ * structure.
+ */
+ dest = eth_hdr(skb)->h_dest;
+ break;
+ case FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME:
+ dest = ieee80211_get_DA((struct ieee80211_hdr *)fapi_get_data(skb));
+ break;
+ default:
+ SLSI_ERR(sdev, "data_unit_descriptor incorrectly set (0x%02x), dropping TX frame\n",
+ fapi_get_u16(skb, u.ma_unitdata_req.data_unit_descriptor));
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ dev = slsi_get_netdev_rcu(sdev, vif);
+ if (!dev) {
+ SLSI_ERR(sdev, "netdev(%d) No longer exists\n", vif);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ ndev_vif = netdev_priv(dev);
+ rcu_read_unlock();
+
+ if (is_multicast_ether_addr(dest) && ((ndev_vif->vif_type == FAPI_VIFTYPE_AP))) {
+ if (scsc_wifi_fcq_transmit_data(dev, &ndev_vif->ap.group_data_qs,
+ slsi_frame_priority_to_ac_queue(skb->priority),
+ sdev,
+ (cb->colour & 0x6) >> 1,
+ (cb->colour & 0xf8) >> 3) < 0) {
+ SLSI_NET_DBG3(dev, SLSI_TX, "no fcq for groupcast, dropping TX frame\n");
+ return -EINVAL;
+ }
+ ret = scsc_wifi_transmit_frame(&sdev->hip4_inst, false, skb);
+ if (ret == NETDEV_TX_OK)
+ return ret;
+ /**
+ * This should be NEVER RETRIED/REQUEUED and its' handled
+ * by the caller in UDI cdev_write
+ */
+ if (ret == -ENOSPC)
+ SLSI_NET_DBG1(dev, SLSI_TX, "TX_LOWER...Queue Full... BUT Dropping packet\n");
+ else
+ SLSI_NET_DBG1(dev, SLSI_TX, "TX_LOWER...Generic Error...Dropping packet\n");
+ /* scsc_wifi_transmit_frame failed, decrement BoT counters */
+ scsc_wifi_fcq_receive_data(dev, &ndev_vif->ap.group_data_qs,
+ slsi_frame_priority_to_ac_queue(skb->priority),
+ sdev,
+ (cb->colour & 0x6) >> 1,
+ (cb->colour & 0xf8) >> 3);
+ return ret;
+ }
+
+ slsi_spinlock_lock(&ndev_vif->peer_lock);
+ peer = slsi_get_peer_from_mac(sdev, dev, dest);
+ if (!peer) {
+ SLSI_ERR(sdev, "no peer record for %02x:%02x:%02x:%02x:%02x:%02x, dropping TX frame\n",
+ dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]);
+ slsi_spinlock_unlock(&ndev_vif->peer_lock);
+ return -EINVAL;
+ }
+ slsi_debug_frame(sdev, dev, skb, "TX");
+
+ if (fapi_get_u16(skb, u.ma_unitdata_req.priority) == FAPI_PRIORITY_CONTENTION)
+ skb->priority = FAPI_PRIORITY_QOS_UP0;
+ else
+ skb->priority = fapi_get_u16(skb, u.ma_unitdata_req.priority);
+
+ if (scsc_wifi_fcq_transmit_data(dev, &peer->data_qs,
+ slsi_frame_priority_to_ac_queue(skb->priority),
+ sdev,
+ (cb->colour & 0x6) >> 1,
+ (cb->colour & 0xf8) >> 3) < 0) {
+ SLSI_NET_DBG3(dev, SLSI_TX, "no fcq for %02x:%02x:%02x:%02x:%02x:%02x, dropping TX frame\n",
+ eth_hdr(skb)->h_dest[0], eth_hdr(skb)->h_dest[1], eth_hdr(skb)->h_dest[2], eth_hdr(skb)->h_dest[3], eth_hdr(skb)->h_dest[4], eth_hdr(skb)->h_dest[5]);
+ slsi_spinlock_unlock(&ndev_vif->peer_lock);
+ return -EINVAL;
+ }
+ /* SKB is owned by scsc_wifi_transmit_frame() unless the transmission is
+ * unsuccesful.
+ */
+ ret = scsc_wifi_transmit_frame(&sdev->hip4_inst, false, skb);
+ if (ret < 0) {
+ SLSI_NET_DBG1(dev, SLSI_TX, "%s (signal: %d)\n", ret == -ENOSPC ? "Queue is full. Flow control" : "Failed to transmit", fapi_get_sigid(skb));
+ /* scsc_wifi_transmit_frame failed, decrement BoT counters */
+ scsc_wifi_fcq_receive_data(dev, &ndev_vif->ap.group_data_qs,
+ slsi_frame_priority_to_ac_queue(skb->priority),
+ sdev,
+ (cb->colour & 0x6) >> 1,
+ (cb->colour & 0xf8) >> 3);
+ if (ret == -ENOSPC)
+ SLSI_NET_DBG1(dev, SLSI_TX,
+ "TX_LOWER...Queue Full...BUT Dropping packet\n");
+ else
+ SLSI_NET_DBG1(dev, SLSI_TX,
+ "TX_LOWER...Generic Error...Dropping packet\n");
+ slsi_spinlock_unlock(&ndev_vif->peer_lock);
+ return ret;
+ }
+
+ slsi_spinlock_unlock(&ndev_vif->peer_lock);
+ return 0;
+}
+
+/**
+ * NOTE:
+ * 1. dev can be NULL
+ * 2. On error the SKB is NOT freed, NOR retried (ENOSPC dropped).
+ * Callers should take care to free the SKB eventually.
+ */
int slsi_tx_control(struct slsi_dev *sdev, struct net_device *dev, struct sk_buff *skb)
{
	struct slsi_skb_cb *cb;
	int res = 0;
	struct fapi_signal_header *hdr;

	if (WARN_ON(!skb)) {
		res = -EINVAL;
		goto exit;
	}

	/**
	 * Sanity check of the skb - if it's not an MLME, MA, debug or test
	 * signal it will be discarded.
	 * Skip if test mode (wlanlite) is enabled.
	 */
	if (!slsi_is_test_mode_enabled())
		if (!fapi_is_mlme(skb) && !fapi_is_ma(skb) && !fapi_is_debug(skb) && !fapi_is_test(skb)) {
			SLSI_NET_WARN(dev, "Discarding skb because it has type: 0x%04X\n", fapi_get_sigid(skb));
			return -EINVAL;
		}

	cb = slsi_skb_cb_init(skb);
	cb->sig_length = fapi_get_expected_size(skb);
	cb->data_length = skb->len;
	/* F/w will panic if fw_reference is not zero. */
	hdr = (struct fapi_signal_header *)skb->data;
	hdr->fw_reference = 0;

#ifdef CONFIG_SCSC_WIFILOGGER
	/* Log only the linear skb chunk */
	SCSC_WLOG_PKTFATE_LOG_TX_CTRL_FRAME(fapi_get_u16(skb, u.mlme_frame_transmission_ind.host_tag),
					    skb->data, skb_headlen(skb));
#endif

	slsi_debug_frame(sdev, dev, skb, "TX");
	res = scsc_wifi_transmit_frame(&sdev->hip4_inst, true, skb);
	if (res != NETDEV_TX_OK) {
		char reason[80];

		SLSI_NET_ERR(dev, "%s (signal %d)\n", res == -ENOSPC ? "Queue is full. Flow control" : "Failed to transmit", fapi_get_sigid(skb));

		/* Escalate to a service failure only from process context;
		 * slsi_sm_service_failed() is not safe from interrupt context.
		 */
		if (!in_interrupt()) {
			snprintf(reason, sizeof(reason), "Failed to transmit signal 0x%04X (err:%d)", fapi_get_sigid(skb), res);
			slsi_sm_service_failed(sdev, reason);

			res = -EIO;
		}
	}
exit:
	return res;
}
+
+/* Pause all flow-controlled queues; a NULL device is tolerated (no-op). */
+void slsi_tx_pause_queues(struct slsi_dev *sdev)
+{
+	if (sdev)
+		scsc_wifi_fcq_pause_queues(sdev);
+}
+
+/* Resume all flow-controlled queues; a NULL device is tolerated (no-op). */
+void slsi_tx_unpause_queues(struct slsi_dev *sdev)
+{
+	if (sdev)
+		scsc_wifi_fcq_unpause_queues(sdev);
+}
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#include <linux/sysfs.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+
+#include "dev.h"
+
+#include "hip.h"
+#include "log_clients.h"
+#include "mlme.h"
+#include "fw_test.h"
+#include "debug.h"
+#include "udi.h"
+#include "src_sink.h"
+#include "unifiio.h"
+#include "procfs.h"
+
+#ifdef SLSI_TEST_DEV
+#include "unittest.h"
+#define UDI_CHAR_DEVICE_NAME "s5n2560unittest"
+#define UDI_CLASS_NAME "s5n2560test"
+#else
+#define UDI_CHAR_DEVICE_NAME "s5n2560udi"
+#define UDI_CLASS_NAME "s5n2560"
+#endif
+
+#define UDI_LOG_MASK_FILTER_NUM_MAX 5
+
+#define UDI_MIB_SET_LEN_MAX 65535
+#define UDI_MIB_GET_LEN_MAX 2048
+
+#ifndef ETH_P_WAPI
+#define ETH_P_WAPI 0x88b4
+#endif
+
+#define SLSI_IP_TYPE_UDP 0x11
+#define SLSI_DHCP_SERVER_PORT 67
+#define SLSI_DHCP_CLIENT_PORT 68
+#define SLSI_DHCP_MAGIC_OFFSET 272
+#define SLSI_DHCP_MESSAGE_TYPE_ACK 0x05
+
+/**
+ * Control character device for debug
+ * ==================================
+ */
+#define NUM_CHAR_CLIENTS 12 /* Number of client programmes on one node. */
+
+#define MAX_MINOR (SLSI_UDI_MINOR_NODES - 1) /* Maximum node number. */
+static dev_t major_number; /* Major number of device created by system. */
+static struct class *class; /* Device class. */
+
+struct slsi_cdev_client;
+
+/* One UDI character device node (one per phy / minor number). */
+struct slsi_cdev {
+	int minor;	/* Minor number this node was registered under */
+	struct cdev cdev;
+	struct slsi_cdev_client *client[NUM_CHAR_CLIENTS];	/* Attached clients; NULL slots are free */
+
+	struct slsi_dev *sdev;	/* Owning WLAN device instance */
+	struct device *parent;	/* Parent device used when creating the device node */
+};
+
+/* Per-open-file state for one UDI client attached to a cdev node. */
+struct slsi_cdev_client {
+	struct slsi_cdev *ufcdev;	/* Back-pointer to the owning cdev node */
+	int log_enabled;		/* Non-zero once registered as a log client */
+	int log_allow_driver_signals;	/* Also deliver driver-generated signals */
+
+	u16 tx_sender_id;	/* Rolling sender PID within the UDI PID range */
+	struct slsi_fw_test fw_test;
+
+	/* Flags set for special filtering of ma_packet data */
+	u16 ma_unitdata_filter_config;
+
+	u16 ma_unitdata_size_limit;	/* Truncate logged MA_UNITDATA frames to this size (0 = no limit) */
+
+	struct sk_buff_head log_list;	/* Queue of logged signals awaiting read() */
+	struct semaphore log_mutex;	/* Protects the drop counters/flags below */
+	wait_queue_head_t log_wq;	/* Readers sleep here until log_list is non-empty */
+
+	/* Drop Frames and report the number dropped */
+#define UDI_MAX_QUEUED_FRAMES 10000
+#define UDI_RESTART_QUEUED_FRAMES 9000
+
+#define UDI_MAX_QUEUED_DATA_FRAMES 9000
+#define UDI_RESTART_QUEUED_DATA_FRAMES 8000
+
+	/* Start dropping ALL frames at queue_len == UDI_MAX_QUEUED_FRAMES
+	 * Restart queueing ALL frames at queue_len == UDI_RESTART_QUEUED_FRAMES
+	 * Enable MA_PACKET filters at queue_len == UDI_MAX_QUEUED_DATA_FRAMES
+	 * Disable MA_PACKET filters at queue_len == UDI_RESTART_QUEUED_DATA_FRAMES
+	 */
+	u32 log_dropped;		/* Count of frames dropped since the full-queue limit hit */
+	u32 log_dropped_data;		/* Count of basic data frames dropped by the data filter */
+	bool log_drop_data_packets;	/* True while the basic-data drop filter is active */
+};
+
+/* True only when every bit of @filter is enabled in the client's config. */
+static inline bool slsi_cdev_unitdata_filter_allow(struct slsi_cdev_client *client, u16 filter)
+{
+	u16 enabled = client->ma_unitdata_filter_config & filter;
+
+	return enabled == filter;
+}
+
+/* One minor node per phy. In normal driver mode, this may be one.
+ * In unit test mode, this may be several.
+ */
+static struct slsi_cdev *uf_cdevs[SLSI_UDI_MINOR_NODES];
+
+static int udi_log_event(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
+static int send_signal_to_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
+static int send_signal_to_inverse_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir);
+
+/* Return 1 as soon as any client is attached to any cdev node, else 0. */
+int slsi_check_cdev_refs(void)
+{
+	int dev_idx;
+	int cli_idx;
+
+	for (dev_idx = 0; dev_idx < SLSI_UDI_MINOR_NODES; dev_idx++) {
+		const struct slsi_cdev *node = uf_cdevs[dev_idx];
+
+		if (!node)
+			continue;
+
+		for (cli_idx = 0; cli_idx < NUM_CHAR_CLIENTS; cli_idx++)
+			if (node->client[cli_idx])
+				return 1;
+	}
+
+	return 0;
+}
+
+/* Deliver a driver-generated event signal to a UDI client that opted in. */
+int slsi_kernel_to_user_space_event(struct slsi_log_client *log_client, u16 event, u32 data_length, const u8 *data)
+{
+	struct slsi_cdev_client *client = log_client->log_client_ctx;
+	struct sk_buff *event_skb;
+	int err;
+
+	if (WARN_ON(!client))
+		return -EINVAL;
+
+	/* Driver-generated signals are opt-in per client. */
+	if (!client->log_allow_driver_signals)
+		return 0;
+
+	event_skb = fapi_alloc_f(sizeof(struct fapi_signal_header), data_length, event, 0, __FILE__, __LINE__);
+	if (WARN_ON(!event_skb))
+		return -ENOMEM;
+
+	if (data_length)
+		fapi_append_data(event_skb, data, data_length);
+
+	err = udi_log_event(log_client, event_skb, UDI_CONFIG_IND);
+	if (err)
+		SLSI_WARN_NODEV("Udi log event not registered\n");
+
+	/* udi_log_event takes a copy, so ensure that the skb allocated in this
+	 * function is freed again.
+	 */
+	slsi_kfree_skb(event_skb);
+	return err;
+}
+
+/* Open a UDI node: allocate a client slot and per-client state.
+ * Returns 0 on success; -EINVAL for a bad minor, -ENOTSUPP when all
+ * client slots are taken, -ENOMEM on allocation failure.
+ */
+static int slsi_cdev_open(struct inode *inode, struct file *file)
+{
+	struct slsi_cdev *uf_cdev;
+	struct slsi_cdev_client *client;
+	int indx;
+	int minor;
+
+	minor = iminor(inode);
+	if (minor > MAX_MINOR) {
+		SLSI_ERR_NODEV("minor %d exceeds range\n", minor);
+		return -EINVAL;
+	}
+
+	uf_cdev = uf_cdevs[minor];
+	if (!uf_cdev) {
+		SLSI_ERR_NODEV("no cdev instance for minor %d\n", minor);
+		return -EINVAL;
+	}
+
+	/* Find a free client slot on this node. */
+	for (indx = 0; indx < NUM_CHAR_CLIENTS; indx++)
+		if (!uf_cdev->client[indx])
+			break;
+	if (indx >= NUM_CHAR_CLIENTS) {
+		SLSI_ERR_NODEV("already opened\n");
+		return -ENOTSUPP;
+	}
+
+	/* kzalloc() replaces the kmalloc()+memset() anti-idiom */
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	/* init other resource */
+	skb_queue_head_init(&client->log_list);
+	init_waitqueue_head(&client->log_wq);
+	sema_init(&client->log_mutex, 1);
+	client->tx_sender_id = SLSI_TX_PROCESS_ID_UDI_MIN;
+	slsi_fw_test_init(uf_cdev->sdev, &client->fw_test);
+
+	client->ufcdev = uf_cdev;
+	uf_cdev->client[indx] = client;
+	file->private_data = client;
+	slsi_procfs_inc_node();
+
+#ifdef CONFIG_SCSC_MXLOGGER
+	scsc_service_register_observer(NULL, "udi");
+#endif
+
+	SLSI_DBG1_NODEV(SLSI_UDI, "Client:%d added\n", indx);
+
+	return 0;
+}
+
+/* Release a UDI client: detach it from its node and free its state. */
+static int slsi_cdev_release(struct inode *inode, struct file *filp)
+{
+	struct slsi_cdev_client *client = (void *)filp->private_data;
+	struct slsi_cdev *cdev_node;
+	int idx;
+	int minor = iminor(inode);
+
+	if (minor > MAX_MINOR) {
+		SLSI_ERR_NODEV("minor %d exceeds range\n", minor);
+		return -EINVAL;
+	}
+
+	cdev_node = uf_cdevs[minor];
+	if (!cdev_node) {
+		SLSI_ERR_NODEV("no cdev instance for minor %d\n", minor);
+		return -EINVAL;
+	}
+
+	if (!client)
+		return -EINVAL;
+
+	/* The client must be attached to this node. */
+	for (idx = 0; idx < NUM_CHAR_CLIENTS; idx++)
+		if (cdev_node->client[idx] == client)
+			break;
+	if (idx >= NUM_CHAR_CLIENTS) {
+		SLSI_ERR_NODEV("client not found in list\n");
+		return -EINVAL;
+	}
+
+	/* Unblock any reader sleeping in slsi_cdev_read(). */
+	if (waitqueue_active(&client->log_wq))
+		wake_up_interruptible(&client->log_wq);
+
+	if (client->log_enabled)
+		slsi_log_client_unregister(client->ufcdev->sdev, client);
+
+	slsi_skb_queue_purge(&client->log_list);
+
+	slsi_fw_test_deinit(cdev_node->sdev, &client->fw_test);
+	cdev_node->client[idx] = NULL;
+
+	/* free other resource */
+	kfree(client);
+	slsi_procfs_dec_node();
+
+#ifdef CONFIG_SCSC_MXLOGGER
+	scsc_service_unregister_observer(NULL, "udi");
+#endif
+
+	SLSI_DBG1_NODEV(SLSI_UDI, "Client:%d removed\n", idx);
+
+	return 0;
+}
+
+/* Read one queued UDI signal; blocks unless O_NONBLOCK. Oversized
+ * messages are truncated to the caller's buffer length.
+ */
+static ssize_t slsi_cdev_read(struct file *filp, char *p, size_t len, loff_t *poff)
+{
+	struct slsi_cdev_client *client = (void *)filp->private_data;
+	struct slsi_dev *sdev;
+	struct sk_buff *skb;
+	int out_len;
+
+	SLSI_UNUSED_PARAMETER(poff);
+
+	if (!client)
+		return -EINVAL;
+
+	/* Nothing queued: non-blocking readers get 0, the rest sleep. */
+	if (!skb_queue_len(&client->log_list)) {
+		if (filp->f_flags & O_NONBLOCK)
+			return 0;
+
+		/* wait until getting a signal */
+		if (wait_event_interruptible(client->log_wq, skb_queue_len(&client->log_list))) {
+			SLSI_ERR_NODEV("slsi_cdev_read: wait_event_interruptible failed.\n");
+			return -ERESTARTSYS;
+		}
+	}
+
+	sdev = client->ufcdev->sdev;
+	if (!sdev) {
+		SLSI_ERR_NODEV("sdev not set\n");
+		return -EINVAL;
+	}
+
+	skb = slsi_skb_dequeue(&client->log_list);
+	if (!skb) {
+		SLSI_ERR(sdev, "No Data\n");
+		return -EINVAL;
+	}
+
+	slsi_fw_test_signal_with_udi_header(sdev, &client->fw_test, skb);
+
+	out_len = skb->len;
+	if (out_len > (s32)len) {
+		SLSI_WARN(sdev, "truncated read to %d actual msg len is %lu\n", out_len, (unsigned long int)len);
+		out_len = len;
+	}
+
+	if (copy_to_user(p, skb->data, out_len)) {
+		SLSI_ERR(sdev, "Failed to copy UDI log to user\n");
+		slsi_kfree_skb(skb);
+		return -EFAULT;
+	}
+
+	slsi_kfree_skb(skb);
+	return out_len;
+}
+
+/* Inject a FAPI signal written by userspace: req/res (and everything in
+ * wlanlite test mode) is transmitted to the firmware; anything else is
+ * looped back into the driver's rx path.
+ * Returns len on success or a negative errno.
+ */
+static ssize_t slsi_cdev_write(struct file *filp, const char *p, size_t len, loff_t *poff)
+{
+	struct slsi_cdev_client *client;
+	struct slsi_dev *sdev;
+	struct sk_buff *skb;
+	u8 *data;
+	struct slsi_skb_cb *cb;
+
+	SLSI_UNUSED_PARAMETER(poff);
+
+	client = (void *)filp->private_data;
+	if (!client) {
+		SLSI_ERR_NODEV("filep private data not set\n");
+		return -EINVAL;
+	}
+
+	if (!client->ufcdev) {
+		SLSI_ERR_NODEV("ufcdev not set\n");
+		return -EINVAL;
+	}
+
+	sdev = client->ufcdev->sdev;
+	if (!sdev) {
+		SLSI_ERR_NODEV("sdev not set\n");
+		return -EINVAL;
+	}
+	skb = slsi_alloc_skb_headroom(len, GFP_KERNEL);
+	/* Fix: the allocation was used unchecked, crashing in skb_put() on
+	 * allocation failure.
+	 */
+	if (!skb)
+		return -ENOMEM;
+	data = skb_put(skb, len);
+	if (copy_from_user(data, p, len)) {
+		SLSI_ERR(sdev, "copy from user failed\n");
+		slsi_kfree_skb(skb);
+		return -EFAULT;
+	}
+
+	cb = slsi_skb_cb_init(skb);
+	cb->sig_length = fapi_get_expected_size(skb);
+	cb->data_length = skb->len;
+	/* colour is defined as: */
+	/* u16 register bits:
+	 * 0 - do not use
+	 * [2:1] - vif
+	 * [7:3] - peer_index
+	 * [10:8] - ac queue
+	 */
+	if (fapi_is_ma(skb))
+		cb->colour = (slsi_frame_priority_to_ac_queue(skb->priority) << 8) |
+			(fapi_get_u16(skb, u.ma_unitdata_req.peer_index) << 3) | (fapi_get_u16(skb, u.ma_unitdata_req.vif) << 1);
+
+	/* F/w will panic if fw_reference is not zero. */
+	fapi_set_u32(skb, fw_reference, 0);
+	/* set mac header uses values from above initialized cb */
+	skb_set_mac_header(skb, fapi_get_data(skb) - skb->data);
+
+	SLSI_DBG3_NODEV(SLSI_UDI,
+			"UDI Signal:%.4X SigLEN:%d DataLen:%d SKBHeadroom:%d bytes:%d\n",
+			fapi_get_sigid(skb), fapi_get_siglen(skb),
+			fapi_get_datalen(skb), skb_headroom(skb), (int)len);
+
+	/* In WlanLite test mode req signals IDs are 0x1000, 0x1002, 0x1004 */
+	if (slsi_is_test_mode_enabled() || fapi_is_req(skb) || fapi_is_res(skb)) {
+		/* Use the range of PIDs allocated to the udi clients */
+		client->tx_sender_id++;
+		if (client->tx_sender_id > SLSI_TX_PROCESS_ID_UDI_MAX)
+			client->tx_sender_id = SLSI_TX_PROCESS_ID_UDI_MIN;
+
+		fapi_set_u16(skb, sender_pid, client->tx_sender_id);
+		if (!slsi_is_test_mode_enabled())
+			slsi_fw_test_signal(sdev, &client->fw_test, skb);
+		if (fapi_is_ma(skb)) {
+			if (slsi_tx_data_lower(sdev, skb)) {
+				slsi_kfree_skb(skb);
+				return -EINVAL;
+			}
+		} else if (slsi_tx_control(sdev, NULL, skb)) {
+			slsi_kfree_skb(skb);
+			return -EINVAL;
+		}
+	} else if (slsi_hip_rx(sdev, skb)) {
+		slsi_kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	return len;
+}
+
+/* UDI ioctl entry point.
+ * Fixes applied:
+ *  - both kmalloc() results were passed to copy_from_user() unchecked
+ *    (NULL dereference on allocation failure);
+ *  - mib_data_length was never validated against mib_data_size, allowing
+ *    copy_from_user() to overflow the heap buffer;
+ *  - put_user() return value was ignored in UNIFI_GET_UDI_ENABLE;
+ *  - two UNIFI_GET_MIB error messages wrongly said "UNIFI_SET_MIB".
+ */
+static long slsi_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct slsi_cdev_client *client = (void *)filp->private_data;
+	struct slsi_dev *sdev;
+	long r = 0;
+	int int_param;
+	u32 mib_data_length; /* Length of valid Mib data in the buffer */
+	u32 mib_data_size; /* Size of the mib buffer */
+	unsigned char *mib_data; /* Mib Input/Output Buffer */
+	u16 mib_vif;
+
+	if (!client || !client->ufcdev)
+		return -EINVAL;
+	sdev = client->ufcdev->sdev;
+
+	slsi_wakelock(&sdev->wlan_wl);
+
+	switch (cmd) {
+	case UNIFI_GET_UDI_ENABLE:
+		int_param = client->log_enabled;
+		if (put_user(int_param, (int *)arg))
+			r = -EFAULT;
+		break;
+
+	case UNIFI_SET_UDI_ENABLE:
+		if (get_user(int_param, (int *)arg)) {
+			r = -EFAULT;
+			break;
+		}
+
+		if (int_param) {
+			slsi_log_client_register(sdev, client, udi_log_event, NULL, 0, 0);
+			client->log_enabled = 1;
+			/* Values > 1 additionally enable driver-generated signals */
+			if (int_param > 1)
+				client->log_allow_driver_signals = 1;
+		} else {
+			slsi_log_client_unregister(sdev, client);
+			client->log_enabled = 0;
+		}
+
+		break;
+
+	case UNIFI_SET_UDI_LOG_CONFIG:
+	{
+		struct unifiio_udi_config_t config;
+
+		if (copy_from_user(&config, (void *)arg, sizeof(config))) {
+			SLSI_ERR(sdev, "UNIFI_SET_UDI_LOG_CONFIG: Failed to copy from userspace\n");
+			r = -EFAULT;
+			break;
+		}
+
+		client->ma_unitdata_size_limit = config.ma_unitdata_size_limit;
+		break;
+	}
+	case UNIFI_SET_UDI_LOG_MASK:
+	{
+		struct unifiio_filter_t filter;
+		int i;
+
+		/* to minimise load on data path, list is converted here to array indexed by signal number */
+		if (copy_from_user(&filter, (void *)arg, sizeof(filter))) {
+			SLSI_ERR(sdev, "UNIFI_SET_UDI_LOG_MASK: Failed to copy from userspace\n");
+			r = -EFAULT;
+			break;
+		}
+
+		if (unlikely(filter.signal_ids_n > UDI_LOG_MASK_FILTER_NUM_MAX)) {
+			SLSI_ERR(sdev, "UNIFI_SET_UDI_LOG_MASK: number of filters too long\n");
+			r = -EFAULT;
+			break;
+		}
+
+		if (filter.signal_ids_n) {
+			char *signal_filter_index;
+			int max;
+			int min;
+
+			max = filter.signal_ids[0];
+			min = filter.signal_ids[0];
+
+			/* find maximum and minimum signal id in filter */
+			for (i = 0; i < filter.signal_ids_n; i++) {
+				if (filter.signal_ids[i] & UDI_MA_UNITDATA_FILTER_ALLOW_MASK) {
+					client->ma_unitdata_filter_config |= filter.signal_ids[i];
+					continue;
+				}
+				if (filter.signal_ids[i] > max)
+					max = filter.signal_ids[i];
+				else if (filter.signal_ids[i] < min)
+					min = filter.signal_ids[i];
+			}
+			/* and create array only big enough to index the range of signal id specified */
+			signal_filter_index = kmalloc(max - min + 1, GFP_KERNEL);
+			if (signal_filter_index) {
+				memset(signal_filter_index, 0, max - min + 1);
+				for (i = 0; i < filter.signal_ids_n; i++) {
+					if (filter.signal_ids[i] & UDI_MA_UNITDATA_FILTER_ALLOW_MASK)
+						continue;
+					signal_filter_index[filter.signal_ids[i] - min] = 1;
+				}
+				slsi_log_client_unregister(sdev, client);
+				slsi_log_client_register(sdev, client,
+							 filter.log_listed_flag ? send_signal_to_inverse_log_filter :
+							 send_signal_to_log_filter, signal_filter_index, min, max);
+			} else {
+				r = -ENOMEM;
+			}
+		}
+		break;
+	}
+	case UNIFI_SET_MIB:
+	{
+		struct net_device *dev = NULL;
+
+		if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Device not yet available\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* First 2 Bytes are the VIF */
+		if (copy_from_user((void *)&mib_vif, (void *)arg, 2)) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Failed to copy in vif\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* First 4 Bytes are the Number of Bytes of input Data */
+		if (copy_from_user((void *)&mib_data_length, (void *)(arg + 2), 4)) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Failed to copy in mib_data_length\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* Second 4 Bytes are the size of the Buffer */
+		if (copy_from_user((void *)&mib_data_size, (void *)(arg + 6), 4)) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Failed to copy in mib_data_size\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* check if length is valid: both within the absolute limit and
+		 * length no larger than the buffer it is copied into
+		 */
+		if (unlikely(mib_data_length > UDI_MIB_SET_LEN_MAX || mib_data_size > UDI_MIB_SET_LEN_MAX ||
+			     mib_data_length > mib_data_size)) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: size too long (mib_data_length:%u mib_data_size:%u)\n", mib_data_length, mib_data_size);
+			r = -EFAULT;
+			break;
+		}
+
+		mib_data = kmalloc(mib_data_size, GFP_KERNEL);
+		if (!mib_data) {
+			r = -ENOMEM;
+			break;
+		}
+
+		/* Read the rest of the Mib Data */
+		if (copy_from_user((void *)mib_data, (void *)(arg + 10), mib_data_length)) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Failed to copy in mib_data\n");
+			kfree(mib_data);
+			r = -EFAULT;
+			break;
+		}
+
+		SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+		dev = slsi_get_netdev_locked(sdev, mib_vif);
+		if (mib_vif != 0 && !dev) {
+			SLSI_ERR(sdev, "UNIFI_SET_MIB: Failed - net_device is NULL for interface = %d\n", mib_vif);
+			kfree(mib_data);
+			r = -EFAULT;
+			SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+			break;
+		}
+
+		r = slsi_mlme_set(sdev, dev, mib_data, mib_data_length);
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+		kfree(mib_data);
+		break;
+	}
+	case UNIFI_GET_MIB:
+	{
+		struct net_device *dev = NULL;
+
+		if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Device not yet available\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* First 2 Bytes are the VIF */
+		if (copy_from_user((void *)&mib_vif, (void *)arg, 2)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in vif\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* First 4 Bytes are the Number of Bytes of input Data */
+		if (copy_from_user((void *)&mib_data_length, (void *)(arg + 2), 4)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in mib_data_length\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* Second 4 Bytes are the size of the Buffer */
+		if (copy_from_user((void *)&mib_data_size, (void *)(arg + 6), 4)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in mib_data_size\n");
+			r = -EFAULT;
+			break;
+		}
+
+		/* check if length is valid: both within the absolute limit and
+		 * length no larger than the buffer it is copied into
+		 */
+		if (unlikely(mib_data_length > UDI_MIB_GET_LEN_MAX || mib_data_size > UDI_MIB_GET_LEN_MAX ||
+			     mib_data_length > mib_data_size)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: size too long (mib_data_length:%u mib_data_size:%u)\n", mib_data_length, mib_data_size);
+			r = -EFAULT;
+			break;
+		}
+
+		mib_data = kmalloc(mib_data_size, GFP_KERNEL);
+		if (!mib_data) {
+			r = -ENOMEM;
+			break;
+		}
+
+		/* Read the rest of the Mib Data */
+		if (copy_from_user((void *)mib_data, (void *)(arg + 10), mib_data_length)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in mib_data\n");
+			kfree(mib_data);
+			r = -EFAULT;
+			break;
+		}
+
+		SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
+		dev = slsi_get_netdev_locked(sdev, mib_vif);
+		if (mib_vif != 0 && !dev) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed - net_device is NULL for interface = %d\n", mib_vif);
+			kfree(mib_data);
+			r = -EFAULT;
+			SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+			break;
+		}
+		if (slsi_mlme_get(sdev, dev, mib_data, mib_data_length, mib_data, mib_data_size, &mib_data_length)) {
+			kfree(mib_data);
+			r = -EINVAL;
+			SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+			break;
+		}
+
+		SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
+
+		/* Check the buffer is big enough */
+		if (mib_data_length > mib_data_size) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Mib result data is to long. (%d bytes when the max is %d bytes)\n", mib_data_length, mib_data_size);
+			kfree(mib_data);
+			r = -EINVAL;
+			break;
+		}
+
+		/* Copy back the number of Bytes in the Mib result */
+		if (copy_to_user((void *)arg, (void *)&mib_data_length, 4)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in mib_data_length back to user\n");
+			kfree(mib_data);
+			r = -EINVAL;
+			break;
+		}
+
+		/* Copy back the Mib data */
+		if (copy_to_user((void *)(arg + 4), mib_data, mib_data_length)) {
+			SLSI_ERR(sdev, "UNIFI_GET_MIB: Failed to copy in mib_data back to user\n");
+			kfree(mib_data);
+			r = -EINVAL;
+			break;
+		}
+		kfree(mib_data);
+		break;
+	}
+	case UNIFI_SRC_SINK_IOCTL:
+		if (sdev->device_state != SLSI_DEVICE_STATE_STARTED) {
+			SLSI_ERR(sdev, "UNIFI_SRC_SINK_IOCTL: Device not yet available\n");
+			r = -EFAULT;
+			break;
+		}
+		r = slsi_src_sink_cdev_ioctl_cfg(sdev, arg);
+		break;
+
+	case UNIFI_SOFTMAC_CFG:
+	{
+		u32 softmac_cmd;
+		u8 cmd_param_size;
+
+		SLSI_ERR(sdev, "UNIFI_SOFTMAC_CFG\n");
+
+		if (copy_from_user((void *)&softmac_cmd, (void *)arg, 4)) {
+			SLSI_ERR(sdev, "Failed to get the command\n");
+			r = -EFAULT;
+			break;
+		}
+		SLSI_DBG3_NODEV(SLSI_UDI, "softmac_cmd -> %u\n", softmac_cmd);
+
+		arg += sizeof(softmac_cmd); /* Advance past the command bit */
+		/* NOTE(review): arg has already been advanced, so this reads at
+		 * original offset 8, not 4 — looks like a double advance; kept
+		 * as-is to preserve the userspace ABI, but worth confirming.
+		 */
+		if (copy_from_user((void *)&cmd_param_size, (void *)(arg + 4), 1)) {
+			SLSI_ERR(sdev, "Failed to get the command size\n");
+			r = -EFAULT;
+			break;
+		}
+		SLSI_DBG3_NODEV(SLSI_UDI, "cmd_param_size -> %u\n", cmd_param_size);
+
+		if (cmd_param_size)
+			client->ma_unitdata_filter_config = UDI_MA_UNITDATA_FILTER_ALLOW_EAPOL_ID;
+		else
+			client->ma_unitdata_filter_config = 0;
+		break;
+	}
+	default:
+		SLSI_WARN(sdev, "Operation (%d) not supported\n", cmd);
+		r = -EINVAL;
+	}
+
+	slsi_wakeunlock(&sdev->wlan_wl);
+	return r;
+}
+
+/* Poll callback: readable whenever the client's log queue is non-empty. */
+static unsigned int slsi_cdev_poll(struct file *filp, poll_table *wait)
+{
+	struct slsi_cdev_client *client = (void *)filp->private_data;
+
+	SLSI_DBG4_NODEV(SLSI_UDI, "Poll(%d)\n", skb_queue_len(&client->log_list));
+
+	/* Data already queued: readable right away. */
+	if (skb_queue_len(&client->log_list))
+		return POLLIN | POLLRDNORM;
+
+	/* Register on the wait queue, then re-check to close the race. */
+	poll_wait(filp, &client->log_wq, wait);
+
+	if (skb_queue_len(&client->log_list))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+/* we know for sure that there is a filter present in log_client->signal_filter if this function is called.
+ * we know this because it is called only through a function pointer that is assigned
+ * only when a filter is also set up in the log_client
+ */
+/* Log every signal EXCEPT those listed in the filter array. */
+static int send_signal_to_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+	u16 signal_id = fapi_get_sigid(skb);
+	bool listed = signal_id >= log_client->min_signal_id &&
+		      signal_id <= log_client->max_signal_id &&
+		      log_client->signal_filter[signal_id - log_client->min_signal_id];
+
+	if (listed)
+		return 0;
+
+	return udi_log_event(log_client, skb, dir);
+}
+
+/* Log ONLY the signals listed in the filter array. */
+static int send_signal_to_inverse_log_filter(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+	u16 signal_id = fapi_get_sigid(skb);
+
+	if (signal_id < log_client->min_signal_id || signal_id > log_client->max_signal_id)
+		return 0;
+	if (!log_client->signal_filter[signal_id - log_client->min_signal_id])
+		return 0;
+
+	return udi_log_event(log_client, skb, dir);
+}
+
+/* Decide whether an IPv4 frame passes the key-frame filter.
+ * Only DHCP over UDP (ports 67/68) is allowed through.
+ */
+static bool is_allowed_ip_frame(struct ethhdr *ehdr, u16 signal_id)
+{
+	u8 *ip_frame = ((u8 *)ehdr) + sizeof(struct ethhdr);
+	u8 hlen = ip_frame[0] & 0x0F;	/* IPv4 IHL, in 32-bit words */
+	u8 ip_proto = ip_frame[9];
+	u16 ip_data_offset = 20;
+	u8 *ip_data;
+
+	SLSI_UNUSED_PARAMETER(signal_id);
+
+	/* Account for IPv4 options, if present. */
+	if (hlen > 5)
+		ip_data_offset += (hlen - 5) * 4;
+
+	ip_data = ip_frame + ip_data_offset;
+
+	if (ip_proto == SLSI_IP_TYPE_UDP) {
+		u16 srcport = ip_data[0] << 8 | ip_data[1];
+		u16 dstport = ip_data[2] << 8 | ip_data[3];
+
+		SLSI_DBG3_NODEV(SLSI_UDI, "FILTER(0x%.4X) Key -> Proto(0x%.4X) -> IpProto(%d) ->UDP(s:%d, d:%d)\n", signal_id, ntohs(ehdr->h_proto), ip_proto, srcport, dstport);
+		if (srcport == SLSI_DHCP_CLIENT_PORT || srcport == SLSI_DHCP_SERVER_PORT ||
+		    dstport == SLSI_DHCP_CLIENT_PORT || dstport == SLSI_DHCP_SERVER_PORT) {
+			SLSI_DBG3_NODEV(SLSI_UDI, "FILTER(0x%.4X) Key -> Proto(0x%.4X) -> IpProto(%d) ->UDP(s:%d, d:%d) ALLOW\n", signal_id, ntohs(ehdr->h_proto), ip_proto, srcport, dstport);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Copy one signal into a client's log queue, applying MA_PACKET filtering
+ * and the two-level queue-overflow back-pressure scheme described on
+ * struct slsi_cdev_client. Takes a copy of @skb; the caller keeps
+ * ownership of the original. Returns 0 when queued, -ECANCELED when
+ * filtered/dropped, or a negative errno.
+ */
+static int udi_log_event(struct slsi_log_client *log_client, struct sk_buff *skb, int dir)
+{
+	struct slsi_cdev_client *client = log_client->log_client_ctx;
+	struct udi_msg_t msg;
+	struct udi_msg_t *msg_skb;
+	u16 signal_id = fapi_get_sigid(skb);
+
+	if (WARN_ON(!client))
+		return -EINVAL;
+	if (WARN_ON(!skb))
+		return -EINVAL;
+	if (WARN_ON(skb->len == 0))
+		return -EINVAL;
+
+	/* Special Filtering of MaPacket frames */
+	if (slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_MASK) &&
+	    (signal_id == MA_UNITDATA_REQ || signal_id == MA_UNITDATA_IND)) {
+		u16 frametype;
+
+		SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X)\n", signal_id);
+		if (signal_id == MA_UNITDATA_REQ)
+			frametype = fapi_get_u16(skb, u.ma_unitdata_req.data_unit_descriptor);
+		else
+			frametype = fapi_get_u16(skb, u.ma_unitdata_ind.data_unit_descriptor);
+		SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) frametype:%d\n", signal_id, frametype);
+
+		if (frametype == FAPI_DATAUNITDESCRIPTOR_IEEE802_3_FRAME) {
+			struct ethhdr *ehdr = (struct ethhdr *)fapi_get_data(skb);
+
+			/* NOTE(review): this re-assignment is identical to the
+			 * initializer above and therefore redundant.
+			 */
+			if (signal_id == MA_UNITDATA_REQ)
+				ehdr = (struct ethhdr *)fapi_get_data(skb);
+
+			/* EAPOL/WAI key exchanges are always allowed through. */
+			if (slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_EAPOL_ID) ||
+			    slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_KEY_ID)) {
+				SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) Eap -> Proto(0x%.4X)\n", signal_id, ntohs(ehdr->h_proto));
+				switch (ntohs(ehdr->h_proto)) {
+				case ETH_P_PAE:
+				case ETH_P_WAI:
+					SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) Eap -> Proto(0x%.4X) ALLOW\n", signal_id, ntohs(ehdr->h_proto));
+					goto allow_frame;
+				default:
+					break;
+				}
+			}
+
+			/* ARP and selected IP frames (DHCP) count as key frames. */
+			if (slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_KEY_ID)) {
+				SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) Key -> Proto(0x%.4X)\n", signal_id, ntohs(ehdr->h_proto));
+				switch (ntohs(ehdr->h_proto)) {
+				case ETH_P_ARP:
+					SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) Key -> Proto(0x%.4X) -> Arp ALLOW\n", signal_id, ntohs(ehdr->h_proto));
+					goto allow_frame;
+				case ETH_P_IP:
+					if (is_allowed_ip_frame(ehdr, signal_id))
+						goto allow_frame;
+				default:
+					break;
+				}
+			}
+		}
+		if (frametype == FAPI_DATAUNITDESCRIPTOR_IEEE802_11_FRAME)
+			if (slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_MGT_ID))
+				if (ieee80211_is_mgmt(fapi_get_mgmt(skb)->frame_control))
+					goto allow_frame;
+
+		SLSI_DBG4_NODEV(SLSI_UDI, "FILTER(0x%.4X) DROP\n", signal_id);
+
+		/* Frame filtered out: account the drop under the log mutex. */
+		if (down_interruptible(&client->log_mutex)) {
+			SLSI_WARN_NODEV("Failed to get udi sem\n");
+			return -ERESTARTSYS;
+		}
+		if (client->log_drop_data_packets)
+			client->log_dropped_data++;
+		up(&client->log_mutex);
+		return -ECANCELED;
+	}
+
+	/* Special Filtering of MaPacketCfm.
+	 * Only log ma_packet_cfm if the tx status != Success
+	 */
+	if (signal_id == MA_UNITDATA_CFM && slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_CFM_ERROR_ID))
+		if (fapi_get_u16(skb, u.ma_unitdata_cfm.transmission_status) == FAPI_TRANSMISSIONSTATUS_SUCCESSFUL)
+			return -ECANCELED;
+
+	/* Exception for driver configuration frames.
+	 * these frames must be sent irrespective of number of frames
+	 * in queue.
+	 */
+	if (dir == UDI_CONFIG_IND)
+		goto allow_config_frame;
+
+allow_frame:
+	if (down_interruptible(&client->log_mutex)) {
+		SLSI_WARN_NODEV("Failed to get udi sem\n");
+		return -ERESTARTSYS;
+	}
+
+	/* Handle hitting the UDI_MAX_QUEUED_FRAMES Limit */
+	if (client->log_dropped) {
+		/* Already dropping: resume queueing only once the queue has
+		 * drained below the restart threshold, and report the total.
+		 */
+		if (skb_queue_len(&client->log_list) <= UDI_RESTART_QUEUED_FRAMES) {
+			u32 dropped = client->log_dropped;
+
+			SLSI_WARN_NODEV("Stop Dropping UDI Frames : %d frames Dropped\n", dropped);
+			client->log_dropped = 0;
+			up(&client->log_mutex);
+			slsi_kernel_to_user_space_event(log_client, UDI_DRV_DROPPED_FRAMES, sizeof(u32), (u8 *)&dropped);
+			return -ECANCELED;
+		}
+		client->log_dropped++;
+		up(&client->log_mutex);
+		return -ECANCELED;
+	} else if (!client->log_dropped && skb_queue_len(&client->log_list) >= UDI_MAX_QUEUED_FRAMES) {
+		SLSI_WARN_NODEV("Start Dropping UDI Frames\n");
+		client->log_dropped++;
+		up(&client->log_mutex);
+		return -ECANCELED;
+	}
+
+	/* Handle hitting the UDI_MAX_QUEUED_DATA_FRAMES Limit
+	 * Turn ON the MA_PACKET Filters before we get near the absolute limit of UDI_MAX_QUEUED_FRAMES
+	 * This should allow key frames (mgt, dhcp and eapol etc) to still be in the logs but stop the logging general data frames.
+	 * This occurs when the Transfer rate is higher than we can take the frames out of the UDI list.
+	 */
+	if (client->log_drop_data_packets && skb_queue_len(&client->log_list) < UDI_RESTART_QUEUED_DATA_FRAMES) {
+		u32 dropped = client->log_dropped_data;
+
+		SLSI_WARN_NODEV("Stop Dropping UDI Frames : %d Basic Data frames Dropped\n", client->log_dropped_data);
+		client->log_drop_data_packets = false;
+		client->ma_unitdata_filter_config = 0;
+		client->log_dropped_data = 0;
+		up(&client->log_mutex);
+		slsi_kernel_to_user_space_event(log_client, UDI_DRV_DROPPED_DATA_FRAMES, sizeof(u32), (u8 *)&dropped);
+		return -ECANCELED;
+	} else if (!client->log_drop_data_packets && skb_queue_len(&client->log_list) >= UDI_MAX_QUEUED_DATA_FRAMES && !slsi_cdev_unitdata_filter_allow(client, UDI_MA_UNITDATA_FILTER_ALLOW_MASK)) {
+		SLSI_WARN_NODEV("Start Dropping UDI Basic Data Frames\n");
+		client->log_drop_data_packets = true;
+		client->ma_unitdata_filter_config = UDI_MA_UNITDATA_FILTER_ALLOW_MGT_ID |
+						    UDI_MA_UNITDATA_FILTER_ALLOW_KEY_ID |
+						    UDI_MA_UNITDATA_FILTER_ALLOW_CFM_ERROR_ID |
+						    UDI_MA_UNITDATA_FILTER_ALLOW_EAPOL_ID;
+	}
+	up(&client->log_mutex);
+
+allow_config_frame:
+	/* Optionally truncate large MA_UNITDATA frames to the client's size
+	 * limit; otherwise copy the whole skb with headroom for the header.
+	 */
+	if ((signal_id == MA_UNITDATA_REQ || signal_id == MA_UNITDATA_IND) &&
+	    (client->ma_unitdata_size_limit) && (skb->len > client->ma_unitdata_size_limit)) {
+		struct slsi_skb_cb *cb;
+		struct sk_buff *skb2 = alloc_skb(sizeof(msg) + client->ma_unitdata_size_limit, GFP_ATOMIC);
+
+		if (WARN_ON(!skb2))
+			return -ENOMEM;
+
+		skb_reserve(skb2, sizeof(msg));
+		cb = slsi_skb_cb_init(skb2);
+		cb->sig_length = fapi_get_siglen(skb);
+		cb->data_length = client->ma_unitdata_size_limit;
+		skb_copy_bits(skb, 0, skb_put(skb2, client->ma_unitdata_size_limit), client->ma_unitdata_size_limit);
+		skb = skb2;
+	} else {
+		skb = slsi_skb_copy_expand(skb, sizeof(msg), 0, GFP_ATOMIC);
+		if (WARN_ON(!skb))
+			return -ENOMEM;
+	}
+
+	/* Prepend the UDI header and hand the copy to the reader. */
+	msg.length = sizeof(msg) + skb->len;
+	msg.timestamp = ktime_to_ms(ktime_get());
+	msg.direction = dir;
+	msg.signal_length = fapi_get_siglen(skb);
+
+	msg_skb = (struct udi_msg_t *)skb_push(skb, sizeof(msg));
+	*msg_skb = msg;
+
+	slsi_skb_queue_tail(&client->log_list, skb);
+
+	/* Wake any waiting user process */
+	wake_up_interruptible(&client->log_wq);
+
+	return 0;
+}
+
+#define UF_DEVICE_CREATE(_class, _parent, _devno, _priv, _fmt, _args) \
+ device_create(_class, _parent, _devno, _priv, _fmt, _args)
+
+/* File operations for the UDI character device node. */
+static const struct file_operations slsi_cdev_fops = {
+	.owner          = THIS_MODULE,
+	.open           = slsi_cdev_open,
+	.release        = slsi_cdev_release,
+	.read           = slsi_cdev_read,
+	.write          = slsi_cdev_write,
+	.unlocked_ioctl = slsi_cdev_ioctl,
+	.compat_ioctl   = slsi_cdev_ioctl,
+	.poll           = slsi_cdev_poll,
+};
+
+/* Fix: removed a second, byte-identical definition of UF_DEVICE_CREATE;
+ * the macro is already defined above.
+ */
+
+#ifndef SLSI_TEST_DEV
+/* Return the first unused minor number, or -1 when all are taken. */
+static int slsi_get_minor(void)
+{
+	int i;
+
+	for (i = 0; i < SLSI_UDI_MINOR_NODES; i++)
+		if (!uf_cdevs[i])
+			return i;
+	return -1;
+}
+#endif
+
+/* Create and register one UDI character device node for @sdev.
+ * Returns 0 on success or a negative errno; on failure all partially
+ * acquired resources are released.
+ */
+static int slsi_cdev_create(struct slsi_dev *sdev, struct device *parent)
+{
+	dev_t devno;
+	int ret;
+	struct slsi_cdev *pdev;
+	int minor;
+
+	SLSI_DBG3_NODEV(SLSI_UDI, "\n");
+#ifdef SLSI_TEST_DEV
+	{
+		/* Use the same minor as the unittesthip char device so the number match */
+		struct slsi_test_dev *uftestdev = (struct slsi_test_dev *)sdev->maxwell_core;
+
+		minor = uftestdev->device_minor_number;
+		if (uf_cdevs[minor])
+			return -EINVAL;
+	}
+#else
+	minor = slsi_get_minor();
+#endif
+	if (minor < 0) {
+		SLSI_ERR(sdev, "no minor numbers available\n");
+		return -ENOMEM;
+	}
+
+	/* kzalloc() replaces the kmalloc()+memset() anti-idiom */
+	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+	if (!pdev)
+		return -ENOMEM;
+
+	cdev_init(&pdev->cdev, &slsi_cdev_fops);
+	pdev->cdev.owner = THIS_MODULE;
+	pdev->minor = minor;
+	devno = MKDEV(MAJOR(major_number), minor);
+	ret = cdev_add(&pdev->cdev, devno, 1);
+	if (ret) {
+		SLSI_ERR(sdev, "cdev_add failed with %d for minor %d\n", ret, minor);
+		kfree(pdev);
+		return ret;
+	}
+
+	pdev->sdev = sdev;
+	pdev->parent = parent;
+	if (!UF_DEVICE_CREATE(class, pdev->parent, devno, pdev, UDI_CHAR_DEVICE_NAME "%d", minor)) {
+		cdev_del(&pdev->cdev);
+		kfree(pdev);
+		return -EINVAL;
+	}
+	sdev->uf_cdev = (void *)pdev;
+	sdev->procfs_instance = minor;
+	uf_cdevs[minor] = pdev;
+
+	return 0;
+}
+
+/* Tear down the UDI character device node for @sdev. Blocks until all
+ * attached clients have detached and their file-release callbacks have
+ * fully completed (see the race note below) before freeing the cdev.
+ */
+static void slsi_cdev_destroy(struct slsi_dev *sdev)
+{
+	struct slsi_cdev *pdev = (struct slsi_cdev *)sdev->uf_cdev;
+	struct kobject *kobj;
+	struct kref *kref;
+
+	if (!pdev)
+		return;
+
+	SLSI_DBG1(sdev, SLSI_UDI, "\n");
+	/* Wait (forever, with a nag message) for all clients to detach. */
+	while (slsi_check_cdev_refs()) {
+		SLSI_ERR(sdev, "UDI Client still attached. Please Terminate!\n");
+		msleep(1000);
+	}
+
+	/* There exist a possibility of race such that the
+	 *
+	 * - file operation release callback (slsi_cdev_release) is called
+	 * - the cdev client structure is freed
+	 * - the context is pre-empted and this context (slsi_cdev_destroy) is executed
+	 * - slsi_cdev_destroy deletes cdev and hence the kobject embedded inside cdev
+	 *   and returns
+	 * - the release context again executes and operates on a non-existent kobject
+	 *   leading to kernel Panic
+	 *
+	 * Ideally the kernel should protect against such race. But it is not!
+	 * So we check here that the file operation release callback is complete by
+	 * checking the refcount in the kobject embedded in cdev structure.
+	 * The refcount is initialized to 1; so anything more than that means
+	 * there exists attached clients.
+	 */
+
+	kobj = &pdev->cdev.kobj;
+	kref = &kobj->kref;
+	/* kref->refcount changed type from atomic_t to refcount_t in 4.11 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+	while (refcount_read(&kref->refcount) > 1) {
+		SLSI_WARN(sdev, "UDI client File op release not completed yet! (count=%d)\n", refcount_read(&kref->refcount));
+		msleep(50);
+	}
+#else
+	while (atomic_read(&kref->refcount) > 1) {
+		SLSI_WARN(sdev, "UDI client File op release not completed yet! (count=%d)\n", atomic_read(&kref->refcount));
+		msleep(50);
+	}
+#endif
+	device_destroy(class, pdev->cdev.dev);
+	cdev_del(&pdev->cdev);
+	sdev->uf_cdev = NULL;
+	uf_cdevs[pdev->minor] = NULL;
+	kfree(pdev);
+}
+
+static int udi_initialised;
+
+int slsi_udi_init(void)
+{
+ int ret;
+
+ SLSI_DBG1_NODEV(SLSI_UDI, "\n");
+ memset(uf_cdevs, 0, sizeof(uf_cdevs));
+
+ /* Allocate two device numbers for each device. */
+ ret = alloc_chrdev_region(&major_number, 0, SLSI_UDI_MINOR_NODES, UDI_CLASS_NAME);
+ if (ret) {
+ SLSI_ERR_NODEV("Failed to add alloc dev numbers: %d\n", ret);
+ return ret;
+ }
+
+ /* Create a driver class */
+ class = class_create(THIS_MODULE, UDI_CLASS_NAME);
+ if (IS_ERR(class)) {
+ SLSI_ERR_NODEV("Failed to create driver udi class\n");
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+ major_number = 0;
+ return -EINVAL;
+ }
+
+ udi_initialised = 1;
+
+ return 0;
+}
+
+int slsi_udi_deinit(void)
+{
+ if (!udi_initialised)
+ return -1;
+ SLSI_DBG1_NODEV(SLSI_UDI, "\n");
+ class_destroy(class);
+ unregister_chrdev_region(major_number, SLSI_UDI_MINOR_NODES);
+ udi_initialised = 0;
+ return 0;
+}
+
/* Per-device UDI setup: create the char device node for @sdev. */
int slsi_udi_node_init(struct slsi_dev *sdev, struct device *parent)
{
	int rc;

	rc = slsi_cdev_create(sdev, parent);
	return rc;
}
+
/* Per-device UDI teardown; destroying the node cannot fail, so always 0. */
int slsi_udi_node_deinit(struct slsi_dev *sdev)
{
	slsi_cdev_destroy(sdev);

	return 0;
}
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
#ifndef __SLSI_UDI_H__
#define __SLSI_UDI_H__

#include "dev.h"

#ifdef SLSI_TEST_DEV

/* Maximum number of nodes supported in UNIT TEST MODE
 * arbitrarily set to 20, could increase this if needed
 */
#define SLSI_UDI_MINOR_NODES 20

#else
#define SLSI_UDI_MINOR_NODES 2 /* Maximum number of nodes supported. */
#endif

/* Per-device create/destroy of a UDI char device node. */
int slsi_udi_node_init(struct slsi_dev *sdev, struct device *parent);
int slsi_udi_node_deinit(struct slsi_dev *sdev);

/* Module-wide init/teardown of the UDI char device region and class. */
int slsi_udi_init(void);
int slsi_udi_deinit(void);
/* Presumably delivers a driver-generated event to an attached log client -
 * confirm against the implementation.
 */
int slsi_kernel_to_user_space_event(struct slsi_log_client *log_client, u16 event, u32 data_length, const u8 *data);
/* Non-zero while any UDI client still holds the char device open. */
int slsi_check_cdev_refs(void);

#endif
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2017 Samsung Electronics Co., Ltd and its Licensors.
+ * All rights reserved.
+ *
+ *****************************************************************************/
+
+#ifndef __UNIFIIO_H__
+#define __UNIFIIO_H__
+
+#include <linux/types.h>
+/* The following include of linux/if.h is needed to get IFNAMSIZ.
+ * The conditional include before it is necessary to get the linux/if.h
+ * include to compile...
+ */
+#ifdef __KERNEL__
+#include <linux/socket.h>
+#include <linux/types.h>
+#else
+#include <sys/socket.h>
+#include <stdbool.h>
+#endif
+#include <linux/if.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* UDI logging enable/disable; the int argument is a UDI_ENABLE_* bitmask. */
#define UNIFI_GET_UDI_ENABLE _IOR('u', 1, int)
#define UNIFI_SET_UDI_ENABLE _IOW('u', 2, int)
/* Values for UDI_ENABLE */
#define UDI_ENABLE_DATA 0x1
#define UDI_ENABLE_CONTROL 0x2

/* MIB set/get. Arg is a pointer to a varbind */
#define UNIFI_GET_MIB _IOWR('u', 3, unsigned char)
#define UNIFI_SET_MIB _IOW('u', 4, unsigned char)
#define MAX_MIB_DATA_LENGTH 2048

/* Private IOCTLs */
/* NOTE(review): only even SIOCIWFIRSTPRIV offsets are used - presumably to
 * follow the wireless-extensions set/get pairing convention; confirm.
 */
#define SIOCIWS80211POWERSAVEPRIV SIOCIWFIRSTPRIV
#define SIOCIWG80211POWERSAVEPRIV (SIOCIWFIRSTPRIV + 1)
#define SIOCIWS80211RELOADDEFAULTSPRIV (SIOCIWFIRSTPRIV + 2)
#define SIOCIWSCONFWAPIPRIV (SIOCIWFIRSTPRIV + 4)
#define SIOCIWSWAPIKEYPRIV (SIOCIWFIRSTPRIV + 6)
#define SIOCIWSSMEDEBUGPRIV (SIOCIWFIRSTPRIV + 8)
#define SIOCIWSAPCFGPRIV (SIOCIWFIRSTPRIV + 10)
#define SIOCIWSAPSTARTPRIV (SIOCIWFIRSTPRIV + 12)
#define SIOCIWSAPSTOPPRIV (SIOCIWFIRSTPRIV + 14)
#define SIOCIWSFWRELOADPRIV (SIOCIWFIRSTPRIV + 16)
#define SIOCIWSSTACKSTART (SIOCIWFIRSTPRIV + 18)
#define SIOCIWSSTACKSTOP (SIOCIWFIRSTPRIV + 20)

#define IWPRIV_POWER_SAVE_MAX_STRING 32
#define IWPRIV_SME_DEBUG_MAX_STRING 32
#define IWPRIV_SME_MAX_STRING 120

/* Private configuration commands */
#define UNIFI_CFG _IOWR('u', 5, unsigned char[4])
/**
 * <------------------ Read/Write Buffer -------------------->
 * _____________________________________________________________
 * |    Cmd    |    Arg    |  ...  Buffer (opt)  ...           |
 * -------------------------------------------------------------
 * <-- uint --><-- uint --><----- unsigned char buffer ------>
 *
 * Cmd: A enum unifi_cfg_command command.
 * Arg: Out:Length     if Cmd==UNIFI_CFG_GET
 *      In:PowerOnOff  if Cmd==UNIFI_CFG_POWER
 *      In:PowerMode   if Cmd==UNIFI_CFG_POWERSAVE
 *      In:Length      if Cmd==UNIFI_CFG_FILTER
 *      In:WMM Qos Info if Cmd==UNIFI_CFG_WMM_QOS_INFO
 * Buffer: Out:Data    if Cmd==UNIFI_CFG_GET
 *         NULL        if Cmd==UNIFI_CFG_POWER
 *         NULL        if Cmd==UNIFI_CFG_POWERSAVE
 *         In:Filters  if Cmd==UNIFI_CFG_FILTER
 *
 * where Filters is a struct uf_cfg_bcast_packet_filter structure
 * followed by 0 - n struct tclas structures. The length of the struct tclas
 * structures is obtained by struct uf_cfg_bcast_packet_filter::tclas_ies_length.
 */

#define UNIFI_BUILD_TYPE _IOWR('u', 7, unsigned char)

/* Values returned/accepted by UNIFI_BUILD_TYPE. */
#define UNIFI_BUILD_NME 1
#define UNIFI_BUILD_WEXT 2
#define UNIFI_BUILD_AP 3

/* debugging */
#define UNIFI_SET_DEBUG _IO('u', 0x11)
#define UNIFI_SET_TRACE _IO('u', 0x12)

#define UNIFI_GET_INIT_STATUS _IOR('u', 0x15, int)
#define UNIFI_SET_UDI_LOG_CONFIG _IOR('u', 0x16, struct unifiio_udi_config_t)
#define UNIFI_SET_UDI_LOG_MASK _IOR('u', 0x18, struct unifiio_filter_t)

#define UNIFI_INIT_HW _IOR('u', 0x13, unsigned char)
#define UNIFI_INIT_NETDEV _IOW('u', 0x14, unsigned char[6])
#define UNIFI_SME_PRESENT _IOW('u', 0x19, int)

#define UNIFI_CFG_PERIOD_TRAFFIC _IOW('u', 0x21, unsigned char[4])
#define UNIFI_CFG_UAPSD_TRAFFIC _IOW('u', 0x22, unsigned char)

#define UNIFI_SOFTMAC_CFG _IOWR('u', 0x30, unsigned char[4])

#define UNIFI_NETDEV_CFG _IOWR('u', 0x31, struct unifiio_netdev_cfg_t)
#define UNIFI_STREAM_CFG _IOWR('u', 0x32, struct unifiio_stream_cfg_t)
#define UNIFI_AMSDU_CFG _IOWR('u', 0x33, struct unifiio_amsdu_cfg_t)
#define UNIFI_BA_CFG _IOWR('u', 0x34, struct unifiio_ba_cfg_t)
#define UNIFI_SUSPEND_RESUME_CFG _IOWR('u', 0x35, struct unifiio_suspend_resume_cfg_t)
#define UNIFI_SRC_SINK_IOCTL _IOWR('u', 0x41, struct unifiio_src_sink_arg_t)

/* Pseudo-enum: values for unifiio_src_sink_common.action. */
#define src_sink_action uint16_t
#define SRC_SINK_ACTION_NONE ((src_sink_action)0x0000)
#define SRC_SINK_ACTION_ENABLE_BA ((src_sink_action)0x0001)
#define SRC_SINK_ACTION_SINK_START ((src_sink_action)0x0002)
#define SRC_SINK_ACTION_SINK_STOP ((src_sink_action)0x0003)
#define SRC_SINK_ACTION_GEN_START ((src_sink_action)0x0004)
#define SRC_SINK_ACTION_GEN_STOP ((src_sink_action)0x0005)
#define SRC_SINK_ACTION_SINK_REPORT ((src_sink_action)0x0006)
#define SRC_SINK_ACTION_GEN_REPORT ((src_sink_action)0x0007)
#define SRC_SINK_ACTION_SINK_REPORT_CACHED ((src_sink_action)0x0008)
#define SRC_SINK_ACTION_GEN_REPORT_CACHED ((src_sink_action)0x0009)
#define SRC_SINK_ACTION_LOOPBACK_START ((src_sink_action)0x000A)
#define SRC_SINK_ACTION_LOOPBACK_STOP ((src_sink_action)0x000B)

/* Pseudo-enum: values for unifiio_src_sink_common.direction. */
#define src_sink_direction uint16_t
#define SRC_SINK_DIRECTION_TX ((src_sink_direction)0x0000)
#define SRC_SINK_DIRECTION_RX ((src_sink_direction)0x0001)

/* Pseudo-enum: values for unifiio_src_sink_common.endpoint. */
#define src_sink_endpoint uint16_t
#define SRC_SINK_ENDPOINT_HOSTIO ((src_sink_endpoint)0x0000)
#define SRC_SINK_ENDPOINT_MACRAME ((src_sink_endpoint)0x0001)
+
/* Report returned by the SRC_SINK_ACTION_*_REPORT ioctls. */
struct unifiio_src_sink_report {
	/* total reporting time requested by the user in seconds*/
	uint32_t time;
	/* requested report interval in microseconds */
	uint32_t interval;

	/* timestamp [jiffies in millisec] for each report returned by the driver */
	uint32_t timestamp;
	/* actual interval used for the report calculations from the firmware */
	uint32_t duration;
	/* number of packets during the above duration */
	uint32_t count;
	/* number of octets during the above duration */
	uint32_t octet;
	/* throughput in kbps */
	uint32_t kbps;
	/* CPU idle ratios */
	uint16_t idle_ratio;
	uint16_t interrupt_latency;
	uint16_t free_kbytes;
	/* only relevant for SRC a.k.a GEN mode */
	uint32_t failed_count;
};

/* fields specific to SRC a.k.a GEN <CONFIG> IOCTLs */
struct unifiio_src_sink_gen_config {
	uint16_t size;
	uint16_t use_streaming;
	uint16_t pkts_per_intr;
	uint32_t ipv4_dest;
};

/* fields specific to SRC/SINK CONFIG IOCTLs */
struct unifiio_src_sink_config {
	uint32_t interval;
	uint16_t pkts_per_int;
	union {
		struct unifiio_src_sink_gen_config gen;
	} u;
};

/* fields common to <ALL> SRC/SINK IOCTLs */
struct unifiio_src_sink_common {
	/* one of SRC_SINK_ACTION_* for driver IOCTL */
	src_sink_action action;
	uint16_t vif;
	src_sink_direction direction;
	src_sink_endpoint endpoint;
};

/* Argument of UNIFI_SRC_SINK_IOCTL: common header plus action-dependent body. */
struct unifiio_src_sink_arg_t {
	/* arg in */
	struct unifiio_src_sink_common common;
	union {
		struct unifiio_src_sink_config config;
		struct unifiio_src_sink_report report;
	} u;
};
+
/* Structure of data read from the unifi device. */
struct udi_msg_t {
	/* Length (in bytes) of entire structure including appended bulk data */
	int length;

	/* System time (in milliseconds) that signal was transferred */
	int timestamp;

	/* Direction in which signal was transferred. */
	int direction;
	/* Values for the direction field above. */
#define UDI_FROM_HOST 0
#define UDI_TO_HOST 1
#define UDI_CONFIG_IND 2

	/* The length of the signal (in bytes) not including bulk data */
	int signal_length;

	/* Signal body follows, then any bulk data */
};

/* Add these to the filter signal_ids to enable partial filtering of the MA_UNITDATA Data
 * the top bit is set to indicate this is not a HIP Signal ID but a special Filter
 */
#define UDI_MA_UNITDATA_FILTER_ALLOW_MASK 0x8000 /* Filter MA_UNITDATA_REQ and MA_UNITDATA_IND */
#define UDI_MA_UNITDATA_FILTER_ALLOW_MGT_ID 0x8001 /* Filter MA_UNITDATA_REQ and MA_UNITDATA_IND but Log management Frames */
#define UDI_MA_UNITDATA_FILTER_ALLOW_KEY_ID 0x8002 /* Filter MA_UNITDATA_REQ and MA_UNITDATA_IND but Log Key Data Frames (Arp, Eapol, Dhcp etc) */
#define UDI_MA_UNITDATA_FILTER_ALLOW_CFM_ERROR_ID 0x8004 /* Filter MA_UNITDATA_CFM but MA_UNITDATA_CFM(error) */
#define UDI_MA_UNITDATA_FILTER_ALLOW_EAPOL_ID 0x8008 /* Filter MA_UNITDATA_REQ and MA_UNITDATA_IND but Log Eapol data */

/**
 * Signals used to indicate to user space that the kernel module has been
 * unloaded. The user space applications can based on this indication determine
 * if they should unregister from the char device.
 */
#define UDI_DRV_SIGNAL_BASE 0xA000
#define UDI_DRV_UNLOAD_IND (UDI_DRV_SIGNAL_BASE + 1)
#define UDI_DRV_DROPPED_FRAMES (UDI_DRV_SIGNAL_BASE + 2)
#define UDI_DRV_DROPPED_DATA_FRAMES (UDI_DRV_SIGNAL_BASE + 3)
#define UDI_DRV_SUSPEND_IND (UDI_DRV_SIGNAL_BASE + 4)
#define UDI_DRV_RESUME_IND (UDI_DRV_SIGNAL_BASE + 5)

/* Argument of UNIFI_SET_UDI_LOG_CONFIG. */
struct unifiio_udi_config_t {
	uint16_t ma_unitdata_size_limit; /* if non-zero, the MA_UNITDATA_REQ and MA_UNITDATA_IND are capped by this size */
};

/* Argument of UNIFI_SET_UDI_LOG_MASK. */
struct unifiio_filter_t {
	uint16_t log_listed_flag; /* if non-zero, log listed sigs and ignore others (otherwise vice versa) */
	uint16_t signal_ids_n; /* Number of elements in signal_ids[] */
	uint16_t signal_ids[5]; /* list of the signals to log */
};
+
/* Pseudo-enum: UNIFI_CFG command codes (first uint of the UNIFI_CFG buffer). */
#define unifi_cfg_command uint32_t
#define UNIFI_CFG_GET ((unifi_cfg_command)0)
#define UNIFI_CFG_POWER ((unifi_cfg_command)1)
#define UNIFI_CFG_POWERSAVE ((unifi_cfg_command)2)
#define UNIFI_CFG_FILTER ((unifi_cfg_command)3)
#define UNIFI_CFG_POWERSUPPLY ((unifi_cfg_command)4)
#define UNIFI_CFG_WMM_QOSINFO ((unifi_cfg_command)5)
#define UNIFI_CFG_WMM_ADDTS ((unifi_cfg_command)6)
#define UNIFI_CFG_WMM_DELTS ((unifi_cfg_command)7)
#define UNIFI_CFG_STRICT_DRAFT_N ((unifi_cfg_command)8)
#define UNIFI_CFG_SET_AP_CONFIG ((unifi_cfg_command)9)
#define UNIFI_CFG_CORE_DUMP ((unifi_cfg_command)10)

/* Argument values for UNIFI_CFG_POWER. */
#define unifi_cfg_power uint32_t
#define UNIFI_CFG_POWER_UNSPECIFIED ((unifi_cfg_power)0)
#define UNIFI_CFG_POWER_OFF ((unifi_cfg_power)1)
#define UNIFI_CFG_POWER_ON ((unifi_cfg_power)2)

/* Argument values for UNIFI_CFG_POWERSUPPLY. */
#define unifi_cfg_powersupply uint32_t
#define UNIFI_CFG_POWERSUPPLY_UNSPECIFIED ((unifi_cfg_powersupply)0)
#define UNIFI_CFG_POWERSUPPLY_MAINS ((unifi_cfg_powersupply)1)
#define UNIFI_CFG_POWERSUPPLY_BATTERIES ((unifi_cfg_powersupply)2)

/* Argument values for UNIFI_CFG_POWERSAVE. */
#define unifi_cfg_powersave uint32_t
#define UNIFI_CFG_POWERSAVE_UNSPECIFIED ((unifi_cfg_powersave)0)
#define UNIFI_CFG_POWERSAVE_NONE ((unifi_cfg_powersave)1)
#define UNIFI_CFG_POWERSAVE_FAST ((unifi_cfg_powersave)2)
#define UNIFI_CFG_POWERSAVE_FULL ((unifi_cfg_powersave)3)
#define UNIFI_CFG_POWERSAVE_AUTO ((unifi_cfg_powersave)4)

/* Sub-commands of UNIFI_CFG_GET. */
#define unifi_cfg_get uint32_t
#define UNIFI_CFG_GET_COEX ((unifi_cfg_get)0)
#define UNIFI_CFG_GET_POWER_MODE ((unifi_cfg_get)1)
#define UNIFI_CFG_GET_VERSIONS ((unifi_cfg_get)2)
#define UNIFI_CFG_GET_POWER_SUPPLY ((unifi_cfg_get)3)
#define UNIFI_CFG_GET_INSTANCE ((unifi_cfg_get)4)
#define UNIFI_CFG_GET_AP_CONFIG ((unifi_cfg_get)5)

/* Bitmask values for UNIFI_CFG_FILTER. */
#define UNIFI_CFG_FILTER_NONE 0x0000
#define UNIFI_CFG_FILTER_DHCP 0x0001
#define UNIFI_CFG_FILTER_ARP 0x0002
#define UNIFI_CFG_FILTER_NBNS 0x0004
#define UNIFI_CFG_FILTER_NBDS 0x0008
#define UNIFI_CFG_FILTER_CUPS 0x0010
#define UNIFI_CFG_FILTER_ALL 0xFFFF

#define uf_cfg_packet_filter_type uint32_t
#define UNIFI_CFG_FILTER_TYPE_NONE ((uf_cfg_packet_filter_type)0)
#define UNIFI_CFG_FILTER_TYPE_ACTIVE_HOST ((uf_cfg_packet_filter_type)1)
#define UNIFI_CFG_FILTER_TYPE_SUSPENDED_HOST ((uf_cfg_packet_filter_type)2)

/* Broadcast packet filter passed via UNIFI_CFG_FILTER. */
struct uf_cfg_bcast_packet_filter {
	unsigned long filter_mode; /* as defined by HIP protocol */
	uf_cfg_packet_filter_type packet_filter_type;
	unsigned char arp_filter;
	unsigned char dhcp_filter;
	unsigned long tclas_ies_length; /* length of tclas_ies in bytes */
	/* NOTE(review): [1] rather than a C99 flexible array member - kept as-is
	 * since this is a userspace ABI header; actual extent is tclas_ies_length.
	 */
	unsigned char tclas_ies[1]; /* variable length depending on above field */
};

/* AP configuration exchanged via UNIFI_CFG_SET_AP_CONFIG / UNIFI_CFG_GET_AP_CONFIG. */
struct uf_cfg_ap_config {
	uint8_t phySupportedBitmap;
	uint8_t channel;
	uint16_t beaconInterval;
	uint8_t dtimPeriod;
	bool wmmEnabled;
	uint8_t shortSlotTimeEnabled;
	uint16_t groupkeyTimeout;
	bool strictGtkRekeyEnabled;
	uint16_t gmkTimeout;
	uint16_t responseTimeout;
	uint8_t retransLimit;
	uint8_t rxStbc;
	bool rifsModeAllowed;
	uint8_t dualCtsProtection;
	uint8_t ctsProtectionType;
	uint16_t maxListenInterval;
};

/* Coexistence status returned for UNIFI_CFG_GET_COEX. */
struct csr_wifi_cfg_coex_info {
	bool hasTrafficData;
	uint8_t currentTrafficType;
	uint16_t currentPeriodMs;
	uint8_t currentPowerSave;
	uint16_t currentCoexPeriodMs;
	uint16_t currentCoexLatencyMs;
	bool hasBtDevice;
	uint32_t currentBlackOutDurationUs;
	uint32_t currentBlackOutPeriodUs;
	uint8_t currentCoexScheme;
};

/* TCP/IP classifier, wire format (hence __packed). */
struct tcpip_clsfr {
	__u8 cls_fr_type;
	__u8 cls_fr_mask;
	__u8 version;
	__u8 source_ip_addr[4];
	__u8 dest_ip_addr[4];
	__u16 source_port;
	__u16 dest_port;
	__u8 dscp;
	__u8 protocol;
	__u8 reserved;
} __packed;
#define tcpip_clsfr_t struct tcpip_clsfr

/* TCLAS element (wire format) carrying one TCP/IP classifier. */
struct tclas {
	__u8 element_id;
	__u8 length;
	__u8 user_priority;
	tcpip_clsfr_t tcp_ip_cls_fr;
} __packed;
+
+#define CONFIG_IND_ERROR 0x01
+#define CONFIG_IND_EXIT 0x02
+#define CONFIG_SME_NOT_PRESENT 0x10
+#define CONFIG_SME_PRESENT 0x20
+
+/* This is used by the UNIFI_NETDEV_CFG ioctl which is a Multi-netdev replacement for
+ * UNIFI_INIT_NETDEV. The all netdev are created statically and can be mapped/registered
+ * to a particular vif_index/MAC address/interface mode dynamically using this call.
+ *
+ * Configure operations (operation == 0):
+ *
+ * Attempting to operate on a negative interfaceTag is an attempt to
+ * create a NEW netdevice. A valid vif_index and macaddr must be supplied.
+ * interfaceMode will not be checked. The new interfaceTag and name will be
+ * copied back on success.
+ *
+ * Using an interfaceTag >=0 attempts to reconfigure an existing netdevice. The new
+ * vif_index, macaddr and interfaceMode will be applied if all is valid. Setting a
+ * vif_index of zero causes the given netdevice to be unregistered instead.
+ * The contents of the name[] field will be ignored as input, and the existing
+ * interface name copied back as output.
+ *
+ * Query operations (operation == 1):
+ *
+ * The first identifying field with a potentially valid value will be used to
+ * identify an interface, and if a match is found its values will be copied back
+ * into the structure and returned to the user. The fields are checked in this order:
+ *
+ * interfaceTag (valid if >= 0)
+ * vif_index (valid if non-zero)
+ * name (valid if first byte non-zero)
+ * macaddr (valid if not broadcast)
+ */
/* Argument of UNIFI_NETDEV_CFG; usage described in the comment above. */
struct unifiio_netdev_cfg_t {
	uint8_t operation; /* 0 = configure, 1 = query */
	/* All other fields potentially bidirectional */
	int interfaceTag; /* <0 => create new, >=0 => reconfigure existing */
	uint8_t vif_index; /* VIF index to use (0 => remove netdev) */
	uint8_t interfaceMode; /* Values defined in csr_wifi_router_ctrl_prim.h */
	unsigned char macaddr[6]; /* MAC address */
	char name[IFNAMSIZ]; /* Interface name */
	unsigned char peer_macaddr[6]; /* peer MAC address */
	uint16_t association_id; /* Associate ID used for the peer */
};

/* Pseudo-enum: values for unifiio_stream_cfg_t.operation. */
#define unifiio_stream_cfg_operation_type uint32_t
#define UNIFI_CFG_STREAM_OPERATION_TYPE_NONE ((unifiio_stream_cfg_operation_type)0)
#define UNIFI_CFG_STREAM_OPERATION_TYPE_CREATE ((unifiio_stream_cfg_operation_type)1)
#define UNIFI_CFG_STREAM_OPERATION_TYPE_STOP ((unifiio_stream_cfg_operation_type)2)
#define UNIFI_CFG_STREAM_OPERATION_TYPE_DELETE ((unifiio_stream_cfg_operation_type)3)

/* Argument of UNIFI_STREAM_CFG. */
struct unifiio_stream_cfg_t {
	unifiio_stream_cfg_operation_type operation;
	int interfaceTag; /* A valid interface tag */
	unsigned char peer_macaddr[6]; /* Peer STA MAC address */
	uint16_t tId; /* Traffic identifier user priority */
	uint16_t handle; /* Handle as indicated in MA-stream.indication */
	uint16_t size; /* Size as indicated in MA-stream.indication */
};

#define unifiio_amsdu_cfg_operation_type uint32_t
#define UNIFI_CFG_AMSDU_OPERATION_TYPE_SET_MAX_SIZE ((unifiio_amsdu_cfg_operation_type)0)

/* Argument of UNIFI_AMSDU_CFG. */
struct unifiio_amsdu_cfg_t {
	unifiio_amsdu_cfg_operation_type operation;
	int interfaceTag; /* A valid interface tag */
	unsigned char peer_macaddr[6]; /* Peer STA MAC address */
	uint16_t size; /* Maximum A-MSDU length for peer obtained from HT element */
};

#define unifiio_ba_cfg_operation_type uint32_t
#define UNIFI_CFG_BA_SESSION_STOP ((unifiio_ba_cfg_operation_type)0)
#define UNIFI_CFG_BA_SESSION_START ((unifiio_ba_cfg_operation_type)1)

/* Argument of UNIFI_BA_CFG (block-ack session control). */
struct unifiio_ba_cfg_t {
	unifiio_ba_cfg_operation_type operation;
	int interfaceTag; /* A valid interface tag */
	unsigned char peer_macaddr[6]; /* Peer STA MAC address */
	uint16_t tId; /* Traffic identifier */
	uint8_t role; /* Role - 0: originator, 1 - recipient */
	uint16_t window_size; /* Window size as negotiated in BA establish */
	uint16_t ssn; /* Starting sequence number as negotiated in BA establish */
};

#define unifiio_suspend_resume_cfg_operation_type uint32_t
#define UNIFI_CFG_SUSPEND ((unifiio_suspend_resume_cfg_operation_type)0)
#define UNIFI_CFG_RESUME ((unifiio_suspend_resume_cfg_operation_type)1)
#define UNIFI_CFG_WAIT_FOR_RESUME ((unifiio_suspend_resume_cfg_operation_type)2)

/* Argument of UNIFI_SUSPEND_RESUME_CFG. */
struct unifiio_suspend_resume_cfg_t {
	unifiio_suspend_resume_cfg_operation_type operation;
	unsigned timeout_ms; /* used with UNIFI_CFG_WAIT_FOR_RESUME only */
};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __UNIFIIO_H__ */
--- /dev/null
+/******************************************************************************
+ *
+ * Copyright (c) 2012 - 2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef SLSI_UTILS_H__
+#define SLSI_UTILS_H__
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <net/cfg80211.h>
+
+#include "netif.h"
+#include "wakelock.h"
#ifdef CONFIG_SCSC_SMAPPER
/* Driver-private per-skb state stored in skb->cb (SMAPPER builds only). */
struct slsi_skb_cb {
	u32 sig_length;
	u32 data_length;
	u32 frame_format;
	u32 colour;
	/* When false and skb_addr is set, slsi_kfree_skb() frees skb_addr
	 * together with this skb (see slsi_kfree_skb_f below).
	 */
	bool free_ma_unitdat;
	struct sk_buff *skb_addr;
};

/* Return the driver-private control block embedded in skb->cb. */
static inline struct slsi_skb_cb *slsi_skb_cb_get(struct sk_buff *skb)
{
	return (struct slsi_skb_cb *)skb->cb;
}

/* Zero the control block; compile-time check that it fits in skb->cb. */
static inline struct slsi_skb_cb *slsi_skb_cb_init(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct slsi_skb_cb) > sizeof(skb->cb));

	memset(skb->cb, 0, sizeof(struct slsi_skb_cb));
	return slsi_skb_cb_get(skb);
}
#endif
+
+static inline u32 slsi_convert_tlv_data_to_value(u8 *data, u16 length)
+{
+ u32 value = 0;
+ int i;
+
+ if (length > 4)
+ return 0;
+ for (i = 0; i < length; i++)
+ value |= ((u32)data[i]) << i * 8;
+
+ return value;
+}
+
#ifdef __cplusplus
extern "C" {
#endif

/* ether_addr_copy()/ether_addr_equal() only exist from kernel 3.14 onwards. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
#define SLSI_ETHER_COPY(dst, src) ether_addr_copy((dst), (src))
#define SLSI_ETHER_EQUAL(mac1, mac2) ether_addr_equal((mac1), (mac2))
#else
#define SLSI_ETHER_COPY(dst, src) memcpy((dst), (src), ETH_ALEN)
#define SLSI_ETHER_EQUAL(mac1, mac2) (memcmp((mac1), (mac2), ETH_ALEN) == 0)
#endif

extern uint slsi_sg_host_align_mask;
#define SLSI_HIP_FH_SIG_PREAMBLE_LEN 4
/* In-page offset of the FH signal preamble for @skb, masked to the host
 * scatter-gather alignment requirement.
 */
#define SLSI_SKB_GET_ALIGNMENT_OFFSET(skb) (offset_in_page(skb->data + SLSI_NETIF_SKB_HEADROOM - SLSI_HIP_FH_SIG_PREAMBLE_LEN) \
					    & slsi_sg_host_align_mask)

/* Get the Compiler to ignore Unused parameters */
#define SLSI_UNUSED_PARAMETER(x) ((void)(x))

/* Helper ERROR Macros */
/* SLSI_ECR: evaluate func once; on non-zero result, log it and return it
 * from the enclosing function.
 */
#define SLSI_ECR(func) \
	do { \
		int _err = (func); \
		if (_err != 0) { \
			SLSI_ERR_NODEV("e=%d\n", _err); \
			return _err; \
		} \
	} while (0)

/* SLSI_EC: as SLSI_ECR but for void functions (plain return on error). */
#define SLSI_EC(func) \
	do { \
		int _err = (func); \
		if (_err != 0) { \
			SLSI_ERR_NODEV("e=%d\n", _err); \
			return; \
		} \
	} while (0)

/* SLSI_EC_GOTO: store the result in err and jump to label on failure.
 * NOTE(review): expands an unparenthesized 'func' and references a local
 * 'sdev' from the caller's scope.
 */
#define SLSI_EC_GOTO(func, err, label) \
	do { \
		(err) = func; \
		if ((err) != 0) { \
			WARN_ON(1); \
			SLSI_ERR(sdev, "fail at line:%d\n", __LINE__); \
			goto label; \
		} \
	} while (0)

/*------------------------------------------------------------------*/
/*      Endian conversion                                           */
/*------------------------------------------------------------------*/
#define SLSI_BUFF_LE_TO_U16(ptr) (((u16)((u8 *)(ptr))[0]) | ((u16)((u8 *)(ptr))[1]) << 8)
#define SLSI_U16_TO_BUFF_LE(uint, ptr) \
	do { \
		u32 local_uint_tmp = (uint); \
		((u8 *)(ptr))[0] = ((u8)((local_uint_tmp & 0x00FF))); \
		((u8 *)(ptr))[1] = ((u8)(local_uint_tmp >> 8)); \
	} while (0)

/* NOTE(review): dereferences ptr as u32, so the destination must be
 * suitably aligned - confirm with callers.
 */
#define SLSI_U32_TO_BUFF_LE(uint, ptr) ((*(u32 *)ptr) = cpu_to_le32(uint))

/* The *_P variants additionally advance the buffer pointer past the field. */
#define SLSI_BUFF_LE_TO_U16_P(output, input) \
	do { \
		(output) = (u16)((((u16)(input)[1]) << 8) | ((u16)(input)[0])); \
		(input) += 2; \
	} while (0)

#define SLSI_BUFF_LE_TO_U32_P(output, input) \
	do { \
		(output) = le32_to_cpu(*(u32 *)input); \
		(input) += 4; \
	} while (0)

#define SLSI_U16_TO_BUFF_LE_P(output, input) \
	do { \
		(output)[0] = ((u8)((input) & 0x00FF)); \
		(output)[1] = ((u8)((input) >> 8)); \
		(output) += 2; \
	} while (0)

#define SLSI_U32_TO_BUFF_LE_P(output, input) \
	do { \
		(*(u32 *)output) = cpu_to_le32(input); \
		(output) += 4; \
	} while (0)
+
#ifdef CONFIG_SCSC_WLAN_SKB_TRACKING
/* skb-tracking build: each alloc/free/queue helper records __FILE__/__LINE__
 * so that leaked or double-freed skbs can be attributed to a call site.
 */
void slsi_dbg_track_skb_init(void);
void slsi_dbg_track_skb_reset(void);
void slsi_dbg_track_skb_f(struct sk_buff *skb, gfp_t flags, const char *file, int line);
bool slsi_dbg_untrack_skb_f(struct sk_buff *skb, const char *file, int line);
bool slsi_dbg_track_skb_marker_f(struct sk_buff *skb, const char *file, int line);
#define slsi_dbg_track_skb(skb_, flags_) slsi_dbg_track_skb_f(skb_, flags_, __FILE__, __LINE__)
#define slsi_dbg_untrack_skb(skb_) slsi_dbg_untrack_skb_f(skb_, __FILE__, __LINE__)
#define slsi_dbg_track_skb_marker(skb_) slsi_dbg_track_skb_marker_f(skb_, __FILE__, __LINE__)
void slsi_dbg_track_skb_report(void);

/* dev_alloc_skb() with driver head/tailroom, alignment fixup and tracking. */
static inline struct sk_buff *slsi_dev_alloc_skb_f(unsigned int length, const char *file, int line)
{
	struct sk_buff *skb = dev_alloc_skb(SLSI_NETIF_SKB_HEADROOM + SLSI_NETIF_SKB_TAILROOM + length);

	if (skb) {
#ifdef CONFIG_SCSC_SMAPPER
		slsi_skb_cb_init(skb);
#endif
		skb_reserve(skb, SLSI_NETIF_SKB_HEADROOM - SLSI_SKB_GET_ALIGNMENT_OFFSET(skb));
		slsi_dbg_track_skb_f(skb, GFP_ATOMIC, file, line);
	}
	return skb;
}

/* alloc_skb() with tracking; no extra head/tailroom reserved. */
static inline struct sk_buff *slsi_alloc_skb_f(unsigned int size, gfp_t priority, const char *file, int line)
{
	struct sk_buff *skb = alloc_skb(size, priority);

	if (skb) {
#ifdef CONFIG_SCSC_SMAPPER
		slsi_skb_cb_init(skb);
#endif
		slsi_dbg_track_skb_f(skb, priority, file, line);
	}

	return skb;
}

/* alloc_skb() with driver head/tailroom, alignment fixup and tracking. */
static inline struct sk_buff *slsi_alloc_skb_headroom_f(unsigned int size, gfp_t priority, const char *file, int line)
{
	struct sk_buff *skb = alloc_skb(SLSI_NETIF_SKB_HEADROOM + SLSI_NETIF_SKB_TAILROOM + size, priority);

	if (skb) {
#ifdef CONFIG_SCSC_SMAPPER
		slsi_skb_cb_init(skb);
#endif
		skb_reserve(skb, SLSI_NETIF_SKB_HEADROOM - SLSI_SKB_GET_ALIGNMENT_OFFSET(skb));
		slsi_dbg_track_skb_f(skb, priority, file, line);
	}
	return skb;
}

/* skb_unlink() plus a tracking marker for the call site. */
static inline void slsi_skb_unlink_f(struct sk_buff *skb, struct sk_buff_head *list, const char *file, int line)
{
	skb_unlink(skb, list);
	slsi_dbg_track_skb_marker_f(skb, file, line);
}

/* skb_queue_tail() plus a tracking marker for the call site. */
static inline void slsi_skb_queue_tail_f(struct sk_buff_head *list, struct sk_buff *skb, const char *file, int line)
{
	skb_queue_tail(list, skb);
	slsi_dbg_track_skb_marker_f(skb, file, line);
}

/* skb_queue_head() plus a tracking marker for the call site. */
static inline void slsi_skb_queue_head_f(struct sk_buff_head *list, struct sk_buff *skb, const char *file, int line)
{
	skb_queue_head(list, skb);
	slsi_dbg_track_skb_marker_f(skb, file, line);
}

/* skb_dequeue() plus a tracking marker when an skb was returned. */
static inline struct sk_buff *slsi_skb_dequeue_f(struct sk_buff_head *list, const char *file, int line)
{
	struct sk_buff *skb = skb_dequeue(list);

	if (skb)
		slsi_dbg_track_skb_marker_f(skb, file, line);
	return skb;
}

/* skb_realloc_headroom() wrapper: re-initialises the cb and tracks the copy. */
static inline struct sk_buff *slsi_skb_realloc_headroom_f(struct sk_buff *skb, unsigned int headroom, const char *file, int line)
{
	skb = skb_realloc_headroom(skb, headroom);
	if (skb) {
#ifdef CONFIG_SCSC_SMAPPER
		slsi_skb_cb_init(skb);
#endif
		slsi_dbg_track_skb_f(skb, GFP_ATOMIC, file, line);
	}
	return skb;
}

/* skb_copy() wrapper that tracks the new copy. */
static inline struct sk_buff *slsi_skb_copy_f(struct sk_buff *skb, gfp_t priority, const char *file, int line)
{
	skb = skb_copy(skb, priority);

	if (skb)
		slsi_dbg_track_skb_f(skb, priority, file, line);
	return skb;
}

/* skb_copy_expand() wrapper that tracks the new copy.
 * NOTE(review): lacks the slsi_ prefix used by its siblings; the
 * slsi_skb_copy_expand macro below hides this from callers.
 */
static inline struct sk_buff *skb_copy_expand_f(struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority, const char *file, int line)
{
	skb = skb_copy_expand(skb, newheadroom, newtailroom, priority);

	if (skb)
		slsi_dbg_track_skb_f(skb, priority, file, line);
	return skb;
}

/* skb_clone() wrapper that tracks the clone. */
static inline struct sk_buff *slsi_skb_clone_f(struct sk_buff *skb, gfp_t priority, const char *file, int line)
{
	skb = skb_clone(skb, priority);

	if (skb)
		slsi_dbg_track_skb_f(skb, priority, file, line);
	return skb;
}

/* Untrack-then-free; also frees a companion cb->skb_addr when owned. */
static inline void slsi_kfree_skb_f(struct sk_buff *skb, const char *file, int line)
{
	/* If untrack fails we do not free the SKB
	 * This helps tracking bad pointers and double frees
	 */
#ifdef CONFIG_SCSC_SMAPPER
	struct slsi_skb_cb *cb;

	if (!skb)
		return;

	cb = (struct slsi_skb_cb *)skb->cb;

	if (cb && !cb->free_ma_unitdat && cb->skb_addr && slsi_dbg_untrack_skb_f(cb->skb_addr, file, line)) {
		kfree_skb(cb->skb_addr);
		cb->skb_addr = NULL;
	}
#endif
	if (slsi_dbg_untrack_skb_f(skb, file, line))
		kfree_skb(skb);
}

#define slsi_dev_alloc_skb(length_) slsi_dev_alloc_skb_f(length_, __FILE__, __LINE__)
#define slsi_alloc_skb(size_, priority_) slsi_alloc_skb_f(size_, priority_, __FILE__, __LINE__)
#define slsi_alloc_skb_headroom(size_, priority_) slsi_alloc_skb_headroom_f(size_, priority_, __FILE__, __LINE__)
#define slsi_skb_realloc_headroom(skb_, headroom_) slsi_skb_realloc_headroom_f(skb_, headroom_, __FILE__, __LINE__)
#define slsi_skb_copy(skb_, priority_) slsi_skb_copy_f(skb_, priority_, __FILE__, __LINE__)
#define slsi_skb_copy_expand(skb_, newheadroom_, newtailroom_, priority_) skb_copy_expand_f(skb_, newheadroom_, newtailroom_, priority_, __FILE__, __LINE__)
#define slsi_skb_clone(skb_, priority_) slsi_skb_clone_f(skb_, priority_, __FILE__, __LINE__)
#define slsi_kfree_skb(skb_) slsi_kfree_skb_f(skb_, __FILE__, __LINE__)
#define slsi_skb_unlink(skb_, list_) slsi_skb_unlink_f(skb_, list_, __FILE__, __LINE__)
#define slsi_skb_queue_tail(list_, skb_) slsi_skb_queue_tail_f(list_, skb_, __FILE__, __LINE__)
#define slsi_skb_queue_head(list_, skb_) slsi_skb_queue_head_f(list_, skb_, __FILE__, __LINE__)
#define slsi_skb_dequeue(list_) slsi_skb_dequeue_f(list_, __FILE__, __LINE__)

/* Drain a queue, freeing (and untracking) every skb on it. */
static inline void slsi_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		slsi_kfree_skb(skb);
}
+
+#else
+#define slsi_dbg_track_skb_init()
+#define slsi_dbg_track_skb_reset()
+#define slsi_dbg_track_skb(skb_, flags_)
+#define slsi_dbg_untrack_skb(skb_)
+#define slsi_dbg_track_skb_marker(skb_)
+#define slsi_dbg_track_skb_report()
+
+static inline struct sk_buff *slsi_dev_alloc_skb_f(unsigned int length, const char *file, int line)
+{
+ struct sk_buff *skb = dev_alloc_skb(SLSI_NETIF_SKB_HEADROOM + SLSI_NETIF_SKB_TAILROOM + length);
+
+ SLSI_UNUSED_PARAMETER(file);
+ SLSI_UNUSED_PARAMETER(line);
+ if (skb) {
+#ifdef CONFIG_SCSC_SMAPPER
+ slsi_skb_cb_init(skb);
+#endif
+ skb_reserve(skb, SLSI_NETIF_SKB_HEADROOM - SLSI_SKB_GET_ALIGNMENT_OFFSET(skb));
+ }
+ return skb;
+}
+
+static inline struct sk_buff *slsi_alloc_skb_f(unsigned int size, gfp_t priority, const char *file, int line)
+{
+ struct sk_buff *skb = alloc_skb(size, priority);
+
+ SLSI_UNUSED_PARAMETER(file);
+ SLSI_UNUSED_PARAMETER(line);
+#ifdef CONFIG_SCSC_SMAPPER
+ if (skb)
+ slsi_skb_cb_init(skb);
+#endif
+ return skb;
+}
+
+static inline struct sk_buff *slsi_alloc_skb_headroom_f(unsigned int size, gfp_t priority, const char *file, int line)
+{
+ struct sk_buff *skb = alloc_skb(SLSI_NETIF_SKB_HEADROOM + SLSI_NETIF_SKB_TAILROOM + size, priority);
+
+ SLSI_UNUSED_PARAMETER(file);
+ SLSI_UNUSED_PARAMETER(line);
+
+ if (skb) {
+#ifdef CONFIG_SCSC_SMAPPER
+ slsi_skb_cb_init(skb);
+#endif
+ skb_reserve(skb, SLSI_NETIF_SKB_HEADROOM - SLSI_SKB_GET_ALIGNMENT_OFFSET(skb));
+ }
+ return skb;
+}
+
+/* Free an SKB. With SMAPPER enabled, first release the companion SKB
+ * referenced from the control block (unless free_ma_unitdat says the
+ * firmware/MA path owns it), then free @skb itself. kfree_skb(NULL) is
+ * a no-op, so a NULL @skb is safe on the non-SMAPPER path too.
+ */
+static inline void slsi_kfree_skb_f(struct sk_buff *skb)
+{
+ /* In the tracking build, if untrack fails the SKB is not freed;
+ * that helps catch bad pointers and double frees. This stub build
+ * frees unconditionally.
+ */
+#ifdef CONFIG_SCSC_SMAPPER
+ struct slsi_skb_cb *cb;
+
+ if (!skb)
+ return;
+
+ cb = (struct slsi_skb_cb *)skb->cb;
+
+ if (cb && !cb->free_ma_unitdat && cb->skb_addr) {
+ kfree_skb(cb->skb_addr);
+ cb->skb_addr = NULL;
+ }
+#endif
+ kfree_skb(skb);
+}
+
+#define slsi_dev_alloc_skb(length_) slsi_dev_alloc_skb_f(length_, __FILE__, __LINE__)
+#define slsi_alloc_skb(size_, priority_) slsi_alloc_skb_f(size_, priority_, __FILE__, __LINE__)
+#define slsi_alloc_skb_headroom(size_, priority_) slsi_alloc_skb_headroom_f(size_, priority_, __FILE__, __LINE__)
+#define slsi_skb_realloc_headroom(skb_, headroom_) skb_realloc_headroom(skb_, headroom_)
+#define slsi_skb_copy(skb_, priority_) skb_copy(skb_, priority_)
+#define slsi_skb_copy_expand(skb_, newheadroom_, newtailroom_, priority_) skb_copy_expand(skb_, newheadroom_, newtailroom_, priority_)
+#define slsi_skb_clone(skb_, priority_) skb_clone(skb_, priority_)
+#define slsi_kfree_skb(skb_) slsi_kfree_skb_f(skb_)
+#define slsi_skb_unlink(skb_, list_) skb_unlink(skb_, list_)
+#define slsi_skb_queue_tail(list_, skb_) skb_queue_tail(list_, skb_)
+#define slsi_skb_queue_head(list_, skb_) skb_queue_head(list_, skb_)
+#define slsi_skb_dequeue(list_) skb_dequeue(list_)
+#define slsi_skb_queue_purge(list_) slsi_skb_queue_purge(list_)
+
+/* Drain @list, freeing every SKB via slsi_kfree_skb(). Note: the
+ * identity macro slsi_skb_queue_purge(list_) defined just above expands
+ * to this same name, so the macro and function resolve to one another
+ * harmlessly (function-like macros do not recurse).
+ */
+static inline void slsi_skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(list)) != NULL)
+ slsi_kfree_skb(skb);
+}
+
+#endif
+
+struct slsi_spinlock {
+ /* a std spinlock */
+ spinlock_t lock;
+ /* NOTE(review): flags is not used by any of the _bh lock helpers
+ * below — presumably kept for irqsave-style callers; confirm.
+ */
+ unsigned long flags;
+};
+
+/* Spinlock create can't fail, so return success regardless. */
+static inline void slsi_spinlock_create(struct slsi_spinlock *lock)
+{
+ spin_lock_init(&lock->lock);
+}
+
+/* Take the lock with bottom halves disabled (spin_lock_bh). */
+static inline void slsi_spinlock_lock(struct slsi_spinlock *lock)
+{
+ spin_lock_bh(&lock->lock);
+}
+
+/* Release a lock taken with slsi_spinlock_lock(). */
+static inline void slsi_spinlock_unlock(struct slsi_spinlock *lock)
+{
+ spin_unlock_bh(&lock->lock);
+}
+
+struct slsi_dev;
+/* An ordered-workqueue worker fed by an SKB queue. sync_ptr is an
+ * RCU-protected copy of sdev that is cleared on deinit so that late
+ * enqueues can be detected and rejected (see slsi_skb_work_enqueue_l /
+ * slsi_skb_work_deinit).
+ */
+struct slsi_skb_work {
+ struct slsi_dev *sdev;
+ struct net_device *dev; /* This can be NULL */
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ struct sk_buff_head queue;
+ void __rcu *sync_ptr;
+};
+
+/* Initialise @work: set owners, init the SKB queue and work item, and
+ * create a single-threaded ordered workqueue named @name running @func.
+ * Returns 0 on success or -ENOMEM if the workqueue cannot be allocated
+ * (sync_ptr is already set in that case; caller decides cleanup).
+ */
+static inline int slsi_skb_work_init(struct slsi_dev *sdev, struct net_device *dev, struct slsi_skb_work *work, const char *name, void (*func)(struct work_struct *work))
+{
+ rcu_assign_pointer(work->sync_ptr, (void *)sdev);
+ work->sdev = sdev;
+ work->dev = dev;
+ skb_queue_head_init(&work->queue);
+ INIT_WORK(&work->work, func);
+ work->workqueue = alloc_ordered_workqueue(name, 0);
+
+ if (!work->workqueue)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Kick the worker so it processes whatever is on work->queue. */
+static inline void slsi_skb_schedule_work(struct slsi_skb_work *work)
+{
+ queue_work(work->workqueue, &work->work);
+}
+
+/* Queue @skb for the worker and schedule it. The RCU read section and
+ * sync_ptr check guard against enqueueing after slsi_skb_work_deinit()
+ * has torn the worker down; in that case the SKB is freed and a WARN
+ * is emitted rather than leaking or touching a dead workqueue.
+ */
+static inline void slsi_skb_work_enqueue_l(struct slsi_skb_work *work, struct sk_buff *skb)
+{
+ void *sync_ptr;
+
+ rcu_read_lock();
+
+ sync_ptr = rcu_dereference(work->sync_ptr);
+
+ if (WARN_ON(!sync_ptr)) {
+ slsi_kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+ skb_queue_tail(&work->queue, skb);
+ slsi_skb_schedule_work(work);
+
+ rcu_read_unlock();
+}
+
+/* Pop the next SKB from the worker's queue, or NULL if it is empty. */
+static inline struct sk_buff *slsi_skb_work_dequeue_l(struct slsi_skb_work *work)
+{
+ return skb_dequeue(&work->queue);
+}
+
+/* Tear the worker down. Order matters: clear sync_ptr so new enqueues
+ * are rejected, synchronize_rcu() to wait out in-flight enqueuers,
+ * flush and destroy the workqueue, then purge any SKBs still queued.
+ * WARNs and bails if called twice (sync_ptr already NULL).
+ */
+static inline void slsi_skb_work_deinit(struct slsi_skb_work *work)
+{
+ rcu_read_lock();
+
+ if (WARN_ON(!work->sync_ptr)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_assign_pointer(work->sync_ptr, NULL);
+ rcu_read_unlock();
+
+ synchronize_rcu();
+ flush_workqueue(work->workqueue);
+ destroy_workqueue(work->workqueue);
+ work->workqueue = NULL;
+ slsi_skb_queue_purge(&work->queue);
+}
+
+/* Kernel-version compatibility shim: cfg80211_put_bss() gained a wiphy
+ * argument in v3.9, so select the right prototype at compile time.
+ */
+static inline void slsi_cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ cfg80211_put_bss(wiphy, bss);
+#else
+ cfg80211_put_bss(bss);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) */
+}
+
+#ifdef CONFIG_SCSC_WLAN_SKB_TRACKING
+/* Tracking wrapper: stamp the SKB with the caller's file/line marker
+ * before handing it to the real enqueue.
+ */
+static inline void slsi_skb_work_enqueue_f(struct slsi_skb_work *work, struct sk_buff *skb, const char *file, int line)
+{
+ slsi_dbg_track_skb_marker_f(skb, file, line);
+ slsi_skb_work_enqueue_l(work, skb);
+}
+
+/* Tracking wrapper: dequeue and, if an SKB was returned, stamp it with
+ * the caller's file/line marker for the SKB tracker.
+ */
+static inline struct sk_buff *slsi_skb_work_dequeue_f(struct slsi_skb_work *work, const char *file, int line)
+{
+ struct sk_buff *skb;
+
+ skb = slsi_skb_work_dequeue_l(work);
+ if (skb)
+ slsi_dbg_track_skb_marker_f(skb, file, line);
+ return skb;
+}
+
+#define slsi_skb_work_enqueue(work_, skb_) slsi_skb_work_enqueue_f(work_, skb_, __FILE__, __LINE__)
+#define slsi_skb_work_dequeue(work_) slsi_skb_work_dequeue_f(work_, __FILE__, __LINE__)
+#else
+#define slsi_skb_work_enqueue(work_, skb_) slsi_skb_work_enqueue_l(work_, skb_)
+#define slsi_skb_work_dequeue(work_) slsi_skb_work_dequeue_l(work_)
+#endif
+
+/* Set a MAC address (ETH_ALEN bytes) to all zeroes. */
+static inline void slsi_eth_zero_addr(u8 *addr)
+{
+ memset(addr, 0x00, ETH_ALEN);
+}
+
+/* Set a MAC address (ETH_ALEN bytes) to the broadcast address ff:..:ff. */
+static inline void slsi_eth_broadcast_addr(u8 *addr)
+{
+ memset(addr, 0xff, ETH_ALEN);
+}
+
+/* Parse a leading (optionally negative) decimal integer from @str into
+ * *@result and return the number of characters consumed (0 if @str does
+ * not start with a digit or '-'). Notes for reviewers:
+ * - a bare "-" consumes 1 char and leaves *result == 0;
+ * - there is no overflow guard, so very long digit runs overflow the
+ *   int accumulator (callers pass short, trusted strings).
+ */
+static inline int slsi_str_to_int(char *str, int *result)
+{
+ int i = 0;
+
+ *result = 0;
+ if ((str[i] == '-') || ((str[i] >= '0') && (str[i] <= '9'))) {
+ if (str[0] == '-')
+ i++;
+ while (str[i] >= '0' && str[i] <= '9') {
+ *result *= 10;
+ *result += (int)str[i++] - '0';
+ }
+
+ *result = ((str[0] == '-') ? (-(*result)) : *result);
+ }
+ return i;
+}
+
+#define P80211_OUI_LEN 3
+
+/* 802.2 LLC/SNAP header as carried at the front of an 802.11 MSDU. */
+struct ieee80211_snap_hdr {
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizational universal id */
+} __packed;
+
+/* Full MSDU header: 802.3-style addresses + length, then LLC/SNAP and
+ * the original EtherType (22 bytes total, see MSDU_HLEN).
+ */
+struct msdu_hdr {
+ unsigned char da[ETH_ALEN];
+ unsigned char sa[ETH_ALEN];
+ __be16 length;
+ struct ieee80211_snap_hdr snap;
+ __be16 ether_type;
+} __packed;
+
+#define ETHER_TYPE_SIZE 2
+#define MSDU_HLEN sizeof(struct msdu_hdr)
+/* Bytes of the MSDU header counted inside the 802.3 length field:
+ * SNAP + EtherType (8). */
+#define MSDU_LENGTH (sizeof(struct ieee80211_snap_hdr) + sizeof(__be16))
+
+/* Rewrite, in place, an SKB whose data starts with a struct msdu_hdr
+ * into a plain Ethernet-II frame: save DA/SA/EtherType, strip the
+ * 22-byte MSDU header, and push a 14-byte Ethernet header. Always
+ * returns 0. Assumes skb->data points at the MSDU header — TODO
+ * confirm callers guarantee this.
+ */
+static inline int slsi_skb_msdu_to_ethhdr(struct sk_buff *skb)
+{
+ struct ethhdr *eth;
+ struct msdu_hdr *msdu;
+
+ unsigned char da[ETH_ALEN];
+ unsigned char sa[ETH_ALEN];
+ __be16 proto;
+
+ msdu = (struct msdu_hdr *)skb->data;
+ SLSI_ETHER_COPY(da, msdu->da);
+ SLSI_ETHER_COPY(sa, msdu->sa);
+ proto = msdu->ether_type;
+
+ skb_pull(skb, MSDU_HLEN);
+
+ eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+
+ SLSI_ETHER_COPY(eth->h_dest, da);
+ SLSI_ETHER_COPY(eth->h_source, sa);
+ eth->h_proto = proto;
+
+ return 0;
+}
+
+/* Inverse of slsi_skb_msdu_to_ethhdr(): convert an Ethernet frame into
+ * an MSDU in place. Needs MSDU_HLEN - ETH_HLEN (8) bytes of headroom;
+ * returns -EINVAL if unavailable, otherwise 0. The new header grows
+ * over the old one, so the msdu_hdr and old ethhdr regions overlap:
+ * the da/sa copies and the snap write are ordered so each source byte
+ * is read before it can be overwritten — do not reorder these stores.
+ */
+static inline int slsi_skb_ethhdr_to_msdu(struct sk_buff *skb)
+{
+ struct ethhdr *eth;
+ struct msdu_hdr *msdu;
+ unsigned int len;
+ __be16 ether_type;
+
+ if (skb_headroom(skb) < (MSDU_HLEN - ETH_HLEN))
+ return -EINVAL;
+
+ eth = eth_hdr(skb);
+ ether_type = eth->h_proto;
+
+ len = skb->len;
+
+ skb_pull(skb, ETH_HLEN);
+
+ msdu = (struct msdu_hdr *)skb_push(skb, MSDU_HLEN);
+
+ SLSI_ETHER_COPY(msdu->da, eth->h_dest);
+ SLSI_ETHER_COPY(msdu->sa, eth->h_source);
+ msdu->length = htons(len - ETH_HLEN + MSDU_LENGTH);
+ memcpy(&msdu->snap, rfc1042_header, sizeof(struct ieee80211_snap_hdr));
+ msdu->ether_type = ether_type;
+
+ return 0;
+}
+
+/* Compute center_freq1 (MHz) from the firmware channel info word.
+ * Low byte of @chann_info is the bandwidth (40/80 MHz); the high byte
+ * is presumably the primary-channel index within the wide channel —
+ * TODO confirm against the HIP spec. Returns 0 for widths this build
+ * does not handle (80 MHz only on kernels >= 3.10.9).
+ */
+static inline u32 slsi_get_center_freq1(struct slsi_dev *sdev, u16 chann_info, u16 center_freq)
+{
+ u32 center_freq1 = 0x0000;
+
+ SLSI_UNUSED_PARAMETER(sdev);
+
+ switch (chann_info & 0xFF) {
+ case 40:
+ center_freq1 = center_freq - 20 * ((chann_info & 0xFF00) >> 8) + 10;
+ break;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 9))
+ case 80:
+ center_freq1 = center_freq - 20 * ((chann_info & 0xFF00) >> 8) + 30;
+ break;
+#endif
+ default:
+ break;
+ }
+ return center_freq1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SLSI_UTILS_H__ */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd and its Licensors.
+ * All rights reserved.
+ *
+ ****************************************************************************/
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include "dev.h"
+#include "debug.h"
+#include "wakelock.h"
+#include "utils.h"
+
+/* Take a reference on @lock: the underlying wake_lock is acquired only
+ * on the 0 -> 1 counter transition. Counter updates are protected by
+ * wl_spinlock (irqsave, callable from any context). No-op when the
+ * kernel lacks CONFIG_WAKELOCK.
+ */
+void slsi_wakelock(struct slsi_wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK
+ unsigned long flags;
+
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to take wakelock, lock is NULL");
+ return;
+ }
+ spin_lock_irqsave(&lock->wl_spinlock, flags);
+ if (!lock->counter)
+ wake_lock(&lock->wl);
+
+ lock->counter++;
+ spin_unlock_irqrestore(&lock->wl_spinlock, flags);
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+#endif
+}
+
+/* Drop a reference on @lock: the wake_lock is released only when the
+ * counter reaches 0. An unbalanced unlock (counter already 0) is
+ * logged rather than underflowing the counter.
+ */
+void slsi_wakeunlock(struct slsi_wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK
+ unsigned long flags;
+
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to unlock the wakelock, lock is NULL");
+ return;
+ }
+ spin_lock_irqsave(&lock->wl_spinlock, flags);
+
+ if (lock->counter) {
+ lock->counter--;
+ if (!lock->counter)
+ wake_unlock(&lock->wl);
+ } else {
+ SLSI_ERR_NODEV("Wakelock has already released!");
+ }
+ spin_unlock_irqrestore(&lock->wl_spinlock, flags);
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+#endif
+}
+
+/* Hold the wakelock for @timeout milliseconds (auto-releasing).
+ * NOTE(review): the counter is forced to 1 WITHOUT taking wl_spinlock,
+ * unlike slsi_wakelock()/slsi_wakeunlock(); a concurrent lock/unlock
+ * can race with this write — confirm intended usage is exclusive.
+ */
+void slsi_wakelock_timeout(struct slsi_wake_lock *lock, int timeout)
+{
+#ifdef CONFIG_WAKELOCK
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to take wakelock timeout, lock is NULL");
+ return;
+ }
+ lock->counter = 1;
+ wake_lock_timeout(&lock->wl, msecs_to_jiffies(timeout));
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+ SLSI_UNUSED_PARAMETER(timeout);
+#endif
+}
+
+/* Return 1 if the wakelock is currently held, 0 otherwise (including
+ * NULL @lock and builds without CONFIG_WAKELOCK).
+ */
+int slsi_is_wakelock_active(struct slsi_wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to check wakelock status, lock is NULL");
+ return 0;
+ }
+
+ if (wake_lock_active(&lock->wl))
+ return 1;
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+#endif
+ return 0;
+}
+
+/* Destroy the underlying wake_lock; pair with slsi_wakelock_init(). */
+void slsi_wakelock_exit(struct slsi_wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to destroy the wakelock, lock is NULL");
+ return;
+ }
+
+ wake_lock_destroy(&lock->wl);
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+#endif
+}
+
+/* Initialise @lock: zero the refcount, register a WAKE_LOCK_SUSPEND
+ * wakelock named @name and init the spinlock guarding the counter.
+ */
+void slsi_wakelock_init(struct slsi_wake_lock *lock, char *name)
+{
+#ifdef CONFIG_WAKELOCK
+ if (lock == NULL) {
+ SLSI_ERR_NODEV("Failed to init the wakelock, lock is NULL");
+ return;
+ }
+ lock->counter = 0;
+ wake_lock_init(&lock->wl, WAKE_LOCK_SUSPEND, name);
+ spin_lock_init(&lock->wl_spinlock);
+#else
+ SLSI_UNUSED_PARAMETER(lock);
+ SLSI_UNUSED_PARAMETER(name);
+#endif
+}
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. and its Licensors.
+ * All rights reserved.
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_WAKELOCK_H__
+#define __SLSI_WAKELOCK_H__
+
+#ifdef CONFIG_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#include <linux/spinlock.h>
+
+/* Reference-counted wrapper around an Android wakelock; the wl member
+ * only exists when CONFIG_WAKELOCK is set, the counter/spinlock pair
+ * is always present so the API compiles either way.
+ */
+struct slsi_wake_lock {
+#ifdef CONFIG_WAKELOCK
+ struct wake_lock wl;
+#endif
+ /* Spinlock to synchronize the access of the counter */
+ spinlock_t wl_spinlock;
+ int counter;
+};
+
+void slsi_wakelock(struct slsi_wake_lock *lock);
+void slsi_wakeunlock(struct slsi_wake_lock *lock);
+void slsi_wakelock_timeout(struct slsi_wake_lock *lock, int timeout);
+int slsi_is_wakelock_active(struct slsi_wake_lock *lock);
+void slsi_wakelock_exit(struct slsi_wake_lock *lock);
+void slsi_wakelock_init(struct slsi_wake_lock *lock, char *name);
+#endif
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ * WLAN result codes
+ *
+ ****************************************************************************/
+
+#ifndef WL_RESULT_H__
+#define WL_RESULT_H__
+
+#define CsrResult u16
+
+#define CSR_RESULT_SUCCESS ((CsrResult)0x0000)
+#define CSR_RESULT_FAILURE ((CsrResult)0xffff)
+
+#define CSR_WIFI_HIP_RESULT_INVALID_VALUE ((CsrResult)1) /* Invalid argument value */
+#define CSR_WIFI_HIP_RESULT_NO_DEVICE ((CsrResult)2) /* The specified device is no longer present */
+#define CSR_WIFI_HIP_RESULT_NO_SPACE ((CsrResult)3) /* A queue or buffer is full */
+#define CSR_WIFI_HIP_RESULT_NO_MEMORY ((CsrResult)4) /* Fatal error, no memory */
+#define CSR_WIFI_HIP_RESULT_RANGE ((CsrResult)5) /* Request exceeds the range of a file or a buffer */
+#define CSR_WIFI_HIP_RESULT_NOT_FOUND ((CsrResult)6) /* A file (typically a f/w patch) is not found */
+#define CSR_WIFI_PS_RESULT_FULL ((CsrResult)7) /* Signal successfully queued, queue now full. */
+#define CSR_WIFI_PS_QSIG_NONE ((CsrResult)8) /* no qsig found, queue empty */
+#define CSR_WIFI_PS_QSIG_NO_SPACE ((CsrResult)9) /* no room for outstanding qsigs */
+#define CSR_WIFI_PS_QS_PAUSED_UNTIL_BOT_UPDATE ((CsrResult)10) /* BOT has disabled the qset and waiting for updates from the firmware */
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * FILE
+ * asmhcp.h - ANT Shared Memory Host Controller Protocol
+ *
+ * DESCRIPTION
+ * This file specifies the layout of the ANT Shared Memory
+ * Host Controller Protocol
+ */
+
+#ifndef __ASMHCP_H__
+#define __ASMHCP_H__
+
+#define ASMHCP_TRANSFER_RING_DATA_SIZE (16)
+#define ASMHCP_TRANSFER_RING_CMD_SIZE (16)
+
+#define ASMHCP_BUFFER_SIZE (258)
+
+/* One transfer descriptor: explicit length plus fixed-size payload. */
+struct ASMHCP_TD_CONTROL {
+ uint16_t length;
+ uint8_t data[ASMHCP_BUFFER_SIZE];
+};
+
+/* Shared-memory header; split into an AP-writable and a controller
+ * (R4)-writable region as the comments below indicate. Offsets in the
+ * trailing comments are byte offsets within the header.
+ */
+struct ASMHCP_HEADER {
+ /* AP RW - R4 RO - 64 octets */
+ uint32_t magic_value; /* 0x00 */
+ uint32_t mailbox_data_ctr_driv_read; /* 0x04 */
+ uint32_t mailbox_data_driv_ctr_write; /* 0x08 */
+ uint32_t mailbox_cmd_ctr_driv_read; /* 0x0C */
+ uint32_t mailbox_cmd_driv_ctr_write; /* 0x10 */
+ uint16_t ap_to_bg_int_src; /* 0x14 */
+ uint16_t bg_to_ap_int_src; /* 0x16 */
+ uint32_t mxlog_filter; /* 0x18 */
+ uint32_t firmware_control; /* 0x1C */
+ uint8_t reserved1[0x20]; /* 0x20 */
+
+ /* AP RO - R4 RW - 64 octets */
+ uint32_t mailbox_cmd_driv_ctr_read; /* 0x40 */
+ uint32_t mailbox_cmd_ctr_driv_write; /* 0x44 */
+ uint32_t mailbox_data_driv_ctr_read; /* 0x48 */
+ uint32_t mailbox_data_ctr_driv_write; /* 0x4C */
+ uint32_t firmware_features; /* 0x50 */
+ uint16_t panic_deathbed_confession; /* 0x54 */
+ uint8_t reserved2[0x2A]; /* 0x56 */
+};
+
+/* Full shared-memory layout: header followed by the four transfer
+ * rings (controller->driver cmd/data, then driver->controller
+ * cmd/data), with 32-byte padding between the directions.
+ */
+struct ASMHCP_PROTOCOL {
+ /* header offset: 0x00000000 */
+ volatile struct ASMHCP_HEADER header;
+ /* from controller */
+ struct ASMHCP_TD_CONTROL /* offset: 0x00000080 */
+ cmd_controller_driver_transfer_ring[ASMHCP_TRANSFER_RING_CMD_SIZE];
+ struct ASMHCP_TD_CONTROL /* offset: 0x000008A0 */
+ data_controller_driver_transfer_ring[ASMHCP_TRANSFER_RING_DATA_SIZE];
+
+ /* Padding used to ensure minimum 32 octets between sections */
+ uint8_t reserved[0x20]; /* offset: 0x000010C0 */
+
+ /* from driver */
+ struct ASMHCP_TD_CONTROL /* offset: 0x000010E0 */
+ cmd_driver_controller_transfer_ring[ASMHCP_TRANSFER_RING_CMD_SIZE];
+ struct ASMHCP_TD_CONTROL /* offset: 0x00001900 */
+ data_driver_controller_transfer_ring[ASMHCP_TRANSFER_RING_DATA_SIZE];
+};
+
+#define ASMHCP_PROTOCOL_MAGICVALUE \
+ ((ASMHCP_TRANSFER_RING_DATA_SIZE | (ASMHCP_TRANSFER_RING_CMD_SIZE << 4) | \
+ (offsetof(struct ASMHCP_PROTOCOL, cmd_driver_controller_transfer_ring) << 15)) ^ \
+ sizeof(struct ASMHCP_PROTOCOL))
+
+#endif /* __ASMHCP_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * FILE
+ * bhcs.h - Bluetooth Host Configuration Structure
+ *
+ * DESCRIPTION
+ * This file specifies the layout of the Bluetooth Host Configuration
+ * Structure. The structure is written by the host and passed to the
+ * firmware as an argument to the service start callback function in
+ * the form of an offset that must be converted to a local address.
+ *
+ * ASSUMPTIONS
+ *   The host and the firmware have the same endianness.
+ *   The ABI on the host and the firmware results in the same memory
+ *   layout of the defined structure.
+ *
+ */
+
+#ifndef BHCS_H__
+#define BHCS_H__
+
+/* The version of the BHCS structure. Must be written to the version field
+ * by the host and confirmed to match the define by the firmware. Increment
+ * the version when changing the layout of the structure. This also serves
+ * as a rudimentary endianness check. */
+#define BHCS_VERSION 2
+
+/* Host-written configuration block handed to the BT firmware at service
+ * start; all offsets are relative to the shared memory region and the
+ * firmware validates version against BHCS_VERSION.
+ */
+struct BHCS {
+ uint32_t version; /* BHCS_VERSION */
+ uint32_t bsmhcp_protocol_offset; /* BSMHCP_PROTOCOL structure offset */
+ uint32_t bsmhcp_protocol_length; /* BSMHCP_PROTOCOL structure length */
+ uint32_t configuration_offset; /* Binary configuration data offset */
+ uint32_t configuration_length; /* Binary configuration data length */
+ uint32_t bluetooth_address_lap; /* Lower Address Part 00..23 */
+ uint8_t bluetooth_address_uap; /* Upper Address Part 24..31 */
+ uint16_t bluetooth_address_nap; /* Non-significant 32..47 */
+};
+
+#endif /* BHCS_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * FILE
+ * bsmhcp.h - Bluetooth Shared Memory Host Controller Protocol
+ *
+ * DESCRIPTION
+ * This file specifies the layout of the Bluetooth Shared Memory
+ * Host Controller Protocol as described in SC-505753-DD
+ */
+
+#ifndef __BSMHCP_H__
+#define __BSMHCP_H__
+
+#define BSMHCP_TRANSFER_RING_CMD_SIZE (8)
+#define BSMHCP_TRANSFER_RING_EVT_SIZE (32)
+#define BSMHCP_TRANSFER_RING_ACL_SIZE (32)
+#define BSMHCP_TRANSFER_RING_AVDTP_SIZE (16)
+#define BSMHCP_TRANSFER_RING_IQ_REPORT_SIZE (8)
+
+/* Of the buffers in BSMHCP_TRANSFER_RING_ACL_SIZE, reserve a number for ULP
+ * operation. */
+#define BSMHCP_TRANSFER_RING_ACL_ULP_RESERVED (4)
+#define BSMHCP_TRANSFER_RING_SCO_SIZE (0)
+#define BSMHCP_TRANSFER_RING_ACL_COUNT (36)
+#define BSMHCP_TRANSFER_RING_TIMING_COUNT (64)
+
+#define BSMHCP_CMD_EVT_BUFFER_SIZE (258)
+#define BSMHCP_ACL_BUFFER_SIZE (1024)
+#define BSMHCP_IQ_REPORT_BUFFER_SIZE (164)
+
+#define BSMHCP_ACL_PACKET_SIZE (1021)
+#define BSMHCP_ULP_PACKET_SIZE BSMHCP_ACL_PACKET_SIZE
+#define BSMHCP_SCO_PACKET_SIZE (0)
+
+#define BSMHCP_DATA_BUFFER_CMD_SIZE \
+ BSMHCP_TRANSFER_RING_CMD_SIZE
+#define BSMHCP_DATA_BUFFER_EVT_SIZE \
+ BSMHCP_TRANSFER_RING_EVT_SIZE
+#define BSMHCP_DATA_BUFFER_TX_ACL_SIZE \
+ (BSMHCP_TRANSFER_RING_ACL_SIZE - 2)
+#define BSMHCP_DATA_BUFFER_RX_ACL_SIZE \
+ BSMHCP_TRANSFER_RING_ACL_SIZE
+
+#define BSMHCP_EVENT_TYPE_NONE (0x00)
+#define BSMHCP_EVENT_TYPE_CONNECTED (0x01)
+#define BSMHCP_EVENT_TYPE_DISCONNECTED (0x02)
+
+#define BSMHCP_ACL_BC_FLAG_BCAST_NON (0x00)
+#define BSMHCP_ACL_BC_FLAG_BCAST_ACTIVE (0x40)
+#define BSMHCP_ACL_BC_FLAG_BCAST_ALL (0x80)
+#define BSMHCP_ACL_BC_FLAG_BCAST_RSVD (0xC0)
+#define BSMHCP_ACL_BC_FLAG_BCAST_MASK (0xC0)
+
+#define BSMHCP_ACL_PB_FLAG_START_NONFLUSH (0x00)
+#define BSMHCP_ACL_PB_FLAG_CONT (0x10)
+#define BSMHCP_ACL_PB_FLAG_START_FLUSH (0x20)
+#define BSMHCP_ACL_PB_FLAG_RSVD_3 (0x30)
+#define BSMHCP_ACL_PB_FLAG_MASK (0x30)
+
+#define BSMHCP_ACL_L2CAP_FLAG_NON (0x00)
+#define BSMHCP_ACL_L2CAP_FLAG_END (0x01)
+#define BSMHCP_ACL_L2CAP_FLAG_MASK (0x01)
+
+#define BSMHCP_SERVICE_BT_STATE_INACTIVE (0x00)
+#define BSMHCP_SERVICE_BT_STATE_ACTIVE (0x01)
+
+#define BSMHCP_CONTROLLER_STATE_ACTIVE (0x00000001)
+
+#define BSMHCP_HCI_CONNECTION_HANDLE_LOOPBACK (0x4000)
+
+#define BSMHCP_ALIGNMENT (32)
+
+#define BSMHCP_FEATURE_LPA2DP (0x00000001)
+#define BSMHCP_FEATURE_M4_INTERRUPTS (0x00000002)
+#define BSMHCP_FEATURE_FW_INFORMATION (0x00000004)
+#define BSMHCP_FEATURE_AVDTP_TRANSFER_RING (0x00000008)
+
+#define BSMHCP_CONTROL_START_PANIC (0x10DEAD01)
+#define BSMHCP_CONTROL_STOP_PANIC (0x0201DEAD)
+#define BSMHCP_CONTROL_CONNECTION_PANIC (0xDEAD2002)
+
+#define BSMHCP_FW_INFO_USER_DEFINED_COUNT (96)
+
+#define BSMHCP_TIMING_SOURCE_ITIME (0)
+#define BSMHCP_TIMING_SOURCE_ITIME_L1 (1)
+#define BSMHCP_TIMING_SOURCE_RADIO_TX (2)
+#define BSMHCP_TIMING_SOURCE_RADIO_RX (3)
+#define BSMHCP_TIMING_SOURCE_RADIO_LC (4)
+#define BSMHCP_TIMING_SOURCE_COUNT (5)
+
+#define BSMHCP_INCREASE_INDEX(index, limit) \
+ ((index) = ((index) + 1) % (limit))
+
+#define BSMHCP_HAS_ROOM(write, read, limit) \
+ ((((write) + 1) % (limit)) != (read))
+
+/* HCI command descriptor (host -> controller). */
+struct BSMHCP_TD_CONTROL {
+ uint16_t length;
+ uint8_t data[BSMHCP_CMD_EVT_BUFFER_SIZE];
+};
+
+/* HCI event descriptor (controller -> host). */
+struct BSMHCP_TD_HCI_EVT {
+ uint16_t length;
+ uint16_t hci_connection_handle;
+ uint16_t event_type;
+ uint8_t data[BSMHCP_CMD_EVT_BUFFER_SIZE];
+};
+
+/* Received ACL data descriptor with embedded payload buffer. */
+struct BSMHCP_TD_ACL_RX {
+ uint16_t hci_connection_handle;
+ uint16_t length;
+ uint8_t broadcast_flag;
+ uint8_t packet_boundary;
+ uint8_t disconnected;
+ uint8_t reserved;
+ uint8_t data[BSMHCP_ACL_BUFFER_SIZE];
+};
+
+/* Outgoing ACL data descriptor; payload lives in acl_tx_buffer and is
+ * referenced by buffer_index.
+ */
+struct BSMHCP_TD_ACL_TX_DATA {
+ uint16_t length;
+ uint8_t buffer_index;
+ uint8_t flags;
+ uint16_t hci_connection_handle;
+ uint16_t l2cap_cid;
+};
+
+/* Controller notification that a TX buffer index may be reused. */
+struct BSMHCP_TD_ACL_TX_FREE {
+ uint8_t buffer_index;
+ uint8_t reserved;
+ uint16_t hci_connection_handle;
+};
+
+/* AVDTP (A2DP stream) detection notification. */
+struct BSMHCP_TD_AVDTP {
+ uint32_t flags;
+ uint16_t l2cap_cid;
+ uint16_t hci_connection_handle;
+ uint32_t reserved;
+};
+
+/* Driver-owned ring indices for the ACL rings. */
+struct BSMHCP_ACL_TR_DRV_INDEX {
+ uint32_t read_free;
+ uint32_t write_data;
+};
+
+/* Controller-owned ring indices for the ACL rings. */
+struct BSMHCP_ACL_TR_CTRL_INDEX {
+ uint32_t read_data;
+ uint32_t write_free;
+};
+
+/* Generic read/write index pair for a transfer ring. */
+struct BSMHCP_INDEX {
+ uint32_t read;
+ uint32_t write;
+};
+
+/* Firmware timing instrumentation sample. */
+struct BSMHCP_TIMING_PACKET {
+ uint16_t source;
+ uint16_t sequence_number;
+ uint32_t interrupt_enter;
+ uint32_t critical_section_enter;
+ uint32_t time[4];
+ uint32_t critical_section_leave;
+ uint32_t interrupt_leave;
+};
+
+/* Firmware statistics block exported to the host. */
+struct BSMHCP_FW_INFO {
+ uint32_t r4_from_ap_interrupt_count;
+ uint32_t m4_from_ap_interrupt_count;
+ uint32_t r4_to_ap_interrupt_count;
+ uint32_t m4_to_ap_interrupt_count;
+ uint32_t bt_deep_sleep_time_total;
+ uint32_t bt_deep_sleep_wakeup_duration;
+ uint32_t sched_n_messages;
+ uint32_t user_defined_count;
+ uint32_t user_defined[BSMHCP_FW_INFO_USER_DEFINED_COUNT];
+};
+
+/* LE connectionless/connection IQ sample report event descriptor. */
+struct BSMHCP_TD_IQ_REPORTING_EVT {
+ uint8_t subevent_code;
+ uint8_t packet_status;
+ uint16_t connection_handle;
+ uint16_t sync_handle;
+ uint8_t rx_phy;
+ uint8_t channel_index;
+ int16_t rssi;
+ uint8_t rssi_antenna_id;
+ uint8_t cte_type;
+ uint8_t slot_durations;
+ uint8_t sample_count;
+ uint16_t event_count;
+ uint8_t data[BSMHCP_IQ_REPORT_BUFFER_SIZE];
+};
+
+/* Shared-memory protocol header. Ownership of each region is encoded
+ * in the section comments (AP = host driver, R4/M4 = firmware cores);
+ * trailing comments are byte offsets within the header.
+ */
+struct BSMHCP_HEADER {
+ /* AP RW - M4/R4 RO - 64 octets */
+ uint32_t magic_value; /* 0x00 */
+ uint16_t ap_to_fg_m4_int_src; /* 0x04 */
+ uint8_t service_request; /* 0x06 */
+ uint8_t reserved1; /* 0x07 */
+ uint32_t acl_buffer_size; /* 0x08 */
+ uint32_t cmd_evt_buffer_size; /* 0x0C */
+ uint32_t acl_tx_buffers; /* 0x10 */
+ uint16_t ap_to_bg_int_src; /* 0x14 */
+ uint16_t ap_to_fg_int_src; /* 0x16 */
+ uint16_t bg_to_ap_int_src; /* 0x18 */
+ uint16_t fg_to_ap_int_src; /* 0x1A */
+ uint32_t mailbox_offset; /* 0x1C */
+ uint32_t reserved1_u32; /* 0x20 */
+ uint32_t mailbox_hci_cmd_write; /* 0x24 */
+ uint32_t mailbox_hci_evt_read; /* 0x28 */
+ uint32_t mailbox_acl_tx_write; /* 0x2C */
+ uint32_t mailbox_acl_free_read; /* 0x30 */
+ uint32_t mailbox_acl_rx_read; /* 0x34 */
+ uint32_t abox_offset; /* 0x38 */
+ uint32_t abox_length; /* 0x3C */
+
+ /* AP RO - R4 RW - M4 NA - 32 octets */
+ uint16_t panic_deathbed_confession; /* 0x40 */
+ uint16_t panic_diatribe_value; /* 0x42 */
+ uint32_t mailbox_hci_cmd_read; /* 0x44 */
+ uint32_t mailbox_hci_evt_write; /* 0x48 */
+ uint32_t controller_flags; /* 0x4C */
+ uint32_t firmware_features; /* 0x50 */
+ uint16_t reserved_u16; /* 0x54 */
+ uint8_t service_state; /* 0x56 */
+ uint8_t reserved_u8; /* 0x57 */
+ uint32_t reserved4_u32; /* 0x58 */
+ uint32_t mailbox_avdtp_read; /* 0x5C */
+
+ /* AP RO - R4 NA - M4 RW - 32 octets */
+ uint32_t mailbox_acl_tx_read; /* 0x60 */
+ uint32_t mailbox_acl_free_write; /* 0x64 */
+ uint32_t mailbox_acl_rx_write; /* 0x68 */
+ uint32_t mailbox_timing_write; /* 0x6C */
+ uint32_t mailbox_iq_report_write; /* 0x70 */
+ uint8_t reserved4[0x0C]; /* 0x74 */
+
+
+ /* AP RO - R4/M4 RW - 32 octets */
+ uint32_t reserved6_u32; /* 0x80 */
+ uint8_t reserved5[0x1C]; /* 0x84 */
+
+ /* AP RW - M4/R4 RO */
+ uint32_t mailbox_timing_read; /* 0xA0 */
+ uint32_t mailbox_avdtp_write; /* 0xA4 */
+ uint32_t mailbox_iq_report_read; /* 0xA8 */
+ uint32_t reserved9_u32; /* 0xAC */
+ uint32_t reserved10_u32; /* 0xB0 */
+ uint32_t reserved11_u32; /* 0xB4 */
+ uint32_t reserved12_u32; /* 0xB8 */
+ uint16_t info_ap_to_bg_int_src; /* 0xBC */
+ uint16_t info_bg_to_ap_int_src; /* 0xBE */
+ uint32_t mxlog_filter; /* 0xC0 */
+ uint32_t firmware_control; /* 0xC4 */
+ uint8_t reserved6[0x24]; /* 0xC8 */
+
+ /* Obsolete region - not used */
+ uint32_t smm_debug_read; /* 0xEC */
+ uint32_t smm_debug_write; /* 0xF0 */
+ uint32_t smm_exception; /* 0xF4 */
+ uint32_t avdtp_detect_stream_id; /* 0xF8 */
+ uint32_t smm_int_ready; /* 0xFC */
+};
+
+/* Full BT shared-memory layout: header, host->controller rings and TX
+ * payload buffers, then controller->host rings; a 32-byte pad keeps
+ * the two directions in separate cache-line regions.
+ */
+struct BSMHCP_PROTOCOL {
+ /* header offset: 0x00000000 */
+ volatile struct BSMHCP_HEADER header;
+
+ /* from AP */
+ struct BSMHCP_TD_CONTROL /* offset: 0x00000100 */
+ hci_cmd_transfer_ring[BSMHCP_TRANSFER_RING_CMD_SIZE];
+
+ /* AVDTP detection */
+ struct BSMHCP_TD_AVDTP /* offset: 0x00000920 */
+ avdtp_transfer_ring[BSMHCP_TRANSFER_RING_AVDTP_SIZE];
+
+ uint8_t /* offset: 0x000009E0 */
+ to_air_reserved[0x1FC0];
+
+ struct BSMHCP_TD_ACL_TX_DATA /* offset: 0x000029A0 */
+ acl_tx_data_transfer_ring[BSMHCP_TRANSFER_RING_ACL_SIZE];
+
+ uint8_t /* offset: 0x00002AA0 */
+ acl_tx_buffer[BSMHCP_DATA_BUFFER_TX_ACL_SIZE]
+ [BSMHCP_ACL_BUFFER_SIZE];
+
+ /* Padding used to ensure minimum 32 octets between sections */
+ uint8_t reserved[0x20]; /* offset: 0x0000A2A0 */
+
+ /* to AP */
+ struct BSMHCP_TD_HCI_EVT /* offset: 0x0000A2C0 */
+ hci_evt_transfer_ring[BSMHCP_TRANSFER_RING_EVT_SIZE];
+
+ struct BSMHCP_TIMING_PACKET /* offset: 0x0000C3C0 */
+ timing_transfer_ring[BSMHCP_TRANSFER_RING_TIMING_COUNT];
+
+ struct BSMHCP_FW_INFO /* offset: 0x0000CCC0 */
+ information;
+
+ uint8_t /* offset: 0x0000CCC0 + sizeof(struct BSMHCP_FW_INFO) */
+ from_air_reserved[0x11E0 - sizeof(struct BSMHCP_FW_INFO)];
+
+ struct BSMHCP_TD_IQ_REPORTING_EVT /* offset: 0x0000DEA0 */
+ iq_reporting_transfer_ring[BSMHCP_TRANSFER_RING_IQ_REPORT_SIZE];
+
+ struct BSMHCP_TD_ACL_RX /* offset: 0x0000E440 */
+ acl_rx_transfer_ring[BSMHCP_TRANSFER_RING_ACL_SIZE];
+
+ struct BSMHCP_TD_ACL_TX_FREE /* offset: 0x00016540 */
+ acl_tx_free_transfer_ring[BSMHCP_TRANSFER_RING_ACL_SIZE];
+};
+
+#define BSMHCP_TD_ACL_RX_CONTROL_SIZE \
+ (sizeof(struct BSMHCP_TD_ACL_RX) - BSMHCP_ACL_BUFFER_SIZE)
+
+#define BSMHCP_PROTOCOL_MAGICVALUE \
+ ((BSMHCP_ACL_BUFFER_SIZE | BSMHCP_CMD_EVT_BUFFER_SIZE | \
+ (offsetof(struct BSMHCP_PROTOCOL, acl_tx_buffer) << 15)) ^ \
+ sizeof(struct BSMHCP_PROTOCOL))
+
+#endif /* __BSMHCP_H__ */
--- /dev/null
+#ifndef _BT_AUDIO_H
+#define _BT_AUDIO_H
+
+/* Version number */
+#define SCSC_BT_AUDIO_ABOX_VERSION_MAJOR (0x01)
+#define SCSC_BT_AUDIO_ABOX_VERSION_MINOR (0x01)
+
+/* The A-Box uses an ARM Cortex-A7 with a 64-byte cache line whereas
+ * the WLBT uses an ARM Cortex-R4 with a 32-byte cache line. The data
+ * needs to be aligned to the larger cache line.
+ */
+#define SCSC_BT_AUDIO_ABOX_DCACHE_LINE_WIDTH (64)
+
+/* kernel page size used for memory alignment */
+#define SCSC_BT_AUDIO_PAGE_SIZE (PAGE_SIZE)
+
+/* Total size of the shared memory in one direction */
+#define SCSC_BT_AUDIO_ABOX_DATA_SIZE (128 * SCSC_BT_AUDIO_ABOX_DCACHE_LINE_WIDTH)
+
+/* Size of the buffer for each interface */
+#define SCSC_BT_AUDIO_ABOX_IF_0_SIZE (10 * SCSC_BT_AUDIO_ABOX_DCACHE_LINE_WIDTH)
+#define SCSC_BT_AUDIO_ABOX_IF_1_SIZE (10 * SCSC_BT_AUDIO_ABOX_DCACHE_LINE_WIDTH)
+
+/* Feature mask */
+#define SCSC_BT_AUDIO_FEATURE_STREAMING_IF_0 (0x00000001)
+#define SCSC_BT_AUDIO_FEATURE_STREAMING_IF_1 (0x00000002)
+#define SCSC_BT_AUDIO_FEATURE_MESSAGING (0x00000004)
+#define SCSC_BT_AUDIO_FEATURE_A2DP_OFFLOAD (0x00000008)
+
+/* Shared-memory block between the BT firmware (R4) and the A-Box audio
+ * DSP; sections are grouped by writer and padded to the 64-byte A-Box
+ * cache line (see SCSC_BT_AUDIO_ABOX_DCACHE_LINE_WIDTH above).
+ */
+struct scsc_bt_audio_abox {
+ /* AP RW - BT R4 RO - ABOX RO - 128 octets */
+
+ /* header */
+ uint32_t magic_value;
+ uint16_t version_major;
+ uint16_t version_minor;
+
+ /* align to cache line (32 bytes) */
+ uint8_t reserved1[0x18];
+
+ /* streaming interface 0 */
+ uint32_t abox_to_bt_streaming_if_0_size;
+
+ /* offset in abox_to_bt_streaming_if_data */
+ uint32_t abox_to_bt_streaming_if_0_offset;
+
+ uint32_t bt_to_abox_streaming_if_0_size;
+
+ /* offset in bt_to_abox_streaming_if_data */
+ uint32_t bt_to_abox_streaming_if_0_offset;
+
+ /* streaming interface 1 */
+ uint32_t abox_to_bt_streaming_if_1_size;
+
+ /* offset in abox_to_bt_streaming_if_data */
+ uint32_t abox_to_bt_streaming_if_1_offset;
+
+ uint32_t bt_to_abox_streaming_if_1_size;
+
+ /* offset in bt_to_abox_streaming_if_data */
+ uint32_t bt_to_abox_streaming_if_1_offset;
+
+ /* reserved room for additional AP information (64 bytes) */
+ uint8_t reserved2[0x40];
+
+ /* AP RO - BT R4 RO - ABOX RW - 64 octets */
+ uint32_t abox_fw_features;
+
+ /* BTWLAN audio and ABOX firmware may start at different time
+ * and a number of interrupts may be already triggered by ABOX
+ * firmware before BTWLAN audio can process them causing
+ * misalignment between the two systems (e.g. both accessing
+ * the same buffer at the same time). The fields below provide
+ * information about which half of the double buffer the ABOX
+ * firmware is processing using 0/1.
+ * filled by ABOX firmware at each interrupt (read/write) and
+ * initialised to 0 by BT driver.
+ */
+ uint32_t bt_to_abox_streaming_if_0_current_index;
+ uint32_t abox_to_bt_streaming_if_0_current_index;
+ uint32_t bt_to_abox_streaming_if_1_current_index;
+ uint32_t abox_to_bt_streaming_if_1_current_index;
+
+ /* align to cache line (64 bytes) */
+ uint8_t reserved3[0x2C];
+
+ /* AP RO - BT R4 RW - ABOX RO - 64 octets */
+ uint32_t bt_fw_features;
+
+ /* sample rate (Hz) of the streaming interfaces */
+ uint32_t streaming_if_0_sample_rate;
+ uint32_t streaming_if_1_sample_rate;
+
+ uint8_t reserved4[0x34];
+
+ /* payload */
+
+ /* AP RO - BT R4 RO - ABOX RW - multiple of 64 octets */
+ uint8_t abox_to_bt_streaming_if_data[SCSC_BT_AUDIO_ABOX_DATA_SIZE];
+
+ /* AP RO - BT R4 RW - ABOX RO - multiple of 64 octets */
+ uint8_t bt_to_abox_streaming_if_data[SCSC_BT_AUDIO_ABOX_DATA_SIZE];
+};
+
+/* Magic value */
+#define SCSC_BT_AUDIO_ABOX_MAGIC_VALUE \
+ (((offsetof(struct scsc_bt_audio_abox, abox_to_bt_streaming_if_0_size) << 20) | \
+ (offsetof(struct scsc_bt_audio_abox, bt_to_abox_streaming_if_0_current_index) << 10) | \
+ offsetof(struct scsc_bt_audio_abox, abox_to_bt_streaming_if_data)) ^ \
+ 0xBA12EF82)
+
+/* Per-registration BT-audio context: virtual/physical views of the
+ * shared ABOX block plus the registrant's IOMMU map/unmap callbacks.
+ */
+struct scsc_bt_audio {
+ struct device *dev;
+ struct scsc_bt_audio_abox *abox_virtual;
+ struct scsc_bt_audio_abox *abox_physical;
+ int (*dev_iommu_map)(struct device *, phys_addr_t, size_t);
+ void (*dev_iommu_unmap)(struct device *, size_t);
+};
+
+/* Audio-driver hooks invoked when the BT audio service comes and goes. */
+struct scsc_bt_audio_driver {
+ const char *name;
+ void (*probe)(struct scsc_bt_audio_driver *driver, struct scsc_bt_audio *bt_audio);
+ void (*remove)(struct scsc_bt_audio *bt_audio);
+};
+
+phys_addr_t scsc_bt_audio_get_paddr_buf(bool tx);
+unsigned int scsc_bt_audio_get_rate(int id);
+int scsc_bt_audio_register(struct device *dev,
+ int (*dev_iommu_map)(struct device *, phys_addr_t, size_t),
+ void (*dev_iommu_unmap)(struct device *, size_t));
+int scsc_bt_audio_unregister(struct device *dev);
+
+#endif /* _BT_AUDIO_H */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ * Confidential information of Samsung Electronics Co., Ltd
+ *
+ * Refer to LICENSE.txt included with this source for details
+ * on the license terms.
+ *
+ * CME3_PRIM_H defines data structures and macros for coex signalling
+ * between BT Host and BT FW
+ *
+ *****************************************************************************/
+#ifndef CME3_PRIM_H__
+#define CME3_PRIM_H__
+
+#if defined(XAP)
+#include "types.h"
+#else
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#define CME3_PRIM_ANY_SIZE 1
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_Signal_Id
+ *
+ * DESCRIPTION
+ *
+ * VALUES
+ * profile_a2dp_start_ind -
+ * profile_a2dp_stop_ind -
+ * coex_stop_ind -
+ * coex_start_ind -
+ *
+ *******************************************************************************/
+/* Signal identifiers for the CME coex channel.  The numeric values are part
+ * of the host<->firmware wire format — do not renumber. */
+typedef enum {
+ CME_SIGNAL_ID_PROFILE_A2DP_START_IND = 0,
+ CME_SIGNAL_ID_PROFILE_A2DP_STOP_IND = 1,
+ CME_SIGNAL_ID_COEX_STOP_IND = 2,
+ CME_SIGNAL_ID_COEX_START_IND = 3,
+} CME_SIGNAL_ID;
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_CODEC_TYPE
+ *
+ * DESCRIPTION
+ * Codec types used for A2DP profile.
+ *
+ * VALUES
+ * CME_CODEC_TYPE_SBC -
+ * CME_CODEC_TYPE_APTX -
+ * CME_CODEC_TYPE_SSHD -
+ *
+ *******************************************************************************/
+/* A2DP codec identifiers; carried in 3 bits on the wire (see
+ * CME_PROFILE_A2DP_START_IND_CODEC_TYPE_SET) — do not renumber. */
+typedef enum {
+ CME_CODEC_TYPE_SBC = 0,
+ CME_CODEC_TYPE_APTX = 1,
+ CME_CODEC_TYPE_SSHD = 2
+} CME_CODEC_TYPE;
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_A2DP_ROLE
+ *
+ * DESCRIPTION
+ * A2DP Device role for A2DP profile.
+ *
+ * VALUES
+ * CME_A2DP_SOURCE -
+ * CME_A2DP_SINK -
+ *
+ *******************************************************************************/
+/* Local device role for an A2DP stream; carried in a single bit on the wire
+ * (see CME_PROFILE_A2DP_START_IND_ROLE_SET) — do not renumber. */
+typedef enum {
+ CME_A2DP_SOURCE = 0,
+ CME_A2DP_SINK = 1
+} CME_A2DP_ROLE;
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * Cme_Header
+ *
+ * DESCRIPTION
+ *
+ * MEMBERS
+ * Signal_Id -
+ * Length - The length of the whole message in 16-bit words.
+ *
+ *******************************************************************************/
+/* Common header of every CME signal.  Packed into a single 16-bit word on
+ * the wire: Signal_Id in bits 0..7, Length in bits 8..15 (see the GET/SET
+ * macros below). */
+typedef struct {
+ CME_SIGNAL_ID Signal_Id;
+ uint8_t Length; /* whole message length in 16-bit words */
+} CME_HEADER;
+
+/* The following macros take CME_HEADER *cme_header_ptr or uint16_t *addr.
+ * Word 0 layout: bits 0..7 Signal_Id, bits 8..15 Length (in 16-bit words).
+ * Accesses go through volatile pointers — presumably because the words live
+ * in memory shared with firmware; confirm with the transport code. */
+#define CME_HEADER_SIGNAL_ID_WORD_OFFSET (0)
+
+/* Read Signal_Id (low byte) from the packed header word. */
+#define CME_HEADER_SIGNAL_ID_GET(addr) \
+ ((CME_SIGNAL_ID)((*(((volatile const uint16_t *)(addr))) & 0xff)))
+
+/* Replace only the Signal_Id byte, preserving Length. */
+#define CME_HEADER_SIGNAL_ID_SET(addr, signal_id) \
+ (*(((volatile uint16_t *)(addr))) = \
+ (uint16_t)((*(((volatile uint16_t *)(addr))) & ~0xff) | (((signal_id)) & 0xff)))
+
+#define CME_HEADER_LENGTH_GET(addr) (((*(((volatile const uint16_t *)(addr))) & 0xff00) >> 8))
+
+#define CME_HEADER_LENGTH_SET(addr, length) (*(((volatile uint16_t *)(addr))) = \
+ (uint16_t)((*(((volatile uint16_t *)(addr))) & ~0xff00) | (((length) << 8) & 0xff00)))
+
+#define CME_HEADER_WORD_SIZE (1)
+
+/* Build a packed header word as an rvalue (cf. CME_HEADER_PACK which stores). */
+/*lint -e(773) allow unparenthesized*/
+#define CME_HEADER_CREATE(Signal_Id, Length) \
+ (uint16_t)(((Signal_Id)) & 0xff) | \
+ (uint16_t)(((Length) << 8) & 0xff00)
+
+#define CME_HEADER_PACK(addr, Signal_Id, Length) \
+ do { \
+ *(((volatile uint16_t *)(addr))) = (uint16_t)((uint16_t)(((Signal_Id)) & 0xff) | \
+ (uint16_t)(((Length) << 8) & 0xff00)); \
+ } while (0)
+
+/* Serialise a CME_HEADER struct into word 0 at addr. */
+#define CME_HEADER_MARSHALL(addr, cme_header_ptr) \
+ do { \
+ *((addr)) = (uint16_t)((((cme_header_ptr)->Signal_Id)) & 0xff) | \
+ (uint16_t)((((cme_header_ptr)->Length) << 8) & 0xff00); \
+ } while (0)
+
+/* Deserialise word 0 at addr into a CME_HEADER struct. */
+#define CME_HEADER_UNMARSHALL(addr, cme_header_ptr) \
+ do { \
+ (cme_header_ptr)->Signal_Id = CME_HEADER_SIGNAL_ID_GET(addr); \
+ (cme_header_ptr)->Length = CME_HEADER_LENGTH_GET(addr); \
+ } while (0)
+
+
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_PROFILE_A2DP_START_IND
+ *
+ * DESCRIPTION
+ * Message from CME_BH to CME_BT to indicate that an A2DP profile has
+ * started a connection or that an existing connection has resumed with
+ * updated parameters.
+ *
+ * MEMBERS
+ * acl_handle - Identifies the ACL Link used for the profile
+ * connection.
+ * l2cap_connection_id - Identifies the remote L2CAP connection ID (as used on
+ * the air).
+ * bit_rate - Identifies the bit rate of the codec in kbps.
+ * codec_type - Identifies the codec type (CME_CODEC_TYPE enum).
+ * sdu_size - Identifies the maximum size of the A2DP SDU (MTU
+ * negotiated for the L2CAP link) in octets.
+ * period - Identifies the period in ms of codec data being
+ * available for transmission.
+ * role - Identifies the local device role, source or sink.
+ * spare - Spare.
+ *
+ *******************************************************************************/
+/* Host-side (unpacked) representation of the A2DP start indication; the
+ * packed one-word-per-field wire layout is produced/consumed by the
+ * MARSHALL/UNMARSHALL/PACK macros below. */
+typedef struct {
+ CME_HEADER header;
+ uint16_t acl_handle;
+ uint16_t l2cap_connection_id;
+ uint16_t bit_rate; /* Only 12 bits used */
+ CME_CODEC_TYPE codec_type; /* 3 bits on the wire */
+ uint16_t sdu_size;
+ uint8_t period;
+ CME_A2DP_ROLE role; /* 1 bit on the wire */
+} CME_PROFILE_A2DP_START_IND;
+
+/* The following macros take
+ * CME_PROFILE_A2DP_START_IND *cme_profile_a2dp_start_ind_ptr or uint16_t *addr.
+ *
+ * Packed wire layout (16-bit words relative to addr; word 0 is the header):
+ *   word 1: acl_handle
+ *   word 2: l2cap_connection_id
+ *   word 3: bits 0..11 bit_rate, bits 12..14 codec_type
+ *   word 4: sdu_size
+ *   word 5: bits 0..7 period, bit 8 role
+ */
+#define CME_PROFILE_A2DP_START_IND_ACL_HANDLE_WORD_OFFSET (1)
+
+#define CME_PROFILE_A2DP_START_IND_ACL_HANDLE_GET(addr) (*((addr) + 1))
+
+#define CME_PROFILE_A2DP_START_IND_ACL_HANDLE_SET(addr, acl_handle) \
+ (*((addr) + 1) = (uint16_t)(acl_handle))
+
+#define CME_PROFILE_A2DP_START_IND_L2CAP_CONNECTION_ID_WORD_OFFSET (2)
+
+#define CME_PROFILE_A2DP_START_IND_L2CAP_CONNECTION_ID_GET(addr) (*((addr) + 2))
+
+#define CME_PROFILE_A2DP_START_IND_L2CAP_CONNECTION_ID_SET(addr, l2cap_connection_id) \
+ (*((addr) + 2) = (uint16_t)(l2cap_connection_id))
+#define CME_PROFILE_A2DP_START_IND_BIT_RATE_WORD_OFFSET (3)
+
+#define CME_PROFILE_A2DP_START_IND_BIT_RATE_GET(addr) (((*((addr) + 3) & 0xfff)))
+
+#define CME_PROFILE_A2DP_START_IND_BIT_RATE_SET(addr, bit_rate) (*((addr) + 3) = \
+ (uint16_t)((*((addr) + 3) & ~0xfff) | (((bit_rate)) & 0xfff)))
+
+/* codec_type shares word 3 with bit_rate, hence no *_WORD_OFFSET macro. */
+#define CME_PROFILE_A2DP_START_IND_CODEC_TYPE_GET(addr) \
+ ((CME_CODEC_TYPE)((*((addr) + 3) & 0x7000) >> 12))
+
+#define CME_PROFILE_A2DP_START_IND_CODEC_TYPE_SET(addr, codec_type) \
+ (*((addr) + 3) = \
+ (uint16_t)((*((addr) + 3) & ~0x7000) | (((codec_type) << 12) & 0x7000)))
+
+#define CME_PROFILE_A2DP_START_IND_SDU_SIZE_WORD_OFFSET (4)
+
+#define CME_PROFILE_A2DP_START_IND_SDU_SIZE_GET(addr) (*((addr) + 4))
+
+#define CME_PROFILE_A2DP_START_IND_SDU_SIZE_SET(addr, sdu_size) (*((addr) + 4) = \
+ (uint16_t)(sdu_size))
+
+#define CME_PROFILE_A2DP_START_IND_PERIOD_WORD_OFFSET (5)
+
+#define CME_PROFILE_A2DP_START_IND_PERIOD_GET(addr) (((*((addr) + 5) & 0xff)))
+
+#define CME_PROFILE_A2DP_START_IND_PERIOD_SET(addr, period) (*((addr) + 5) = \
+ (uint16_t)((*((addr) + 5) & ~0xff) | (((period)) & 0xff)))
+
+/* role shares word 5 with period, hence no *_WORD_OFFSET macro. */
+#define CME_PROFILE_A2DP_START_IND_ROLE_GET(addr) \
+ ((CME_A2DP_ROLE)((*((addr) + 5) & 0x100) >> 8))
+
+#define CME_PROFILE_A2DP_START_IND_ROLE_SET(addr, role) (*((addr) + 5) = \
+ (uint16_t)((*((addr) + 5) & ~0x100) | (((role) << 8) & 0x100)))
+
+#define CME_PROFILE_A2DP_START_IND_WORD_SIZE (6)
+
+/* Pack an A2DP start indication payload into words 1..5 at addr.  The
+ * header word (0) is NOT written here — use CME_HEADER_PACK for that.
+ * Fix: the words 3 and 5 expressions were missing the closing parenthesis
+ * of their outer (uint16_t)(...) cast, so any use of this macro failed to
+ * compile.  NOTE(review): sampling_freq is accepted but unused; kept in the
+ * parameter list for call-site compatibility — confirm whether it should be
+ * written to a word. */
+#define CME_PROFILE_A2DP_START_IND_PACK(addr, acl_handle, l2cap_connection_id, \
+ bit_rate, codec_type, sdu_size, period, role, sampling_freq) \
+ do { \
+ *((addr) + 1) = (uint16_t)((uint16_t)(acl_handle)); \
+ *((addr) + 2) = (uint16_t)((uint16_t)(l2cap_connection_id)); \
+ *((addr) + 3) = (uint16_t)((uint16_t)(((bit_rate)) & 0xfff) | \
+ (uint16_t)(((codec_type) << 12) & 0x7000)); \
+ *((addr) + 4) = (uint16_t)((uint16_t)(sdu_size)); \
+ *((addr) + 5) = (uint16_t)((uint16_t)(((period)) & 0xff) | \
+ (uint16_t)(((role) << 8) & 0x100)); \
+ } while (0)
+
+/* Serialise the unpacked struct into its packed wire form at addr
+ * (word 0 via CME_HEADER_MARSHALL, payload fields in words 1..5). */
+#define CME_PROFILE_A2DP_START_IND_MARSHALL(addr, cme_profile_a2dp_start_ind_ptr) \
+ do { \
+ CME_HEADER_MARSHALL((addr), &((cme_profile_a2dp_start_ind_ptr)->header)); \
+ *((addr) + 1) = (uint16_t)((cme_profile_a2dp_start_ind_ptr)->acl_handle); \
+ *((addr) + 2) = (uint16_t)((cme_profile_a2dp_start_ind_ptr)->l2cap_connection_id); \
+ *((addr) + 3) = (uint16_t)((((cme_profile_a2dp_start_ind_ptr)->bit_rate)) & 0xfff) | \
+ (uint16_t)((((cme_profile_a2dp_start_ind_ptr)->codec_type) << 12) & 0x7000); \
+ *((addr) + 4) = (uint16_t)((cme_profile_a2dp_start_ind_ptr)->sdu_size); \
+ *((addr) + 5) = (uint16_t)((((cme_profile_a2dp_start_ind_ptr)->period)) & 0xff) | \
+ (uint16_t)((((cme_profile_a2dp_start_ind_ptr)->role) << 8) & 0x100); \
+ } while (0)
+
+/* Deserialise the packed wire form at addr into the unpacked struct. */
+#define CME_PROFILE_A2DP_START_IND_UNMARSHALL(addr, cme_profile_a2dp_start_ind_ptr) \
+ do { \
+ CME_HEADER_UNMARSHALL((addr), &((cme_profile_a2dp_start_ind_ptr)->header)); \
+ (cme_profile_a2dp_start_ind_ptr)->acl_handle = CME_PROFILE_A2DP_START_IND_ACL_HANDLE_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->l2cap_connection_id = CME_PROFILE_A2DP_START_IND_L2CAP_CONNECTION_ID_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->bit_rate = CME_PROFILE_A2DP_START_IND_BIT_RATE_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->codec_type = CME_PROFILE_A2DP_START_IND_CODEC_TYPE_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->sdu_size = CME_PROFILE_A2DP_START_IND_SDU_SIZE_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->period = CME_PROFILE_A2DP_START_IND_PERIOD_GET(addr); \
+ (cme_profile_a2dp_start_ind_ptr)->role = CME_PROFILE_A2DP_START_IND_ROLE_GET(addr); \
+ } while (0)
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_PROFILE_A2DP_STOP_IND
+ *
+ * DESCRIPTION
+ * Message from CME_BH to CME_BT to indicate that an A2DP profile has
+ * stopped or paused.
+ *
+ * MEMBERS
+ * acl_handle - Identifies the ACL Link used for the profile
+ * connection.
+ * l2cap_connection_id - Identifies the remote L2CAP connection ID (as used on
+ * the air).
+ *
+ *******************************************************************************/
+/* Host-side representation of the A2DP stop indication (member semantics
+ * are documented in the block comment above). */
+typedef struct {
+ CME_HEADER header;
+ uint16_t acl_handle;
+ uint16_t l2cap_connection_id;
+} CME_PROFILE_A2DP_STOP_IND;
+
+/* The following macros take
+ * CME_PROFILE_A2DP_STOP_IND *cme_profile_a2dp_stop_ind_ptr or uint16_t *addr.
+ * Wire layout: word 0 header, word 1 acl_handle, word 2 l2cap_connection_id. */
+#define CME_PROFILE_A2DP_STOP_IND_ACL_HANDLE_WORD_OFFSET (1)
+
+#define CME_PROFILE_A2DP_STOP_IND_ACL_HANDLE_GET(addr) (*((addr) + 1))
+
+#define CME_PROFILE_A2DP_STOP_IND_ACL_HANDLE_SET(addr, acl_handle) \
+ (*((addr) + 1) = (uint16_t)(acl_handle))
+
+#define CME_PROFILE_A2DP_STOP_IND_L2CAP_CONNECTION_ID_WORD_OFFSET (2)
+
+#define CME_PROFILE_A2DP_STOP_IND_L2CAP_CONNECTION_ID_GET(addr) (*((addr) + 2))
+
+#define CME_PROFILE_A2DP_STOP_IND_L2CAP_CONNECTION_ID_SET(addr, l2cap_connection_id) \
+ (*((addr) + 2) = (uint16_t)(l2cap_connection_id))
+
+#define CME_PROFILE_A2DP_STOP_IND_WORD_SIZE (3)
+
+/* Pack payload words 1..2 only; the header word is written separately. */
+#define CME_PROFILE_A2DP_STOP_IND_PACK(addr, acl_handle, l2cap_connection_id) \
+ do { \
+ *((addr) + 1) = (uint16_t)((uint16_t)(acl_handle)); \
+ *((addr) + 2) = (uint16_t)((uint16_t)(l2cap_connection_id)); \
+ } while (0)
+
+/* Serialise struct -> packed wire form (header included). */
+#define CME_PROFILE_A2DP_STOP_IND_MARSHALL(addr, cme_profile_a2dp_stop_ind_ptr) \
+ do { \
+ CME_HEADER_MARSHALL((addr), &((cme_profile_a2dp_stop_ind_ptr)->header)); \
+ *((addr) + 1) = (uint16_t)((cme_profile_a2dp_stop_ind_ptr)->acl_handle); \
+ *((addr) + 2) = (uint16_t)((cme_profile_a2dp_stop_ind_ptr)->l2cap_connection_id); \
+ } while (0)
+
+/* Deserialise packed wire form -> struct (header included). */
+#define CME_PROFILE_A2DP_STOP_IND_UNMARSHALL(addr, cme_profile_a2dp_stop_ind_ptr) \
+ do { \
+ CME_HEADER_UNMARSHALL((addr), &((cme_profile_a2dp_stop_ind_ptr)->header)); \
+ (cme_profile_a2dp_stop_ind_ptr)->acl_handle = CME_PROFILE_A2DP_STOP_IND_ACL_HANDLE_GET(addr); \
+ (cme_profile_a2dp_stop_ind_ptr)->l2cap_connection_id = CME_PROFILE_A2DP_STOP_IND_L2CAP_CONNECTION_ID_GET(addr); \
+ } while (0)
+
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_COEX_STOP_IND
+ *
+ * DESCRIPTION
+ * Message from CME_BT to CME_BH to indicate that the coex service is
+ * stopping.
+ *
+ * MEMBERS
+ *
+ *******************************************************************************/
+/* Header-only signal (no payload beyond word 0). */
+typedef struct {
+ CME_HEADER header;
+} CME_COEX_STOP_IND;
+
+/* The following macros take
+ * CME_COEX_STOP_IND *cme_coex_stop_ind_ptr or uint16_t *addr */
+#define CME_COEX_STOP_IND_WORD_SIZE (1)
+
+
+/*******************************************************************************
+ *
+ * NAME
+ * CME_COEX_START_IND
+ *
+ * DESCRIPTION
+ * Message from CME_BT to CME_BH to indicate that the coex service is
+ * starting.
+ *
+ * MEMBERS
+ *
+ *******************************************************************************/
+/* Header-only signal (no payload beyond word 0). */
+typedef struct {
+ CME_HEADER header;
+} CME_COEX_START_IND;
+
+/* The following macros take
+ * CME_COEX_START_IND *cme_coex_start_ind_ptr or uint16_t *addr */
+#define CME_COEX_START_IND_WORD_SIZE (1)
+
+
+
+#endif /* CME3_PRIM_H__ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_H
+#define __SLSI_KIC_H
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/* Indication senders: forward events/information into kernel KIC, which
+ * relays them to user-space listeners.  Each returns 0 on success or a
+ * negative error code (assumed from kernel convention — confirm in the
+ * implementation). */
+int slsi_kic_system_event_ind(enum slsi_kic_system_event_category event_cat,
+ enum slsi_kic_system_events event, gfp_t flags);
+int slsi_kic_service_information_ind(enum slsi_kic_technology_type tech,
+ struct slsi_kic_service_info *info);
+
+int slsi_kic_firmware_event_ind(uint16_t firmware_event_type, uint32_t tech_type,
+ uint32_t contain_type,
+ struct slsi_kic_firmware_event_ccp_host *event);
+
+#endif /* #ifndef __SLSI_KIC_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_ANT_H
+#define __SLSI_KIC_ANT_H
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/**
+ * struct slsi_kic_ant_ops - backend description for ANT service driver ops.
+ *
+ * This struct is registered by the ANT service driver during initialisation
+ * in order to provide ANT-specific services, which can be used by KIC.
+ *
+ * All callbacks except where otherwise noted should return 0 on success or a
+ * negative error code.
+ *
+ * @trigger_recovery: Trigger a ANT firmware subsystem recovery. The variable
+ * @type specifies the recovery type.
+ */
+/* 'priv' is the pointer given to slsi_kic_ant_ops_register() and is passed
+ * back on every callback invocation. */
+struct slsi_kic_ant_ops {
+ int (*trigger_recovery)(void *priv, enum slsi_kic_test_recovery_type type);
+};
+
+#ifdef CONFIG_SAMSUNG_KIC
+
+/**
+ * slsi_kic_ant_ops_register - register ant_ops with KIC
+ *
+ * @priv: Private pointer, which will be included in all calls from KIC.
+ * @ant_ops: The ant_ops to register.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int slsi_kic_ant_ops_register(void *priv, struct slsi_kic_ant_ops *ant_ops);
+
+/**
+ * slsi_kic_ant_ops_unregister - unregister ant_ops with KIC
+ *
+ * @ant_ops: The ant_ops to unregister.
+ *
+ * After this call, no more requests can be made, but the call may sleep to wait
+ * for an outstanding request that is being handled.
+ */
+void slsi_kic_ant_ops_unregister(struct slsi_kic_ant_ops *ant_ops);
+
+#else
+
+#ifndef UNUSED
+#define UNUSED(x) ((void)(x))
+#endif
+
+#define slsi_kic_ant_ops_register(a, b) \
+ do { \
+ UNUSED(a); \
+ UNUSED(b); \
+ } while (0)
+
+#define slsi_kic_ant_ops_unregister(a) UNUSED(a)
+#endif /* CONFIG_SAMSUNG_KIC */
+
+#endif /* #ifndef __SLSI_KIC_ANT_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_BT_H
+#define __SLSI_KIC_BT_H
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/**
+ * struct slsi_kic_bt_ops - backend description for BT service driver ops.
+ *
+ * This struct is registered by the BT service driver during initialisation
+ * in order to provide BT-specific services, which can be used by KIC.
+ *
+ * All callbacks except where otherwise noted should return 0 on success or a
+ * negative error code.
+ *
+ * @trigger_recovery: Trigger a BT firmware subsystem recovery. The variable
+ * @type specifies the recovery type.
+ */
+/* 'priv' is the pointer given to slsi_kic_bt_ops_register() and is passed
+ * back on every callback invocation. */
+struct slsi_kic_bt_ops {
+ int (*trigger_recovery)(void *priv, enum slsi_kic_test_recovery_type type);
+};
+
+#ifdef CONFIG_SAMSUNG_KIC
+
+/**
+ * slsi_kic_bt_ops_register - register bt_ops with KIC
+ *
+ * @priv: Private pointer, which will be included in all calls from KIC.
+ * @bt_ops: The bt_ops to register.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int slsi_kic_bt_ops_register(void *priv, struct slsi_kic_bt_ops *bt_ops);
+
+/**
+ * slsi_kic_bt_ops_unregister - unregister bt_ops with KIC
+ *
+ * @bt_ops: The bt_ops to unregister.
+ *
+ * After this call, no more requests can be made, but the call may sleep to wait
+ * for an outstanding request that is being handled.
+ */
+void slsi_kic_bt_ops_unregister(struct slsi_kic_bt_ops *bt_ops);
+
+#else
+
+#define slsi_kic_bt_ops_register(a, b) \
+ do { \
+ (void)(a); \
+ (void)(b); \
+ } while (0)
+
+#define slsi_kic_bt_ops_unregister(a) \
+ do { \
+ (void)(a); \
+ } while (0)
+
+#endif /* CONFIG_SAMSUNG_KIC */
+
+#endif /* #ifndef __SLSI_KIC_BT_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_CM_H
+#define __SLSI_KIC_CM_H
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/**
+ * struct slsi_kic_cm_ops - backend description for Chip Manager (CM) driver ops
+ *
+ * This struct is registered by the Chip Manager driver during initialisation
+ * in order to provide CM-specific services, which can be used by KIC.
+ *
+ * All callbacks except where otherwise noted should return 0 on success or a
+ * negative error code.
+ *
+ * @trigger_recovery: Trigger a firmware crash, which requires a full chip
+ * recovery.
+ * @type specifies the recovery type.
+ */
+/* 'priv' is the pointer given to slsi_kic_cm_ops_register() and is passed
+ * back on every callback invocation. */
+struct slsi_kic_cm_ops {
+ int (*trigger_recovery)(void *priv, enum slsi_kic_test_recovery_type type);
+};
+
+
+/**
+ * slsi_kic_cm_ops_register - register cm_ops with KIC
+ *
+ * @priv: Private pointer, which will be included in all calls from KIC.
+ * @cm_ops: The cm_ops to register.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int slsi_kic_cm_ops_register(void *priv, struct slsi_kic_cm_ops *cm_ops);
+
+/**
+ * slsi_kic_cm_ops_unregister - unregister cm_ops with KIC
+ *
+ * @cm_ops: The cm_ops to unregister.
+ *
+ * After this call, no more requests can be made, but the call may sleep to wait
+ * for an outstanding request that is being handled.
+ */
+void slsi_kic_cm_ops_unregister(struct slsi_kic_cm_ops *cm_ops);
+
+#endif /* #ifndef __SLSI_KIC_CM_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_LIB_H
+#define __SLSI_KIC_LIB_H
+
+#ifdef CONFIG_SLSI_KIC_API_ENABLED
+#include <scsc/kic/slsi_kic.h>
+#endif
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/**
+ * Library functions for sending information to kernel KIC, which will process
+ * the event and take appropriate action, i.e. forward to relevant user
+ * processes etc.
+ */
+#ifdef CONFIG_SLSI_KIC_API_ENABLED
+
+/* Report a system event to kernel KIC.  Delivery is best-effort, so the
+ * status returned by the indication call is deliberately discarded. */
+static inline void slsi_kic_system_event(enum slsi_kic_system_event_category event_cat,
+					 enum slsi_kic_system_events event, gfp_t flags)
+{
+	int rc;
+
+	rc = slsi_kic_system_event_ind(event_cat, event, flags);
+	(void)rc;
+}
+
+
+/* Report service information for a technology to kernel KIC.  Delivery is
+ * best-effort, so the indication's status code is deliberately discarded. */
+static inline void slsi_kic_service_information(enum slsi_kic_technology_type tech,
+						struct slsi_kic_service_info *info)
+{
+	int rc;
+
+	rc = slsi_kic_service_information_ind(tech, info);
+	(void)rc;
+}
+
+/* Report a firmware event to kernel KIC.  Delivery is best-effort, so the
+ * indication's status code is deliberately discarded. */
+static inline void slsi_kic_firmware_event(uint16_t firmware_event_type,
+					   enum slsi_kic_technology_type tech_type,
+					   uint32_t contain_type,
+					   struct slsi_kic_firmware_event_ccp_host *event)
+{
+	int rc;
+
+	rc = slsi_kic_firmware_event_ind(firmware_event_type, tech_type,
+					 contain_type, event);
+	(void)rc;
+}
+
+#else
+
+#define slsi_kic_system_event(a, b, c) \
+ do { \
+ (void)(a); \
+ (void)(b); \
+ (void)(c); \
+ } while (0)
+
+#define slsi_kic_service_information(a, b) \
+ do { \
+ (void)(a); \
+ (void)(b); \
+ } while (0)
+
+#define slsi_kic_firmware_event(a, b, c, d) \
+ do { \
+ (void)(a); \
+ (void)(b); \
+ (void)(c); \
+ (void)(d); \
+ } while (0)
+
+#endif
+
+#endif /* #ifndef __SLSI_KIC_LIB_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_PRIM_H
+#define __SLSI_KIC_PRIM_H
+
+#ifdef __KERNEL__
+#include <net/netlink.h>
+#else
+#include <netlink/attr.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define SLSI_KIC_INTERFACE_VERSION_MAJOR 1
+#define SLSI_KIC_INTERFACE_VERSION_MINOR 0
+
+/**
+ * Common
+ */
+/* Technology/subsystem identifiers.  The values cross the netlink interface
+ * (ABI): do not reorder or insert entries, and keep
+ * slsi_kic_technology_type_text in sync. */
+enum slsi_kic_technology_type {
+ slsi_kic_technology_type_curator,
+ slsi_kic_technology_type_bt,
+ slsi_kic_technology_type_wifi,
+ slsi_kic_technology_type_audio,
+ slsi_kic_technology_type_gnss,
+ slsi_kic_technology_type_nfc,
+ slsi_kic_technology_type_janitor,
+ slsi_kic_technology_type_common,
+ slsi_kic_technology_type_ant,
+
+ /* keep last */
+ slsi_kic_technology_type__after_last,
+ slsi_kic_technology_type_max_category = slsi_kic_technology_type__after_last - 1
+};
+
+/* Human-readable names indexed by enum slsi_kic_technology_type.
+ * Fix: the "common" entry was missing, which shifted "ant" onto
+ * slsi_kic_technology_type_common and left the ant slot NULL (the array is
+ * sized for all 9 enumerators). */
+static const char *const slsi_kic_technology_type_text[slsi_kic_technology_type_max_category + 1] = {
+ "curator",
+ "bt",
+ "wifi",
+ "audio",
+ "gnss",
+ "nfc",
+ "janitor",
+ "common",
+ "ant"
+};
+
+/**
+ * System events
+ */
+enum slsi_kic_system_event_category {
+ slsi_kic_system_event_category_initialisation,
+ slsi_kic_system_event_category_deinitialisation,
+ slsi_kic_system_event_category_error,
+ slsi_kic_system_event_category_recovery,
+
+ /* keep last */
+ slsi_kic_system_category__after_last,
+ slsi_kic_system_category_max_category = slsi_kic_system_category__after_last - 1
+};
+
+static const char *const slsi_kic_system_category_text[slsi_kic_system_category_max_category + 1] = {
+ "Initialisation",
+ "Deinitialisation",
+ "Error",
+ "Recovery"
+};
+
+enum slsi_kic_system_events {
+ slsi_kic_system_events_wifi_on,
+ slsi_kic_system_events_wifi_off,
+ slsi_kic_system_events_wifi_suspend,
+ slsi_kic_system_events_wifi_resume,
+ slsi_kic_system_events_wifi_service_driver_attached,
+ slsi_kic_system_events_wifi_service_driver_detached,
+ slsi_kic_system_events_wifi_firmware_patch_downloaded,
+ slsi_kic_system_events_wifi_service_driver_started,
+ slsi_kic_system_events_wifi_service_driver_stopped,
+ slsi_kic_system_events_bt_on,
+ slsi_kic_system_events_bt_off,
+ slsi_kic_system_events_bt_service_driver_attached,
+ slsi_kic_system_events_bt_service_driver_detached,
+ slsi_kic_system_events_bt_firmware_patch_downloaded,
+ slsi_kic_system_events_curator_firmware_patch_downloaded,
+ slsi_kic_system_events_sdio_powered,
+ slsi_kic_system_events_sdio_inserted,
+ slsi_kic_system_events_sdio_removed,
+ slsi_kic_system_events_sdio_powered_off,
+ slsi_kic_system_events_sdio_error,
+ slsi_kic_system_events_uart_powered,
+ slsi_kic_system_events_uart_powered_off,
+ slsi_kic_system_events_uart_error,
+ slsi_kic_system_events_coredump_in_progress,
+ slsi_kic_system_events_coredump_done,
+ slsi_kic_system_events_subsystem_crashed,
+ slsi_kic_system_events_subsystem_recovered,
+ slsi_kic_system_events_host_ready_ind,
+ slsi_kic_system_events_ant_on,
+ slsi_kic_system_events_ant_off,
+
+ /* keep last */
+ slsi_kic_system_events__after_last,
+ slsi_kic_system_events_max_event = slsi_kic_system_events__after_last - 1
+};
+
+static const char *const slsi_kic_system_event_text[slsi_kic_system_events_max_event + 1] = {
+ "Wi-Fi on",
+ "Wi-Fi off",
+ "Wi-Fi suspend",
+ "Wi-Fi resume",
+ "Wi-Fi service driver attached",
+ "Wi-Fi service driver detached",
+ "Wi-Fi firmware patch downloaded",
+ "Wi-Fi service driver started",
+ "Wi-Fi service driver stopped",
+ "BT on",
+ "BT off",
+ "BT service driver attached",
+ "BT service driver detached",
+ "BT firmware patch downloaded",
+ "Curator firmware patch downloaded",
+ "SDIO powered",
+ "SDIO inserted",
+ "SDIO removed",
+ "SDIO powered off",
+ "SDIO error",
+ "UART powered",
+ "UART powered off",
+ "UART error",
+ "Coredump in progress",
+ "Coredump done",
+ "Subsystem has crashed",
+ "Subsystem has been recovered",
+ "CCP Host ready Ind sent",
+ "ANT on",
+ "ANT off"
+};
+
+/**
+ * Time stamp
+ */
+struct slsi_kic_timestamp {
+ uint64_t tv_sec; /* seconds */
+ uint64_t tv_nsec; /* nanoseconds */
+};
+
+/* Policy */
+enum slsi_kic_attr_timestamp_attributes {
+ __SLSI_KIC_ATTR_TIMESTAMP_INVALID,
+ SLSI_KIC_ATTR_TIMESTAMP_TV_SEC,
+ SLSI_KIC_ATTR_TIMESTAMP_TV_NSEC,
+
+ /* keep last */
+ __SLSI_KIC_ATTR_TIMESTAMP_AFTER_LAST,
+ SLSI_KIC_ATTR_TIMESTAMP_MAX = __SLSI_KIC_ATTR_TIMESTAMP_AFTER_LAST - 1
+};
+
+
+/**
+ * Firmware event
+ */
+enum slsi_kic_firmware_event_type {
+ slsi_kic_firmware_event_type_panic,
+ slsi_kic_firmware_event_type_fault,
+
+ /* keep last */
+ slsi_kic_firmware_events__after_last,
+ slsi_kic_firmware_events_max_event = slsi_kic_firmware_events__after_last - 1
+};
+
+static const char *const slsi_kic_firmware_event_type_text[slsi_kic_firmware_events_max_event + 1] = {
+ "Firmware panic",
+ "Firmware fault"
+};
+
+enum slsi_kic_firmware_container_type {
+ slsi_kic_firmware_container_type_ccp_host,
+};
+
+
+/*
+ * Firmware event: CCP host container
+ */
+#define SLSI_KIC_FIRMWARE_EVENT_CCP_HOST_ARG_LENGTH 16
+/* Decoded firmware event record forwarded over netlink (field encoding is
+ * given by the SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_* policy below). */
+struct slsi_kic_firmware_event_ccp_host {
+ uint32_t id;
+ uint32_t level;
+ char *level_string; /* textual form of 'level'; bounded to 128 by the netlink policy */
+ uint32_t timestamp; /* units not specified here — TODO(review) confirm (fw ticks vs ms) */
+ uint32_t cpu;
+ uint32_t occurences; /* spelling kept as-is: part of the public header */
+ uint32_t arg_length; /* presumably valid bytes in 'arg' — confirm against producers */
+ uint8_t *arg; /* policy bounds the attribute to SLSI_KIC_FIRMWARE_EVENT_CCP_HOST_ARG_LENGTH */
+};
+
+/**
+ * Trigger recovery
+ */
+enum slsi_kic_test_recovery_type {
+ slsi_kic_test_recovery_type_subsystem_panic,
+ slsi_kic_test_recovery_type_emulate_firmware_no_response,
+ slsi_kic_test_recovery_type_watch_dog,
+ slsi_kic_test_recovery_type_chip_crash,
+ slsi_kic_test_recovery_type_service_start_panic,
+ slsi_kic_test_recovery_type_service_stop_panic,
+};
+
+/* Result of a trigger-recovery request.  Note: the "invald" spelling is kept
+ * unchanged because these identifiers are part of the public header ABI. */
+enum slsi_kic_test_recovery_status {
+ slsi_kic_test_recovery_status_ok,
+ slsi_kic_test_recovery_status_error_invald_param,
+ slsi_kic_test_recovery_status_error_send_msg,
+};
+
+
+/* Policy */
+enum slsi_kic_attr_firmware_event_ccp_host_attributes {
+ __SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_INVALID,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ID,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL_STRING,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_TIMESTAMP,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_CPU,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_OCCURENCES,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ARG,
+
+ /* keep last */
+ __SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_AFTER_LAST,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_MAX = __SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_AFTER_LAST - 1
+};
+
+
+/**
+ * System information
+ */
+/* Version/release details reported by a subsystem service driver and
+ * forwarded to user space (encoded per slsi_kic_attr_service_info_policy). */
+struct slsi_kic_service_info {
+ char ver_str[128]; /* human-readable version string */
+ uint16_t fw_api_major;
+ uint16_t fw_api_minor;
+ uint16_t release_product;
+ uint16_t host_release_iteration;
+ uint16_t host_release_candidate;
+};
+
+/* Policy */
+enum slsi_kic_attr_service_info_attributes {
+ __SLSI_KIC_ATTR_SERVICE_INFO_INVALID,
+ SLSI_KIC_ATTR_SERVICE_INFO_VER_STR,
+ SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MAJOR,
+ SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MINOR,
+ SLSI_KIC_ATTR_SERVICE_INFO_RELEASE_PRODUCT,
+ SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_ITERATION,
+ SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_CANDIDATE,
+
+ /* keep last */
+ __SLSI_KIC_ATTR_SERVICE_INFO_AFTER_LAST,
+ SLSI_KIC_ATTR_SERVICE_INFO_MAX = __SLSI_KIC_ATTR_SERVICE_INFO_AFTER_LAST - 1
+};
+
+
+
+/**
+ * enum slsi_kic_commands - supported Samsung KIC commands
+ *
+ * @SLSI_KIC_CMD_UNSPEC: unspecified command to catch errors
+ * @SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ: Requests the KIC interface
+ * version numbers to be send back.
+ * @SLSI_KIC_CMD_SYSTEM_EVENT_IND: Indicate a system event to user space.
+ * @SLSI_KIC_CMD_SERVICE_INFORMATION_REQ: Requests information for all
+ * already enabled subsystems.
+ * @SLSI_KIC_CMD_SERVICE_INFORMATION_IND: Indicate that a new subsystem has
+ * been started and the information for this subsystem.
+ * @SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ: Triggers a recovery (crash) for a
+ * subsystem specified in the primitive.
+ * @SLSI_KIC_CMD_FIRMWARE_EVENT_IND: Indicates a firmware event to user space.
+ * @SLSI_KIC_CMD_ECHO_REQ: Request an echo (test primitive, which will be
+ * removed later).
+ *
+ * @SLSI_KIC_CMD_MAX: highest used command number
+ * @__SLSI_KIC_CMD_AFTER_LAST: internal use
+ */
+enum slsi_kic_commands {
+/* Do not change the order or add anything between, this is ABI! */
+ SLSI_KIC_CMD_UNSPEC,
+
+ SLSI_KIC_CMD_KIC_INTERFACE_VERSION_NUMBER_REQ,
+ SLSI_KIC_CMD_SYSTEM_EVENT_IND,
+ SLSI_KIC_CMD_SERVICE_INFORMATION_REQ,
+ SLSI_KIC_CMD_SERVICE_INFORMATION_IND,
+ SLSI_KIC_CMD_TEST_TRIGGER_RECOVERY_REQ,
+ SLSI_KIC_CMD_FIRMWARE_EVENT_IND,
+ SLSI_KIC_CMD_ECHO_REQ,
+
+ /* add new commands above here */
+
+ /* used to define SLSI_KIC_CMD_MAX below */
+ __SLSI_KIC_CMD_AFTER_LAST,
+ SLSI_KIC_CMD_MAX = __SLSI_KIC_CMD_AFTER_LAST - 1
+};
+
+
+/**
+ * enum slsi_kic_attrs - Samsung KIC netlink attributes
+ *
+ * @SLSI_KIC_ATTR_UNSPEC: unspecified attribute to catch errors
+ * @SLSI_KIC_ATTR_KIC_VERSION_MAJOR: KIC version number - increments when the
+ * interface is updated with backward incompatible changes.
+ * @SLSI_KIC_ATTR_KIC_VERSION_MINOR: KIC version number - increments when the
+ * interface is updated with backward compatible changes.
+ * @SLSI_KIC_ATTR_TECHNOLOGY_TYPE: Technology type
+ * @SLSI_KIC_ATTR_SYSTEM_EVENT_CATEGORY: System event category
+ * @SLSI_KIC_ATTR_SYSTEM_EVENT: System event
+ * @SLSI_KIC_ATTR_SERVICE_INFO: Pass service info to user space.
+ * @SLSI_KIC_ATTR_NUMBER_OF_ENCODED_SERVICES: The attribute is used to determine
+ * the number of encoded services in the payload
+ * @SLSI_KIC_ATTR_TEST_RECOVERY_TYPE: Specifies the recovery type.
+ * @SLSI_KIC_ATTR_TIMESTAMP: A timestamp - ideally nano second resolution and
+ * precision, but it's platform dependent.
+ * @SLSI_KIC_ATTR_FIRMWARE_EVENT_TYPE: A firmware even type - panic or fault
+ * @SLSI_KIC_ATTR_FIRMWARE_CONTAINER_TYPE: Indicates container type carried in
+ * payload.
+ * @SLSI_KIC_ATTR_FIRMWARE_EVENT_CONTAINER_CCP_HOST: The firmware event data.
+ * @SLSI_KIC_ATTR_TRIGGER_RECOVERY_STATUS: Indicates if the recovery has been
+ * successfully triggered. The recovery signalling will happen afterwards as
+ * normal system events.
+ * @SLSI_KIC_ATTR_ECHO: An echo test primitive, which will be removed later.
+ *
+ * @SLSI_KIC_ATTR_MAX: highest attribute number currently defined
+ * @__SLSI_KIC_ATTR_AFTER_LAST: internal use
+ */
+enum slsi_kic_attrs {
+/* Do not change the order or add anything between, this is ABI! */
+ SLSI_KIC_ATTR_UNSPEC,
+
+ SLSI_KIC_ATTR_KIC_VERSION_MAJOR,
+ SLSI_KIC_ATTR_KIC_VERSION_MINOR,
+ SLSI_KIC_ATTR_TECHNOLOGY_TYPE,
+ SLSI_KIC_ATTR_SYSTEM_EVENT_CATEGORY,
+ SLSI_KIC_ATTR_SYSTEM_EVENT,
+ SLSI_KIC_ATTR_SERVICE_INFO,
+ SLSI_KIC_ATTR_NUMBER_OF_ENCODED_SERVICES,
+ SLSI_KIC_ATTR_TEST_RECOVERY_TYPE,
+ SLSI_KIC_ATTR_TIMESTAMP,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_TYPE,
+ SLSI_KIC_ATTR_FIRMWARE_CONTAINER_TYPE,
+ SLSI_KIC_ATTR_FIRMWARE_EVENT_CONTAINER_CCP_HOST,
+ SLSI_KIC_ATTR_TRIGGER_RECOVERY_STATUS,
+ SLSI_KIC_ATTR_ECHO,
+
+ /* Add attributes here, update the policy below */
+ __SLSI_KIC_ATTR_AFTER_LAST,
+ SLSI_KIC_ATTR_MAX = __SLSI_KIC_ATTR_AFTER_LAST - 1
+};
+
+
+/* Policy for the attributes.
+ * Indexed by enum slsi_kic_attrs: every attribute added to the enum needs a
+ * matching entry here so netlink validation knows its expected type.
+ */
+static const struct nla_policy slsi_kic_attr_policy[SLSI_KIC_ATTR_MAX + 1] = {
+ [SLSI_KIC_ATTR_KIC_VERSION_MAJOR] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_KIC_VERSION_MINOR] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_TECHNOLOGY_TYPE] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_SYSTEM_EVENT_CATEGORY] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_SYSTEM_EVENT] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_SERVICE_INFO] = { .type = NLA_NESTED, },
+ [SLSI_KIC_ATTR_NUMBER_OF_ENCODED_SERVICES] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_TEST_RECOVERY_TYPE] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_TIMESTAMP] = { .type = NLA_NESTED },
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_TYPE] = { .type = NLA_U16 },
+ [SLSI_KIC_ATTR_FIRMWARE_CONTAINER_TYPE] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CONTAINER_CCP_HOST] = { .type = NLA_NESTED },
+ [SLSI_KIC_ATTR_TRIGGER_RECOVERY_STATUS] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_ECHO] = { .type = NLA_U32 },
+};
+
+/* Policy for the slsi_kic_firmware_event_ccp_host attribute.
+ * The __KERNEL__ split exists because the kernel's struct nla_policy names
+ * its length field .len while the userspace (libnl) equivalent names it
+ * .maxlen; the values are the same either way.
+ */
+static const struct nla_policy slsi_kic_attr_firmware_event_ccp_host_policy[SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_MAX + 1] = {
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ID] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL] = { .type = NLA_U32 },
+#ifdef __KERNEL__
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL_STRING] = { .type = NLA_STRING, .len = 128 },
+#else
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_LEVEL_STRING] = { .type = NLA_STRING, .maxlen = 128 },
+#endif
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_TIMESTAMP] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_CPU] = { .type = NLA_U32 },
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_OCCURENCES] = { .type = NLA_U32 },
+#ifdef __KERNEL__
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ARG] = { .type = NLA_UNSPEC,
+ .len = SLSI_KIC_FIRMWARE_EVENT_CCP_HOST_ARG_LENGTH },
+#else
+ [SLSI_KIC_ATTR_FIRMWARE_EVENT_CCP_HOST_ARG] = { .type = NLA_UNSPEC,
+ .maxlen = SLSI_KIC_FIRMWARE_EVENT_CCP_HOST_ARG_LENGTH },
+#endif
+};
+
+
+/* Policy for the slsi_kic_service_info attribute (nested under
+ * SLSI_KIC_ATTR_SERVICE_INFO). See the __KERNEL__ note above: kernel policy
+ * uses .len, userspace (libnl) uses .maxlen.
+ */
+static const struct nla_policy slsi_kic_attr_service_info_policy[SLSI_KIC_ATTR_SERVICE_INFO_MAX + 1] = {
+#ifdef __KERNEL__
+ [SLSI_KIC_ATTR_SERVICE_INFO_VER_STR] = { .type = NLA_STRING, .len = 128 },
+#else
+ [SLSI_KIC_ATTR_SERVICE_INFO_VER_STR] = { .type = NLA_STRING, .maxlen = 128 },
+#endif
+ [SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MAJOR] = { .type = NLA_U16 },
+ [SLSI_KIC_ATTR_SERVICE_INFO_FW_API_MINOR] = { .type = NLA_U16 },
+ [SLSI_KIC_ATTR_SERVICE_INFO_RELEASE_PRODUCT] = { .type = NLA_U16 },
+ [SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_ITERATION] = { .type = NLA_U16 },
+ [SLSI_KIC_ATTR_SERVICE_INFO_HOST_RELEASE_CANDIDATE] = { .type = NLA_U16 },
+};
+
+/* Policy for the slsi_kic_timestamp attribute (nested under
+ * SLSI_KIC_ATTR_TIMESTAMP): seconds and nanoseconds as two u64 values.
+ */
+static const struct nla_policy slsi_kic_attr_timestamp_policy[SLSI_KIC_ATTR_TIMESTAMP_MAX + 1] = {
+ [SLSI_KIC_ATTR_TIMESTAMP_TV_SEC] = { .type = NLA_U64 },
+ [SLSI_KIC_ATTR_TIMESTAMP_TV_NSEC] = { .type = NLA_U64 },
+};
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* #ifndef __SLSI_KIC_PRIM_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SLSI_KIC_WIFI_H
+#define __SLSI_KIC_WIFI_H
+
+#include <scsc/kic/slsi_kic_prim.h>
+
+/**
+ * struct slsi_kic_wifi_ops - backend description for Wi-Fi service driver ops.
+ *
+ * This struct is registered by the Wi-Fi service driver during initialisation
+ * in order to provide Wi-Fi specific services, which can be used by KIC.
+ *
+ * All callbacks except where otherwise noted should return 0 on success or a
+ * negative error code.
+ *
+ * @trigger_recovery: Trigger a Wi-Fi firmware subsystem recovery. The variable
+ * @type specifies the recovery type.
+ */
+struct slsi_kic_wifi_ops {
+ int (*trigger_recovery)(void *priv, enum slsi_kic_test_recovery_type type);
+};
+
+#ifdef CONFIG_SAMSUNG_KIC
+/**
+ * slsi_kic_wifi_ops_register - register wifi_ops with KIC
+ *
+ * @priv: Private pointer, which will be included in all calls from KIC.
+ * @wifi_ops: The wifi_ops to register.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int slsi_kic_wifi_ops_register(void *priv, struct slsi_kic_wifi_ops *wifi_ops);
+
+/**
+ * slsi_kic_wifi_ops_unregister - unregister wifi_ops with KIC
+ *
+ * @wifi_ops: The wifi_ops to unregister.
+ *
+ * After this call, no more requests can be made, but the call may sleep to wait
+ * for an outstanding request that is being handled.
+ */
+void slsi_kic_wifi_ops_unregister(struct slsi_kic_wifi_ops *wifi_ops);
+
+#else
+
+/*
+ * Stubs used when KIC is not built in. These are static inline functions
+ * rather than do {} while (0) statement macros so that
+ * slsi_kic_wifi_ops_register() remains usable in expression context: the
+ * real function is documented above to return 0 on success or a negative
+ * error code, so a caller that checks the result would not compile against
+ * the old statement macro. The register stub reports success.
+ */
+static inline int slsi_kic_wifi_ops_register(void *priv, struct slsi_kic_wifi_ops *wifi_ops)
+{
+ (void)priv;
+ (void)wifi_ops;
+ return 0;
+}
+
+static inline void slsi_kic_wifi_ops_unregister(struct slsi_kic_wifi_ops *wifi_ops)
+{
+ (void)wifi_ops;
+}
+
+#endif /* CONFIG_SAMSUNG_KIC */
+
+#endif /* #ifndef __SLSI_KIC_WIFI_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef __SCSC_LOG_COLLECTOR_H__
+#define __SCSC_LOG_COLLECTOR_H__
+
+/* High nibble is Major, Low nibble is Minor */
+#define SCSC_LOG_HEADER_VERSION_MAJOR 0x03
+#define SCSC_LOG_HEADER_VERSION_MINOR 0x00
+/* Byte layout of the fixed header (see struct scsc_log_sbl_header below): */
+/* Magic string. 4 bytes "SCSC"*/
+/* Header version. 1 byte */
+/* Num chunks. 1 byte */
+/* Offset first Chunk. 2 bytes */
+/* Collection reason. 1 byte */
+/* Reserved. 1 byte */
+/* Reason Code . 2 bytes */
+/* Observer present . 1 byte */
+#define SCSC_LOG_HEADER_SIZE (13)
+#define SCSC_LOG_FW_VERSION_SIZE (128)
+#define SCSC_LOG_HOST_VERSION_SIZE (64)
+#define SCSC_LOG_FAPI_VERSION_SIZE (64)
+/* Reserved 2: 3 bytes. NOTE(review): earlier comment said "4 byte" but the
+ * define below is 3 - confirm which is the intended size. */
+#define SCSC_LOG_RESERVED_2 3
+/* Ideally header + versions should be 16-byte aligned */
+#define SCSC_SUPPORTED_CHUNKS_HEADER 48
+
+/* Alignment (in bytes) applied to the first-chunk offset below. */
+#define SCSC_LOG_CHUNK_ALIGN 1
+/* First chunk should be aligned */
+#define SCSC_LOG_OFFSET_FIRST_CHUNK (((SCSC_LOG_HEADER_SIZE + SCSC_LOG_FW_VERSION_SIZE + \
+ SCSC_LOG_HOST_VERSION_SIZE + SCSC_LOG_FAPI_VERSION_SIZE + \
+ SCSC_LOG_RESERVED_2 + SCSC_SUPPORTED_CHUNKS_HEADER) + \
+ (SCSC_LOG_CHUNK_ALIGN - 1)) & ~(SCSC_LOG_CHUNK_ALIGN - 1))
+/* Reason a log collection was triggered; stored in the SBL header "trigger"
+ * byte and stringified via scsc_loc_reason_str[] (keep both in sync).
+ */
+enum scsc_log_reason {
+ SCSC_LOG_UNKNOWN = 0,
+ SCSC_LOG_FW_PANIC,
+ SCSC_LOG_USER,
+ SCSC_LOG_FW,
+ SCSC_LOG_DUMPSTATE,
+ SCSC_LOG_HOST_WLAN,
+ SCSC_LOG_HOST_BT,
+ SCSC_LOG_HOST_COMMON,
+ SCSC_LOG_SYS_ERR,
+ /* Add others */
+};
+
+extern const char *scsc_loc_reason_str[];
+
+/* Per-chunk header: 4 bytes of data length + 4 bytes of chunk type. */
+#define SCSC_CHUNK_DAT_LEN_SIZE 4
+#define SCSC_CHUNK_TYP_LEN_SIZE 4
+#define SCSC_CHUNK_HEADER_SIZE (SCSC_CHUNK_DAT_LEN_SIZE + SCSC_CHUNK_TYP_LEN_SIZE)
+
+/* CHUNKS WILL BE COLLECTED IN THIS ORDER -
+ * SYNC SHOULD BE THE FIRST CHUNK
+ * LOGRING SHOULD BE THE LAST ONE SO IT COULD CAPTURE COLLECTION ERRORS
+ */
+enum scsc_log_chunk_type {
+ SCSC_LOG_CHUNK_SYNC, /* SYNC should be the first chunk to collect */
+ SCSC_LOG_MINIMOREDUMP,
+ /* Add other chunks */
+ SCSC_LOG_CHUNK_IMP = 127,
+ SCSC_LOG_CHUNK_MXL,
+ SCSC_LOG_CHUNK_UDI,
+ SCSC_LOG_CHUNK_BT_HCF,
+ SCSC_LOG_CHUNK_WLAN_HCF,
+ SCSC_LOG_CHUNK_HIP4_SAMPLER,
+ SCSC_LOG_RESERVED_COMMON,
+ SCSC_LOG_RESERVED_BT,
+ SCSC_LOG_RESERVED_WLAN,
+ SCSC_LOG_RESERVED_RADIO,
+ /* Add other chunks */
+ SCSC_LOG_CHUNK_LOGRING = 254, /* last, so it can capture collection errors */
+ SCSC_LOG_CHUNK_INVALID = 255,
+};
+
+#define SCSC_LOG_COLLECT_MAX_SIZE (16*1024*1024)
+
+/* ADD Collection codes here for HOST triggers */
+/* Consider moving the definitions to specific services if required */
+
+/* Reason codes for SCSC_LOG_USER */
+#define SCSC_LOG_USER_REASON_PROC 0x0000
+/* Reason codes for SCSC_LOG_DUMPSTATE */
+#define SCSC_LOG_DUMPSTATE_REASON 0x0000
+#define SCSC_LOG_DUMPSTATE_REASON_DRIVERDEBUGDUMP 0x0001
+/* Reason codes for SCSC_LOG_HOST_WLAN */
+#define SCSC_LOG_HOST_WLAN_REASON_DISCONNECT 0x0000
+#define SCSC_LOG_HOST_WLAN_REASON_DISCONNECT_IND 0x0001
+#define SCSC_LOG_HOST_WLAN_REASON_DISCONNECTED_IND 0x0002
+#define SCSC_LOG_HOST_WLAN_REASON_DRIVERDEBUGDUMP 0x0003
+#define SCSC_LOG_HOST_WLAN_REASON_CONNECT_ERR 0x0004
+/* Reason codes for SCSC_LOG_HOST_BT */
+/* Reason codes for SCSC_LOG_HOST_COMMON */
+#define SCSC_LOG_HOST_COMMON_REASON_START 0x0000
+#define SCSC_LOG_HOST_COMMON_REASON_STOP 0x0001
+#define SCSC_LOG_HOST_COMMON_RECOVER_RST 0x0002
+
+/* SBL HEADER v 0.0*/
+/* Fixed header written at the start of the collection buffer. Field order
+ * and sizes must match the byte-layout comments above SCSC_LOG_HEADER_SIZE;
+ * __packed guarantees no implicit padding.
+ */
+struct scsc_log_sbl_header {
+ char magic[4]; /* "SCSC" */
+ u8 version_major;
+ u8 version_minor;
+ u8 num_chunks;
+ u8 trigger; /* collection reason - presumably enum scsc_log_reason */
+ u16 offset_data; /* offset of first chunk (SCSC_LOG_OFFSET_FIRST_CHUNK) */
+ char fw_version[SCSC_LOG_FW_VERSION_SIZE];
+ char host_version[SCSC_LOG_HOST_VERSION_SIZE];
+ char fapi_version[SCSC_LOG_FAPI_VERSION_SIZE];
+ u16 reason_code;
+ bool observer;
+ u8 reserved2[SCSC_LOG_RESERVED_2];
+ char supported_chunks[SCSC_SUPPORTED_CHUNKS_HEADER];
+} __packed;
+
+/* Header preceding each chunk in the collection buffer (__packed: no
+ * implicit padding). "type" holds an enum scsc_log_chunk_type value.
+ */
+struct scsc_log_chunk_header {
+ char magic[3];
+ u8 type;
+ u32 chunk_size;
+} __packed;
+
+/* A client that contributes one chunk type to a collection. Registered via
+ * scsc_log_collector_register_client(); the collector invokes collect_init,
+ * collect and collect_end (each returning 0 or a negative error code).
+ */
+struct scsc_log_collector_client {
+ char *name;
+ enum scsc_log_chunk_type type;
+ int (*collect_init)(struct scsc_log_collector_client *collect_client);
+ int (*collect)(struct scsc_log_collector_client *collect_client, size_t size);
+ int (*collect_end)(struct scsc_log_collector_client *collect_client);
+ void *prv; /* client-private context, opaque to the collector */
+};
+
+int scsc_log_collector_register_client(struct scsc_log_collector_client *collect_client);
+int scsc_log_collector_unregister_client(struct scsc_log_collector_client *collect_client);
+
+/* Public method to get pointer of SBL RAM buffer. */
+unsigned char *scsc_log_collector_get_buffer(void);
+
+/* Public method to register FAPI version. */
+void scsc_log_collector_write_fapi(char __user *buf, size_t len);
+
+/* Public method to notify the presence/absence of observers */
+void scsc_log_collector_is_observer(bool observer);
+
+void scsc_log_collector_schedule_collection(enum scsc_log_reason reason, u16 reason_code);
+int scsc_log_collector_write(char __user *buf, size_t count, u8 align);
+#endif /* __SCSC_LOG_COLLECTOR_H__ */
--- /dev/null
+/*****************************************************************************
+ *
+ * Copyright (c) 2016-2018 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef _SCSC_LOGRING_H_
+#define _SCSC_LOGRING_H_
+#include <linux/types.h>
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/device.h>
+#include <linux/sched/clock.h>
+
+/* NOTE_CREATING_TAGS: when adding a tag here REMEMBER to add it also
+ * where required, taking care to maintain the same ordering.
+ * (Search 4 NOTE_CREATING_TAGS)
+ *
+ * You must update "int *scsc_droplevels[]" to match.
+ */
+enum scsc_logring_tags {
+ /* Binary tags: [FIRST_BIN_TAG, LAST_BIN_TAG] carry binary payloads. */
+ FIRST_TAG,
+ FIRST_BIN_TAG = FIRST_TAG,
+ BINARY = FIRST_BIN_TAG,
+ BIN_WIFI_CTRL_RX,
+ BIN_WIFI_DATA_RX,
+ BIN_WIFI_CTRL_TX,
+ BIN_WIFI_DATA_TX,
+ LAST_BIN_TAG = BIN_WIFI_DATA_TX,
+ /* Text tags start here; WLBT is the untagged default. */
+ NO_TAG,
+ WLBT = NO_TAG,
+ WIFI_RX,
+ WIFI_TX,
+ BT_COMMON,
+ BT_H4,
+ BT_FW,
+ BT_RX,
+ BT_TX,
+ CPKTBUFF,
+ FW_LOAD,
+ FW_PANIC,
+ GDB_TRANS,
+ MIF,
+ CLK20,
+ CLK20_TEST,
+ FM,
+ FM_TEST,
+ MX_FILE,
+ MX_FW,
+ MX_SAMPLER,
+ MXLOG_TRANS,
+ MXMAN,
+ MXMAN_TEST,
+ MXMGT_TRANS,
+ MX_MMAP,
+ MX_PROC,
+ PANIC_MON,
+ PCIE_MIF,
+ PLAT_MIF,
+ KIC_COMMON,
+ WLBTD,
+ WLOG,
+ LERNA,
+ MX_CFG,
+#ifdef CONFIG_SCSC_DEBUG_COMPATIBILITY
+ SLSI_INIT_DEINIT,
+ SLSI_NETDEV,
+ SLSI_CFG80211,
+ SLSI_MLME,
+ SLSI_SUMMARY_FRAMES,
+ SLSI_HYDRA,
+ SLSI_TX,
+ SLSI_RX,
+ SLSI_UDI,
+ SLSI_WIFI_FCQ,
+ SLSI_HIP,
+ SLSI_HIP_INIT_DEINIT,
+ SLSI_HIP_FW_DL,
+ SLSI_HIP_SDIO_OP,
+ SLSI_HIP_PS,
+ SLSI_HIP_TH,
+ SLSI_HIP_FH,
+ SLSI_HIP_SIG,
+ SLSI_FUNC_TRACE,
+ SLSI_TEST,
+ SLSI_SRC_SINK,
+ SLSI_FW_TEST,
+ SLSI_RX_BA,
+ SLSI_TDLS,
+ SLSI_GSCAN,
+ SLSI_MBULK,
+ SLSI_FLOWC,
+ SLSI_SMAPPER,
+#endif
+ TEST_ME,
+ MAX_TAG = TEST_ME /* keep it last */
+};
+
+
+#define NODEV_LABEL "" /* printed in place of a netdev name when ndev is NULL */
+/* Map an sdev to its struct device via sdev->wiphy; NULL-safe on both.
+ * NOTE(review): assumes sdev has a wiphy member - this header does not
+ * define the sdev type.
+ */
+#define SCSC_SDEV_2_DEV(sdev) \
+ (((sdev) && (sdev)->wiphy) ? &((sdev)->wiphy->dev) : NULL)
+/* Map a net_device to its struct device through the netdev_vif private data. */
+#define SCSC_NDEV_2_DEV(ndev) \
+ ((ndev) ? SCSC_SDEV_2_DEV(((struct netdev_vif *)netdev_priv(ndev))->sdev) : NULL)
+
+#define SCSC_PREFIX "wlbt: " /* prepended to log statements */
+
+/* Format helpers: optional [tag], optional netdev name (%-5s), then __func__. */
+#define SCSC_TAG_FMT(tag, fmt) SCSC_PREFIX"[" # tag "]: %-5s: - %s: "fmt
+#define SCSC_TAG_DBG_FMT(tag, fmt) SCSC_PREFIX"[" # tag "]: %s: "fmt
+#define SCSC_DEV_FMT(fmt) SCSC_PREFIX"%-5s: - %s: "fmt
+#define SCSC_DBG_FMT(fmt) SCSC_PREFIX"%s: "fmt
+
+int scsc_logring_enable(bool logging_enable);
+
+#ifdef CONFIG_SCSC_PRINTK
+
+int scsc_printk_tag(int force, int tag, const char *fmt, ...);
+int scsc_printk_tag_dev(int force, int tag, struct device *dev, const char *fmt, ...);
+int scsc_printk_tag_dev_lvl(int force, int tag, struct device *dev, int lvl, const char *fmt, ...);
+int scsc_printk_tag_lvl(int tag, int lvl, const char *fmt, ...);
+int scsc_printk_bin(int force, int tag, int dlev, const void *start, size_t len);
+
+/**
+ * These values tune the behavior with respect to the kernel ring buffer:
+ * - NO_FORCE_PRK: the tag-based filtering mechanism is obeyed.
+ * - FORCE_PRK: the tag-based filtering is bypassed and messages
+ * always get to the kernel ring buffer.
+ * - NO_ECHO_PRK: completely disables the printk redirect.
+ */
+#define NO_FORCE_PRK 0
+#define FORCE_PRK 1
+#define NO_ECHO_PRK 2
+
+#define SCSC_PRINTK(args ...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ args)
+
+#define SCSC_PRINTK_TAG(tag, args ...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ args)
+#define SCSC_PRINTK_BIN(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, \
+ -1, (start), (len))
+
+#define SCSC_EMERG(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_EMERG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_ALERT(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_ALERT SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_CRIT(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_CRIT SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_ERR(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_ERR SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_WARNING(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_WARNING SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_NOTICE(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_NOTICE SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_INFO(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_INFO SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_DEBUG(fmt, args...) scsc_printk_tag(NO_FORCE_PRK, WLBT, \
+ KERN_DEBUG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_EMERG(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_EMERG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_ALERT(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_ALERT SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_CRIT(tag, fmt, args ...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_CRIT SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_ERR(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_ERR SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_WARNING(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_WARNING SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_NOTICE(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_NOTICE SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_INFO(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_INFO SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_DEBUG(tag, fmt, args...) scsc_printk_tag(NO_FORCE_PRK, (tag), \
+ KERN_DEBUG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_ERR_SDEV(sdev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_SDEV_2_DEV((sdev)), \
+ KERN_ERR SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_WARNING_SDEV(sdev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_SDEV_2_DEV((sdev)), \
+ KERN_WARNING SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_INFO_SDEV(sdev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_SDEV_2_DEV((sdev)), \
+ KERN_INFO SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+#define SCSC_TAG_DEBUG_SDEV(sdev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_SDEV_2_DEV((sdev)), \
+ KERN_DEBUG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_ERR_NDEV(ndev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_NDEV_2_DEV((ndev)), \
+ KERN_ERR SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+#define SCSC_TAG_WARNING_NDEV(ndev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_NDEV_2_DEV((ndev)), \
+ KERN_WARNING SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+#define SCSC_TAG_INFO_NDEV(ndev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_NDEV_2_DEV((ndev)), \
+ KERN_INFO SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+#define SCSC_TAG_DEBUG_NDEV(ndev, tag, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), SCSC_NDEV_2_DEV((ndev)), \
+ KERN_DEBUG SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+
+#define SCSC_TAG_ERR_DEV(tag, dev, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), dev, \
+ KERN_ERR SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_WARNING_DEV(tag, dev, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), dev, \
+ KERN_WARNING SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_INFO_DEV(tag, dev, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), dev, \
+ KERN_INFO SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_DEBUG_DEV(tag, dev, fmt, args...) \
+ scsc_printk_tag_dev(NO_FORCE_PRK, (tag), dev, \
+ KERN_DEBUG SCSC_DBG_FMT(fmt), \
+ __func__, ## args)
+
+#define SCSC_ERR_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_ERR_SDEV(sdev, WLBT, fmt, ## args)
+#define SCSC_WARNING_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_WARNING_SDEV(sdev, WLBT, fmt, ## args)
+#define SCSC_INFO_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_INFO_SDEV(sdev, WLBT, fmt, ## args)
+
+#define SCSC_ERR_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_ERR_NDEV(ndev, WLBT, fmt, ## args)
+#define SCSC_WARNING_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_WARNING_NDEV(ndev, WLBT, fmt, ## args)
+#define SCSC_INFO_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_INFO_NDEV(ndev, WLBT, fmt, ## args)
+
+
+#define SCSC_BIN_EMERG(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 0, \
+ (start), (len))
+#define SCSC_BIN_ALERT(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 1, \
+ (start), (len))
+#define SCSC_BIN_CRIT(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 2, \
+ (start), (len))
+#define SCSC_BIN_ERR(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 3, \
+ (start), (len))
+#define SCSC_BIN_WARNING(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 4, \
+ (start), (len))
+#define SCSC_BIN_NOTICE(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 5, \
+ (start), (len))
+#define SCSC_BIN_INFO(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 6, \
+ (start), (len))
+#define SCSC_BIN_DEBUG(start, len) scsc_printk_bin(NO_FORCE_PRK, BINARY, 7, \
+ (start), (len))
+
+#define SCSC_BIN_TAG_EMERG(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 0, \
+ (start), (len))
+#define SCSC_BIN_TAG_ALERT(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 1, \
+ (start), (len))
+#define SCSC_BIN_TAG_CRIT(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 2, \
+ (start), (len))
+#define SCSC_BIN_TAG_ERR(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 3, \
+ (start), (len))
+#define SCSC_BIN_TAG_WARNING(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 4, \
+ (start), (len))
+#define SCSC_BIN_TAG_NOTICE(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 5, \
+ (start), (len))
+#define SCSC_BIN_TAG_INFO(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 6, \
+ (start), (len))
+#define SCSC_BIN_TAG_DEBUG(tag, start, len) scsc_printk_bin(NO_FORCE_PRK, (tag), 7, \
+ (start), (len))
+
+
+/*
+ * These macros force a redundant copy of their output to the kernel log
+ * buffer and console through standard kernel facilities, NO matter how the
+ * tag-based filtering is configured and NO matter what the value of the
+ * scsc_redirect_to_printk_droplvl module param is.
+ */
+#define SCSC_PRINTK_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, args)
+#define SCSC_PRINTK_TAG_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), args)
+/* Forced binary dump: bypasses tag filtering. scsc_printk_bin() takes
+ * (force, tag, dlev, start, len) - the BINARY tag argument was previously
+ * missing here (only 4 arguments were passed), so any use of this macro
+ * failed to compile; mirror SCSC_PRINTK_BIN above.
+ */
+#define SCSC_PRINTK_BIN_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, \
+ -1, (start), (len))
+
+#define SCSC_EMERG_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_EMERG SCSC_PREFIX args)
+#define SCSC_ALERT_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_ALERT SCSC_PREFIX args)
+#define SCSC_CRIT_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_CRIT SCSC_PREFIX args)
+#define SCSC_ERR_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_ERR SCSC_PREFIX args)
+#define SCSC_WARNING_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_WARNING SCSC_PREFIX args)
+#define SCSC_NOTICE_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_NOTICE SCSC_PREFIX args)
+#define SCSC_INFO_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_INFO SCSC_PREFIX args)
+#define SCSC_DEBUG_FF(args ...) scsc_printk_tag(FORCE_PRK, WLBT, \
+ KERN_DEBUG SCSC_PREFIX args)
+
+#define SCSC_TAG_EMERG_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_EMERG SCSC_PREFIX args)
+#define SCSC_TAG_ALERT_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_ALERT SCSC_PREFIX args)
+#define SCSC_TAG_CRIT_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_CRIT SCSC_PREFIX args)
+#define SCSC_TAG_ERR_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_ERR SCSC_PREFIX args)
+#define SCSC_TAG_WARNING_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_WARNING SCSC_PREFIX args)
+#define SCSC_TAG_NOTICE_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_NOTICE SCSC_PREFIX args)
+#define SCSC_TAG_INFO_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_INFO SCSC_PREFIX args)
+#define SCSC_TAG_DEBUG_FF(tag, args ...) scsc_printk_tag(FORCE_PRK, (tag), \
+ KERN_DEBUG SCSC_PREFIX args)
+
+/* Forced per-level binary dumps. scsc_printk_bin() takes
+ * (force, tag, dlev, start, len); these macros previously omitted the tag
+ * argument (passing the level where the tag belongs), so any use failed to
+ * compile. Pass BINARY as the tag, matching the non-FF SCSC_BIN_* macros.
+ */
+#define SCSC_BIN_EMERG_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 0, \
+ (start), (len))
+#define SCSC_BIN_ALERT_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 1, \
+ (start), (len))
+#define SCSC_BIN_CRIT_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 2, \
+ (start), (len))
+#define SCSC_BIN_ERR_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 3, \
+ (start), (len))
+#define SCSC_BIN_WARNING_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 4, \
+ (start), (len))
+#define SCSC_BIN_NOTICE_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 5, \
+ (start), (len))
+#define SCSC_BIN_INFO_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 6, \
+ (start), (len))
+#define SCSC_BIN_DEBUG_FF(start, len) scsc_printk_bin(FORCE_PRK, BINARY, 7, \
+ (start), (len))
+
+#define SCSC_TAG_LVL(tag, lvl, fmt, args...) \
+ scsc_printk_tag_lvl((tag), (lvl), fmt, ## args)
+
+#define SCSC_TAG_DEV_LVL(tag, lvl, dev, fmt, args...) \
+ scsc_printk_tag_dev_lvl(NO_FORCE_PRK, (tag), (dev), (lvl), fmt, ## args)
+
+
+#define SCSC_TAG_DBG1_SDEV(sdev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 7, SCSC_SDEV_2_DEV((sdev)), \
+ SCSC_DBG_FMT(fmt), __func__, ## args)
+
+#define SCSC_TAG_DBG2_SDEV(sdev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 8, SCSC_SDEV_2_DEV((sdev)), \
+ SCSC_DBG_FMT(fmt), __func__, ## args)
+
+#define SCSC_TAG_DBG3_SDEV(sdev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 9, SCSC_SDEV_2_DEV((sdev)), \
+ SCSC_DBG_FMT(fmt), __func__, ## args)
+
+#define SCSC_TAG_DBG4_SDEV(sdev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 10, SCSC_SDEV_2_DEV((sdev)), \
+ SCSC_DBG_FMT(fmt), __func__, ## args)
+
+#define SCSC_TAG_DBG1_NDEV(ndev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 7, SCSC_NDEV_2_DEV((ndev)), SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+
+#define SCSC_TAG_DBG2_NDEV(ndev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 8, SCSC_NDEV_2_DEV((ndev)), SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+
+#define SCSC_TAG_DBG3_NDEV(ndev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 9, SCSC_NDEV_2_DEV((ndev)), SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+
+#define SCSC_TAG_DBG4_NDEV(ndev, tag, fmt, args...) \
+ SCSC_TAG_DEV_LVL((tag), 10, SCSC_NDEV_2_DEV((ndev)), SCSC_DEV_FMT(fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), \
+ __func__, ## args)
+
+#define SCSC_TAG_DBG1(tag, fmt, args ...) \
+ SCSC_TAG_LVL((tag), 7, fmt, ## args)
+
+#define SCSC_TAG_DBG2(tag, fmt, args ...) \
+ SCSC_TAG_LVL((tag), 8, fmt, ## args)
+
+#define SCSC_TAG_DBG3(tag, fmt, args ...) \
+ SCSC_TAG_LVL((tag), 9, fmt, ## args)
+
+#define SCSC_TAG_DBG4(tag, fmt, args ...) \
+ SCSC_TAG_LVL((tag), 10, fmt, ## args)
+
+#else /* CONFIG_SCSC_PRINTK */
+
+#define SCSC_TAG_LVL(tag, lvl, fmt, args...) \
+ do {\
+ if ((lvl) < 7)\
+ printk_emit(0, (lvl), NULL, 0, fmt, ## args);\
+ } while (0)
+
+#define SCSC_TAG_DEV_LVL(tag, lvl, dev, fmt, args...) \
+ do {\
+ if ((lvl) < 7)\
+ dev_printk_emit((lvl), (dev), fmt, ## args);\
+ } while (0)
+
+#define SCSC_PRINTK(fmt, args ...) printk(SCSC_PREFIX fmt, ## args)
+#define SCSC_PRINTK_TAG(tag, fmt, args ...) printk(SCSC_PREFIX "[" # tag "] "fmt, ## args)
+#define SCSC_PRINTK_BIN(start, len) print_hex_dump(KERN_INFO, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_PRINTK_FF(fmt, args ...) printk(SCSC_PREFIX fmt, ## args)
+#define SCSC_PRINTK_TAG_FF(tag, fmt, args ...) printk(SCSC_PREFIX"[" # tag "] "fmt, ## args)
+#define SCSC_PRINTK_BIN_FF(start, len) print_hex_dump(KERN_INFO, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_EMERG(fmt, args...) pr_emerg(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_ALERT(fmt, args...) pr_alert(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_CRIT(fmt, args...) pr_crit(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_ERR(fmt, args...) pr_err(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_WARNING(fmt, args...) pr_warn(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_NOTICE(fmt, args...) pr_notice(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_INFO(fmt, args...) pr_info(SCSC_DBG_FMT(fmt), __func__, ## args)
+#define SCSC_DEBUG(args...) do {} while (0)
+
+/* Reverting to pr_* keeping the [tag] */
+#define SCSC_TAG_EMERG(tag, fmt, args...) \
+ pr_emerg(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_ALERT(tag, fmt, args...) \
+ pr_alert(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_CRIT(tag, fmt, args...) \
+ pr_crit(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_ERR(tag, fmt, args...) \
+ pr_err(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_WARNING(tag, fmt, args...) \
+ pr_warn(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_NOTICE(tag, fmt, args...) \
+ pr_notice(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_INFO(tag, fmt, args...) \
+ pr_info(SCSC_TAG_DBG_FMT(tag, fmt), __func__, ## args)
+#define SCSC_TAG_DEBUG(tag, fmt, args...) do {} while (0)
+
+
+#define SCSC_BIN_EMERG(start, len) print_hex_dump(KERN_EMERG, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_ALERT(start, len) print_hex_dump(KERN_ALERT, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_CRIT(start, len) print_hex_dump(KERN_CRIT, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_ERR(start, len) print_hex_dump(KERN_ERR, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_WARNING(start, len) print_hex_dump(KERN_WARNING, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_NOTICE(start, len) print_hex_dump(KERN_NOTICE, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_INFO(start, len) print_hex_dump(KERN_INFO, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_DEBUG(start, len) do {} while (0)
+
+
+#define SCSC_BIN_TAG_EMERG(tag, start, len) print_hex_dump(KERN_EMERG, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_ALERT(tag, start, len) print_hex_dump(KERN_ALERT, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_CRIT(tag, start, len) print_hex_dump(KERN_CRIT, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_ERR(tag, start, len) print_hex_dump(KERN_ERR, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_WARNING(tag, start, len) print_hex_dump(KERN_WARNING, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_NOTICE(tag, start, len) print_hex_dump(KERN_NOTICE, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_TAG_INFO(tag, start, len) print_hex_dump(KERN_INFO, \
+ SCSC_PREFIX"[" # tag "]->|", \
+ DUMP_PREFIX_NONE, \
+ 16, 1, start, \
+ len, false)
+
+#define SCSC_BIN_TAG_DEBUG(tag, start, len) do {} while (0)
+
+
+#define SCSC_EMERG_FF(args ...) pr_emerg(SCSC_PREFIX args)
+#define SCSC_ALERT_FF(args ...) pr_alert(SCSC_PREFIX args)
+#define SCSC_CRIT_FF(args ...) pr_crit(SCSC_PREFIX args)
+#define SCSC_ERR_FF(args ...) pr_err(SCSC_PREFIX args)
+#define SCSC_WARNING_FF(args ...) pr_warn(SCSC_PREFIX args)
+#define SCSC_NOTICE_FF(args ...) pr_notice(SCSC_PREFIX args)
+#define SCSC_INFO_FF(args ...) pr_info(SCSC_PREFIX args)
+#define SCSC_DEBUG_FF(args ...) do {} while (0)
+
+
+#define SCSC_TAG_EMERG_FF(tag, fmt, args ...) pr_emerg(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_ALERT_FF(tag, fmt, args ...) pr_alert(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_CRIT_FF(tag, fmt, args ...) pr_crit(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_ERR_FF(tag, fmt, args ...) pr_err(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_WARNING_FF(tag, fmt, args ...) pr_warn(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_NOTICE_FF(tag, fmt, args ...) pr_notice(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_INFO_FF(tag, fmt, args ...) pr_info(SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+#define SCSC_TAG_DEBUG_FF(tag, fmt, args ...) do {} while (0)
+
+/* Binary-dump fallbacks: hex dump of [start, start + len) via print_hex_dump()
+ * at the matching kernel log level. Layout: 16 bytes per row, 4-byte groups,
+ * absolute addresses in the prefix, ASCII column enabled ("true").
+ * The DEBUG variant compiles to nothing. */
+#define SCSC_BIN_EMERG_FF(start, len) print_hex_dump(KERN_EMERG, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_ALERT_FF(start, len) print_hex_dump(KERN_ALERT, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_CRIT_FF(start, len) print_hex_dump(KERN_CRIT, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_ERR_FF(start, len) print_hex_dump(KERN_ERR, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_WARNING_FF(start, len) print_hex_dump(KERN_WARNING, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_NOTICE_FF(start, len) print_hex_dump(KERN_NOTICE, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+#define SCSC_BIN_INFO_FF(start, len) print_hex_dump(KERN_INFO, \
+ SCSC_PREFIX"[BINARY]->|", \
+ DUMP_PREFIX_ADDRESS, \
+ 16, 4, start, \
+ len, true)
+
+/* DEBUG-level binary dumps are compiled out in the fallback build */
+#define SCSC_BIN_DEBUG_FF(start, len) do {} while (0)
+
+
+/* SDEV variants: log against the struct device obtained from an sdev handle
+ * via SCSC_SDEV_2_DEV(); function name is prepended by SCSC_TAG_DBG_FMT. */
+#define SCSC_TAG_ERR_SDEV(sdev, tag, fmt, args...) \
+ dev_err(SCSC_SDEV_2_DEV((sdev)), SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_WARNING_SDEV(sdev, tag, fmt, args...) \
+ dev_warn(SCSC_SDEV_2_DEV((sdev)), SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_INFO_SDEV(sdev, tag, fmt, args...) \
+ dev_info(SCSC_SDEV_2_DEV((sdev)), SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_DEBUG_SDEV(sdev, tag, fmt, args...) do {} while (0)
+
+
+/* NDEV variants: additionally print the netdev name (or NODEV_LABEL when the
+ * net_device pointer is NULL - ndev may be evaluated more than once here). */
+#define SCSC_TAG_ERR_NDEV(ndev, tag, fmt, args...) \
+ dev_err(SCSC_NDEV_2_DEV((ndev)), SCSC_TAG_FMT(tag, fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), __func__, ## args)
+#define SCSC_TAG_WARNING_NDEV(ndev, tag, fmt, args...) \
+ dev_warn(SCSC_NDEV_2_DEV((ndev)), SCSC_TAG_FMT(tag, fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), __func__, ## args)
+#define SCSC_TAG_INFO_NDEV(ndev, tag, fmt, args...) \
+ dev_info(SCSC_NDEV_2_DEV((ndev)), SCSC_TAG_FMT(tag, fmt), \
+ ((ndev) ? netdev_name(ndev) : NODEV_LABEL), __func__, ## args)
+
+#define SCSC_TAG_DEBUG_NDEV(ndev, tag, fmt, args...) do {} while (0)
+
+/* DEV variants: log against an explicit struct device pointer.
+ * NOTE(review): argument order here is (tag, dev, ...), unlike the SDEV/NDEV
+ * variants where the handle comes first. */
+#define SCSC_TAG_ERR_DEV(tag, dev, fmt, args...) \
+ dev_err(dev, SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_WARNING_DEV(tag, dev, fmt, args...) \
+ dev_warn(dev, SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_INFO_DEV(tag, dev, fmt, args...) \
+ dev_info(dev, SCSC_TAG_DBG_FMT(tag, fmt), \
+ __func__, ## args)
+
+#define SCSC_TAG_DEBUG_DEV(tag, dev, fmt, args...) do {} while (0)
+
+/* Convenience shorthands with the tag fixed to WLBT */
+#define SCSC_ERR_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_ERR_SDEV(sdev, WLBT, fmt, ## args)
+#define SCSC_WARNING_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_WARNING_SDEV(sdev, WLBT, fmt, ## args)
+#define SCSC_INFO_SDEV(sdev, fmt, args...) \
+ SCSC_TAG_INFO_SDEV(sdev, WLBT, fmt, ## args)
+
+#define SCSC_ERR_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_ERR_NDEV(ndev, WLBT, fmt, ## args)
+#define SCSC_WARNING_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_WARNING_NDEV(ndev, WLBT, fmt, ## args)
+#define SCSC_INFO_NDEV(ndev, fmt, args...) \
+ SCSC_TAG_INFO_NDEV(ndev, WLBT, fmt, ## args)
+
+
+/* Verbosity-level debug macros: all compiled out in this build */
+#define SCSC_TAG_DBG1_SDEV(sdev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG2_SDEV(sdev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG3_SDEV(sdev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG4_SDEV(sdev, tag, fmt, args...) do {} while (0)
+
+#define SCSC_TAG_DBG1_NDEV(ndev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG2_NDEV(ndev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG3_NDEV(ndev, tag, fmt, args...) do {} while (0)
+#define SCSC_TAG_DBG4_NDEV(ndev, tag, fmt, args...) do {} while (0)
+
+#define SCSC_TAG_DBG1(tag, fmt, args ...) do {} while (0)
+#define SCSC_TAG_DBG2(tag, fmt, args ...) do {} while (0)
+#define SCSC_TAG_DBG3(tag, fmt, args ...) do {} while (0)
+#define SCSC_TAG_DBG4(tag, fmt, args ...) do {} while (0)
+
+#endif
+
+#endif /* _SCSC_LOGRING_H_ */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _SCSC_MIFRAM_H
+#define _SCSC_MIFRAM_H
+
+/* s32 is declared here so this header is self-contained rather than relying
+ * on every includer pulling in <linux/types.h> first. */
+#include <linux/types.h>
+
+/*
+ * Portable reference to a DRAM address.
+ * In practice this should be a native R4-relative address.
+ */
+typedef s32 scsc_mifram_ref;
+
+#endif /* _SCSC_MIFRAM_H */
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ ****************************************************************************/
+
+#ifndef _SCSC_CORE_H
+#define _SCSC_CORE_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include "scsc_mifram.h"
+
+#define SCSC_PANIC_CODE_FW 0
+#define SCSC_PANIC_CODE_HOST 1
+
+#define SCSC_FW_EVENT_FAILURE 0
+#define SCSC_FW_EVENT_MOREDUMP_COMPLETE 1
+
+/** The following flags define the pools that can used for memory allocation.
+ * To be used with scsc_mx_service_mifram_alloc_extended **/
+/* Standard memory allocation */
+#define MIFRAMMAN_MEM_POOL_GENERIC 1
+/* Used for buffers containing logs that will not be dumped by moredump */
+#define MIFRAMMAN_MEM_POOL_LOGGING 2
+
+struct device;
+struct firmware;
+struct scsc_mx;
+
+/* Identifiers for the services that can be opened on a Maxwell instance
+ * via scsc_mx_service_open(). */
+enum scsc_service_id {
+ SCSC_SERVICE_ID_NULL = 0,
+ SCSC_SERVICE_ID_WLAN = 1,
+ SCSC_SERVICE_ID_BT = 2,
+ SCSC_SERVICE_ID_ANT = 3,
+ SCSC_SERVICE_ID_R4DBG = 4,
+ SCSC_SERVICE_ID_ECHO = 5,
+ SCSC_SERVICE_ID_DBG_SAMPLER = 6,
+ SCSC_SERVICE_ID_CLK20MHZ = 7,
+ SCSC_SERVICE_ID_FM = 8,
+ SCSC_SERVICE_ID_INVALID = 0xff,
+};
+
+#ifdef CONFIG_SCSC_QOS
+/* Count of valid service IDs (NULL..FM). NOTE(review): must be kept in sync
+ * manually with enum scsc_service_id when a service is added. */
+#define SCSC_SERVICE_TOTAL 9
+#endif
+
+/* Why a module client's probe/remove callback is being invoked. */
+enum scsc_module_client_reason {
+ SCSC_MODULE_CLIENT_REASON_HW_PROBE = 0, /* hardware probe */
+ SCSC_MODULE_CLIENT_REASON_HW_REMOVE = 1, /* hardware removal */
+ SCSC_MODULE_CLIENT_REASON_RECOVERY = 2, /* invoked as part of recovery */
+ SCSC_MODULE_CLIENT_REASON_INVALID = 0xff,
+};
+
+#ifdef CONFIG_SCSC_QOS
+/* QoS request levels accepted by scsc_service_pm_qos_add/update_request(). */
+enum scsc_qos_config {
+ SCSC_QOS_DISABLED = 0,
+ SCSC_QOS_MIN = 1,
+ SCSC_QOS_MED = 2,
+ SCSC_QOS_MAX = 3,
+};
+#endif
+
+/* Core Driver Module registration */
+/* A client module registers a probe/remove pair which the core calls with a
+ * reason code (see enum scsc_module_client_reason above). */
+struct scsc_mx_module_client {
+ char *name;
+ void (*probe)(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason);
+ void (*remove)(struct scsc_mx_module_client *module_client, struct scsc_mx *mx, enum scsc_module_client_reason reason);
+};
+
+/* Service Client interface */
+
+struct scsc_service_client;
+
+/* Callbacks a service client supplies when opening a service with
+ * scsc_mx_service_open(). */
+struct scsc_service_client {
+ /** Called on Maxwell failure. The Client should stop all SDRAM & MIF
+ * Mailbox access as fast as possible and inform the Manager by calling
+ * client_stopped() */
+ void (*stop_on_failure)(struct scsc_service_client *client);
+ /** Called when a Maxwell failure has been handled and the Maxwell has been
+ * reset. The Client should assume that any Maxwell resources it held are
+ * invalid */
+ void (*failure_reset)(struct scsc_service_client *client, u16 scsc_panic_code);
+ /* called when AP processor is going into suspend. */
+ int (*suspend)(struct scsc_service_client *client);
+ /* called when AP processor has resumed */
+ int (*resume)(struct scsc_service_client *client);
+ /* called when log collection has been triggered */
+ void (*log)(struct scsc_service_client *client, u16 reason);
+};
+
+#ifdef CONFIG_SCSC_FM
+/*
+ * This must be used by FM Radio Service only. Other services must not use it.
+ * FM Radio client must allocate memory for this structure using scsc_mx_service_mifram_alloc()
+ * and pass this structure as a ref parameter to scsc_mx_service_start().
+ * The version of fm_ldo_conf (the LDO configuration structure) must be written
+ * to the version field by the FM Radio Service and confirmed to match the define by the firmware.
+ * Increment the version (FM_LDO_CONFIG_VERSION) when changing the layout of the structure.
+ */
+#define FM_LDO_CONFIG_VERSION 0
+
+struct fm_ldo_conf {
+ uint32_t version; /* FM_LDO_CONFIG_VERSION */
+ uint32_t ldo_on; /* NOTE(review): presumably non-zero = LDO enabled; confirm against firmware */
+};
+
+/* Parameters to pass from FM radio client driver to WLBT drivers */
+struct wlbt_fm_params {
+ u32 freq; /* Frequency (Hz) in use by FM radio */
+};
+
+#endif
+
+#define PANIC_RECORD_SIZE 64
+#define PANIC_RECORD_DUMP_BUFFER_SZ 4096
+
+/* WARNING: THIS IS INTERRUPT CONTEXT!
+ * here: some serious warnings about not blocking or doing anything lengthy at all
+ */
+typedef void (*scsc_mifintrbit_handler)(int which_bit, void *data);
+
+/*
+ * Core Module Interface
+ */
+int scsc_mx_module_register_client_module(struct scsc_mx_module_client *module_client);
+void scsc_mx_module_unregister_client_module(struct scsc_mx_module_client *module_client);
+int scsc_mx_module_reset(void);
+
+/*
+ * Core Instance interface
+ */
+/** 1st thing to do is call open and return the service management interface */
+struct scsc_service *scsc_mx_service_open(struct scsc_mx *mx, enum scsc_service_id id, struct scsc_service_client *client, int *status);
+
+/*
+ * Service interface
+ */
+/** pass a portable dram reference and returns kernel pointer (basically is dealing with the pointers) */
+void *scsc_mx_service_mif_addr_to_ptr(struct scsc_service *service, scsc_mifram_ref ref);
+void *scsc_mx_service_mif_addr_to_phys(struct scsc_service *service, scsc_mifram_ref ref);
+int scsc_mx_service_mif_ptr_to_addr(struct scsc_service *service, void *mem_ptr, scsc_mifram_ref *ref);
+
+int scsc_mx_service_start(struct scsc_service *service, scsc_mifram_ref ref);
+int scsc_mx_service_stop(struct scsc_service *service);
+int scsc_mx_service_close(struct scsc_service *service);
+int scsc_mx_service_mif_dump_registers(struct scsc_service *service);
+
+/** Signal a failure detected by the Client. This will trigger the systemwide
+ * failure handling procedure: _All_ Clients will be called back via
+ * their stop_on_failure() handler as a side-effect. */
+void scsc_mx_service_service_failed(struct scsc_service *service, const char *reason);
+
+/* MEMORY Interface*/
+/** Allocate a contiguous block of SDRAM accessible to Client Driver. The memory will be allocated
+ * from generic pool (MIFRAMMAN_MEM_POOL_GENERIC) */
+int scsc_mx_service_mifram_alloc(struct scsc_service *service, size_t nbytes, scsc_mifram_ref *ref, u32 align);
+/* Same as scsc_mx_service_mifram_alloc but allows to specify flags (MIFRAMMAN_MEM_POOL_XX).
+ * So, for example, to allocate memory from the logging pool use MIFRAMMAN_MEM_POOL_LOGGING. */
+int scsc_mx_service_mifram_alloc_extended(struct scsc_service *service, size_t nbytes, scsc_mifram_ref *ref, u32 align, uint32_t flags);
+struct scsc_bt_audio_abox *scsc_mx_service_get_bt_audio_abox(struct scsc_service *service);
+struct mifabox *scsc_mx_service_get_aboxram(struct scsc_service *service);
+/** Free a contiguous block of SDRAM */
+void scsc_mx_service_mifram_free(struct scsc_service *service, scsc_mifram_ref ref);
+void scsc_mx_service_mifram_free_extended(struct scsc_service *service, scsc_mifram_ref ref, uint32_t flags);
+
+/* MBOX Interface */
+/** Allocate n contiguous mailboxes. Outputs index of first mbox, returns FALSE if can’t allocate n contiguous mailboxes. */
+bool scsc_mx_service_alloc_mboxes(struct scsc_service *service, int n, int *first_mbox_index);
+/** Free n contiguous mailboxes. */
+void scsc_service_free_mboxes(struct scsc_service *service, int n, int first_mbox_index);
+
+/** Get kernel-space pointer to a mailbox.
+ * The pointer can be cached as it is guaranteed not to change between service start & stop.
+ **/
+u32 *scsc_mx_service_get_mbox_ptr(struct scsc_service *service, int mbox_index);
+
+/* IRQ Interface */
+/* Getters/Setters */
+
+/* From R4/M4 */
+int scsc_service_mifintrbit_bit_mask_status_get(struct scsc_service *service);
+int scsc_service_mifintrbit_get(struct scsc_service *service);
+void scsc_service_mifintrbit_bit_clear(struct scsc_service *service, int which_bit);
+void scsc_service_mifintrbit_bit_mask(struct scsc_service *service, int which_bit);
+void scsc_service_mifintrbit_bit_unmask(struct scsc_service *service, int which_bit);
+
+/* To R4/M4 */
+/* Destination core for a FROMHOST interrupt bit. */
+enum scsc_mifintr_target {
+ SCSC_MIFINTR_TARGET_R4 = 0,
+ SCSC_MIFINTR_TARGET_M4 = 1
+};
+
+void scsc_service_mifintrbit_bit_set(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir);
+
+/* Register an interrupt handler -TOHOST direction.
+ * Function returns the IRQ associated , -EIO if all interrupts have been assigned */
+int scsc_service_mifintrbit_register_tohost(struct scsc_service *service, scsc_mifintrbit_handler handler, void *data);
+/* Unregister an interrupt handler associated with a bit -TOHOST direction */
+int scsc_service_mifintrbit_unregister_tohost(struct scsc_service *service, int which_bit);
+
+/* Get an interrupt bit associated with the target (R4/M4) -FROMHOST direction
+ * Function returns the IRQ bit associated , -EIO if error */
+int scsc_service_mifintrbit_alloc_fromhost(struct scsc_service *service, enum scsc_mifintr_target dir);
+/* Free an interrupt bit associated with the target (R4/M4) -FROMHOST direction
+ * Function returns 0 if it succeeds, -EIO if error */
+int scsc_service_mifintrbit_free_fromhost(struct scsc_service *service, int which_bit, enum scsc_mifintr_target dir);
+/*
+ * Return a kernel device associated 1:1 with the Maxwell instance.
+ * This is published only for the purpose of associating service drivers
+ * with a Maxwell instance for logging purposes. Clients should not make
+ * any assumptions about the device type. In some configurations this may
+ * be the associated host-interface device (AXI/PCIe),
+ * but this may change in future.
+ */
+struct device *scsc_service_get_device(struct scsc_service *service);
+struct device *scsc_service_get_device_by_mx(struct scsc_mx *mx);
+
+int scsc_service_force_panic(struct scsc_service *service);
+
+/*
+ * API to share /sys/wifi kobject between core and wifi driver modules.
+ * Depending upon the order of loading respective drivers, a kobject is
+ * created and shared with the other driver. This convoluted implementation
+ * is required as we need the common kobject associated with the "/sys/wifi" directory
+ * when creating a file underneath it. The core driver (mxman.c) needs to create "memdump"
+ * and the wifi driver (dev.c, mgt.c) needs to create "mac_addr" files respectively.
+ */
+struct kobject *mxman_wifi_kobject_ref_get(void);
+void mxman_wifi_kobject_ref_put(void);
+
+#ifdef CONFIG_SCSC_SMAPPER
+/* SMAPPER Interface */
+/* Configure smapper. Function should configure smapper FW memory map, range, and granularity */
+void scsc_service_mifsmapper_configure(struct scsc_service *service, u32 granularity);
+/* Allocate large/small entries bank. Outputs index of bank, returns -EIO if can’t allocate any banks. */
+/* Function also returns the number of entries that can be used in the bank, as the number of entries
+ * is HW dependent (entries/granularity/memory window in FW)
+ */
+int scsc_service_mifsmapper_alloc_bank(struct scsc_service *service, bool large_bank, u32 entry_size, u16 *entries);
+/* Free large/small entries bank */
+int scsc_service_mifsmapper_free_bank(struct scsc_service *service, u8 bank);
+/* Get number entries, returns error if entries have not been allocated */
+int scsc_service_mifsmapper_get_entries(struct scsc_service *service, u8 bank, u8 num_entries, u8 *entries);
+/* Free number entries, returns error if entries have not been allocated */
+int scsc_service_mifsmapper_free_entries(struct scsc_service *service, u8 bank, u8 num_entries, u8 *entries);
+/* Program SRAM entry */
+int scsc_service_mifsmapper_write_sram(struct scsc_service *service, u8 bank, u8 num_entries, u8 first_entry, dma_addr_t *addr);
+u32 scsc_service_mifsmapper_get_bank_base_address(struct scsc_service *service, u8 bank);
+/* Get SMAPPER alignment */
+u16 scsc_service_get_alignment(struct scsc_service *service);
+#endif
+
+#ifdef CONFIG_SCSC_QOS
+int scsc_service_pm_qos_add_request(struct scsc_service *service, enum scsc_qos_config config);
+int scsc_service_pm_qos_update_request(struct scsc_service *service, enum scsc_qos_config config);
+int scsc_service_pm_qos_remove_request(struct scsc_service *service);
+#endif
+
+/* MXLOGGER API */
+/* If there is no service/mxman associated, register the observer as global (will affect all the mx instances) */
+/* Users of these functions should ensure that the register/unregister calls are balanced (i.e. if an observer is registered as global,
+ * it _has_ to unregister as global) */
+int scsc_service_register_observer(struct scsc_service *service, char *name);
+/* Unregister an observer */
+int scsc_service_unregister_observer(struct scsc_service *service, char *name);
+
+/* Reads a configuration file into memory.
+ *
+ * Path is relative to the currently selected firmware configuration
+ * subdirectory.
+ * Returns pointer to data or NULL if file not found.
+ * Call mx140_file_release_conf() to release the memory.
+ */
+int mx140_file_request_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_path, const char *filename);
+
+/* Reads a debug configuration file into memory.
+ *
+ * Path is relative to the currently selected firmware configuration
+ * subdirectory.
+ * Returns pointer to data or NULL if file not found.
+ * Call mx140_file_release_conf() to release the memory.
+ */
+int mx140_file_request_debug_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_path);
+
+/* Read device configuration file into memory.
+ *
+ * Path is relative to the device configuration directory.
+ * Returns pointer to data or NULL if file not found.
+ * Call mx140_file_release_conf() to release the memory.
+ * This call is only used for configuration files that are
+ * device instance specific (e.g. mac addresses)
+ */
+int mx140_file_request_device_conf(struct scsc_mx *mx, const struct firmware **conf, const char *config_path);
+
+/* Release configuration file memory
+ *
+ * If conf is NULL, has no effect.
+ */
+void mx140_file_release_conf(struct scsc_mx *mx, const struct firmware *conf);
+
+/* Read device configuration file into memory.
+ *
+ * Path is absolute.
+ * Returns pointer to data or NULL if file not found.
+ * Call mx140_release_file() to release the memory.
+ */
+int mx140_request_file(struct scsc_mx *mx, char *path, const struct firmware **firmp);
+
+/* Release configuration file memory allocated with mx140_request_file()
+ *
+ * If firmp is NULL, has no effect.
+ */
+int mx140_release_file(struct scsc_mx *mx, const struct firmware *firmp);
+
+/* 20 MHz clock API.
+ * The mx140 device uses a clock that is also required by the USB driver.
+ * This API allows the USB/clock driver to inform the mx140 driver that the
+ * clock is required and that it must boot and/or keep the clock running.
+ */
+
+/* Result/event codes for the 20 MHz clock API: returned synchronously or
+ * delivered to the callback registered with mx140_clk20mhz_register(). */
+enum mx140_clk20mhz_status {
+ MX140_CLK_SUCCESS = 0, /* Returned successfully */
+ MX140_CLK_STARTED, /* mx140 has started the clock */
+ MX140_CLK_STOPPED, /* mx140 has stopped the clock */
+ MX140_CLK_NOT_STARTED, /* failed to start the clock */
+ MX140_CLK_NOT_STOPPED, /* failed to stop the clock */
+ MX140_CLK_ASYNC_FAIL, /* mx140 failure, async call */
+};
+
+/* Register for 20 MHz clock API callbacks
+ *
+ * Parameters:
+ * client_cb:
+ * If client provides non-NULL client_cb, the request is asynchronous and
+ * the client will be called back when the clock service is started.
+ * If client_cb is NULL, the request is blocking.
+ * data:
+ * opaque context for the client, and will be passed back in any callback
+ *
+ * Note it is possible that the callback may be made in the context of the
+ * calling request/release function.
+ *
+ * Returns 0 on success
+ */
+int mx140_clk20mhz_register(void (*client_cb)(void *data, enum mx140_clk20mhz_status event), void *data);
+
+/* Unregister for 20 MHz clock API callbacks.
+ * After this call is made, the mx140 driver will no longer call back.
+ */
+void mx140_clk20mhz_unregister(void);
+
+/* Client request that the clock be available.
+ *
+ * If a callback was installed via mx140_clk20mhz_register(), the mx140 driver
+ * will call back when the clock is available. If no callback was installed,
+ * the request is blocking and will return when the clock is running.
+ *
+ * Returns:
+ * mx140_clk20mhz_status if a blocking attempt was made to start the clock,
+ * MX140_CLK_SUCCESS if the request will happen asynchronously, or,
+ * -ve error code on other error.
+ *
+ */
+int mx140_clk20mhz_request(void);
+
+/* Client informs that the clock is no longer needed
+ *
+ * Returns:
+ * mx140_clk20mhz_status if a blocking attempt was made to stop the clock,
+ * MX140_CLK_SUCCESS if the request will happen asynchronously, or,
+ * -ve error code on other error.
+ */
+int mx140_clk20mhz_release(void);
+
+
+/* Client requests that FM LDO be available.
+ *
+ * Returns:
+ * 0 on success or -ve error code on error.
+ *
+ */
+int mx250_fm_request(void);
+
+
+/* Client informs that the LDO is no longer needed
+ *
+ * Returns:
+ * 0 on success or -ve error code on error.
+ */
+int mx250_fm_release(void);
+
+
+/* FM client informs of parameter change.
+ *
+ * mx250_fm_request() must have been called first.
+ *
+ * Returns:
+ * None
+ */
+void mx250_fm_set_params(struct wlbt_fm_params *info);
+
+/*
+ * for set test mode.
+ *
+ */
+bool slsi_is_rf_test_mode_enabled(void);
+
+int mx140_log_dump(void);
+
+void mxman_get_fw_version(char *version, size_t ver_sz);
+void mxman_get_driver_version(char *version, size_t ver_sz);
+
+int mxman_register_firmware_notifier(struct notifier_block *nb);
+int mxman_unregister_firmware_notifier(struct notifier_block *nb);
+
+/* Status of WLBT autorecovery on the platform
+ *
+ * Returns:
+ * false - enabled, true - disabled
+ */
+bool mxman_recovery_disabled(void);
+
+/* Map a log-collection trigger code to its human-readable name.
+ * Returns a pointer to a static string; code 0 and any unrecognised value
+ * yield "unknown". */
+static inline const char *scsc_get_trigger_str(int code)
+{
+ switch (code) {
+ case 1: return "scsc_log_fw_panic";
+ case 2: return "scsc_log_user";
+ case 3: return "scsc_log_fw";
+ case 4: return "scsc_log_dumpstate";
+ case 5: return "scsc_log_host_wlan";
+ case 6: return "scsc_log_host_bt";
+ case 7: return "scsc_log_host_common";
+ case 8: return "scsc_log_sys_error";
+ case 0:
+ default:
+ return "unknown";
+ }
+} /* was "};": the stray file-scope semicolon is invalid in strict ISO C */
+
+#endif
--- /dev/null
+/****************************************************************************
+ *
+ * Copyright (c) 2014 - 2019 Samsung Electronics Co., Ltd. All rights reserved
+ *
+ *****************************************************************************/
+
+#ifndef _SCSC_RELEASE_H
+#define _SCSC_RELEASE_H
+
+#define SCSC_RELEASE_SOLUTION "mx250"
+
+#define SCSC_RELEASE_PRODUCT 10
+#define SCSC_RELEASE_ITERATION 1
+#define SCSC_RELEASE_CANDIDATE 1
+
+#define SCSC_RELEASE_POINT 0
+
+#endif
+
+