VERSION = 3
PATCHLEVEL = 10
- SUBLEVEL = 102
+ SUBLEVEL = 103
EXTRAVERSION =
NAME = TOSSUG Baby Fish
-Werror-implicit-function-declaration \
-Wno-format-security \
-fno-delete-null-pointer-checks \
- -std=gnu89
+ -w -std=gnu89
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
if (ret)
return ret;
- vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);
return 0;
}
#endif
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
+ ret = put_user(task_thread_info(child)->tp_value[0],
datap);
break;
struct pt_regs *regs,
int error, long val)
{
- regs->gprs[2] = error ? -error : val;
+ regs->gprs[2] = error ? error : val;
}
static inline void syscall_get_arguments(struct task_struct *task,
regs->orig_gpr2 = args[0];
}
-static inline int syscall_get_arch(struct task_struct *task,
- struct pt_regs *regs)
+static inline int syscall_get_arch(void)
{
#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(task, TIF_31BIT))
+ if (test_tsk_thread_flag(current, TIF_31BIT))
return AUDIT_ARCH_S390;
#endif
return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
case 0x06c: map_key_clear(KEY_YELLOW); break;
case 0x06d: map_key_clear(KEY_ZOOM); break;
+ case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break;
+ case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break;
+ case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE); break;
+ case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN); break;
+ case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break;
+ case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break;
+
case 0x082: map_key_clear(KEY_VIDEO_NEXT); break;
case 0x083: map_key_clear(KEY_LAST); break;
case 0x084: map_key_clear(KEY_ENTER); break;
case 0x0bf: map_key_clear(KEY_SLOW); break;
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
+ case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
case 0x0e2: map_key_clear(KEY_MUTE); break;
case 0x0e5: map_key_clear(KEY_BASSBOOST); break;
case 0x0ea: map_key_clear(KEY_VOLUMEDOWN); break;
case 0x0f5: map_key_clear(KEY_SLOW); break;
+ case 0x181: map_key_clear(KEY_BUTTONCONFIG); break;
case 0x182: map_key_clear(KEY_BOOKMARKS); break;
case 0x183: map_key_clear(KEY_CONFIG); break;
case 0x184: map_key_clear(KEY_WORDPROCESSOR); break;
case 0x18c: map_key_clear(KEY_VOICEMAIL); break;
case 0x18d: map_key_clear(KEY_ADDRESSBOOK); break;
case 0x18e: map_key_clear(KEY_CALENDAR); break;
+ case 0x18f: map_key_clear(KEY_TASKMANAGER); break;
+ case 0x190: map_key_clear(KEY_JOURNAL); break;
case 0x191: map_key_clear(KEY_FINANCE); break;
case 0x192: map_key_clear(KEY_CALC); break;
case 0x193: map_key_clear(KEY_PLAYER); break;
case 0x199: map_key_clear(KEY_CHAT); break;
case 0x19c: map_key_clear(KEY_LOGOFF); break;
case 0x19e: map_key_clear(KEY_COFFEE); break;
+ case 0x19f: map_key_clear(KEY_CONTROLPANEL); break;
+ case 0x1a2: map_key_clear(KEY_APPSELECT); break;
+ case 0x1a3: map_key_clear(KEY_NEXT); break;
+ case 0x1a4: map_key_clear(KEY_PREVIOUS); break;
case 0x1a6: map_key_clear(KEY_HELP); break;
case 0x1a7: map_key_clear(KEY_DOCUMENTS); break;
case 0x1ab: map_key_clear(KEY_SPELLCHECK); break;
case 0x1ae: map_key_clear(KEY_KEYBOARD); break;
+ case 0x1b1: map_key_clear(KEY_SCREENSAVER); break;
+ case 0x1b4: map_key_clear(KEY_FILE); break;
case 0x1b6: map_key_clear(KEY_IMAGES); break;
case 0x1b7: map_key_clear(KEY_AUDIO); break;
case 0x1b8: map_key_clear(KEY_VIDEO); break;
return;
/* report the usage code as scancode if the key status has changed */
- if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ if (usage->type == EV_KEY && (!!test_bit(usage->code, input->key)) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
input_event(input, usage->type, usage->code, value);
* UGCI) cram a lot of unrelated inputs into the
* same interface. */
hidinput->report = report;
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
hidinput = NULL;
}
if (hidinput) {
- if (drv->input_configured)
- drv->input_configured(hid, hidinput);
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_cleanup;
if (input_register_device(hidinput->input))
goto out_cleanup;
}
#include <linux/capability.h>
#include <linux/compat.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <asm/uaccess.h>
#include "queue.h"
+#include <mach/mtk_meminfo.h>
+
+//log vmstat counters together with the block tag log
+#include <linux/vmstat.h>
+#define FEATURE_STORAGE_VMSTAT_LOGGER
+
+
+#include <linux/xlog.h>
+#include <asm/div64.h>
+#include <linux/vmalloc.h>
+
+#include <linux/mmc/sd_misc.h>
+
+#define MET_USER_EVENT_SUPPORT
+#include <linux/met_drv.h>
+
+#define FEATURE_STORAGE_PERF_INDEX
+//keep storage performance logging enabled even on user builds (the undef below is compiled out)
+#if 0
+#ifdef USER_BUILD_KERNEL
+#undef FEATURE_STORAGE_PERF_INDEX
+#endif
+#endif
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+#ifndef CONFIG_MTK_FPGA
+#include <linux/met_ftrace_bio.h>
+#endif
+
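+/* Map an MMC data opcode to a transfer direction: 'R' (read), 'W' (write), 'X' (neither). */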
+char mmc_get_rw_type(u32 opcode)
+{
+ switch (opcode)
+ {
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_READ_MULTIPLE_BLOCK:
+ return 'R';
+ case MMC_WRITE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ return 'W';
+ default:
+		// not a data read/write opcode
+ return 'X';
+ }
+}
+
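+/* Validate that host, request, command, data and card pointers are all populated before the MET hooks dereference them. */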
+inline int check_met_mmc_async_req_legal(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ int is_legal = 0;
+
+ if (!((host == NULL) || (areq == NULL) || (areq->mrq == NULL)
+ || (areq->mrq->cmd == NULL) || (areq->mrq->data == NULL)
+ || (host->card == NULL))) {
+ is_legal = 1;
+ }
+
+ return is_legal;
+}
+
+inline int check_met_mmc_blk_data_legal(struct mmc_blk_data *md)
+{
+ int is_legal = 0;
+
+ if (!((md == NULL) || (md->disk == NULL))) {
+ is_legal = 1;
+ }
+
+ return is_legal;
+}
+
+inline int check_met_mmc_req_legal(struct mmc_host *host, struct mmc_request *req)
+{
+ int is_legal = 0;
+
+ if (!((host == NULL) || (req == NULL) || (req->cmd == NULL)
+ || (req->data == NULL) || (host->card == NULL))) {
+ is_legal = 1;
+ }
+
+ return is_legal;
+}
+
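+/*
+ * The met_mmc_* hooks below emit trace events for MET (MediaTek's
+ * performance profiling framework) at each stage of an MMC request:
+ * insert, DMA map, issue, transfer, completion and DMA unmap.
+ */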
+void met_mmc_insert(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_insert, md, areq, type);
+#endif
+}
+
+void met_mmc_dma_map(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_dma_map, md, areq, type);
+#endif
+}
+
+//void met_mmc_issue(struct mmc_host *host, struct mmc_async_req *areq)
+//{
+// struct mmc_blk_data *md;
+// char type;
+//
+// if (!check_met_mmc_async_req_legal(host, areq))
+// return;
+//
+// md = mmc_get_drvdata(host->card);
+//
+// type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+// if (type == 'X')
+// return;
+//
+// MET_FTRACE_PRINTK(met_mmc_issue, md, areq, type);
+//}
+
+void met_mmc_issue(struct mmc_host *host, struct mmc_request *req)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_req_legal(host, req))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(req->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_issue, md, req, type);
+#endif
+}
+
+void met_mmc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(host->card);
+ char type;
+
+ type = mmc_get_rw_type(cmd->opcode);
+ if (type == 'X')
+ return;
+
+ trace_printk("%d,%d %c %d + %d [%s]\n",
+ md->disk->major, md->disk->first_minor, type,
+ cmd->arg, cmd->data->blocks,
+ current->comm);
+}
+
+void met_mmc_xfr_done(struct mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_blk_data *md=mmc_get_drvdata(host->card);
+ char type;
+
+ type = mmc_get_rw_type(cmd->opcode);
+ if (type == 'X')
+ return;
+
+ trace_printk("%d,%d %c %d + %d [%s]\n",
+ md->disk->major, md->disk->first_minor, type,
+ cmd->arg, cmd->data->blocks,
+ current->comm);
+}
+
+void met_mmc_wait_xfr(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(host->card);
+ char type;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+
+ trace_printk("%d,%d %c %d + %d [%s]\n",
+ md->disk->major, md->disk->first_minor, type,
+ areq->mrq->cmd->arg, areq->mrq->data->blocks,
+ current->comm);
+
+}
+
+void met_mmc_tuning_start(struct mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(host->card);
+ char type;
+
+ type = mmc_get_rw_type(cmd->opcode);
+ if (type == 'X')
+ return;
+
+ trace_printk("%d,%d %c %d + %d [%s]\n",
+ md->disk->major, md->disk->first_minor, type,
+ cmd->arg, cmd->data->blocks,
+ current->comm);
+}
+
+void met_mmc_tuning_end(struct mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_blk_data *md = mmc_get_drvdata(host->card);
+ char type;
+
+ type = mmc_get_rw_type(cmd->opcode);
+ if (type == 'X')
+ return;
+
+ trace_printk("%d,%d %c %d + %d [%s]\n",
+ md->disk->major, md->disk->first_minor, type,
+ cmd->arg, cmd->data->blocks,
+ current->comm);
+}
+
+void met_mmc_complete(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_complete, md, areq, type);
+#endif
+}
+
+void met_mmc_dma_unmap_start(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_dma_unmap_start, md, areq, type);
+#endif
+}
+
+void met_mmc_dma_unmap_stop(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_dma_unmap_stop, md, areq, type);
+#endif
+}
+
+void met_mmc_continue_req_end(struct mmc_host *host, struct mmc_async_req *areq)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_continue_req_end, md, areq, type);
+#endif
+}
+
+void met_mmc_dma_stop(struct mmc_host *host, struct mmc_async_req *areq, unsigned int bd_num)
+{
+ struct mmc_blk_data *md;
+ char type;
+
+ if (!check_met_mmc_async_req_legal(host, areq))
+ return;
+
+ md = mmc_get_drvdata(host->card);
+ if (!check_met_mmc_blk_data_legal(md))
+ return;
+
+ type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+ if (type == 'X')
+ return;
+#ifndef CONFIG_MTK_FPGA
+ MET_FTRACE_PRINTK(met_mmc_dma_stop, md, areq, type, bd_num);
+#endif
+}
+
+//void met_mmc_end(struct mmc_host *host, struct mmc_async_req *areq)
+//{
+// struct mmc_blk_data *md;
+// char type;
+//
+// if (areq && areq->mrq && host && host->card) {
+// type = mmc_get_rw_type(areq->mrq->cmd->opcode);
+// if (type == 'X')
+// return;
+//
+// md = mmc_get_drvdata(host->card);
+//
+// if (areq && areq->mrq)
+// {
+// trace_printk("%d,%d %c %d + %d [%s]\n",
+// md->disk->major, md->disk->first_minor, type,
+// areq->mrq->cmd->arg, areq->mrq->data->blocks,
+// current->comm);
+// }
+// }
+//}
+
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
struct mmc_packed *packed = mqrq->packed;
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
return result;
}
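+/* Exported wrapper around the static mmc_sd_num_wr_blocks() so code outside this file can query the written-block count. */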
+u32 __mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+ return mmc_sd_num_wr_blocks(card);
+}
+EXPORT_SYMBOL(__mmc_sd_num_wr_blocks);
+
static int send_stop(struct mmc_card *card, u32 *status)
{
struct mmc_command cmd = {0};
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
-
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
return ERR_ABORT;
default:
if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
R1_CURRENT_STATE(status) == R1_STATE_RCV) {
err = send_stop(card, &stop_status);
+		if (err) {
+			get_card_status(card, &status, 0);
+			if ((R1_CURRENT_STATE(status) == R1_STATE_TRAN) ||
+			    (R1_CURRENT_STATE(status) == R1_STATE_PRG)) {
+				err = 0;
+				stop_status = 0;
+				pr_err("card recovered to tran/prg state, status %d\n", status);
+			} else {
+				pr_err("card stuck in wrong state, status %d\n", status);
+			}
+		}
if (err)
pr_err("%s: error %d sending stop command\n",
req->rq_disk->disk_name, err);
goto out;
}
- if (mmc_can_sanitize(card))
+ if (mmc_can_sanitize(card)) {
+ trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_SANITIZE_START, 1, 0);
+ trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
+ }
out_retry:
if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
+#ifdef CONFIG_MTK_EMMC_CACHE
+	/*
+	 * For non-cacheable system data, reliable write / forced
+	 * programming must be issued as a multi-block write command.
+	 */
+	if (mmc_card_mmc(card) && (card->ext_csd.cache_ctrl & 0x1))
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+#endif
if (rq_data_dir(req) == READ) {
brq->cmd.opcode = readcmd;
brq->data.flags |= MMC_DATA_READ;
brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+ if (brq->data.sg_len > 1024)
+ pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
+
/*
* Adjust the sg list so it is the same size as the
* request.
}
}
brq->data.sg_len = i;
+ pr_err("%s:%d sglen = %x\n", __func__, __LINE__, brq->data.sg_len);
}
mqrq->mmc_active.mrq = &brq->mrq;
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
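+	/* the packed header is sent to the card as data, which the eMMC spec defines as little-endian */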
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
brq->data.sg = mqrq->sg;
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+ pr_err("%s: sglen = %d\n", __func__, brq->data.sg_len);
mqrq->mmc_active.mrq = &brq->mrq;
mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
mmc_blk_clear_packed(mq_rq);
}
+#if defined(FEATURE_STORAGE_PERF_INDEX)
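+/*
+ * Per-mmcqd-thread counters used to log workload, throughput and
+ * request-locality statistics every PRT_TIME_PERIOD nanoseconds.
+ */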
+#define PRT_TIME_PERIOD 500000000
+#define UP_LIMITS_4BYTE 4294967295UL //((4*1024*1024*1024)-1)
+#define ID_CNT 10
+pid_t mmcqd[ID_CNT] = {0};
+bool start_async_req[ID_CNT] = {0};
+unsigned long long start_async_req_time[ID_CNT] = {0};
+static unsigned long long mmcqd_tag_t1[ID_CNT] = {0}, mmccid_tag_t1 = 0;
+unsigned long long mmcqd_t_usage_wr[ID_CNT] = {0}, mmcqd_t_usage_rd[ID_CNT] = {0};
+unsigned int mmcqd_rq_size_wr[ID_CNT] = {0}, mmcqd_rq_size_rd[ID_CNT] = {0};
+static unsigned int mmcqd_wr_offset_tag[ID_CNT] = {0}, mmcqd_rd_offset_tag[ID_CNT] = {0}, mmcqd_wr_offset[ID_CNT] = {0}, mmcqd_rd_offset[ID_CNT] = {0};
+static unsigned int mmcqd_wr_bit[ID_CNT] = {0}, mmcqd_wr_tract[ID_CNT] = {0};
+static unsigned int mmcqd_rd_bit[ID_CNT] = {0}, mmcqd_rd_tract[ID_CNT] = {0};
+static unsigned int mmcqd_wr_break[ID_CNT] = {0}, mmcqd_rd_break[ID_CNT] = {0};
+unsigned int mmcqd_rq_count[ID_CNT] = {0}, mmcqd_wr_rq_count[ID_CNT] = {0}, mmcqd_rd_rq_count[ID_CNT] = {0};
+extern u32 g_u32_cid[4];
+#ifdef FEATURE_STORAGE_META_LOG
+int check_perdev_minors = CONFIG_MMC_BLOCK_MINORS;
+struct metadata_rwlogger metadata_logger[10] = {{{0}}};
+#endif
+unsigned int mmcqd_work_percent[ID_CNT]={0};
+unsigned int mmcqd_w_throughput[ID_CNT]={0};
+unsigned int mmcqd_r_throughput[ID_CNT]={0};
+unsigned int mmcqd_read_clear[ID_CNT]={0};
+
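+/* reset the per-queue counters of slot @idx at the start of a new sampling period */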
+static void g_var_clear(unsigned int idx)
+{
+	mmcqd_t_usage_wr[idx] = 0;
+	mmcqd_t_usage_rd[idx] = 0;
+	mmcqd_rq_size_wr[idx] = 0;
+	mmcqd_rq_size_rd[idx] = 0;
+	mmcqd_rq_count[idx] = 0;
+	mmcqd_wr_offset[idx] = 0;
+	mmcqd_rd_offset[idx] = 0;
+	mmcqd_wr_break[idx] = 0;
+	mmcqd_rd_break[idx] = 0;
+	mmcqd_wr_tract[idx] = 0;
+	mmcqd_wr_bit[idx] = 0;
+	mmcqd_rd_tract[idx] = 0;
+	mmcqd_rd_bit[idx] = 0;
+	mmcqd_wr_rq_count[idx] = 0;
+	mmcqd_rd_rq_count[idx] = 0;
+}
+
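+/*
+ * Map the current mmcqd thread to a slot in mmcqd[]; a new pid takes
+ * the first free slot, or recycles the last slot when the table is full.
+ */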
+unsigned int find_mmcqd_index(void)
+{
+	pid_t mmcqd_pid = 0;
+	unsigned int idx = 0;
+	unsigned char i = 0;
+
+	mmcqd_pid = task_pid_nr(current);
+
+	if (mmcqd[0] == 0) {
+		mmcqd[0] = mmcqd_pid;
+		start_async_req[0] = 0;
+	}
+
+	for (i = 0; i < ID_CNT; i++) {
+		if (mmcqd_pid == mmcqd[i]) {
+			idx = i;
+			break;
+		}
+		if ((mmcqd[i] == 0) || (i == ID_CNT - 1)) {
+			mmcqd[i] = mmcqd_pid;
+			start_async_req[i] = 0;
+			idx = i;
+			break;
+		}
+	}
+	return idx;
+}
+
+#endif
+//#undef FEATURE_STORAGE_PID_LOGGER
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+
+struct struct_pid_logger g_pid_logger[PID_ID_CNT]={{0,0,{0},{0},{0},{0}}};
+
+
+
+unsigned char *page_logger = NULL;
+spinlock_t g_locker;
+
+#endif
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+ unsigned long long time1 = 0;
+#if defined(FEATURE_STORAGE_PERF_INDEX)
+ pid_t mmcqd_pid=0;
+ unsigned long long t_period=0, t_usage=0;
+ unsigned int t_percent=0;
+ unsigned int perf_meter=0;
+ unsigned int rq_byte=0,rq_sector=0,sect_offset=0;
+ unsigned int diversity=0;
+ unsigned int idx=0;
+#ifdef FEATURE_STORAGE_META_LOG
+ unsigned int mmcmetaindex=0;
+#endif
+#endif
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ unsigned int index=0;
+#endif
if (!rqc && !mq->mqrq_prev->req)
return 0;
+ time1 = sched_clock();
if (rqc)
reqs = mmc_blk_prep_packed_list(mq, rqc);
+#if defined(FEATURE_STORAGE_PERF_INDEX)
+ mmcqd_pid = task_pid_nr(current);
+
+ idx = find_mmcqd_index();
+
+ mmcqd_read_clear[idx] = 1;
+ if(mmccid_tag_t1==0)
+ mmccid_tag_t1 = time1;
+ t_period = time1 - mmccid_tag_t1;
+ if(t_period >= (unsigned long long )((PRT_TIME_PERIOD)*(unsigned long long )10))
+ {
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC Queue Thread:%d, %d, %d, %d, %d \n", mmcqd[0], mmcqd[1], mmcqd[2], mmcqd[3], mmcqd[4]);
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "MMC CID: %lx %lx %lx %lx \n", g_u32_cid[0], g_u32_cid[1], g_u32_cid[2], g_u32_cid[3]);
+ mmccid_tag_t1 = time1;
+ }
+ if(mmcqd_tag_t1[idx]==0)
+ mmcqd_tag_t1[idx] = time1;
+ t_period = time1 - mmcqd_tag_t1[idx];
+
+ if(t_period >= (unsigned long long )PRT_TIME_PERIOD)
+ {
+ mmcqd_read_clear[idx] = 2;
+ mmcqd_work_percent[idx] = 1;
+ mmcqd_r_throughput[idx] = 0;
+ mmcqd_w_throughput[idx] = 0;
+ t_usage = mmcqd_t_usage_wr [idx] + mmcqd_t_usage_rd[idx];
+ if(t_period > t_usage*100)
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload < 1%%, duty %lld, period %lld, req_cnt=%d \n", mmcqd[idx], t_usage, t_period, mmcqd_rq_count[idx]);
+ else
+ {
+ do_div(t_period, 100); //boundary issue
+ t_percent =((unsigned int)t_usage)/((unsigned int)t_period);
+ mmcqd_work_percent[idx] = t_percent;
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Workload=%d%%, duty %lld, period %lld00, req_cnt=%d \n", mmcqd[idx], t_percent, t_usage, t_period, mmcqd_rq_count[idx]); //period %lld00 == period %lld x100
+ }
+ if(mmcqd_wr_rq_count[idx] >= 2)
+ {
+ diversity = mmcqd_wr_offset[idx]/(mmcqd_wr_rq_count[idx]-1);
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_wr_rq_count[idx], mmcqd_wr_break[idx], mmcqd_wr_tract[idx], mmcqd_wr_bit[idx]);
+ }
+ if(mmcqd_rd_rq_count[idx] >= 2)
+ {
+ diversity = mmcqd_rd_offset[idx]/(mmcqd_rd_rq_count[idx]-1);
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Diversity=%d sectors offset, req_cnt=%d, break_cnt=%d, tract_cnt=%d, bit_cnt=%d\n", mmcqd[idx], diversity, mmcqd_rd_rq_count[idx], mmcqd_rd_break[idx], mmcqd_rd_tract[idx], mmcqd_rd_bit[idx]);
+ }
+ if(mmcqd_t_usage_wr[idx])
+ {
+ do_div(mmcqd_t_usage_wr[idx], 1000000); //boundary issue
+ if(mmcqd_t_usage_wr[idx]) // discard print if duration will <1ms
+ {
+ perf_meter = (mmcqd_rq_size_wr[idx])/((unsigned int)mmcqd_t_usage_wr[idx]); //kb/s
+ mmcqd_w_throughput[idx] = perf_meter;
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Write Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_wr[idx], mmcqd_t_usage_wr[idx]);
+ }
+ }
+ if(mmcqd_t_usage_rd[idx])
+ {
+ do_div(mmcqd_t_usage_rd[idx], 1000000); //boundary issue
+ if(mmcqd_t_usage_rd[idx]) // discard print if duration will <1ms
+ {
+ perf_meter = (mmcqd_rq_size_rd[idx])/((unsigned int)mmcqd_t_usage_rd[idx]); //kb/s
+ mmcqd_r_throughput[idx] = perf_meter;
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd:%d Read Throughput=%d kB/s, size: %d bytes, time:%lld ms\n", mmcqd[idx], perf_meter, mmcqd_rq_size_rd[idx], mmcqd_t_usage_rd[idx]);
+ }
+ }
+ mmcqd_tag_t1[idx]=time1;
+ g_var_clear(idx);
+#ifdef FEATURE_STORAGE_META_LOG
+ mmcmetaindex = mmc_get_devidx(md->disk);
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd metarw WR:%d NWR:%d HR:%d WDR:%d HDR:%d WW:%d NWW:%d HW:%d\n",
+ metadata_logger[mmcmetaindex].metadata_rw_logger[0], metadata_logger[mmcmetaindex].metadata_rw_logger[1],
+ metadata_logger[mmcmetaindex].metadata_rw_logger[2], metadata_logger[mmcmetaindex].metadata_rw_logger[3],
+ metadata_logger[mmcmetaindex].metadata_rw_logger[4], metadata_logger[mmcmetaindex].metadata_rw_logger[5],
+ metadata_logger[mmcmetaindex].metadata_rw_logger[6], metadata_logger[mmcmetaindex].metadata_rw_logger[7]);
+ clear_metadata_rw_status(md->disk->first_minor);
+#endif
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ do {
+ int i;
+ for(index=0; index<PID_ID_CNT; index++) {
+
+ if( g_pid_logger[index].current_pid!=0 && g_pid_logger[index].current_pid == mmcqd_pid)
+ break;
+ }
+ if( index == PID_ID_CNT )
+ break;
+ for( i=0; i<PID_LOGGER_COUNT; i++) {
+ //printk(KERN_INFO"hank mmcqd %d %d", g_pid_logger[index].pid_logger[i], mmcqd_pid);
+ if( g_pid_logger[index].pid_logger[i] == 0)
+ break;
+ sprintf (g_pid_logger[index].pid_buffer+i*37, "{%05d:%05d:%08d:%05d:%08d}", g_pid_logger[index].pid_logger[i], g_pid_logger[index].pid_logger_counter[i], g_pid_logger[index].pid_logger_length[i], g_pid_logger[index].pid_logger_r_counter[i], g_pid_logger[index].pid_logger_r_length[i]);
+
+ }
+ if( i != 0) {
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "mmcqd pid:%d %s\n", g_pid_logger[index].current_pid, g_pid_logger[index].pid_buffer);
+ //xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "sizeof(&(g_pid_logger[index].pid_logger)):%d\n", sizeof(unsigned short)*PID_LOGGER_COUNT);
+ //memset( &(g_pid_logger[index].pid_logger), 0, sizeof(struct struct_pid_logger)-(unsigned long)&(((struct struct_pid_logger *)0)->pid_logger));
+ memset( &(g_pid_logger[index].pid_logger), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
+ memset( &(g_pid_logger[index].pid_logger_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
+ memset( &(g_pid_logger[index].pid_logger_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
+ memset( &(g_pid_logger[index].pid_logger_r_counter), 0, sizeof(unsigned short)*PID_LOGGER_COUNT);
+ memset( &(g_pid_logger[index].pid_logger_r_length), 0, sizeof(unsigned int)*PID_LOGGER_COUNT);
+ memset( &(g_pid_logger[index].pid_buffer), 0, sizeof(char)*1024);
+
+
+ }
+ g_pid_logger[index].pid_buffer[0] = '\0';
+
+ } while(0);
+#endif
+
+#if defined(FEATURE_STORAGE_VMSTAT_LOGGER)
+ xlog_printk(ANDROID_LOG_DEBUG, "BLOCK_TAG", "vmstat (FP:%ld)(FD:%ld)(ND:%ld)(WB:%ld)(NW:%ld)\n",
+ ((global_page_state(NR_FILE_PAGES)) << (PAGE_SHIFT - 10)),
+ ((global_page_state(NR_FILE_DIRTY)) << (PAGE_SHIFT - 10)),
+ ((global_page_state(NR_DIRTIED)) << (PAGE_SHIFT - 10)),
+ ((global_page_state(NR_WRITEBACK)) << (PAGE_SHIFT - 10)),
+ ((global_page_state(NR_WRITTEN)) << (PAGE_SHIFT - 10)));
+#endif
+ }
+ if( rqc )
+ {
+ rq_byte = blk_rq_bytes(rqc);
+ rq_sector = blk_rq_sectors(rqc);
+ if(rq_data_dir(rqc) == WRITE)
+ {
+ if(mmcqd_wr_offset_tag[idx]>0)
+ {
+ sect_offset = abs(blk_rq_pos(rqc) - mmcqd_wr_offset_tag[idx]);
+ mmcqd_wr_offset[idx] += sect_offset;
+ if(sect_offset == 1)
+ mmcqd_wr_break[idx]++;
+ }
+ mmcqd_wr_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
+ if(rq_sector <= 1) //512 bytes
+ mmcqd_wr_bit[idx] ++;
+ else if(rq_sector >= 1016) //508kB
+ mmcqd_wr_tract[idx] ++;
+ }
+ else //read
+ {
+ if(mmcqd_rd_offset_tag[idx]>0)
+ {
+ sect_offset = abs(blk_rq_pos(rqc) - mmcqd_rd_offset_tag[idx]);
+ mmcqd_rd_offset[idx] += sect_offset;
+ if(sect_offset == 1)
+ mmcqd_rd_break[idx]++;
+ }
+ mmcqd_rd_offset_tag[idx] = blk_rq_pos(rqc) + rq_sector;
+ if(rq_sector <= 1) //512 bytes
+ mmcqd_rd_bit[idx] ++;
+ else if(rq_sector >= 1016) //508kB
+ mmcqd_rd_tract[idx] ++;
+ }
+ }
+#endif
do {
if (rqc) {
/*
brq->data.bytes_xfered);
}
+// if (card && card->host && card->host->areq)
+// met_mmc_end(card->host, card->host->areq);
+
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
unsigned long flags;
unsigned int cmd_flags = req ? req->cmd_flags : 0;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host))
+ mmc_resume_bus(card->host);
+#endif
+
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
mmc_claim_host(card->host);
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+//#if defined(FEATURE_STORAGE_PID_LOGGER)
+//extern unsigned long get_memory_size(void);
+//#endif
+#ifdef CONFIG_MTK_EXTMEM
+extern void* extmem_malloc_page_align(size_t bytes);
+#endif
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ if( !page_logger){
+ //num_page_logger = sizeof(struct page_pid_logger);
+ //page_logger = vmalloc(num_physpages*sizeof(struct page_pid_logger));
+ // solution: use get_memory_size to obtain the size from start pfn to max pfn
+
+ //unsigned long count = get_memory_size() >> PAGE_SHIFT;
+ unsigned long count = get_max_DRAM_size() >> PAGE_SHIFT;
+#ifdef CONFIG_MTK_EXTMEM
+ page_logger = extmem_malloc_page_align(count * sizeof(struct page_pid_logger));
+#else
+ page_logger = vmalloc(count * sizeof(struct page_pid_logger));
+#endif
+ if( page_logger) {
+ memset( page_logger, -1, count*sizeof( struct page_pid_logger));
+ }
+ spin_lock_init(&g_locker);
+ }
+#endif
+#if defined(FEATURE_STORAGE_META_LOG)
+ check_perdev_minors = perdev_minors;
+#endif
md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags |= GENHD_FL_EXT_DEVT;
if (area_type & MMC_BLK_DATA_AREA_RPMB)
md->disk->flags |= GENHD_FL_NO_PART_SCAN;
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
sector_t size;
+#ifdef CONFIG_MTK_EMMC_SUPPORT
+ unsigned int l_reserve;
+ struct storage_info s_info = {0};
+#endif
struct mmc_blk_data *md;
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
+	if (!mmc_card_sd(card)) {
+#ifdef CONFIG_MTK_EMMC_SUPPORT
+		msdc_get_info(EMMC_CARD_BOOT, EMMC_RESERVE, &s_info);
+		l_reserve = s_info.emmc_reserve;
+		pr_info("l_reserve = 0x%x\n", l_reserve);
+		size -= l_reserve; /* reserve 64 MB (eMMC OTP + eMMC combo offset + reserved) */
+#endif
+	}
md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
MMC_BLK_DATA_AREA_MAIN);
return md;
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_SANDISK_NEW 0x45
+#define CID_MANFID_HYNIX 0x90
+#define CID_MANFID_KSI 0x70
static const struct mmc_fixup blk_fixups[] =
{
MMC_QUIRK_INAND_CMD38),
MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
MMC_QUIRK_INAND_CMD38),
-
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_NEW, CID_OEMID_ANY, add_quirk,
+ MMC_QUIRK_PON),
/*
* Some MMC cards experience performance degradation with CMD23
* instead of CMD12-bounded multiblock transfers. For now we'll
MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+#ifdef CONFIG_MTK_EMMC_CACHE
+	/*
+	 * Some MMC cards with the cache feature cannot flush previously
+	 * cached data via forced programming or reliable write, so the
+	 * strong ordering between metadata and file data cannot be
+	 * guaranteed.
+	 */
+
+	/*
+	 * On Toshiba eMMC, enabling the cache feature degrades write
+	 * performance because flush operations take a long time.
+	 */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_DISABLE_CACHE),
+#endif
+
+	/* Trim on Hynix eMMC 4.41 can cause boot failures. */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_UNSTABLE),
+
+	/* Trim on KSI parts with PRV=0x3 causes a write performance drop. */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_KSI, CID_OEMID_ANY, add_quirk_mmc_ksi_v03_skip_trim,
+ MMC_QUIRK_KSI_V03_SKIP_TRIM),
END_FIXUP
};
+#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
+extern void emmc_create_sys_symlink(struct mmc_card *card);
+#endif
static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md, *part_md;
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+ printk("[%s]: %s by manufacturer settings, quirks=0x%x\n", __func__, md->disk->disk_name, card->quirks);
+
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
if (mmc_add_disk(part_md))
goto out;
}
+#if defined(CONFIG_MTK_EMMC_SUPPORT) && !defined(CONFIG_MTK_GPT_SCHEME_SUPPORT)
+ emmc_create_sys_symlink(card);
+#endif
return 0;
out:
mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+#include <linux/mtd/combo_nand.h>
+#endif
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
-static bool fm_autoconvert;
+#ifdef CONFIG_MTK_NAND_UBIFS_FASTMAP_SUPPORT
+static bool fm_autoconvert = true;
+#else
+static bool fm_autoconvert;
+#endif
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
+//MTK
+static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+//MTK start
+static struct device_attribute dev_lbb =
+ __ATTR(lbb, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_move_retry =
+ __ATTR(move_retry, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ec_count =
+ __ATTR(ec_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mean_ec =
+ __ATTR(mean_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ec_sum =
+ __ATTR(ec_sum, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_ec =
+ __ATTR(min_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_wl_count =
+ __ATTR(wl_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_wl_size =
+ __ATTR(wl_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_scrub_count =
+ __ATTR(scrub_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_scrub_size =
+ __ATTR(scrub_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_wl_th =
+	__ATTR(wl_th, S_IRUGO | S_IWUSR, dev_attribute_show, dev_attribute_store);
+static struct device_attribute dev_torture =
+	__ATTR(torture, S_IRUGO, dev_attribute_show, NULL);
+//MTK end
static struct device_attribute dev_reserved_for_bad =
__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
return ubi_num;
}
+/* MTK: "Store" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct ubi_device *ubi;
+	int th = 0;
+
+	ubi = container_of(dev, struct ubi_device, dev);
+	ubi = ubi_get_device(ubi->ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	if (attr == &dev_wl_th) {
+		if (sscanf(buf, "%d", &th) == 1) {
+			printk("set th=%d\n", th);
+			ubi->wl_th = th;
+		}
+	}
+	/* drop the reference taken by ubi_get_device() */
+	ubi_put_device(ubi);
+	return count;
+}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
ret = sprintf(buf, "%d\n", ubi->max_ec);
+//MTK start
+ else if (attr == &dev_torture)
+ ret = sprintf(buf, "torture: %d\n", ubi->torture);
+ else if (attr == &dev_wl_th)
+ ret = sprintf(buf, "wl_th: %d\n", ubi->wl_th);
+ else if (attr == &dev_wl_count)
+ ret = sprintf(buf, "wl_count: %d\n", ubi->wl_count);
+ else if (attr == &dev_wl_size)
+ ret = sprintf(buf, "wl_size: %lld\n", ubi->wl_size);
+ else if (attr == &dev_scrub_count)
+ ret = sprintf(buf, "scrub_count: %d\n", ubi->scrub_count);
+ else if (attr == &dev_scrub_size)
+ ret = sprintf(buf, "scrub_size: %lld\n", ubi->scrub_size);
+ else if (attr == &dev_move_retry)
+ ret = sprintf(buf, "move_retry: %d\n", atomic_read(&ubi->move_retry));
+ else if (attr == &dev_lbb)
+ ret = sprintf(buf, "lbb: %d\n", atomic_read(&ubi->lbb));
+ else if (attr == &dev_ec_count)
+ ret = sprintf(buf, "ec_count: %d\n", atomic_read(&ubi->ec_count));
+ else if (attr == &dev_mean_ec)
+ ret = sprintf(buf, "mean_ec: %d\n", ubi->mean_ec);
+ else if (attr == &dev_ec_sum)
+ ret = sprintf(buf, "%lld\n", ubi->ec_sum);
+ else if (attr == &dev_min_ec) {
+ struct ubi_wl_entry *e=NULL, *efree=NULL, *eused=NULL;
+ spin_lock(&ubi->wl_lock);
+ efree = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
+ eused = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ if(efree && eused) {
+ if(efree->ec < eused->ec)
+ e = efree;
+ else
+ e = eused;
+ } else if(efree){
+ e = efree;
+ } else {
+ e = eused;
+ }
+ ret = sprintf(buf, "%d\n", e->ec);
+ spin_unlock(&ubi->wl_lock);
+ }
+//MTK end
else if (attr == &dev_reserved_for_bad)
ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
err = device_create_file(&ubi->dev, &dev_max_ec);
if (err)
return err;
+//MTK start
+ err = device_create_file(&ubi->dev, &dev_lbb);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_move_retry);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_ec_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mean_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_ec_sum);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_min_ec);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_scrub_count);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_scrub_size);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_wl_th);
+ if (err)
+ return err;
+ err = device_create_file(&ubi->dev, &dev_torture);
+ if (err)
+ return err;
+//MTK end
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
if (err)
return err;
* physical eraseblocks maximum.
*/
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->peb_size = COMBO_NAND_BLOCK_SIZE;
+ ubi->peb_count = (int)div_u64(ubi->mtd->size, ubi->peb_size);
+#else
ubi->peb_size = ubi->mtd->erasesize;
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+#endif
ubi->flash_size = ubi->mtd->size;
if (mtd_can_have_bb(ubi->mtd)) {
ubi->nor_flash = 1;
}
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->min_io_size = COMBO_NAND_PAGE_SIZE;
+ ubi->hdrs_min_io_size = ubi->min_io_size >> ubi->mtd->subpage_sft;
+#else
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+#endif
/*
* Make sure minimal I/O unit is power of 2. Note, there is no
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+#ifdef CONFIG_MTK_COMBO_NAND_SUPPORT
+ ubi->max_write_size = COMBO_NAND_PAGE_SIZE;
+#else
ubi->max_write_size = ubi->mtd->writebufsize;
+#endif
+#ifdef CONFIG_MTK_MLC_NAND_SUPPORT
+ ubi->max_write_size = ubi->mtd->erasesize/4;
+#endif
/*
* Maximum write size has to be greater or equivalent to min. I/O
* size, and be multiple of min. I/O size.
ubi->ro_mode = 1;
}
+ ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
+ ubi->peb_size, ubi->peb_size >> 10);
+ ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
+ ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
+ if (ubi->hdrs_min_io_size != ubi->min_io_size)
+ ubi_msg("sub-page size: %d",
+ ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d)",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
+ ubi_msg("data offset: %d", ubi->leb_start);
+
/*
* Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ unsigned long long attach_time = 0;
if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
return -EINVAL;
ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+//MTK start
+ ubi->wl_th = CONFIG_MTD_UBI_WL_THRESHOLD;
+ atomic_set(&ubi->ec_count, 0);
+ atomic_set(&ubi->move_retry, 0);
+//MTK end
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_pool.used = ubi->fm_pool.size = 0;
goto out_free;
err = -ENOMEM;
- ubi->peb_buf = vmalloc(ubi->peb_size);
+ ubi->peb_buf = kmalloc(ubi->peb_size, GFP_KERNEL);
if (!ubi->peb_buf)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_size = ubi_calc_fm_size(ubi);
- ubi->fm_buf = vzalloc(ubi->fm_size);
+ ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
if (!ubi->fm_buf)
goto out_free;
#endif
+ attach_time = sched_clock();
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_detach;
}
+ /* Make device "available" before it becomes accessible via sysfs */
+ ubi_devices[ubi_num] = ubi;
+
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
goto out_debugfs;
}
+	attach_time = sched_clock() - attach_time;
+	do_div(attach_time, 1000000);
+	ubi_msg("attach time: %llu ms", attach_time);
ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
ubi_assert(ref);
uif_close(ubi);
out_detach:
+ ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
- vfree(ubi->vtbl);
+ kfree(ubi->vtbl);
out_free:
- vfree(ubi->peb_buf);
- vfree(ubi->fm_buf);
+ kfree(ubi->peb_buf);
+ kfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
- vfree(ubi->vtbl);
+ kfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- vfree(ubi->peb_buf);
- vfree(ubi->fm_buf);
+#ifdef CONFIG_BLB
+ kfree(ubi->databuf);
+ kfree(ubi->oobbuf);
+#endif
+ kfree(ubi->peb_buf);
+ kfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-
#include "hub.h"
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+int is_musbfsh_rh(struct usb_device *udev);
+void set_icusb_sts_disconnect_done(void);
+#endif
+
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+static struct usb_device *g_dsda_dev = NULL;
+
+#ifdef CONFIG_PM_RUNTIME
+struct usb_hub *usb11_hub = NULL;
+int is_musbfsh_rh(struct usb_device *udev);
+
+struct usb_device *get_usb11_child_udev(void)
+{
+	if (usb11_hub) {
+		MYDBG("\n");
+		return usb11_hub->ports[0]->child;
+	} else {
+		MYDBG("\n");
+		return NULL;
+	}
+}
+#endif
+
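+/* hex-dump helper: print each byte of @buf through MYDBG */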
+void dump_data(char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		MYDBG("data[%d]: %x\n", i, buf[i]);
+}
+
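+/* sanity-check the stored DSDA device by reading its device descriptor over EP0 */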
+void test_dsda_device_ep0(void)
+{
+	int ret;
+	char data_buf[256];
+
+	ret = usb_control_msg(g_dsda_dev, usb_rcvctrlpipe(g_dsda_dev, 0),
+			USB_REQ_GET_DESCRIPTOR,
+			USB_DIR_IN,
+			USB_DT_DEVICE << 8,
+			0,
+			data_buf,
+			64,
+			USB_CTRL_GET_TIMEOUT);
+
+	if (ret < 0) {
+		MYDBG("test ep0 fail, ret : %d\n", ret);
+	} else {
+		MYDBG("test ep0 ok, ret : %d\n", ret);
+		dump_data(data_buf, ret);
+	}
+}
+
+void release_usb11_wakelock(void);
+static ssize_t dsda_tmp_proc_entry(struct file *file_ptr, const char __user *user_buffer, size_t count, loff_t *position)
+{
+	char cmd[64];
+
+	if (!count)
+		return 0;
+
+	/* bound the copy so a large write cannot overflow cmd[] */
+	if (count > sizeof(cmd))
+		count = sizeof(cmd);
+
+	if (copy_from_user(cmd, user_buffer, count))
+		return -EFAULT;
+
+	/* apply action here */
+	if (cmd[0] == '0')
+	{
+		MYDBG("");
+		test_dsda_device_ep0();
+	}
+	if (cmd[0] == '1')
+	{
+		MYDBG("");
+		release_usb11_wakelock();
+	}
+
+	MYDBG("");
+
+	return count;
+}
+
+static const struct file_operations dsda_tmp_proc_fops = {
+	.write = dsda_tmp_proc_entry,
+};
+
+
+void create_dsda_tmp_entry(void)
+{
+	struct proc_dir_entry *prEntry;
+
+	MYDBG("");
+
+	prEntry = proc_create("DSDA_TMP_ENTRY", 0660, NULL, &dsda_tmp_proc_fops);
+	if (prEntry)
+		MYDBG("add /proc/DSDA_TMP_ENTRY ok\n");
+	else
+		MYDBG("add /proc/DSDA_TMP_ENTRY fail\n");
+}
+#endif
+
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
#ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+extern int usbif_u3h_send_event(char* event) ;
+#include "otg_whitelist.h"
+#endif
+
+
static inline int hub_is_superspeed(struct usb_device *hdev)
{
return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
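+/* control pipes for the default address 0, used during enumeration */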
+#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
+#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
+
+
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeed(hub->hdev))
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
+ MYDBG("");
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
- *change = le16_to_cpu(hub->status->hub.wHubChange);
+ *change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
}
if (type == HUB_INIT2)
goto init2;
-
goto init3;
}
kref_get(&hub->kref);
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
- } else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
+	} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) { /* bus powered */
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
- if (remaining < hdev->maxchild * unit_load)
+ if (remaining < hdev->maxchild * unit_load){
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
+ }
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
struct usb_device *hdev;
struct usb_hub *hub;
+
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("MAX_HUB_TIER_EXCEED");
+#endif
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_BLACKLIST_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("HUB_NOT_SUPPORTED");
+#endif
return -ENODEV;
}
#endif
struct usb_device *udev = *pdev;
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
+	struct timeval tv_begin, tv_end;
+	struct timeval tv_before, tv_after;
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+	int is_icusb_rh;
+#endif
+
+	do_gettimeofday(&tv_begin);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+	is_icusb_rh = is_musbfsh_rh(udev->parent);
+#endif
+
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+
+ do_gettimeofday(&tv_before);
usb_disable_device(udev, 0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_disable_device(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port_dev->did_runtime_put = false;
}
+ do_gettimeofday(&tv_before);
usb_remove_ep_devs(&udev->ep0);
+ do_gettimeofday(&tv_after);
+ MYDBG("usb_remove_ep_devs(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
+
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
+ do_gettimeofday(&tv_before);
device_del(&udev->dev);
+ do_gettimeofday(&tv_after);
+ MYDBG("device_del(), time spent, sec : %d, usec : %d\n", (unsigned int)(tv_after.tv_sec - tv_before.tv_sec), (unsigned int)(tv_after.tv_usec - tv_before.tv_usec));
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
hub_free_dev(udev);
put_device(&udev->dev);
+
+#ifdef CONFIG_MTK_ICUSB_SUPPORT
+ if (is_icusb_rh)
+ {
+ set_icusb_sts_disconnect_done();
+ MYDBG("ICUSB Disconnect\n");
+ }
+#endif
+ do_gettimeofday(&tv_end);
+ MYDBG("time spent, sec : %d, usec : %d\n", (unsigned int)(tv_end.tv_sec - tv_begin.tv_sec), (unsigned int)(tv_end.tv_usec - tv_begin.tv_usec));
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+	if (udev->parent) { /* no need to check the root hub itself */
+ if (!is_targeted(udev)) {
+ usbif_u3h_send_event("DEV_NOT_SUPPORTED");
+ err = -ENOTSUPP;
+ }
+ }
+#endif
+
if (err < 0)
return err;
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
+ MYDBG("udev :%p\n", udev);
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+#ifdef CONFIG_PM_RUNTIME
+ if(is_musbfsh_rh(udev->parent)){
+ MYDBG("\n");
+ /*find out struct *usb_hub and hook it */
+ usb11_hub = usb_hub_to_struct_hub(udev->parent);
+ }
+#endif
+#endif
}
/* Tell the runtime-PM framework the device is active */
msleep(delay);
/* read and decode port status */
+ MYDBG("");
ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ MYDBG("");
if (ret < 0)
return ret;
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
+ MYDBG("");
if (status == -ENODEV) {
+ MYDBG("");
; /* The hub is gone */
} else if (status) {
+ MYDBG("");
dev_err(hub->intfdev,
"cannot %sreset port %d (err = %d)\n",
warm ? "warm " : "", port1, status);
} else {
+ MYDBG("");
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
- if (status && status != -ENOTCONN && status != -ENODEV)
+ if (status && status != -ENOTCONN)
+ {
+ MYDBG("");
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
+ }
}
+ MYDBG("");
/* Check for disconnect or reset */
if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+ MYDBG("");
hub_port_finish_reset(hub, port1, udev, &status);
+ MYDBG("");
if (!hub_is_superspeed(hub->hdev))
goto done;
warm = true;
}
}
+ MYDBG("");
dev_dbg (hub->intfdev,
"port %d not enabled, trying %sreset again...\n",
port1, warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
+ MYDBG("");
+
dev_err (hub->intfdev,
"Cannot enable port %i. Maybe the USB cable is bad?\n",
done:
if (!hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
up_read(&ehci_cf_port_reset_rwsem);
+ }
+
+ MYDBG("");
return status;
}
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
+ {
+ MYDBG("");
goto err_wakeup;
+ }
}
}
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
status = -ENOMEM;
+ MYDBG("");
if (PMSG_IS_AUTO(msg))
goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
+ {
+ MYDBG("");
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
+#if 0 /* behavior for kernel 3.10 */
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
- else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ } else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0) {
+ MYDBG("");
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
- else {
+ } else {
really_suspend = false;
status = 0;
}
+#else /* roll back to the kernel 3.4 behavior */
+	} else {
+ MYDBG("");
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
+ }
+#endif
+
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
+ MYDBG("");
/* Try to enable USB3 LPM and LTM again */
usb_unlocked_enable_lpm(udev);
*/
if (status == 0) {
devstatus = 0;
+ MYDBG("\n");
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ MYDBG("%d\n", status);
if (status >= 0)
status = (status > 0 ? 0 : -ENODEV);
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
- *
+ *
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
const char *speed;
int devnum = udev->devnum;
+ dump_stack();
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ MYDBG("");
retval = hub_port_reset(hub, port1, udev, delay, false);
+ MYDBG("");
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
default:
goto fail;
}
+ MYDBG("");
if (udev->speed == USB_SPEED_WIRELESS)
speed = "variable speed Wireless";
udev->tt = &hub->tt;
udev->ttport = port1;
}
-
+
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* value.
*/
for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ MYDBG("");
if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
struct usb_device_descriptor *buf;
int r = 0;
*/
if (r == 0 || (r == -ETIMEDOUT && j == 0))
break;
+
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ if (buf->bMaxPacketSize0 == 0) {
+ usbif_u3h_send_event("DEV_CONN_TMOUT");
+ }
+#endif
+
}
udev->descriptor.bMaxPacketSize0 =
buf->bMaxPacketSize0;
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
}
-
+
retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
if (retval < (signed)sizeof(udev->descriptor)) {
if (retval != -ENODEV)
remaining -= delta;
}
if (remaining < 0) {
+#if defined(CONFIG_USBIF_COMPLIANCE) && defined(CONFIG_USB_XHCI_HCD)
+ usbif_u3h_send_event("DEV_OVER_CURRENT");
+#endif
dev_warn(hub->intfdev, "%dmA over power budget!\n",
- remaining);
remaining = 0;
int status, i;
unsigned unit_load;
+ MYDBG("");
dev_dbg (hub_dev,
"port %d, status %04x, change %04x, %s\n",
port1, portstatus, portchange, portspeed(hub, portstatus));
}
/* reset (non-USB 3.0 devices) and get descriptor */
+ MYDBG("");
status = hub_port_init(hub, udev, port1, i);
if (status < 0)
+ {
+ MYDBG("");
goto loop;
+ }
+ MYDBG("");
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(1000);
goto loop_disable;
}
}
-
+
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
hub->ports[port1 - 1]->child = NULL;
spin_unlock_irq(&device_state_lock);
}
+#ifdef CONFIG_MTK_DT_USB_SUPPORT
+ g_dsda_dev = udev;
+ MYDBG("get new device !!!, BUILD TIME : %s, g_dsda_dev : %p\n", __TIME__, g_dsda_dev);
+#endif
}
if (status)
dev_err(hub_dev, "unable to enumerate USB device on port %d\n",
port1);
}
-
+
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent)
dev_dbg (hub_dev, "resetting for error %d\n",
hub->error);
+ MYDBG("");
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg (hub_dev,
* EM interference sometimes causes badly
* shielded USB devices to be shutdown by
* the hub, this hack enables them again.
- * Works at least with mouse driver.
+ * Works at least with mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change
.supports_autosuspend = 1,
};
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+extern void mtk_hub_event_steal(spinlock_t *lock, struct list_head* list);
+#endif
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
return -1;
}
+#if defined(CONFIG_MTK_XHCI) && defined(CONFIG_USB_MTK_DUALMODE)
+ mtk_hub_event_steal(&hub_event_lock, &hub_event_list);
+#endif
+
khubd_task = kthread_run(hub_thread, NULL, "khubd");
if (!IS_ERR(khubd_task))
return 0;
int i, ret = 0;
int port1 = udev->portnum;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
if (ret < 0)
goto re_enumerate;
-
+
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor)) {
dev_info(&udev->dev, "device firmware changed\n");
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
return 0;
-
+
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
unsigned int noio_flag;
struct usb_host_config *config = udev->actconfig;
+ MYDBG("");
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_K4605 0x14C6
#define HUAWEI_PRODUCT_E173S6 0x1C07
+#define HW_USB_DEVICE_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (vend), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
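The HW_ variant above matches on vendor id plus interface class/subclass/protocol, deliberately ignoring the product id. A minimal sketch of a hypothetical id-table entry using it (the class/subclass/protocol values are illustrative; HUAWEI_VENDOR_ID is defined elsewhere in this file):

	/* Match any product of this vendor whose interface reports
	 * class 0xff, subclass 0x01, protocol 0x02. */
	{ HW_USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },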
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
+ #define TELIT_PRODUCT_LE910_USBCFG4 0x1206
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
-/* Samsung products */
+/* SS products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
+#define MEDIATEK_PRODUCT_DC_4COM3 0x00a8
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
- { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* SS GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM3, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
(const struct option_blacklist_info *) id->driver_info))
return -ENODEV;
/*
- * Don't bind network interface on Samsung GT-B3730, it is handled by
+ * Don't bind network interface on SS GT-B3730, it is handled by
* a separate module.
*/
if (dev_desc->idVendor == cpu_to_le16(SAMSUNG_VENDOR_ID) &&
#include "truncate.h"
#include <trace/events/ext4.h>
+#include <linux/blkdev.h>
#define MPAGE_DA_EXTENT_TAIL 0x01
* Note that directories do not have this problem because they
* don't use page cache.
*/
- if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_ino != EXT4_JOURNAL_INO) {
+ if (inode->i_ino != EXT4_JOURNAL_INO &&
+ ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
struct page *page;
pgoff_t index;
unsigned from, to;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+ extern unsigned char *page_logger;
+ struct page_pid_logger *tmp_logger;
+ unsigned long page_index;
+ extern spinlock_t g_locker;
+ unsigned long g_flags;
+#endif
trace_ext4_write_begin(inode, pos, len, flags);
/*
return ret;
}
*pagep = page;
+#if defined(FEATURE_STORAGE_PID_LOGGER)
+	if (page_logger && (*pagep)) {
+		/* Map the page to a flat index (pfn - PHYS_PFN_OFFSET); with
+		 * CONFIG_FLATMEM this could equivalently be *pagep - mem_map. */
+		page_index = (unsigned long)__page_to_pfn(*pagep) - PHYS_PFN_OFFSET;
+		tmp_logger = ((struct page_pid_logger *)page_logger) + page_index;
+		spin_lock_irqsave(&g_locker, g_flags);
+		if (page_index < num_physpages) {
+			/* Remember up to two distinct writers per page. */
+			if (tmp_logger->pid1 == 0xFFFF)
+				tmp_logger->pid1 = current->pid;
+			else if (tmp_logger->pid1 != current->pid)
+				tmp_logger->pid2 = current->pid;
+		}
+		spin_unlock_irqrestore(&g_locker, g_flags);
+	}
+#endif
return ret;
}
int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
+#if 0
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
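With the body compiled out, hole punching on ext4 now fails up front. A minimal userspace sketch of what a caller observes on this kernel (the file path is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/data/test.img", O_RDWR);	/* illustrative path */

	if (fd < 0)
		return 1;
	/* ext4_punch_hole() now returns -EOPNOTSUPP immediately. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4096) < 0)
		perror("fallocate");	/* expected: Operation not supported */
	return 0;
}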
/*
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
+#ifdef FEATURE_STORAGE_META_LOG
+	if (bh && bh->b_bdev && bh->b_bdev->bd_disk)
+ set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_READ_CNT);
+#endif
submit_bh(READ | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
int order = 1;
+ int bb_incr = 1 << (e4b->bd_blkbits - 1);
void *bb;
BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
/* this block is part of buddy of order 'order' */
return order;
}
- bb += 1 << (e4b->bd_blkbits - order);
+ bb += bb_incr;
+ bb_incr >>= 1;
order++;
}
return 0;
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned i, j;
- unsigned offset;
+ unsigned offset, offset_incr;
unsigned max;
int ret;
i = 1;
offset = 0;
+ offset_incr = 1 << (sb->s_blocksize_bits - 1);
max = sb->s_blocksize << 2;
do {
sbi->s_mb_offsets[i] = offset;
sbi->s_mb_maxs[i] = max;
- offset += 1 << (sb->s_blocksize_bits - i);
+ offset += offset_incr;
+ offset_incr = offset_incr >> 1;
max = max >> 1;
i++;
} while (i <= sb->s_blocksize_bits + 1);
}
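Both hunks replace a per-iteration shift of the form 1 << (bits - i) with a running increment that starts at 1 << (bits - 1) and is halved each pass. The running sums are identical, but the halving form never shifts by a negative count once i exceeds bits (the old form did exactly that on the final loop pass, which is undefined behaviour). A minimal standalone check of the equivalence, assuming 12 block bits:

#include <assert.h>

int main(void)
{
	unsigned bits = 12;	/* e.g. 4096-byte blocks */
	unsigned i, off_old = 0, off_new = 0;
	unsigned incr = 1u << (bits - 1);

	for (i = 1; i <= bits; i++) {
		off_old += 1u << (bits - i);	/* old per-iteration shift */
		off_new += incr;		/* new running increment */
		incr >>= 1;
		assert(off_old == off_new);
	}
	/* At i == bits + 1 the old form would shift by -1; the halved
	 * increment is simply 0 by then. */
	return 0;
}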
static inline int ext4_issue_discard(struct super_block *sb,
- ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+ ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+ unsigned long flags)
{
ext4_fsblk_t discard_block;
count = EXT4_C2B(EXT4_SB(sb), count);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
- return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+ return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
}
/*
if (test_opt(sb, DISCARD)) {
err = ext4_issue_discard(sb, entry->efd_group,
entry->efd_start_cluster,
- entry->efd_count);
+ entry->efd_count, 0);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%d failed"
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
- * Fix the bitmap and repeat the block allocation
+ * Fix the bitmap and return EUCLEAN
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
- err = -EAGAIN;
+ err = -EUCLEAN;
goto out_err;
}
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
- if (*errp == -EAGAIN) {
- /*
- * drop the reference that we took
- * in ext4_mb_use_best_found
- */
- ext4_mb_release_context(ac);
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
- ac->ac_status = AC_STATUS_CONTINUE;
- goto repeat;
- } else if (*errp) {
+ if (*errp) {
ext4_discard_allocated_blocks(ac);
goto errout;
} else {
* them with group lock_held
*/
if (test_opt(sb, DISCARD)) {
- err = ext4_issue_discard(sb, block_group, bit, count);
+ err = ext4_issue_discard(sb, block_group, bit, count,
+ 0);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%lu failed"
* @count: number of blocks to TRIM
* @group: alloc. group we are working with
* @e4b: ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
*
* Trim "count" blocks starting at "start" in the "group". To assure that no
* one will allocate those blocks, mark it as used in buddy bitmap. This must
* be called with under the group lock.
*/
static int ext4_trim_extent(struct super_block *sb, int start, int count,
- ext4_group_t group, struct ext4_buddy *e4b)
+ ext4_group_t group, struct ext4_buddy *e4b,
+ unsigned long blkdev_flags)
{
struct ext4_free_extent ex;
int ret = 0;
*/
mb_mark_used(e4b, &ex);
ext4_unlock_group(sb, group);
- ret = ext4_issue_discard(sb, group, start, count);
+ ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
return ret;
* @start: first group block to examine
* @max: last group block to examine
* @minblocks: minimum extent block count
+ * @blkdev_flags: flags for the block device
*
* ext4_trim_all_free walks through group's buddy bitmap searching for free
* extents. When the free block is found, ext4_trim_extent is called to TRIM
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_grpblk_t start, ext4_grpblk_t max,
- ext4_grpblk_t minblocks)
+ ext4_grpblk_t minblocks, unsigned long blkdev_flags)
{
void *bitmap;
ext4_grpblk_t next, count = 0, free_count = 0;
if ((next - start) >= minblocks) {
ret = ext4_trim_extent(sb, start,
- next - start, group, &e4b);
+ next - start, group, &e4b,
+ blkdev_flags);
if (ret && ret != -EOPNOTSUPP)
break;
ret = 0;
* ext4_trim_fs() -- trim ioctl handle function
* @sb: superblock for filesystem
* @range: fstrim_range structure
+ * @blkdev_flags: flags for the block device
*
* start: First Byte to trim
* len: number of Bytes to trim from start
* start to start+len. For each such a group ext4_trim_all_free function
* is invoked to trim all free space.
*/
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+ unsigned long blkdev_flags)
{
struct ext4_group_info *grp;
ext4_group_t group, first_group, last_group;
if (grp->bb_free >= minlen) {
cnt = ext4_trim_all_free(sb, group, first_cluster,
- end, minlen);
+ end, minlen, blkdev_flags);
if (cnt < 0) {
ret = cnt;
break;
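The new last argument is passed straight through ext4_trim_extent() to sb_issue_discard(), so a caller can now request, e.g., a secure discard. A minimal sketch of a hypothetical FITRIM-style ioctl handler using it (ext4_ioctl_trim and the 'secure' switch are illustrative; BLKDEV_DISCARD_SECURE is the existing flag from <linux/blkdev.h>):

static int ext4_ioctl_trim(struct super_block *sb,
			   struct fstrim_range __user *urange, bool secure)
{
	struct fstrim_range range;
	int ret;

	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;

	ret = ext4_trim_fs(sb, &range,
			   secure ? BLKDEV_DISCARD_SECURE : 0);
	if (ret)
		return ret;

	return copy_to_user(urange, &range, sizeof(range)) ? -EFAULT : 0;
}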
while (es->s_last_orphan) {
struct inode *inode;
+ /*
+ * We may have encountered an error during cleanup; if
+ * so, skip the rest.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ es->s_last_orphan = 0;
+ break;
+ }
+
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
unsigned long next_wakeup, cur;
BUG_ON(NULL == eli);
+ set_freezable();
cont_thread:
while (true) {
schedule_timeout_interruptible(next_wakeup - cur);
- if (kthread_should_stop()) {
+ if (kthread_freezable_should_stop(NULL)) {
ext4_clear_request_list();
goto exit_thread;
}
*/
#include "fuse_i.h"
-
+#include "fuse.h"
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/file.h>
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
sanitize_global_limit(&max_user_bgreq);
sanitize_global_limit(&max_user_congthresh);
-
+ fuse_iolog_init();
return 0;
err_sysfs_cleanup:
fuse_sysfs_cleanup();
fuse_fs_cleanup();
fuse_dev_cleanup();
+ fuse_iolog_exit();
}
module_init(fuse_init);
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
- freezable_schedule_timeout_killable(*timeout);
+ freezable_schedule_timeout_killable_unsafe(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
call_close |= is_wronly;
else if (is_wronly)
calldata->arg.fmode |= FMODE_WRITE;
+ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+ call_close |= is_rdwr;
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (calldata->arg.fmode == 0)
- call_close |= is_rdwr;
-
if (!nfs4_valid_open_stateid(state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- freezable_schedule_timeout_killable(timeout);
+ freezable_schedule_timeout_killable_unsafe(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
+ #include <linux/migrate.h>
static int read_block(struct inode *inode, void *addr, unsigned int block,
struct ubifs_data_node *dn)
goto dump;
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+
+ if (UBIFS_COMPR_LZ4K == le16_to_cpu(dn->compr_type))
+		out_len = len; /* lz4k: the output length is the requested length */
+ else
out_len = UBIFS_BLOCK_SIZE;
err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type));
goto out_err;
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+
+ if (UBIFS_COMPR_LZ4K == le16_to_cpu(dn->compr_type))
+		out_len = len; /* lz4k: the output length is the requested length */
+ else
out_len = UBIFS_BLOCK_SIZE;
err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
le16_to_cpu(dn->compr_type));
return ret;
}
+ #ifdef CONFIG_MIGRATION
+ static int ubifs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+ {
+ int rc;
+
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ SetPagePrivate(newpage);
+ }
+
+ migrate_page_copy(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+ }
+ #endif
+
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
/*
return 0;
}
+/* MTK: minimal fallocate() support, needed to pass Android CTS */
+long ubifs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+{
+	int err;
+	struct inode *inode = file->f_mapping->host;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	struct iattr newattrs;
+	loff_t new_len = offset + len;
+
+	if (len < 0 || offset < 0)
+		return -EINVAL;
+
+	/* Only extending the file is supported; mode flags are ignored. */
+	if (new_len < inode->i_size)
+		return -EINVAL;
+
+	newattrs.ia_size = new_len;
+	newattrs.ia_valid = ATTR_SIZE | ATTR_MTIME | ATTR_CTIME | ATTR_FILE;
+	newattrs.ia_file = file;
+
+	err = do_setattr(c, inode, &newattrs);
+	return err;
+}
+
const struct address_space_operations ubifs_file_address_operations = {
.readpage = ubifs_readpage,
.writepage = ubifs_writepage,
.write_end = ubifs_write_end,
.invalidatepage = ubifs_invalidatepage,
.set_page_dirty = ubifs_set_page_dirty,
+ #ifdef CONFIG_MIGRATION
+ .migratepage = ubifs_migrate_page,
+ #endif
.releasepage = ubifs_releasepage,
};
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+ .setxattr = ubifs_setxattr,
+ .getxattr = ubifs_getxattr,
+ .listxattr = ubifs_listxattr,
+ .removexattr = ubifs_removexattr,
};
const struct file_operations ubifs_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ubifs_compat_ioctl,
#endif
+ .fallocate = ubifs_fallocate,
};
#include <linux/gfp.h>
#include <asm/processor.h>
+#include <linux/rtpm_prio.h>
struct exec_domain;
struct futex_pi_state;
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
-
-
+extern unsigned long get_cpu_load(int cpu);
+extern unsigned long long mt_get_thread_cputime(pid_t pid);
+extern unsigned long long mt_get_cpu_idle(int cpu);
+extern unsigned long long mt_sched_clock(void);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
+
+#ifdef CONFIG_HMP_PACK_SMALL_TASK
+#define SD_SHARE_POWERLINE 0x0100 /* Domain members share power domain */
+#endif /* CONFIG_HMP_PACK_SMALL_TASK */
+
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+#define SD_BALANCE_TG 0x4000 /* Balance for thread group */
+#endif
+#ifdef CONFIG_MTK_SCHED_CMP_PACK_SMALL_TASK
+#define SD_SHARE_POWERLINE 0x8000 /* Domain members share power domain */
+#endif
extern int __weak arch_sd_sibiling_asym_packing(void);
unsigned long last_balance; /* init to jiffies. units in jiffies */
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
+#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
+ unsigned int mt_lbprof_nr_balance_failed; /* initialise to 0 */
+#endif
u64 last_update;
bool cpus_share_cache(int this_cpu, int that_cpu);
+struct clb_stats {
+	int ncpu; /* The number of CPUs */
+ int ntask; /* The number of tasks */
+ int load_avg; /* Arithmetic average of task load ratio */
+ int cpu_capacity; /* Current CPU capacity */
+ int cpu_power; /* Max CPU capacity */
+ int acap; /* Available CPU capacity */
+ int scaled_acap; /* Scaled available CPU capacity */
+	int scaled_atask; /* Scaled number of available tasks */
+ int threshold; /* Dynamic threshold */
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ int nr_normal_prio_task; /* The number of normal-prio tasks */
+ int nr_dequeuing_low_prio; /* The number of dequeuing low-prio tasks */
+#endif
+};
+
+#ifdef CONFIG_SCHED_HMP
+struct hmp_domain {
+ struct cpumask cpus;
+ struct cpumask possible_cpus;
+ struct list_head hmp_domains;
+};
+
+#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
+#ifdef CONFIG_HMP_TRACER
+struct hmp_statisic {
+ unsigned int nr_force_up; /* The number of task force up-migration */
+ unsigned int nr_force_down; /* The number of task force down-migration */
+};
+#endif /* CONFIG_HMP_TRACER */
+#endif /* CONFIG_SCHED_HMP_ENHANCEMENT */
+#endif /* CONFIG_SCHED_HMP */
#else /* CONFIG_SMP */
struct sched_domain_attr;
u64 last_runnable_update;
s64 decay_count;
unsigned long load_avg_contrib;
+ unsigned long load_avg_ratio;
+#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_HMP_ENHANCEMENT
+ unsigned long pending_load;
+ u32 nr_pending;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ u32 nr_dequeuing_low_prio;
+ u32 nr_normal_prio;
+#endif
+#endif
+ u64 hmp_last_up_migration;
+ u64 hmp_last_down_migration;
+#endif /* CONFIG_SCHED_HMP */
+ u32 usage_avg_sum;
};
#ifdef CONFIG_SCHEDSTATS
};
#endif
+#ifdef CONFIG_MTPROF_CPUTIME
+struct mtk_isr_info{
+ int isr_num;
+ int isr_count;
+ u64 isr_time;
+ char *isr_name;
+ struct mtk_isr_info *next;
+} ;
+#endif
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
struct cfs_rq *my_q;
#endif
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
/* Per-entity load-tracking */
struct sched_avg avg;
#endif
+#ifdef CONFIG_MTPROF_CPUTIME
+ u64 mtk_isr_time;
+ int mtk_isr_count;
+ struct mtk_isr_info *mtk_isr;
+#endif
};
struct sched_rt_entity {
perf_nr_task_contexts,
};
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+#define NUM_CLUSTER 2
+struct thread_group_info_t {
+	/* # of cfs threads in the thread group, per cluster */
+ unsigned long cfs_nr_running;
+ /* # of threads in the thread group per cluster */
+ unsigned long nr_running;
+ /* runnable load of the thread group per cluster */
+ unsigned long load_avg_ratio;
+};
+
+#endif
+
+#ifdef CONFIG_MT_SCHED_NOTICE
+ #ifdef CONFIG_MT_SCHED_DEBUG
+#define mt_sched_printf(x...) \
+	do { \
+		char strings[128] = ""; \
+		snprintf(strings, 128, x); \
+		printk(KERN_NOTICE x); \
+		trace_sched_log(strings); \
+	} while (0)
+ #else
+#define mt_sched_printf(x...) \
+	do { \
+		char strings[128] = ""; \
+		snprintf(strings, 128, x); \
+		trace_sched_log(strings); \
+	} while (0)
+ #endif
+
+#else
+#define mt_sched_printf(x...) do {} while (0)
+#endif
+
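The macro formats once into a 128-byte stack buffer (longer events are truncated) and hands the same text to the sched_log tracepoint, plus printk in the debug variant. A minimal usage sketch (the event text and variables are illustrative):

	mt_sched_printf("migrate task %d from cpu %d to cpu %d",
			p->pid, src_cpu, dst_cpu);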
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
* execve */
unsigned in_iowait:1;
- /* task may not gain privileges */
- unsigned no_new_privs:1;
-
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
pid_t pid;
pid_t tgid;
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
+#ifdef CONFIG_MTK_SCHED_CMP_TGS
+ raw_spinlock_t thread_group_info_lock;
+ struct thread_group_info_t thread_group_info[NUM_CLUSTER];
+#endif
+
/*
* ptraced is the list of tasks this task is using ptrace on.
* This includes both natural children and PTRACE_ATTACH targets.
struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
+/* for thrashing accounting */
+#ifdef CONFIG_ZRAM
+ unsigned long fm_flt, swap_in, swap_out;
+#endif
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
/*
* Per process flags
*/
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MTKPASR 0x80000000 /* I am in MTKPASR process */
+
+#define task_in_mtkpasr(task) unlikely(task->flags & PF_MTKPASR)
/*
* Only the _current_ task can read/write to tsk->flags, but other
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+ set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
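Callers that used to write the old no_new_privs bitfield now go through the atomic helpers; a minimal sketch of the two sides (the calling contexts are illustrative):

	/* prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) path: */
	task_set_no_new_privs(current);

	/* later, e.g. at exec time: */
	if (task_no_new_privs(current))
		;	/* do not grant elevated privileges */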
/*
* task->jobctl flags
*/
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
+
+#ifdef CONFIG_MT_PRIO_TRACER
+extern void set_user_nice_core(struct task_struct *p, long nice);
+extern int sched_setscheduler_core(struct task_struct *, int,
+ const struct sched_param *);
+extern int sched_setscheduler_nocheck_core(struct task_struct *, int,
+ const struct sched_param *);
+#endif
+
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+#if defined(CONFIG_MT_RT_SCHED) || defined(CONFIG_MT_RT_SCHED_LOG)
+static inline void set_tsk_need_released(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk, TIF_NEED_RELEASED);
+}
+
+static inline void clear_tsk_need_released(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RELEASED);
+}
+
+static inline int test_tsk_need_released(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RELEASED));
+}
+#endif
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return task_rlimit_max(current, limit);
}
+#ifdef CONFIG_MTK_SCHED_RQAVG_US
+/*
+ * @cpu: cpu id
+ * @reset: reset the statistic start time after this time query
+ * @use_maxfreq: calculate cpu loading against the max cpu frequency
+ * return: cpu loading as percentage (0~100)
+ */
+extern unsigned int sched_get_percpu_load(int cpu, bool reset, bool use_maxfreq);
+
+/*
+ * return: number of heavy tasks (loading > 90%) in the system
+ */
+extern unsigned int sched_get_nr_heavy_task(void);
+
+/*
+ * @threshold: heavy task loading threshold (0~1023)
+ * return: number of heavy tasks (loading > threshold) in the system
+ */
+extern unsigned int sched_get_nr_heavy_task_by_threshold(unsigned int threshold);
+#endif /* CONFIG_MTK_SCHED_RQAVG_US */
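A minimal usage sketch, e.g. from a hypothetical governor that polls load without resetting the sampling window (the 80% threshold is illustrative):

	unsigned int load = sched_get_percpu_load(cpu, false, false);
	unsigned int heavy = sched_get_nr_heavy_task();

	if (load > 80 && heavy > 0)
		;	/* e.g. vote for a higher frequency */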
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_KS
+extern void sched_update_nr_prod(int cpu, unsigned long nr, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
+#endif /* CONFIG_MTK_SCHED_RQAVG_KS */
+
+extern void sched_get_big_little_cpus(struct cpumask *big, struct cpumask *little);
+
#endif
#endif
#ifdef CONFIG_MODULE_SIG
- static int module_sig_check(struct load_info *info)
+ static int module_sig_check(struct load_info *info, int flags)
{
int err = -ENOKEY;
const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
const void *mod = info->hdr;
- if (info->len > markerlen &&
+ /*
+ * Require flags == 0, as a module with version information
+ * removed is no longer the module that was signed
+ */
+ if (flags == 0 &&
+ info->len > markerlen &&
memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
info->len -= markerlen;
return err;
}
#else /* !CONFIG_MODULE_SIG */
- static int module_sig_check(struct load_info *info)
+ static int module_sig_check(struct load_info *info, int flags)
{
return 0;
}
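The flags value comes from finit_module(2); passing MODULE_INIT_IGNORE_MODVERSIONS or MODULE_INIT_IGNORE_VERMAGIC strips data out of the blob, so it no longer matches what was signed and the signature is not even examined. A minimal userspace sketch (the module path is illustrative):

#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/module.h>

int main(void)
{
	int fd = open("/lib/modules/example.ko", O_RDONLY);

	if (fd < 0)
		return 1;
	/* flags != 0, so the module is treated as unsigned and is
	 * rejected when signatures are enforced. */
	return syscall(__NR_finit_module, fd, "",
		       MODULE_INIT_IGNORE_MODVERSIONS);
}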
struct module *mod;
long err;
- err = module_sig_check(info);
+ err = module_sig_check(info, flags);
if (err)
goto free_copy;
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- printk(" %s%s", mod->name, module_flags(mod, buf));
+ printk(" %s %p %s", mod->name, mod->module_core, module_flags(mod, buf));
}
preempt_enable();
if (last_unloaded_module[0])
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
+ #include <linux/console.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
+/* Machine specific panic information string */
+char *mach_panic_string;
+
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
bust_spinlocks(0);
+ console_flush_on_panic();
+
if (!panic_blink)
panic_blink = no_blink;
void print_oops_end_marker(void)
{
init_oops_id();
+
+ if (mach_panic_string)
+ printk(KERN_WARNING "Board Information: %s\n",
+ mach_panic_string);
+
printk(KERN_WARNING "---[ end trace %016llx ]---\n",
(unsigned long long)oops_id);
}
*/
void __stack_chk_fail(void)
{
+/*
panic("stack-protector: Kernel stack is corrupted in: %p\n",
__builtin_return_address(0));
+*/
+	printk(KERN_ERR "stack-protector: Kernel stack is corrupted in: %p\n",
+	       __builtin_return_address(0));
+	BUG();	/* BUG() does not return, so log the address first */
}
EXPORT_SYMBOL(__stack_chk_fail);
* 01Mar01 Andrew Morton
*/
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/memblock.h>
#include <linux/aio.h>
#include <linux/syscalls.h>
+#include <linux/suspend.h>
#include <linux/kexec.h>
#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/utsname.h>
+#include <linux/mt_sched_mon.h>
+#include <linux/aee.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+/* Some options {*/
+#define LOG_TOO_MUCH_WARNING
+#ifdef LOG_TOO_MUCH_WARNING
+static int log_in_resume;
+#endif
+/* Some options }*/
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
+bool printk_disable_uart = 0;
+static DEFINE_PER_CPU(char, printk_state);
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
/* We show everything that is MORE important than this.. */
#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
-#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
+#define DEFAULT_CONSOLE_LOGLEVEL 6 /* anything MORE serious than KERN_INFO */
int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
+EXPORT_SYMBOL_GPL(console_printk);
/*
* Low level drivers may need that to know if they can schedule in
static size_t syslog_partial;
/* index and sequence number of the first record stored in the buffer */
-static u64 log_first_seq;
-static u32 log_first_idx;
+/*static*/ u64 log_first_seq;
+/*static*/ u32 log_first_idx;
/* index and sequence number of the next record to store in the buffer */
-static u64 log_next_seq;
-static u32 log_next_idx;
+/*static*/ u64 log_next_seq;
+/*static*/ u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
+
+extern int mt_need_uart_console;
+inline void mt_disable_uart(void)
+{
+ if (mt_need_uart_console == 0) {
+ printk("<< printk console disable >>\n");
+ printk_disable_uart = 1;
+ } else {
+ printk("<< printk console can't be disabled >>\n");
+ }
+}
+inline void mt_enable_uart(void)
+{
+ if (mt_need_uart_console == 1) {
+ if (printk_disable_uart == 0)
+ return;
+ printk_disable_uart = 0;
+ printk("<< printk console enable >>\n");
+ } else {
+ printk("<< printk console can't be enabled >>\n");
+ }
+}
+
+#endif
/* cpu currently holding logbuf_lock */
static volatile unsigned int logbuf_cpu = UINT_MAX;
{
struct log *msg;
u32 size, pad_len;
-
+	int this_cpu = smp_processor_id();
+	char state = __raw_get_cpu_var(printk_state);
+	char tbuf[50];
+	unsigned tlen;
+
+	if (state == 0) {
+		__raw_get_cpu_var(printk_state) = ' ';
+		state = ' ';
+	}
+	/* Build the per-record "<state>(<cpu>)[<pid>:<comm>]" prefix. */
+	if (console_suspended == 0) {
+		tlen = snprintf(tbuf, sizeof(tbuf), "%c(%x)[%d:%s]",
+				state, this_cpu, current->pid, current->comm);
+	} else {
+		tlen = snprintf(tbuf, sizeof(tbuf), "%c%x)", state, this_cpu);
+	}
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
+	size = sizeof(struct log) + text_len + tlen + dict_len;
pad_len = (-size) & (LOG_ALIGN - 1);
size += pad_len;
/* fill message */
msg = (struct log *)(log_buf + log_next_idx);
- memcpy(log_text(msg), text, text_len);
+	memcpy(log_text(msg), tbuf, tlen);		/* prefix first */
+	memcpy(log_text(msg) + tlen, text, text_len);	/* then the message */
+ text_len += tlen;
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
msg->dict_len = dict_len;
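Every stored record now carries a "<state>(<cpu>)[<pid>:<comm>]" prefix ahead of the original text (or the shorter "<state><cpu>)" form while the console is suspended). Illustrative resulting lines, with made-up values; the state character is '-' when IRQs were disabled, '.' when the UART console is enabled, and ' ' otherwise:

	.(0)[1234:logd] init: starting service 'adbd'...
	-(2)[0:swapper/2] CPU2: Booted secondary processor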
static bool printk_time;
#endif
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
+module_param_named(disable_uart, printk_disable_uart, bool, S_IRUGO | S_IWUSR);
static size_t print_time(u64 ts, char *buf)
{
rem_nsec = do_div(ts, 1000000000);
if (!buf)
- return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
+ return snprintf(NULL, 0, "[%5lu.000000]", (unsigned long)ts);
- return sprintf(buf, "[%5lu.%06lu] ",
+ return sprintf(buf, "[%5lu.%06lu]",
(unsigned long)ts, rem_nsec / 1000);
}
return;
for_each_console(con) {
+ if (printk_disable_uart && (con->flags & CON_CONSDEV))
+ continue;
if (exclusive_console && con != exclusive_console)
continue;
if (!(con->flags & CON_ENABLED))
unsigned long flags;
int this_cpu;
int printed_len = 0;
-
+	int in_irq_disable, in_non_preempt;
+	in_irq_disable = irqs_disabled();
+	in_non_preempt = in_atomic();
+	/* Clear the buffer, then pre-format so printascii() below can
+	 * emit the text early; the reverse order would wipe the text. */
+	memset(text, 0x0, sizeof(textbuf));
+	vscnprintf(text, sizeof(textbuf), fmt, args);
boot_delay_msec(level);
printk_delay();
}
}
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+ printascii(text);
+#endif
+
if (level == -1)
level = default_message_loglevel;
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
-
+
+#ifdef CONFIG_PRINTK_PROCESS_INFO
+ if (in_irq_disable)
+ __raw_get_cpu_var(printk_state) = '-';
+#ifdef CONFIG_MT_PRINTK_UART_CONSOLE
+ else if (printk_disable_uart == 0)
+ __raw_get_cpu_var(printk_state) = '.';
+#endif
+ else
+ __raw_get_cpu_var(printk_state) = ' ';
+#endif
+
if (!(lflags & LOG_NEWLINE)) {
/*
* Flush the conflicting buffer. An earlier newline was missing,
console_lock();
console_suspended = 1;
up(&console_sem);
+ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
}
+EXPORT_SYMBOL_GPL(suspend_console);
void resume_console(void)
{
if (!console_suspend_enabled)
return;
down(&console_sem);
+ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
console_suspended = 0;
- console_unlock();
+#ifdef LOG_TOO_MUCH_WARNING
+// __raw_get_cpu_var(MT_trace_in_resume_console) = 1;
+// log_in_resume = 1;
+ console_unlock();
+// log_in_resume = 0;
+// __raw_get_cpu_var(MT_trace_in_resume_console) = 0;
+#else
+ console_unlock();
+#endif
}
+EXPORT_SYMBOL_GPL(resume_console);
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
*
* console_unlock(); may be called from any context.
*/
+#ifdef LOG_TOO_MUCH_WARNING
+static int console_log_max = 400000;
+static int already_skip_log;
+#endif
void console_unlock(void)
{
static char text[LOG_LINE_MAX + PREFIX_MAX];
static u64 seen_seq;
unsigned long flags;
bool wake_klogd = false;
- bool retry;
+ bool do_cond_resched, retry;
+#ifdef LOG_TOO_MUCH_WARNING
+ unsigned long total_log_size = 0;
+ unsigned long long t1 = 0, t2 = 0;
+ char aee_str[512];
+ int org_loglevel = console_loglevel;
+#endif
+
+
if (console_suspended) {
up(&console_sem);
return;
}
+ /*
+ * Console drivers are called under logbuf_lock, so
+ * @console_may_schedule should be cleared before; however, we may
+ * end up dumping a lot of lines, for example, if called from
+ * console registration path, and should invoke cond_resched()
+ * between lines if allowable. Not doing so can cause a very long
+ * scheduling stall on a slow console leading to RCU stall and
+ * softlockup warnings which exacerbate the issue with more
+ * messages practically incapacitating the system.
+ */
+ do_cond_resched = console_may_schedule;
console_may_schedule = 0;
/* flush buffered message fragment immediately to console */
int level;
raw_spin_lock_irqsave(&logbuf_lock, flags);
+#ifdef LOG_TOO_MUCH_WARNING /*For Resume log too much*/
+ if (log_in_resume) {
+ t1 = sched_clock();
+ }
+#endif
+
if (seen_seq != log_next_seq) {
wake_klogd = true;
seen_seq = log_next_seq;
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, text, len);
- start_critical_timings();
+#ifdef LOG_TOO_MUCH_WARNING
+	/*
+	 * A UART console manages roughly one character per 10us, so
+	 * 400,000 characters take about 4 seconds to drain.
+	 */
+ if (log_in_resume) {
+ org_loglevel = console_loglevel;
+ console_loglevel = 4;
+ }
+ total_log_size += len;
+ if (total_log_size < console_log_max)
+ call_console_drivers(level, text, len);
+ else if (!already_skip_log) {
+ sprintf(aee_str, "PRINTK too much:%lu", total_log_size);
+ aee_kernel_warning(aee_str, "Need to shrink kernel log");
+ already_skip_log = 1;
+ }
+ /**/
+ start_critical_timings();
+ /* For Resume log too much*/
+ if (log_in_resume) {
+ t2 = sched_clock();
+ console_loglevel = org_loglevel;
+ if (t2 - t1 > 100000000) {
+			sprintf(aee_str, "[RESUME CONSOLE too long:%lluns>100ms] s:%lluns, e:%lluns\n", t2 - t1, t1, t2);
+ aee_kernel_warning(aee_str, "Need to shrink kernel log");
+ }
+ }
+
+ /**/
+#else
+ start_critical_timings();
+ call_console_drivers(level, text, len);
+#endif
local_irq_restore(flags);
+
+ if (do_cond_resched)
+ cond_resched();
}
console_locked = 0;
mutex_release(&console_lock_dep_map, 1, _RET_IP_);
console_unlock();
}
+ /**
+ * console_flush_on_panic - flush console content on panic
+ *
+ * Immediately output all pending messages no matter what.
+ */
+ void console_flush_on_panic(void)
+ {
+ /*
+ * If someone else is holding the console lock, trylock will fail
+ * and may_schedule may be set. Ignore and proceed to unlock so
+ * that messages are flushed out. As this can be called from any
+ * context and we don't want to get preempted while flushing,
+ * ensure may_schedule is cleared.
+ */
+ console_trylock();
+ console_may_schedule = 0;
+ console_unlock();
+ }
+
/*
* Return the console tty driver structure and its associated index
*/
static DEFINE_PER_CPU(int, printk_pending);
static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+static DEFINE_PER_CPU(int, printk_sched_length);
static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
if (pending & PRINTK_PENDING_SCHED) {
char *buf = __get_cpu_var(printk_sched_buf);
- printk(KERN_WARNING "[sched_delayed] %s", buf);
+ printk(KERN_WARNING "[printk_delayed:start]\n");
+ printk(KERN_WARNING "%s", buf);
+ printk(KERN_WARNING "[printk_delayed:done]\n");
+ __get_cpu_var(printk_sched_length) = 0;
}
if (pending & PRINTK_PENDING_WAKEUP)
va_list args;
char *buf;
int r;
-
+ int buf_length;
local_irq_save(flags);
buf = __get_cpu_var(printk_sched_buf);
+ buf_length = __get_cpu_var(printk_sched_length);
va_start(args, fmt);
- r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+	if (PRINTK_BUF_SIZE >= buf_length) {
+		r = vsnprintf(buf + buf_length, PRINTK_BUF_SIZE - buf_length, fmt, args);
+		__get_cpu_var(printk_sched_length) += r;
+	} else {
+		printk("delayed log buf overflow, size:%d\n", buf_length);
+		r = 0;
+	}
va_end(args);
__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
task_thread_info(current));
}
+void get_kernel_log_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
+{
+ *addr = (unsigned long)log_buf;
+ *size = log_buf_len;
+ *start = (unsigned long)&log_first_idx;
+}
#endif
struct task_struct *t;
if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
- if (signal->flags & SIGNAL_GROUP_COREDUMP)
- return sig == SIGKILL;
+ if (signal->flags & SIGNAL_GROUP_COREDUMP) {
+ printk(KERN_DEBUG "[%d:%s] is in the middle of dying so skip sig %d\n",p->pid, p->comm, sig);
+ }
+ return 0;
/*
* The process is in the middle of dying, nothing to do.
*/
}
#endif
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
+
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
int group, int from_ancestor_ns)
{
struct sigqueue *q;
int override_rlimit;
int ret = 0, result;
+ unsigned state;
+ state = t->state ? __ffs(t->state) + 1 : 0;
+ printk(KERN_DEBUG "[%d:%s] sig %d to [%d:%s] stat=%c\n",
+ current->pid, current->comm, sig, t->pid, t->comm,
+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
assert_spin_locked(&t->sighand->siglock);
result = TRACE_SIGNAL_IGNORED;
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
- timeout = schedule_timeout_interruptible(timeout);
+ timeout = freezable_schedule_timeout_interruptible(timeout);
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
- (task_pid_vnr(current) != pid)) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info->si_code < 0);
+ (task_pid_vnr(current) != pid))
return -EPERM;
- }
+
info->si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
- if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
- (task_pid_vnr(current) != pid)) {
- /* We used to allow any < 0 si_code */
- WARN_ON_ONCE(info->si_code < 0);
+ if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+ (task_pid_vnr(current) != pid))
return -EPERM;
- }
+
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
extern unsigned int core_pipe_limit;
#endif
extern int pid_max;
+extern int extra_free_kbytes;
+extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
extern int compat_log;
.proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = &zero,
},
+ {
+ .procname = "extra_free_kbytes",
+ .data = &extra_free_kbytes,
+ .maxlen = sizeof(extra_free_kbytes),
+ .mode = 0644,
+ .proc_handler = min_free_kbytes_sysctl_handler,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "min_free_order_shift",
+ .data = &min_free_order_shift,
+ .maxlen = sizeof(min_free_order_shift),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
{
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
.proc_handler = &pipe_proc_fn,
.extra1 = &pipe_min_size,
},
+ {
+ .procname = "pipe-user-pages-hard",
+ .data = &pipe_user_pages_hard,
+ .maxlen = sizeof(pipe_user_pages_hard),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "pipe-user-pages-soft",
+ .data = &pipe_user_pages_soft,
+ .maxlen = sizeof(pipe_user_pages_soft),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
{ }
};
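The two knobs pair with the per-user pipe_bufs counter added to struct user_struct above. A minimal sketch of the kind of cap the pipe code can now enforce (the helper name and exact policy are illustrative):

static bool too_many_pipe_buffers(struct user_struct *user)
{
	unsigned long pages = atomic_long_read(&user->pipe_bufs);

	if (pipe_user_pages_hard && pages >= pipe_user_pages_hard)
		return true;	/* hard cap: always refuse */
	if (pipe_user_pages_soft && pages >= pipe_user_pages_soft &&
	    !capable(CAP_SYS_RESOURCE))
		return true;	/* soft cap: unprivileged users only */
	return false;
}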
len = 0;
p = buffer;
while (len < *lenp) {
- if (get_user(c, p++))
+ if (get_user(c, p))
return -EFAULT;
if (c == 0 || c == '\n')
break;
+ p++;
len++;
}
if (len >= maxlen)
static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
{
struct trace_bprintk_fmt *pos;
+
+ if (!fmt)
+ return ERR_PTR(-EINVAL);
+
list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
if (!strcmp(pos->fmt, fmt))
return pos;
for (iter = start; iter < end; iter++) {
struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
if (tb_fmt) {
- *iter = tb_fmt->fmt;
+ if (!IS_ERR(tb_fmt))
+ *iter = tb_fmt->fmt;
continue;
}
{
const char **fmt = v;
int start_index;
+ int last_index;
start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
if (*pos < start_index)
return __start___trace_bprintk_fmt + *pos;
+ /*
+ * The __tracepoint_str section is treated the same as the
+ * __trace_printk_fmt section. The difference is that the
+ * __trace_printk_fmt section should only be used by trace_printk()
+ * in a debugging environment, as if anything exists in that section
+ * the trace_printk() helper buffers are allocated, which would just
+ * waste space in a production environment.
+ *
+ * The __tracepoint_str sections on the other hand are used by
+ * tracepoints which need to map pointers to their strings to
+ * the ASCII text for userspace.
+ */
+ last_index = start_index;
+ start_index = __stop___tracepoint_str - __start___tracepoint_str;
+
+ if (*pos < last_index + start_index)
+ return __start___tracepoint_str + (*pos - last_index);
+
return find_next_mod_format(start_index, v, fmt, pos);
}
SetPageUptodate(page);
}
+#ifndef CONFIG_MEMCG
swap = get_swap_page();
+#else
+ swap = get_swap_page_by_memcg(page);
+#endif
if (!swap.val)
goto redirty;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
- umode_t mode, dev_t dev, unsigned long flags)
+ umode_t mode, dev_t dev, unsigned long flags, int atomic_copy)
{
struct inode *inode;
struct shmem_inode_info *info;
inode = new_inode(sb);
if (inode) {
+ /* We don't let shmem use __GFP_SLOWHIGHMEM */
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE);
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
+ if (atomic_copy)
+ inode->i_flags |= S_ATOMIC_COPY;
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
pgoff_t start, index, end;
int error;
- mutex_lock(&inode->i_mutex);
+	/* Use trylock: i_mutex may already be held on this path. */
+	if (!mutex_trylock(&inode->i_mutex))
+		return -1;
if (mode & FALLOC_FL_PUNCH_HOLE) {
struct address_space *mapping = file->f_mapping;
NULL);
if (error) {
/* Remove the !PageUptodate pages we added */
- shmem_undo_range(inode,
- (loff_t)start << PAGE_CACHE_SHIFT,
- (loff_t)index << PAGE_CACHE_SHIFT, true);
+ if (index > start) {
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_CACHE_SHIFT,
+ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
+ }
goto undone;
}
struct inode *inode;
int error = -ENOSPC;
- inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
+ inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE, 0);
if (inode) {
error = security_inode_init_security(inode, dir,
&dentry->d_name,
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
- inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
+ inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE, 0);
if (!inode)
return -ENOSPC;
sb->s_flags |= MS_POSIXACL;
#endif
- inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
+ inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE, 0);
if (!inode)
goto failed;
inode->i_uid = sbinfo->uid;
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
-#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
+#define shmem_get_inode(sb, dir, mode, dev, flags, atomic_copy) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ * @atomic_copy: Atomically copy the area when hibernating?
*/
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags,
+ int atomic_copy)
{
struct file *res;
struct inode *inode;
path.mnt = mntget(shm_mnt);
res = ERR_PTR(-ENOSPC);
- inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
+ inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags, atomic_copy);
if (!inode)
goto put_dentry;
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+}
+
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
struct file *file;
loff_t size = vma->vm_end - vma->vm_start;
- file = shmem_file_setup("dev/zero", size, vma->vm_flags);
+ file = shmem_file_setup("dev/zero", size, vma->vm_flags, 0);
if (IS_ERR(file))
return PTR_ERR(file);
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
+ shmem_set_file(vma, file);
return 0;
}
static const struct arpt_arp uncond;
return e->target_offset == sizeof(struct arpt_entry) &&
- memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
+ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
size = e->next_offset;
e = (struct arpt_entry *)
(entry0 + pos + size);
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct arpt_entry *)
(entry0 + newpos);
return 1;
}
-
+static inline int check_entry(const struct arpt_entry *e)
+{
+ const struct xt_entry_target *t;
+
+ if (!arp_checkentry(&e->arp))
+ return -EINVAL;
+
+ if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
+ return -EINVAL;
+
+ t = arpt_get_target_c(e);
+ if (e->target_offset + t->u.target_size > e->next_offset)
+ return -EINVAL;
+
+ return 0;
+}
+
static inline int check_target(struct arpt_entry *e, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
return -EINVAL;
}
- if (!arp_checkentry(&e->arp))
- return -EINVAL;
+ err = check_entry(e);
+
- err = xt_check_entry_offsets(e, e->elems, e->target_offset,
- e->next_offset);
if (err)
return err;
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
}
}
- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
- duprintf("Looping hook\n");
+ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
return -ELOOP;
- }
/* Finally, each sanity check must pass */
i = 0;
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- const char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
struct arpt_entry *iter;
unsigned int addend;
- #ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
-
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
- #endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
-
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-
- #ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
- #endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, NFPROTO_ARP, name);
+ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
}
#ifdef CONFIG_COMPAT
+ struct compat_arpt_replace {
+ char name[XT_TABLE_MAXNAMELEN];
+ u32 valid_hooks;
+ u32 num_entries;
+ u32 size;
+ u32 hook_entry[NF_ARP_NUMHOOKS];
+ u32 underflow[NF_ARP_NUMHOOKS];
+ u32 num_counters;
+ compat_uptr_t counters;
+ struct compat_arpt_entry entries[0];
+ };
+
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct xt_entry_target *t;
module_put(t->u.kernel.target->me);
}
- static inline int
+ static int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
return -EINVAL;
}
- if (!arp_checkentry(&e->arp))
- return -EINVAL;
+ /* For purposes of check_entry casting the compat entry is fine */
+ ret = check_entry((struct arpt_entry *)e);
+
- ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
- e->next_offset);
if (ret)
return ret;
if (ret)
goto release_target;
- /* Check hooks & underflows */
- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
release_target:
return ret;
}
- static int
+ static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct arpt_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
- ret = 0;
origsize = *size;
de = (struct arpt_entry *)*dstptr;
memcpy(de, e, sizeof(struct arpt_entry));
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
}
- static int translate_compat_table(const char *name,
- unsigned int valid_hooks,
- struct xt_table_info **pinfo,
+ static int translate_compat_table(struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_arpt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_arpt_entry *iter0;
- struct arpt_entry *iter1;
+ struct arpt_replace repl;
unsigned int size;
int ret = 0;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
- xt_compat_init_offsets(NFPROTO_ARP, number);
+ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
newinfo->hook_entry[i] = info->hook_entry[i];
newinfo->underflow[i] = info->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone */
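+ /* entry1/newinfo now contain the ruleset in native layout, exactly as
+ * native userspace would have produced it; translate_table() below
+ * re-runs the full validation (hook entries, underflows, loops) on it. */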
+
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
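+ /* build a native arpt_replace header from the compat one; the hook
+ * offsets are refreshed from newinfo below because entry sizes grew
+ * during the compat-to-native copy */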
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = check_target(iter1, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(arpt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
free_newinfo:
xt_free_table_info(newinfo);
- out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+ out_unlock:
+ xt_compat_flush_offsets(NFPROTO_ARP);
+ xt_compat_unlock(NFPROTO_ARP);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
- out_unlock:
- xt_compat_flush_offsets(NFPROTO_ARP);
- xt_compat_unlock(NFPROTO_ARP);
- goto out;
}
- struct compat_arpt_replace {
- char name[XT_TABLE_MAXNAMELEN];
- u32 valid_hooks;
- u32 num_entries;
- u32 size;
- u32 hook_entry[NF_ARP_NUMHOOKS];
- u32 underflow[NF_ARP_NUMHOOKS];
- u32 num_counters;
- compat_uptr_t counters;
- struct compat_arpt_entry entries[0];
- };
-
static int compat_do_replace(struct net *net, void __user *user,
unsigned int len)
{
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
goto free_newinfo;
}
- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
static const struct ipt_ip uncond;
return e->target_offset == sizeof(struct ipt_entry) &&
- memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
+ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
size = e->next_offset;
e = (struct ipt_entry *)
(entry0 + pos + size);
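+ /* a jump past the last entry is malformed; returning 0 makes
+ * the caller reject the ruleset */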
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct ipt_entry *)
(entry0 + newpos);
module_put(par.match->me);
}
+static int
+check_entry(const struct ipt_entry *e)
+{
+ const struct xt_entry_target *t;
+
+ if (!ip_checkentry(&e->ip))
+ return -EINVAL;
+
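+ /* the fixed target header must fit before next_offset; only then
+ * is reading t->u.target_size below safe */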
+ if (e->target_offset + sizeof(struct xt_entry_target) >
+ e->next_offset)
+ return -EINVAL;
+
+ t = ipt_get_target_c(e);
+ if (e->target_offset + t->u.target_size > e->next_offset)
+ return -EINVAL;
+
+ return 0;
+}
+
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
return -EINVAL;
}
- if (!ip_checkentry(&e->ip))
- return -EINVAL;
+ err = check_entry(e);
- err = xt_check_entry_offsets(e, e->elems, e->target_offset,
- e->next_offset);
if (err)
return err;
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- const char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
struct ipt_entry *iter;
unsigned int addend;
- #ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
- #endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
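+ /* copy and validate the counters header and the counter array in one
+ * step; this also handles the 32-bit compat layout */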
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-
- #ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
- #endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
-
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
-
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, AF_INET, name);
+ t = xt_find_table_lock(net, AF_INET, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
- const char *name,
const struct ipt_ip *ip,
unsigned int hookmask,
int *size)
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
return -EINVAL;
}
- if (!ip_checkentry(&e->ip))
- return -EINVAL;
+ /* For purposes of check_entry casting the compat entry is fine */
+ ret = check_entry((struct ipt_entry *)e);
- ret = xt_compat_check_entry_offsets(e, e->elems,
- e->target_offset, e->next_offset);
if (ret)
return ret;
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ip, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
+ &off);
if (ret != 0)
goto release_matches;
++j;
if (ret)
goto out;
- /* Check hooks & underflows */
- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
out:
return ret;
}
- static int
+ static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct xt_target *target;
struct ipt_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
struct xt_entry_match *ematch;
- ret = 0;
origsize = *size;
de = (struct ipt_entry *)*dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
- xt_ematch_foreach(ematch, e) {
- ret = xt_compat_match_from_user(ematch, dstptr, size);
- if (ret != 0)
- return ret;
- }
+ xt_ematch_foreach(ematch, e)
+ xt_compat_match_from_user(ematch, dstptr, size);
+
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
target = t->u.kernel.target;
xt_compat_target_from_user(t, dstptr, size);
de->next_offset = e->next_offset - (origsize - *size);
+
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)de - base < newinfo->hook_entry[h])
newinfo->hook_entry[h] -= origsize - *size;
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
- }
-
- static int
- compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
- {
- struct xt_entry_match *ematch;
- struct xt_mtchk_param mtpar;
- unsigned int j;
- int ret = 0;
-
- j = 0;
- mtpar.net = net;
- mtpar.table = name;
- mtpar.entryinfo = &e->ip;
- mtpar.hook_mask = e->comefrom;
- mtpar.family = NFPROTO_IPV4;
- xt_ematch_foreach(ematch, e) {
- ret = check_match(ematch, &mtpar);
- if (ret != 0)
- goto cleanup_matches;
- ++j;
- }
-
- ret = check_target(e, net, name);
- if (ret)
- goto cleanup_matches;
- return 0;
-
- cleanup_matches:
- xt_ematch_foreach(ematch, e) {
- if (j-- == 0)
- break;
- cleanup_match(ematch, net);
- }
- return ret;
}
static int
translate_compat_table(struct net *net,
- const char *name,
- unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_ipt_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ipt_entry *iter0;
- struct ipt_entry *iter1;
+ struct ipt_replace repl;
unsigned int size;
int ret;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
- xt_compat_init_offsets(AF_INET, number);
+ xt_compat_init_offsets(AF_INET, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- newinfo->hook_entry[i] = info->hook_entry[i];
- newinfo->underflow[i] = info->underflow[i];
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
+ newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone.
+ * entry1/newinfo contains a 64bit ruleset that looks exactly as
+ * generated by 64bit userspace.
+ *
+ * Call standard translate_table() to validate all hook_entrys,
+ * underflows, check for loops, etc.
+ */
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
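+ /* hook_entry/underflow were rebased while copying (native entries
+ * are larger than their compat counterparts), so take the refreshed
+ * values from newinfo rather than the user-supplied compat offsets */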
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = compat_check_entry(iter1, net, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(ipt_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1, net);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(net, newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
free_newinfo:
xt_free_table_info(newinfo);
- out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+ out_unlock:
+ xt_compat_flush_offsets(AF_INET);
+ xt_compat_unlock(AF_INET);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
- out_unlock:
- xt_compat_flush_offsets(AF_INET);
- xt_compat_unlock(AF_INET);
- goto out;
}
static int
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
goto free_newinfo;
}
- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
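+/* default initial receive window, in segments; overridable via sysctl */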
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
u32 mss = tcp_sk(sk)->advmss;
- u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ u32 icwnd = sysctl_tcp_default_init_rwnd;
int rcvmem;
/* Limit to sysctl_tcp_default_init_rwnd segments if mss <= 1460,
* or (1460 * sysctl_tcp_default_init_rwnd) / mss segments, with a
* minimum of two segments.
*/
if (mss > 1460)
- icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < mss)
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk1 = inet_csk(sk); /* non-const alias; used to clear icsk_MMSRB below */
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
bool new_recovery = false;
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
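+ /* MTK MMSRB path: re-derive cwnd from the receive window and
+ * ssthresh instead of collapsing to a single segment */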
+ if (icsk->icsk_MMSRB == 1) {
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss snd_cwnd=%u, snd_cwnd_cnt=%u\n", tp->snd_cwnd, tp->snd_cwnd_cnt);
+ #endif
+ /* estimate cwnd from the advertised receive window */
+ if (tp->mss_cache != 0)
+ tp->snd_cwnd = tp->rcv_wnd / tp->mss_cache;
+ else
+ tp->snd_cwnd = tp->rcv_wnd / tp->advmss;
+
+ /* then set it to half of snd_ssthresh; this assignment
+ * overrides the estimate above */
+ if (tp->snd_ssthresh > 16)
+ tp->snd_cwnd = tp->snd_ssthresh / 2;
+ else
+ tp->snd_cwnd = tp->snd_ssthresh / 2 + 4;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss update snd_cwnd=%u\n", tp->snd_cwnd);
+ #endif
+ icsk1->icsk_MMSRB = 0;
+ #ifdef CONFIG_MTK_NET_LOGGING
+ printk("[mtk_net][mmspb] tcp_enter_loss set icsk_MMSRB=0\n");
+ #endif
+ } else {
tp->snd_cwnd = 1;
+ }
+
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
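+ /* the retransmit timer is clamped to the tunable sysctl_tcp_rto_max */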
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
return true;
}
return false;
return false;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rto = delta;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
*/
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
}
}
challenge_timestamp = now;
ACCESS_ONCE(challenge_count) = half +
- reciprocal_divide(prandom_u32(),
- sysctl_tcp_challenge_ack_limit);
+ reciprocal_divide(prandom_u32(),
+ sysctl_tcp_challenge_ack_limit);
}
count = ACCESS_ONCE(challenge_count);
if (count > 0) {
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
discard:
__kfree_skb(skb);
/* Set window scaling on max possible window
* See RFC1323 for an explanation of the limit to 14
*/
- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+ space = max_t(u32, space, sysctl_tcp_rmem[2]);
+ space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
while (space > 65535 && (*rcv_wscale) < 14) {
space >>= 1;
}
/* Set initial window to a value enough for senders starting with
- * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+ * initial congestion window of sysctl_tcp_default_init_rwnd. Place
* a limit on the initial window when mss is larger than 1460.
*/
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+ int init_cwnd = sysctl_tcp_default_init_rwnd;
if (mss > 1460)
- init_cwnd =
- max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+ init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
return true;
}
rearm_timer:
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
/* Timer for repeating the SYN until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
return 0;
}
EXPORT_SYMBOL(tcp_connect);
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ TCP_DELACK_MAX, sysctl_tcp_rto_max);
return;
}
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min_t(unsigned int, icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
static const struct ip6t_ip6 uncond;
return e->target_offset == sizeof(struct ip6t_entry) &&
- memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
+ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
+
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
size = e->next_offset;
e = (struct ip6t_entry *)
(entry0 + pos + size);
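+ /* a jump past the last entry is malformed; returning 0 makes
+ * the caller reject the ruleset */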
+ if (pos + size >= newinfo->size)
+ return 0;
e->counters.pcnt = pos;
pos += size;
} else {
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
+ if (newpos >= newinfo->size)
+ return 0;
}
e = (struct ip6t_entry *)
(entry0 + newpos);
module_put(par.match->me);
}
-
+static int
+check_entry(const struct ip6t_entry *e)
+{
+ const struct xt_entry_target *t;
+
+ if (!ip6_checkentry(&e->ipv6))
+ return -EINVAL;
+
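+ /* the fixed target header must fit before next_offset; only then
+ * is reading t->u.target_size below safe */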
+ if (e->target_offset + sizeof(struct xt_entry_target) >
+ e->next_offset)
+ return -EINVAL;
+
+ t = ip6t_get_target_c(e);
+ if (e->target_offset + t->u.target_size > e->next_offset)
+ return -EINVAL;
+
+ return 0;
+}
+
static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ipv6 = par->entryinfo;
return -EINVAL;
}
- if (!ip6_checkentry(&e->ipv6))
- return -EINVAL;
+ err = check_entry(e);
- err = xt_check_entry_offsets(e, e->elems, e->target_offset,
- e->next_offset);
if (err)
return err;
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
- pr_err("Underflows must be unconditional and "
- "use the STANDARD target with "
- "ACCEPT/DROP\n");
+ pr_debug("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
/* overflow check */
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
unsigned int i, curcpu;
struct xt_counters_info tmp;
struct xt_counters *paddc;
- unsigned int num_counters;
- char *name;
- int size;
- void *ptmp;
struct xt_table *t;
const struct xt_table_info *private;
int ret = 0;
const void *loc_cpu_entry;
struct ip6t_entry *iter;
unsigned int addend;
- #ifdef CONFIG_COMPAT
- struct compat_xt_counters_info compat_tmp;
- if (compat) {
- ptmp = &compat_tmp;
- size = sizeof(struct compat_xt_counters_info);
- } else
- #endif
- {
- ptmp = &tmp;
- size = sizeof(struct xt_counters_info);
- }
-
- if (copy_from_user(ptmp, user, size) != 0)
- return -EFAULT;
-
- #ifdef CONFIG_COMPAT
- if (compat) {
- num_counters = compat_tmp.num_counters;
- name = compat_tmp.name;
- } else
- #endif
- {
- num_counters = tmp.num_counters;
- name = tmp.name;
- }
-
- if (len != size + num_counters * sizeof(struct xt_counters))
- return -EINVAL;
-
- paddc = vmalloc(len - size);
- if (!paddc)
- return -ENOMEM;
-
- if (copy_from_user(paddc, user + size, len - size) != 0) {
- ret = -EFAULT;
- goto free;
- }
-
- t = xt_find_table_lock(net, AF_INET6, name);
+ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+ if (IS_ERR(paddc))
+ return PTR_ERR(paddc);
+ t = xt_find_table_lock(net, AF_INET6, tmp.name);
if (IS_ERR_OR_NULL(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
local_bh_disable();
private = t->private;
- if (private->number != num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
- const char *name,
const struct ip6t_ip6 *ipv6,
unsigned int hookmask,
int *size)
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
- const unsigned char *limit,
- const unsigned int *hook_entries,
- const unsigned int *underflows,
- const char *name)
+ const unsigned char *limit)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
- int ret, off, h;
+ int ret, off;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
return -EINVAL;
}
- if (!ip6_checkentry(&e->ipv6))
- return -EINVAL;
+ /* For purposes of check_entry casting the compat entry is fine */
+ ret = check_entry((struct ip6t_entry *)e);
- ret = xt_compat_check_entry_offsets(e, e->elems,
- e->target_offset, e->next_offset);
if (ret)
return ret;
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ipv6, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
+ &off);
if (ret != 0)
goto release_matches;
++j;
if (ret)
goto out;
- /* Check hooks & underflows */
- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
- if ((unsigned char *)e - base == hook_entries[h])
- newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
- newinfo->underflow[h] = underflows[h];
- }
-
- /* Clear counters and comefrom */
- memset(&e->counters, 0, sizeof(e->counters));
- e->comefrom = 0;
return 0;
out:
return ret;
}
- static int
+ static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
- unsigned int *size, const char *name,
+ unsigned int *size,
struct xt_table_info *newinfo, unsigned char *base)
{
struct xt_entry_target *t;
struct ip6t_entry *de;
unsigned int origsize;
- int ret, h;
+ int h;
struct xt_entry_match *ematch;
- ret = 0;
origsize = *size;
de = (struct ip6t_entry *)*dstptr;
memcpy(de, e, sizeof(struct ip6t_entry));
*dstptr += sizeof(struct ip6t_entry);
*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
- xt_ematch_foreach(ematch, e) {
- ret = xt_compat_match_from_user(ematch, dstptr, size);
- if (ret != 0)
- return ret;
- }
+ xt_ematch_foreach(ematch, e)
+ xt_compat_match_from_user(ematch, dstptr, size);
+
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ip6t_get_target(e);
xt_compat_target_from_user(t, dstptr, size);
if ((unsigned char *)de - base < newinfo->underflow[h])
newinfo->underflow[h] -= origsize - *size;
}
- return ret;
- }
-
- static int compat_check_entry(struct ip6t_entry *e, struct net *net,
- const char *name)
- {
- unsigned int j;
- int ret = 0;
- struct xt_mtchk_param mtpar;
- struct xt_entry_match *ematch;
-
- j = 0;
- mtpar.net = net;
- mtpar.table = name;
- mtpar.entryinfo = &e->ipv6;
- mtpar.hook_mask = e->comefrom;
- mtpar.family = NFPROTO_IPV6;
- xt_ematch_foreach(ematch, e) {
- ret = check_match(ematch, &mtpar);
- if (ret != 0)
- goto cleanup_matches;
- ++j;
- }
-
- ret = check_target(e, net, name);
- if (ret)
- goto cleanup_matches;
- return 0;
-
- cleanup_matches:
- xt_ematch_foreach(ematch, e) {
- if (j-- == 0)
- break;
- cleanup_match(ematch, net);
- }
- return ret;
}
static int
translate_compat_table(struct net *net,
- const char *name,
- unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
- unsigned int total_size,
- unsigned int number,
- unsigned int *hook_entries,
- unsigned int *underflows)
+ const struct compat_ip6t_replace *compatr)
{
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
struct compat_ip6t_entry *iter0;
- struct ip6t_entry *iter1;
+ struct ip6t_replace repl;
unsigned int size;
int ret = 0;
info = *pinfo;
entry0 = *pentry0;
- size = total_size;
- info->number = number;
-
- /* Init all hooks to impossible value. */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- info->hook_entry[i] = 0xFFFFFFFF;
- info->underflow[i] = 0xFFFFFFFF;
- }
+ size = compatr->size;
+ info->number = compatr->num_entries;
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET6);
- xt_compat_init_offsets(AF_INET6, number);
+ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
/* Walk through entries, checking offsets. */
- xt_entry_foreach(iter0, entry0, total_size) {
+ xt_entry_foreach(iter0, entry0, compatr->size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
entry0,
- entry0 + total_size,
- hook_entries,
- underflows,
- name);
+ entry0 + compatr->size);
if (ret != 0)
goto out_unlock;
++j;
}
ret = -EINVAL;
- if (j != number) {
+ if (j != compatr->num_entries) {
duprintf("translate_compat_table: %u not %u entries\n",
- j, number);
+ j, compatr->num_entries);
goto out_unlock;
}
- /* Check hooks all assigned */
- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- /* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
- continue;
- if (info->hook_entry[i] == 0xFFFFFFFF) {
- duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
- goto out_unlock;
- }
- if (info->underflow[i] == 0xFFFFFFFF) {
- duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
- goto out_unlock;
- }
- }
-
ret = -ENOMEM;
newinfo = xt_alloc_table_info(size);
if (!newinfo)
goto out_unlock;
- newinfo->number = number;
+ newinfo->number = compatr->num_entries;
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
- newinfo->hook_entry[i] = info->hook_entry[i];
- newinfo->underflow[i] = info->underflow[i];
+ newinfo->hook_entry[i] = compatr->hook_entry[i];
+ newinfo->underflow[i] = compatr->underflow[i];
}
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
- size = total_size;
- xt_entry_foreach(iter0, entry0, total_size) {
- ret = compat_copy_entry_from_user(iter0, &pos, &size,
- name, newinfo, entry1);
- if (ret != 0)
- break;
- }
+ size = compatr->size;
+ xt_entry_foreach(iter0, entry0, compatr->size)
+ compat_copy_entry_from_user(iter0, &pos, &size,
+ newinfo, entry1);
+
+ /* all module references in entry0 are now gone. */
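+ /* entry1/newinfo now contain the ruleset in native layout;
+ * translate_table() below re-runs the full validation on it. */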
xt_compat_flush_offsets(AF_INET6);
xt_compat_unlock(AF_INET6);
- if (ret)
- goto free_newinfo;
- ret = -ELOOP;
- if (!mark_source_chains(newinfo, valid_hooks, entry1))
- goto free_newinfo;
+ memcpy(&repl, compatr, sizeof(*compatr));
- i = 0;
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- ret = compat_check_entry(iter1, net, name);
- if (ret != 0)
- break;
- ++i;
- if (strcmp(ip6t_get_target(iter1)->u.user.name,
- XT_ERROR_TARGET) == 0)
- ++newinfo->stacksize;
- }
- if (ret) {
- /*
- * The first i matches need cleanup_entry (calls ->destroy)
- * because they had called ->check already. The other j-i
- * entries need only release.
- */
- int skip = i;
- j -= i;
- xt_entry_foreach(iter0, entry0, newinfo->size) {
- if (skip-- > 0)
- continue;
- if (j-- == 0)
- break;
- compat_release_entry(iter0);
- }
- xt_entry_foreach(iter1, entry1, newinfo->size) {
- if (i-- == 0)
- break;
- cleanup_entry(iter1, net);
- }
- xt_free_table_info(newinfo);
- return ret;
+ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ repl.hook_entry[i] = newinfo->hook_entry[i];
+ repl.underflow[i] = newinfo->underflow[i];
}
- /* And one copy for every other CPU */
- for_each_possible_cpu(i)
- if (newinfo->entries[i] && newinfo->entries[i] != entry1)
- memcpy(newinfo->entries[i], entry1, newinfo->size);
+ repl.num_counters = 0;
+ repl.counters = NULL;
+ repl.size = newinfo->size;
+ ret = translate_table(net, newinfo, entry1, &repl);
+ if (ret)
+ goto free_newinfo;
*pinfo = newinfo;
*pentry0 = entry1;
free_newinfo:
xt_free_table_info(newinfo);
- out:
- xt_entry_foreach(iter0, entry0, total_size) {
+ return ret;
+ out_unlock:
+ xt_compat_flush_offsets(AF_INET6);
+ xt_compat_unlock(AF_INET6);
+ xt_entry_foreach(iter0, entry0, compatr->size) {
if (j-- == 0)
break;
compat_release_entry(iter0);
}
return ret;
- out_unlock:
- xt_compat_flush_offsets(AF_INET6);
- xt_compat_unlock(AF_INET6);
- goto out;
}
static int
return -ENOMEM;
if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
return -ENOMEM;
+ if (tmp.num_counters == 0)
+ return -EINVAL;
+
tmp.name[sizeof(tmp.name)-1] = 0;
newinfo = xt_alloc_table_info(tmp.size);
goto free_newinfo;
}
- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
- &newinfo, &loc_cpu_entry, tmp.size,
- tmp.num_entries, tmp.hook_entry,
- tmp.underflow);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
+ fl6.flowi6_uid = sock_i_uid(sk);
final_p = fl6_update_dst(&fl6, np->opt, &final);
if (err)
goto late_failure;
+ printk(KERN_INFO "net_sock, IPV6 socket[%lu] sport:%u\n", SOCK_INODE(sk->sk_socket)->i_ino, ntohs(inet->inet_sport));
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
fl6.flowi6_proto = IPPROTO_TCP;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = inet6_iif(skb);
+ fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
TCP_ECN_create_request(req, skb, sock_net(sk));
treq->iif = sk->sk_bound_dev_if;
+ inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
#include <string.h>
#include <unistd.h>
+ /*
+ * glibc synced up and added the metag number but didn't add the relocations.
+ * Work around this in a crude manner for now.
+ */
#ifndef EM_METAG
- /* Remove this when these make it to the standard system elf.h. */
#define EM_METAG 174
+ #endif
+ #ifndef R_METAG_ADDR32
#define R_METAG_ADDR32 2
+ #endif
+ #ifndef R_METAG_NONE
#define R_METAG_NONE 3
#endif
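+/* AArch64 constants may likewise be missing from older system elf.h */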
+#ifndef EM_AARCH64
+#define EM_AARCH64 183
+#define R_AARCH64_ABS64 257
+#endif
+
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
case EM_ARM: reltype = R_ARM_ABS32;
altmcount = "__gnu_mcount_nc";
break;
+ case EM_AARCH64:
+ reltype = R_AARCH64_ABS64; gpfx = '_'; break;
case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
case EM_METAG: reltype = R_METAG_ADDR32;
altmcount = "_mcount_wrapper";