From: Stricted
Date: Wed, 21 Mar 2018 21:13:57 +0000 (+0100)
Subject: Merge tag 'v3.10.55' into update
X-Git-Url: https://git.stricted.de/?p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git;a=commitdiff_plain;h=b4350432998578500203d562d5b093c6a5beadd2;hp=-c

Merge tag 'v3.10.55' into update

This is the 3.10.55 stable release
---

b4350432998578500203d562d5b093c6a5beadd2
diff --combined Makefile
index dd2dba85aa9c,6141df04fcb5..89c5b4cc790f
--- a/Makefile
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
 VERSION = 3
 PATCHLEVEL = 10
- SUBLEVEL = 54
+ SUBLEVEL = 55
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
@@@ -373,9 -373,7 +373,9 @@@ KBUILD_CFLAGS   := -Wall -Wundef -Wstri
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
- 		   -fno-delete-null-pointer-checks
+ 		   -fno-delete-null-pointer-checks \
+		   -w
+
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS   := -D__ASSEMBLY__
diff --combined fs/namespace.c
index 83151b9915c7,00409add4d96..31df51c8b248
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@@ -21,10 -21,6 +21,10 @@@
 #include <linux/fs_struct.h>	/* get_fs_root et.al. */
 #include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
 #include
+//#define UMOUNT_LOG //enable kernel layer unmount log when unmount fail
+#ifdef UMOUNT_LOG
+#include
+#endif
 #include
 #include
 #include "pnode.h"
@@@ -136,130 -132,10 +136,130 @@@ void mnt_release_group_id(struct mount
 /*
  * vfsmount lock must be held for read
  */
+#ifdef UMOUNT_LOG
+#define UMOUNT_Partition "/emmc@usrdata"
+struct record_ref_count{
+	pid_t pid;
+	char name[TASK_COMM_LEN];
+	int count;
+	struct record_ref_count *next;
+};
+
+struct record_ref_count *ref_head = NULL;
+struct record_ref_count *ref_current = NULL;
+struct record_ref_count *ref_prev = NULL;
+int s_total_count = 0;
+#endif
 static inline void mnt_add_count(struct mount *mnt, int n)
 {
+#ifdef UMOUNT_LOG
+int print_link_list=0;
+#ifndef CONFIG_SMP
+	preempt_disable();
+#endif
+
+	if (strcmp(UMOUNT_Partition,mnt->mnt_devname)==0)
+	{
+		//if (strcmp("mobile_log_d",current->comm)!=0)
+		{
+			//if (current->pid < 100) //((current->pid < 70) && (current->pid > 60))
+			{
+				//printk("Ahsin n=%d current->pid=%d name=%s \n",n,current->pid,current->comm);
+				spin_lock(&mnt_id_lock);
+				if (ref_head == NULL) //linked list head (start)
+				{
+					printk("Ahsin link list init mnt_get_count=%d \n",mnt_get_count(mnt));
+
+					ref_current = kmalloc(sizeof(struct record_ref_count), GFP_KERNEL);
+					if (ref_current == NULL)
+						printk("Ahsin can't allocate memory for ref_current /n");
+
+					ref_current->next = NULL;
+					ref_current->pid = current->pid;
+					strncpy(ref_current->name, current->comm, TASK_COMM_LEN -1);
+					ref_current->name[TASK_COMM_LEN -1] = '\0';
+					ref_current->count = n;
+					s_total_count = s_total_count + n;
+					ref_head = ref_current;
+
+					printk("Ahsin ref_head == NULL pid=%d name=%s counter=%d n=%d \n",ref_current->pid,ref_current->name,ref_current->count,n);
+				}
+				else //check exist first and then add linked list or modify counter
+				{
+					ref_prev = ref_head;
+					while(ref_prev != NULL)
+					{
+						//printk("Ahsin PID= %d, Name= %s, Count= %d n=%d current->pid=%d \n", ref_prev->pid, ref_prev->name, ref_prev->count,n,current->pid);
+						if (strcmp(ref_prev->name,current->comm)==0) //(ref_prev->pid==current->pid)//exist and find, modify counter
+						{
+							ref_prev->count = ref_prev->count + n;
+							s_total_count = s_total_count + n;
+							//printk("Ahsin (ref_prev->name,current->comm) pid=%d name=%s counter=%d n=%d \n",ref_prev->pid,ref_prev->name,ref_prev->count,n);
+							break;
+						}
+						else
+						{
+							if (ref_prev->next != NULL)
+								ref_prev = ref_prev->next;
+							else
+							{ // end of link list
+								ref_current = kmalloc(sizeof(struct record_ref_count), GFP_KERNEL);
+								if (ref_current == NULL)
+									printk("Ahsin can't allocate memory for ref_prev /n");
+
+								ref_current->next = NULL;
+								ref_current->pid = current->pid;
+								strncpy(ref_current->name, current->comm, TASK_COMM_LEN -1);
+								ref_current->name[TASK_COMM_LEN -1] = '\0';
+								ref_current->count = n;
+								s_total_count = s_total_count + n;
+								ref_prev->next = ref_current;
+								//printk("Ahsin new node(end of link list) pid=%d name=%s counter=%d n=%d \n",ref_current->pid,ref_current->name,ref_current->count,n);
+								break;
+
+							}
+						}
+					}
+				}
+				spin_unlock(&mnt_id_lock);
+			}
+		}
+	}
+
+#ifndef CONFIG_SMP
+	preempt_enable();
+#endif
+#endif
 #ifdef CONFIG_SMP
 	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
+#ifdef UMOUNT_LOG
+#if 0
+	if (strcmp(UMOUNT_Partition,mnt->mnt_devname)==0)
+	{
+		if (strcmp("mobile_log_d",current->comm)!=0)
+		{
+			printk("Ahsin s_total_count=%d mnt_get_count=%d n=%d current->pid=%d \n",s_total_count,mnt_get_count(mnt),n,current->pid);
+			//if (current->pid < 100)
+			{
+				// print linked list
+				spin_lock(&mnt_id_lock);
+				ref_current = ref_head;
+				while(ref_current != NULL)
+				{
+					if (ref_current->count)
+					{
+						print_link_list = print_link_list + ref_current->count;
+						//printk("Ahsin PID= %d, Name = %s, Count= %d \n", ref_current->pid, ref_current->name, ref_current->count);
+					}
+					ref_current = ref_current->next;
+				}
+				spin_unlock(&mnt_id_lock);
+				printk("Ahsin print_link_list=%d \n",print_link_list);
+			}
+		}
+	}
+#endif
+#endif
 #else
 	preempt_disable();
 	mnt->mnt_count += n;
@@@ -307,12 -183,6 +307,12 @@@ static struct mount *alloc_vfsmnt(cons
 		if (!mnt->mnt_pcp)
 			goto out_free_devname;
+#ifdef UMOUNT_LOG
+		if (strcmp(UMOUNT_Partition,mnt->mnt_devname)==0)
+		{
+			printk("Ahsin alloc_vfsmnt current->pid=%d name=%s \n",current->pid,current->comm);
+		}
+#endif
 		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
 #else
 		mnt->mnt_count = 1;
@@@ -958,8 -828,21 +958,21 @@@ static struct mount *clone_mnt(struct m
 	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
 	/* Don't allow unprivileged users to change mount flags */
- 	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
- 		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+ 	if (flag & CL_UNPRIVILEGED) {
+ 		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+
+ 		if (mnt->mnt.mnt_flags & MNT_READONLY)
+ 			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
+ 		if (mnt->mnt.mnt_flags & MNT_NODEV)
+ 			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;
+
+ 		if (mnt->mnt.mnt_flags & MNT_NOSUID)
+ 			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;
+
+ 		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
+ 			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
+ 	}
 	atomic_inc(&sb->s_active);
 	mnt->mnt.mnt_sb = sb;
@@@ -1438,9 -1321,7 +1451,9 @@@ SYSCALL_DEFINE2(umount, char __user *,
 	struct mount *mnt;
 	int retval;
 	int lookup_flags = 0;
- 
+#ifdef UMOUNT_LOG
+	int total_value =0;
+#endif
 	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
 		return -EINVAL;
@@@ -1461,32 -1342,6 +1474,32 @@@
 		goto dput_and_out;
 
 	retval = do_umount(mnt, flags);
+#ifdef UMOUNT_LOG
+	{
+		printk("Ahsin do_umount retval=%d \n",retval);
+		//do_umount success: 0, do_umount busy: -16
+		//if do_umount fail, need to dump the link list here
+
+		if(retval)
+			printk("Ahsin do_umount fail; mnt_get_count=%d mnt->mnt_devname=%s\n",mnt_get_count(mnt),mnt->mnt_devname);
+		else
+			printk("Ahsin do_umount success; mnt_get_count=%d mnt->mnt_devname=%s\n",mnt_get_count(mnt),mnt->mnt_devname);
+
+		// print linked list
+		spin_lock(&mnt_id_lock);
+		ref_current = ref_head;
+		while(ref_current != NULL)
+		{
+			total_value = total_value + ref_current->count;
+
+			if (ref_current->count)
+				printk("Ahsin PID= %d, Name = %s, Count= %d \n", ref_current->pid, ref_current->name, ref_current->count);
+			ref_current = ref_current->next;
+		}
+		spin_unlock(&mnt_id_lock);
+		printk("Ahsin total_value=%d \n",total_value);
+	}
+#endif
 dput_and_out:
 	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
 	dput(path.dentry);
@@@ -1922,9 -1777,6 +1935,6 @@@ static int change_mount_flags(struct vf
 	if (readonly_request == __mnt_is_readonly(mnt))
 		return 0;
 
- 	if (mnt->mnt_flags & MNT_LOCK_READONLY)
- 		return -EPERM;
- 
 	if (readonly_request)
 		error = mnt_make_readonly(real_mount(mnt));
 	else
@@@ -1950,6 -1802,33 +1960,33 @@@ static int do_remount(struct path *path
 	if (path->dentry != path->mnt->mnt_root)
 		return -EINVAL;
 
+ 	/* Don't allow changing of locked mnt flags.
+ 	 *
+ 	 * No locks need to be held here while testing the various
+ 	 * MNT_LOCK flags because those flags can never be cleared
+ 	 * once they are set.
+ 	 */
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+ 	    !(mnt_flags & MNT_READONLY)) {
+ 		return -EPERM;
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+ 	    !(mnt_flags & MNT_NODEV)) {
+ 		return -EPERM;
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
+ 	    !(mnt_flags & MNT_NOSUID)) {
+ 		return -EPERM;
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
+ 	    !(mnt_flags & MNT_NOEXEC)) {
+ 		return -EPERM;
+ 	}
+ 	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
+ 	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
+ 		return -EPERM;
+ 	}
+ 
 	err = security_sb_remount(sb, data);
 	if (err)
 		return err;
@@@ -1963,7 -1842,7 +2000,7 @@@
 	err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
 		br_write_lock(&vfsmount_lock);
- 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
+ 		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
 		br_write_unlock(&vfsmount_lock);
 	}
@@@ -2149,7 -2028,7 +2186,7 @@@ static int do_new_mount(struct path *pa
 	 */
 	if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
 		flags |= MS_NODEV;
- 		mnt_flags |= MNT_NODEV;
+ 		mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
 	}
 }
@@@ -2467,6 -2346,14 +2504,14 @@@ long do_mount(const char *dev_name, con
 	if (flags & MS_RDONLY)
 		mnt_flags |= MNT_READONLY;
 
+ 	/* The default atime for remount is preservation */
+ 	if ((flags & MS_REMOUNT) &&
+ 	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
+ 		       MS_STRICTATIME)) == 0)) {
+ 		mnt_flags &= ~MNT_ATIME_MASK;
+ 		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
+ 	}
+ 
 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
 		   MS_STRICTATIME);
diff --combined fs/proc/array.c
index 05dff1cbd5ed,09f0d9c374a3..6ed95802239d
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@@ -168,16 -168,16 +168,16 @@@ static inline void task_state(struct se
 	int g;
 	struct fdtable *fdt = NULL;
 	const struct cred *cred;
- 	pid_t ppid, tpid;
+ 	pid_t ppid = 0, tpid = 0;
+ 	struct task_struct *leader = NULL;
 
 	rcu_read_lock();
- 	ppid = pid_alive(p) ?
- 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
- 	tpid = 0;
 	if (pid_alive(p)) {
 		struct task_struct *tracer = ptrace_parent(p);
 		if (tracer)
 			tpid = task_pid_nr_ns(tracer, ns);
+ 		ppid = task_tgid_nr_ns(rcu_dereference(p->real_parent), ns);
+ 		leader = p->group_leader;
 	}
 	cred = get_task_cred(p);
 	seq_printf(m,
@@@ -189,7 -189,7 +189,7 @@@
 		"Uid:\t%d\t%d\t%d\t%d\n"
 		"Gid:\t%d\t%d\t%d\t%d\n",
 		get_task_state(p),
- 		task_tgid_nr_ns(p, ns),
+ 		leader ? task_pid_nr_ns(leader, ns) : 0,
 		pid_nr_ns(pid, ns),
 		ppid, tpid,
 		from_kuid_munged(user_ns, cred->uid),
@@@ -304,15 -304,11 +304,11 @@@ static void render_cap_t(struct seq_fil
 	seq_puts(m, header);
 	CAP_FOR_EACH_U32(__capi) {
 		seq_printf(m, "%08x",
- 			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+ 			   a->cap[CAP_LAST_U32 - __capi]);
 	}
 	seq_putc(m, '\n');
 }
 
- /* Remove non-existent capabilities */
- #define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
- 				CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
- 
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
 {
 	const struct cred *cred;
@@@ -326,11 -322,6 +322,6 @@@
 	cap_bset = cred->cap_bset;
 	rcu_read_unlock();
 
- 	NORM_CAPS(cap_inheritable);
- 	NORM_CAPS(cap_permitted);
- 	NORM_CAPS(cap_effective);
- 	NORM_CAPS(cap_bset);
- 
 	render_cap_t(m, "CapInh:\t", &cap_inheritable);
 	render_cap_t(m, "CapPrm:\t", &cap_permitted);
 	render_cap_t(m, "CapEff:\t", &cap_effective);
diff --combined kernel/audit.c
index 6411e3711558,4dd7529b0845..74f8647fef26
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@@ -372,9 -372,8 +372,9 @@@ static void audit_printk_skb(struct sk_
 	char *data = nlmsg_data(nlh);
 
 	if (nlh->nlmsg_type != AUDIT_EOE) {
- 		if (printk_ratelimit())
+ 		if (printk_ratelimit()){
 			printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data);
+ 		}
 		else
 			audit_log_lost("printk limit exceeded\n");
 	}
@@@ -1413,7 -1412,7 +1413,7 @@@ void audit_log_cap(struct audit_buffer
 	audit_log_format(ab, " %s=", prefix);
 	CAP_FOR_EACH_U32(i) {
 		audit_log_format(ab, "%08x",
- 				 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+ 				 cap->cap[CAP_LAST_U32 - i]);
 	}
 }
diff --combined kernel/smp.c
index 80c8653a22e9,88797cb0d23a..7d1bf67e3ca7
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@@ -270,51 -270,6 +270,51 @@@ int smp_call_function_single(int cpu, s
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/* This function can be used by MTK Monitor only */
+/* Dont use this function directly */
+int mtk_smp_call_function_single(int cpu, smp_call_func_t func, void *info,
+				 int wait)
+{
+	struct call_single_data d = {
+		.flags = 0,
+	};
+	unsigned long flags;
+	int this_cpu;
+	int err = 0;
+
+	/*
+	 * prevent preemption and reschedule on another processor,
+	 * as well as CPU removal
+	 */
+	this_cpu = get_cpu();
+
+	if (cpu == this_cpu) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+	} else {
+		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
+			struct call_single_data *data = &d;
+
+			if (!wait)
+				data = &__get_cpu_var(csd_data);
+
+			csd_lock(data);
+
+			cpumask_set_cpu(cpu, data->cpumask);
+			data->func = func;
+			data->info = info;
+			generic_exec_single(cpu, data, wait);
+		} else {
+			err = -ENXIO;	/* CPU not online */
+		}
+	}
+
+	put_cpu();
+
+	return err;
+}
+
 /*
  * smp_call_function_any - Run a function on any of the given cpus
  * @mask: The mask of cpus it can run on.
@@@ -703,7 -658,7 +703,7 @@@ void on_each_cpu_cond(bool (*cond_func)
 		if (cond_func(cpu, info)) {
 			ret = smp_call_function_single(cpu, func, info, wait);
- 			WARN_ON_ONCE(!ret);
+ 			WARN_ON_ONCE(ret);
 		}
 	preempt_enable();
 }
diff --combined kernel/trace/ring_buffer.c
index b62ff04ab05a,5efbc122e5ce..2f48d4d90b83
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@@ -27,11 -27,6 +27,11 @@@
 #include
 
+#ifdef CONFIG_MTK_EXTMEM
+extern void* extmem_malloc_page_align(size_t bytes);
+extern void extmem_free(void* mem);
+#endif
+
 static void update_pages_handler(struct work_struct *work);
 
 /*
@@@ -402,11 -397,7 +402,11 @@@ size_t ring_buffer_page_len(void *page
  */
 static void free_buffer_page(struct buffer_page *bpage)
 {
+#ifdef CONFIG_MTK_EXTMEM
+	extmem_free((void*) bpage->page);
+#else
 	free_page((unsigned long)bpage->page);
+#endif
 	kfree(bpage);
 }
@@@ -1121,9 -1112,7 +1121,9 @@@ static int __rb_allocate_pages(int nr_p
 	struct buffer_page *bpage, *tmp;
 
 	for (i = 0; i < nr_pages; i++) {
+#if !defined (CONFIG_MTK_EXTMEM)
 		struct page *page;
+#endif
 		/*
 		 * __GFP_NORETRY flag makes sure that the allocation fails
 		 * gracefully without invoking oom-killer and the system is
@@@ -1137,19 -1126,11 +1137,19 @@@
 		list_add(&bpage->list, pages);
 
+#ifdef CONFIG_MTK_EXTMEM
+		bpage->page = extmem_malloc_page_align(PAGE_SIZE);
+		if(bpage->page == NULL) {
+			pr_err("%s[%s] ext memory alloc failed!!!\n", __FILE__, __FUNCTION__);
+			goto free_pages;
+		}
+#else
 		page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL |
 					__GFP_NORETRY, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
+#endif
 		rb_init_page(bpage->page);
 	}
@@@ -1194,9 -1175,7 +1194,9 @@@ rb_allocate_cpu_buffer(struct ring_buff
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
+#if !defined (CONFIG_MTK_EXTMEM)
 	struct page *page;
+#endif
 	int ret;
 
 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@@ -1222,17 -1201,10 +1222,17 @@@
 	rb_check_bpage(cpu_buffer, bpage);
 
 	cpu_buffer->reader_page = bpage;
+
+#ifdef CONFIG_MTK_EXTMEM
+	bpage->page = extmem_malloc_page_align(PAGE_SIZE);
+	if(bpage->page == NULL)
+		goto fail_free_reader;
+#else
 	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
 	if (!page)
 		goto fail_free_reader;
 	bpage->page = page_address(page);
+#endif
 	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@@ -2008,7 -1980,7 +2008,7 @@@ rb_add_time_stamp(struct ring_buffer_ev
 /**
  * rb_update_event - update event type and data
- * @event: the even to update
+ * @event: the event to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
  *
@@@ -3381,21 -3353,16 +3381,16 @@@ static void rb_iter_reset(struct ring_b
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 
 	/* Iterator usage is expected to have record disabled */
- 	if (list_empty(&cpu_buffer->reader_page->list)) {
- 		iter->head_page = rb_set_head_page(cpu_buffer);
- 		if (unlikely(!iter->head_page))
- 			return;
- 		iter->head = iter->head_page->read;
- 	} else {
- 		iter->head_page = cpu_buffer->reader_page;
- 		iter->head = cpu_buffer->reader_page->read;
- 	}
+ 	iter->head_page = cpu_buffer->reader_page;
+ 	iter->head = cpu_buffer->reader_page->read;
+ 
+ 	iter->cache_reader_page = iter->head_page;
+ 	iter->cache_read = iter->head;
+ 
 	if (iter->head)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
- 	iter->cache_reader_page = cpu_buffer->reader_page;
- 	iter->cache_read = cpu_buffer->read;
 }
 
 /**
@@@ -3788,12 -3755,14 +3783,14 @@@ rb_iter_peek(struct ring_buffer_iter *i
 		return NULL;
 
 	/*
- 	 * We repeat when a time extend is encountered.
- 	 * Since the time extend is always attached to a data event,
- 	 * we should never loop more than once.
- 	 * (We never hit the following condition more than twice).
+ 	 * We repeat when a time extend is encountered or we hit
+ 	 * the end of the page. Since the time extend is always attached
+ 	 * to a data event, we should never loop more than three times.
+ 	 * Once for going to next page, once on time extend, and
+ 	 * finally once to get the event.
+ 	 * (We never hit the following condition more than thrice).
 	 */
- 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
+ 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
 		return NULL;
 
 	if (rb_per_cpu_empty(cpu_buffer))
diff --combined net/bluetooth/rfcomm/core.c
index 0c77476d33d2,19ba192e9dbf..3ca5e40fe390
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@@ -436,6 -436,7 +436,6 @@@ static int __rfcomm_dlc_close(struct rf
 
 	switch (d->state) {
 	case BT_CONNECT:
- 	case BT_CONFIG:
 		if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
 			set_bit(RFCOMM_AUTH_REJECT, &d->flags);
 			rfcomm_schedule();
@@@ -1856,10 -1857,13 +1856,13 @@@ static struct rfcomm_session *rfcomm_pr
 	/* Get data directly from socket receive queue without copying it. */
 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
 		skb_orphan(skb);
- 		if (!skb_linearize(skb))
+ 		if (!skb_linearize(skb)) {
 			s = rfcomm_recv_frame(s, skb);
- 		else
+ 			if (!s)
+ 				break;
+ 		} else {
 			kfree_skb(skb);
+ 		}
 	}
 
 	if (s && (sk->sk_state == BT_CLOSED))
diff --combined net/bluetooth/sco.c
index 3178c7b4a171,c9ae6b703c13..de9c955b247a
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@@ -158,7 -158,6 +158,7 @@@ static int sco_connect(struct sock *sk
 {
 	bdaddr_t *src = &bt_sk(sk)->src;
 	bdaddr_t *dst = &bt_sk(sk)->dst;
+	__u16 pkt_type = sco_pi(sk)->pkt_type;
 	struct sco_conn *conn;
 	struct hci_conn *hcon;
 	struct hci_dev *hdev;
@@@ -174,13 -173,11 +174,13 @@@
 
 	if (lmp_esco_capable(hdev) && !disable_esco)
 		type = ESCO_LINK;
- 	else
+ 	else {
 		type = SCO_LINK;
+		pkt_type &= SCO_ESCO_MASK;
+	}
 
- 	hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
- 			   HCI_AT_NO_BONDING);
+ 	hcon = hci_connect(hdev, type, pkt_type, dst, BDADDR_BREDR,
+ 			   BT_SECURITY_LOW, HCI_AT_NO_BONDING);
 	if (IS_ERR(hcon)) {
 		err = PTR_ERR(hcon);
 		goto done;
@@@ -448,21 -445,17 +448,21 @@@ static int sco_sock_create(struct net *
 	return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
- 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+ 	struct sockaddr_sco sa;
 	struct sock *sk = sock->sk;
- 	int err = 0;
+ 	int len, err = 0;
 
- 	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+ 	BT_DBG("sk %p %pMR", sk, &sa.sco_bdaddr);
 
 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
@@@ -475,8 -468,7 +475,8 @@@
 		goto done;
 	}
 
- 	bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+ 	bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+	sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
 	sk->sk_state = BT_BOUND;
 
@@@ -487,34 -479,26 +487,34 @@@ done
 
 static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
 {
- 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
- 	int err;
+ 	struct sockaddr_sco sa;
+ 	int len, err;
 
 	BT_DBG("sk %p", sk);
 
- 	if (alen < sizeof(struct sockaddr_sco) ||
- 	    addr->sa_family != AF_BLUETOOTH)
+ 	if (!addr || addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
- 	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
- 		return -EBADFD;
- 
- 	if (sk->sk_type != SOCK_SEQPACKET)
- 		return -EINVAL;
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
 
 	lock_sock(sk);
 
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+		err = -EBADFD;
+		goto done;
+	}
+
 	/* Set destination address and psm */
- 	bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ 	bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+	sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
 	err = sco_connect(sk);
 	if (err)
@@@ -638,7 -622,6 +638,7 @@@ static int sco_sock_getname(struct sock
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
 	else
 		bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+	sa->sco_pkt_type = sco_pi(sk)->pkt_type;
 
 	return 0;
 }
@@@ -875,7 -858,8 +875,8 @@@ static int sco_sock_shutdown(struct soc
 		sco_sock_clear_timer(sk);
 		__sco_sock_close(sk);
 
- 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+ 		    !(current->flags & PF_EXITING))
 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
 	}
@@@ -895,7 -879,8 +896,8 @@@ static int sco_sock_release(struct sock
 
 	sco_sock_close(sk);
 
- 	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
+ 	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+ 	    !(current->flags & PF_EXITING)) {
 		lock_sock(sk);
 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
 		release_sock(sk);
diff --combined security/commoncap.c
index 5870fdc224b4,c9219a66b7c6..0405522995c5
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@@ -31,10 -31,6 +31,10 @@@
 #include
 #include
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@@ -82,13 -78,6 +82,13 @@@ int cap_capable(const struct cred *cred
 {
 	struct user_namespace *ns = targ_ns;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+		return 0;
+	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+		return 0;
+#endif
+
 	/* See if cred has the capability in the target user namespace
 	 * by examining the target user namespace and all of the target
 	 * user namespace's parents.
@@@ -432,6 -421,9 +432,9 @@@ int get_vfs_caps_from_disk(const struc
 		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
 	}
 
+ 	cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+ 	cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+ 
 	return 0;
 }
 
diff --combined sound/soc/codecs/max98090.c
index 9b7746c9546f,76bfeb3c3e30..76bfeb3c3e30
mode 100755,100644..100755
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@@ -2234,7 -2234,7 +2234,7 @@@ static int max98090_probe(struct snd_so
 
 	/* Register for interrupts */
 	dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
 
- 	ret = request_threaded_irq(max98090->irq, NULL,
+ 	ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
 		max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 		"max98090_interrupt", codec);
 	if (ret < 0) {
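
The sco.c hunks above replace a direct cast of the caller's sockaddr pointer with a bounded copy into a zeroed local struct, so a short legacy-sized address still parses and the newer sco_pkt_type field reads as zero when the caller did not supply it. A minimal userspace C sketch of that pattern follows; it is not kernel code, and demo_addr and handle_bind are invented names for illustration only:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for sockaddr_sco: family, address, plus a newer pkt_type field. */
	struct demo_addr {
		uint16_t family;
		uint8_t  bdaddr[6];
		uint16_t pkt_type;	/* absent from an old, shorter caller buffer */
	};

	static int handle_bind(const void *addr, size_t alen)
	{
		struct demo_addr sa;
		size_t len;

		if (addr == NULL || alen < sizeof(sa.family))
			return -1;	/* the kernel returns -EINVAL here */

		/* Zero first so any field the caller did not supply reads as 0,
		 * then copy at most sizeof(sa) bytes, mirroring the kernel's
		 * len = min_t(unsigned int, sizeof(sa), alen); memcpy(&sa, addr, len);
		 */
		memset(&sa, 0, sizeof(sa));
		len = alen < sizeof(sa) ? alen : sizeof(sa);
		memcpy(&sa, addr, len);

		/* Read only from the bounded local copy, never from addr directly. */
		printf("family=%u pkt_type=%u\n", (unsigned)sa.family, (unsigned)sa.pkt_type);
		return 0;
	}

	int main(void)
	{
		/* A short, legacy-sized address: pkt_type is safely read as 0. */
		uint8_t short_addr[4] = { 31, 0, 0xAA, 0xBB };	/* 31 == AF_BLUETOOTH */
		return handle_bind(short_addr, sizeof(short_addr));
	}

The design point is that the callee never trusts alen to match sizeof(struct sockaddr_sco): reading only from the bounded local copy prevents an over-read past a short user buffer while remaining compatible with callers that pass the older, smaller structure.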