* We can't access below the stack pointer in the 32bit ABI and
* can access 288 bytes in the 64bit ABI
*/
- if (!(test_thread_flag(TIF_32BIT)))
+ if (!is_32bit_task())
usp -= 288;
return (void __user *) (usp - len);
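For context, the 288-byte figure is the red zone the 64-bit PowerPC ELF ABI guarantees below the stack pointer; it is commonly described as room for the 18 non-volatile GPRs plus the 18 non-volatile FPRs at 8 bytes each, i.e. (18 + 18) * 8 = 288. A hypothetical named constant (not part of this patch, the kernel uses the literal 288 above) makes that arithmetic explicit:

/* Illustrative only: 18 non-volatile GPRs + 18 non-volatile FPRs, 8 bytes each */
#define PPC64_RED_ZONE_REGS	(18 + 18)
#define PPC64_RED_ZONE_SIZE	(PPC64_RED_ZONE_REGS * 8)	/* == 288 bytes */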
static inline int is_compat_task(void)
{
- return test_thread_flag(TIF_32BIT);
+ return is_32bit_task();
}
#endif /* __KERNEL__ */
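The helper these hunks switch to lives in the powerpc headers (arch/powerpc/include/asm/thread_info.h); a rough sketch of the definition, which may differ slightly from the tree, is that it collapses to the old flag test on 64-bit kernels and to a constant otherwise:

#ifdef CONFIG_PPC64
#define is_32bit_task()	(test_thread_flag(TIF_32BIT))
#else
#define is_32bit_task()	(1)
#endif

On a 32-bit kernel every task is 32-bit, so the constant form lets the compiler discard the 64-bit-only branches at build time while the 64-bit case keeps the original TIF_32BIT behaviour.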
* the 64bit ABI has never had these issues, don't enable the workaround
* even if we have an executable stack.
*/
-# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+# define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \
(exec_stk == EXSTACK_DEFAULT) : 0)
#else
# define SET_PERSONALITY(ex) \
#endif /* !CONFIG_HUGETLB_PAGE */
#define VM_DATA_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
+ (is_32bit_task() ? \
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
/*
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
+ (is_32bit_task() ? \
VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
#include <asm-generic/getorder.h>
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
-#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
+#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
#define STACK_TOP_USER64 TASK_SIZE_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32
-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
+#define STACK_TOP (is_32bit_task() ? \
STACK_TOP_USER32 : STACK_TOP_USER64)
#define STACK_TOP_MAX STACK_TOP_USER64
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
- if (!test_thread_flag(TIF_32BIT))
+ if (!is_32bit_task())
audit_syscall_entry(AUDIT_ARCH_PPC64,
regs->gpr[0],
regs->gpr[3], regs->gpr[4],
{
int i;
- if (!vma || test_thread_flag(TIF_32BIT)) {
+ if (!vma || is_32bit_task()) {
printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
for (i=0; i<vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase +
dump_one_vdso_page(pg, upg);
}
}
- if (!vma || !test_thread_flag(TIF_32BIT)) {
+ if (!vma || !is_32bit_task()) {
printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
for (i=0; i<vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase +
return 0;
#ifdef CONFIG_PPC64
- if (test_thread_flag(TIF_32BIT)) {
+ if (is_32bit_task()) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
vdso_base = VDSO32_MBASE;
}
} else {
#ifdef CONFIG_PPC64
- if (!test_thread_flag(TIF_32BIT)) {
+ if (!is_32bit_task()) {
while (depth--) {
sp = user_getsp64(sp, first_frame);
if (!sp)