[RISCV][ASAN] implementation of ThreadSelf for riscv64

[6/11] patch series to port ASAN for riscv64

Depends On D87574

Reviewed By: eugenis

Differential Revision: https://reviews.llvm.org/D87575
Anatoly Parshintsev 2020-09-22 22:27:40 -07:00 committed by Vitaly Buka
parent aa1b1d35cb
commit 00f6ebef6e
1 changed file with 118 additions and 113 deletions


@@ -16,6 +16,13 @@
 #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
     SANITIZER_OPENBSD || SANITIZER_SOLARIS
 
+#include <dlfcn.h>  // for dlsym()
+#include <link.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/resource.h>
+#include <syslog.h>
+
 #include "sanitizer_allocator_internal.h"
 #include "sanitizer_atomic.h"
 #include "sanitizer_common.h"
@@ -28,20 +35,13 @@
 #include "sanitizer_placement_new.h"
 #include "sanitizer_procmaps.h"
 
-#include <dlfcn.h>  // for dlsym()
-#include <link.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/resource.h>
-#include <syslog.h>
-
 #if !defined(ElfW)
 #define ElfW(type) Elf_##type
 #endif
 
 #if SANITIZER_FREEBSD
-#include <pthread_np.h>
 #include <osreldate.h>
+#include <pthread_np.h>
 #include <sys/sysctl.h>
 #define pthread_getattr_np pthread_attr_get_np
 #endif
@@ -52,9 +52,9 @@
 #endif
 
 #if SANITIZER_NETBSD
+#include <lwp.h>
 #include <sys/sysctl.h>
 #include <sys/tls.h>
-#include <lwp.h>
 #endif
 
 #if SANITIZER_SOLARIS
@ -83,8 +83,8 @@ struct __sanitizer::linux_dirent {
namespace __sanitizer { namespace __sanitizer {
SANITIZER_WEAK_ATTRIBUTE int SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
real_sigaction(int signum, const void *act, void *oldact); void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) { int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO #if !SANITIZER_GO
@ -105,7 +105,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable. // Find the mapping that contains a stack variable.
MemoryMappingLayout proc_maps(/*cache_enabled*/true); MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error()) { if (proc_maps.Error()) {
*stack_top = *stack_bottom = 0; *stack_top = *stack_bottom = 0;
return; return;
@ -113,7 +113,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
MemoryMappedSegment segment; MemoryMappedSegment segment;
uptr prev_end = 0; uptr prev_end = 0;
while (proc_maps.Next(&segment)) { while (proc_maps.Next(&segment)) {
if ((uptr)&rl < segment.end) break; if ((uptr)&rl < segment.end)
break;
prev_end = segment.end; prev_end = segment.end;
} }
CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end); CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
@@ -121,7 +122,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
     // Get stacksize from rlimit, but clip it so that it does not overlap
     // with other mappings.
     uptr stacksize = rl.rlim_cur;
-    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
+    if (stacksize > segment.end - prev_end)
+      stacksize = segment.end - prev_end;
     // When running with unlimited stack size, we still want to set some limit.
     // The unlimited stack size is caused by 'ulimit -s unlimited'.
     // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
@ -160,7 +162,7 @@ bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv"); void *f = dlsym(RTLD_NEXT, "setenv");
if (!f) if (!f)
return false; return false;
typedef int(*setenv_ft)(const char *name, const char *value, int overwrite); typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f; setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f)); CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f)); internal_memcpy(&setenv_f, &f, sizeof(f));
@@ -194,27 +196,27 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
 static uptr g_tls_size;
 
 #ifdef __i386__
-# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
+#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
 #else
-# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
+#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
 #endif
 
 #if CHECK_GET_TLS_STATIC_INFO_VERSION
-# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
 #else
-# define DL_INTERNAL_FUNCTION
+#define DL_INTERNAL_FUNCTION
 #endif
 
 namespace {
 struct GetTlsStaticInfoCall {
-  typedef void (*get_tls_func)(size_t*, size_t*);
+  typedef void (*get_tls_func)(size_t *, size_t *);
 };
 struct GetTlsStaticInfoRegparmCall {
-  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
+  typedef void (*get_tls_func)(size_t *, size_t *) DL_INTERNAL_FUNCTION;
 };
 
 template <typename T>
-void CallGetTls(void* ptr, size_t* size, size_t* align) {
+void CallGetTls(void *ptr, size_t *size, size_t *align) {
   typename T::get_tls_func get_tls;
   CHECK_EQ(sizeof(get_tls), sizeof(ptr));
   internal_memcpy(&get_tls, &ptr, sizeof(ptr));
@@ -251,17 +253,17 @@ void InitTlsSize() {
   // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal
   // function in 2.27 and later.
   if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
-    CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
-                                            &tls_size, &tls_align);
+    CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr, &tls_size,
+                                            &tls_align);
   else
-    CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
-                                     &tls_size, &tls_align);
+    CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr, &tls_size,
+                                     &tls_align);
 
   if (tls_align < kStackAlign)
     tls_align = kStackAlign;
   g_tls_size = RoundUpTo(tls_size, tls_align);
 }
 #else
-void InitTlsSize() { }
+void InitTlsSize() {}
 #endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
         // !SANITIZER_NETBSD && !SANITIZER_SOLARIS
@@ -323,19 +325,19 @@ uptr ThreadDescriptorSize() {
 // The offset at which pointer to self is located in the thread descriptor.
 const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
 
-uptr ThreadSelfOffset() {
-  return kThreadSelfOffset;
-}
+uptr ThreadSelfOffset() { return kThreadSelfOffset; }
 
-#if defined(__mips__) || defined(__powerpc64__)
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
 // TlsPreTcbSize includes size of struct pthread_descr and size of tcb
 // head structure. It lies before the static tls blocks.
 static uptr TlsPreTcbSize() {
-# if defined(__mips__)
+#if defined(__mips__)
   const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
   const uptr kTcbHead = 88;  // sizeof (tcbhead_t)
-# endif
+#elif SANITIZER_RISCV64
+  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
+#endif
   const uptr kTlsAlign = 16;
   const uptr kTlsPreTcbSize =
       RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
@@ -345,28 +347,37 @@ static uptr TlsPreTcbSize() {
 
 uptr ThreadSelf() {
   uptr descr_addr;
-# if defined(__i386__)
+#if defined(__i386__)
   asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
   asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__mips__)
+#elif defined(__mips__)
   // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
   // points to the end of the TCB + 0x7000. The pthread_descr structure is
   // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
   // TCB and the size of pthread_descr.
   const uptr kTlsTcbOffset = 0x7000;
   uptr thread_pointer;
-  asm volatile(".set push;\
+  asm volatile(
+      ".set push;\
 .set mips64r2;\
 rdhwr %0,$29;\
-.set pop" : "=r" (thread_pointer));
+.set pop"
+      : "=r"(thread_pointer));
   descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__) || defined(__arm__)
+#elif defined(__aarch64__) || defined(__arm__)
   descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
                ThreadDescriptorSize();
-# elif defined(__s390__)
+#elif SANITIZER_RISCV64
+  uptr tcb_end;
+  asm volatile("mv %0, tp;\n" : "=r"(tcb_end));
+  // https://github.com/riscv/riscv-elf-psabi-doc/issues/53
+  const uptr kTlsTcbOffset = 0x800;
+  descr_addr =
+      reinterpret_cast<uptr>(tcb_end - kTlsTcbOffset - TlsPreTcbSize());
+#elif defined(__s390__)
   descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
   // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
   // points to the end of the TCB + 0x7000. The pthread_descr structure is
   // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
@@ -375,9 +386,9 @@ uptr ThreadSelf() {
   uptr thread_pointer;
   asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
   descr_addr = thread_pointer - TlsPreTcbSize();
-# else
-# error "unsupported CPU arch"
-# endif
+#else
+#error "unsupported CPU arch"
+#endif
   return descr_addr;
 }
 #endif  // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
@ -385,31 +396,27 @@ uptr ThreadSelf() {
#if SANITIZER_FREEBSD #if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() { static void **ThreadSelfSegbase() {
void **segbase = 0; void **segbase = 0;
# if defined(__i386__) #if defined(__i386__)
// sysarch(I386_GET_GSBASE, segbase); // sysarch(I386_GET_GSBASE, segbase);
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase)); __asm __volatile("mov %%gs:0, %0" : "=r"(segbase));
# elif defined(__x86_64__) #elif defined(__x86_64__)
// sysarch(AMD64_GET_FSBASE, segbase); // sysarch(AMD64_GET_FSBASE, segbase);
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase)); __asm __volatile("movq %%fs:0, %0" : "=r"(segbase));
# else #else
# error "unsupported CPU arch" #error "unsupported CPU arch"
# endif #endif
return segbase; return segbase;
} }
uptr ThreadSelf() { uptr ThreadSelf() { return (uptr)ThreadSelfSegbase()[2]; }
return (uptr)ThreadSelfSegbase()[2];
}
#endif // SANITIZER_FREEBSD #endif // SANITIZER_FREEBSD
#if SANITIZER_NETBSD #if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() { static struct tls_tcb *ThreadSelfTlsTcb() {
return (struct tls_tcb *)_lwp_getprivate(); return (struct tls_tcb *)_lwp_getprivate();
} }
uptr ThreadSelf() { uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }
return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
}
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) { int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
const Elf_Phdr *hdr = info->dlpi_phdr; const Elf_Phdr *hdr = info->dlpi_phdr;
@@ -417,7 +424,7 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
 
   for (; hdr != last_hdr; ++hdr) {
     if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
-      *(uptr*)data = hdr->p_memsz;
+      *(uptr *)data = hdr->p_memsz;
       break;
     }
   }
@@ -428,21 +435,21 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
 #if !SANITIZER_GO
 static void GetTls(uptr *addr, uptr *size) {
 #if SANITIZER_LINUX && !SANITIZER_ANDROID
-# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
   *addr = ThreadSelf();
   *size = GetTlsSize();
   *addr -= *size;
   *addr += ThreadDescriptorSize();
-# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
-    || defined(__arm__)
+#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
+    defined(__arm__)
   *addr = ThreadSelf();
   *size = GetTlsSize();
-# else
+#else
   *addr = 0;
   *size = 0;
-# endif
+#endif
 #elif SANITIZER_FREEBSD
-  void** segbase = ThreadSelfSegbase();
+  void **segbase = ThreadSelfSegbase();
   *addr = 0;
   *size = 0;
   if (segbase != 0) {
@@ -450,12 +457,12 @@ static void GetTls(uptr *addr, uptr *size) {
     // tls_size = round(tls_static_space, tcbalign);
     // dtv = segbase[1];
     // dtv[2] = segbase - tls_static_space;
-    void **dtv = (void**) segbase[1];
-    *addr = (uptr) dtv[2];
-    *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
+    void **dtv = (void **)segbase[1];
+    *addr = (uptr)dtv[2];
+    *size = (*addr == 0) ? 0 : ((uptr)segbase[0] - (uptr)dtv[2]);
   }
 #elif SANITIZER_NETBSD
-  struct tls_tcb * const tcb = ThreadSelfTlsTcb();
+  struct tls_tcb *const tcb = ThreadSelfTlsTcb();
   *addr = 0;
   *size = 0;
   if (tcb != 0) {
@ -479,7 +486,7 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0; *addr = 0;
*size = 0; *size = 0;
#else #else
# error "Unknown OS" #error "Unknown OS"
#endif #endif
} }
#endif #endif
@@ -538,7 +545,7 @@ struct DlIteratePhdrData {
 };
 
 static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
-  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
+  DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
   InternalScopedString module_name(kMaxPathLength);
   if (data->first) {
     data->first = false;
@@ -558,8 +565,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
       uptr cur_end = cur_beg + phdr->p_memsz;
       bool executable = phdr->p_flags & PF_X;
       bool writable = phdr->p_flags & PF_W;
-      cur_module.addAddressRange(cur_beg, cur_end, executable,
-                                 writable);
+      cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
     }
   }
   data->modules->push_back(cur_module);
@@ -583,7 +589,7 @@ static bool requiresProcmaps() {
 }
 
 static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
-  MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
+  MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
   memory_mapping.DumpListOfModules(modules);
 }
 
@@ -635,15 +641,12 @@ uptr GetRSS() {
   // We need the second number which is RSS in pages.
   char *pos = buf;
   // Skip the first number.
-  while (*pos >= '0' && *pos <= '9')
-    pos++;
+  while (*pos >= '0' && *pos <= '9') pos++;
   // Skip whitespaces.
-  while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
-    pos++;
+  while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
   // Read the number.
   uptr rss = 0;
-  while (*pos >= '0' && *pos <= '9')
-    rss = rss * 10 + *pos++ - '0';
+  while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
   return rss * GetPageSizeCached();
 }
 
@ -686,8 +689,8 @@ u32 GetNumberOfCPUs() {
break; break;
if (entry->d_ino != 0 && *d_type == DT_DIR) { if (entry->d_ino != 0 && *d_type == DT_DIR) {
if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' && if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
entry->d_name[2] == 'u' && entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
entry->d_name[3] >= '0' && entry->d_name[3] <= '9') entry->d_name[3] <= '9')
n_cpus++; n_cpus++;
} }
entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen); entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
@@ -705,7 +708,7 @@ u32 GetNumberOfCPUs() {
 
 #if SANITIZER_LINUX
 
-# if SANITIZER_ANDROID
+#if SANITIZER_ANDROID
 static atomic_uint8_t android_log_initialized;
 
 void AndroidLogInit() {
@@ -717,10 +720,12 @@ static bool ShouldLogAfterPrintf() {
   return atomic_load(&android_log_initialized, memory_order_acquire);
 }
 
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int async_safe_write_log(int pri, const char* tag, const char* msg);
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int __android_log_write(int prio, const char* tag, const char* msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
+                                                             const char *tag,
+                                                             const char *msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
+                                                            const char *tag,
+                                                            const char *msg);
 
 // ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
 #define SANITIZER_ANDROID_LOG_INFO 4
@ -742,14 +747,14 @@ void WriteOneLineToSyslog(const char *s) {
} }
} }
extern "C" SANITIZER_WEAK_ATTRIBUTE extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
void android_set_abort_message(const char *); const char *);
void SetAbortMessage(const char *str) { void SetAbortMessage(const char *str) {
if (&android_set_abort_message) if (&android_set_abort_message)
android_set_abort_message(str); android_set_abort_message(str);
} }
# else #else
void AndroidLogInit() {} void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; } static bool ShouldLogAfterPrintf() { return true; }
@@ -757,7 +762,7 @@ static bool ShouldLogAfterPrintf() { return true; }
 void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
 
 void SetAbortMessage(const char *str) {}
-# endif  // SANITIZER_ANDROID
+#endif  // SANITIZER_ANDROID
 
 void LogMessageOnPrintf(const char *str) {
   if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
@@ -784,8 +789,8 @@ inline bool CanUseVDSO() {
 // MonotonicNanoTime is a timing function that can leverage the vDSO by calling
 // clock_gettime. real_clock_gettime only exists if clock_gettime is
 // intercepted, so define it weakly and use it if available.
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int real_clock_gettime(u32 clk_id, void *tp);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
+                                                           void *tp);
 u64 MonotonicNanoTime() {
   timespec ts;
   if (CanUseVDSO()) {
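
For readers following the riscv64 hunk in ThreadSelf() above, here is a small standalone sketch (not part of the patch) that repeats the same thread-pointer arithmetic outside the sanitizer runtime. The "mv %0, tp" read, the 0x800 offset from riscv-elf-psabi-doc issue #53, the 16-byte tcbhead_t size, and the 16-byte alignment are taken from the diff; the descriptor size of 1776 bytes is a hypothetical stand-in for what ThreadDescriptorSize() probes at runtime, and printing pthread_self() alongside is only for side-by-side inspection, not an equality check.

// Illustrative sketch only: assumes riscv64 Linux with glibc and a
// placeholder descriptor size; the real runtime derives the size itself.
#include <pthread.h>

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t tp;
  asm volatile("mv %0, tp" : "=r"(tp));  // same thread-pointer read as the patch

  const uintptr_t kTcbHead = 16;   // sizeof (tcbhead_t), as in TlsPreTcbSize()
  const uintptr_t kTlsAlign = 16;  // alignment used by TlsPreTcbSize()
  const uintptr_t kThreadDescriptorSize = 1776;  // hypothetical stand-in value
  const uintptr_t kTlsPreTcbSize =
      (kThreadDescriptorSize + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
  const uintptr_t kTlsTcbOffset = 0x800;  // riscv-elf-psabi-doc issue #53

  // Mirrors: descr_addr = tcb_end - kTlsTcbOffset - TlsPreTcbSize();
  uintptr_t descr_addr = tp - kTlsTcbOffset - kTlsPreTcbSize;

  std::printf("tp=0x%lx  computed descriptor=0x%lx  pthread_self()=0x%lx\n",
              (unsigned long)tp, (unsigned long)descr_addr,
              (unsigned long)pthread_self());
  return 0;
}

Whether the computed address lines up with pthread_self() depends on the libc's internal TLS layout, which is exactly what the runtime's ThreadDescriptorSize() measures, so a mismatch here points at the stand-in constants rather than at the patch.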