//===-- interception_win.cc ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific interception methods.
//
// This file implements several hooking techniques used to intercept calls
// to functions. The hooks are installed dynamically by modifying the
// assembly code.
//
// The hooking techniques make assumptions about the way the code is
// generated and are only safe under those assumptions.
//
// On 64-bit architectures, there is no direct 64-bit jump instruction. To
// allow arbitrary branching over the whole address space, the notion of a
// trampoline region is used. A trampoline region is a memory area within a
// 2 GB range of the patched code where it is safe to add custom assembly
// code to build 64-bit jumps.
//
// Hooking techniques
// ==================
//
// 1) Detour
//
// The Detour hooking technique assumes the presence of a header with
// padding and an overridable 2-byte nop instruction (mov edi, edi). The
// nop instruction can safely be replaced by a 2-byte jump without any need
// to save the instruction. A jump to the target is encoded in the function
// header and the nop instruction is replaced by a short jump to the header.
//
//        head:  5 x nop                 head:  jmp <hook>
//        func:  mov edi, edi    -->     func:  jmp short <head>
//               [...]                   real:  [...]
//
// This technique is only implemented on 32-bit architectures.
// Most of the time, Windows APIs are hookable with the detour technique.
//
// 2) Redirect Jump
//
// The redirect jump is applicable when the first instruction is a direct
// jump. The instruction is replaced by a jump to the hook.
//
//        func:  jmp <label>     -->     func:  jmp <hook>
//
// On a 64-bit architecture, a trampoline is inserted.
//
//        func:  jmp <label>     -->     func:  jmp <tramp>
//                                              [...]
//
//                                    [trampoline]
//                                      tramp:  jmp QWORD [addr]
//                                       addr:  .bytes <hook>
//
// Note: <real> is equivalent to <label>.
//
// 3) HotPatch
//
// The HotPatch hooking technique assumes the presence of a header with
// padding and a first instruction that is at least 2 bytes long.
//
// The 2-byte requirement provides the minimal space needed to encode a
// short jump. The HotPatch technique rewrites only one instruction, to
// avoid breaking a sequence of instructions containing a branch target.
//
// These assumptions are enforced by the MSVC compiler when the /HOTPATCH
// flag is used.
// See: https://msdn.microsoft.com/en-us/library/ms173507.aspx
// The default padding length is 5 bytes on 32-bit and 6 bytes on 64-bit.
//
//        head:  5 x nop                 head:  jmp <hook>
//        func:  <instr>         -->     func:  jmp short <head>
//               [...]                   body:  [...]
//
//                                    [trampoline]
//                                       real:  <instr>
//                                              jmp <body>
//
// On a 64-bit architecture:
//
//        head:  6 x nop                 head:  jmp QWORD [addr1]
//        func:  <instr>         -->     func:  jmp short <head>
//               [...]                   body:  [...]
//
//                                    [trampoline]
//                                      addr1:  .bytes <hook>
//                                       real:  <instr>
//                                              jmp QWORD [addr2]
//                                      addr2:  .bytes <body>
//
// 4) Trampoline
//
// The Trampoline hooking technique is the most aggressive one. It assumes
// that there is a sequence of instructions that can be safely replaced by
// a jump (enough room and no incoming branches).
//
// Unfortunately, these assumptions cannot be verified in general, and code
// may be broken after hooking.
//
//        func:  <instr>         -->     func:  jmp <hook>
//               <instr>
//               [...]                   body:  [...]
//
//                                    [trampoline]
//                                       real:  <instr>
//                                              <instr>
//                                              jmp <body>
//
// On a 64-bit architecture:
//
//        func:  <instr>         -->     func:  jmp QWORD [addr1]
//               <instr>
//               [...]                   body:  [...]
//
//                                    [trampoline]
//                                      addr1:  .bytes <hook>
//                                       real:  <instr>
//                                              <instr>
//                                              jmp QWORD [addr2]
//                                      addr2:  .bytes <body>
//
//===----------------------------------------------------------------------===//
127
128#ifdef _WIN32
129
Alexey Samsonov9d7429502012-08-02 11:29:14130#include "interception.h"
Etienne Bergeronf5525672016-07-11 23:02:18131#include "sanitizer_common/sanitizer_platform.h"
Saleem Abdulrasoold006c932015-10-29 20:36:55132#define WIN32_LEAN_AND_MEAN
Timur Iskhodzhanov36d297d2012-02-22 13:59:49133#include <windows.h>
134
135namespace __interception {
136
Etienne Bergeronf5525672016-07-11 23:02:18137static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
138static const int kJumpInstructionLength = 5;
139static const int kShortJumpInstructionLength = 2;
140static const int kIndirectJumpInstructionLength = 6;
141static const int kBranchLength =
142 FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
143static const int kDirectBranchLength = kBranchLength + kAddressLength;
144
145static void InterceptionFailed() {
146 // Do we have a good way to abort with an error message here?
147 __debugbreak();
148}
149
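// Returns true when the displacement from |from| to |target| fits in a
// signed 32-bit (rel32) operand, i.e. the two addresses are within 2 GB of
// each other.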
150static bool DistanceIsWithin2Gig(uptr from, uptr target) {
Reid Kleckner0d7c42c2016-11-15 18:29:17151#if SANITIZER_WINDOWS64
Etienne Bergeronf5525672016-07-11 23:02:18152 if (from < target)
153 return target - from <= (uptr)0x7FFFFFFFU;
154 else
155 return from - target <= (uptr)0x80000000U;
Reid Kleckner0d7c42c2016-11-15 18:29:17156#else
157 // In a 32-bit address space, the address calculation will wrap, so this check
158 // is unnecessary.
159 return true;
160#endif
Etienne Bergeronf5525672016-07-11 23:02:18161}
162
163static uptr GetMmapGranularity() {
164 SYSTEM_INFO si;
165 GetSystemInfo(&si);
166 return si.dwAllocationGranularity;
167}
168
169static uptr RoundUpTo(uptr size, uptr boundary) {
170 return (size + boundary - 1) & ~(boundary - 1);
171}
172
Timur Iskhodzhanov2f48b872012-03-12 11:45:09173// FIXME: internal_str* and internal_mem* functions should be moved from the
174// ASan sources into interception/.
175
Etienne Bergeron0b2d7132016-07-31 17:56:26176static size_t _strlen(const char *str) {
177 const char* p = str;
178 while (*p != '\0') ++p;
179 return p - str;
180}
181
182static char* _strchr(char* str, char c) {
183 while (*str) {
184 if (*str == c)
185 return str;
186 ++str;
187 }
188 return nullptr;
189}
190
Timur Iskhodzhanov2f48b872012-03-12 11:45:09191static void _memset(void *p, int value, size_t sz) {
192 for (size_t i = 0; i < sz; ++i)
193 ((char*)p)[i] = (char)value;
194}
195
196static void _memcpy(void *dst, void *src, size_t sz) {
197 char *dst_c = (char*)dst,
198 *src_c = (char*)src;
199 for (size_t i = 0; i < sz; ++i)
200 dst_c[i] = src_c[i];
201}
202
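// Temporarily makes a range of code writable (and executable) while it is
// being patched; the previous protection is returned in |old_protection| so
// that RestoreMemoryProtection() can undo the change.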
Etienne Bergeronf5525672016-07-11 23:02:18203static bool ChangeMemoryProtection(
204 uptr address, uptr size, DWORD *old_protection) {
205 return ::VirtualProtect((void*)address, size,
206 PAGE_EXECUTE_READWRITE,
207 old_protection) != FALSE;
Etienne Bergeron7a1bafd62016-06-06 18:09:54208}
Etienne Bergeronf5525672016-07-11 23:02:18209
210static bool RestoreMemoryProtection(
211 uptr address, uptr size, DWORD old_protection) {
212 DWORD unused;
213 return ::VirtualProtect((void*)address, size,
214 old_protection,
215 &unused) != FALSE;
216}
217
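// Returns true when the |size| bytes at |address| contain only padding,
// i.e. nop (0x90) or int3 (0xCC) bytes.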
218static bool IsMemoryPadding(uptr address, uptr size) {
219 u8* function = (u8*)address;
220 for (size_t i = 0; i < size; ++i)
221 if (function[i] != 0x90 && function[i] != 0xCC)
222 return false;
223 return true;
224}
225
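// A 10-byte "hint nop" (66 66 0F 1F 84 00 00 00 00 00) that compilers may
// emit as hot-patch padding in front of a function; FunctionHasPadding()
// accepts it in addition to plain 0x90/0xCC padding.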
Etienne Bergeron9c2a2202016-07-12 15:33:04226static const u8 kHintNop10Bytes[] = {
227 0x66, 0x66, 0x0F, 0x1F, 0x84,
228 0x00, 0x00, 0x00, 0x00, 0x00
229};
230
231template<class T>
232static bool FunctionHasPrefix(uptr address, const T &pattern) {
233 u8* function = (u8*)address - sizeof(pattern);
234 for (size_t i = 0; i < sizeof(pattern); ++i)
235 if (function[i] != pattern[i])
236 return false;
237 return true;
238}
239
240static bool FunctionHasPadding(uptr address, uptr size) {
241 if (IsMemoryPadding(address - size, size))
242 return true;
243 if (size <= sizeof(kHintNop10Bytes) &&
244 FunctionHasPrefix(address, kHintNop10Bytes))
245 return true;
246 return false;
247}
248
Etienne Bergeronf5525672016-07-11 23:02:18249static void WritePadding(uptr from, uptr size) {
250 _memset((void*)from, 0xCC, (size_t)size);
251}
252
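// Writes a 5-byte relative jump (E9 rel32) at |from| that branches to
// |target|.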
Etienne Bergeronf5525672016-07-11 23:02:18253static void WriteJumpInstruction(uptr from, uptr target) {
254 if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
255 InterceptionFailed();
256 ptrdiff_t offset = target - from - kJumpInstructionLength;
257 *(u8*)from = 0xE9;
258 *(u32*)(from + 1) = offset;
259}
260
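// Writes a 2-byte short jump (EB rel8) at |from|; the displacement to
// |target| must fit in a signed 8-bit value.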
261static void WriteShortJumpInstruction(uptr from, uptr target) {
262 sptr offset = target - from - kShortJumpInstructionLength;
263 if (offset < -128 || offset > 127)
264 InterceptionFailed();
265 *(u8*)from = 0xEB;
266 *(u8*)(from + 1) = (u8)offset;
267}
268
269#if SANITIZER_WINDOWS64
270static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
271 // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative
272 // offset.
  // The offset is the distance from the end of the jump instruction to the
  // memory location containing the targeted address. The displacement is
  // still 32-bit in x64, so indirect_target must be within a +/- 2 GB range.
276 int offset = indirect_target - from - kIndirectJumpInstructionLength;
277 if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
278 indirect_target)) {
279 InterceptionFailed();
280 }
281 *(u16*)from = 0x25FF;
282 *(u32*)(from + 2) = offset;
Timur Iskhodzhanov2f48b872012-03-12 11:45:09283}
Etienne Bergeron7a1bafd62016-06-06 18:09:54284#endif
285
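// On 64-bit, writes at |from| an indirect jump through |indirect_target|,
// an 8-byte slot that receives the absolute address of |target|. On 32-bit,
// |indirect_target| is unused and a direct relative jump is written.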
Etienne Bergeronf5525672016-07-11 23:02:18286static void WriteBranch(
287 uptr from, uptr indirect_target, uptr target) {
288#if SANITIZER_WINDOWS64
289 WriteIndirectJumpInstruction(from, indirect_target);
290 *(u64*)indirect_target = target;
291#else
292 (void)indirect_target;
293 WriteJumpInstruction(from, target);
294#endif
295}
296
297static void WriteDirectBranch(uptr from, uptr target) {
Etienne Bergeron7a1bafd62016-06-06 18:09:54298#if SANITIZER_WINDOWS64
  // Emit an indirect jump through the immediately following bytes:
Etienne Bergeronf5525672016-07-11 23:02:18300 // jmp [rip + kBranchLength]
301 // .quad <target>
302 WriteBranch(from, from + kBranchLength, target);
Etienne Bergeron7a1bafd62016-06-06 18:09:54303#else
Etienne Bergeronf5525672016-07-11 23:02:18304 WriteJumpInstruction(from, target);
Etienne Bergeron7a1bafd62016-06-06 18:09:54305#endif
306}
307
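// A trampoline region is an RWX allocation used to store trampolines and
// 64-bit branch targets. Regions are allocated lazily and, on 64-bit, are
// kept within 2 GB of the patched image so that rel32 branches can reach
// them.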
Etienne Bergeronf5525672016-07-11 23:02:18308struct TrampolineMemoryRegion {
309 uptr content;
310 uptr allocated_size;
311 uptr max_size;
312};
313
Etienne Bergerond61042b2016-07-18 19:33:05314static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
Etienne Bergeronf5525672016-07-11 23:02:18315static const int kMaxTrampolineRegion = 1024;
316static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
317
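// Reserves and commits one executable region of |granularity| bytes. On
// 64-bit, the address space is scanned upwards from |image_address| (within
// the 2 GB scan limit) for a free block; on 32-bit, any address will do.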
318static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
Etienne Bergeron7a1bafd62016-06-06 18:09:54319#if SANITIZER_WINDOWS64
Etienne Bergeronf5525672016-07-11 23:02:18320 uptr address = image_address;
321 uptr scanned = 0;
322 while (scanned < kTrampolineScanLimitRange) {
323 MEMORY_BASIC_INFORMATION info;
324 if (!::VirtualQuery((void*)address, &info, sizeof(info)))
325 return nullptr;
326
327 // Check whether a region can be allocated at |address|.
328 if (info.State == MEM_FREE && info.RegionSize >= granularity) {
329 void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
330 granularity,
331 MEM_RESERVE | MEM_COMMIT,
332 PAGE_EXECUTE_READWRITE);
333 return page;
334 }
335
336 // Move to the next region.
337 address = (uptr)info.BaseAddress + info.RegionSize;
338 scanned += info.RegionSize;
339 }
340 return nullptr;
Etienne Bergeron7a1bafd62016-06-06 18:09:54341#else
Etienne Bergeronf5525672016-07-11 23:02:18342 return ::VirtualAlloc(nullptr,
343 granularity,
344 MEM_RESERVE | MEM_COMMIT,
345 PAGE_EXECUTE_READWRITE);
Etienne Bergeron7a1bafd62016-06-06 18:09:54346#endif
347}
Timur Iskhodzhanov2f48b872012-03-12 11:45:09348
Etienne Bergeronf5525672016-07-11 23:02:18349// Used by unittests to release mapped memory space.
350void TestOnlyReleaseTrampolineRegions() {
351 for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
352 TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
353 if (current->content == 0)
354 return;
355 ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
356 current->content = 0;
357 }
358}
359
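// Returns |size| bytes of trampoline space reachable from |image_address|,
// allocating a new trampoline region when needed, or 0 on failure.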
360static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
361 // Find a region within 2G with enough space to allocate |size| bytes.
362 TrampolineMemoryRegion *region = nullptr;
363 for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
364 TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
365 if (current->content == 0) {
366 // No valid region found, allocate a new region.
367 size_t bucket_size = GetMmapGranularity();
368 void *content = AllocateTrampolineRegion(image_address, bucket_size);
369 if (content == nullptr)
370 return 0U;
371
372 current->content = (uptr)content;
373 current->allocated_size = 0;
374 current->max_size = bucket_size;
375 region = current;
376 break;
377 } else if (current->max_size - current->allocated_size > size) {
378#if SANITIZER_WINDOWS64
      // On 64-bit, the memory must lie within 2 GB of the image address.
380 uptr next_address = current->content + current->allocated_size;
381 if (next_address < image_address ||
Etienne Bergeronef4b4492016-07-12 01:30:58382 next_address - image_address >= 0x7FFF0000)
Etienne Bergeronf5525672016-07-11 23:02:18383 continue;
384#endif
385 // The space can be allocated in the current region.
386 region = current;
387 break;
388 }
Timur Iskhodzhanov2f48b872012-03-12 11:45:09389 }
390
Etienne Bergeronf5525672016-07-11 23:02:18391 // Failed to find a region.
392 if (region == nullptr)
393 return 0U;
Timur Iskhodzhanov2f48b872012-03-12 11:45:09394
Etienne Bergeronf5525672016-07-11 23:02:18395 // Allocate the space in the current region.
396 uptr allocated_space = region->content + region->allocated_size;
397 region->allocated_size += size;
398 WritePadding(allocated_space, size);
399
400 return allocated_space;
Timur Iskhodzhanov37c00b42014-05-16 14:04:57401}
402
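// Minimal instruction-length decoder. It only recognizes the instruction
// patterns seen at the entry of functions we intercept; anything else is
// treated as an unknown instruction. If |rel_offset| is provided, it
// receives the offset of a rip-relative displacement inside the
// instruction, when the instruction has one.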
403// Returns 0 on error.
Etienne Bergeron901b0dc2016-07-21 21:08:54404static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
Etienne Bergeron8fc1dca2016-07-14 22:14:33405 switch (*(u64*)address) {
406 case 0x90909090909006EB: // stub: jmp over 6 x nop.
407 return 8;
408 }
409
Etienne Bergeronf5525672016-07-11 23:02:18410 switch (*(u8*)address) {
411 case 0x90: // 90 : nop
412 return 1;
413
414 case 0x50: // push eax / rax
415 case 0x51: // push ecx / rcx
416 case 0x52: // push edx / rdx
417 case 0x53: // push ebx / rbx
418 case 0x54: // push esp / rsp
419 case 0x55: // push ebp / rbp
420 case 0x56: // push esi / rsi
421 case 0x57: // push edi / rdi
422 case 0x5D: // pop ebp / rbp
423 return 1;
424
425 case 0x6A: // 6A XX = push XX
426 return 2;
427
428 case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
429 case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
Etienne Bergeronf5525672016-07-11 23:02:18430 return 5;
431
    // Cannot overwrite a control instruction. Return 0 to indicate failure.
433 case 0xE9: // E9 XX XX XX XX : jmp <label>
434 case 0xE8: // E8 XX XX XX XX : call <func>
435 case 0xC3: // C3 : ret
436 case 0xEB: // EB XX : jmp XX (short jump)
437 case 0x70: // 7Y YY : jy XX (short conditional jump)
438 case 0x71:
439 case 0x72:
440 case 0x73:
441 case 0x74:
442 case 0x75:
443 case 0x76:
444 case 0x77:
445 case 0x78:
446 case 0x79:
447 case 0x7A:
448 case 0x7B:
449 case 0x7C:
450 case 0x7D:
451 case 0x7E:
452 case 0x7F:
453 return 0;
454 }
455
456 switch (*(u16*)(address)) {
457 case 0xFF8B: // 8B FF : mov edi, edi
458 case 0xEC8B: // 8B EC : mov ebp, esp
459 case 0xc889: // 89 C8 : mov eax, ecx
460 case 0xC18B: // 8B C1 : mov eax, ecx
461 case 0xC033: // 33 C0 : xor eax, eax
462 case 0xC933: // 33 C9 : xor ecx, ecx
463 case 0xD233: // 33 D2 : xor edx, edx
464 return 2;
465
    // Cannot overwrite a control instruction. Return 0 to indicate failure.
467 case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX]
468 return 0;
469 }
470
Etienne Bergeron2bb23bf2016-08-03 05:03:35471 switch (0x00FFFFFF & *(u32*)address) {
472 case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
473 return 7;
474 }
475
Etienne Bergeron7a1bafd62016-06-06 18:09:54476#if SANITIZER_WINDOWS64
Vedant Kumar55ecc102016-07-18 20:07:27477 switch (*(u8*)address) {
Etienne Bergerona81a44f2016-07-18 19:50:55478 case 0xA1: // A1 XX XX XX XX XX XX XX XX :
479 // movabs eax, dword ptr ds:[XXXXXXXX]
480 return 8;
481 }
482
Etienne Bergeronf5525672016-07-11 23:02:18483 switch (*(u16*)address) {
484 case 0x5040: // push rax
485 case 0x5140: // push rcx
486 case 0x5240: // push rdx
487 case 0x5340: // push rbx
488 case 0x5440: // push rsp
489 case 0x5540: // push rbp
490 case 0x5640: // push rsi
491 case 0x5740: // push rdi
492 case 0x5441: // push r12
493 case 0x5541: // push r13
494 case 0x5641: // push r14
495 case 0x5741: // push r15
Etienne Bergerone318b832016-07-12 19:39:07496 case 0x9066: // Two-byte NOP
Etienne Bergeronf5525672016-07-11 23:02:18497 return 2;
Etienne Bergeron7a1bafd62016-06-06 18:09:54498 }
499
Etienne Bergeronf5525672016-07-11 23:02:18500 switch (0x00FFFFFF & *(u32*)address) {
    case 0xe58948:    // 48 89 e5 : mov rbp, rsp
502 case 0xc18b48: // 48 8b c1 : mov rax, rcx
503 case 0xc48b48: // 48 8b c4 : mov rax, rsp
504 case 0xd9f748: // 48 f7 d9 : neg rcx
505 case 0xd12b48: // 48 2b d1 : sub rdx, rcx
506 case 0x07c1f6: // f6 c1 07 : test cl, 0x7
Etienne Bergeron9f987d32016-09-30 19:37:11507 case 0xc98548: // 48 85 C9 : test rcx, rcx
Etienne Bergeronf5525672016-07-11 23:02:18508 case 0xc0854d: // 4d 85 c0 : test r8, r8
509 case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
510 case 0xc03345: // 45 33 c0 : xor r8d, r8d
Etienne Bergeron5d427632016-08-02 20:38:05511 case 0xdb3345: // 45 33 DB : xor r11d, r11d
Etienne Bergeronf5525672016-07-11 23:02:18512 case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
513 case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
Etienne Bergeron5d427632016-08-02 20:38:05514 case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
Etienne Bergeronf5525672016-07-11 23:02:18515 case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
516 case 0xca2b48: // 48 2b ca : sub rcx, rdx
517 case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
    case 0xc00b4d:    // 4d 0b c0 : or r8, r8
519 case 0xd18b48: // 48 8b d1 : mov rdx, rcx
Etienne Bergeron5d427632016-08-02 20:38:05520 case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
Etienne Bergeronf5525672016-07-11 23:02:18521 case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
522 return 3;
523
524 case 0xec8348: // 48 83 ec XX : sub rsp, XX
525 case 0xf88349: // 49 83 f8 XX : cmp r8, XX
526 case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
527 return 4;
528
Etienne Bergeron5d427632016-08-02 20:38:05529 case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
530 return 7;
531
Etienne Bergeronf5525672016-07-11 23:02:18532 case 0x058b48: // 48 8b 05 XX XX XX XX :
533 // mov rax, QWORD PTR [rip + XXXXXXXX]
534 case 0x25ff48: // 48 ff 25 XX XX XX XX :
535 // rex.W jmp QWORD PTR [rip + XXXXXXXX]
Etienne Bergeron901b0dc2016-07-21 21:08:54536
537 // Instructions having offset relative to 'rip' need offset adjustment.
538 if (rel_offset)
539 *rel_offset = 3;
540 return 7;
Etienne Bergerona81a44f2016-07-18 19:50:55541
542 case 0x2444c7: // C7 44 24 XX YY YY YY YY
543 // mov dword ptr [rsp + XX], YYYYYYYY
544 return 8;
Etienne Bergeronf5525672016-07-11 23:02:18545 }
546
547 switch (*(u32*)(address)) {
Etienne Bergeron8fc1dca2016-07-14 22:14:33548 case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
549 case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
Etienne Bergeronf5525672016-07-11 23:02:18550 case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
551 case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
552 return 5;
553 }
554
Etienne Bergeron00f3f6e2016-05-27 21:29:31555#else
Etienne Bergerona566fe32016-07-15 17:26:33556
Etienne Bergerona81a44f2016-07-18 19:50:55557 switch (*(u8*)address) {
558 case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
559 return 5;
560 }
Etienne Bergeronf5525672016-07-11 23:02:18561 switch (*(u16*)address) {
562 case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
563 case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
564 case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
565 case 0xEC83: // 83 EC XX : sub esp, XX
566 case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
567 return 3;
568 case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
569 case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
570 return 6;
    case 0x3D83:  // 83 3D XX YY ZZ WW TT : cmp dword ptr ds:[WWZZYYXX], TT
572 return 7;
573 case 0x7D83: // 83 7D XX YY : cmp dword ptr [ebp + XX], YY
574 return 4;
Timur Iskhodzhanov2f48b872012-03-12 11:45:09575 }
576
Etienne Bergeronf5525672016-07-11 23:02:18577 switch (0x00FFFFFF & *(u32*)address) {
    case 0x24448A:  // 8A 44 24 XX : mov al, byte ptr [esp + XX]
579 case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
580 case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
581 case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
582 case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
583 case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
584 return 4;
585 }
586
587 switch (*(u32*)address) {
588 case 0x2444B60F: // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]
589 return 5;
590 }
Etienne Bergeron00f3f6e2016-05-27 21:29:31591#endif
Etienne Bergeronf5525672016-07-11 23:02:18592
593 // Unknown instruction!
594 // FIXME: Unknown instruction failures might happen when we add a new
595 // interceptor or a new compiler version. In either case, they should result
596 // in visible and readable error messages. However, merely calling abort()
597 // leads to an infinite recursion in CheckFailed.
598 InterceptionFailed();
599 return 0;
Timur Iskhodzhanov37c00b42014-05-16 14:04:57600}
Timur Iskhodzhanov2f48b872012-03-12 11:45:09601
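// Computes how many bytes, rounded up to whole instructions, must be
// relocated from |address| in order to free at least |size| bytes.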
Etienne Bergeronf5525672016-07-11 23:02:18602// Returns 0 on error.
603static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
604 size_t cursor = 0;
605 while (cursor < size) {
606 size_t instruction_size = GetInstructionSize(address + cursor);
607 if (!instruction_size)
608 return 0;
609 cursor += instruction_size;
610 }
611 return cursor;
612}
613
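// Copies |size| bytes of whole instructions from |from| to |to|, adjusting
// rip-relative displacements so that they keep pointing at their original
// targets.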
Etienne Bergeron901b0dc2016-07-21 21:08:54614static bool CopyInstructions(uptr to, uptr from, size_t size) {
615 size_t cursor = 0;
616 while (cursor != size) {
617 size_t rel_offset = 0;
618 size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
619 _memcpy((void*)(to + cursor), (void*)(from + cursor),
620 (size_t)instruction_size);
621 if (rel_offset) {
622 uptr delta = to - from;
623 uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
624#if SANITIZER_WINDOWS64
625 if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
626 return false;
627#endif
628 *(u32*)(to + cursor + rel_offset) = relocated_offset;
629 }
630 cursor += instruction_size;
631 }
632 return true;
633}
634
635
Etienne Bergeronf5525672016-07-11 23:02:18636#if !SANITIZER_WINDOWS64
637bool OverrideFunctionWithDetour(
638 uptr old_func, uptr new_func, uptr *orig_old_func) {
639 const int kDetourHeaderLen = 5;
640 const u16 kDetourInstruction = 0xFF8B;
641
642 uptr header = (uptr)old_func - kDetourHeaderLen;
643 uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;
644
645 // Validate that the function is hookable.
646 if (*(u16*)old_func != kDetourInstruction ||
647 !IsMemoryPadding(header, kDetourHeaderLen))
648 return false;
649
650 // Change memory protection to writable.
651 DWORD protection = 0;
652 if (!ChangeMemoryProtection(header, patch_length, &protection))
653 return false;
654
655 // Write a relative jump to the redirected function.
656 WriteJumpInstruction(header, new_func);
657
658 // Write the short jump to the function prefix.
659 WriteShortJumpInstruction(old_func, header);
660
661 // Restore previous memory protection.
662 if (!RestoreMemoryProtection(header, patch_length, protection))
663 return false;
664
665 if (orig_old_func)
666 *orig_old_func = old_func + kShortJumpInstructionLength;
667
668 return true;
669}
670#endif
671
672bool OverrideFunctionWithRedirectJump(
673 uptr old_func, uptr new_func, uptr *orig_old_func) {
674 // Check whether the first instruction is a relative jump.
675 if (*(u8*)old_func != 0xE9)
676 return false;
677
678 if (orig_old_func) {
679 uptr relative_offset = *(u32*)(old_func + 1);
680 uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
681 *orig_old_func = absolute_target;
682 }
Timur Iskhodzhanov37c00b42014-05-16 14:04:57683
Etienne Bergeron7a1bafd62016-06-06 18:09:54684#if SANITIZER_WINDOWS64
Etienne Bergeronf5525672016-07-11 23:02:18685 // If needed, get memory space for a trampoline jump.
686 uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);
687 if (!trampoline)
688 return false;
689 WriteDirectBranch(trampoline, new_func);
Etienne Bergeron7a1bafd62016-06-06 18:09:54690#endif
Etienne Bergeronf5525672016-07-11 23:02:18691
692 // Change memory protection to writable.
693 DWORD protection = 0;
694 if (!ChangeMemoryProtection(old_func, kJumpInstructionLength, &protection))
695 return false;
696
697 // Write a relative jump to the redirected function.
698 WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));
699
700 // Restore previous memory protection.
701 if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))
702 return false;
703
704 return true;
705}
706
707bool OverrideFunctionWithHotPatch(
708 uptr old_func, uptr new_func, uptr *orig_old_func) {
709 const int kHotPatchHeaderLen = kBranchLength;
710
711 uptr header = (uptr)old_func - kHotPatchHeaderLen;
712 uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;
713
714 // Validate that the function is hot patchable.
715 size_t instruction_size = GetInstructionSize(old_func);
716 if (instruction_size < kShortJumpInstructionLength ||
Etienne Bergeron9c2a2202016-07-12 15:33:04717 !FunctionHasPadding(old_func, kHotPatchHeaderLen))
Etienne Bergeronf5525672016-07-11 23:02:18718 return false;
719
720 if (orig_old_func) {
721 // Put the needed instructions into the trampoline bytes.
722 uptr trampoline_length = instruction_size + kDirectBranchLength;
723 uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
724 if (!trampoline)
725 return false;
Etienne Bergeron901b0dc2016-07-21 21:08:54726 if (!CopyInstructions(trampoline, old_func, instruction_size))
727 return false;
Etienne Bergeronf5525672016-07-11 23:02:18728 WriteDirectBranch(trampoline + instruction_size,
729 old_func + instruction_size);
730 *orig_old_func = trampoline;
731 }
732
733 // If needed, get memory space for indirect address.
734 uptr indirect_address = 0;
735#if SANITIZER_WINDOWS64
736 indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
737 if (!indirect_address)
738 return false;
739#endif
740
741 // Change memory protection to writable.
742 DWORD protection = 0;
743 if (!ChangeMemoryProtection(header, patch_length, &protection))
744 return false;
745
746 // Write jumps to the redirected function.
747 WriteBranch(header, indirect_address, new_func);
748 WriteShortJumpInstruction(old_func, header);
749
750 // Restore previous memory protection.
751 if (!RestoreMemoryProtection(header, patch_length, protection))
752 return false;
753
754 return true;
755}
756
757bool OverrideFunctionWithTrampoline(
758 uptr old_func, uptr new_func, uptr *orig_old_func) {
759
760 size_t instructions_length = kBranchLength;
761 size_t padding_length = 0;
762 uptr indirect_address = 0;
763
Timur Iskhodzhanov37c00b42014-05-16 14:04:57764 if (orig_old_func) {
765 // Find out the number of bytes of the instructions we need to copy
Etienne Bergeronf5525672016-07-11 23:02:18766 // to the trampoline.
767 instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);
768 if (!instructions_length)
Timur Iskhodzhanov37c00b42014-05-16 14:04:57769 return false;
770
771 // Put the needed instructions into the trampoline bytes.
Etienne Bergeronf5525672016-07-11 23:02:18772 uptr trampoline_length = instructions_length + kDirectBranchLength;
773 uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
Timur Iskhodzhanov37c00b42014-05-16 14:04:57774 if (!trampoline)
775 return false;
Etienne Bergeron901b0dc2016-07-21 21:08:54776 if (!CopyInstructions(trampoline, old_func, instructions_length))
777 return false;
Etienne Bergeronf5525672016-07-11 23:02:18778 WriteDirectBranch(trampoline + instructions_length,
779 old_func + instructions_length);
780 *orig_old_func = trampoline;
Timur Iskhodzhanov37c00b42014-05-16 14:04:57781 }
782
Etienne Bergeronf5525672016-07-11 23:02:18783#if SANITIZER_WINDOWS64
784 // Check if the targeted address can be encoded in the function padding.
785 // Otherwise, allocate it in the trampoline region.
786 if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {
787 indirect_address = old_func - kAddressLength;
788 padding_length = kAddressLength;
789 } else {
790 indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
791 if (!indirect_address)
792 return false;
793 }
794#endif
795
796 // Change memory protection to writable.
797 uptr patch_address = old_func - padding_length;
798 uptr patch_length = instructions_length + padding_length;
799 DWORD protection = 0;
800 if (!ChangeMemoryProtection(patch_address, patch_length, &protection))
Timur Iskhodzhanov2f48b872012-03-12 11:45:09801 return false;
802
Etienne Bergeronf5525672016-07-11 23:02:18803 // Patch the original function.
804 WriteBranch(old_func, indirect_address, new_func);
Timur Iskhodzhanov2f48b872012-03-12 11:45:09805
Etienne Bergeronf5525672016-07-11 23:02:18806 // Restore previous memory protection.
807 if (!RestoreMemoryProtection(patch_address, patch_length, protection))
808 return false;
Timur Iskhodzhanov2f48b872012-03-12 11:45:09809
810 return true;
811}
812
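// Tries each hooking technique in turn, from the least to the most
// invasive one, and returns true as soon as one of them succeeds.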
Etienne Bergeronf5525672016-07-11 23:02:18813bool OverrideFunction(
814 uptr old_func, uptr new_func, uptr *orig_old_func) {
815#if !SANITIZER_WINDOWS64
816 if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))
817 return true;
818#endif
819 if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))
820 return true;
821 if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))
822 return true;
823 if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))
824 return true;
825 return false;
826}
827
Reid Klecknerd85f7012015-08-18 22:38:27828static void **InterestingDLLsAvailable() {
Etienne Bergeroncc464d52016-07-06 19:15:11829 static const char *InterestingDLLs[] = {
Reid Kleckner3b029052016-03-24 20:19:48830 "kernel32.dll",
831 "msvcr110.dll", // VS2012
832 "msvcr120.dll", // VS2013
833 "vcruntime140.dll", // VS2015
834 "ucrtbase.dll", // Universal CRT
835 // NTDLL should go last as it exports some functions that we should
836 // override in the CRT [presumably only used internally].
837 "ntdll.dll", NULL};
Timur Iskhodzhanov0a88b252014-08-25 13:19:05838 static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
839 if (!result[0]) {
840 for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
841 if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
842 result[j++] = (void *)h;
843 }
844 }
Reid Klecknerd85f7012015-08-18 22:38:27845 return &result[0];
846}
847
848namespace {
849// Utility for reading loaded PE images.
850template <typename T> class RVAPtr {
851 public:
852 RVAPtr(void *module, uptr rva)
853 : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
854 operator T *() { return ptr_; }
855 T *operator->() { return ptr_; }
856 T *operator++() { return ++ptr_; }
857
858 private:
859 T *ptr_;
860};
861} // namespace
862
863// Internal implementation of GetProcAddress. At least since Windows 8,
864// GetProcAddress appears to initialize DLLs before returning function pointers
865// into them. This is problematic for the sanitizers, because they typically
866// want to intercept malloc *before* MSVCRT initializes. Our internal
867// implementation walks the export list manually without doing initialization.
868uptr InternalGetProcAddress(void *module, const char *func_name) {
869 // Check that the module header is full and present.
870 RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
871 RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
872 if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
873 headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
874 headers->FileHeader.SizeOfOptionalHeader <
875 sizeof(IMAGE_OPTIONAL_HEADER)) {
876 return 0;
877 }
878
879 IMAGE_DATA_DIRECTORY *export_directory =
880 &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
Marcos Pividori7ac943c2017-01-30 18:23:37881 if (export_directory->Size == 0)
882 return 0;
Reid Klecknerd85f7012015-08-18 22:38:27883 RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
884 export_directory->VirtualAddress);
885 RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
886 RVAPtr<DWORD> names(module, exports->AddressOfNames);
887 RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);
888
889 for (DWORD i = 0; i < exports->NumberOfNames; i++) {
890 RVAPtr<char> name(module, names[i]);
891 if (!strcmp(func_name, name)) {
892 DWORD index = ordinals[i];
893 RVAPtr<char> func(module, functions[index]);
Etienne Bergeron0b2d7132016-07-31 17:56:26894
895 // Handle forwarded functions.
896 DWORD offset = functions[index];
897 if (offset >= export_directory->VirtualAddress &&
898 offset < export_directory->VirtualAddress + export_directory->Size) {
        // An entry for a forwarded function is a string with the format
        // "<module>.<function_name>" that is stored in the export directory.
        char function_name[256];
        size_t function_name_length = _strlen(func);
        if (function_name_length >= sizeof(function_name) - 1)
          InterceptionFailed();

        _memcpy(function_name, func, function_name_length);
        function_name[function_name_length] = '\0';
909 char* separator = _strchr(function_name, '.');
910 if (!separator)
911 InterceptionFailed();
912 *separator = '\0';
913
914 void* redirected_module = GetModuleHandleA(function_name);
915 if (!redirected_module)
916 InterceptionFailed();
917 return InternalGetProcAddress(redirected_module, separator + 1);
918 }
919
Reid Klecknerd85f7012015-08-18 22:38:27920 return (uptr)(char *)func;
921 }
922 }
923
924 return 0;
Timur Iskhodzhanov0a88b252014-08-25 13:19:05925}
926
Etienne Bergeron42cdfbc2016-09-28 18:04:07927bool OverrideFunction(
928 const char *func_name, uptr new_func, uptr *orig_old_func) {
929 bool hooked = false;
Reid Klecknerd85f7012015-08-18 22:38:27930 void **DLLs = InterestingDLLsAvailable();
Etienne Bergeron42cdfbc2016-09-28 18:04:07931 for (size_t i = 0; DLLs[i]; ++i) {
932 uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
933 if (func_addr &&
934 OverrideFunction(func_addr, new_func, orig_old_func)) {
935 hooked = true;
936 }
937 }
938 return hooked;
Timur Iskhodzhanov0a88b252014-08-25 13:19:05939}
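
// Example (illustrative sketch only, not part of the interface): overriding
// a CRT function by name, where MyMemsetHook is a placeholder for a
// user-provided replacement with a memset-compatible signature:
//
//   static uptr real_memset;
//   bool ok = OverrideFunction("memset", (uptr)&MyMemsetHook, &real_memset);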
940
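// Patches the import address table (IAT) of |module_to_patch| so that its
// import of |function_name| from |imported_module| points to |new_function|.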
Reid Kleckner3b029052016-03-24 20:19:48941bool OverrideImportedFunction(const char *module_to_patch,
942 const char *imported_module,
943 const char *function_name, uptr new_function,
944 uptr *orig_old_func) {
945 HMODULE module = GetModuleHandleA(module_to_patch);
946 if (!module)
947 return false;
948
949 // Check that the module header is full and present.
950 RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
951 RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
952 if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
Etienne Bergeronf5525672016-07-11 23:02:18953 headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
Reid Kleckner3b029052016-03-24 20:19:48954 headers->FileHeader.SizeOfOptionalHeader <
955 sizeof(IMAGE_OPTIONAL_HEADER)) {
956 return false;
957 }
958
959 IMAGE_DATA_DIRECTORY *import_directory =
960 &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
961
962 // Iterate the list of imported DLLs. FirstThunk will be null for the last
963 // entry.
964 RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,
965 import_directory->VirtualAddress);
966 for (; imports->FirstThunk != 0; ++imports) {
967 RVAPtr<const char> modname(module, imports->Name);
968 if (_stricmp(&*modname, imported_module) == 0)
969 break;
970 }
971 if (imports->FirstThunk == 0)
972 return false;
973
974 // We have two parallel arrays: the import address table (IAT) and the table
975 // of names. They start out containing the same data, but the loader rewrites
976 // the IAT to hold imported addresses and leaves the name table in
977 // OriginalFirstThunk alone.
978 RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);
979 RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);
980 for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {
981 if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
982 RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
983 module, name_table->u1.ForwarderString);
984 const char *funcname = &import_by_name->Name[0];
985 if (strcmp(funcname, function_name) == 0)
986 break;
987 }
988 }
989 if (name_table->u1.Ordinal == 0)
990 return false;
991
992 // Now we have the correct IAT entry. Do the swap. We have to make the page
993 // read/write first.
994 if (orig_old_func)
995 *orig_old_func = iat->u1.AddressOfData;
996 DWORD old_prot, unused_prot;
997 if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,
998 &old_prot))
999 return false;
1000 iat->u1.AddressOfData = new_function;
1001 if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))
1002 return false; // Not clear if this failure bothers us.
1003 return true;
1004}
1005
Timur Iskhodzhanov36d297d2012-02-22 13:59:491006} // namespace __interception
1007
1008#endif // _WIN32