blob: 9f60d3a9f752e36e0efbfdbc655287d7ff12ecef [file] [log] [blame]
[email protected]f7817822009-09-24 05:11:581// Copyright (c) 2009 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "chrome_frame/vtable_patch_manager.h"
6
[email protected]53b19ac12010-07-21 16:35:217#include <atlcomcli.h>
[email protected]05358092010-07-21 16:07:248
[email protected]5ae94d22010-07-21 19:55:369#include <algorithm>
10
[email protected]d999c7c2010-03-23 21:09:0211#include "base/atomicops.h"
12#include "base/lock.h"
[email protected]f7817822009-09-24 05:11:5813#include "base/logging.h"
[email protected]3f55e872009-10-17 04:48:3714#include "base/scoped_ptr.h"
[email protected]f7817822009-09-24 05:11:5815
16#include "chrome_frame/function_stub.h"
[email protected]ec18ca672010-09-02 13:39:5017#include "chrome_frame/utils.h"
[email protected]f7817822009-09-24 05:11:5818
19namespace vtable_patch {
20
// The number of times we retry a patch/unpatch operation in case of
// VM races with other 3rd party software trying to patch the same thing.
const int kMaxRetries = 3;

// We hold a lock over all patching operations to make sure that we don't
// e.g. race on VM operations to the same patches, or to physical pages
// shared across different VTABLEs.
// File-scope global; taken by both PatchInterfaceMethods and
// UnpatchInterfaceMethods below.
Lock patch_lock_;
29
namespace internal {
// Because other parties in our process might be attempting to patch the same
// virtual tables at the same time, we have a race to modify the VM protections
// on the pages. We also need to do a compare/swap type operation when we
// modify the function, so as to be sure that we grab the most recent value.
// Hence the SEH blocks and the nasty-looking compare/swap operation.
//
// Atomically replaces *|entry| with |new_proc|, but only if *|entry| still
// holds |curr_proc|. Returns true iff the swap took place; returns false if
// another party changed the slot first, or if touching the memory faulted.
// NOTE(review): pointer values are funneled through Atomic32, so this
// assumes a 32-bit address space — presumably fine for this build target,
// but confirm before reusing this code elsewhere.
bool ReplaceFunctionPointer(void** entry, void* new_proc, void* curr_proc) {
  __try {
    base::subtle::Atomic32 prev_value;

    // Compare-and-swap: writes |new_proc| only if the slot still contains
    // |curr_proc|; |prev_value| receives whatever was in the slot before.
    prev_value = base::subtle::NoBarrier_CompareAndSwap(
        reinterpret_cast<base::subtle::Atomic32 volatile*>(entry),
        reinterpret_cast<base::subtle::Atomic32>(curr_proc),
        reinterpret_cast<base::subtle::Atomic32>(new_proc));

    // The swap succeeded only if the previous value matched our expectation.
    return curr_proc == reinterpret_cast<void*>(prev_value);
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // Oops, we took exception on access.
  }

  return false;
}

}  // namespace internal
54
[email protected]f7817822009-09-24 05:11:5855// Convenient definition of a VTABLE
56typedef PROC* Vtable;
57
58// Returns a pointer to the VTable of a COM interface.
59// @param unknown [in] The pointer of the COM interface.
60inline Vtable GetIFVTable(void* unknown) {
61 return reinterpret_cast<Vtable>(*reinterpret_cast<void**>(unknown));
62}
63
// Patches the virtual methods described by |patches| on the vtable of the
// COM interface |unknown|. For each entry, a FunctionStub is created that
// routes calls to the hook (it->method_), and the stub is recorded in the
// entry's stub_ field so UnpatchInterfaceMethods can undo the hook later.
// @param unknown [in] COM interface pointer whose vtable gets patched.
// @param patches [in/out] Array of patch descriptors, terminated by an
//     entry with index_ == -1; on success each entry's stub_ receives the
//     installed stub.
// @returns S_OK on success, E_INVALIDARG on bad arguments, E_OUTOFMEMORY
//     if a stub cannot be created, or E_FAIL when the pointer swap loses
//     the race kMaxRetries times. NOTE(review): on a failure return,
//     entries patched earlier in the array are left patched.
HRESULT PatchInterfaceMethods(void* unknown, MethodPatchInfo* patches) {
  // Do some sanity checking of the input arguments.
  if (NULL == unknown || NULL == patches) {
    NOTREACHED();
    return E_INVALIDARG;
  }

  Vtable vtable = GetIFVTable(unknown);
  DCHECK(vtable);

  // All VM operations, patching and manipulation of MethodPatchInfo
  // is done under a global lock, to ensure multiple threads don't
  // race, whether on an individual patch, or on VM operations to
  // the same physical pages.
  AutoLock lock(patch_lock_);

  for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
    if (it->stub_ != NULL) {
      // If this DCHECK fires it means that we are using the same VTable
      // information to patch two different interfaces, or we've lost a
      // race with another thread who's patching the same interface.
      DLOG(WARNING) << "Attempting to patch two different VTables with the "
          "same VTable information, or patching the same interface on "
          "multiple threads";
      continue;
    }

    PROC original_fn = vtable[it->index_];
    FunctionStub* stub = NULL;

#ifndef NDEBUG
    // Debug-only sanity check: catch double-patching of a slot that already
    // points at one of our stubs.
    stub = FunctionStub::FromCode(original_fn);
    if (stub != NULL) {
      DLOG(ERROR) << "attempt to patch a function that's already patched";
      DCHECK(stub->destination_function() ==
          reinterpret_cast<uintptr_t>(it->method_)) <<
          "patching the same method multiple times with different hooks?";
      continue;
    }
#endif

    stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(original_fn),
                                it->method_);
    if (!stub) {
      NOTREACHED();
      return E_OUTOFMEMORY;
    }

    // Do the VM operations and the patching in a loop, to try and ensure
    // we succeed even if there's a VM operation or a patch race against
    // other 3rd parties patching.
    bool succeeded = false;
    for (int i = 0; !succeeded && i < kMaxRetries; ++i) {
      DWORD protect = 0;
      if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC),
                            PAGE_EXECUTE_READWRITE, &protect)) {
        HRESULT hr = AtlHresultFromLastError();
        DLOG(ERROR) << "VirtualProtect failed 0x" << std::hex << hr;

        // Go around again in the feeble hope that this is
        // a temporary problem.
        continue;
      }
      // Re-read the slot now that the page is writable: a third party may
      // have changed it since our earlier read. The stub must forward to
      // whatever is current, and the compare/swap below must compare
      // against that same value.
      original_fn = vtable[it->index_];
      stub->set_argument(reinterpret_cast<uintptr_t>(original_fn));
      succeeded = internal::ReplaceFunctionPointer(
          reinterpret_cast<void**>(&vtable[it->index_]), stub->code(),
          original_fn);

      // Restore the previous page protection regardless of swap outcome.
      if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect,
                            &protect)) {
        DLOG(ERROR) << "VirtualProtect failed to restore protection";
      }
    }

    if (!succeeded) {
      FunctionStub::Destroy(stub);
      stub = NULL;

      DLOG(ERROR) << "Failed to patch VTable.";
      return E_FAIL;
    } else {
      // Success, save the stub we created.
      it->stub_ = stub;
      // Keep this module loaded for as long as the patch can be called.
      PinModule();
    }
  }

  return S_OK;
}
154
155HRESULT UnpatchInterfaceMethods(MethodPatchInfo* patches) {
[email protected]d999c7c2010-03-23 21:09:02156 AutoLock lock(patch_lock_);
157
[email protected]f7817822009-09-24 05:11:58158 for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
159 if (it->stub_) {
[email protected]d999c7c2010-03-23 21:09:02160 DCHECK(it->stub_->destination_function() ==
[email protected]f7817822009-09-24 05:11:58161 reinterpret_cast<uintptr_t>(it->method_));
162 // Modify the stub to just jump directly to the original function.
163 it->stub_->BypassStub(reinterpret_cast<void*>(it->stub_->argument()));
164 it->stub_ = NULL;
165 // Leave the stub in memory so that we won't break any possible chains.
[email protected]d999c7c2010-03-23 21:09:02166
167 // TODO(siggi): why not restore the original VTBL pointer here, provided
168 // we haven't been chained?
[email protected]f7817822009-09-24 05:11:58169 } else {
170 DLOG(WARNING) << "attempt to unpatch a function that wasn't patched";
171 }
172 }
173
174 return S_OK;
175}
176
[email protected]3f55e872009-10-17 04:48:37177// Disabled for now as we're not using it atm.
178#if 0
179
// Remembers the prototype patch descriptors; the actual patching happens in
// PatchObject. The prototype must never itself be used for patching, hence
// the check that its first stub_ slot is still NULL.
DynamicPatchManager::DynamicPatchManager(const MethodPatchInfo* patch_prototype)
    : patch_prototype_(patch_prototype) {
  DCHECK(patch_prototype_);
  DCHECK(patch_prototype_->stub_ == NULL);
}
185
// Unpatches everything this manager still has patched before it goes away.
DynamicPatchManager::~DynamicPatchManager() {
  UnpatchAll();
}
189
190HRESULT DynamicPatchManager::PatchObject(void* unknown) {
191 int patched_methods = 0;
192 for (; patch_prototype_[patched_methods].index_ != -1; patched_methods++) {
193 // If you hit this, then you are likely using the prototype instance for
194 // patching in _addition_ to this class. This is not a good idea :)
195 DCHECK(patch_prototype_[patched_methods].stub_ == NULL);
196 }
197
198 // Prepare a new patch object using the patch info from the prototype.
199 int mem_size = sizeof(PatchedObject) +
200 sizeof(MethodPatchInfo) * patched_methods;
201 PatchedObject* entry = reinterpret_cast<PatchedObject*>(new char[mem_size]);
202 entry->vtable_ = GetIFVTable(unknown);
203 memcpy(entry->patch_info_, patch_prototype_,
204 sizeof(MethodPatchInfo) * (patched_methods + 1));
205
206 patch_list_lock_.Acquire();
207
208 // See if we've already patched this vtable before.
209 // The search is done via the == operator of the PatchedObject class.
210 PatchList::const_iterator it = std::find(patch_list_.begin(),
211 patch_list_.end(), entry);
212 HRESULT hr;
213 if (it == patch_list_.end()) {
214 hr = PatchInterfaceMethods(unknown, entry->patch_info_);
215 if (SUCCEEDED(hr)) {
216 patch_list_.push_back(entry);
217 entry = NULL; // Ownership transferred to the array.
218 }
219 } else {
220 hr = S_FALSE;
221 }
222
223 patch_list_lock_.Release();
224
225 delete entry;
226
227 return hr;
228}
229
230bool DynamicPatchManager::UnpatchAll() {
231 patch_list_lock_.Acquire();
232 PatchList::iterator it;
233 for (it = patch_list_.begin(); it != patch_list_.end(); it++) {
234 UnpatchInterfaceMethods((*it)->patch_info_);
235 delete (*it);
236 }
237 patch_list_.clear();
238 patch_list_lock_.Release();
239
240 return true;
241}
242
243#endif // disabled DynamicPatchManager
244
[email protected]f7817822009-09-24 05:11:58245} // namespace vtable_patch