blob: 177771dc65ec071bde024838203bc30c410f3a16 [file] [log] [blame]
morrita373af03b2014-09-09 19:35:241// Copyright 2014 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
avi246998d82015-12-22 02:39:045#include <stddef.h>
danakj03de39b22016-04-23 04:21:096#include <memory>
avi246998d82015-12-22 02:39:047
danakj03de39b22016-04-23 04:21:098#include "base/memory/ptr_util.h"
thestig0df2bae82016-07-26 17:59:369#include "base/process/process_metrics.h"
morritac4db5472015-03-13 20:44:3910#include "base/run_loop.h"
jamca86c9e2017-01-06 19:55:5711#include "base/strings/stringprintf.h"
12#include "base/test/perf_time_logger.h"
13#include "base/test/test_io_thread.h"
gabf08ccc02016-05-11 18:51:1114#include "base/threading/thread_task_runner_handle.h"
avi246998d82015-12-22 02:39:0415#include "build/build_config.h"
amistryd4aa70d2016-06-23 07:52:3716#include "ipc/ipc_channel_mojo.h"
jamca86c9e2017-01-06 19:55:5717#include "ipc/ipc_test.mojom.h"
18#include "ipc/ipc_test_base.h"
rockotc637caf9b2016-02-10 09:57:0819#include "mojo/edk/embedder/embedder.h"
20#include "mojo/edk/embedder/platform_channel_pair.h"
jamca86c9e2017-01-06 19:55:5721#include "mojo/edk/test/mojo_test_base.h"
sammce4d0abd2016-03-07 22:38:0422#include "mojo/edk/test/multiprocess_test_helper.h"
jamca86c9e2017-01-06 19:55:5723#include "mojo/public/cpp/bindings/binding.h"
24#include "mojo/public/cpp/system/message_pipe.h"
morrita373af03b2014-09-09 19:35:2425
sammce4d0abd2016-03-07 22:38:0426namespace IPC {
morrita373af03b2014-09-09 19:35:2427namespace {
28
jamca86c9e2017-01-06 19:55:5729// This class simply collects stats about abstract "events" (each of which has a
30// start time and an end time).
31class EventTimeTracker {
sammce4d0abd2016-03-07 22:38:0432 public:
jamca86c9e2017-01-06 19:55:5733 explicit EventTimeTracker(const char* name)
34 : name_(name),
35 count_(0) {
morritac4db5472015-03-13 20:44:3936 }
37
jamca86c9e2017-01-06 19:55:5738 void AddEvent(const base::TimeTicks& start, const base::TimeTicks& end) {
39 DCHECK(end >= start);
40 count_++;
41 base::TimeDelta duration = end - start;
42 total_duration_ += duration;
43 max_duration_ = std::max(max_duration_, duration);
44 }
45
46 void ShowResults() const {
47 VLOG(1) << name_ << " count: " << count_;
48 VLOG(1) << name_ << " total duration: "
49 << total_duration_.InMillisecondsF() << " ms";
50 VLOG(1) << name_ << " average duration: "
51 << (total_duration_.InMillisecondsF() / static_cast<double>(count_))
52 << " ms";
53 VLOG(1) << name_ << " maximum duration: "
54 << max_duration_.InMillisecondsF() << " ms";
55 }
56
57 void Reset() {
58 count_ = 0;
59 total_duration_ = base::TimeDelta();
60 max_duration_ = base::TimeDelta();
61 }
62
63 private:
64 const std::string name_;
65
66 uint64_t count_;
67 base::TimeDelta total_duration_;
68 base::TimeDelta max_duration_;
69
70 DISALLOW_COPY_AND_ASSIGN(EventTimeTracker);
71};
72
// Server-side listener driving the ping-pong benchmark.  Each message is a
// pickle of (int64 send-time, int message-id, string payload).  The "hello"
// payload starts the perf timer; every other reflected payload is counted
// toward |count_down_| and its round-trip latency recorded.
class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(NULL),
        msg_count_(0),
        msg_size_(0),
        count_down_(0),
        latency_tracker_("Server messages") {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override {
    VLOG(1) << "Server listener down";
  }

  // Attaches the Sender (channel or proxy) used to reflect messages.
  // Must be called exactly once, before the message loop runs.
  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    std::string reflected_payload;
    EXPECT_TRUE(iter.ReadString(&reflected_payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (reflected_payload == "hello") {
      // Start timing on hello.
      latency_tracker_.Reset();
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%u",
                             label_.c_str(),
                             msg_count_,
                             static_cast<unsigned>(msg_size_));
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), reflected_payload.size());

      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        latency_tracker_.ShowResults();
        base::MessageLoop::current()->QuitWhenIdle();
        return true;
      }
    }

    // Send another timed message to keep the ping-pong going.
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(count_down_);
    msg->WriteString(payload_);
    sender_->Send(msg);
    return true;
  }

 private:
  std::string label_;  // Prefix for the reported perf-test name.
  Sender* sender_;     // Not owned.
  int msg_count_;      // Messages per benchmark run.
  size_t msg_size_;    // Payload bytes per message.

  int count_down_;     // Replies still expected in the current run.
  std::string payload_;
  EventTimeTracker latency_tracker_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;
};
162
// This channel listener just replies to all messages with the exact same
// message. It assumes each message has one string parameter. When the string
// "quit" is sent, it will exit.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener()
      : channel_(NULL),
        latency_tracker_("Client messages") {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override {
    VLOG(1) << "Client listener down";
    latency_tracker_.ShowResults();
  }

  // Attaches the channel used to echo messages.  Must be called exactly
  // once, before the message loop runs.
  void Init(Channel* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    base::StringPiece payload;
    EXPECT_TRUE(iter.ReadStringPiece(&payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (payload == "hello") {
      latency_tracker_.Reset();
    } else if (payload == "quit") {
      latency_tracker_.ShowResults();
      base::MessageLoop::current()->QuitWhenIdle();
      // "quit" is not echoed back; the loop exits once idle.
      return true;
    } else {
      // Don't track hello and quit messages.
      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);
    }

    // Echo the message back with a fresh send timestamp.
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(msgid);
    msg->WriteString(payload);
    channel_->Send(msg);
    return true;
  }

 private:
  Channel* channel_;  // Not owned.
  EventTimeTracker latency_tracker_;
};
222
// This class locks the current thread to a particular CPU core. This is
// important because otherwise the different threads and processes of these
// tests end up on different CPU cores which means that all of the cores are
// lightly loaded so the OS (Windows and Linux) fails to ramp up the CPU
// frequency, leading to unpredictable and often poor performance.
class LockThreadAffinity {
 public:
  explicit LockThreadAffinity(int cpu_number) : affinity_set_ok_(false) {
#if defined(OS_WIN)
    const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
    old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
    affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu_number, &cpuset);
    // Save the current mask first so the destructor can restore it.
    auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, get_result);
    auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
    // Check for get_result failure, even though it should always succeed.
    affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
    // Harmless on machines with fewer cores than |cpu_number|; just warn.
    if (!affinity_set_ok_)
      LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
  }

  // Restores the affinity mask saved by the constructor, if any.
  ~LockThreadAffinity() {
    if (!affinity_set_ok_)
      return;
#if defined(OS_WIN)
    auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
    DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
    auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, set_result);
#endif
  }

 private:
  bool affinity_set_ok_;
#if defined(OS_WIN)
  DWORD_PTR old_affinity_;
#elif defined(OS_LINUX)
  cpu_set_t old_cpuset_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LockThreadAffinity);
};
271
// Bundles one benchmark configuration: the payload size of each message and
// how many messages to exchange.
class PingPongTestParams {
 public:
  PingPongTestParams(size_t size, int count) : size_(size), count_(count) {}

  size_t message_size() const { return size_; }
  int message_count() const { return count_; }

 private:
  size_t size_;
  int count_;
};
285
286std::vector<PingPongTestParams> GetDefaultTestParams() {
287 // Test several sizes. We use 12^N for message size, and limit the message
288 // count to keep the test duration reasonable.
289#ifdef NDEBUG
290 const int kMultiplier = 100;
291#else
292 // Debug builds on Windows run these tests orders of magnitude more slowly.
293 const int kMultiplier = 1;
294#endif
295 std::vector<PingPongTestParams> list;
296 list.push_back(PingPongTestParams(12, 500 * kMultiplier));
297 list.push_back(PingPongTestParams(144, 500 * kMultiplier));
298 list.push_back(PingPongTestParams(1728, 500 * kMultiplier));
299 list.push_back(PingPongTestParams(20736, 120 * kMultiplier));
300 list.push_back(PingPongTestParams(248832, 10 * kMultiplier));
301 return list;
302}
303
// Core that all benchmark threads/processes are pinned to (see
// LockThreadAffinity).  Avoid core 0 due to conflicts with Intel's Power
// Gadget.  Setting thread affinity will fail harmlessly on single/dual core
// machines.
const int kSharedCore = 2;
307
308class MojoChannelPerfTest : public IPCChannelMojoTestBase {
309 public:
310 MojoChannelPerfTest() = default;
311 ~MojoChannelPerfTest() override = default;
312
313 void RunTestChannelPingPong() {
314 Init("MojoPerfTestClient");
315
316 // Set up IPC channel and start client.
317 PerformanceChannelListener listener("Channel");
318 CreateChannel(&listener);
319 listener.Init(channel());
320 ASSERT_TRUE(ConnectChannel());
321
322 LockThreadAffinity thread_locker(kSharedCore);
323 std::vector<PingPongTestParams> params = GetDefaultTestParams();
324 for (size_t i = 0; i < params.size(); i++) {
325 listener.SetTestParams(params[i].message_count(),
326 params[i].message_size());
327
328 // This initial message will kick-start the ping-pong of messages.
329 Message* message =
330 new Message(0, 2, Message::PRIORITY_NORMAL);
331 message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
332 message->WriteInt(-1);
333 message->WriteString("hello");
334 sender()->Send(message);
335
336 // Run message loop.
337 base::RunLoop().Run();
338 }
339
340 // Send quit message.
341 Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
342 message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
343 message->WriteInt(-1);
344 message->WriteString("quit");
345 sender()->Send(message);
346
347 EXPECT_TRUE(WaitForClientShutdown());
348 DestroyChannel();
349}
350
351 void RunTestChannelProxyPingPong() {
352 io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));
353
354 Init("MojoPerfTestClient");
355
356 // Set up IPC channel and start client.
357 PerformanceChannelListener listener("ChannelProxy");
358 auto channel_proxy = IPC::ChannelProxy::Create(
359 TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
360 io_thread_->task_runner());
361 listener.Init(channel_proxy.get());
362
363 LockThreadAffinity thread_locker(kSharedCore);
364 std::vector<PingPongTestParams> params = GetDefaultTestParams();
365 for (size_t i = 0; i < params.size(); i++) {
366 listener.SetTestParams(params[i].message_count(),
367 params[i].message_size());
368
369 // This initial message will kick-start the ping-pong of messages.
370 Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
371 message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
372 message->WriteInt(-1);
373 message->WriteString("hello");
374 channel_proxy->Send(message);
375
376 // Run message loop.
377 base::RunLoop().Run();
378 }
379
380 // Send quit message.
381 Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
382 message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
383 message->WriteInt(-1);
384 message->WriteString("quit");
385 channel_proxy->Send(message);
386
387 EXPECT_TRUE(WaitForClientShutdown());
388 channel_proxy.reset();
389
390 io_thread_.reset();
391 }
392
393 scoped_refptr<base::TaskRunner> io_task_runner() {
394 if (io_thread_)
395 return io_thread_->task_runner();
396 return base::ThreadTaskRunnerHandle::Get();
397 }
398
399 private:
400 std::unique_ptr<base::TestIOThread> io_thread_;
morrita373af03b2014-09-09 19:35:24401};
402
TEST_F(MojoChannelPerfTest, ChannelPingPong) {
  RunTestChannelPingPong();

  // Drain any tasks still queued on the main loop before tearing down.
  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}
409
TEST_F(MojoChannelPerfTest, ChannelProxyPingPong) {
  RunTestChannelProxyPingPong();

  // Drain any tasks still queued on the main loop before tearing down.
  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}
416
jam76bcf0c2015-10-02 21:01:28417// Test to see how many channels we can create.
418TEST_F(MojoChannelPerfTest, DISABLED_MaxChannelCount) {
419#if defined(OS_POSIX)
420 LOG(INFO) << "base::GetMaxFds " << base::GetMaxFds();
421 base::SetFdLimit(20000);
422#endif
423
rockotc637caf9b2016-02-10 09:57:08424 std::vector<mojo::edk::PlatformChannelPair*> channels;
jam76bcf0c2015-10-02 21:01:28425 for (size_t i = 0; i < 10000; ++i) {
426 LOG(INFO) << "channels size: " << channels.size();
rockotc637caf9b2016-02-10 09:57:08427 channels.push_back(new mojo::edk::PlatformChannelPair());
jam76bcf0c2015-10-02 21:01:28428 }
429}
430
jamca86c9e2017-01-06 19:55:57431class MojoPerfTestClient {
morrita373af03b2014-09-09 19:35:24432 public:
jamca86c9e2017-01-06 19:55:57433 MojoPerfTestClient()
434 : listener_(new ChannelReflectorListener()) {
435 mojo::edk::test::MultiprocessTestHelper::ChildSetup();
436 }
morrita373af03b2014-09-09 19:35:24437
jamca86c9e2017-01-06 19:55:57438 ~MojoPerfTestClient() = default;
morrita373af03b2014-09-09 19:35:24439
jamca86c9e2017-01-06 19:55:57440 int Run(MojoHandle handle) {
441 handle_ = mojo::MakeScopedHandle(mojo::MessagePipeHandle(handle));
442 LockThreadAffinity thread_locker(kSharedCore);
443 std::unique_ptr<Channel> channel = ChannelMojo::Create(
444 std::move(handle_), Channel::MODE_CLIENT, listener_.get());
445 listener_->Init(channel.get());
446 CHECK(channel->Connect());
sammce4d0abd2016-03-07 22:38:04447
jamca86c9e2017-01-06 19:55:57448 base::RunLoop().Run();
449 return 0;
450 }
sammc57ed9f982016-03-10 06:28:35451
sammce4d0abd2016-03-07 22:38:04452 private:
jamca86c9e2017-01-06 19:55:57453 base::MessageLoopForIO main_message_loop_;
454 std::unique_ptr<ChannelReflectorListener> listener_;
455 std::unique_ptr<Channel> channel_;
sammc57ed9f982016-03-10 06:28:35456 mojo::ScopedMessagePipeHandle handle_;
morrita373af03b2014-09-09 19:35:24457};
458
// Entry point of the child process spawned for the Channel/ChannelProxy
// ping-pong tests above.
MULTIPROCESS_TEST_MAIN(MojoPerfTestClientTestChildMain) {
  MojoPerfTestClient client;
  int rv = mojo::edk::test::MultiprocessTestHelper::RunClientMain(
      base::Bind(&MojoPerfTestClient::Run, base::Unretained(&client)));

  // Drain any remaining tasks before the child exits.
  base::RunLoop run_loop;
  run_loop.RunUntilIdle();

  return rv;
}
469
// Implementation of the IPC::mojom::Reflector interface used by the client
// side of the interface benchmarks: echoes each Ping value back through its
// callback, and quits the message loop on Quit.
class ReflectorImpl : public IPC::mojom::Reflector {
 public:
  explicit ReflectorImpl(mojo::ScopedMessagePipeHandle handle)
      : binding_(this, std::move(handle)) {}
  ~ReflectorImpl() override {
    // Releases (does not close) the pipe handle on destruction.
    // NOTE(review): presumably the raw handle is still owned by the test
    // harness that supplied it — confirm before changing this.
    ignore_result(binding_.Unbind().PassMessagePipe().release());
  }

 private:
  // IPC::mojom::Reflector:
  void Ping(const std::string& value, const PingCallback& callback) override {
    callback.Run(value);
  }

  void Quit() override {
    base::MessageLoop::current()->QuitWhenIdle();
  }

  mojo::Binding<IPC::mojom::Reflector> binding_;
};
490
// Driver for the Mojo-interface (IPC::mojom::Reflector) flavor of the
// ping-pong benchmark.  The server pings payloads through the interface and
// times round trips; the client (ReflectorImpl) echoes them back.
class MojoInterfacePerfTest : public mojo::edk::test::MojoTestBase {
 public:
  MojoInterfacePerfTest() : message_count_(0), count_down_(0) {}

 protected:
  // Binds |mp| to a ReflectorPtr, runs one benchmark per parameter set, and
  // finally tells the client to quit.  |label| becomes part of the reported
  // perf-test name.
  void RunPingPongServer(MojoHandle mp, const std::string& label) {
    label_ = label;

    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
    ping_receiver_.Bind(IPC::mojom::ReflectorPtrInfo(
        std::move(scoped_mp), 0u));

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      // The "hello" ping starts the perf timer in OnPong.
      ping_receiver_->Ping(
          "hello",
          base::Bind(&MojoInterfacePerfTest::OnPong, base::Unretained(this)));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }

    ping_receiver_->Quit();

    // Release (without closing) the underlying pipe handle.
    // NOTE(review): presumably the raw handle remains owned by the caller —
    // confirm.
    ignore_result(ping_receiver_.PassInterface().PassHandle().release());
  }

  // Reply callback: starts the timer on "hello", counts down payload echoes,
  // and quits the loop when the current run completes.
  void OnPong(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%zu",
                             label_.c_str(),
                             message_count_,
                             payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    ping_receiver_->Ping(
        payload_,
        base::Bind(&MojoInterfacePerfTest::OnPong, base::Unretained(this)));
  }

  // Client side: services Reflector calls until Quit is received.
  static int RunPingPongClient(MojoHandle mp) {
    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);

    // In single process mode, this is running in a task and by default other
    // tasks (in particular, the binding) won't run. To keep the single process
    // and multi-process code paths the same, enable nestable tasks.
    base::MessageLoop::ScopedNestableTaskAllower nest_loop(
        base::MessageLoop::current());

    LockThreadAffinity thread_locker(kSharedCore);
    ReflectorImpl impl(std::move(scoped_mp));
    base::RunLoop().Run();
    return 0;
  }

 private:
  int message_count_;  // Messages in the current run.
  int count_down_;     // Replies still outstanding.
  std::string label_;  // Perf-test name prefix.
  std::string payload_;
  IPC::mojom::ReflectorPtr ping_receiver_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(MojoInterfacePerfTest);
};
573
// Client entry point for the multiprocess interface ping-pong test below.
DEFINE_TEST_CLIENT_WITH_PIPE(PingPongClient, MojoInterfacePerfTest, h) {
  base::MessageLoop main_message_loop;
  return RunPingPongClient(h);
}
578
// Similar to MojoChannelPerfTest above, but uses a Mojo interface instead of
// raw IPC::Messages.
TEST_F(MojoInterfacePerfTest, MultiprocessPingPong) {
  RUN_CHILD_ON_PIPE(PingPongClient, h)
    base::MessageLoop main_message_loop;
    RunPingPongServer(h, "MultiProcess");
  END_CHILD()
}
587
588// A single process version of the above test.
jamf9fa5b82017-01-14 00:28:02589TEST_F(MojoInterfacePerfTest, SingleProcessMultiThreadPingPong) {
jamca86c9e2017-01-06 19:55:57590 MojoHandle server_handle, client_handle;
591 CreateMessagePipe(&server_handle, &client_handle);
592
593 base::Thread client_thread("PingPongClient");
594 client_thread.Start();
595 client_thread.task_runner()->PostTask(
596 FROM_HERE,
597 base::Bind(base::IgnoreResult(&RunPingPongClient), client_handle));
598
jamf9fa5b82017-01-14 00:28:02599 base::MessageLoop main_message_loop;
600 RunPingPongServer(server_handle, "SingleProcess");
601}
602
603TEST_F(MojoInterfacePerfTest, SingleProcessSingleThreadPingPong) {
604 MojoHandle server_handle, client_handle;
605 CreateMessagePipe(&server_handle, &client_handle);
606
607 base::MessageLoop main_message_loop;
608 mojo::MessagePipeHandle mp_handle(client_handle);
609 mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
610 LockThreadAffinity thread_locker(kSharedCore);
611 ReflectorImpl impl(std::move(scoped_mp));
612
jamca86c9e2017-01-06 19:55:57613 RunPingPongServer(server_handle, "SingleProcess");
614}
615
616class CallbackPerfTest : public testing::Test {
617 public:
618 CallbackPerfTest()
619 : client_thread_("PingPongClient"), message_count_(0), count_down_(0) {}
620
621 protected:
jamf9fa5b82017-01-14 00:28:02622 void RunMultiThreadPingPongServer() {
jamca86c9e2017-01-06 19:55:57623 client_thread_.Start();
624
625 LockThreadAffinity thread_locker(kSharedCore);
626 std::vector<PingPongTestParams> params = GetDefaultTestParams();
627 for (size_t i = 0; i < params.size(); i++) {
628 std::string hello("hello");
629 client_thread_.task_runner()->PostTask(
630 FROM_HERE,
631 base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), hello));
632 message_count_ = count_down_ = params[i].message_count();
633 payload_ = std::string(params[i].message_size(), 'a');
634
635 base::RunLoop().Run();
636 }
637 }
638
639 void Ping(const std::string& value) {
jamf9fa5b82017-01-14 00:28:02640 main_message_loop_.task_runner()->PostTask(
jamca86c9e2017-01-06 19:55:57641 FROM_HERE,
642 base::Bind(&CallbackPerfTest::OnPong, base::Unretained(this),
643 value));
644 }
645
646 void OnPong(const std::string& value) {
647 if (value == "hello") {
648 DCHECK(!perf_logger_.get());
649 std::string test_name =
jamf9fa5b82017-01-14 00:28:02650 base::StringPrintf("Callback_MultiProcess_Perf_%dx_%zu",
jamca86c9e2017-01-06 19:55:57651 message_count_,
652 payload_.size());
653 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
654 } else {
655 DCHECK_EQ(payload_.size(), value.size());
656
657 CHECK(count_down_ > 0);
658 count_down_--;
659 if (count_down_ == 0) {
660 perf_logger_.reset();
661 base::MessageLoop::current()->QuitWhenIdle();
662 return;
663 }
664 }
665
666 client_thread_.task_runner()->PostTask(
667 FROM_HERE,
668 base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), payload_));
669 }
670
jamf9fa5b82017-01-14 00:28:02671 void RunSingleThreadNoPostTaskPingPongServer() {
672 LockThreadAffinity thread_locker(kSharedCore);
673 std::vector<PingPongTestParams> params = GetDefaultTestParams();
674 base::Callback<void(const std::string&,
675 const base::Callback<void(const std::string&)>&)> ping =
676 base::Bind(&CallbackPerfTest::SingleThreadPingNoPostTask,
677 base::Unretained(this));
678 for (size_t i = 0; i < params.size(); i++) {
679 payload_ = std::string(params[i].message_size(), 'a');
680 std::string test_name =
681 base::StringPrintf("Callback_SingleThreadPostTask_Perf_%dx_%zu",
682 params[i].message_count(),
683 payload_.size());
684 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
685 for (int j = 0; j < params[i].message_count(); ++j) {
686 ping.Run(payload_,
687 base::Bind(&CallbackPerfTest::SingleThreadPongNoPostTask,
688 base::Unretained(this)));
689 }
690 perf_logger_.reset();
691 }
692 }
693
694 void SingleThreadPingNoPostTask(const std::string& value,
695 const base::Callback<void(const std::string&)>& pong) {
696 pong.Run(value);
697 }
698
699 void SingleThreadPongNoPostTask(const std::string& value) {
700 }
701
702 void RunSingleThreadPostTaskPingPongServer() {
703 LockThreadAffinity thread_locker(kSharedCore);
704 std::vector<PingPongTestParams> params = GetDefaultTestParams();
705 for (size_t i = 0; i < params.size(); i++) {
706 std::string hello("hello");
707 base::MessageLoop::current()->task_runner()->PostTask(
708 FROM_HERE,
709 base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
710 base::Unretained(this), hello));
711 message_count_ = count_down_ = params[i].message_count();
712 payload_ = std::string(params[i].message_size(), 'a');
713
714 base::RunLoop().Run();
715 }
716 }
717
718 void SingleThreadPingPostTask(const std::string& value) {
719 base::MessageLoop::current()->task_runner()->PostTask(
720 FROM_HERE,
721 base::Bind(&CallbackPerfTest::SingleThreadPongPostTask,
722 base::Unretained(this),
723 value));
724 }
725
726 void SingleThreadPongPostTask(const std::string& value) {
727 if (value == "hello") {
728 DCHECK(!perf_logger_.get());
729 std::string test_name =
730 base::StringPrintf("Callback_SingleThreadNoPostTask_Perf_%dx_%zu",
731 message_count_,
732 payload_.size());
733 perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
734 } else {
735 DCHECK_EQ(payload_.size(), value.size());
736
737 CHECK(count_down_ > 0);
738 count_down_--;
739 if (count_down_ == 0) {
740 perf_logger_.reset();
741 base::MessageLoop::current()->QuitWhenIdle();
742 return;
743 }
744 }
745
746 base::MessageLoop::current()->task_runner()->PostTask(
747 FROM_HERE,
748 base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
749 base::Unretained(this), payload_));
750 }
751
jamca86c9e2017-01-06 19:55:57752 private:
753 base::Thread client_thread_;
jamf9fa5b82017-01-14 00:28:02754 base::MessageLoop main_message_loop_;
jamca86c9e2017-01-06 19:55:57755 int message_count_;
756 int count_down_;
757 std::string payload_;
758 std::unique_ptr<base::PerfTimeLogger> perf_logger_;
759
760 DISALLOW_COPY_AND_ASSIGN(CallbackPerfTest);
761};
762
// Sends the same data as above using PostTask to a different thread instead
// of IPCs, for comparison against the IPC-based benchmarks.
TEST_F(CallbackPerfTest, MultiThreadPingPong) {
  RunMultiThreadPingPongServer();
}
768
// Sends the same data as above using PostTask to the same thread.
TEST_F(CallbackPerfTest, SingleThreadPostTaskPingPong) {
  RunSingleThreadPostTaskPingPongServer();
}
773
// Sends the same data as above without using PostTask, invoking the
// callbacks directly on the same thread.
TEST_F(CallbackPerfTest, SingleThreadNoPostTaskPingPong) {
  RunSingleThreadNoPostTaskPingPongServer();
}
778
morrita373af03b2014-09-09 19:35:24779} // namespace
sammce4d0abd2016-03-07 22:38:04780} // namespace IPC