proxy.cpp
// Copyright (c) The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <mp/proxy.h>

#include <mp/proxy-io.h>
#include <mp/proxy-types.h>
#include <mp/proxy.capnp.h>
#include <mp/type-threadmap.h>
#include <mp/util.h>

#include <atomic>
#include <capnp/capability.h>
#include <capnp/rpc.h>
#include <condition_variable>
#include <functional>
#include <future>
#include <kj/async.h>
#include <kj/async-io.h>
#include <kj/async-prelude.h>
#include <kj/common.h>
#include <kj/debug.h>
#include <kj/function.h>
#include <kj/memory.h>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <stdexcept>
#include <string>
#include <sys/socket.h>
#include <thread>
#include <tuple>
#include <unistd.h>
#include <utility>

namespace mp {

thread_local ThreadContext g_thread_context;

void LoggingErrorHandler::taskFailed(kj::Exception&& exception)
{
    KJ_LOG(ERROR, "Uncaught exception in daemonized task.", exception);
    m_loop.log() << "Uncaught exception in daemonized task.";
}

EventLoopRef::EventLoopRef(EventLoop& loop, Lock* lock) : m_loop(&loop), m_lock(lock)
{
    auto loop_lock{PtrOrValue{m_lock, m_loop->m_mutex}};
    loop_lock->assert_locked(m_loop->m_mutex);
    m_loop->m_num_clients += 1;
}

// Due to the conditionals in this function, MP_NO_TSA is required to avoid
// the error "error: mutex 'loop_lock' is not held on every path through here
// [-Wthread-safety-analysis]"
void EventLoopRef::reset(bool relock) MP_NO_TSA
{
    if (auto* loop{m_loop}) {
        m_loop = nullptr;
        auto loop_lock{PtrOrValue{m_lock, loop->m_mutex}};
        loop_lock->assert_locked(loop->m_mutex);
        assert(loop->m_num_clients > 0);
        loop->m_num_clients -= 1;
        if (loop->done()) {
            loop->m_cv.notify_all();
            int post_fd{loop->m_post_fd};
            loop_lock->unlock();
            char buffer = 0;
            KJ_SYSCALL(write(post_fd, &buffer, 1)); // NOLINT(bugprone-suspicious-semicolon)
            // By default, do not try to relock `loop_lock` after writing,
            // because the event loop could wake up and destroy itself and the
            // mutex might no longer exist.
            if (relock) loop_lock->lock();
        }
    }
}
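
// Illustrative sketch (not part of the original file): EventLoopRef acts as a
// scoped reference count on an EventLoop. The hypothetical helper below keeps
// the loop's m_num_clients nonzero while `work` runs; when the reference goes
// out of scope it is released (the class is documented as automatically
// managing m_num_clients, which is assumed here to happen in its destructor,
// equivalent to calling reset()).
namespace {
[[maybe_unused]] void ExampleKeepLoopAlive(EventLoop& loop, const std::function<void()>& work)
{
    EventLoopRef ref{loop}; // takes the loop mutex internally and bumps m_num_clients
    work();
}                           // releasing the reference may wake the loop so it can exit
} // namespace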

ProxyContext::ProxyContext(Connection* connection) : connection(connection), loop{*connection->m_loop} {}

Connection::~Connection()
{
    // Shut down the RPC system first, since this will garbage collect any
    // ProxyServer objects that were not freed before the connection was closed.
    // Typically all ProxyServer objects associated with this connection will be
    // freed before this call returns. However, that will not be the case if
    // there are asynchronous IPC calls over this connection still currently
    // executing. In that case, Cap'n Proto will destroy the ProxyServer objects
    // after the calls finish.
    m_rpc_system.reset();

    // ProxyClient cleanup handlers are in the sync list, and ProxyServer
    // cleanup handlers are in the async list.
    //
    // The ProxyClient cleanup handlers are synchronous because they are fast
    // and don't do anything besides release capnp resources and reset state so
    // future calls to client methods immediately throw exceptions instead of
    // trying to communicate across the socket. The synchronous callbacks set
    // ProxyClient capability pointers to null, so new method calls on client
    // objects fail without triggering i/o or relying on the event loop, which
    // may go out of scope or trigger obscure capnp i/o errors.
    //
    // The ProxyServer cleanup handlers call user-defined destructors on server
    // objects, which can run arbitrary blocking bitcoin code, so they have to
    // run asynchronously in a different thread. The asynchronous cleanup
    // functions intentionally aren't started until after the synchronous
    // cleanup functions run, so client objects are fully disconnected before
    // bitcoin code in the destructors is run. This way, if the bitcoin code
    // tries to make client requests, the requests will just fail immediately
    // instead of sending i/o or accessing the event loop.
    //
    // The context where Connection objects are destroyed and this destructor is
    // invoked differs depending on whether this is an outgoing connection being
    // used to make an Init.makeX() call (e.g. Init.makeNode or
    // Init.makeWalletClient) or an incoming connection implementing the Init
    // interface and handling the Init.makeX() calls.
    //
    // Either way, when a connection is closed, capnp behavior is to call all
    // ProxyServer object destructors first, and then trigger an onDisconnect
    // callback.
    //
    // On the incoming side of the connection, the onDisconnect callback is
    // written to delete the Connection object from m_incoming_connections and
    // call this destructor, which calls Connection::disconnect.
    //
    // On the outgoing side, the Connection object is owned by the top-level
    // client object, which the onDisconnect handler doesn't have ready access
    // to, so the onDisconnect handler just calls Connection::disconnect
    // directly instead.
    //
    // Either way, the disconnect code runs in the event loop thread and is
    // called on both clean and unclean shutdowns. In the unclean shutdown case,
    // when the connection is broken, the sync and async cleanup lists will be
    // filled with callbacks. In the clean shutdown case both lists will be
    // empty.
    Lock lock{m_loop->m_mutex};
    while (!m_sync_cleanup_fns.empty()) {
        CleanupList fn;
        fn.splice(fn.begin(), m_sync_cleanup_fns, m_sync_cleanup_fns.begin());
        Unlock(lock, fn.front());
    }
}
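
// Illustrative sketch (not part of the original file): the difference between
// the two cleanup lists described above. A sync cleanup only resets local
// client state, so it is safe to run inline on the event loop thread; an
// async cleanup may run arbitrary blocking destructor code, so it is handed
// to the EventLoop's async worker thread. `client_capability` and
// `server_impl` are hypothetical stand-ins for generated proxy state.
namespace {
[[maybe_unused]] void ExampleRegisterCleanups(Connection& connection,
                                              std::shared_ptr<void>& client_capability,
                                              std::shared_ptr<void> server_impl)
{
    // Sync: cheap and non-blocking, runs in ~Connection() on the event loop thread.
    connection.addSyncCleanup([&client_capability] { client_capability.reset(); });
    // Async: may block in user destructors, runs on the EventLoop worker thread.
    connection.m_loop->addAsyncCleanup([server_impl = std::move(server_impl)]() mutable { server_impl.reset(); });
}
} // namespace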

CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
{
    const Lock lock(m_loop->m_mutex);
    // Add cleanup callbacks to the front of the list, so sync cleanup functions
    // run in LIFO order. This is a good approach because sync cleanup functions
    // are added as client objects are created, and it is natural to clean up
    // objects in the reverse order they were created. In practice, however, the
    // order should not be significant because the cleanup callbacks run
    // synchronously in a single batch when the connection is broken, and they
    // only reset the connection pointers in the client objects without actually
    // deleting the client objects.
    return m_sync_cleanup_fns.emplace(m_sync_cleanup_fns.begin(), std::move(fn));
}

void Connection::removeSyncCleanup(CleanupIt it)
{
    const Lock lock(m_loop->m_mutex);
    m_sync_cleanup_fns.erase(it);
}
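
// Illustrative sketch (not part of the original file): addSyncCleanup()
// returns the list iterator of the inserted callback, which stays valid until
// the element is removed, so an owner destroyed before the connection can
// cancel its pending cleanup in O(1) with removeSyncCleanup(). `ExampleOwner`
// is a hypothetical client-side object mirroring the ProxyClient<Thread>
// pattern further below.
namespace {
class ExampleOwner
{
public:
    explicit ExampleOwner(Connection& connection) : m_connection(&connection)
    {
        m_cleanup = m_connection->addSyncCleanup([this] {
            // Connection is going away first: drop the handle so the
            // destructor below does not try to unregister it again.
            m_cleanup.reset();
            m_connection = nullptr;
        });
    }
    ~ExampleOwner()
    {
        // Owner is going away first: cancel the pending cleanup.
        if (m_cleanup) m_connection->removeSyncCleanup(*m_cleanup);
    }

private:
    Connection* m_connection;
    std::optional<CleanupIt> m_cleanup;
};
} // namespace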

void EventLoop::addAsyncCleanup(std::function<void()> fn)
{
    const Lock lock(m_mutex);
    // Add async cleanup callbacks to the back of the list. Unlike the sync
    // cleanup list, this list order is more significant because it determines
    // the order server objects are destroyed when there is a sudden disconnect,
    // and it is possible objects may need to be destroyed in a certain order.
    // This function is called in ProxyServerBase destructors, and since capnp
    // destroys ProxyServer objects in LIFO order, we should preserve this
    // order, adding cleanup callbacks to the end of the list so they can be run
    // starting from the beginning of the list.
    //
    // In bitcoin core, running these callbacks in the right order is
    // particularly important for the wallet process, because it uses blocking
    // shared_ptrs and requires Chain::Notification pointers owned by the node
    // process to be destroyed before the WalletLoader objects owned by the node
    // process; otherwise shared pointer counts of the CWallet objects (which
    // inherit from Chain::Notification) will not be 1 when the WalletLoader
    // destructor runs and it will wait forever for them to be released.
    m_async_fns->emplace_back(std::move(fn));
    startAsyncThread();
}
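
// Illustrative sketch (not part of the original file): because async cleanup
// callbacks are appended to the back of the list and the worker thread pops
// from the front, they run in the same order addAsyncCleanup() was called, so
// capnp's LIFO destruction order of ProxyServer objects is preserved. `loop`
// is assumed to be a live EventLoop; destroy_a and destroy_b are hypothetical
// cleanup steps where A must complete before B.
namespace {
[[maybe_unused]] void ExampleOrderedAsyncCleanup(EventLoop& loop,
                                                 std::function<void()> destroy_a,
                                                 std::function<void()> destroy_b)
{
    loop.addAsyncCleanup(std::move(destroy_a)); // runs first on the async worker thread
    loop.addAsyncCleanup(std::move(destroy_b)); // runs after destroy_a completes
}
} // namespace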

EventLoop::EventLoop(const char* exe_name, LogFn log_fn, void* context)
    : m_exe_name(exe_name),
      m_io_context(kj::setupAsyncIo()),
      m_task_set(new kj::TaskSet(m_error_handler)),
      m_context(context)
{
    m_log_opts.log_fn = log_fn;
    int fds[2];
    KJ_SYSCALL(socketpair(AF_UNIX, SOCK_STREAM, 0, fds));
    m_wait_fd = fds[0];
    m_post_fd = fds[1];
}
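
// Illustrative sketch (not part of the original file): the constructor above
// creates a socketpair that is used purely as a wake-up channel. One side
// (m_post_fd) is written with a single byte to wake the loop, the other
// (m_wait_fd) is wrapped in a kj stream and read inside loop(). The
// hypothetical standalone snippet below shows the same pattern with plain
// POSIX calls.
namespace {
[[maybe_unused]] void ExampleWakeupChannel()
{
    int fds[2];
    KJ_SYSCALL(socketpair(AF_UNIX, SOCK_STREAM, 0, fds));
    const int wait_fd{fds[0]}, post_fd{fds[1]};
    char buffer = 0;
    KJ_SYSCALL(write(post_fd, &buffer, 1)); // "post" side: wake the reader
    KJ_SYSCALL(read(wait_fd, &buffer, 1));  // "wait" side: consume the wake-up byte
    KJ_SYSCALL(::close(post_fd));
    KJ_SYSCALL(::close(wait_fd));
}
} // namespace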

EventLoop::~EventLoop()
{
    if (m_async_thread.joinable()) m_async_thread.join();
    const Lock lock(m_mutex);
    KJ_ASSERT(m_post_fn == nullptr);
    KJ_ASSERT(!m_async_fns);
    KJ_ASSERT(m_wait_fd == -1);
    KJ_ASSERT(m_post_fd == -1);
    KJ_ASSERT(m_num_clients == 0);

    // Spin the event loop and wait for any promises triggered by RPC shutdown.
    // auto cleanup = kj::evalLater([]{});
    // cleanup.wait(m_io_context.waitScope);
}

void EventLoop::loop()
{
    assert(!g_thread_context.loop_thread);
    g_thread_context.loop_thread = true;
    KJ_DEFER(g_thread_context.loop_thread = false);

    {
        const Lock lock(m_mutex);
        assert(!m_async_fns);
        m_async_fns.emplace();
    }

    kj::Own<kj::AsyncIoStream> wait_stream{
        m_io_context.lowLevelProvider->wrapSocketFd(m_wait_fd, kj::LowLevelAsyncIoProvider::TAKE_OWNERSHIP)};
    int post_fd{m_post_fd};
    char buffer = 0;
    for (;;) {
        const size_t read_bytes = wait_stream->read(&buffer, 0, 1).wait(m_io_context.waitScope);
        if (read_bytes != 1) throw std::logic_error("EventLoop wait_stream closed unexpectedly");
        Lock lock(m_mutex);
        if (m_post_fn) {
            Unlock(lock, *m_post_fn);
            m_post_fn = nullptr;
            m_cv.notify_all();
        } else if (done()) {
            // Intentionally do not break if m_post_fn was set, even if done()
            // would return true, to ensure that the EventLoopRef write(post_fd)
            // call always succeeds and the loop does not exit between the time
            // that the done condition is set and the write call is made.
            break;
        }
    }
    log() << "EventLoop::loop done, cancelling event listeners.";
    m_task_set.reset();
    log() << "EventLoop::loop bye.";
    wait_stream = nullptr;
    KJ_SYSCALL(::close(post_fd));
    const Lock lock(m_mutex);
    m_wait_fd = -1;
    m_post_fd = -1;
    m_async_fns.reset();
    m_cv.notify_all();
}
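
// Illustrative sketch (not part of the original file): how the loop above is
// normally driven to exit. While any EventLoopRef is alive, done() stays
// false and loop() keeps waiting on the wake-up socket; releasing the last
// reference decrements m_num_clients, writes a wake-up byte, and lets loop()
// observe done() and return. Assumes loop() is already running on another
// thread and no async cleanups are pending.
namespace {
[[maybe_unused]] void ExampleShutdown(EventLoop& loop)
{
    EventLoopRef ref{loop}; // loop cannot exit while this reference exists
    // ... use the loop: make connections, post() work, etc. ...
    ref.reset();            // last reference gone: loop() wakes up and returns
}
} // namespace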

void EventLoop::post(kj::Function<void()> fn)
{
    if (std::this_thread::get_id() == m_thread_id) {
        fn();
        return;
    }
    Lock lock(m_mutex);
    EventLoopRef ref(*this, &lock);
    m_cv.wait(lock.m_lock, [this]() MP_REQUIRES(m_mutex) { return m_post_fn == nullptr; });
    m_post_fn = &fn;
    int post_fd{m_post_fd};
    Unlock(lock, [&] {
        char buffer = 0;
        KJ_SYSCALL(write(post_fd, &buffer, 1));
    });
    m_cv.wait(lock.m_lock, [this, &fn]() MP_REQUIRES(m_mutex) { return m_post_fn != &fn; });
}
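
// Illustrative sketch (not part of the original file): post() is a synchronous
// handoff. If the caller is already on the event loop thread the function runs
// inline; otherwise the caller blocks until the event loop thread has picked
// up m_post_fn, run it, and cleared it. The hypothetical helper below relies
// on that to read state that must only be touched on the event loop thread.
namespace {
[[maybe_unused]] std::string ExampleReadOnLoopThread(EventLoop& loop, const std::function<std::string()>& getter)
{
    std::string result;
    loop.post([&] { result = getter(); }); // safe: post() returns only after the lambda ran
    return result;
}
} // namespace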

void EventLoop::startAsyncThread()
{
    assert(std::this_thread::get_id() == m_thread_id);
    if (m_async_thread.joinable()) {
        // Notify to wake up the async thread if it is already running.
        m_cv.notify_all();
    } else if (!m_async_fns->empty()) {
        m_async_thread = std::thread([this] {
            Lock lock(m_mutex);
            while (m_async_fns) {
                if (!m_async_fns->empty()) {
                    EventLoopRef ref{*this, &lock};
                    const std::function<void()> fn = std::move(m_async_fns->front());
                    m_async_fns->pop_front();
                    Unlock(lock, fn);
                    // Important to relock because of the wait() call below.
                    ref.reset(/*relock=*/true);
                    // Continue without waiting in case there are more async_fns.
                    continue;
                }
                m_cv.wait(lock.m_lock);
            }
        });
    }
}

bool EventLoop::done() const
{
    assert(m_num_clients >= 0);
    return m_num_clients == 0 && m_async_fns->empty();
}

std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex, Connection* connection, const std::function<Thread::Client()>& make_thread)
{
    const std::unique_lock<std::mutex> lock(mutex);
    auto thread = threads.find(connection);
    if (thread != threads.end()) return {thread, false};
    thread = threads.emplace(
        std::piecewise_construct, std::forward_as_tuple(connection),
        std::forward_as_tuple(make_thread(), connection, /* destroy_connection= */ false)).first;
    thread->second.setDisconnectCallback([&threads, &mutex, thread] {
        // Note: it is safe to use the `thread` iterator in this cleanup
        // function, because the iterator would only be invalid if the map entry
        // was removed, and if the map entry is removed the ProxyClient<Thread>
        // destructor unregisters the cleanup.

        // The connection is being destroyed before the thread client is, so
        // reset the thread client's m_disconnect_cb member so the thread client
        // destructor does not try to unregister this callback after the
        // connection is destroyed. Then remove the connection pointer about to
        // be destroyed from the map.
        const std::unique_lock<std::mutex> lock(mutex);
        thread->second.m_disconnect_cb.reset();
        threads.erase(thread);
    });
    return {thread, true};
}
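
// Illustrative sketch (not part of the original file): how a caller might use
// SetThread() to look up or lazily create the per-connection Thread client.
// `threads`, `mutex` and `connection` are assumed to live in the caller's
// thread/request state; `make_thread` is only invoked when no entry exists
// for this connection yet.
namespace {
[[maybe_unused]] ConnThread ExampleGetConnectionThread(
    ConnThreads& threads, std::mutex& mutex, Connection& connection,
    const std::function<Thread::Client()>& make_thread)
{
    const auto [it, created] = SetThread(threads, mutex, &connection, make_thread);
    if (created) {
        // First call for this connection: SetThread() invoked make_thread and
        // registered a disconnect callback that erases the entry later.
    }
    return it;
}
} // namespace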

ProxyClient<Thread>::~ProxyClient()
{
    // If the thread is being destroyed before the connection is destroyed,
    // remove the cleanup callback that was registered to handle the connection
    // being destroyed before the thread is destroyed.
    if (m_disconnect_cb) {
        m_context.connection->removeSyncCleanup(*m_disconnect_cb);
    }
}

void ProxyClient<Thread>::setDisconnectCallback(const std::function<void()>& fn)
{
    assert(fn);
    assert(!m_disconnect_cb);
    m_disconnect_cb = m_context.connection->addSyncCleanup(fn);
}

ProxyServer<Thread>::ProxyServer(ThreadContext& thread_context, std::thread&& thread)
    : m_thread_context(thread_context), m_thread(std::move(thread))
{
    assert(m_thread_context.waiter.get() != nullptr);
}

ProxyServer<Thread>::~ProxyServer()
{
    if (!m_thread.joinable()) return;
    // Stop the async thread and wait for it to exit. Need to wait because the
    // m_thread handle needs to outlive the thread to avoid a "terminate called
    // without an active exception" error. An alternative to waiting would be to
    // detach the thread, but this would introduce nondeterminism which could
    // make code harder to debug or extend.
    assert(m_thread_context.waiter.get());
    std::unique_ptr<Waiter> waiter;
    {
        const std::unique_lock<std::mutex> lock(m_thread_context.waiter->m_mutex);
        // Reset the thread context waiter pointer as a shutdown signal for the
        // wait predicate in the makeThread code below.
        waiter = std::move(m_thread_context.waiter);
        assert(!waiter->m_fn);
        // Clear the client maps now to avoid deadlock in the m_thread.join()
        // call below. The maps contain Thread::Client objects that need to be
        // destroyed from the event loop thread (this thread), which can't
        // happen if this thread is busy calling join.
        m_thread_context.request_threads.clear();
        m_thread_context.callback_threads.clear();
        waiter->m_cv.notify_all();
    }
    m_thread.join();
}

kj::Promise<void> ProxyServer<Thread>::getName(GetNameContext context)
{
    context.getResults().setResult(m_thread_context.thread_name);
    return kj::READY_NOW;
}

ProxyServer<ThreadMap>::ProxyServer(Connection& connection) : m_connection(connection) {}

kj::Promise<void> ProxyServer<ThreadMap>::makeThread(MakeThreadContext context)
{
    const std::string from = context.getParams().getName();
    std::promise<ThreadContext*> thread_context;
    std::thread thread([&thread_context, from, this]() {
        g_thread_context.thread_name = ThreadName(m_connection.m_loop->m_exe_name) + " (from " + from + ")";
        g_thread_context.waiter = std::make_unique<Waiter>();
        thread_context.set_value(&g_thread_context);
        std::unique_lock<std::mutex> lock(g_thread_context.waiter->m_mutex);
        // Wait for the shutdown signal from the ProxyServer<Thread> destructor
        // (the signal is just the waiter getting set to null).
        g_thread_context.waiter->wait(lock, [] { return !g_thread_context.waiter; });
    });
    auto thread_server = kj::heap<ProxyServer<Thread>>(*thread_context.get_future().get(), std::move(thread));
    auto thread_client = m_connection.m_threads.add(kj::mv(thread_server));
    context.getResults().setResult(kj::mv(thread_client));
    return kj::READY_NOW;
}
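
// Illustrative sketch (not part of the original file): the same
// promise/future handoff that makeThread() uses above, reduced to plain std::
// primitives. The spawned thread publishes a pointer to its own state, then
// blocks until the owner signals shutdown, mirroring how the real code
// publishes &g_thread_context and how ~ProxyServer<Thread>() signals by
// resetting m_thread_context.waiter. All names below are hypothetical.
namespace {
struct ExampleThreadState
{
    std::mutex mutex;
    std::condition_variable cv;
    bool shutdown{false};
};

[[maybe_unused]] void ExampleThreadHandoff()
{
    std::promise<ExampleThreadState*> state_promise;
    std::thread worker([&] {
        ExampleThreadState state;
        state_promise.set_value(&state); // hand a pointer back to the creator
        std::unique_lock<std::mutex> lock(state.mutex);
        state.cv.wait(lock, [&] { return state.shutdown; }); // wait for shutdown signal
    });
    ExampleThreadState* state = state_promise.get_future().get();
    {
        const std::unique_lock<std::mutex> lock(state->mutex);
        state->shutdown = true;
        state->cv.notify_all();
    }
    worker.join();
}
} // namespace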

std::atomic<int> server_reqs{0};

std::string LongThreadName(const char* exe_name)
{
    return g_thread_context.thread_name.empty() ? ThreadName(exe_name) : g_thread_context.thread_name;
}

} // namespace mp