#pragma once

#include "rocksdb/db.h"
#include "crocksdb/c.h"
#include "rust/cxx.h"
#include "rocksdb/async_future.h"

#include <atomic>
#include <cstdint>
#include <memory>
#include <tuple>
#include <unordered_map>
#include <unordered_set>

#include <sys/uio.h>

/* struct io_uring is defined by liburing; a forward declaration suffices
   here since it is only used through a std::shared_ptr. */
struct io_uring;

struct CRocksDB;
struct RustStatus;
struct Async_result;

using ROCKSDB_NAMESPACE::Async_future;
using ROCKSDB_NAMESPACE::ReadOptions;
using ROCKSDB_NAMESPACE::PinnableSlice;
using ReadTier = ROCKSDB_NAMESPACE::ReadTier;
using Submit_queue = Async_future::Submit_queue;
using Return_type = Async_future::Promise_type::Return_type;

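/** Bridges rocksdb::DB point lookups to Rust over cxx: reads are
    submitted through io_uring and completed via Async_future
    coroutines. */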
struct Async_reader {
  Async_reader() = delete;
  Async_reader(Async_reader&&) = delete;
  Async_reader(const Async_reader&) = delete;
  Async_reader& operator=(Async_reader&&) = delete;
  Async_reader& operator=(const Async_reader&) = delete;

  Async_reader(rocksdb::DB *db, size_t io_uring_size);

  ~Async_reader() noexcept;

  /** Reap entries from the io_uring completion queue (CQ).
      @return Number of CQEs processed. */
  uint32_t io_uring_reap_cq() const;

  /** Peek at the CQ without reaping.
      @return true if there is at least one CQE to process. */
  bool io_uring_peek_cq() const;

  /** Start an asynchronous point lookup for key k.
      @return an Async_result that completes once the read finishes. */
  Async_result get(const ReadOptions *ropts, rust::String k) const;

  /** Attach the io_uring submission queue (SQ) handler to the given
      read options, so that reads issued with them go through the ring. */
  void setup_io_uring_sq_handler(ReadOptions *ropts) const;

  /** @return Number of submitted SQEs that have not been reaped yet. */
  uint32_t pending_io_uring_sqe_count() const {
    return m_n_pending_sqe.load();
  }

  /** Extract the value read by a completed get().
      @param[out] v  the value, on success.
      @return status of the read. */
  static RustStatus get_result(Async_result async_result, rust::String &v);

 private:
  using Promise = Async_future::promise_type;

  /** Schedule the coroutine that owns promise for resumption after its
      IO has completed. */
  static void schedule_task(Promise* promise) noexcept;

  /** Identifies an in-flight read by file descriptor and file offset. */
  struct IO_key {
    bool operator==(const IO_key& rhs) const {
      return m_fd == rhs.m_fd && m_off == rhs.m_off;
    }

    int m_fd{-1};
    off_t m_off{};
  };

  struct IO_key_hash {
    size_t operator()(const IO_key &io_key) const noexcept {
      /* Simple fd/offset mix; collisions only cost a longer bucket
         scan in m_pending_io. */
      return io_key.m_fd ^ io_key.m_off;
    }
  };

  using IO_value = std::unordered_set<size_t>;

  /** All data members are mutable so that the member functions can be
      const, which lets Rust call them through an immutable reference
      to the std::shared_ptr<Async_reader>. */
  mutable rocksdb::DB *m_db{};
  mutable std::atomic<uint32_t> m_n_pending_sqe{};
  mutable std::shared_ptr<io_uring> m_io_uring{};
  mutable std::shared_ptr<Submit_queue> m_submit_queue{};
  mutable std::unordered_map<IO_key, IO_value, IO_key_hash> m_pending_io{};
};

/** Create an Async_reader over db with an io_uring of io_uring_size
    entries; exposed to Rust through the cxx bridge. */
std::shared_ptr<Async_reader> new_async_reader(CRocksDB* db, uint32_t io_uring_size);

/** Free-function wrapper around Async_reader::get_result() for the cxx
    bridge. */
RustStatus get_async_result(Async_result async_result, rust::String &v);
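
A minimal usage sketch from the C++ side, assuming crocksdb is an open
CRocksDB handle and that the caller drives the completion queue itself;
the key, ring size, and polling loop are illustrative, not part of this
header:

    ReadOptions ropts;
    auto reader = new_async_reader(crocksdb, 256);
    reader->setup_io_uring_sq_handler(&ropts);

    Async_result result = reader->get(&ropts, rust::String("some_key"));

    // Drain completions until every submitted read has been reaped.
    while (reader->pending_io_uring_sqe_count() > 0) {
      if (reader->io_uring_peek_cq()) {
        reader->io_uring_reap_cq();
      }
    }

    rust::String value;
    RustStatus status = get_async_result(result, value);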