Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Chore] Remove derive-getters #1818

Merged
merged 3 commits into from
Aug 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 8 additions & 16 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,10 @@ datafusion = { version = "40.0.0", default-features = false, features = [
"unicode_expressions",
] }
datafusion-expr = { version = "40.0.0" }
derive-getters = { version = "0.4.0" }
derive_builder = "0.20.0"
derive_more = { version = "0.99.17" }
dialoguer = { version = "0.11.0" }
downcast-rs = { version ="1.2.1" }
enum-map = { version = "2.7.3" }
enumset = { version = "1.1.3" }
flexbuffers = { version = "2.0.0" }
Expand Down
2 changes: 1 addition & 1 deletion crates/admin/src/cluster_controller/scheduler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ where
.metadata_store_client
.put(
SCHEDULING_PLAN_KEY.clone(),
scheduling_plan.clone(),
&scheduling_plan,
Precondition::MatchesVersion(self.scheduling_plan.version()),
)
.await
Expand Down
30 changes: 11 additions & 19 deletions crates/admin/src/cluster_controller/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -421,10 +421,9 @@ async fn signal_all_partitions_started(
#[cfg(test)]
mod tests {
use super::Service;
use bytes::Bytes;
use googletest::assert_that;
use googletest::matchers::eq;
use googletest::{assert_that, pat};
use restate_bifrost::{Bifrost, MaybeRecord, TrimGap};
use restate_bifrost::Bifrost;
use restate_core::network::{MessageHandler, NetworkSender};
use restate_core::{
MockNetworkSender, NoOpMessageHandler, TaskKind, TestCoreEnv, TestCoreEnvBuilder,
Expand Down Expand Up @@ -482,18 +481,14 @@ mod tests {
.tc
.run_in_scope("test", None, async move {
for _ in 1..=5 {
appender.append_raw("").await?;
appender.append("").await?;
}

svc_handle.trim_log(LOG_ID, Lsn::from(3)).await??;

let record = bifrost.read(LOG_ID, Lsn::OLDEST).await?.unwrap();
assert_that!(
record.record,
pat!(MaybeRecord::TrimGap(pat!(TrimGap {
to: eq(Lsn::from(3)),
})))
);
assert_that!(record.sequence_number(), eq(Lsn::OLDEST));
assert_that!(record.trim_gap_to_sequence_number(), eq(Some(Lsn::new(3))));
Ok::<(), anyhow::Error>(())
})
.await?;
Expand Down Expand Up @@ -571,7 +566,7 @@ mod tests {
.run_in_scope("test", None, async move {
let mut appender = bifrost.create_appender(LOG_ID)?;
for i in 1..=20 {
let lsn = appender.append_raw("").await?;
let lsn = appender.append("").await?;
assert_eq!(Lsn::from(i), lsn);
}

Expand Down Expand Up @@ -636,7 +631,7 @@ mod tests {
.run_in_scope("test", None, async move {
let mut appender = bifrost.create_appender(LOG_ID)?;
for i in 1..=20 {
let lsn = appender.append_raw(format!("record{}", i)).await?;
let lsn = appender.append(format!("record{}", i)).await?;
assert_eq!(Lsn::from(i), lsn);
}
tokio::time::sleep(interval_duration * 10).await;
Expand All @@ -650,12 +645,9 @@ mod tests {
assert_eq!(bifrost.get_trim_point(LOG_ID).await?, Lsn::from(3));
                // we should be able to read after the last persisted lsn
let v = bifrost.read(LOG_ID, Lsn::from(4)).await?.unwrap();
assert_eq!(Lsn::from(4), v.offset);
assert!(v.record.is_data());
assert_eq!(
&Bytes::from_static(b"record4"),
v.record.try_as_data().unwrap().body()
);
assert_that!(v.sequence_number(), eq(Lsn::new(4)));
assert!(v.is_data_record());
assert_that!(v.decode_unchecked::<String>(), eq("record4".to_owned()));

persisted_lsn.store(20, Ordering::Relaxed);

Expand Down Expand Up @@ -708,7 +700,7 @@ mod tests {
.run_in_scope("test", None, async move {
let mut appender = bifrost.create_appender(LOG_ID)?;
for i in 1..=5 {
let lsn = appender.append_raw(format!("record{}", i)).await?;
let lsn = appender.append(format!("record{}", i)).await?;
assert_eq!(Lsn::from(i), lsn);
}

Expand Down
2 changes: 1 addition & 1 deletion crates/bifrost/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@ tokio-stream = { workspace = true, features = ["sync"] }
tokio-util = { workspace = true }
tracing = { workspace = true }


[dev-dependencies]
restate-core = { workspace = true, features = ["test-util"] }
restate-metadata-store = { workspace = true }
Expand All @@ -58,6 +57,7 @@ restate-types = { workspace = true, features = ["test-util"] }
criterion = { workspace = true, features = ["async_tokio"] }
googletest = { workspace = true, features = ["anyhow"] }
paste = { workspace = true }
rlimit = { workspace = true }
tempfile = { workspace = true }
test-log = { workspace = true }
tokio = { workspace = true, features = ["test-util"] }
Expand Down
13 changes: 6 additions & 7 deletions crates/bifrost/benches/append_throughput.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ use restate_rocksdb::{DbName, RocksDbManager};
use restate_types::config::{
BifrostOptionsBuilder, CommonOptionsBuilder, ConfigurationBuilder, LocalLogletOptionsBuilder,
};
use restate_types::live::Live;
use restate_types::logs::LogId;
use tracing::info;
use tracing_subscriber::EnvFilter;
Expand All @@ -33,7 +34,7 @@ async fn append_records_multi_log(bifrost: Bifrost, log_id_range: Range<u64>, co
let _ = bifrost
.create_appender(LogId::new(log_id))
.expect("log exists")
.append_raw("")
.append("")
.await
.unwrap();
})
Expand All @@ -50,7 +51,7 @@ async fn append_records_concurrent_single_log(bifrost: Bifrost, log_id: LogId, c
bifrost
.create_appender(log_id)
.expect("log exists")
.append_raw("")
.append("")
.await
.unwrap()
})
Expand All @@ -61,10 +62,7 @@ async fn append_records_concurrent_single_log(bifrost: Bifrost, log_id: LogId, c
async fn append_seq(bifrost: Bifrost, log_id: LogId, count: u64) {
let mut appender = bifrost.create_appender(log_id).expect("log exists");
for _ in 1..=count {
let _ = appender
.append_raw("")
.await
.expect("bifrost accept record");
let _ = appender.append("").await.expect("bifrost accept record");
}
}

Expand Down Expand Up @@ -108,7 +106,8 @@ fn write_throughput_local_loglet(c: &mut Criterion) {

let bifrost = tc.block_on("bifrost-init", None, async {
let metadata = metadata();
let bifrost_svc = BifrostService::new(restate_core::task_center(), metadata);
let bifrost_svc = BifrostService::new(restate_core::task_center(), metadata)
.enable_local_loglet(&Live::from_value(config));
let bifrost = bifrost_svc.handle();

// start bifrost service in the background
Expand Down
6 changes: 5 additions & 1 deletion crates/bifrost/benches/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,16 @@ use restate_types::config::Configuration;
use restate_types::live::Constant;
use restate_types::logs::metadata::ProviderKind;
use restate_types::metadata_store::keys::BIFROST_CONFIG_KEY;
use tracing::warn;

pub async fn spawn_environment(
config: Configuration,
num_logs: u64,
provider: ProviderKind,
) -> TaskCenter {
if rlimit::increase_nofile_limit(u64::MAX).is_err() {
warn!("Failed to increase the number of open file descriptors limit.");
}
let tc = TaskCenterBuilder::default()
.options(config.common.clone())
.build()
Expand Down Expand Up @@ -51,7 +55,7 @@ pub async fn spawn_environment(
let logs = restate_types::logs::metadata::bootstrap_logs_metadata(provider, num_logs);

metadata_store_client
.put(BIFROST_CONFIG_KEY.clone(), logs.clone(), Precondition::None)
.put(BIFROST_CONFIG_KEY.clone(), &logs, Precondition::None)
.await
.expect("to store bifrost config in metadata store");
metadata_writer.submit(logs);
Expand Down
Loading
Loading