From ceec826c66632318a66b48b7ff800216bc74da1d Mon Sep 17 00:00:00 2001 From: hatoo Date: Wed, 27 Nov 2024 15:39:17 +0900 Subject: [PATCH 1/6] wip --- Cargo.toml | 2 +- src/client.rs | 23 ++++++++++++++++------- src/db.rs | 3 +++ src/result_data.rs | 2 ++ 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 29e8cd70..07c0d0a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ tokio-rustls = { version = "0.26.0", optional = true } rustls-pki-types = { version = "1.7.0", optional = true } base64 = "0.22.1" -rand = "0.8" +rand = {version = "0.8", features = ["small_rng"]} hickory-resolver = "0.24.1" rand_regex = "0.17.0" regex-syntax = "0.8.4" diff --git a/src/client.rs b/src/client.rs index 5a9331e1..e79097c0 100644 --- a/src/client.rs +++ b/src/client.rs @@ -3,6 +3,7 @@ use hyper::http; use hyper_util::rt::{TokioExecutor, TokioIo}; use rand::prelude::*; use std::{ + borrow::Cow, sync::{ atomic::{AtomicBool, Ordering::Relaxed}, Arc, @@ -30,6 +31,7 @@ pub struct ConnectionTime { #[derive(Debug, Clone)] /// a result for a request pub struct RequestResult { + pub rng: SmallRng, // When the query should started pub start_latency_correction: Option, /// When the query started @@ -177,28 +179,28 @@ pub struct Client { } struct ClientStateHttp1 { - rng: StdRng, + rng: SmallRng, send_request: Option, } impl Default for ClientStateHttp1 { fn default() -> Self { Self { - rng: StdRng::from_entropy(), + rng: SmallRng::from_entropy(), send_request: None, } } } struct ClientStateHttp2 { - rng: StdRng, + rng: SmallRng, send_request: SendRequestHttp2, } impl Clone for ClientStateHttp2 { fn clone(&self) -> Self { Self { - rng: StdRng::from_entropy(), + rng: SmallRng::from_entropy(), send_request: self.send_request.clone(), } } @@ -315,6 +317,11 @@ impl Client { Ok(()) } + pub fn generate_url(&self, rng: &mut SmallRng) -> Result<(Cow, SmallRng), ClientError> { + let snapshot = rng.clone(); + Ok((self.url_generator.generate(rng)?, snapshot)) + } + async fn client( &self, url: &Url, @@ -467,7 +474,7 @@ impl Client { client_state: &mut ClientStateHttp1, ) -> Result { let do_req = async { - let url = self.url_generator.generate(&mut client_state.rng)?; + let (url, rng) = self.generate_url(&mut client_state.rng)?; let mut start = std::time::Instant::now(); let mut connection_time: Option = None; @@ -523,6 +530,7 @@ impl Client { let end = std::time::Instant::now(); let result = RequestResult { + rng, start_latency_correction: None, start, end, @@ -573,7 +581,7 @@ impl Client { client_state: &mut ClientStateHttp2, ) -> Result { let do_req = async { - let url = self.url_generator.generate(&mut client_state.rng)?; + let (url, rng) = self.generate_url(&mut client_state.rng)?; let start = std::time::Instant::now(); let connection_time: Option = None; @@ -591,6 +599,7 @@ impl Client { let end = std::time::Instant::now(); let result = RequestResult { + rng, start_latency_correction: None, start, end, @@ -760,7 +769,7 @@ fn is_hyper_error(res: &Result) -> bool { } async fn setup_http2(client: &Client) -> Result<(ConnectionTime, ClientStateHttp2), ClientError> { - let mut rng = StdRng::from_entropy(); + let mut rng = SmallRng::from_entropy(); let url = client.url_generator.generate(&mut rng)?; let (connection_time, send_request) = client.connect_http2(&url, &mut rng).await?; diff --git a/src/db.rs b/src/db.rs index 94cfca50..6e1175ec 100644 --- a/src/db.rs +++ b/src/db.rs @@ -48,12 +48,15 @@ pub fn store( #[cfg(test)] mod test_db { + use rand::{rngs::SmallRng, SeedableRng}; + 
use super::*; #[test] fn test_store() { let start = std::time::Instant::now(); let test_val = RequestResult { + rng: SmallRng::seed_from_u64(0), status: hyper::StatusCode::OK, len_bytes: 100, start_latency_correction: None, diff --git a/src/result_data.rs b/src/result_data.rs index d205e86a..470618e2 100644 --- a/src/result_data.rs +++ b/src/result_data.rs @@ -179,6 +179,7 @@ impl ResultData { #[cfg(test)] mod tests { use float_cmp::assert_approx_eq; + use rand::{rngs::SmallRng, SeedableRng}; use super::*; use crate::client::{ClientError, ConnectionTime, RequestResult}; @@ -193,6 +194,7 @@ mod tests { ) -> Result { let now = Instant::now(); Ok(RequestResult { + rng: SmallRng::seed_from_u64(0), start_latency_correction: None, start: now, connection_time: Some(ConnectionTime { From fc6f6bca9e767c762ccfc0541851f34185fe1649 Mon Sep 17 00:00:00 2001 From: hatoo Date: Wed, 27 Nov 2024 15:52:37 +0900 Subject: [PATCH 2/6] wip --- src/client.rs | 25 +++++++------------------ src/db.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- src/main.rs | 28 ++++++++++++++-------------- 3 files changed, 61 insertions(+), 35 deletions(-) diff --git a/src/client.rs b/src/client.rs index e79097c0..81092bf5 100644 --- a/src/client.rs +++ b/src/client.rs @@ -813,7 +813,7 @@ fn set_start_latency_correction( /// Run n tasks by m workers pub async fn work_debug( - client: Client, + client: Arc, _report_tx: flume::Sender>, ) -> Result<(), ClientError> { let mut rng = StdRng::from_entropy(); @@ -845,7 +845,7 @@ pub async fn work_debug( /// Run n tasks by m workers pub async fn work( - client: Client, + client: Arc, report_tx: flume::Sender>, n_tasks: usize, n_connections: usize, @@ -854,8 +854,6 @@ pub async fn work( use std::sync::atomic::{AtomicUsize, Ordering}; let counter = Arc::new(AtomicUsize::new(0)); - let client = Arc::new(client); - if client.is_http2() { let futures = (0..n_connections) .map(|_| { @@ -956,7 +954,7 @@ pub async fn work( /// n tasks by m workers limit to qps works in a second pub async fn work_with_qps( - client: Client, + client: Arc, report_tx: flume::Sender>, query_limit: QueryLimit, n_tasks: usize, @@ -1001,8 +999,6 @@ pub async fn work_with_qps( Ok::<(), flume::SendError<_>>(()) }; - let client = Arc::new(client); - if client.is_http2() { let futures = (0..n_connections) .map(|_| { @@ -1103,7 +1099,7 @@ pub async fn work_with_qps( /// n tasks by m workers limit to qps works in a second with latency correction pub async fn work_with_qps_latency_correction( - client: Client, + client: Arc, report_tx: flume::Sender>, query_limit: QueryLimit, n_tasks: usize, @@ -1151,8 +1147,6 @@ pub async fn work_with_qps_latency_correction( Ok::<(), flume::SendError<_>>(()) }; - let client = Arc::new(client); - if client.is_http2() { let futures = (0..n_connections) .map(|_| { @@ -1254,14 +1248,13 @@ pub async fn work_with_qps_latency_correction( /// Run until dead_line by n workers pub async fn work_until( - client: Client, + client: Arc, report_tx: flume::Sender>, dead_line: std::time::Instant, n_connections: usize, n_http2_parallel: usize, wait_ongoing_requests_after_deadline: bool, ) { - let client = Arc::new(client); if client.is_http2() { // Using semaphore to control the deadline // Maybe there is a better concurrent primitive to do this @@ -1396,7 +1389,7 @@ pub async fn work_until( /// Run until dead_line by n workers limit to qps works in a second #[allow(clippy::too_many_arguments)] pub async fn work_until_with_qps( - client: Client, + client: Arc, report_tx: flume::Sender>, query_limit: 
QueryLimit, start: std::time::Instant, @@ -1443,8 +1436,6 @@ pub async fn work_until_with_qps( } }; - let client = Arc::new(client); - if client.is_http2() { let s = Arc::new(tokio::sync::Semaphore::new(0)); @@ -1581,7 +1572,7 @@ pub async fn work_until_with_qps( /// Run until dead_line by n workers limit to qps works in a second with latency correction #[allow(clippy::too_many_arguments)] pub async fn work_until_with_qps_latency_correction( - client: Client, + client: Arc, report_tx: flume::Sender>, query_limit: QueryLimit, start: std::time::Instant, @@ -1627,8 +1618,6 @@ pub async fn work_until_with_qps_latency_correction( } }; - let client = Arc::new(client); - if client.is_http2() { let s = Arc::new(tokio::sync::Semaphore::new(0)); diff --git a/src/db.rs b/src/db.rs index 6e1175ec..4a799e14 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,10 +1,11 @@ use rusqlite::Connection; -use crate::client::RequestResult; +use crate::client::{Client, RequestResult}; fn create_db(conn: &Connection) -> Result { conn.execute( "CREATE TABLE oha ( + url TEXT NOT NULL, start REAL NOT NULL, start_latency_correction REAL, end REAL NOT NULL, @@ -17,6 +18,7 @@ fn create_db(conn: &Connection) -> Result { } pub fn store( + client: &Client, db_url: &str, start: std::time::Instant, request_records: &[RequestResult], @@ -28,9 +30,11 @@ pub fn store( let mut affected_rows = 0; for request in request_records { + let url = client.generate_url(&mut request.rng.clone()).unwrap().0; affected_rows += t.execute( - "INSERT INTO oha (start, start_latency_correction, end, duration, status, len_bytes) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", + "INSERT INTO oha (url, start, start_latency_correction, end, duration, status, len_bytes) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", ( + url.to_string(), (request.start - start).as_secs_f64(), request.start_latency_correction.map(|d| (d - start).as_secs_f64()), (request.end - start).as_secs_f64(), @@ -48,8 +52,11 @@ pub fn store( #[cfg(test)] mod test_db { + use hyper::{HeaderMap, Method, Version}; use rand::{rngs::SmallRng, SeedableRng}; + use crate::{client::Dns, url_generator::UrlGenerator}; + use super::*; #[test] @@ -65,7 +72,37 @@ mod test_db { end: std::time::Instant::now(), }; let test_vec = vec![test_val.clone(), test_val.clone()]; - let result = store(":memory:", start, &test_vec); + let client = Client { + http_version: Version::HTTP_11, + url_generator: UrlGenerator::new_static("http://example.com".parse().unwrap()), + method: Method::GET, + headers: HeaderMap::new(), + body: None, + dns: Dns { + resolver: hickory_resolver::AsyncResolver::tokio_from_system_conf().unwrap(), + connect_to: Vec::new(), + }, + timeout: None, + redirect_limit: 0, + disable_keepalive: false, + insecure: false, + #[cfg(unix)] + unix_socket: None, + #[cfg(feature = "vsock")] + vsock_addr: None, + #[cfg(feature = "rustls")] + // Cache rustls_native_certs::load_native_certs() because it's expensive. 
+ root_cert_store: { + let mut root_cert_store = rustls::RootCertStore::empty(); + for cert in + rustls_native_certs::load_native_certs().expect("could not load platform certs") + { + root_cert_store.add(cert).unwrap(); + } + std::sync::Arc::new(root_cert_store) + }, + }; + let result = store(&client, ":memory:", start, &test_vec); assert_eq!(result.unwrap(), 2); } } diff --git a/src/main.rs b/src/main.rs index 570e0674..962f6904 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,7 +12,7 @@ use rand::prelude::*; use rand_regex::Regex; use ratatui::crossterm; use result_data::ResultData; -use std::{env, io::Read, str::FromStr}; +use std::{env, io::Read, str::FromStr, sync::Arc}; use url::Url; use url_generator::UrlGenerator; @@ -444,7 +444,7 @@ async fn main() -> anyhow::Result<()> { resolver_opts.ip_strategy = ip_strategy; let resolver = hickory_resolver::AsyncResolver::tokio(config, resolver_opts); - let client = client::Client { + let client = Arc::new(client::Client { http_version, url_generator, method: opts.method, @@ -473,7 +473,7 @@ async fn main() -> anyhow::Result<()> { } std::sync::Arc::new(root_cert_store) }, - }; + }); if !opts.no_pre_lookup { client.pre_lookup().await?; @@ -540,7 +540,7 @@ async fn main() -> anyhow::Result<()> { Some(0) | None => match opts.burst_duration { None => { client::work_until( - client, + client.clone(), result_tx, start + duration.into(), opts.n_connections, @@ -552,7 +552,7 @@ async fn main() -> anyhow::Result<()> { Some(burst_duration) => { if opts.latency_correction { client::work_until_with_qps_latency_correction( - client, + client.clone(), result_tx, client::QueryLimit::Burst( burst_duration.into(), @@ -567,7 +567,7 @@ async fn main() -> anyhow::Result<()> { .await } else { client::work_until_with_qps( - client, + client.clone(), result_tx, client::QueryLimit::Burst( burst_duration.into(), @@ -586,7 +586,7 @@ async fn main() -> anyhow::Result<()> { Some(qps) => { if opts.latency_correction { client::work_until_with_qps_latency_correction( - client, + client.clone(), result_tx, client::QueryLimit::Qps(qps), start, @@ -598,7 +598,7 @@ async fn main() -> anyhow::Result<()> { .await } else { client::work_until_with_qps( - client, + client.clone(), result_tx, client::QueryLimit::Qps(qps), start, @@ -616,7 +616,7 @@ async fn main() -> anyhow::Result<()> { Some(0) | None => match opts.burst_duration { None => { client::work( - client, + client.clone(), result_tx, opts.n_requests, opts.n_connections, @@ -627,7 +627,7 @@ async fn main() -> anyhow::Result<()> { Some(burst_duration) => { if opts.latency_correction { client::work_with_qps_latency_correction( - client, + client.clone(), result_tx, client::QueryLimit::Burst( burst_duration.into(), @@ -640,7 +640,7 @@ async fn main() -> anyhow::Result<()> { .await } else { client::work_with_qps( - client, + client.clone(), result_tx, client::QueryLimit::Burst( burst_duration.into(), @@ -657,7 +657,7 @@ async fn main() -> anyhow::Result<()> { Some(qps) => { if opts.latency_correction { client::work_with_qps_latency_correction( - client, + client.clone(), result_tx, client::QueryLimit::Qps(qps), opts.n_requests, @@ -667,7 +667,7 @@ async fn main() -> anyhow::Result<()> { .await } else { client::work_with_qps( - client, + client.clone(), result_tx, client::QueryLimit::Qps(qps), opts.n_requests, @@ -696,7 +696,7 @@ async fn main() -> anyhow::Result<()> { if let Some(db_url) = opts.db_url { eprintln!("Storing results to {db_url}"); - let _ = db::store(&db_url, start, res.success()); + let _ = db::store(&client, 
&db_url, start, res.success()); } Ok(()) From 6e9783ba00fa417ef51f4b3f529f1a7884f4b7f5 Mon Sep 17 00:00:00 2001 From: hatoo Date: Wed, 27 Nov 2024 15:55:00 +0900 Subject: [PATCH 3/6] test --- src/url_generator.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/url_generator.rs b/src/url_generator.rs index 1b4b613a..78f7911f 100644 --- a/src/url_generator.rs +++ b/src/url_generator.rs @@ -70,4 +70,20 @@ mod tests { .captures(url.path()) .is_some()); } + + #[test] + fn test_url_generator_dynamic_consistency() { + let url_generator = UrlGenerator::new_dynamic( + RandRegex::compile(r"http://127\.0\.0\.1/[a-z][a-z][0-9]", 4).unwrap(), + ); + + for _ in 0..100 { + let rng = SmallRng::from_entropy(); + + assert_eq!( + url_generator.generate(&mut rng.clone()).unwrap(), + url_generator.generate(&mut rng.clone()).unwrap() + ); + } + } } From ed11ace4d22491e65c21b2cbd84efd5320b0aee9 Mon Sep 17 00:00:00 2001 From: hatoo Date: Wed, 27 Nov 2024 16:27:41 +0900 Subject: [PATCH 4/6] pcg64si --- Cargo.lock | 1 + Cargo.toml | 3 ++- src/client.rs | 15 ++++++++------- src/db.rs | 4 ++-- src/main.rs | 1 + src/pcg64si.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++ src/result_data.rs | 4 ++-- src/url_generator.rs | 4 +++- 8 files changed, 63 insertions(+), 13 deletions(-) create mode 100644 src/pcg64si.rs diff --git a/Cargo.lock b/Cargo.lock index b1477d18..8733c67c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2203,6 +2203,7 @@ dependencies = [ "pin-project-lite", "predicates", "rand", + "rand_core", "rand_regex", "ratatui", "regex", diff --git a/Cargo.toml b/Cargo.toml index 07c0d0a6..5c6f8361 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,8 @@ tokio-rustls = { version = "0.26.0", optional = true } rustls-pki-types = { version = "1.7.0", optional = true } base64 = "0.22.1" -rand = {version = "0.8", features = ["small_rng"]} +rand = "0.8" +rand_core = "0.6.4" hickory-resolver = "0.24.1" rand_regex = "0.17.0" regex-syntax = "0.8.4" diff --git a/src/client.rs b/src/client.rs index 81092bf5..087abeb9 100644 --- a/src/client.rs +++ b/src/client.rs @@ -15,6 +15,7 @@ use tokio::net::TcpStream; use url::{ParseError, Url}; use crate::{ + pcg64si::Pcg64Si, url_generator::{UrlGenerator, UrlGeneratorError}, ConnectToEntry, }; @@ -31,7 +32,7 @@ pub struct ConnectionTime { #[derive(Debug, Clone)] /// a result for a request pub struct RequestResult { - pub rng: SmallRng, + pub rng: Pcg64Si, // When the query should started pub start_latency_correction: Option, /// When the query started @@ -179,28 +180,28 @@ pub struct Client { } struct ClientStateHttp1 { - rng: SmallRng, + rng: Pcg64Si, send_request: Option, } impl Default for ClientStateHttp1 { fn default() -> Self { Self { - rng: SmallRng::from_entropy(), + rng: SeedableRng::from_entropy(), send_request: None, } } } struct ClientStateHttp2 { - rng: SmallRng, + rng: Pcg64Si, send_request: SendRequestHttp2, } impl Clone for ClientStateHttp2 { fn clone(&self) -> Self { Self { - rng: SmallRng::from_entropy(), + rng: SeedableRng::from_entropy(), send_request: self.send_request.clone(), } } @@ -317,7 +318,7 @@ impl Client { Ok(()) } - pub fn generate_url(&self, rng: &mut SmallRng) -> Result<(Cow, SmallRng), ClientError> { + pub fn generate_url(&self, rng: &mut Pcg64Si) -> Result<(Cow, Pcg64Si), ClientError> { let snapshot = rng.clone(); Ok((self.url_generator.generate(rng)?, snapshot)) } @@ -769,7 +770,7 @@ fn is_hyper_error(res: &Result) -> bool { } async fn setup_http2(client: &Client) -> Result<(ConnectionTime, ClientStateHttp2), 
ClientError> { - let mut rng = SmallRng::from_entropy(); + let mut rng = SeedableRng::from_entropy(); let url = client.url_generator.generate(&mut rng)?; let (connection_time, send_request) = client.connect_http2(&url, &mut rng).await?; diff --git a/src/db.rs b/src/db.rs index 4a799e14..ed212a92 100644 --- a/src/db.rs +++ b/src/db.rs @@ -53,7 +53,7 @@ pub fn store( #[cfg(test)] mod test_db { use hyper::{HeaderMap, Method, Version}; - use rand::{rngs::SmallRng, SeedableRng}; + use rand::SeedableRng; use crate::{client::Dns, url_generator::UrlGenerator}; @@ -63,7 +63,7 @@ mod test_db { fn test_store() { let start = std::time::Instant::now(); let test_val = RequestResult { - rng: SmallRng::seed_from_u64(0), + rng: SeedableRng::seed_from_u64(0), status: hyper::StatusCode::OK, len_bytes: 100, start_latency_correction: None, diff --git a/src/main.rs b/src/main.rs index 962f6904..4cf88159 100644 --- a/src/main.rs +++ b/src/main.rs @@ -20,6 +20,7 @@ mod client; mod db; mod histogram; mod monitor; +mod pcg64si; mod printer; mod result_data; mod timescale; diff --git a/src/pcg64si.rs b/src/pcg64si.rs new file mode 100644 index 00000000..02f64cbe --- /dev/null +++ b/src/pcg64si.rs @@ -0,0 +1,44 @@ +// https://github.com/imneme/pcg-c +use rand::{Error, RngCore, SeedableRng}; +use rand_core::impls; + +#[derive(Debug, Copy, Clone)] +pub struct Pcg64Si { + state: u64, +} + +impl RngCore for Pcg64Si { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + let old_state = self.state; + self.state = self + .state + .wrapping_mul(6364136223846793005) + .wrapping_add(1442695040888963407); + + let word = + ((old_state >> ((old_state >> 59) + 5)) ^ old_state).wrapping_mul(12605985483714917081); + (word >> 43) ^ word + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + impls::fill_bytes_via_next(self, dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + Ok(self.fill_bytes(dest)) + } +} + +impl SeedableRng for Pcg64Si { + type Seed = [u8; 8]; + + fn from_seed(seed: Self::Seed) -> Pcg64Si { + Pcg64Si { + state: u64::from_le_bytes(seed), + } + } +} diff --git a/src/result_data.rs b/src/result_data.rs index 470618e2..bd8c8a9d 100644 --- a/src/result_data.rs +++ b/src/result_data.rs @@ -179,7 +179,7 @@ impl ResultData { #[cfg(test)] mod tests { use float_cmp::assert_approx_eq; - use rand::{rngs::SmallRng, SeedableRng}; + use rand::SeedableRng; use super::*; use crate::client::{ClientError, ConnectionTime, RequestResult}; @@ -194,7 +194,7 @@ mod tests { ) -> Result { let now = Instant::now(); Ok(RequestResult { - rng: SmallRng::seed_from_u64(0), + rng: SeedableRng::seed_from_u64(0), start_latency_correction: None, start: now, connection_time: Some(ConnectionTime { diff --git a/src/url_generator.rs b/src/url_generator.rs index 78f7911f..3e605fa2 100644 --- a/src/url_generator.rs +++ b/src/url_generator.rs @@ -43,6 +43,8 @@ impl UrlGenerator { #[cfg(test)] mod tests { + use crate::pcg64si::Pcg64Si; + use super::*; use rand_regex::Regex as RandRegex; use regex::Regex; @@ -78,7 +80,7 @@ mod tests { ); for _ in 0..100 { - let rng = SmallRng::from_entropy(); + let rng: Pcg64Si = SeedableRng::from_entropy(); assert_eq!( url_generator.generate(&mut rng.clone()).unwrap(), From 3eda6a247e643f037de63acd5ad3a3ac605a82d2 Mon Sep 17 00:00:00 2001 From: hatoo Date: Wed, 27 Nov 2024 16:32:21 +0900 Subject: [PATCH 5/6] show sqlite error --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 
4cf88159..8ae00ab7 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -697,7 +697,7 @@ async fn main() -> anyhow::Result<()> {
 
     if let Some(db_url) = opts.db_url {
         eprintln!("Storing results to {db_url}");
-        let _ = db::store(&client, &db_url, start, res.success());
+        db::store(&client, &db_url, start, res.success())?;
     }
 
     Ok(())

From 6edbb0acc0251fdca984f55d6242b19ef45cb552 Mon Sep 17 00:00:00 2001
From: hatoo
Date: Wed, 27 Nov 2024 16:35:18 +0900
Subject: [PATCH 6/6] clippy

---
 src/client.rs  | 2 +-
 src/pcg64si.rs | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/client.rs b/src/client.rs
index 087abeb9..0f7e3460 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -319,7 +319,7 @@ impl Client {
     }
 
     pub fn generate_url(&self, rng: &mut Pcg64Si) -> Result<(Cow, Pcg64Si), ClientError> {
-        let snapshot = rng.clone();
+        let snapshot = *rng;
         Ok((self.url_generator.generate(rng)?, snapshot))
     }
 
diff --git a/src/pcg64si.rs b/src/pcg64si.rs
index 02f64cbe..558d0dad 100644
--- a/src/pcg64si.rs
+++ b/src/pcg64si.rs
@@ -29,7 +29,8 @@ impl RngCore for Pcg64Si {
     }
 
     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
-        Ok(self.fill_bytes(dest))
+        self.fill_bytes(dest);
+        Ok(())
     }
 }
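
Note: the common thread of these commits is that each RequestResult carries a copy of the per-request RNG state taken just before URL generation, so db::store can replay that state through the URL generator and recover the exact URL instead of storing a string per request. Patch 4 makes this cheap by switching to the single-u64 Pcg64Si, and patch 6's clippy fix (`*rng` instead of `rng.clone()`) works precisely because that state is Copy. The following is a minimal, dependency-free sketch of that snapshot-and-replay idea, not code from the patches: `generate_path`, the `/item/…` format, and the seed value are made-up stand-ins for oha's UrlGenerator and per-request seeding; only the Pcg64Si constants are taken from src/pcg64si.rs above.

// Sketch: snapshot a tiny Copy RNG before generating, replay it later to
// reproduce the same output (assumed stand-ins; see note above).
#[derive(Debug, Clone, Copy)]
struct Pcg64Si {
    state: u64,
}

impl Pcg64Si {
    fn from_seed(seed: u64) -> Self {
        Pcg64Si { state: seed }
    }

    // Same PCG step and output function as src/pcg64si.rs in patch 4.
    fn next_u64(&mut self) -> u64 {
        let old_state = self.state;
        self.state = old_state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        let word = ((old_state >> ((old_state >> 59) + 5)) ^ old_state)
            .wrapping_mul(12605985483714917081);
        (word >> 43) ^ word
    }
}

// Stand-in for UrlGenerator::generate: any deterministic function of the RNG stream.
fn generate_path(rng: &mut Pcg64Si) -> String {
    format!("/item/{}", rng.next_u64() % 1000)
}

fn main() {
    let mut rng = Pcg64Si::from_seed(0x5EED);

    // Client side: copy the 8-byte state first, then generate the URL for the request.
    let snapshot = rng;
    let url = generate_path(&mut rng);

    // db::store side: replay the snapshot through the same generator to recover the URL.
    let mut replay = snapshot;
    let replayed = generate_path(&mut replay);

    assert_eq!(url, replayed);
    println!("url = {url}, replayed = {replayed}");
}

Storing the 8-byte state instead of the generated URL keeps RequestResult small even when --rand-regex produces long URLs, at the cost of re-running the generator once per stored row.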