diff --git a/README.md b/README.md
index 63095a9..b2fbe6b 100644
--- a/README.md
+++ b/README.md
@@ -67,7 +67,7 @@ By default, the app's settings are persisted after closing.
 
 The purpose of this crate is to study tradeoffs regarding model inference,
 native GUIs and video decoding approaches, in Rust :crab:.
-There are a couple of Todos will make `InFur` more intersting beyond exploring
+There are a couple of Todos that will make `InFur` more interesting beyond exploring
 production-readiness as now:
 
 - [ ] GATify `type Output` in `trait Processor`
diff --git a/ff-video/src/decoder.rs b/ff-video/src/decoder.rs
index 5667e11..844550f 100644
--- a/ff-video/src/decoder.rs
+++ b/ff-video/src/decoder.rs
@@ -174,7 +174,7 @@ enum StreamInfoTerm {
     Final(String),
 }
 
-/// Deliver infos about an ffmpeg video process trhough its stderr file
+/// Deliver infos about an ffmpeg video process through its stderr file
 ///
 /// The receiver can be read until satisfying info was obtained and dropped anytime.
 /// By default, frame updates and other infos are logged as tracing event.
diff --git a/ff-video/src/parse.rs b/ff-video/src/parse.rs
index c0cd652..fc737ae 100644
--- a/ff-video/src/parse.rs
+++ b/ff-video/src/parse.rs
@@ -251,7 +251,7 @@ impl InfoParser {
 
 /// Blanket implementation for lines of ffmpeg's default stderr bytes.
 pub(crate) trait FFMpegLineIter: Iterator {
-    /// Emit lines on \n, \r (CR) or both but never emtpy lines.
+    /// Emit lines on \n, \r (CR) or both but never empty lines.
     fn ffmpeg_lines(self) -> FFMpegLines<Self>
     where
         Self: Sized,
diff --git a/infur-test-gen/build.rs b/infur-test-gen/build.rs
index 39ecad4..e0ad0d1 100644
--- a/infur-test-gen/build.rs
+++ b/infur-test-gen/build.rs
@@ -27,7 +27,7 @@ fn run_ffmpeg_synth(
         .expect("synthesizing video couldn't start, do you have ffmpeg in PATH?")
         .wait()
         .expect("synthesizing video didn't finish");
-    assert!(status.success(), "synthesizing videos didn't finish succesfully");
+    assert!(status.success(), "synthesizing videos didn't finish successfully");
 }
 
 fn download(source_url: &str, target_file: impl AsRef<Path>) {
@@ -85,7 +85,7 @@ pub fn main() {
     }
 
     // models
-    // segementation model, see: https://github.com/onnx/models/tree/main/vision/object_detection_segmentation/fcn
+    // segmentation model, see: https://github.com/onnx/models/tree/main/vision/object_detection_segmentation/fcn
     let fcn_resnet50_12_int8 = gen_root.join("models").join("fcn-resnet50-12-int8.onnx");
     download("https://github.com/onnx/models/raw/main/vision/object_detection_segmentation/fcn/model/fcn-resnet50-12-int8.onnx", &fcn_resnet50_12_int8);
 
diff --git a/infur/src/decode_predict.rs b/infur/src/decode_predict.rs
index 400b821..8a90441 100644
--- a/infur/src/decode_predict.rs
+++ b/infur/src/decode_predict.rs
@@ -6,7 +6,7 @@ use onnxruntime::ndarray::Array3;
 ///
 /// adapted from:
 /// and:
-const COLORS_PALATTE: [(u8, u8, u8); 20] = [
+const COLORS_PALETTE: [(u8, u8, u8); 20] = [
     (75, 180, 60),
     (75, 25, 230),
     (25, 225, 255),
@@ -31,7 +31,7 @@ const COLORS_PALATTE: [(u8, u8, u8); 20] = [
 
 fn color_code(klass: usize, alpha: f32) -> Color32 {
     // todo: pre-transform COLORS into linear space
-    let (r, g, b) = COLORS_PALATTE[klass % COLORS_PALATTE.len()];
+    let (r, g, b) = COLORS_PALETTE[klass % COLORS_PALETTE.len()];
     Color32::from_rgba_unmultiplied(r, g, b, (alpha * 255.0f32) as u8)
 }
 
@@ -92,7 +92,7 @@ mod test {
 
     #[test]
     fn color_2() {
-        let c = COLORS_PALATTE[2];
+        let c = COLORS_PALETTE[2];
         assert_eq!(color_code(2, 0.5), Color32::from_rgba_unmultiplied(c.0, c.1, c.2, 127));
     }
 
diff --git a/infur/src/predict_onnx.rs b/infur/src/predict_onnx.rs
index 563dcd6..a29281f 100644
--- a/infur/src/predict_onnx.rs
+++ b/infur/src/predict_onnx.rs
@@ -70,7 +70,7 @@ struct ImageSession<'s> {
 
 /// ONNX session with pre-processing u8 images.
 impl<'s> ImageSession<'s> {
-    /// Constract an `ImageSession` by inferring some required image input meta data.
+    /// Construct an `ImageSession` by inferring some required image input meta data.
     ///
     /// The basic assumption is that images are passed as batches at position 0.
    ///
@@ -375,7 +375,7 @@ mod test {
        let mut tensors = vec![];
        m.advance(&img, &mut tensors).unwrap();
 
-        assert_eq!(tensors.len(), 2, "this sementation model should return two tensors");
+        assert_eq!(tensors.len(), 2, "this segmentation model should return two tensors");
         assert_eq!(tensors[0].shape(), [21, 240, 320], "out should be 21 classes upscaled");
         assert_eq!(tensors[1].shape(), [21, 240, 320], "aux should be 21 classes upscaled");
     }
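
The README todo "GATify `type Output` in `trait Processor`" is the one item above that may deserve a sketch. Below is a minimal, hypothetical shape for it: the real `Processor` trait is not shown in this diff, so the trait name reuse and the `advance` signature are guesses, and only the generic-associated-type idea itself is the point. With `type Output<'a>`, a processor can lend output that borrows an internal scratch buffer instead of allocating per frame.

// Hypothetical trait shape; `infur`'s actual `Processor` will differ.
trait Processor {
    type Input;
    // The GAT: output may borrow from the processor itself.
    type Output<'a>
    where
        Self: 'a;

    fn advance(&mut self, input: &Self::Input) -> Self::Output<'_>;
}

// Toy impl: scales pixel values into a reused buffer and lends it out.
struct Scale {
    factor: f32,
    buf: Vec<f32>,
}

impl Processor for Scale {
    type Input = Vec<f32>;
    type Output<'a> = &'a [f32] where Self: 'a;

    fn advance(&mut self, input: &Self::Input) -> Self::Output<'_> {
        self.buf.clear();
        self.buf.extend(input.iter().map(|v| v * self.factor));
        &self.buf
    }
}

fn main() {
    let mut p = Scale { factor: 2.0, buf: Vec::new() };
    let out = p.advance(&vec![1.0, 2.0, 3.0]);
    assert_eq!(out, &[2.0, 4.0, 6.0][..]);
}

The tradeoff is that a lent `Output<'a>` cannot outlive the processor, which matters wherever results cross a thread boundary, as a GUI/decoder split presumably requires.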
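The doc-comment fix in `ff-video/src/parse.rs` is also the clearest statement of what `ffmpeg_lines` promises: ffmpeg redraws its progress line in place using bare `\r`, so a useful line iterator must break on `\n`, `\r`, or `\r\n` and swallow the empty segment a `\r\n` pair produces. Here is a standalone sketch of just that splitting rule on `&str`; the crate's actual `FFMpegLineIter` is an iterator adapter yielding `FFMpegLines` and is not reproduced here.

// Minimal sketch of the documented rule, not the crate's implementation.
fn split_ffmpeg_lines(chunk: &str) -> Vec<&str> {
    chunk
        .split(|c| c == '\n' || c == '\r')
        .filter(|line| !line.is_empty()) // a "\r\n" pair yields one empty segment
        .collect()
}

fn main() {
    // ffmpeg overwrites its progress line with bare CRs:
    let raw = "frame=  10 fps=25\rframe=  20 fps=25\r\nDone\n";
    assert_eq!(
        split_ffmpeg_lines(raw),
        ["frame=  10 fps=25", "frame=  20 fps=25", "Done"]
    );
}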
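Finally, the corrected doc comment on `ImageSession` ("inferring some required image input meta data", with batches at position 0) can be made concrete with a hypothetical helper. How `infur` actually queries the ONNX session is not part of this diff; the `&[Option<u32>]` dimension encoding below (fixed axes as `Some`, dynamic axes as `None`) is an assumption for illustration only.

// Hypothetical: pick (C, H, W) out of an NCHW input signature,
// tolerating a dynamic batch axis at position 0.
fn infer_chw(dims: &[Option<u32>]) -> Option<(u32, u32, u32)> {
    match dims {
        [_, Some(c), Some(h), Some(w)] => Some((*c, *h, *w)),
        _ => None, // dynamic C/H/W or non-4D input: nothing to infer
    }
}

fn main() {
    // fixed 240x320 input, like the shapes asserted in the test above:
    assert_eq!(infer_chw(&[None, Some(3), Some(240), Some(320)]), Some((3, 240, 320)));
    // a model with dynamic spatial axes leaves H/W to the caller:
    assert_eq!(infer_chw(&[None, Some(3), None, None]), None);
}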