ray-tracing2/ray-tracing-tev/src/main.rs
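
//! Renders the example scenes with four renderers (depth, path tracing, path
//! tracing with importance sampling, next event estimation) and streams each
//! result over TCP to a running tev image viewer.
//!
//! Sketch of an invocation, assuming the crate builds a binary/package named
//! `ray-tracing-tev` and tev is listening on its default port 14158:
//!
//! ```text
//! cargo run --release -p ray-tracing-tev -- --samples-per-pixel 256 <scene names...>
//! ```
//!
//! With no scene names given, every scene from `example_scenes()` is rendered.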

use clap::Parser;
use rand::{rngs::SmallRng, SeedableRng};
use ray_tracing_core::{
    camera::{BasicCamera, Camera},
    prelude::*,
    renderer::ClassicalRenderer,
    scene::Scene,
};
use ray_tracing_renderer::{
    depth_renderer::DepthRenderer, next_event_estimation::NextEventEstimation,
    path_tracer::PathTracer, path_tracer_importance::PathTracerImportance,
};
use ray_tracing_scene::examples::example_scenes;
use rayon::prelude::*;
use std::net::TcpStream;
use tev_client::{PacketCreateImage, PacketUpdateImage, TevClient, TevError};

// Command-line options for the tool.
#[derive(Parser)]
struct Args {
    // Address of the tev instance to connect to (tev listens on 14158 by default).
    #[arg(long, default_value = "127.0.0.1:14158")]
    tev: String,
    // Names of the example scenes to render; all example scenes if empty.
    scenes: Vec<String>,
    #[arg(long, default_value_t = 400)]
    width: u32,
    #[arg(long, default_value_t = 400)]
    height: u32,
    #[arg(long, default_value_t = 1024)]
    samples_per_pixel: usize,
}

// Renders `scene` with `renderer` through `camera`, averaging `samples_per_pixel`
// samples per pixel in parallel, and sends the RGB result to the connected tev viewer.
fn render_image<
    R: ClassicalRenderer<SmallRng, S, C> + Sync,
    S: Scene<SmallRng> + Sync,
    C: Camera<SmallRng> + Sync,
>(
    name: impl AsRef<str>,
    renderer: &R,
    scene: &S,
    camera: &C,
    samples_per_pixel: usize,
    tev: &mut TevClient,
) -> Result<(), TevError> {
    // Interleaved RGB buffer: one chunk of three floats per pixel, in row-major order.
    let mut data = vec![0.0; (renderer.width() * renderer.height() * 3) as usize];
    data.par_chunks_mut(3).enumerate().for_each(|(i, c)| {
        let x = (i % renderer.width() as usize) as u32;
        let y = (i / renderer.width() as usize) as u32;
        // Seed the per-pixel RNG deterministically from the pixel index so
        // renders are reproducible regardless of thread scheduling.
        let mut rng = SmallRng::seed_from_u64((x + y * renderer.width()) as u64);
        for _ in 0..samples_per_pixel {
            let r =
                renderer.render_pixel(scene, camera, x, y, &mut rng) / (samples_per_pixel as Float);
            c[0] += r.r();
            c[1] += r.g();
            c[2] += r.b();
        }
    });

    // Create the image in tev, then upload the full pixel buffer.
    let channel_names = &["r", "g", "b"];
    tev.send(PacketCreateImage {
        image_name: name.as_ref(),
        grab_focus: false,
        width: renderer.width(),
        height: renderer.height(),
        channel_names,
    })?;
    tev.send(PacketUpdateImage {
        image_name: name.as_ref(),
        grab_focus: false,
        channel_names,
        channel_offsets: &[0, 1, 2],
        channel_strides: &[3, 3, 3],
        x: 0,
        y: 0,
        width: renderer.width(),
        height: renderer.height(),
        data: &data,
    })?;
    Ok(())
}

fn main() {
    let args = Args::parse();
    // Connect to a running tev instance over TCP.
    let mut client = TevClient::wrap(TcpStream::connect(args.tev).unwrap());

    // Render every example scene unless specific scenes were requested.
    let map = example_scenes();
    let scenes: Vec<&str> = if args.scenes.is_empty() {
        map.keys().copied().collect()
    } else {
        args.scenes.iter().map(|s| s.as_str()).collect()
    };

    for scene in scenes {
        let f = map.get(scene).unwrap();
        let s = f.get_scene();
        let c = BasicCamera::from_look_at(
            args.width,
            args.height,
            f.get_camera_pos(),
            f.get_camera_look_at(),
            f.get_camera_up(),
            f.get_horizontal_fov(),
        );

        // Render the scene with each renderer; every image appears in tev
        // under its own "<scene> - <renderer>" name for side-by-side comparison.
        let r = DepthRenderer::new(args.width, args.height);
        render_image(
            format!("{scene} - depth renderer"),
            &r,
            &s,
            &c,
            args.samples_per_pixel,
            &mut client,
        )
        .unwrap();

        let r = PathTracer::new(args.width, args.height);
        render_image(
            format!("{scene} - path tracer"),
            &r,
            &s,
            &c,
            args.samples_per_pixel,
            &mut client,
        )
        .unwrap();

        let r = PathTracerImportance::new(args.width, args.height);
        render_image(
            format!("{scene} - path tracer importance"),
            &r,
            &s,
            &c,
            args.samples_per_pixel,
            &mut client,
        )
        .unwrap();

        let r = NextEventEstimation::new(args.width, args.height);
        render_image(
            format!("{scene} - next event estimation"),
            &r,
            &s,
            &c,
            args.samples_per_pixel,
            &mut client,
        )
        .unwrap();
    }
}