diff --git a/src/main.rs b/src/main.rs
index 858e3a9..5260715 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -8,12 +8,12 @@
 type InferenceBackend = burn::backend::Cuda;
 type TrainingBackend = Autodiff<InferenceBackend>;
 const LIMITS: wgpu::Limits = wgpu::Limits::defaults();
+const FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
 use strafesnet_graphics::setup;
 use strafesnet_roblox_bot_file::v0;
 
-const SIZE_X: usize = 64;
-const SIZE_Y: usize = 36;
-const INPUT: usize = SIZE_X * SIZE_Y;
+const SIZE: glam::UVec2 = glam::uvec2(64, 36);
+const INPUT: usize = (SIZE.x * SIZE.y) as usize;
 const HIDDEN: [usize; 2] = [INPUT >> 3, INPUT >> 7];
 // MoveForward
 // MoveLeft
@@ -24,6 +24,9 @@ const HIDDEN: [usize; 2] = [INPUT >> 3, INPUT >> 7];
 // mouse_dy
 const OUTPUT: usize = 7;
 
+// bytes_per_row needs to be a multiple of 256.
+const STRIDE_SIZE: u32 = (SIZE.x * size_of::<f32>() as u32).next_multiple_of(256);
+
 #[derive(Module, Debug)]
 struct Net<B: Backend> {
     input: Linear<B>,
@@ -61,6 +64,140 @@ impl<B: Backend> Net<B> {
     }
 }
 
+struct GraphicsState {
+    device: wgpu::Device,
+    queue: wgpu::Queue,
+    graphics: strafesnet_roblox_bot_player::graphics::Graphics,
+    graphics_texture_view: wgpu::TextureView,
+    output_staging_buffer: wgpu::Buffer,
+    texture_data: Vec<u8>,
+}
+impl GraphicsState {
+    fn new(map: &strafesnet_common::map::CompleteMap) -> Self {
+        let desc = wgpu::InstanceDescriptor::new_without_display_handle_from_env();
+        let instance = wgpu::Instance::new(desc);
+        let (device, queue) = pollster::block_on(async {
+            let adapter = instance
+                .request_adapter(&wgpu::RequestAdapterOptions {
+                    power_preference: wgpu::PowerPreference::HighPerformance,
+                    force_fallback_adapter: false,
+                    compatible_surface: None,
+                })
+                .await
+                .unwrap();
+            setup::step4::request_device(&adapter, LIMITS)
+                .await
+                .unwrap()
+        });
+        let mut graphics = strafesnet_roblox_bot_player::graphics::Graphics::new(
+            &device, &queue, SIZE, FORMAT, LIMITS,
+        );
+        graphics.change_map(&device, &queue, map).unwrap();
+        let graphics_texture =
+            device.create_texture(&wgpu::TextureDescriptor {
+                label: Some("RGB texture"),
+                format: FORMAT,
+                size: wgpu::Extent3d {
+                    width: SIZE.x,
+                    height: SIZE.y,
+                    depth_or_array_layers: 1,
+                },
+                mip_level_count: 1,
+                sample_count: 1,
+                dimension: wgpu::TextureDimension::D2,
+                usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
+                view_formats: &[],
+            });
+        let graphics_texture_view = graphics_texture.create_view(&wgpu::TextureViewDescriptor {
+            label: Some("RGB texture view"),
+            aspect: wgpu::TextureAspect::All,
+            usage: Some(
+                wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
+            ),
+            ..Default::default()
+        });
+        let texture_data = Vec::<u8>::with_capacity((STRIDE_SIZE * SIZE.y) as usize);
+        let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
+            label: Some("Output staging buffer"),
+            size: texture_data.capacity() as u64,
+            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
+            mapped_at_creation: false,
+        });
+        Self {
+            device,
+            queue,
+            graphics,
+            graphics_texture_view,
+            output_staging_buffer,
+            texture_data,
+        }
+    }
+    fn generate_inputs(&mut self, pos: glam::Vec3, angles: glam::Vec2, inputs: &mut Vec<f32>) {
+        let mut encoder = self
+            .device
+            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
+                label: Some("wgpu encoder"),
+            });
+
+        // render!
+        self.graphics
+            .encode_commands(&mut encoder, &self.graphics_texture_view, pos, angles);
+
+        // copy the depth texture into ram
+        encoder.copy_texture_to_buffer(
+            wgpu::TexelCopyTextureInfo {
+                texture: self.graphics.depth_texture(),
+                mip_level: 0,
+                origin: wgpu::Origin3d::ZERO,
+                aspect: wgpu::TextureAspect::All,
+            },
+            wgpu::TexelCopyBufferInfo {
+                buffer: &self.output_staging_buffer,
+                layout: wgpu::TexelCopyBufferLayout {
+                    offset: 0,
+                    // This needs to be a multiple of 256.
+                    bytes_per_row: Some(STRIDE_SIZE),
+                    rows_per_image: Some(SIZE.y),
+                },
+            },
+            wgpu::Extent3d {
+                width: SIZE.x,
+                height: SIZE.y,
+                depth_or_array_layers: 1,
+            },
+        );
+
+        self.queue.submit([encoder.finish()]);
+
+        // map buffer
+        let buffer_slice = self.output_staging_buffer.slice(..);
+        let (sender, receiver) = std::sync::mpsc::channel();
+        buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
+        self.device
+            .poll(wgpu::PollType::wait_indefinitely())
+            .unwrap();
+        receiver.recv().unwrap().unwrap();
+
+        // copy texture inside a scope so the mapped view gets dropped
+        {
+            let view = buffer_slice.get_mapped_range();
+            self.texture_data.extend_from_slice(&view[..]);
+        }
+        self.output_staging_buffer.unmap();
+
+        // discombolulate stride
+        for y in 0..SIZE.y {
+            inputs.extend(
+                self.texture_data[(STRIDE_SIZE * y) as usize
+                    ..(STRIDE_SIZE * y + SIZE.x * size_of::<f32>() as u32) as usize]
+                    .chunks_exact(4)
+                    .map(|b| f32::from_le_bytes(b.try_into().unwrap())),
+            )
+        }
+
+        self.texture_data.clear();
+    }
+}
+
 fn training() {
     let gpu_id: usize = std::env::args()
         .skip(1)
@@ -85,63 +222,11 @@ fn training() {
     let world_offset = bot.world_offset();
     let timelines = bot.timelines();
 
-    // setup graphics
-    let desc = wgpu::InstanceDescriptor::new_without_display_handle_from_env();
-    let instance = wgpu::Instance::new(desc);
-    let (device, queue) = pollster::block_on(async {
-        let adapter = instance
-            .request_adapter(&wgpu::RequestAdapterOptions {
-                power_preference: wgpu::PowerPreference::HighPerformance,
-                force_fallback_adapter: false,
-                compatible_surface: None,
-            })
-            .await
-            .unwrap();
-        setup::step4::request_device(&adapter, LIMITS)
-            .await
-            .unwrap()
-    });
-
-    const FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
-    let size = [SIZE_X as u32, SIZE_Y as u32].into();
-    let mut graphics = strafesnet_roblox_bot_player::graphics::Graphics::new(
-        &device, &queue, size, FORMAT, LIMITS,
-    );
-    graphics.change_map(&device, &queue,
-        &map).unwrap();
-
     // setup simulation
     // run progressively longer segments of the map, starting very close to the end of the run and working the starting time backwards until the ai can run the whole map
 
-    // set up textures
-    let graphics_texture = device.create_texture(&wgpu::TextureDescriptor {
-        label: Some("RGB texture"),
-        format: FORMAT,
-        size: wgpu::Extent3d {
-            width: size.x,
-            height: size.y,
-            depth_or_array_layers: 1,
-        },
-        mip_level_count: 1,
-        sample_count: 1,
-        dimension: wgpu::TextureDimension::D2,
-        usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
-        view_formats: &[],
-    });
-    let graphics_texture_view = graphics_texture.create_view(&wgpu::TextureViewDescriptor {
-        label: Some("RGB texture view"),
-        aspect: wgpu::TextureAspect::All,
-        usage: Some(wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING),
-        ..Default::default()
-    });
-    // bytes_per_row needs to be a multiple of 256.
-    let stride_size = (size.x * size_of::<f32>() as u32).next_multiple_of(256);
-    let mut texture_data = Vec::<u8>::with_capacity((stride_size * size.y) as usize);
-    let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
-        label: Some("Output staging buffer"),
-        size: texture_data.capacity() as u64,
-        usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
-        mapped_at_creation: false,
-    });
+    // set up graphics
+    let mut g = GraphicsState::new(&map);
 
     // training data
     let training_samples = timelines.input_events.len() - 1;
@@ -234,64 +319,7 @@ fn training() {
         let pos = vec3(output_event.event.position) - world_offset;
         let angles = angles(output_event.event.angles);
 
-        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-            label: Some("wgpu encoder"),
-        });
-
-        // render!
-        graphics.encode_commands(&mut encoder, &graphics_texture_view, pos, angles);
-
-        // copy the depth texture into ram
-        encoder.copy_texture_to_buffer(
-            wgpu::TexelCopyTextureInfo {
-                texture: graphics.depth_texture(),
-                mip_level: 0,
-                origin: wgpu::Origin3d::ZERO,
-                aspect: wgpu::TextureAspect::All,
-            },
-            wgpu::TexelCopyBufferInfo {
-                buffer: &output_staging_buffer,
-                layout: wgpu::TexelCopyBufferLayout {
-                    offset: 0,
-                    // This needs to be a multiple of 256.
-                    bytes_per_row: Some(stride_size as u32),
-                    rows_per_image: Some(size.y),
-                },
-            },
-            wgpu::Extent3d {
-                width: size.x,
-                height: size.y,
-                depth_or_array_layers: 1,
-            },
-        );
-
-        queue.submit([encoder.finish()]);
-
-        // map buffer
-        let buffer_slice = output_staging_buffer.slice(..);
-        let (sender, receiver) = std::sync::mpsc::channel();
-        buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
-        device.poll(wgpu::PollType::wait_indefinitely()).unwrap();
-        receiver.recv().unwrap().unwrap();
-
-        // copy texture inside a scope so the mapped view gets dropped
-        {
-            let view = buffer_slice.get_mapped_range();
-            texture_data.extend_from_slice(&view[..]);
-        }
-        output_staging_buffer.unmap();
-
-        // discombolulate stride
-        for y in 0..size.y {
-            inputs.extend(
-                texture_data[(stride_size * y) as usize
-                    ..(stride_size * y + size.x * size_of::<f32>() as u32) as usize]
-                    .chunks_exact(4)
-                    .map(|b| f32::from_le_bytes(b.try_into().unwrap())),
-            )
-        }
-
-        texture_data.clear();
+        g.generate_inputs(pos, angles, &mut inputs);
     }
 
     // normalize inputs