1 Commits

Author SHA1 Message Date
8d93fc528e print input 2026-03-27 10:30:51 -07:00
3 changed files with 28 additions and 31 deletions

1
Cargo.lock generated
View File

@@ -5451,7 +5451,6 @@ name = "strafe-ai"
version = "0.1.0"
dependencies = [
"burn",
"glam",
"pollster",
"strafesnet_common",
"strafesnet_graphics",

View File

@@ -14,4 +14,3 @@ strafesnet_roblox_bot_file = { version = "0.9.4", registry = "strafesnet" }
strafesnet_roblox_bot_player = { version = "=0.6.2-depth2", registry = "strafesnet" }
strafesnet_snf = { version = "0.4.0", registry = "strafesnet" }
pollster = "0.4.0"
glam = "0.32.1"

View File

@@ -1,7 +1,7 @@
use burn::backend::Autodiff;
use burn::nn::loss::{MseLoss, Reduction};
use burn::nn::{Linear, LinearConfig, Relu};
use burn::optim::{AdamConfig, GradientsParams, Optimizer};
use burn::optim::{GradientsParams, Optimizer, AdamConfig};
use burn::prelude::*;
type InferenceBackend = burn::backend::Cuda<f32>;
@@ -14,7 +14,10 @@ use strafesnet_roblox_bot_file::v0;
const SIZE_X: usize = 64;
const SIZE_Y: usize = 36;
const INPUT: usize = SIZE_X * SIZE_Y;
const HIDDEN: [usize; 2] = [INPUT >> 3, INPUT >> 7];
const HIDDEN: [usize; 2] = [
INPUT >> 3,
INPUT >> 7,
];
// MoveForward
// MoveLeft
// MoveBack
@@ -146,7 +149,7 @@ fn training() {
// training data
let training_samples = timelines.input_events.len() - 1;
let input_size = INPUT * size_of::<f32>();
let input_size = (size.x * size.y) as usize * size_of::<f32>();
let mut inputs = Vec::with_capacity(input_size * training_samples);
let mut targets = Vec::with_capacity(OUTPUT * training_samples);
@@ -224,22 +227,27 @@ fn training() {
.unwrap(),
};
fn vec3(v: v0::Vector3) -> glam::Vec3 {
glam::vec3(v.x, v.y, v.z)
fn p(v: v0::Vector3) -> [f32; 3] {
[v.x, v.y, v.z]
}
fn angles(a: v0::Vector3) -> glam::Vec2 {
glam::vec2(a.y, a.x)
fn a(a: v0::Vector3) -> [f32; 2] {
[a.y, a.x]
}
fn sub<T: core::ops::Sub>(lhs: T, rhs: T) -> T::Output {
lhs - rhs
}
let pos = vec3(output_event.event.position) - world_offset;
let angles = angles(output_event.event.angles);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("wgpu encoder"),
});
// render!
graphics.encode_commands(&mut encoder, &graphics_texture_view, pos, angles);
graphics.encode_commands(
&mut encoder,
&graphics_texture_view,
sub(p(output_event.event.position).into(), world_offset),
a(output_event.event.angles).into(),
);
// copy the depth texture into ram
encoder.copy_texture_to_buffer(
@@ -281,6 +289,7 @@ fn training() {
}
output_staging_buffer.unmap();
let inputs_start = inputs.len();
// discombobulate stride
for y in 0..size.y {
inputs.extend(
@@ -290,25 +299,12 @@ fn training() {
.map(|b| f32::from_le_bytes(b.try_into().unwrap())),
)
}
let inputs_end = inputs.len();
println!("inputs = {:?}", &inputs[inputs_start..inputs_end]);
texture_data.clear();
}
// normalize inputs
let global_min = *inputs
.iter()
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
let global_max = *inputs
.iter()
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
let global_range = global_max - global_min;
println!("Normalizing to range {global_min} - {global_max}");
inputs.iter_mut().for_each(|value| {
*value = 1.0 - (*value - global_min) / global_range;
});
let device = burn::backend::cuda::CudaDevice::new(gpu_id);
let mut model: Net<TrainingBackend> = Net::init(&device);
@@ -317,7 +313,10 @@ fn training() {
let mut optim = AdamConfig::new().init();
let inputs = Tensor::from_data(
TensorData::new(inputs, Shape::new([training_samples, INPUT])),
TensorData::new(
inputs,
Shape::new([training_samples, (size.x * size.y) as usize]),
),
&device,
);
let targets = Tensor::from_data(
@@ -325,8 +324,8 @@ fn training() {
&device,
);
const LEARNING_RATE: f64 = 0.001;
const EPOCHS: usize = 100000;
const LEARNING_RATE: f64 = 0.5;
const EPOCHS: usize = 10000;
for epoch in 0..EPOCHS {
let predictions = model.forward(inputs.clone());