Commit 05f73d4
some improvements
MrNeRF committed Aug 6, 2023
1 parent b15249e
Showing 6 changed files with 118 additions and 81 deletions.
27 changes: 26 additions & 1 deletion debug/compare.py
@@ -3,6 +3,7 @@

# Specification for tensors
tensor_specs = {
"image": {"dims": 3, "shape": [3, 546, 979], "type": float},
"means3D": {"dims": 2, "shape": [136029, 3], "type": float},
"sh": {"dims": 3, "shape": [136029, 16, 3], "type": float},
"colors_precomp": {"dims": 1, "shape": [0], "type": float},
@@ -20,9 +21,15 @@
"projmatrix": {"dims": 2, "shape": [4, 4], "type": float},
"sh_degree": {"dims": 0, "shape": [], "type": np.int64},
"camera_center": {"dims": 1, "shape": [3], "type": float},
"prefiltered": {"dims": 0, "shape": [], "type": bool}
"prefiltered": {"dims": 0, "shape": [], "type": bool},
"max_radii2D": {"dims": 1, "shape": [136029], "type": float},
"visibility_filter": {"dims": 1, "shape": [136029], "type": bool},
"radii": {"dims": 1, "shape": [136029], "type": int},
"viewspace_point_tensor": {"dims": 2, "shape": [136029, 3], "type": float},
"max_radii2D_masked": {"dims": 1, "shape": [136029], "type": float},
}


def load_tensor(filename, tensor_spec):
with open(filename, 'rb') as f:
dims = int.from_bytes(f.read(4), 'little')
@@ -36,11 +43,16 @@ def load_tensor(filename, tensor_spec):
data = np.fromfile(f, dtype=np.bool_).astype(np.bool_)
elif data_type == float:
data = np.fromfile(f, dtype=np.float32).astype(np.float32)
elif data_type == int:
data = np.fromfile(f, dtype=np.int32).astype(np.int32)
else:
data = np.fromfile(f, dtype=np.int64).astype(np.int64)

# Reshape the data based on tensor specification, unless it's a scalar
if tensor_spec["dims"] != 0:
print(f"Filename: {filename}")
print(f"Total size of loaded data: {data.size}")
print(f"Expected shape from tensor_spec: {tensor_spec['shape']}")
data = data.reshape(tensor_spec["shape"])

return torch.from_numpy(data)
@@ -83,5 +95,18 @@ def load_tensor(filename, tensor_spec):

if not torch.all(approx_equal):
print(f"Value mismatch for {name}")

# Get indices of mismatched values
mismatched_indices = torch.nonzero(~approx_equal, as_tuple=True)

# Get the mismatched values from both tensors
mismatched_pytorch_values = tensor[mismatched_indices]
mismatched_libtorch_values = libtorch_tensor[mismatched_indices]

# Show up to 20 of the mismatched values side by side
print(f"Showing up to 20 of {mismatched_pytorch_values.shape[0]} mismatched values")
num_to_show = min(20, mismatched_pytorch_values.shape[0])
for i in range(num_to_show):
print(f"Index: {mismatched_indices[0][i]}, PyTorch: {mismatched_pytorch_values[i]}, LibTorch: {mismatched_libtorch_values[i]}")
else:
print(f"{name} matches!")
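For context, a minimal driver loop for this script could iterate tensor_specs and compare each LibTorch dump against its PyTorch counterpart. This is a sketch only: the libtorch_ file prefix matches the names written in src/main.cu, but the pytorch_ prefix and the tolerance are assumptions, not part of this commit.

import torch

# Hypothetical driver appended to debug/compare.py; reuses tensor_specs and
# load_tensor defined above. Naming of the PyTorch-side dumps is assumed.
for name, spec in tensor_specs.items():
    libtorch_tensor = load_tensor(f"libtorch_{name}.pt", spec)
    pytorch_tensor = load_tensor(f"pytorch_{name}.pt", spec)
    if libtorch_tensor.dtype == torch.bool:
        approx_equal = libtorch_tensor == pytorch_tensor  # exact match for masks
    else:
        approx_equal = torch.isclose(pytorch_tensor.float(),
                                     libtorch_tensor.float(), atol=1e-5)
    print(f"{name}: {'matches' if bool(torch.all(approx_equal)) else 'MISMATCH'}")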
15 changes: 13 additions & 2 deletions includes/debug_utils.cuh
@@ -22,8 +22,19 @@ namespace ts {
// Write sizes
outfile.write(reinterpret_cast<char*>(sizes.data()), dims * sizeof(int64_t));

// Write tensor data
outfile.write(reinterpret_cast<char*>(cpu_tensor.data_ptr()), numel * sizeof(float));
// Write tensor data based on its type
if (cpu_tensor.dtype() == torch::kFloat32) {
outfile.write(reinterpret_cast<char*>(cpu_tensor.data_ptr<float>()), numel * sizeof(float));
} else if (cpu_tensor.dtype() == torch::kInt64) {
outfile.write(reinterpret_cast<char*>(cpu_tensor.data_ptr<int64_t>()), numel * sizeof(int64_t));
} else if (cpu_tensor.dtype() == torch::kBool) {
outfile.write(reinterpret_cast<char*>(cpu_tensor.data_ptr<bool>()), numel * sizeof(bool));
} else if (cpu_tensor.dtype() == torch::kInt32) {
outfile.write(reinterpret_cast<char*>(cpu_tensor.data_ptr<int32_t>()), numel * sizeof(int32_t));
} else {
throw std::runtime_error("Unsupported tensor type");
}
// Add more data types as needed...

outfile.close();
}
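One possible PyTorch-side counterpart to ts::save_my_tensor, for producing reference dumps that debug/compare.py can read, is sketched below. The function name and dtype handling are assumptions; only the binary layout (int32 dims, int64 sizes, raw element data) is taken from the code above and from load_tensor.

import numpy as np
import torch

def save_my_tensor(tensor: torch.Tensor, filename: str) -> None:
    # Assumed layout, mirroring ts::save_my_tensor and load_tensor in compare.py:
    # int32 dims (little-endian), int64 sizes[dims], then the raw element bytes.
    arr = tensor.detach().cpu().contiguous().numpy()
    with open(filename, "wb") as f:
        f.write(np.int32(arr.ndim).tobytes())
        f.write(np.asarray(arr.shape, dtype=np.int64).tobytes())
        f.write(arr.tobytes())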
3 changes: 1 addition & 2 deletions includes/parameters.cuh
@@ -9,13 +9,12 @@ struct OptimizationParameters {
float position_lr_init = 0.00016f;
float position_lr_final = 0.0000016f;
float position_lr_delay_mult = 0.01f;
uint64_t posititon_lr_max_steps = 30'000;
int64_t position_lr_max_steps = 30'000;
float feature_lr = 0.0025f;
float percent_dense = 0.01f;
float opacity_lr = 0.05f;
float scaling_lr = 0.001f;
float rotation_lr = 0.001f;
float position_lr_max_steps = 0.f;
float lambda_dssim = 0.2f;
uint64_t densification_interval = 100;
uint64_t opacity_reset_interval = 3'000;
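The position_lr_init, position_lr_final, position_lr_delay_mult, and position_lr_max_steps fields parameterize an exponential decay schedule for the position learning rate. A sketch of that schedule, following the reference 3DGS PyTorch implementation (get_expon_lr_func), which the C++ _xyz_scheduler_args is presumably meant to mirror; lr_delay_steps is not a field of this struct and defaults to 0 here.

import numpy as np

def expon_lr(step, lr_init=0.00016, lr_final=0.0000016,
             lr_delay_mult=0.01, lr_delay_steps=0, max_steps=30_000):
    if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
        return 0.0
    if lr_delay_steps > 0:
        # Smooth ramp from lr_delay_mult * lr up to the full lr over lr_delay_steps
        delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
            0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1))
    else:
        delay_rate = 1.0
    t = np.clip(step / max_steps, 0, 1)
    # Log-linear interpolation between lr_init and lr_final
    log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
    return delay_rate * log_lerp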
6 changes: 4 additions & 2 deletions src/gaussian.cu
@@ -132,14 +132,16 @@ void GaussianModel::Training_setup(const OptimizationParameters& params) {
static_cast<torch::optim::AdamOptions&>(_optimizer_params_groups[4].options()).eps(1e-15);
static_cast<torch::optim::AdamOptions&>(_optimizer_params_groups[5].options()).eps(1e-15);

_optimizer = std::make_unique<torch::optim::Adam>(_optimizer_params_groups, torch::optim::AdamOptions(params.position_lr_init * this->_spatial_lr_scale).eps(1e-15));
_optimizer = std::make_unique<torch::optim::Adam>(_optimizer_params_groups, torch::optim::AdamOptions(0.f).eps(1e-15));
std::cout << "Training setup done" << std::endl;
}

void GaussianModel::Update_learning_rate(float iteration) {
// This is hacky because you cant change in libtorch individual parameter learning rate
// xyz is added first, since _optimizer->param_groups() return a vector, we assume that xyz stays first
static_cast<torch::optim::AdamOptions&>(_optimizer->param_groups()[0].options()).set_lr(_xyz_scheduler_args(iteration));
auto lr = _xyz_scheduler_args(iteration);
std::cout << "Setting lr to " << lr << std::endl;
static_cast<torch::optim::AdamOptions&>(_optimizer->param_groups()[0].options()).set_lr(lr);
}

void GaussianModel::Save_as_ply(const std::string& filename) {
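For reference, the original PyTorch implementation updates the learning rate per parameter group by name rather than by index, whereas the libtorch port above relies on the xyz group sitting at index 0 of param_groups(). A sketch of that PyTorch counterpart:

def update_learning_rate(optimizer, xyz_scheduler, iteration):
    # Reference-style update: find the "xyz" param group by name and set its lr.
    for param_group in optimizer.param_groups:
        if param_group.get("name") == "xyz":
            lr = xyz_scheduler(iteration)
            param_group["lr"] = lr
            return lr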
16 changes: 8 additions & 8 deletions src/main.cu
@@ -48,27 +48,27 @@ int main(int argc, char* argv[]) {

// Loss Computations
ts::save_my_tensor(image, "libtorch_image.pt");
exit(0);
auto gt_image = cam.Get_original_image().to(torch::kCUDA);
auto l1l = gaussian_splatting::l1_loss(image, gt_image);
auto loss = (1.0 - optimParams.lambda_dssim) * l1l + optimParams.lambda_dssim * (1.0 - gaussian_splatting::ssim(image, gt_image));
std::cout << "Iteration: " << iter << " Loss: " << loss.item<float>() << std::endl;

loss.backward();
if (!gaussians._opacity.grad().defined()) {
std::cout << "Opacity gradient is not defined! Iter: " << iter << std::endl;
}
{
torch::NoGradGuard no_grad;
// Keep track of max radii in image-space for pruning
std::cout << "visibility_filter size" << visibility_filter.sizes() << std::endl;
std::cout << "gaussian._max_radii2D size" << gaussians._max_radii2D.sizes() << std::endl;
ts::save_my_tensor(gaussians._max_radii2D, "libtorch_max_radii2D.pt");
ts::save_my_tensor(visibility_filter, "libtorch_visibility_filter.pt");
ts::save_my_tensor(radii, "libtorch_radii.pt");
ts::save_my_tensor(viewspace_point_tensor, "libtorch_viewspace_point_tensor.pt");
auto visible_max_radii = gaussians._max_radii2D.masked_select(visibility_filter);
std::cout << "visible_max_raddi size" << visible_max_radii.sizes() << std::endl;
auto visible_radii = radii.masked_select(visibility_filter);

auto max_radii = torch::max(visible_max_radii, visible_radii);
gaussians._max_radii2D.masked_scatter_(visibility_filter, max_radii);
ts::save_my_tensor(gaussians._max_radii2D, "libtorch_max_radii2D_masked.pt");
if (iter == 2) {
exit(0);
}

// TODO: support saving
// if (iteration in saving_iterations):
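The masked_select / torch::max / masked_scatter_ sequence above corresponds to a single boolean-indexing assignment in the reference PyTorch implementation, which is what the libtorch_max_radii2D_masked.pt dump should match:

# Reference PyTorch equivalent of the max_radii2D bookkeeping (comparison only;
# variable names mirror gaussians._max_radii2D, visibility_filter, radii above)
gaussians.max_radii2D[visibility_filter] = torch.max(
    gaussians.max_radii2D[visibility_filter], radii[visibility_filter])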
132 changes: 66 additions & 66 deletions src/rasterize_points.cu
@@ -99,72 +99,72 @@ RasterizeGaussiansCUDA(
M = sh.size(1);
}

// print_tensor_info(background, "background");
// print_tensor_info(means3D, "means3D");
// print_tensor_info(colors, "colors");
// print_tensor_info(opacity, "opacity");
// print_tensor_info(scales, "scales");
// print_tensor_info(rotations, "rotations");
// print_tensor_info(cov3D_precomp, "cov3D_precomp");
// print_tensor_info(viewmatrix, "viewmatrix");
// print_tensor_info(projmatrix, "projmatrix");
// print_tensor_info(sh, "sh");
// print_tensor_info(campos, "campos");
// print_tensor_info(out_color, "out_color");
// print_tensor_info(radii, "radii");
//
// if (!background.data_ptr<float>()) {
// std::cout << "Null data pointer: background"
// << "\n";
// }
// if (!means3D.data_ptr<float>()) {
// std::cout << "Null data pointer: means3D"
// << "\n";
// }
// if (!colors.data_ptr<float>()) {
// std::cout << "Null data pointer: colors"
// << "\n";
// }
// if (!opacity.data_ptr<float>()) {
// std::cout << "Null data pointer: opacity"
// << "\n";
// }
// if (!scales.data_ptr<float>()) {
// std::cout << "Null data pointer: scales"
// << "\n";
// }
// if (!rotations.data_ptr<float>()) {
// std::cout << "Null data pointer: rotations"
// << "\n";
// }
// if (!cov3D_precomp.data_ptr<float>()) {
// std::cout << "Null data pointer: cov3D_precomp"
// << "\n";
// }
// if (!viewmatrix.data_ptr<float>()) {
// std::cout << "Null data pointer: viewmatrix"
// << "\n";
// }
// if (!projmatrix.data_ptr<float>()) {
// std::cout << "Null data pointer: projmatrix"
// << "\n";
// }
// if (!sh.data_ptr<float>()) {
// std::cout << "Null data pointer: sh"
// << "\n";
// }
// if (!campos.data_ptr<float>()) {
// std::cout << "Null data pointer: campos"
// << "\n";
// }
// if (!out_color.data_ptr<float>()) {
// std::cout << "Null data pointer: out_color"
// << "\n";
// }
// if (!radii.data_ptr<int>()) {
// std::cout << "Null data pointer: radii"
// << "\n";
// }
print_tensor_info(background, "background");
print_tensor_info(means3D, "means3D");
print_tensor_info(colors, "colors");
print_tensor_info(opacity, "opacity");
print_tensor_info(scales, "scales");
print_tensor_info(rotations, "rotations");
print_tensor_info(cov3D_precomp, "cov3D_precomp");
print_tensor_info(viewmatrix, "viewmatrix");
print_tensor_info(projmatrix, "projmatrix");
print_tensor_info(sh, "sh");
print_tensor_info(campos, "campos");
print_tensor_info(out_color, "out_color");
print_tensor_info(radii, "radii");

if (!background.data_ptr<float>()) {
std::cout << "Null data pointer: background"
<< "\n";
}
if (!means3D.data_ptr<float>()) {
std::cout << "Null data pointer: means3D"
<< "\n";
}
if (!colors.data_ptr<float>()) {
std::cout << "Null data pointer: colors"
<< "\n";
}
if (!opacity.data_ptr<float>()) {
std::cout << "Null data pointer: opacity"
<< "\n";
}
if (!scales.data_ptr<float>()) {
std::cout << "Null data pointer: scales"
<< "\n";
}
if (!rotations.data_ptr<float>()) {
std::cout << "Null data pointer: rotations"
<< "\n";
}
if (!cov3D_precomp.data_ptr<float>()) {
std::cout << "Null data pointer: cov3D_precomp"
<< "\n";
}
if (!viewmatrix.data_ptr<float>()) {
std::cout << "Null data pointer: viewmatrix"
<< "\n";
}
if (!projmatrix.data_ptr<float>()) {
std::cout << "Null data pointer: projmatrix"
<< "\n";
}
if (!sh.data_ptr<float>()) {
std::cout << "Null data pointer: sh"
<< "\n";
}
if (!campos.data_ptr<float>()) {
std::cout << "Null data pointer: campos"
<< "\n";
}
if (!out_color.data_ptr<float>()) {
std::cout << "Null data pointer: out_color"
<< "\n";
}
if (!radii.data_ptr<int>()) {
std::cout << "Null data pointer: radii"
<< "\n";
}

rendered = CudaRasterizer::Rasterizer::forward(
geomFunc,
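The re-enabled print_tensor_info calls and data_ptr null checks are plain debug instrumentation. A rough Python-side analogue for inspecting the matching rasterizer inputs might look like this; the helper name and printed fields are assumptions, not part of the commit.

def print_tensor_info(t, name):
    # Log shape/dtype/device/contiguity and flag empty tensors, loosely
    # mirroring the C++ print_tensor_info plus the data_ptr null checks above.
    print(f"{name}: shape={tuple(t.shape)} dtype={t.dtype} device={t.device} "
          f"contiguous={t.is_contiguous()}")
    if t.numel() == 0:
        print(f"Empty tensor: {name}")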
