|
|
''' |
|
|
NBRDF MLP model |
|
|
|
|
|
Input: Cartesian coordinates of the positional samples
|
|
(1: theta_h, 2: theta_d, 3: phi_d, 4: phi_h = 0) -> (hx, hy, hz, dx, dy, dz) |
|
|
Output: MERL reflectance value |
|
|
|
|
|
- input_size 6 |
|
|
- hidden_size 21 |
|
|
- hidden_layer 3 |
|
|
- output_size 3 |
|
|
|
|
|
@author |
|
|
Copyright (c) 2024-2025 Peter HU. |
|
|
|
|
|
@file |
|
|
reference: https://github.com/asztr/Neural-BRDF |
|
|
|
|
|
''' |
|
|
|
|
|
import sys |
|
|
import path |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
import random |
|
|
|
|
|
|
|
|
# Select the best available accelerator: CUDA > Apple MPS > CPU.
# Every branch yields a plain string so torch.device() performs the single
# conversion (the original mixed a str with a torch.device object).
device = torch.device(
    "cuda" if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available()
    else "cpu")
|
|
|
|
|
|
|
|
class MLP(nn.Module):
    '''PyTorch NBRDF MLP model.

    Architecture: fc1 -> leaky_relu -> fc2 -> leaky_relu -> fc3
    -> relu(exp(x) - 1), mapping 6-D half/difference-vector coordinates
    (hx, hy, hz, dx, dy, dz) to a 3-channel MERL reflectance value.
    '''

    def __init__(self, input_size, hidden_size, output_size) -> None:
        '''Build the three linear layers and deterministically initialize them.

        Args:
            input_size:  number of input features (6 per the module docstring).
            hidden_size: width of the two hidden layers (21 per the docstring).
            output_size: number of output channels (3 per the docstring).
        '''
        super().__init__()

        self.fc1 = nn.Linear(input_size, hidden_size, bias=True)
        self.fc2 = nn.Linear(hidden_size, hidden_size, bias=True)
        self.fc3 = nn.Linear(hidden_size, output_size, bias=True)

        # Deterministic initialization: biases zero, weights uniform in
        # [0, 0.02].  A dedicated generator seeded with 0 reproduces the
        # exact stream the original obtained via torch.manual_seed(0),
        # but WITHOUT clobbering the global torch RNG state as a hidden
        # side effect of constructing a model.  The original's
        # random.seed(0) is dropped: nothing in this init draws from the
        # `random` module, so it only mutated global state.
        gen = torch.Generator().manual_seed(0)
        with torch.no_grad():
            for layer in (self.fc1, self.fc2, self.fc3):
                layer.bias.zero_()
                layer.weight.uniform_(0.0, 0.02, generator=gen)

    def forward(self, x):
        '''Forward pass.

        Args:
            x: input tensor whose last dimension has ``input_size`` features.

        Returns:
            Non-negative tensor whose last dimension has ``output_size``
            channels.
        '''
        out = F.leaky_relu(self.fc1(x))
        out = F.leaky_relu(self.fc2(out))
        out = self.fc3(out)
        # exp(out) - 1 presumably inverts a log(1 + x) transform applied to
        # the MERL targets (see the reference repo); relu clamps any small
        # negative values so the predicted reflectance stays >= 0.
        return F.relu(torch.exp(out) - 1.0)
|
|
|