This seems to be a common question for people who are new to PyTorch, and I suppose this is my version of it.
To a more experienced PyTorch user: is it clear why my parameters are not updating as my code loops?
import torch
import numpy as np
# Seed NumPy so the randomly generated shares and prices below are reproducible.
np.random.seed(10)
def optimize(final_shares: torch.Tensor, target_weight, prices, loss_func=None):
    """Return the loss between the portfolio weights implied by *final_shares*
    and *target_weight*.

    Shares are clamped to be non-negative, turned into market values via
    *prices*, and normalized into portfolio weights before the loss is taken.

    Args:
        final_shares: Tensor of share counts; negative entries are clamped to 0.
        target_weight: Tensor of target portfolio weights, same shape as
            ``final_shares``.
        prices: Tensor of per-position prices, same shape as ``final_shares``.
        loss_func: Callable ``(weights, target) -> scalar loss``. When ``None``,
            mean-squared error is used (previously the ``None`` default raised
            ``TypeError`` when the function was called).

    Returns:
        A scalar loss tensor.
    """
    if loss_func is None:
        # Robustness fix: the original default of None crashed at the call site.
        loss_func = torch.nn.functional.mse_loss
    shares = final_shares.clamp(min=0.0)   # no short positions
    mv = shares * prices                   # market value per position
    w = mv / mv.sum()                      # normalize to portfolio weights
    print(w)                               # NOTE: prints on every call, by design of the original script
    return loss_func(w, target_weight)
def main(epochs: int = 10000):
    """Fit share counts so portfolio weights approach a near-equal-weight target.

    Builds a random 16-position portfolio (the last position acts as cash and
    is priced at 1), then runs SGD on the share counts so the resulting
    weights match a target of equal weights plus a small cash buffer.

    Args:
        epochs: Number of SGD iterations. The default of 10000 preserves the
            original hard-coded behavior.
    """
    position_count = 16
    cash_buffer = 0.001
    starting_shares = torch.tensor(
        np.random.uniform(low=1, high=50, size=position_count), dtype=torch.float64
    )
    prices = torch.tensor(
        np.random.uniform(low=1, high=100, size=position_count), dtype=torch.float64
    )
    prices[-1] = 1.0  # last position is cash, priced at 1

    x_param = torch.nn.Parameter(starting_shares, requires_grad=True)

    # Equal weight for every non-cash position; the cash position gets the buffer.
    target_weights = (1 - cash_buffer) / (position_count - 1)
    target_weights_vec = [target_weights] * (position_count - 1)
    target_weights_vec.append(cash_buffer)
    target_weights_vec = torch.tensor(target_weights_vec, dtype=torch.float64)

    loss_func = torch.nn.MSELoss()
    eta = 0.01
    optimizer = torch.optim.SGD([x_param], lr=eta)

    for epoch in range(epochs):
        optimizer.zero_grad()
        loss_incurred = optimize(
            final_shares=x_param, target_weight=target_weights_vec,
            prices=prices, loss_func=loss_func,
        )
        loss_incurred.backward()
        optimizer.step()

    # Final evaluation without gradient tracking. ``.detach()`` under
    # ``torch.no_grad()`` replaces the deprecated ``.data`` attribute,
    # which bypasses autograd's version tracking and is discouraged.
    with torch.no_grad():
        optimize(final_shares=x_param.detach(), target_weight=target_weights_vec,
                 prices=prices, loss_func=loss_func)
# Script entry point: run the optimization only when executed directly.
if __name__ == '__main__':
    main()
Source: https://stackoverflow.com/questions/77711955/pytorch-parameters-are-not-updating
Comments
Post a Comment