This seems to be a common question for people who are new to PyTorch, and I suppose this is my version of it. Is it clear to a more experienced PyTorch user why my parameters are not updating as my code loops? import torch import numpy as np np.random.seed(10) def optimize(final_shares: torch.Tensor, target_weight, prices, loss_func=None): final_shares = final_shares.clamp(0.) mv = torch.multiply(final_shares, prices) w = torch.div(mv, torch.sum(mv)) print(w) return loss_func(w, target_weight) def main(): position_count = 16 cash_buffer = .001 starting_shares = torch.tensor(np.random.uniform(low=1, high=50, size=position_count), dtype=torch.float64) prices = torch.tensor(np.random.uniform(low=1, high=100, size=position_count), dtype=torch.float64) prices[-1] = 1. x_param = torch.nn.Parameter(starting_shares, requires_grad=True) target_weights = ((1 - cash_buffer) / (position_count - 1)) target_weights_vec = [target_weights] * (p
A site where you can share knowledge