Check the Frobenius norm of the model and of each layer. The Frobenius norm of a weight tensor W is the square root of the sum of squares of all its entries, ||W||_F = sqrt(sum_ij W_ij^2); for the full model, the sum runs over every parameter before taking the square root.
import torch
import torch.nn as nn


def calculate_frobenius_norm(model):
    """Calculate the Frobenius norm of a model and of each of its layers.

    Args:
        model: nn.Module
            Neural network instance

    Returns:
        norm: float
            Frobenius norm of the full model
        ws: list
            Frobenius norm of each parameter tensor
        labels: list
            Name of each parameter tensor
    """
    # Initialization of variables
    norm, ws, labels = 0.0, [], []

    # Accumulate the sum of squares over all parameters,
    # recording the per-tensor norm and name along the way
    for name, parameters in model.named_parameters():
        p = torch.sum(parameters**2)
        norm += p
        ws.append((p**0.5).cpu().detach().numpy())
        labels.append(name)

    # Take the square root of the sum of squares of all the parameters
    norm = (norm**0.5).cpu().detach().numpy()

    return norm, ws, labels


# `set_seed` and `plot_weights` are helper functions defined earlier in the notebook
set_seed(SEED)
net = nn.Linear(10, 1)
norm, ws, labels = calculate_frobenius_norm(net)
print(f'Frobenius norm of Single Linear Layer: {norm:.4f}')

# Plot the per-layer norms
plot_weights(norm, labels, ws)
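As a quick sanity check, the model-level value should agree with PyTorch's built-in norm. Below is a minimal sketch (assuming the `calculate_frobenius_norm` defined above): flattening every parameter into one vector and taking its 2-norm with `torch.linalg.norm` yields the same quantity.

import torch
import torch.nn as nn

# Sanity check: the model-level Frobenius norm equals the 2-norm of all
# parameters concatenated into a single flat vector
net = nn.Linear(10, 1)
norm, _, _ = calculate_frobenius_norm(net)

flat = torch.cat([p.flatten() for p in net.parameters()])
reference = torch.linalg.norm(flat).item()

print(f'calculate_frobenius_norm: {float(norm):.6f}')
print(f'torch.linalg.norm:        {reference:.6f}')
assert abs(float(norm) - reference) < 1e-5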