Use tqdm.write in order not to break the progress bar

This commit is contained in:
otthorn 2021-04-30 11:46:53 +02:00
parent 2d69c119a3
commit 475ef1ad23

View file

@@ -526,7 +526,7 @@ for iteration in range(n_iter):
     # In case we reached the last 75% of iterations,
     # decrease the learning rate of the optimizer 10-fold.
     if iteration == round(n_iter * 0.75):
-        print("Decreasing LR 10-fold ...")
+        tqdm.write("Decreasing LR 10-fold ...")
         optimizer = torch.optim.Adam(
             neural_radiance_field.parameters(), lr=lr * 0.1
         )
@@ -596,7 +596,7 @@ for iteration in range(n_iter):
     # Every 10 iterations, print the current values of the losses.
     if iteration % 10 == 0:
-        print(
+        tqdm.write(
             f"Iteration {iteration:05d}:"
             + f" loss color = {float(color_err):1.2e}"
             + f" loss silhouette = {float(sil_err):1.2e}"