Use tqdm.write in order to not break the progress bar

cli
otthorn 3 years ago
parent 2d69c119a3
commit 475ef1ad23

@@ -526,7 +526,7 @@ for iteration in range(n_iter):
# In case we reached the last 75% of iterations,
# decrease the learning rate of the optimizer 10-fold.
if iteration == round(n_iter * 0.75):
-        print("Decreasing LR 10-fold ...")
+        tqdm.write("Decreasing LR 10-fold ...")
optimizer = torch.optim.Adam(
neural_radiance_field.parameters(), lr=lr * 0.1
)
@@ -596,7 +596,7 @@ for iteration in range(n_iter):
# Every 10 iterations, print the current values of the losses.
if iteration % 10 == 0:
-        print(
+        tqdm.write(
f"Iteration {iteration:05d}:"
+ f" loss color = {float(color_err):1.2e}"
+ f" loss silhouette = {float(sil_err):1.2e}"

Loading…
Cancel
Save