@@ -37,21 +37,21 @@
 groundtruth_dir = os.path.join('demo', 'groundtruth')
 os.makedirs(result_dir, exist_ok=True)
 os.makedirs(groundtruth_dir, exist_ok=True)
-for images, masks in tqdm.tqdm(testloader, desc='Demo'):
+for image, target in tqdm.tqdm(testloader, desc='Demo'):
     # Multiply the mask by 255 to map its 0~1 values to 0~255, and remove the channel dimension
-    masks.mul_(255).squeeze_(dim=1)
+    target.mul_(255).squeeze_(dim=1)

-    images, masks = images.to(device), masks.type(torch.LongTensor)
+    image, target = image.to(device), target.type(torch.LongTensor)

     # Prediction
     with torch.no_grad():
-        masks_pred = model(images)
-        masks_pred = F.log_softmax(masks_pred, dim=1)
-        masks_pred = torch.argmax(masks_pred, dim=1)
+        output = model(image)
+        output = F.log_softmax(output, dim=1)
+        output = torch.argmax(output, dim=1)

     # Process one batch at a time
-    assert masks.shape[0] == masks_pred.shape[0]
-    for i in range(masks.shape[0]):
-        plt.imsave(os.path.join(result_dir, image_names[step]), masks_pred[i].cpu(), cmap=cmap)
-        plt.imsave(os.path.join(groundtruth_dir, image_names[step]), masks[i], cmap=cmap)
+    assert target.shape[0] == output.shape[0]
+    for i in range(target.shape[0]):
+        plt.imsave(os.path.join(result_dir, image_names[step]), output[i].cpu(), cmap=cmap)
+        plt.imsave(os.path.join(groundtruth_dir, image_names[step]), target[i], cmap=cmap)
     step += 1
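The loop in this hunk relies on names set up earlier in the script (device, model, testloader, image_names, cmap, result_dir, step), none of which appear in the diff. The snippet below is a minimal sketch of that assumed context, using a placeholder 1x1-conv model and a random tensor dataset purely for illustration; it is not the repository's actual setup.

# Assumed context for the demo loop above; the placeholder model, random
# dataset, colormap, and file names are illustrative stand-ins only.
import os

import matplotlib.pyplot as plt   # used by plt.imsave in the loop
import torch
import torch.nn as nn
import torch.nn.functional as F   # used by F.log_softmax in the loop
import torch.utils.data as data
import tqdm                       # used by tqdm.tqdm in the loop

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

num_classes = 21
# Stand-in "network": a 1x1 conv mapping RGB pixels to per-class scores (N, C, H, W).
model = nn.Conv2d(3, num_classes, kernel_size=1).to(device).eval()

# Stand-in test data: random RGB images paired with single-channel masks in [0, 1].
# batch_size=1 keeps image_names[step] aligned, since step advances once per batch.
dataset = data.TensorDataset(torch.rand(4, 3, 64, 64),
                             torch.randint(0, 2, (4, 1, 64, 64)).float())
testloader = data.DataLoader(dataset, batch_size=1)

image_names = [f'{i:04d}.png' for i in range(len(dataset))]  # one output name per sample
cmap = 'viridis'                  # any matplotlib colormap accepted by plt.imsave
result_dir = os.path.join('demo', 'result')
step = 0

One side note on the prediction step: because torch.argmax is applied afterwards and log-softmax is monotonic per pixel, the F.log_softmax call does not change the predicted class indices, so the same argmax could be taken directly on the raw model output.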