I extracted feature maps using ResNet-18, then quantized (segmented) them using K-means. Now I want to visualize the quantized feature maps (cluster labels) on top of the input image. Does anyone have an idea how I can do this?
import numpy as np
import torch
from torchvision import models, transforms
from sklearn.cluster import KMeans
from PIL import Image
import matplotlib.pyplot as plt

# Feature extractor: ResNet-18 without the average-pooling and fc layers,
# so a 224x224 input produces a (512, 7, 7) feature map.
model = models.resnet18(weights='ResNet18_Weights.DEFAULT')
feature_extractor = torch.nn.Sequential(*list(model.children())[:-2])
feature_extractor.eval()

image_path = 'image.jpg'
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=0., std=1.),
])
image = transform(Image.open(image_path)).unsqueeze(0)

with torch.no_grad():
    feature_maps = feature_extractor(image)   # shape (1, 512, 7, 7)

feature = feature_maps.squeeze(0)              # (512, 7, 7)
feature = feature.view(512, -1)                # (512, 49)
feature = feature.detach().numpy()
feature = np.transpose(feature)                # (49, 512): one 512-d vector per spatial location

# K-means quantization of the 49 spatial feature vectors
num_clusters = 10
kmeans = KMeans(n_clusters=num_clusters, n_init='auto', random_state=0).fit(feature)
labels = kmeans.labels_
labels = labels.reshape(7, 7)

plt.imshow(labels)
plt.show()
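The kind of overlay I have in mind looks roughly like the sketch below (I am not sure this is the right approach). It assumes the labels and image_path variables from the snippet above: each cell of the 7x7 label grid is expanded to a 32x32 block (nearest-neighbour upsampling to 224x224) and drawn semi-transparently over the resized input image.

# Rough sketch of the overlay: nearest-neighbour upsampling of the 7x7 label
# grid to 224x224, then a semi-transparent colour map on top of the image.
upsampled = np.kron(labels, np.ones((32, 32)))   # 7 * 32 = 224

img = Image.open(image_path).resize((224, 224))
plt.imshow(img)
plt.imshow(upsampled, cmap='jet', alpha=0.5)     # cluster labels as transparent colours
plt.axis('off')
plt.show()

Is something like this the usual way to do it, or is there a cleaner method (e.g. interpolating the label map differently)?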