Here is a minimal working/reproducible example:
import torch
import torch.nn as nn
from torchsummary import summary
class Network(nn.Module): 
    def __init__(self, channels_img, features_d, num_classes, img_size): 
        super(Network, self).__init__()
        self.img_size = img_size
        self.disc = nn.Conv2d(
            in_channels = channels_img + 1,  # +1 for the label-embedding channel concatenated in forward
            out_channels = features_d, 
            kernel_size = (4,4)
        )
        # Conditional GAN: embed each class label into an image-sized plane
        self.embed = nn.Embedding(
            num_embeddings = num_classes, 
            embedding_dim = img_size * img_size
        )
    def forward(self, x, labels): 
        embedding = self.embed(labels).view(labels.shape[0], 1, self.img_size, self.img_size)
        x = torch.cat([x, embedding], dim = 1)
        return self.disc(x) 
    
# device: 
device = torch.device("cpu")
# hyperparameter: 
batch_size = 64
# Initialize model: 
model = Network(
    channels_img = 1, 
    features_d = 16, 
    num_classes = 10, 
    img_size = 28).to(device) 
# Print model summary: 
summary(
    model, 
    input_size = [(1, 28, 28), (1, 28, 28)], # MNIST
    batch_size = batch_size
)
The error message I get (raised at the summary(...) call) is:
Expected tensor for argument #1 'indices' to have scalar type Long; but got torch.cuda.FloatTensor instead (while checking arguments for embedding)
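As far as I can tell, the embedding layer is what's complaining: nn.Embedding is an index lookup, so it needs integer (Long) indices, but torchsummary seems to feed random float tensors for every entry of input_size (and apparently defaults to device = "cuda" unless told otherwise, which would explain the torch.cuda.FloatTensor in the message even though I run on CPU). A quick standalone check of my own reproduces the dtype complaint:

import torch
import torch.nn as nn

emb = nn.Embedding(num_embeddings = 10, embedding_dim = 4)
# emb(torch.zeros(3))  # FloatTensor indices -> same "scalar type Long" error
print(emb(torch.zeros(3, dtype = torch.int64)).shape)  # LongTensor indices work: torch.Size([3, 4])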
I saw in this post that .to(torch.int64) is supposed to help, but I honestly don't know where to write it.
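My best guess (untested, just a sketch of where the cast could go) is at the top of forward:

    def forward(self, x, labels): 
        labels = labels.to(torch.int64)  # cast whatever summary passes in to Long indices
        embedding = self.embed(labels).view(labels.shape[0], 1, self.img_size, self.img_size)
        x = torch.cat([x, embedding], dim = 1)
        return self.disc(x)

But I'm not sure that is the right place, and I'm also unsure what input_size I should give torchsummary for labels, since during training labels is just a 1-D LongTensor of class indices, not a (1, 28, 28) image.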
Thank you!