I am trying to train a bert-base-multilingual-uncased model for a task. I have all the required files present in my dataset including the config.json bert file but when I run the model it gives an error.
Config
class config:
    DEVICE = "cuda:2"
    MAX_LEN = 256
    TRAIN_BATCH_SIZE = 8
    VALID_BATCH_SIZE = 4
    EPOCHS = 1
    BERT_PATH = "workspace/data/jigsaw-multilingual/input/bert-base-multilingual-uncased"
    MODEL_PATH = "workspace/data/jigsaw-multilingual/model.bin"
    TOKENIZER = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True)
Model
class BERTBaseUncased(nn.Module):
    def __init__(self):
        super(BERTBaseUncased, self).__init__()
        self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH)
        self.bert_drop = nn.Dropout(0.3)
        self.out = nn.Linear(768 * 2, 1) # *2 since we have 2 pooling layers
    def forward(self, ids, mask, token_type_ids):
        o1, _ = self.bert(ids, attention_mask=mask, token_type_ids=token_type_ids)
        
        mean_pooling = torch.mean(o1, 1)
        max_pooling, _ = torch.max(o1, 1)
        cat = torch.cat((mean_pooling, max_pooling), 1)
        
        bo = self.bert_drop(cat)
        output = self.out(bo)
        return output
Error
---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/transformers/configuration_utils.py in get_config_dict(cls, pretrained_model_name_or_path, **kwargs)
    241             if resolved_config_file is None:
--> 242                 raise EnvironmentError
    243             config_dict = cls._dict_from_json_file(resolved_config_file)
OSError: 
During handling of the above exception, another exception occurred:
OSError                                   Traceback (most recent call last)
<ipython-input-64-9f2999c88020> in <module>
     79 
     80 if __name__ == "__main__":
---> 81     run()
<ipython-input-64-9f2999c88020> in run()
     38 
     39     device = torch.device(config.DEVICE)
---> 40     model = BERTBaseUncased()
     41     model.to(device)
     42 
<ipython-input-60-8e1508eac60a> in __init__(self)
      2     def __init__(self):
      3         super(BERTBaseUncased, self).__init__()
----> 4         self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH)
      5         self.bert_drop = nn.Dropout(0.3)
      6         self.out = nn.Linear(768 * 2, 1) # *2 since we have 2 pooling layers
/opt/conda/lib/python3.6/site-packages/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
    601                 proxies=proxies,
    602                 local_files_only=local_files_only,
--> 603                 **kwargs,
    604             )
    605         else:
/opt/conda/lib/python3.6/site-packages/transformers/configuration_utils.py in from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
    198 
    199         """
--> 200         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
    201         return cls.from_dict(config_dict, **kwargs)
    202 
/opt/conda/lib/python3.6/site-packages/transformers/configuration_utils.py in get_config_dict(cls, pretrained_model_name_or_path, **kwargs)
    249                 f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
    250             )
--> 251             raise EnvironmentError(msg)
    252 
    253         except json.JSONDecodeError:
OSError: Can't load config for 'workspace/data/jigsaw-multilingual/input/bert-base-multilingual-uncased'. Make sure that:
- 'workspace/data/jigsaw-multilingual/input/bert-base-multilingual-uncased' is a correct model identifier listed on 'https://huggingface.co/models'
- or 'workspace/data/jigsaw-multilingual/input/bert-base-multilingual-uncased' is the correct path to a directory containing a config.json file
These are the files present in my bert dataset:
-> config.json
->pytorch_model.bin
-> vocab.txt
How to fix this issue?
 
    